[VPU][GT][Tests] Make gemmTranspose pass layout agnostic (#2666)
author Andrew Bakalin <andrew.bakalin@intel.com>
Thu, 22 Oct 2020 12:04:53 +0000 (15:04 +0300)
committer GitHub <noreply@github.com>
Thu, 22 Oct 2020 12:04:53 +0000 (15:04 +0300)
* [VPU][GT] Make gemmTranspose pass layout agnostic

* [IE][Tests] Improve MatMul common test class

* [VPU][Tests] Add tests for MatMul

* [VPU][Tests] Review fixes

* [Tests] Add combineShapes for MatMul

* [VPU][GT] Fix assertion condition

inference-engine/src/vpu/graph_transformer/src/middleend/passes/gemm_transpose.cpp
inference-engine/src/vpu/graph_transformer/src/stages/gemm.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/mat_mul.cpp
inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/mat_mul.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mat_mul.hpp
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/mat_mul.cpp

index 2def48f..643c553 100644 (file)
@@ -56,23 +56,36 @@ void PassImpl::run(const Model& model) {
 
         const auto inputDimsA = inputA->desc().dims();
 
-        const auto K = inputDimsA[Dim::H];
-        const auto M = inputDimsA[Dim::W];
-        const auto batch1 = inputDimsA[Dim::N];
-        const auto batch2 = inputDimsA[Dim::C];
+        VPU_THROW_UNLESS(inputDimsA.size() >= 2 && inputDimsA.size() <= 4,
+            "Processing layer {} with type {} failed: first inputs' ({} with usage {}) dimensions number should be in range [2, 4], but it actually has {}",
+            stage->name(), stage->type(), inputA->name(), inputA->usage(), inputDimsA.size());
 
-        const auto inputATranspose = model->duplicateData(inputA, "@reshape", DataDesc{K, M, batch2, batch1});
+        const auto perm = DimsOrder::fromNumDims(inputDimsA.size()).toPermutation();
+
+        std::vector<int> batchDims;
+        DimValues_<Dim> permMap = { {perm[0], perm[1]}, {perm[1], perm[0]} };
+        for (std::size_t i = 2; i < inputDimsA.size(); i++) {
+            batchDims.push_back(inputDimsA[perm[i]]);
+            permMap.set(perm[i], perm[i]);
+        }
+
+        std::vector<int> transposedDims = {inputDimsA[perm[1]], inputDimsA[perm[0]]};
+        transposedDims.insert(transposedDims.end(), batchDims.begin(), batchDims.end());
+
+        const auto inputATranspose = model->duplicateData(inputA, "@reshape", DataDesc{transposedDims});
 
         stage->attrs().set<bool>("transposeA", false);
         model->replaceStageInput(stage->inputEdge(0), inputATranspose);
 
+
+
         _stageBuilder->addPermuteStage(
             model,
             stage->name() + "@transpose",
             stage->origLayer(),
             inputA,
             inputATranspose,
-            DimValues_<Dim>{{Dim::W, Dim::H}, {Dim::H, Dim::W}, {Dim::D, Dim::D}, {Dim::C, Dim::C}, {Dim::N, Dim::N}});
+            permMap);
     }
 }
 
index 01a098d..13b2669 100644 (file)
@@ -99,6 +99,19 @@ void FrontEnd::parseGEMM(const Model& model, const ie::CNNLayerPtr& _layer, cons
     IE_ASSERT(inputs.size() == 2 || inputs.size() == 3);
     IE_ASSERT(outputs.size() == 1);
 
+    const auto input1 = inputs[0];
+    const auto input2 = inputs[1];
+
+    VPU_THROW_UNLESS(input1->desc().numDims() >= 2 && input1->desc().numDims() <= 4,
+        "Processing layer {} with type {} failed: first inputs' ({} with usage {}) dimensions number should be in range [2, 4], but it actually has {}",
+        _layer->name, _layer->type, input1->name(), input1->usage(), input1->desc().numDims());
+    VPU_THROW_UNLESS(input2->desc().numDims() >= 2 && input2->desc().numDims() <= 4,
+        "Processing layer {} with type {} failed: second inputs' ({} with usage {}) dimensions number should be in range [2, 4], but it actually has {}",
+        _layer->name, _layer->type, input2->name(), input2->usage(), input2->desc().numDims());
+    VPU_THROW_UNLESS(inputs.size() < 3 || (inputs[2]->desc().numDims() >= 2 && inputs[2]->desc().numDims() <= 4),
+        "Processing layer {} with type {} failed: third inputs' ({} with usage {}) dimensions number should be in range [2, 4], but it actually has {}",
+        _layer->name, _layer->type, inputs[2]->name(), inputs[2]->usage(), inputs[2]->desc().numDims());
+
     auto layer = std::dynamic_pointer_cast<ie::GemmLayer>(_layer);
     IE_ASSERT(layer != nullptr);
 
index 46abeb3..33558b2 100644 (file)
@@ -14,12 +14,8 @@ const std::vector<InferenceEngine::Precision> inputPrecisions = {
         InferenceEngine::Precision::FP32
 };
 
-const std::vector<std::vector<size_t>> shapesA = {
-        {1, 4, 5, 6}
-};
-
-const std::vector<std::vector<size_t>> shapesB = {
-        {1, 4, 6, 4}
+const std::vector<ShapeRelatedParams> shapeRelatedParams = {
+        { { {1, 4, 5, 6}, false }, { {1, 4, 6, 4}, false } }
 };
 
 std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
@@ -27,18 +23,18 @@ std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
         ngraph::helpers::InputLayerType::PARAMETER,
 };
 
+std::map<std::string, std::string> additional_config = {};
+
 INSTANTIATE_TEST_CASE_P(smoke_MatMul, MatMulTest,
         ::testing::Combine(
+                ::testing::ValuesIn(shapeRelatedParams),
                 ::testing::ValuesIn(inputPrecisions),
                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                 ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
                 ::testing::Values(InferenceEngine::Layout::ANY),
-                ::testing::ValuesIn(shapesA),
-                ::testing::ValuesIn(shapesB),
-                ::testing::Values(false),
-                ::testing::Values(false),
                 ::testing::ValuesIn(secondaryInputTypes),
-                ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                ::testing::Values(CommonTestUtils::DEVICE_CPU),
+                ::testing::Values(additional_config)),
         MatMulTest::getTestCaseName);
 
 } // namespace
diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/mat_mul.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/mat_mul.cpp
new file mode 100644 (file)
index 0000000..f8871b6
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests/mat_mul.hpp"
+#include <vpu/private_plugin_config.hpp>
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+typedef std::map<std::string, std::string> Config;
+
+const std::vector<InferenceEngine::Precision> inputPrecisions = {
+        InferenceEngine::Precision::FP32
+};
+
+const std::vector<ShapeRelatedParams> shapeRelatedParams = {
+        { { {1, 2, 7, 5}, true }, { {1, 2, 7, 11}, false } },
+        { { {10, 1, 1, 16}, false }, { {10, 1, 16, 1024}, false } },
+        { { {1, 5, 3}, true }, { {1, 5, 6}, false } },
+        { { {12, 8, 17}, false }, { {12, 17, 32}, false } },
+        { { {6, 128, 128}, false }, { {6, 128, 128}, false } },
+        { { {128, 384}, true }, { {128, 384}, false } },
+        { { {384, 128}, false }, { {372, 128}, true } },
+        { { {1, 2, 128, 384}, true }, { {1, 2, 128, 372}, false } },
+        { { {4, 3}, true }, { {5, 4}, true } },
+};
+
+Config additionalConfig = {
+        {InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH, CONFIG_VALUE(NO)}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_MatMul, MatMulTest,
+        ::testing::Combine(
+            ::testing::ValuesIn(shapeRelatedParams),
+            ::testing::ValuesIn(inputPrecisions),
+            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+            ::testing::Values(InferenceEngine::Layout::ANY),
+            ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
+            ::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
+            ::testing::Values(additionalConfig)),
+        MatMulTest::getTestCaseName);
+
+} // namespace
index 31db51f..16ffc26 100644 (file)
 
 #include "functional_test_utils/layer_test_utils.hpp"
 
+struct ShapeRelatedParams {
+    std::pair<InferenceEngine::SizeVector, bool> input1, input2;
+};
+
 typedef std::tuple<
-        InferenceEngine::Precision,
-        InferenceEngine::Precision,    // Input precision
-        InferenceEngine::Precision,    // Output precision
-        InferenceEngine::Layout,       // Input layout
-        InferenceEngine::SizeVector,
-        InferenceEngine::SizeVector,
-        bool,
-        bool,
-        ngraph::helpers::InputLayerType,
-        LayerTestsUtils::TargetDevice
+        ShapeRelatedParams,
+        InferenceEngine::Precision,        // Network precision
+        InferenceEngine::Precision,        // Input precision
+        InferenceEngine::Precision,        // Output precision
+        InferenceEngine::Layout,           // Input layout
+        ngraph::helpers::InputLayerType,   // Secondary input type
+        LayerTestsUtils::TargetDevice,     // Device name
+        std::map<std::string, std::string> // Additional network configuration
 > MatMulLayerTestParamsSet;
 
 namespace LayerTestsDefinitions {
@@ -29,6 +31,10 @@ namespace LayerTestsDefinitions {
 class MatMulTest : public testing::WithParamInterface<MatMulLayerTestParamsSet>, virtual public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<MatMulLayerTestParamsSet> &obj);
+    static std::vector<ShapeRelatedParams> combineShapes(const std::vector<std::vector<size_t>>& firstInputShapes,
+                                                         const std::vector<std::vector<size_t>>& secondInputShapes,
+                                                         bool transposeA,
+                                                         bool transposeB);
 
 protected:
     void SetUp() override;
index 9769e6a..0d05760 100644 (file)
 
 namespace LayerTestsDefinitions {
 
+std::vector<ShapeRelatedParams> MatMulTest::combineShapes(const std::vector<std::vector<size_t>>& firstInputShapes,
+                                                          const std::vector<std::vector<size_t>>& secondInputShapes,
+                                                          bool transposeA,
+                                                          bool transposeB) {
+    std::vector<ShapeRelatedParams> resVec;
+    for (const auto& firstInputShape : firstInputShapes) {
+        for (const auto& secondInputShape : secondInputShapes) {
+            resVec.push_back(ShapeRelatedParams{ {firstInputShape, transposeA}, {secondInputShape, transposeB } });
+        }
+    }
+    return resVec;
+}
+
 std::string MatMulTest::getTestCaseName(const testing::TestParamInfo<MatMulLayerTestParamsSet> &obj) {
     InferenceEngine::Precision netPrecision;
     InferenceEngine::Precision inPrc, outPrc;
     InferenceEngine::Layout inLayout;
-    InferenceEngine::SizeVector inputShape0;
-    InferenceEngine::SizeVector inputShape1;
-    bool transpose_a;
-    bool transpose_b;
+    ShapeRelatedParams shapeRelatedParams;
     ngraph::helpers::InputLayerType secondaryInputType;
     std::string targetDevice;
-    std::tie(netPrecision, inPrc, outPrc, inLayout, inputShape0, inputShape1, transpose_a, transpose_b, secondaryInputType, targetDevice) =
+    std::map<std::string, std::string> additionalConfig;
+    std::tie(shapeRelatedParams, netPrecision, inPrc, outPrc, inLayout, secondaryInputType, targetDevice, additionalConfig) =
         obj.param;
 
     std::ostringstream result;
-    result << "IS0=" << CommonTestUtils::vec2str(inputShape0) << "_";
-    result << "IS1=" << CommonTestUtils::vec2str(inputShape1) << "_";
-    result << "transpose_a=" << transpose_a << "_";
-    result << "transpose_b=" << transpose_b << "_";
+    result << "IS0=" << CommonTestUtils::vec2str(shapeRelatedParams.input1.first) << "_";
+    result << "IS1=" << CommonTestUtils::vec2str(shapeRelatedParams.input2.first) << "_";
+    result << "transpose_a=" << shapeRelatedParams.input1.second << "_";
+    result << "transpose_b=" << shapeRelatedParams.input2.second << "_";
     result << "secondaryInputType=" << secondaryInputType << "_";
     result << "netPRC=" << netPrecision.name() << "_";
     result << "inPRC=" << inPrc.name() << "_";
     result << "outPRC=" << outPrc.name() << "_";
     result << "inL=" << inLayout << "_";
     result << "trgDev=" << targetDevice;
+    result << "config=(";
    for (const auto& configEntry : additionalConfig) {
+        result << configEntry.first << ", " << configEntry.second << ":";
+    }
+    result << ")";
     return result.str();
 }
 
 void MatMulTest::SetUp() {
-    InferenceEngine::SizeVector inputShape0;
-    InferenceEngine::SizeVector inputShape1;
-    bool transpose_a;
-    bool transpose_b;
+    ShapeRelatedParams shapeRelatedParams;
     ngraph::helpers::InputLayerType secondaryInputType;
     auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
-    std::tie(netPrecision, inPrc, outPrc, inLayout, inputShape0, inputShape1, transpose_a, transpose_b, secondaryInputType, targetDevice) =
+    std::map<std::string, std::string> additionalConfig;
+    std::tie(shapeRelatedParams, netPrecision, inPrc, outPrc, inLayout, secondaryInputType, targetDevice, additionalConfig) =
         this->GetParam();
+
+    configuration.insert(additionalConfig.begin(), additionalConfig.end());
+
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    auto params = ngraph::builder::makeParams(ngPrc, {inputShape0});
+    auto params = ngraph::builder::makeParams(ngPrc, {shapeRelatedParams.input1.first});
 
-    auto secondaryInput = ngraph::builder::makeInputLayer(ngPrc, secondaryInputType, inputShape1);
+    auto secondaryInput = ngraph::builder::makeInputLayer(ngPrc, secondaryInputType, shapeRelatedParams.input2.first);
     if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) {
         params.push_back(std::dynamic_pointer_cast<ngraph::opset3::Parameter>(secondaryInput));
     }
     auto paramOuts = ngraph::helpers::convert2OutputVector(
             ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
     auto MatMul = std::dynamic_pointer_cast<ngraph::opset3::MatMul>(
-            ngraph::builder::makeMatMul(paramOuts[0], secondaryInput, transpose_a, transpose_b));
+            ngraph::builder::makeMatMul(paramOuts[0], secondaryInput, shapeRelatedParams.input1.second, shapeRelatedParams.input2.second));
     ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(MatMul)};
     function = std::make_shared<ngraph::Function>(results, params, "MatMul");
 }