const auto inputDimsA = inputA->desc().dims();
- const auto K = inputDimsA[Dim::H];
- const auto M = inputDimsA[Dim::W];
- const auto batch1 = inputDimsA[Dim::N];
- const auto batch2 = inputDimsA[Dim::C];
+ VPU_THROW_UNLESS(inputDimsA.size() >= 2 && inputDimsA.size() <= 4,
+ "Processing layer {} with type {} failed: first input's ({} with usage {}) dimensions number should be in range [2, 4], but it actually has {}",
+ stage->name(), stage->type(), inputA->name(), inputA->usage(), inputDimsA.size());
- const auto inputATranspose = model->duplicateData(inputA, "@reshape", DataDesc{K, M, batch2, batch1});
+ const auto perm = DimsOrder::fromNumDims(inputDimsA.size()).toPermutation();
+
+ std::vector<int> batchDims;
+ DimValues_<Dim> permMap = { {perm[0], perm[1]}, {perm[1], perm[0]} };
+ for (std::size_t i = 2; i < inputDimsA.size(); i++) {
+ batchDims.push_back(inputDimsA[perm[i]]);
+ permMap.set(perm[i], perm[i]);
+ }
+
+ std::vector<int> transposedDims = {inputDimsA[perm[1]], inputDimsA[perm[0]]};
+ transposedDims.insert(transposedDims.end(), batchDims.begin(), batchDims.end());
+
+ const auto inputATranspose = model->duplicateData(inputA, "@reshape", DataDesc{transposedDims});
stage->attrs().set<bool>("transposeA", false);
model->replaceStageInput(stage->inputEdge(0), inputATranspose);
+
+
_stageBuilder->addPermuteStage(
model,
stage->name() + "@transpose",
stage->origLayer(),
inputA,
inputATranspose,
- DimValues_<Dim>{{Dim::W, Dim::H}, {Dim::H, Dim::W}, {Dim::D, Dim::D}, {Dim::C, Dim::C}, {Dim::N, Dim::N}});
+ permMap);
}
}
IE_ASSERT(inputs.size() == 2 || inputs.size() == 3);
IE_ASSERT(outputs.size() == 1);
+ const auto input1 = inputs[0];
+ const auto input2 = inputs[1];
+
+ VPU_THROW_UNLESS(input1->desc().numDims() >= 2 && input1->desc().numDims() <= 4,
+ "Processing layer {} with type {} failed: first input's ({} with usage {}) dimensions number should be in range [2, 4], but it actually has {}",
+ _layer->name, _layer->type, input1->name(), input1->usage(), input1->desc().numDims());
+ VPU_THROW_UNLESS(input2->desc().numDims() >= 2 && input2->desc().numDims() <= 4,
+ "Processing layer {} with type {} failed: second input's ({} with usage {}) dimensions number should be in range [2, 4], but it actually has {}",
+ _layer->name, _layer->type, input2->name(), input2->usage(), input2->desc().numDims());
+ VPU_THROW_UNLESS(inputs.size() < 3 || (inputs[2]->desc().numDims() >= 2 && inputs[2]->desc().numDims() <= 4),
+ "Processing layer {} with type {} failed: third input's ({} with usage {}) dimensions number should be in range [2, 4], but it actually has {}",
+ _layer->name, _layer->type, inputs[2]->name(), inputs[2]->usage(), inputs[2]->desc().numDims());
+
auto layer = std::dynamic_pointer_cast<ie::GemmLayer>(_layer);
IE_ASSERT(layer != nullptr);
InferenceEngine::Precision::FP32
};
-const std::vector<std::vector<size_t>> shapesA = {
- {1, 4, 5, 6}
-};
-
-const std::vector<std::vector<size_t>> shapesB = {
- {1, 4, 6, 4}
+const std::vector<ShapeRelatedParams> shapeRelatedParams = {
+ { { {1, 4, 5, 6}, false }, { {1, 4, 6, 4}, false } }
};
std::vector<ngraph::helpers::InputLayerType> secondaryInputTypes = {
ngraph::helpers::InputLayerType::PARAMETER,
};
+std::map<std::string, std::string> additional_config = {};
+
INSTANTIATE_TEST_CASE_P(smoke_MatMul, MatMulTest,
::testing::Combine(
+ ::testing::ValuesIn(shapeRelatedParams),
::testing::ValuesIn(inputPrecisions),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
::testing::Values(InferenceEngine::Layout::ANY),
- ::testing::ValuesIn(shapesA),
- ::testing::ValuesIn(shapesB),
- ::testing::Values(false),
- ::testing::Values(false),
::testing::ValuesIn(secondaryInputTypes),
- ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+ ::testing::Values(CommonTestUtils::DEVICE_CPU),
+ ::testing::Values(additional_config)),
MatMulTest::getTestCaseName);
} // namespace
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests/mat_mul.hpp"
+#include <vpu/private_plugin_config.hpp>
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+typedef std::map<std::string, std::string> Config;
+
+const std::vector<InferenceEngine::Precision> inputPrecisions = {
+ InferenceEngine::Precision::FP32
+};
+
+const std::vector<ShapeRelatedParams> shapeRelatedParams = {
+ { { {1, 2, 7, 5}, true }, { {1, 2, 7, 11}, false } },
+ { { {10, 1, 1, 16}, false }, { {10, 1, 16, 1024}, false } },
+ { { {1, 5, 3}, true }, { {1, 5, 6}, false } },
+ { { {12, 8, 17}, false }, { {12, 17, 32}, false } },
+ { { {6, 128, 128}, false }, { {6, 128, 128}, false } },
+ { { {128, 384}, true }, { {128, 384}, false } },
+ { { {384, 128}, false }, { {372, 128}, true } },
+ { { {1, 2, 128, 384}, true }, { {1, 2, 128, 372}, false } },
+ { { {4, 3}, true }, { {5, 4}, true } },
+};
+
+Config additionalConfig = {
+ {InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH, CONFIG_VALUE(NO)}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_MatMul, MatMulTest,
+ ::testing::Combine(
+ ::testing::ValuesIn(shapeRelatedParams),
+ ::testing::ValuesIn(inputPrecisions),
+ ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+ ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
+ ::testing::Values(InferenceEngine::Layout::ANY),
+ ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
+ ::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
+ ::testing::Values(additionalConfig)),
+ MatMulTest::getTestCaseName);
+
+} // namespace
#include "functional_test_utils/layer_test_utils.hpp"
+struct ShapeRelatedParams {
+ std::pair<InferenceEngine::SizeVector, bool> input1, input2;
+};
+
typedef std::tuple<
- InferenceEngine::Precision,
- InferenceEngine::Precision, // Input precision
- InferenceEngine::Precision, // Output precision
- InferenceEngine::Layout, // Input layout
- InferenceEngine::SizeVector,
- InferenceEngine::SizeVector,
- bool,
- bool,
- ngraph::helpers::InputLayerType,
- LayerTestsUtils::TargetDevice
+ ShapeRelatedParams,
+ InferenceEngine::Precision, // Network precision
+ InferenceEngine::Precision, // Input precision
+ InferenceEngine::Precision, // Output precision
+ InferenceEngine::Layout, // Input layout
+ ngraph::helpers::InputLayerType, // Secondary input type
+ LayerTestsUtils::TargetDevice, // Device name
+ std::map<std::string, std::string> // Additional network configuration
> MatMulLayerTestParamsSet;
namespace LayerTestsDefinitions {
class MatMulTest : public testing::WithParamInterface<MatMulLayerTestParamsSet>, virtual public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(const testing::TestParamInfo<MatMulLayerTestParamsSet> &obj);
+ static std::vector<ShapeRelatedParams> combineShapes(const std::vector<std::vector<size_t>>& firstInputShapes,
+ const std::vector<std::vector<size_t>>& secondInputShapes,
+ bool transposeA,
+ bool transposeB);
protected:
void SetUp() override;
namespace LayerTestsDefinitions {
+std::vector<ShapeRelatedParams> MatMulTest::combineShapes(const std::vector<std::vector<size_t>>& firstInputShapes,
+ const std::vector<std::vector<size_t>>& secondInputShapes,
+ bool transposeA,
+ bool transposeB) {
+ std::vector<ShapeRelatedParams> resVec;
+ for (const auto& firstInputShape : firstInputShapes) {
+ for (const auto& secondInputShape : secondInputShapes) {
+ resVec.push_back(ShapeRelatedParams{ {firstInputShape, transposeA}, {secondInputShape, transposeB } });
+ }
+ }
+ return resVec;
+}
+
std::string MatMulTest::getTestCaseName(const testing::TestParamInfo<MatMulLayerTestParamsSet> &obj) {
InferenceEngine::Precision netPrecision;
InferenceEngine::Precision inPrc, outPrc;
InferenceEngine::Layout inLayout;
- InferenceEngine::SizeVector inputShape0;
- InferenceEngine::SizeVector inputShape1;
- bool transpose_a;
- bool transpose_b;
+ ShapeRelatedParams shapeRelatedParams;
ngraph::helpers::InputLayerType secondaryInputType;
std::string targetDevice;
- std::tie(netPrecision, inPrc, outPrc, inLayout, inputShape0, inputShape1, transpose_a, transpose_b, secondaryInputType, targetDevice) =
+ std::map<std::string, std::string> additionalConfig;
+ std::tie(shapeRelatedParams, netPrecision, inPrc, outPrc, inLayout, secondaryInputType, targetDevice, additionalConfig) =
obj.param;
std::ostringstream result;
- result << "IS0=" << CommonTestUtils::vec2str(inputShape0) << "_";
- result << "IS1=" << CommonTestUtils::vec2str(inputShape1) << "_";
- result << "transpose_a=" << transpose_a << "_";
- result << "transpose_b=" << transpose_b << "_";
+ result << "IS0=" << CommonTestUtils::vec2str(shapeRelatedParams.input1.first) << "_";
+ result << "IS1=" << CommonTestUtils::vec2str(shapeRelatedParams.input2.first) << "_";
+ result << "transpose_a=" << shapeRelatedParams.input1.second << "_";
+ result << "transpose_b=" << shapeRelatedParams.input2.second << "_";
result << "secondaryInputType=" << secondaryInputType << "_";
result << "netPRC=" << netPrecision.name() << "_";
result << "inPRC=" << inPrc.name() << "_";
result << "outPRC=" << outPrc.name() << "_";
result << "inL=" << inLayout << "_";
result << "trgDev=" << targetDevice;
+ result << "config=(";
    for (const auto& configEntry : additionalConfig) {
+ result << configEntry.first << ", " << configEntry.second << ":";
+ }
+ result << ")";
return result.str();
}
void MatMulTest::SetUp() {
- InferenceEngine::SizeVector inputShape0;
- InferenceEngine::SizeVector inputShape1;
- bool transpose_a;
- bool transpose_b;
+ ShapeRelatedParams shapeRelatedParams;
ngraph::helpers::InputLayerType secondaryInputType;
auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
- std::tie(netPrecision, inPrc, outPrc, inLayout, inputShape0, inputShape1, transpose_a, transpose_b, secondaryInputType, targetDevice) =
+ std::map<std::string, std::string> additionalConfig;
+ std::tie(shapeRelatedParams, netPrecision, inPrc, outPrc, inLayout, secondaryInputType, targetDevice, additionalConfig) =
this->GetParam();
+
+ configuration.insert(additionalConfig.begin(), additionalConfig.end());
+
auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
- auto params = ngraph::builder::makeParams(ngPrc, {inputShape0});
+ auto params = ngraph::builder::makeParams(ngPrc, {shapeRelatedParams.input1.first});
- auto secondaryInput = ngraph::builder::makeInputLayer(ngPrc, secondaryInputType, inputShape1);
+ auto secondaryInput = ngraph::builder::makeInputLayer(ngPrc, secondaryInputType, shapeRelatedParams.input2.first);
if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) {
params.push_back(std::dynamic_pointer_cast<ngraph::opset3::Parameter>(secondaryInput));
}
auto paramOuts = ngraph::helpers::convert2OutputVector(
ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(params));
auto MatMul = std::dynamic_pointer_cast<ngraph::opset3::MatMul>(
- ngraph::builder::makeMatMul(paramOuts[0], secondaryInput, transpose_a, transpose_b));
+ ngraph::builder::makeMatMul(paramOuts[0], secondaryInput, shapeRelatedParams.input1.second, shapeRelatedParams.input2.second));
ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(MatMul)};
function = std::make_shared<ngraph::Function>(results, params, "MatMul");
}