From 2e3378c50feb96df2bb8cb719bf0745705e35ad9 Mon Sep 17 00:00:00 2001
From: Anton Zaytsev
Date: Wed, 8 Jul 2020 20:59:24 +0300
Subject: [PATCH] [IE TESTS] dynamic batch for mvn layer (#1010)

* [ci-skip][IE TESTS] dynamic batch for mvn layer

* update instance v0

* [ci-skip][IE TESTS] update instance for mvn layer

* [ci-skip][IE TESTS] fix

* [ci-skip][IE TESTS] add dynamic batch for singleLayer basic class

* [ci-skip][IE TESTS] update dynamic batch for singleLayer basic class

* [ci-skip][IE TESTS] removing batchFlag

* [IE TESTS] removing batchSize

* [IE TESTS] refactor dynamic batch in basic class

* [IE TESTS] refactor dynamic batch in basic class
---
 .../src/mkldnn_plugin/mkldnn_exec_network.cpp         |  5 +++--
 .../src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp       |  3 ++-
 .../shared_tests_instances/single_layer_tests/mvn.cpp | 18 +++++++++++++++++-
 .../shared_tests_instances/single_layer_tests/mvn.cpp |  8 +++++++-
 .../plugin/shared/include/single_layer_tests/mvn.hpp  | 15 ++++++++-------
 .../plugin/shared/src/single_layer_tests/mvn.cpp      | 13 +++++++++----
 .../functional_test_utils/layer_test_utils.cpp        | 17 ++++++++++++++---
 .../functional_test_utils/layer_test_utils.hpp        |  3 +--
 8 files changed, 61 insertions(+), 21 deletions(-)

diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
index f6fee5a..a4779cb 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_exec_network.cpp
@@ -102,7 +102,7 @@ MKLDNNExecNetwork::MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network
 
     MKLDNNGraph::ApplyUnrollPasses(static_cast<ICNNNetwork &>(*_clonedNetwork));
 
-    if (_cfg.batchLimit > 1) {
+    if (_cfg.enableDynamicBatch) {
         // check topology for applicability
         if (!CanProcessDynBatch(*_clonedNetwork)) {
             THROW_IE_EXCEPTION << "MKLDNNGraph::CreateGraph: such topology cannot be compiled for dynamic batch!";
         }
@@ -292,7 +292,8 @@ bool MKLDNNExecNetwork::CanProcessDynBatch(const InferenceEngine::ICNNNetwork &n
                 type != Eltwise &&
                 type != Crop &&
                 type != BatchNormalization &&
-                type != Copy) {
+                type != Copy &&
+                type != MVN) {
                 check_result = false;
             }
         }, false);
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp
index 5cb9553..0566c23 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp
@@ -990,7 +990,8 @@ void MKLDNNMVNNode::mvn_blk(const in_data_t* src_data, out_data_t* dst_data, con
     std::vector<float> mean_buffer(aux_buffer_size * threads_num);
     std::vector<float> variance_buffer(aux_buffer_size * threads_num);
 
-    for (size_t b = 0lu; b < N; b++) {
+    int actual_N = batchToProcess();
+    for (size_t b = 0lu; b < actual_N; b++) {
         size_t ccb = is_nhwc ? b * C2 : b * C3;
         if (across_channels) {
             // mean for this instance in batch
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/mvn.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/mvn.cpp
index cd78920..30548fb 100644
--- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/mvn.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/mvn.cpp
@@ -38,14 +38,30 @@ const std::vector<double> epsilon = {
     0.000000001
 };
 
+const std::vector<std::map<std::string, std::string>> Configs = {
+    {}
+};
+
 const auto MvnCases = ::testing::Combine(
     ::testing::ValuesIn(inputShapes),
     ::testing::Values(InferenceEngine::Precision::FP32),
     ::testing::ValuesIn(acrossChannels),
     ::testing::ValuesIn(normalizeVariance),
     ::testing::ValuesIn(epsilon),
-    ::testing::Values(CommonTestUtils::DEVICE_CPU)
+    ::testing::Values(CommonTestUtils::DEVICE_CPU),
+    ::testing::ValuesIn(Configs)
 );
 
 INSTANTIATE_TEST_CASE_P(smoke_MKLDNN_TestsMVN, MvnLayerTest, MvnCases, MvnLayerTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_MKLDNN_MvnLayerCheckDynBatch, MvnLayerTest,
+                        ::testing::Combine(
+                                ::testing::Values(std::vector<size_t>({5, 8, 3, 5})),
+                                ::testing::Values(InferenceEngine::Precision::FP32),
+                                ::testing::ValuesIn(acrossChannels),
+                                ::testing::ValuesIn(normalizeVariance),
+                                ::testing::ValuesIn(epsilon),
+                                ::testing::Values(CommonTestUtils::DEVICE_CPU),
+                                ::testing::Values(std::map<std::string, std::string>({{CONFIG_KEY(DYN_BATCH_ENABLED), CONFIG_VALUE(YES)}}))),
+                        MvnLayerTest::getTestCaseName);
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/mvn.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/mvn.cpp
index 8cbd56f..d338554 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/mvn.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/mvn.cpp
@@ -38,13 +38,19 @@ const std::vector<double> epsilon = {
     0.000000001
 };
 
+
+const std::vector<std::map<std::string, std::string>> Configs = {
+    {}
+};
+
 const auto MvnCases = ::testing::Combine(
     ::testing::ValuesIn(inputShapes),
     ::testing::Values(InferenceEngine::Precision::FP32),
     ::testing::ValuesIn(acrossChannels),
     ::testing::ValuesIn(normalizeVariance),
     ::testing::ValuesIn(epsilon),
-    ::testing::Values(CommonTestUtils::DEVICE_GPU)
+    ::testing::Values(CommonTestUtils::DEVICE_GPU),
+    ::testing::ValuesIn(Configs)
 );
 
 INSTANTIATE_TEST_CASE_P(smoke_CLDNN_TestsMVN, MvnLayerTest, MvnCases, MvnLayerTest::getTestCaseName);
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp
index 88bc54a..b147028 100644
--- a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp
+++ b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/mvn.hpp
@@ -13,17 +13,18 @@
 namespace LayerTestsDefinitions {
 
 typedef std::tuple<
-        InferenceEngine::SizeVector, // Input shapes
-        InferenceEngine::Precision,  // Input precision
-        bool,                        // Across channels
-        bool,                        // Normalize variance
-        double,                      // Epsilon
-        std::string> mvnParams;      // Device name
+        InferenceEngine::SizeVector,        // Input shapes
+        InferenceEngine::Precision,         // Input precision
+        bool,                               // Across channels
+        bool,                               // Normalize variance
+        double,                             // Epsilon
+        std::string,                        // Device name
+        std::map<std::string, std::string>  // Config
+        > mvnParams;
 
 class MvnLayerTest : public testing::WithParamInterface<mvnParams>, public LayerTestsUtils::LayerTestsCommon {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<mvnParams> obj);
-
 protected:
     void SetUp() override;
 };
diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/mvn.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/mvn.cpp
index e21fd77..23fab21 100644
--- a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/mvn.cpp
+++ b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/mvn.cpp
@@ -27,7 +27,8 @@ std::string MvnLayerTest::getTestCaseName(testing::TestParamInfo<mvnParams> obj)
     bool acrossChannels, normalizeVariance;
     double eps;
     std::string targetDevice;
-    std::tie(inputShapes, inputPrecision, acrossChannels, normalizeVariance, eps, targetDevice) = obj.param;
+    std::map<std::string, std::string> configuration;
+    std::tie(inputShapes, inputPrecision, acrossChannels, normalizeVariance, eps, targetDevice, configuration) = obj.param;
     std::ostringstream result;
     result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
     result << "Precision=" << inputPrecision.name() << "_";
@@ -35,6 +36,11 @@ std::string MvnLayerTest::getTestCaseName(testing::TestParamInfo<mvnParams> obj)
     result << "NormalizeVariance=" << (normalizeVariance ? "TRUE" : "FALSE") << "_";
     result << "Epsilon=" << eps << "_";
     result << "TargetDevice=" << targetDevice;
+    if (!configuration.empty()) {
+        for (auto& configItem : configuration) {
+            result << "configItem=" << configItem.first << "_" << configItem.second << "_";
+        }
+    }
     return result.str();
 }
 
@@ -43,7 +49,7 @@ void MvnLayerTest::SetUp() {
     InferenceEngine::Precision inputPrecision;
     bool acrossChanels, normalizeVariance;
     double eps;
-    std::tie(inputShapes, inputPrecision, acrossChanels, normalizeVariance, eps, targetDevice) = this->GetParam();
+    std::tie(inputShapes, inputPrecision, acrossChanels, normalizeVariance, eps, targetDevice, configuration) = this->GetParam();
     auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
     auto param = ngraph::builder::makeParams(inType, {inputShapes});
     auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(param));
@@ -54,6 +60,5 @@ void MvnLayerTest::SetUp() {
 
 TEST_P(MvnLayerTest, CompareWithRefs) {
     Run();
-};
-
+}
 }  // namespace LayerTestsDefinitions
\ No newline at end of file
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp
index 1990f93..f8731f5 100644
--- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.cpp
@@ -39,7 +39,14 @@ void LayerTestsCommon::Compare(const std::vector<std::uint8_t> &expected, const
     const auto actualBuffer = lockedMemory.as<const std::uint8_t *>();
 
     const auto &precision = actual->getTensorDesc().getPrecision();
-    const auto &size = actual->size();
+    auto bufferSize = actual->size();
+    // With dynamic batch only a part of the batch is processed, so compare only the corresponding part of the output
+    if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED)) {
+        auto batchSize = actual->getTensorDesc().getDims()[0];
+        auto halfBatchSize = batchSize > 1 ? batchSize / 2 : 1;
+        bufferSize = (actual->size() * halfBatchSize / batchSize);
+    }
+    const auto &size = bufferSize;
     switch (precision) {
         case InferenceEngine::Precision::FP32:
             Compare(reinterpret_cast<const float *>(expectedBuffer), reinterpret_cast<const float *>(actualBuffer),
@@ -54,7 +61,7 @@
             size, threshold);
             break;
         default:
             FAIL() << "Comparator for " << precision << " precision isn't supported";
     }
 }
 
-void LayerTestsCommon::ConfigurePlugin() const {
+void LayerTestsCommon::ConfigurePlugin() {
     if (!configuration.empty()) {
         core->SetConfig(configuration, targetDevice);
     }
 }
@@ -92,11 +99,15 @@ void LayerTestsCommon::Infer() {
 
     for (const auto &input : cnnNetwork.getInputsInfo()) {
         const auto &info = input.second;
-
         auto blob = GenerateInput(*info);
         inferRequest.SetBlob(info->name(), blob);
         inputs.push_back(blob);
     }
+    if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) &&
+        configuration.at(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) == InferenceEngine::PluginConfigParams::YES) {
+        auto batchSize = cnnNetwork.getInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2;
+        inferRequest.SetBatch(batchSize);
+    }
     inferRequest.Infer();
 }
 
diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp
index 7b06129..250d013 100644
--- a/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp
+++ b/inference-engine/tests/ie_test_utils/functional_test_utils/layer_test_utils.hpp
@@ -84,7 +84,7 @@ protected:
         return refMode;
     }
 
-    void ConfigurePlugin() const;
+    void ConfigurePlugin();
 
     void LoadNetwork();
 
@@ -102,7 +102,6 @@
     std::vector<InferenceEngine::Blob::Ptr> inputs;
     float threshold;
     InferenceEngine::CNNNetwork cnnNetwork;
-
     virtual void Validate();
 
     virtual std::vector<std::vector<std::uint8_t>> CalculateRefs();
-- 
2.7.4
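
For reference, the dynamic-batch flow these tests exercise comes down to two
Inference Engine calls: SetConfig() with DYN_BATCH_ENABLED=YES before the network
is loaded, and InferRequest::SetBatch() before Infer(). Below is a minimal
client-side sketch of that flow; it is not part of the patch, "model.xml" is a
hypothetical IR path, and the first input dimension is assumed to be the batch.

    #include <ie_core.hpp>
    #include <ie_plugin_config.hpp>

    int main() {
        InferenceEngine::Core core;
        // Enable the dynamic-batch path in the CPU plugin before compiling the network.
        core.SetConfig({{CONFIG_KEY(DYN_BATCH_ENABLED), CONFIG_VALUE(YES)}}, "CPU");

        // "model.xml" is a placeholder for an IR whose first input dimension is the batch.
        auto network = core.ReadNetwork("model.xml");
        auto executableNetwork = core.LoadNetwork(network, "CPU");
        auto inferRequest = executableNetwork.CreateInferRequest();

        // Ask the plugin to process only half of the allocated batch, mirroring
        // what LayerTestsCommon::Infer() does in the patch above.
        auto fullBatch = network.getInputShapes().begin()->second[0];
        inferRequest.SetBatch(fullBatch > 1 ? static_cast<int>(fullBatch / 2) : 1);

        // Only the first SetBatch() instances of the output are valid afterwards,
        // which is why LayerTestsCommon::Compare() shrinks the compared region.
        inferRequest.Infer();
        return 0;
    }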