From: Anton Dudchenko
Date: Thu, 10 Sep 2020 11:49:19 +0000 (+0300)
Subject: [IE][VPU]: Support ngraph::Function in VPU QueryNetwork (#1929)
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c7633e7016331e5453c92799a8f52a44ce146502;p=platform%2Fupstream%2Fdldt.git

[IE][VPU]: Support ngraph::Function in VPU QueryNetwork (#1929)

* Support ngraph::Function in VPU QueryNetwork
---

diff --git a/inference-engine/src/vpu/graph_transformer/include/vpu/frontend/frontend.hpp b/inference-engine/src/vpu/graph_transformer/include/vpu/frontend/frontend.hpp
index 07c32ce..124eea7 100644
--- a/inference-engine/src/vpu/graph_transformer/include/vpu/frontend/frontend.hpp
+++ b/inference-engine/src/vpu/graph_transformer/include/vpu/frontend/frontend.hpp
@@ -181,6 +181,8 @@ public:
     //
     static CustomLayer::Ptr getSuitableCustomLayer(const std::vector& customLayers, const ie::CNNLayerPtr& cnnLayer);
 
+    static ie::ICNNNetwork::Ptr convertNetwork(ie::ICNNNetwork& network);
+
     bool isLayerSupported(const std::string& type);
 
 private:
     Data getVpuData(const ie::DataPtr& ieData) const;
diff --git a/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp b/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp
index b1b2cda..0a2af54 100644
--- a/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp
+++ b/inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -140,6 +141,47 @@ ModelPtr FrontEnd::buildInitialModel(ie::ICNNNetwork& network) {
     return runCommonPasses(network);
 }
 
+bool FrontEnd::isLayerSupported(const std::string& type) {
+    return parsers.count(type) != 0;
+}
+
+ie::ICNNNetwork::Ptr FrontEnd::convertNetwork(ie::ICNNNetwork& network) {
+    std::shared_ptr<ie::ICNNNetwork> convertedNetwork;
+    // disable transformations for some cases
+    const auto transformationsPredicate = [](const std::shared_ptr &node) -> bool {
+        const bool casesWithDynamicOrStaticUsage = std::dynamic_pointer_cast(node) ||
+                                                   std::dynamic_pointer_cast(node);
+
+        const bool casesWithOnlyDynamicUsage = (std::dynamic_pointer_cast(node) ||
+                                                std::dynamic_pointer_cast(node)) &&
+                                               std::dynamic_pointer_cast(node->input_value(0).get_node_shared_ptr());
+
+        return casesWithDynamicOrStaticUsage || casesWithOnlyDynamicUsage;
+    };
+
+    auto nGraphFunc = network.getFunction();
+    // Disable shape inference (WA for generic operations)
+    ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);
+
+    ngraph::pass::Manager manager;
+    manager.register_pass();
+    manager.register_pass();
+    manager.register_pass();
+    manager.register_pass();
+    manager.set_callback(transformationsPredicate);
+    manager.run_passes(nGraphFunc);
+
+    ngraph::pass::Manager ti_manager;
+    ti_manager.register_pass(manager);
+    ti_manager.run_passes(nGraphFunc);
+
+    vpu::MergeSubsequentDSROperations().run_on_function(nGraphFunc);
+
+    convertedNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(nGraphFunc, network);
+    return convertedNetwork;
+}
+
 std::set FrontEnd::checkSupportedLayers(ie::ICNNNetwork& network) {
     VPU_PROFILE(checkSupportedLayers);
 
@@ -324,7 +366,6 @@ ModelPtr FrontEnd::runCommonPasses(ie::ICNNNetwork& network) {
         { defaultOnUnsupportedLayerCallback(model, layer, inputs, outputs, extraMessage); });
 }
 
-
 ModelPtr FrontEnd::runCommonPasses(ie::ICNNNetwork& network, const UnsupportedLayerCallback& unsupportedLayer, const SupportedLayerCallback& supportedLayer) {
     // NGraph -> CNN conversion may be called in 2 different moments: at
     // the beginning if conversion was forced by configuration or after detect
@@ -383,48 +424,16 @@ ModelPtr FrontEnd::runCommonPasses(ie::ICNNNetwork& network, const UnsupportedLa
         env.log->trace("Update IE Network");
         VPU_LOGGER_SECTION(env.log);
 
-        auto convertNetwork = [&convertedNetwork, &originalOrConvertNetwork]() {
-            // disable transformations for some cases
-            const auto transformationsPredicate = [](const std::shared_ptr &node) -> bool {
-                const bool casesWithDynamicOrStaticUsage = std::dynamic_pointer_cast(node) ||
-                                                           std::dynamic_pointer_cast(node);
-
-                const bool casesWithOnlyDynamicUsage = (std::dynamic_pointer_cast(node) ||
-                                                        std::dynamic_pointer_cast(node)) &&
-                                                       std::dynamic_pointer_cast(node->input_value(0).get_node_shared_ptr());
-
-                return casesWithDynamicOrStaticUsage || casesWithOnlyDynamicUsage;
-            };
-
-            auto nGraphFunc = originalOrConvertNetwork->getFunction();
-            // Disable shape inference (WA for generic operations)
-            ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);
-
-            ngraph::pass::Manager manager;
-            manager.register_pass();
-            manager.register_pass();
-            manager.register_pass();
-            manager.set_callback(transformationsPredicate);
-            manager.run_passes(nGraphFunc);
-
-            ngraph::pass::Manager ti_manager;
-            ti_manager.register_pass(manager);
-            ti_manager.run_passes(nGraphFunc);
-
-            vpu::MergeSubsequentDSROperations().run_on_function(nGraphFunc);
-
-            convertedNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(nGraphFunc, *originalOrConvertNetwork);
-            originalOrConvertNetwork = convertedNetwork.get();
-        };
-
         if (originalOrConvertNetwork->getFunction() && env.config.forceDeprecatedCnnConversion) {
-            convertNetwork();
+            convertedNetwork = convertNetwork(*originalOrConvertNetwork);
+            originalOrConvertNetwork = convertedNetwork.get();
         }
 
         detectNetworkBatch(*originalOrConvertNetwork, model);
 
         if (originalOrConvertNetwork->getFunction()) {
-            convertNetwork();
+            convertedNetwork = convertNetwork(*originalOrConvertNetwork);
+            originalOrConvertNetwork = convertedNetwork.get();
         }
 
         ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::I64, ie::Precision::I32);
diff --git a/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp b/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp
index 17d3b3c..8f64b5b 100644
--- a/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp
+++ b/inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp
@@ -14,11 +14,15 @@
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
+#include
+#include
 
 #include "vpu/ngraph/transformations/dynamic_to_static_shape.hpp"
 #include "vpu/ngraph/transformations/eliminate_shapeof_after_dsr.hpp"
@@ -32,6 +36,7 @@
 using namespace InferenceEngine::PluginConfigParams;
 using namespace InferenceEngine::VPUConfigParams;
 using namespace vpu::MyriadPlugin;
 
+
 ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(
         const ICNNNetwork& network,
         const std::map& config) {
@@ -98,24 +103,160 @@ void Engine::QueryNetwork(
         VPU_THROW_UNLESS(!(std::find(deviceIDs.begin(), deviceIDs.end(), deviceName) == deviceIDs.end()),
                          "Myriad device: {} not found.", deviceName);
     }
 
-    if (network.getFunction()) {
-        THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str << " ngraph::Function is not supported natively";
-    }
+    if (auto function = network.getFunction()) {
+        std::unordered_set<std::string> originalOps;
+        for (auto& node : function->get_ops()) {
+            originalOps.emplace(node->get_friendly_name());
+        }
 
-    const auto log = std::make_shared(
-        "GraphCompiler",
-        parsedConfigCopy.logLevel(),
-        defaultOutput(parsedConfigCopy.compilerLogFilePath()));
+        auto clonedNetwork = cloneNetwork(network);
+        auto convertedNetwork = vpu::FrontEnd::convertNetwork(*clonedNetwork);
+
+        std::unordered_set<std::string> supported;
+        std::unordered_set<std::string> unsupported;
+
+        std::unordered_set<std::string> splitNames;
+        std::unordered_set<std::string> concatNames;
+
+        ngraph::NodeVector splits;
+        ngraph::NodeVector concats;
+
+        const auto isLayerSupported = [this, &splitNames, &concatNames, &concats, &splits](CNNNetworkIterator& layer) -> bool {
+            auto node = (*layer)->getNode();
+            if (std::dynamic_pointer_cast(node) != nullptr) {
+                splitNames.emplace(node->get_friendly_name());
+                splits.push_back(node);
+                return false;
+            } else if (std::dynamic_pointer_cast(node) != nullptr) {
+                concatNames.emplace(node->get_friendly_name());
+                concats.push_back(node);
+                return false;
+            } else {
+                auto stageBuilder = std::make_shared<StageBuilder>();
+                auto frontEnd = std::make_shared<FrontEnd>(stageBuilder, GetCore());
+                return frontEnd->isLayerSupported((*layer)->type);
+            }
+        };
+
+        for (CNNNetworkIterator itLayer{convertedNetwork.get()};
+             itLayer != CNNNetworkIterator();
+             itLayer++) {
+            const auto fusedNode = (*itLayer)->getNode();
+            if (fusedNode == nullptr) {
+                continue;
+            }
+
+            for (auto& fusedLayerName : ngraph::getFusedNamesVector(fusedNode)) {
+                if (contains(originalOps, fusedLayerName)) {
+                    if (isLayerSupported(itLayer)) {
+                        supported.emplace(fusedLayerName);
+                    } else {
+                        unsupported.emplace(fusedLayerName);
+                    }
+                }
+            }
+        }
 
-    const auto layerNames = getSupportedLayers(
-        network,
-        static_cast(parsedConfigCopy.platform()),
-        parsedConfigCopy.compileConfig(),
-        log,
-        GetCore());
+        for (const auto& layerName : supported) {
+            if (contains(unsupported, layerName)) {
+                supported.erase(layerName);
+            }
+        }
+
+        unsupported.clear();
+
+        std::function<void(std::shared_ptr<ngraph::Node>)> markParentSplitAsUnsupported = [&markParentSplitAsUnsupported, &supported, &splitNames]
+                (const std::shared_ptr<ngraph::Node>& split) {
+            const auto inputs = split->inputs();
+            for (const auto& input : inputs) {
+                const auto& parentName = input.get_source_output().get_node()->get_friendly_name();
+                if (contains(supported, parentName) &&
+                    contains(splitNames, parentName)) {
+                    markParentSplitAsUnsupported(input.get_source_output().get_node_shared_ptr());
+                }
+            }
+            const auto& name = split->get_friendly_name();
+            if (contains(supported, name)) {
+                supported.erase(name);
+            }
+        };
+
+        for (const auto& split : splits) {
+            // Mark the Split as supported only if all of its consumers are supported
+            bool is_supported = true;
+            const auto outputs = split->outputs();
+            for (const auto& output : outputs) {
+                for (const auto& consumer : output.get_target_inputs()) {
+                    const auto& name = consumer.get_node()->get_friendly_name();
+                    if (!contains(supported, name) &&
+                        !contains(concatNames, name) &&
+                        !contains(splitNames, name)) {
+                        is_supported = false;
+                        break;
+                    }
+                }
+            }
+            if (is_supported) {
+                supported.emplace(split->get_friendly_name());
+            } else {
+                // If the Split is not supported and its parent is also a Split, mark the parent as unsupported
+                markParentSplitAsUnsupported(split);
+            }
+        }
 
-    for (const auto& layerName : layerNames) {
-        res.supportedLayersMap.insert({ layerName, GetName() });
+        for (const auto& concat : concats) {
+            // Mark the Concat as supported only if all of its parent layers are supported
+            bool is_supported = true;
+            const auto inputs = concat->inputs();
+            for (const auto& input : inputs) {
+                const auto& name = input.get_source_output().get_node()->get_friendly_name();
+                if (!contains(supported, name) &&
+                    !contains(concatNames, name)) {
+                    is_supported = false;
+                    break;
+                }
+            }
+            if (is_supported) {
+                supported.emplace(concat->get_friendly_name());
+            }
+        }
+
+        for (const auto& node : function->get_ops()) {
+            if (contains(supported, node->get_friendly_name())) {
+                for (const auto& inputNodeOutput : node->input_values()) {
+                    if (ngraph::op::is_constant(inputNodeOutput.get_node()) || ngraph::op::is_parameter(inputNodeOutput.get_node())) {
+                        supported.emplace(inputNodeOutput.get_node()->get_friendly_name());
+                    }
+                }
+                for (const auto& outputs : node->outputs()) {
+                    for (const auto& outputNodeInput : outputs.get_target_inputs()) {
+                        if (ngraph::op::is_output(outputNodeInput.get_node())) {
+                            supported.emplace(outputNodeInput.get_node()->get_friendly_name());
+                        }
+                    }
+                }
+            }
+        }
+
+        for (const auto& layerName : supported) {
+            res.supportedLayersMap.emplace(layerName, GetName());
+        }
+    } else {
+        const auto log = std::make_shared(
+            "GraphCompiler",
+            parsedConfigCopy.logLevel(),
+            defaultOutput(parsedConfigCopy.compilerLogFilePath()));
+
+        const auto layerNames = getSupportedLayers(
+            network,
+            static_cast(parsedConfigCopy.platform()),
+            parsedConfigCopy.compileConfig(),
+            log,
+            GetCore());
+
+        for (const auto& layerName : layerNames) {
+            res.supportedLayersMap.insert({ layerName, GetName() });
+        }
+    }
 }
diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/query_network.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/query_network.cpp
new file mode 100644
index 0000000..ec66867
--- /dev/null
+++ b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/query_network.cpp
@@ -0,0 +1,24 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include
+
+#include "hetero/query_network.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/subgraph_builders.hpp"
+
+namespace {
+using namespace HeteroTests;
+
+auto ConvBias = ngraph::builder::subgraph::makeConvBias();
+auto TIwithLSTMcell = ngraph::builder::subgraph::makeTIwithLSTMcell();
+auto SplitConvConcat = ngraph::builder::subgraph::makeNestedSplitConvConcat();
+auto BranchSplitConvConcat = ngraph::builder::subgraph::makeSplitConvConcatNestedInBranch();
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, QueryNetworkTest,
+                        ::testing::Combine(
+                            ::testing::Values("MYRIAD", "HETERO:MYRIAD,CPU", "MULTI:MYRIAD,CPU"),
+                            ::testing::Values(ConvBias, TIwithLSTMcell, SplitConvConcat, BranchSplitConvConcat)),
+                        QueryNetworkTest::getTestCaseName);
+}  // namespace
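
For reference, the per-layer report produced by the new MYRIAD QueryNetwork path is consumed through the regular InferenceEngine::Core API. The snippet below is a minimal sketch, not part of this change: the model path "model.xml" is a placeholder, and the headers and types assume the 2020-era Inference Engine C++ API (Core::ReadNetwork, Core::QueryNetwork, QueryNetworkResult::supportedLayersMap).

    #include <inference_engine.hpp>

    #include <iostream>
    #include <string>

    int main() {
        InferenceEngine::Core core;

        // Placeholder path to an IR model produced by the Model Optimizer.
        InferenceEngine::CNNNetwork network = core.ReadNetwork("model.xml");

        // With this change, QueryNetwork on MYRIAD accepts networks that carry an
        // ngraph::Function instead of throwing a NOT_IMPLEMENTED exception.
        InferenceEngine::QueryNetworkResult result = core.QueryNetwork(network, "MYRIAD", {});

        // supportedLayersMap maps each supported operation's friendly name to the
        // name of the device that reported it.
        for (const auto& entry : result.supportedLayersMap) {
            std::cout << entry.first << " -> " << entry.second << std::endl;
        }

        return 0;
    }

The same map is what HETERO and MULTI configurations, as exercised by the new QueryNetworkTest instantiations above, rely on to decide which operations fall back to another device.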