[IE][VPU]: Support ngraph::Function in VPU QueryNetwork (#1929)
author	Anton Dudchenko <anton.dudchenko@intel.com>
Thu, 10 Sep 2020 11:49:19 +0000 (14:49 +0300)
committer	GitHub <noreply@github.com>
Thu, 10 Sep 2020 11:49:19 +0000 (14:49 +0300)
* Support ngraph::Function in VPU QueryNetwork
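
For reference, this is the caller-side behaviour the change enables: QueryNetwork on an
ngraph-based network now returns per-operation support information for MYRIAD instead of
throwing NOT_IMPLEMENTED. A minimal sketch, assuming the standard InferenceEngine::Core
API of this release (model paths are placeholders):

    #include <ie_core.hpp>
    #include <iostream>

    int main() {
        InferenceEngine::Core core;
        // Placeholder paths for an IR that is read into an ngraph::Function.
        auto network = core.ReadNetwork("model.xml", "model.bin");
        // Ask the MYRIAD plugin which of the original operations it can execute.
        auto result = core.QueryNetwork(network, "MYRIAD");
        for (const auto& entry : result.supportedLayersMap) {
            std::cout << entry.first << " -> " << entry.second << std::endl;
        }
        return 0;
    }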

inference-engine/src/vpu/graph_transformer/include/vpu/frontend/frontend.hpp
inference-engine/src/vpu/graph_transformer/src/frontend/frontend.cpp
inference-engine/src/vpu/myriad_plugin/myriad_plugin.cpp
inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/query_network.cpp [new file with mode: 0644]

index 07c32ce..124eea7 100644 (file)
@@ -181,6 +181,8 @@ public:
     //
 
     static CustomLayer::Ptr getSuitableCustomLayer(const std::vector<CustomLayer::Ptr>& customLayers, const ie::CNNLayerPtr&cnnLayer);
+    static ie::ICNNNetwork::Ptr convertNetwork(ie::ICNNNetwork& network);
+    bool isLayerSupported(const std::string& type);
 
 private:
     Data getVpuData(const ie::DataPtr& ieData) const;
index b1b2cda..0a2af54 100644 (file)
@@ -27,6 +27,7 @@
 #include <transformations/convert_opset3_to_opset2/convert_opset3_to_opset2.hpp>
 #include <transformations/convert_opset2_to_opset1/convert_opset2_to_opset1.hpp>
 #include <transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
+#include <transformations/common_optimizations/common_optimizations.hpp>
 #include <vpu/ngraph/transformations/merge_subsequent_dsr_operations.hpp>
 #include <vpu/ngraph/operations/dynamic_shape_resolver.hpp>
 
@@ -140,6 +141,47 @@ ModelPtr FrontEnd::buildInitialModel(ie::ICNNNetwork& network) {
     return runCommonPasses(network);
 }
 
+bool FrontEnd::isLayerSupported(const std::string& type) {
+    return parsers.count(type) != 0;
+}
+
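+// Runs the shared nGraph -> legacy CNNNetwork conversion pipeline (common optimizations,
+// opset lowering, TI body transformations, DSR merging) so that QueryNetwork and the
+// LoadNetwork path go through the same conversion.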
+ie::ICNNNetwork::Ptr FrontEnd::convertNetwork(ie::ICNNNetwork& network) {
+    std::shared_ptr<ie::ICNNNetwork> convertedNetwork;
+    // disable transformations for some cases
+    const auto transformationsPredicate = [](const std::shared_ptr<const ngraph::Node>& node) -> bool {
+        const bool casesWithDynamicOrStaticUsage = std::dynamic_pointer_cast<const ngraph::opset3::Gelu>(node) ||
+                                                   std::dynamic_pointer_cast<const ngraph::opset4::SoftPlus>(node);
+
+        const bool casesWithOnlyDynamicUsage = (std::dynamic_pointer_cast<const ngraph::opset3::MatMul>(node) ||
+                                                std::dynamic_pointer_cast<const ngraph::opset3::StridedSlice>(node)) &&
+            std::dynamic_pointer_cast<const ngraph::vpu::op::DynamicShapeResolver>(node->input_value(0).get_node_shared_ptr());
+
+        return casesWithDynamicOrStaticUsage || casesWithOnlyDynamicUsage;
+    };
+
+    auto nGraphFunc = network.getFunction();
+    // Disable shape inference (WA for generic operations)
+    ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);
+
+    ngraph::pass::Manager manager;
+    manager.register_pass<ngraph::pass::CommonOptimizations>();
+    manager.register_pass<ngraph::pass::ConvertOpSet3ToOpSet2>();
+    manager.register_pass<ngraph::pass::ConvertOpSet2ToOpSet1>();
+    manager.register_pass<ngraph::pass::ConvertOpSet1ToLegacy>();
+    manager.set_callback(transformationsPredicate);
+    manager.run_passes(nGraphFunc);
+
+    ngraph::pass::Manager ti_manager;
+    ti_manager.register_pass<ngraph::pass::ApplyTransformationsToTIBody>(manager);
+    ti_manager.run_passes(nGraphFunc);
+
+    vpu::MergeSubsequentDSROperations().run_on_function(nGraphFunc);
+
+    convertedNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(nGraphFunc, network);
+    return convertedNetwork;
+}
+
 std::set<std::string> FrontEnd::checkSupportedLayers(ie::ICNNNetwork& network) {
     VPU_PROFILE(checkSupportedLayers);
 
@@ -324,7 +366,6 @@ ModelPtr FrontEnd::runCommonPasses(ie::ICNNNetwork& network) {
         { defaultOnUnsupportedLayerCallback(model, layer, inputs, outputs, extraMessage); });
 }
 
-
 ModelPtr FrontEnd::runCommonPasses(ie::ICNNNetwork& network, const UnsupportedLayerCallback& unsupportedLayer, const SupportedLayerCallback& supportedLayer) {
     // NGraph -> CNN conversion may be called in 2 different moments: at
     // the beginning if conversion was forced by configuration or after detect
@@ -383,48 +424,16 @@ ModelPtr FrontEnd::runCommonPasses(ie::ICNNNetwork& network, const UnsupportedLa
         env.log->trace("Update IE Network");
         VPU_LOGGER_SECTION(env.log);
 
-        auto convertNetwork = [&convertedNetwork, &originalOrConvertNetwork]() {
-            // disable transformations for some cases
-            const auto transformationsPredicate = [](const std::shared_ptr<const ngraph::Node> &node) -> bool {
-                const bool casesWithDynamicOrStaticUsage = std::dynamic_pointer_cast<const ngraph::opset3::Gelu>(node) ||
-                                                           std::dynamic_pointer_cast<const ngraph::opset4::SoftPlus>(node);
-
-                const bool casesWithOnlyDynamicUsage = (std::dynamic_pointer_cast<const ngraph::opset3::MatMul>(node) ||
-                                                        std::dynamic_pointer_cast<const ngraph::opset3::StridedSlice>(node)) &&
-                        std::dynamic_pointer_cast<const ngraph::vpu::op::DynamicShapeResolver>(node->input_value(0).get_node_shared_ptr());
-
-                return casesWithDynamicOrStaticUsage || casesWithOnlyDynamicUsage;
-            };
-
-            auto nGraphFunc = originalOrConvertNetwork->getFunction();
-            // Disable shape inference (WA for generic operations)
-            ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);
-
-            ngraph::pass::Manager manager;
-            manager.register_pass<ngraph::pass::ConvertOpSet3ToOpSet2>();
-            manager.register_pass<ngraph::pass::ConvertOpSet2ToOpSet1>();
-            manager.register_pass<ngraph::pass::ConvertOpSet1ToLegacy>();
-            manager.set_callback(transformationsPredicate);
-            manager.run_passes(nGraphFunc);
-
-            ngraph::pass::Manager ti_manager;
-            ti_manager.register_pass<ngraph::pass::ApplyTransformationsToTIBody>(manager);
-            ti_manager.run_passes(nGraphFunc);
-
-            vpu::MergeSubsequentDSROperations().run_on_function(nGraphFunc);
-
-            convertedNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(nGraphFunc, *originalOrConvertNetwork);
-            originalOrConvertNetwork = convertedNetwork.get();
-        };
-
         if (originalOrConvertNetwork->getFunction() && env.config.forceDeprecatedCnnConversion) {
-            convertNetwork();
+            convertedNetwork = convertNetwork(*originalOrConvertNetwork);
+            originalOrConvertNetwork = convertedNetwork.get();
         }
 
         detectNetworkBatch(*originalOrConvertNetwork, model);
 
         if (originalOrConvertNetwork->getFunction()) {
-            convertNetwork();
+            convertedNetwork = convertNetwork(*originalOrConvertNetwork);
+            originalOrConvertNetwork = convertedNetwork.get();
         }
 
         ie::NetPass::ConvertPrecision(*originalOrConvertNetwork, ie::Precision::I64, ie::Precision::I32);
index 17d3b3c..8f64b5b 100644 (file)
 
 #include <vpu/vpu_plugin_config.hpp>
 #include <vpu/parsed_config.hpp>
+#include <vpu/frontend/frontend.hpp>
 #include <vpu/utils/profiling.hpp>
 #include <vpu/utils/error.hpp>
 #include <transformations/tensor_iterator_transformations/apply_transformations_to_ti_body.hpp>
 #include <transformations/common_optimizations/common_optimizations.hpp>
+#include <transformations/rt_info/fused_names_attribute.hpp>
 #include <vpu/ngraph/transformations/convert_nms_4_to_nms_dynamic.hpp>
+#include <ngraph/op/util/op_types.hpp>
+#include <ngraph/opsets/opset3.hpp>
 
 #include "vpu/ngraph/transformations/dynamic_to_static_shape.hpp"
 #include "vpu/ngraph/transformations/eliminate_shapeof_after_dsr.hpp"
@@ -32,6 +36,7 @@ using namespace InferenceEngine::PluginConfigParams;
 using namespace InferenceEngine::VPUConfigParams;
 using namespace vpu::MyriadPlugin;
 
+
 ExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(
         const ICNNNetwork& network,
         const std::map<std::string, std::string>& config) {
@@ -98,24 +103,160 @@ void Engine::QueryNetwork(
         VPU_THROW_UNLESS(!(std::find(deviceIDs.begin(), deviceIDs.end(), deviceName) == deviceIDs.end()), "Myriad device: {} not found.", deviceName);
     }
 
-    if (network.getFunction()) {
-        THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str << " ngraph::Function is not supported natively";
-    }
+    if (auto function = network.getFunction()) {
+        std::unordered_set<std::string> originalOps;
+        for (auto& node : function->get_ops()) {
+            originalOps.emplace(node->get_friendly_name());
+        }
 
-    const auto log = std::make_shared<Logger>(
-        "GraphCompiler",
-        parsedConfigCopy.logLevel(),
-        defaultOutput(parsedConfigCopy.compilerLogFilePath()));
+        auto clonedNetwork = cloneNetwork(network);
+        auto convertedNetwork = vpu::FrontEnd::convertNetwork(*clonedNetwork);
+
+        std::unordered_set<std::string> supported;
+        std::unordered_set<std::string> unsupported;
+
+        std::unordered_set<std::string> splitNames;
+        std::unordered_set<std::string> concatNames;
+
+        ngraph::NodeVector splits;
+        ngraph::NodeVector concats;
+
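+        // Split and Concat need whole-subgraph context: record them here and decide
+        // on their support only after all other layers have been classified.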
+        const auto isLayerSupported = [this, &splitNames, &concatNames, &concats, &splits](CNNNetworkIterator& layer) -> bool {
+            auto node = (*layer)->getNode();
+            if (std::dynamic_pointer_cast<const ::ngraph::opset3::Split>(node) != nullptr) {
+                splitNames.emplace(node->get_friendly_name());
+                splits.push_back(node);
+                return false;
+            } else if (std::dynamic_pointer_cast<const ::ngraph::opset3::Concat>(node) != nullptr) {
+                concatNames.emplace(node->get_friendly_name());
+                concats.push_back(node);
+                return false;
+            } else {
+                auto stageBuilder = std::make_shared<StageBuilder>();
+                auto frontEnd = std::make_shared<FrontEnd>(stageBuilder, GetCore());
+                return frontEnd->isLayerSupported((*layer)->type);
+            }
+        };
+
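+        // Walk the converted (legacy) network and map every layer back to the original
+        // ngraph operations it was fused from, using the fused-names run-time attribute.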
+        for (CNNNetworkIterator itLayer{convertedNetwork.get()};
+             itLayer != CNNNetworkIterator();
+             itLayer++) {
+            const auto fusedNode = (*itLayer)->getNode();
+            if (fusedNode == nullptr) {
+                continue;
+            }
+
+            for (auto& fusedLayerName : ngraph::getFusedNamesVector(fusedNode)) {
+                if (contains(originalOps, fusedLayerName)) {
+                    if (isLayerSupported(itLayer)) {
+                        supported.emplace(fusedLayerName);
+                    } else {
+                        unsupported.emplace(fusedLayerName);
+                    }
+                }
+            }
+        }
 
-    const auto layerNames = getSupportedLayers(
-        network,
-        static_cast<Platform>(parsedConfigCopy.platform()),
-        parsedConfigCopy.compileConfig(),
-        log,
-        GetCore());
+        // A layer fused into both supported and unsupported nodes cannot be offloaded;
+        // iterate over 'unsupported' so that 'supported' is not modified while it is being traversed.
+        for (const auto& layerName : unsupported) {
+            supported.erase(layerName);
+        }
+
+        unsupported.clear();
+
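+        // When a Split turns out to be unsupported, its parent Splits have to be unmarked
+        // as well, since they would otherwise be reported without their consumers.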
+        std::function<void(std::shared_ptr<ngraph::Node>)> markParentSplitAsUnsupported = [&markParentSplitAsUnsupported, &supported, &splitNames]
+                                                                                          (const std::shared_ptr<ngraph::Node>& split) {
+            const auto inputs = split->inputs();
+            for (const auto& input : inputs) {
+                const auto& parentName = input.get_source_output().get_node()->get_friendly_name();
+                if (contains(supported, parentName) &&
+                    contains(splitNames, parentName)) {
+                    markParentSplitAsUnsupported(input.get_source_output().get_node_shared_ptr());
+                }
+            }
+            const auto& name = split->get_friendly_name();
+            if (contains(supported, name)) {
+                supported.erase(name);
+            }
+        };
+
+        for (const auto& split : splits) {
+            // Mark the Split as supported only if all of its consumers are supported
+            bool is_supported = true;
+            const auto outputs = split->outputs();
+            for (const auto& output : outputs) {
+                for (const auto& consumer : output.get_target_inputs()) {
+                    const auto& name = consumer.get_node()->get_friendly_name();
+                    if (!contains(supported, name) &&
+                        !contains(concatNames, name) &&
+                        !contains(splitNames, name)) {
+                        is_supported = false;
+                        break;
+                    }
+                }
+            }
+            if (is_supported) {
+                supported.emplace(split->get_friendly_name());
+            } else {
+                // If the Split is not supported and its parent is also a Split, mark the parent as unsupported too
+                markParentSplitAsUnsupported(split);
+            }
+        }
 
-    for (const auto& layerName : layerNames) {
-        res.supportedLayersMap.insert({ layerName, GetName() });
+        for (const auto& concat : concats) {
+            // Mark the Concat as supported only if all of its parent layers are supported
+            bool is_supported = true;
+            const auto inputs = concat->inputs();
+            for (const auto& input : inputs) {
+                const auto& name = input.get_source_output().get_node()->get_friendly_name();
+                if (!contains(supported, name) &&
+                    !contains(concatNames, name)) {
+                    is_supported = false;
+                    break;
+                }
+            }
+            if (is_supported) {
+                supported.emplace(concat->get_friendly_name());
+            }
+        }
+
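+        // Constants, Parameters and Results adjacent to supported operations are
+        // reported as supported as well, so that the device receives complete subgraphs.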
+        for (const auto& node : function->get_ops()) {
+            if (contains(supported, node->get_friendly_name())) {
+                for (const auto& inputNodeOutput : node->input_values()) {
+                    if (ngraph::op::is_constant(inputNodeOutput.get_node()) || ngraph::op::is_parameter(inputNodeOutput.get_node())) {
+                        supported.emplace(inputNodeOutput.get_node()->get_friendly_name());
+                    }
+                }
+                for (const auto& outputs : node->outputs()) {
+                    for (const auto& outputNodeInput : outputs.get_target_inputs()) {
+                        if (ngraph::op::is_output(outputNodeInput.get_node())) {
+                            supported.emplace(outputNodeInput.get_node()->get_friendly_name());
+                        }
+                    }
+                }
+            }
+        }
+
+        for (const auto& layerName : supported) {
+            res.supportedLayersMap.emplace(layerName, GetName());
+        }
+    } else {
+        const auto log = std::make_shared<Logger>(
+            "GraphCompiler",
+            parsedConfigCopy.logLevel(),
+            defaultOutput(parsedConfigCopy.compilerLogFilePath()));
+
+        const auto layerNames = getSupportedLayers(
+            network,
+            static_cast<Platform>(parsedConfigCopy.platform()),
+            parsedConfigCopy.compileConfig(),
+            log,
+            GetCore());
+
+        for (const auto& layerName : layerNames) {
+            res.supportedLayersMap.insert({ layerName, GetName() });
+        }
     }
 }
 
diff --git a/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/query_network.cpp b/inference-engine/tests/functional/plugin/myriad/shared_tests_instances/behavior/query_network.cpp
new file mode 100644 (file)
index 0000000..ec66867
--- /dev/null
@@ -0,0 +1,24 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "hetero/query_network.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/subgraph_builders.hpp"
+
+namespace {
+using namespace HeteroTests;
+
+auto ConvBias = ngraph::builder::subgraph::makeConvBias();
+auto TIwithLSTMcell = ngraph::builder::subgraph::makeTIwithLSTMcell();
+auto SplitConvConcat = ngraph::builder::subgraph::makeNestedSplitConvConcat();
+auto BranchSplitConvConcat = ngraph::builder::subgraph::makeSplitConvConcatNestedInBranch();
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, QueryNetworkTest,
+                        ::testing::Combine(
+                                ::testing::Values("MYRIAD", "HETERO:MYRIAD,CPU", "MULTI:MYRIAD,CPU"),
+                                ::testing::Values(ConvBias, TIwithLSTMcell, SplitConvConcat, BranchSplitConvConcat)),
+                        QueryNetworkTest::getTestCaseName);
+}  // namespace