Execution graph via ngraph for CPU plugin (#510)
author Ilya Lavrenov <ilya.lavrenov@intel.com>
Wed, 17 Jun 2020 11:42:41 +0000 (14:42 +0300)
committer GitHub <noreply@github.com>
Wed, 17 Jun 2020 11:42:41 +0000 (14:42 +0300)
* Execution graph via ngraph for CPU plugin

* Fixes

* Migrated to VariantImpl instead of Parameter

* Reverted to dedicated ExecutionNode once again

* Re-use new execution graph in tests

* Fixed one more test to use execution graph via ngraph::Function

15 files changed:
inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp
inference-engine/src/inference_engine/ie_rtti.cpp
inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
inference-engine/src/legacy_api/src/network_serializer.cpp
inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp
inference-engine/src/mkldnn_plugin/mkldnn_graph.h
inference-engine/src/mkldnn_plugin/mkldnn_graph_dumper.cpp
inference-engine/src/mkldnn_plugin/mkldnn_graph_dumper.h
inference-engine/src/plugin_api/exec_graph_info.hpp
inference-engine/tests/functional/plugin/cpu/single_layer_tests/cpu_test_utils.hpp
inference-engine/tests/functional/plugin/shared/include/behavior/exec_graph_info.hpp
inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp
inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/unique_node_names.cpp
inference-engine/tests/functional/plugin/shared/src/ngraph_conversion_tests/conv_bias_fusion.cpp
inference-engine/tests_deprecated/behavior/shared_tests/plugin_tests/behavior_test_plugin_infer_request_fixture.cpp

index 1619098..194a598 100644 (file)
@@ -28,6 +28,7 @@
 
 #include "ngraph_ops/eltwise.hpp"
 #include "graph_tools.hpp"
+#include "exec_graph_info.hpp"
 #include "graph_transformer.h"
 #include "ie_util_internal.hpp"
 #include "ie_ngraph_utils.hpp"
@@ -458,6 +459,29 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath, const std
                                            ResponseDesc* resp) const noexcept {
     auto network = cnnNetwork;
     if (!network) {
+        // TODO: once Serialization::Serialize supports true IR v10
+        // remove this conversion and WA for execution graph
+        try {
+            bool isExecutionGraph = true;
+            for (const auto & op : _ngraph_function->get_ops()) {
+                auto & rtInfo = op->get_rt_info();
+                if (rtInfo.find(ExecGraphInfoSerialization::PERF_COUNTER) == rtInfo.end()) {
+                    isExecutionGraph = false;
+                    break;
+                }
+            }
+            if (isExecutionGraph) {
+                Serialization::Serialize(xmlPath, binPath, (InferenceEngine::ICNNNetwork&)*this);
+                return OK;
+            }
+        } catch (const InferenceEngineException& e) {
+            return DescriptionBuffer(GENERAL_ERROR, resp) << e.what();
+        } catch (const std::exception& e) {
+            return DescriptionBuffer(UNEXPECTED, resp) << e.what();
+        } catch (...) {
+            return DescriptionBuffer(UNEXPECTED, resp);
+        }
+
         auto graph = cloneFunction();
         // Disable shape inference (WA for generic operations)
         ::ngraph::op::GenericIE::DisableReshape noReshape(graph);
index 58c893c..8a9481d 100644 (file)
 #include <ie_parameter.hpp>
 #include <ie_iextension.h>
 #include <ie_extension.h>
+#include <exec_graph_info.hpp>
 
 #include <ngraph/opsets/opset.hpp>
 
 using namespace InferenceEngine;
 
 //
+// exec_graph_info.hpp
+//
+constexpr ngraph::NodeTypeInfo ExecGraphInfoSerialization::ExecutionNode::type_info;
+
+const ngraph::NodeTypeInfo&
+ExecGraphInfoSerialization::ExecutionNode::get_type_info() const {
+    return type_info;
+}
+
+//
 // ie_blob.h
 //
 
index 87ecec6..b34e599 100644 (file)
@@ -35,6 +35,7 @@
 #include "ngraph_ops/rnn_cell_ie.hpp"
 #include "ngraph_ops/topk_ie.hpp"
 #include "generic_ie.hpp"
+#include "exec_graph_info.hpp"
 
 #include "ie_profiling.hpp"
 #include "ie_cnn_layer_builder_ngraph.h"
@@ -732,7 +733,7 @@ std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_p
         // Set originalLayersNames from FusedNames
         std::string originalNames = ::ngraph::getFusedNames(layer);
         if (!originalNames.empty()) {
-            cnnLayer->params["originalLayersNames"] = originalNames;
+            cnnLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES] = originalNames;
         }
 
         std::string primitivesPriority = ::ngraph::getPrimitivesPriority(layer);
@@ -872,7 +873,11 @@ std::shared_ptr<CNNNetworkImpl> convertFunctionToICNNNetwork(const std::shared_p
                                    << " is not connected to any data";
             }
         }
-        layer->validateLayer();
+
+        // execution ngraph is fake graph and should not be validated
+        if (layer->params.count(ExecGraphInfoSerialization::PERF_COUNTER) == 0) {
+            layer->validateLayer();
+        }
     }
 
     if (!cnnNetworkImpl) THROW_IE_EXCEPTION << "Cannot convert nGraph function to CNNNetworkImpl!";
index 442dc5f..12ad778 100644 (file)
 #include <string>
 #include <vector>
 #include <unordered_set>
+#include <sstream>
 
 #include "details/caseless.hpp"
 #include "details/ie_cnn_network_tools.h"
 #include "exec_graph_info.hpp"
 #include "xml_parse_utils.h"
+#include "ie_ngraph_utils.hpp"
+#include <ngraph/variant.hpp>
 
 namespace InferenceEngine {
 namespace Serialization {
@@ -412,9 +415,125 @@ std::vector<CNNLayerPtr> TopologicalSort(const ICNNNetwork& network) {
     return ordered;
 }
 
+namespace {
+
+void FillXmlDocWithExecutionNGraph(const InferenceEngine::ICNNNetwork& network,
+                                   pugi::xml_document& doc) {
+    std::shared_ptr<const ngraph::Function> function = network.getFunction();
+    if (function == nullptr) {
+        THROW_IE_EXCEPTION << network.getName() << " does not represent ngraph::Function";
+    }
+
+    std::vector<std::shared_ptr<ngraph::Node>> ordered = function->get_ordered_ops();
+    pugi::xml_node netXml = doc.append_child("net");
+    netXml.append_attribute("name").set_value(network.getName().c_str());
+
+    pugi::xml_node layers = netXml.append_child("layers");
+    std::unordered_map<std::shared_ptr<ngraph::Node>, size_t> matching;
+
+    for (size_t i = 0; i < ordered.size(); ++i) {
+        matching[ordered[i]] = i;
+        const std::shared_ptr<ngraph::Node> node = ordered[i];
+        auto params = node->get_rt_info();
+
+        auto layerTypeVariant = params.find(ExecGraphInfoSerialization::LAYER_TYPE);
+        if (layerTypeVariant == params.end()) {
+            THROW_IE_EXCEPTION << node->get_friendly_name() << " does not define "
+                               << ExecGraphInfoSerialization::LAYER_TYPE << " attribute.";
+        }
+        using VariantString = ngraph::VariantImpl<std::string>;
+        auto layerTypeValueStr = std::dynamic_pointer_cast<VariantString>(layerTypeVariant->second);
+        IE_ASSERT(layerTypeValueStr != nullptr);
+        params.erase(layerTypeVariant);
+
+        pugi::xml_node layer = layers.append_child("layer");
+        layer.append_attribute("name").set_value(node->get_friendly_name().c_str());
+        layer.append_attribute("type").set_value(layerTypeValueStr->get().c_str());
+        layer.append_attribute("id").set_value(i);
+
+        if (!params.empty()) {
+            pugi::xml_node data = layer.append_child("data");
+
+            for (const auto& it : params) {
+                if (auto strValue = std::dynamic_pointer_cast<VariantString>(it.second))
+                    data.append_attribute(it.first.c_str()).set_value(strValue->get().c_str());
+            }
+        }
+
+        if (node->get_input_size() > 0) {
+            pugi::xml_node input = layer.append_child("input");
+
+            for (size_t iport = 0; iport < node->get_input_size(); iport++) {
+                const ngraph::Shape & dims = node->get_input_shape(iport);
+                pugi::xml_node port = input.append_child("port");
+
+                port.append_attribute("id").set_value(iport);
+                for (auto dim : dims) {
+                    port.append_child("dim").text().set(dim);
+                }
+            }
+        }
+        if (node->get_output_size() > 0 &&
+            // ngraph::op::Result still have single output while we should not print it
+            !std::dynamic_pointer_cast<ngraph::op::Result>(node)) {
+            pugi::xml_node output = layer.append_child("output");
+
+            for (size_t oport = 0; oport < node->get_output_size(); oport++) {
+                pugi::xml_node port = output.append_child("port");
+                Precision outputPrecision = details::convertPrecision(node->get_output_element_type(oport));
+
+                port.append_attribute("id").set_value(node->get_input_size() + oport);
+                port.append_attribute("precision").set_value(outputPrecision.name());
+
+                for (const auto dim : node->get_output_shape(oport)) {
+                    port.append_child("dim").text().set(dim);
+                }
+            }
+        }
+    }
+
+    pugi::xml_node edges = netXml.append_child("edges");
+
+    for (const auto& ord : ordered) {
+        const std::shared_ptr<ngraph::Node> parentNode = ord;
+
+        if (parentNode->get_output_size() > 0) {
+            auto itFrom = matching.find(parentNode);
+            if (itFrom == matching.end()) {
+                THROW_IE_EXCEPTION << "Internal error, cannot find " << parentNode->get_friendly_name()
+                                   << " in matching container during serialization of IR";
+            }
+            for (size_t oport = 0; oport < parentNode->get_output_size(); oport++) {
+                ngraph::Output<ngraph::Node> parentPort = parentNode->output(oport);
+                for (const auto& childPort : parentPort.get_target_inputs()) {
+                    ngraph::Node * childNode = childPort.get_node();
+                    for (int iport = 0; iport < childNode->get_input_size(); iport++) {
+                        if (childNode->input_value(iport).get_node() == parentPort.get_node()) {
+                            auto itTo = matching.find(childNode->shared_from_this());
+                            if (itTo == matching.end()) {
+                                THROW_IE_EXCEPTION << "Broken edge form layer "
+                                                   << parentNode->get_friendly_name() << " to layer "
+                                                   << childNode->get_friendly_name()
+                                                   << "during serialization of IR";
+                            }
+                            pugi::xml_node edge = edges.append_child("edge");
+                            edge.append_attribute("from-layer").set_value(itFrom->second);
+                            edge.append_attribute("from-port").set_value(oport + parentNode->get_input_size());
+
+                            edge.append_attribute("to-layer").set_value(itTo->second);
+                            edge.append_attribute("to-port").set_value(iport);
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+}  // namespace
 
 std::size_t FillXmlDoc(const InferenceEngine::ICNNNetwork& network, pugi::xml_document& doc,
-                                          const bool execGraphInfoSerialization, const bool dumpWeights) {
+                       const bool execGraphInfoSerialization, const bool dumpWeights) {
     const std::vector<CNNLayerPtr> ordered = TopologicalSort(network);
     pugi::xml_node netXml = doc.append_child("net");
     netXml.append_attribute("name").set_value(network.getName().c_str());
@@ -581,11 +700,35 @@ void SerializeBlobs(std::ostream& stream, const InferenceEngine::ICNNNetwork& ne
 }
 
 void Serialize(const std::string& xmlPath, const std::string& binPath,
-                                  const InferenceEngine::ICNNNetwork& network) {
-    const std::vector<CNNLayerPtr> ordered = TopologicalSort(network);
-
+               const InferenceEngine::ICNNNetwork& network) {
     // A flag for serializing executable graph information (not complete IR)
     bool execGraphInfoSerialization = false;
+    pugi::xml_document doc;
+
+    if (auto function = network.getFunction()) {
+        execGraphInfoSerialization = true;
+
+        // go over all operations and check whether performance stat is set
+        for (const auto & op : function->get_ops()) {
+            auto & rtInfo = op->get_rt_info();
+            if (rtInfo.find(ExecGraphInfoSerialization::PERF_COUNTER) == rtInfo.end()) {
+                execGraphInfoSerialization = false;
+                break;
+            }
+        }
+
+        if (execGraphInfoSerialization) {
+            FillXmlDocWithExecutionNGraph(network, doc);
+
+            if (!doc.save_file(xmlPath.c_str())) {
+                THROW_IE_EXCEPTION << "file '" << xmlPath << "' was not serialized";
+            }
+
+            return;
+        }
+    }
+
+    const std::vector<CNNLayerPtr> ordered = TopologicalSort(network);
     // If first layer has perfCounter parameter set then it's executable graph info serialization.
     // All other layers must also have this parameter set.
     if (ordered[0]->params.find(ExecGraphInfoSerialization::PERF_COUNTER) != ordered[0]->params.end()) {
@@ -599,8 +742,6 @@ void Serialize(const std::string& xmlPath, const std::string& binPath,
     }
 
     bool dumpWeights = !execGraphInfoSerialization & !binPath.empty();
-
-    pugi::xml_document doc;
     FillXmlDoc(network, doc, execGraphInfoSerialization, dumpWeights);
 
     if (!doc.save_file(xmlPath.c_str())) {
index 9774692..7218645 100644 (file)
@@ -1199,5 +1199,5 @@ void MKLDNNGraph::do_after(const std::string &dir, const MKLDNNNodePtr &node) {
 }
 
 InferenceEngine::ICNNNetwork::Ptr MKLDNNGraph::dump() const {
-    return dump_graph_as_ie_net(*this);
+    return dump_graph_as_ie_ngraph_net(*this);
 }
index 03a504a..4cacba4 100644 (file)
@@ -142,6 +142,7 @@ protected:
     friend class MKLDNNInferRequest;
     friend class MKLDNNGraphlessInferRequest;
     friend std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph);
+    friend std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph);
 
 private:
     void dumpToDotFile(std::string file) const;
index 1678ee5..7cba1ba 100644 (file)
@@ -5,8 +5,11 @@
 #include "mkldnn_graph_dumper.h"
 #include "cnn_network_impl.hpp"
 #include "ie_util_internal.hpp"
+#include "ie_ngraph_utils.hpp"
 #include "exec_graph_info.hpp"
 #include "mkldnn_debug.h"
+#include "generic_ie.hpp"
+#include <ngraph/variant.hpp>
 
 #include <vector>
 #include <string>
@@ -17,12 +20,19 @@ using namespace InferenceEngine;
 
 namespace MKLDNNPlugin {
 
-static void copy_node_metadata(const MKLDNNNodePtr &, CNNLayer::Ptr &);
-static void drawer_callback(const InferenceEngine::CNNLayerPtr, ordered_properties &, ordered_properties &);
+namespace {
 
-CNNLayer::Ptr convert_node(const MKLDNNNodePtr &node) {
-    CNNLayer::Ptr layer(new CNNLayer({"name", "type", Precision::FP32}));
-    copy_node_metadata(node, layer);
+std::map<std::string, std::string> extract_node_metadata(const MKLDNNNodePtr &);
+void drawer_callback(const InferenceEngine::CNNLayerPtr, ordered_properties &, ordered_properties &);
+
+}  // namespace
+
+CNNLayer::Ptr create_cnnlayer(const MKLDNNNodePtr &node) {
+    CNNLayer::Ptr layer(new CNNLayer({node->getName(), "type", Precision::FP32}));
+
+    layer->params = extract_node_metadata(node);
+    layer->type = layer->params[ExecGraphInfoSerialization::LAYER_TYPE];
+    layer->params.erase(ExecGraphInfoSerialization::LAYER_TYPE);
 
     auto &cfg = node->getSelectedPrimitiveDescriptor()->getConfig();
     layer->insData.resize(cfg.inConfs.size());
@@ -31,6 +41,91 @@ CNNLayer::Ptr convert_node(const MKLDNNNodePtr &node) {
     return layer;
 }
 
+std::shared_ptr<ICNNNetwork> dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph) {
+    std::map<MKLDNNNodePtr, std::shared_ptr<ngraph::Node> > node2layer;
+
+    ngraph::ResultVector results;
+    ngraph::ParameterVector params;
+
+    auto get_inputs = [&] (const MKLDNNNodePtr & node) {
+        auto pr_edges = node->getParentEdges();
+        ngraph::OutputVector inputs(pr_edges.size());
+
+        for (int i = 0; i < pr_edges.size(); i++) {
+            auto edge = node->getParentEdgeAt(i);
+            int pr_port = edge->getInputNum();
+            int ch_port = edge->getOutputNum();
+            auto pr_node = edge->getParent();
+
+            IE_ASSERT(node2layer.count(pr_node) == 1);
+            auto pr = node2layer[pr_node];
+
+            inputs[ch_port] = pr->output(pr_port);
+        }
+
+        return inputs;
+    };
+
+    auto create_ngraph_node = [&](const MKLDNNNodePtr &node) {
+        bool is_input = false, is_output = false;
+        for (auto && kvp : graph.inputNodes) {
+            if (kvp.second == node) {
+                is_input = true;
+                break;
+            }
+        }
+
+        for (auto && onode : graph.outputNodes) {
+            if (onode == node) {
+                is_output = true;
+                break;
+            }
+        }
+
+        auto meta_data = extract_node_metadata(node);
+        std::shared_ptr<ngraph::Node> return_node;
+        if (is_input) {
+            auto desc = node->getChildEdgeAt(0)->getDesc();
+            auto param = std::make_shared<ngraph::op::Parameter>(
+                details::convertPrecision(desc.getPrecision()),
+                ngraph::PartialShape(desc.getDims()));
+            return_node = param;
+            params.push_back(param);
+        } else if (is_output) {
+            results.emplace_back(std::make_shared<ngraph::op::Result>(get_inputs(node).back()));
+            return_node = results.back();
+        } else {
+            return_node = std::make_shared<ExecGraphInfoSerialization::ExecutionNode>(
+                get_inputs(node), node->getSelectedPrimitiveDescriptor()->getConfig().outConfs.size());
+
+            for (size_t port = 0; port < return_node->get_output_size(); ++port) {
+                auto desc = node->getChildEdgeAt(port)->getDesc();
+                return_node->set_output_type(port,
+                    details::convertPrecision(desc.getPrecision()),
+                    ngraph::PartialShape(desc.getDims()));
+            }
+        }
+
+        for (auto && kvp : meta_data)
+            return_node->get_rt_info()[kvp.first] = std::make_shared<::ngraph::VariantWrapper<std::string>>(kvp.second);
+        return_node->set_friendly_name(node->getName());
+
+        return return_node;
+    };
+
+    ngraph::NodeVector nodes;
+    nodes.reserve(graph.graphNodes.size());
+    for (auto &node : graph.graphNodes) {  // important: graph.graphNodes are in topological order
+        nodes.emplace_back(create_ngraph_node(node));
+        node2layer[node] = nodes.back();
+    }
+
+    ngraph::op::GenericIE::DisableReshape reshape(nodes);
+    auto function = std::make_shared<ngraph::Function>(results, params, graph._name);
+    InferenceEngine::CNNNetwork net(function);
+    return net;
+}
+
 std::shared_ptr<ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph) {
     auto net = std::make_shared<details::CNNNetworkImpl>();
 
@@ -40,7 +135,7 @@ std::shared_ptr<ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph) {
 
     // Copy all nodes to network
     for (auto &node : graph.graphNodes) {
-        auto layer = convert_node(node);
+        auto layer = create_cnnlayer(node);
         node2layer[node] = layer;
         net->addLayer(layer);
     }
@@ -95,26 +190,26 @@ void dump_graph_as_dot(const MKLDNNGraph &graph, std::ostream &out) {
 // Special converters of meta data
 //**********************************
 
-static const char BLUE[]  = "#D8D9F1";
-static const char GREEN[] = "#D9EAD3";
+namespace {
+
+std::map<std::string, std::string> extract_node_metadata(const MKLDNNNodePtr &node) {
+    std::map<std::string, std::string> serialization_info;
 
-void copy_node_metadata(const MKLDNNNodePtr &node, CNNLayer::Ptr &layer) {
     if (node->getType() == Input && node->isConstant()) {
         // We need to separate Input and Const layers
-        layer->type = "Const";
+        serialization_info[ExecGraphInfoSerialization::LAYER_TYPE] = "Const";
     } else if (node->getType() == Generic) {
         // Path to print actual name for extension layers
-        layer->type = node->getTypeStr();
+        serialization_info[ExecGraphInfoSerialization::LAYER_TYPE] = node->getTypeStr();
     } else {
-        layer->type = NameFromType(node->getType());
+        serialization_info[ExecGraphInfoSerialization::LAYER_TYPE] = NameFromType(node->getType());
     }
-    layer->name = node->getName();
 
     // Original layers
-    layer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES] = node->getOriginalLayers();
+    serialization_info[ExecGraphInfoSerialization::ORIGINAL_NAMES] = node->getOriginalLayers();
 
     // Implementation type name
-    layer->params[ExecGraphInfoSerialization::IMPL_TYPE] = node->getPrimitiveDescriptorType();
+    serialization_info[ExecGraphInfoSerialization::IMPL_TYPE] = node->getPrimitiveDescriptorType();
 
     std::string outputPrecisionsStr;
     if (!node->getChildEdges().empty()) {
@@ -139,7 +234,7 @@ void copy_node_metadata(const MKLDNNNodePtr &node, CNNLayer::Ptr &layer) {
             outputPrecisionsStr = node->getParentEdgeAt(0)->getDesc().getPrecision().name();
         }
     }
-    layer->params[ExecGraphInfoSerialization::OUTPUT_PRECISIONS] = outputPrecisionsStr;
+    serialization_info[ExecGraphInfoSerialization::OUTPUT_PRECISIONS] = outputPrecisionsStr;
 
     std::string outputLayoutsStr;
     auto outLayouts = node->getSelectedPrimitiveDescriptor()->getOutputLayouts();
@@ -162,21 +257,26 @@ void copy_node_metadata(const MKLDNNNodePtr &node, CNNLayer::Ptr &layer) {
     } else {
         outputLayoutsStr = mkldnn_fmt2str(mkldnn_format_undef);
     }
-    layer->params[ExecGraphInfoSerialization::OUTPUT_LAYOUTS] = outputLayoutsStr;
+    serialization_info[ExecGraphInfoSerialization::OUTPUT_LAYOUTS] = outputLayoutsStr;
 
     // Performance
     if (node->PerfCounter().avg() != 0) {
-        layer->params[ExecGraphInfoSerialization::PERF_COUNTER] = std::to_string(node->PerfCounter().avg());
+        serialization_info[ExecGraphInfoSerialization::PERF_COUNTER] = std::to_string(node->PerfCounter().avg());
     } else {
-        layer->params[ExecGraphInfoSerialization::PERF_COUNTER] = "not_executed";  // it means it was not calculated yet
+        serialization_info[ExecGraphInfoSerialization::PERF_COUNTER] = "not_executed";  // it means it was not calculated yet
     }
 
-    layer->params[ExecGraphInfoSerialization::EXECUTION_ORDER] = std::to_string(node->getExecIndex());
+    serialization_info[ExecGraphInfoSerialization::EXECUTION_ORDER] = std::to_string(node->getExecIndex());
+
+    return serialization_info;
 }
 
+const char BLUE[]  = "#D8D9F1";
+const char GREEN[] = "#D9EAD3";
+
 void drawer_callback(const InferenceEngine::CNNLayerPtr layer,
-        ordered_properties &printed_properties,
-        ordered_properties &node_properties) {
+                     ordered_properties &printed_properties,
+                     ordered_properties &node_properties) {
     const auto &params = layer->params;
 
     // Implementation
@@ -204,4 +304,6 @@ void drawer_callback(const InferenceEngine::CNNLayerPtr layer,
     node_properties.push_back({"xlabel", (perf != layer->params.end()) ? perf->second : ""});
 }
 
+}  // namespace
+
 }  // namespace MKLDNNPlugin
index 4fb9b34..f0901a4 100644 (file)
@@ -11,8 +11,9 @@
 
 namespace MKLDNNPlugin {
 
-    void dump_graph_as_dot(const MKLDNNGraph &graph, std::ostream &out);
+void dump_graph_as_dot(const MKLDNNGraph &graph, std::ostream &out);
 
-    std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph);
+std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph);
+std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph);
 
 }  // namespace MKLDNNPlugin
index dee9e6a..6c22c60 100644 (file)
 
 #pragma once
 
+#include <ie_api.h>
+#include <ie_parameter.hpp>
 #include <string>
 
+#include <ngraph/node.hpp>
+#include <ngraph/function.hpp>
+
 /**
 * @brief A namespace with const values for Execution Graph parameter names.
  *  
 namespace ExecGraphInfoSerialization {
 
 /**
- * @brief A general key for CNNLayer::params map. Used to get a string of layer names separated by a comma
+ * @brief Used to get a string of layer names separated by a comma
  *        from the original IR, which were fused/merged to the current executable primitive.
  */
 static const char ORIGINAL_NAMES[] = "originalLayersNames";
 
 /**
- * @brief A general key for CNNLayer::params map. Used to get a type of the executable primitive.
+ * @brief Used to get a type of the executable primitive.
  */
 static const char IMPL_TYPE[] = "primitiveType";
 
 /**
- * @brief A general key for CNNLayer::params map. Used to get output precisions of the executable primitive.
+ * @brief Used to get output precisions of the executable primitive.
  */
 static const char OUTPUT_PRECISIONS[] = "outputPrecisions";
 
 /**
- * @brief A general key for CNNLayer::params map. Used to get value of execution time of the executable primitive.
+ * @brief Used to get a value of execution time of the executable primitive.
  */
 static const char PERF_COUNTER[] = "execTimeMcs";
 
 /**
- * @brief A general key for CNNLayer::params map. Used to get output layouts of primitive.
+ * @brief Used to get output layouts of primitive.
  */
 static const char OUTPUT_LAYOUTS[] = "outputLayouts";
 
 /**
- * @brief A general key for CNNLayer::params map. Used to get an execution order of primitive.
+ * @brief Used to get an execution order of primitive.
  */
 static const char EXECUTION_ORDER[] = "execOrder";
 
+/**
+ * @brief Used to get a type of primitive.
+ */
+static const char LAYER_TYPE[] = "layerType";
+
+class INFERENCE_ENGINE_API_CLASS(ExecutionNode) : public ngraph::Node {
+public:
+    static constexpr ngraph::NodeTypeInfo type_info { "ExecutionNode", 0 };
+    const ngraph::NodeTypeInfo& get_type_info() const override;
+
+    ExecutionNode() = default;
+
+    ExecutionNode(const ngraph::OutputVector& arguments, size_t output_size = 1) :
+        Node(arguments, output_size) { }
+
+    std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& inputs) const override {
+        auto cloned = std::make_shared<ExecutionNode>();
+
+        cloned->set_arguments(inputs);
+
+        for (auto kvp : get_rt_info())
+            cloned->get_rt_info()[kvp.first] = kvp.second;
+
+        for (size_t i = 0; i < get_output_size(); ++i)
+            cloned->set_output_type(i, get_output_element_type(i), get_output_partial_shape(i));
+
+        return cloned;
+    }
+};
+
 }  // namespace ExecGraphInfoSerialization
index de6841b..72a63c9 100644 (file)
@@ -9,6 +9,10 @@
 #include "network_serializer.h"
 #include "ie_system_conf.h"
 
+#include <ngraph/function.hpp>
+#include <ngraph/variant.hpp>
+#include <exec_graph_info.hpp>
+
 namespace CPUTestUtils {
 
 typedef enum {
@@ -71,31 +75,51 @@ IE_SUPPRESS_DEPRECATED_START
 void inline CheckCPUImpl(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType, std::vector<cpu_memory_format_t> inputMemoryFormats,
                          std::vector<cpu_memory_format_t> outputMemoryFormats, std::string selectedType) {
     InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo();
-    auto nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo);
-    for (auto &node : nodes) {
-        if (node->type == nodeType) {
-            ASSERT_LE(inputMemoryFormats.size(), node->insData.size());
-            ASSERT_LE(outputMemoryFormats.size(), node->outData.size());
+    auto function = execGraphInfo.getFunction();
+    ASSERT_NE(nullptr, function);
+
+    for (const auto &node : function->get_ops()) {
+        const auto & rtInfo = node->get_rt_info();
+        auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
+            auto it = rtInfo.find(paramName);
+            IE_ASSERT(rtInfo.end() != it);
+            auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+            IE_ASSERT(nullptr != value);
+
+            return value->get();
+        };
+
+        auto getExecValueOutputsLayout = [] (std::shared_ptr<ngraph::Node> node) -> std::string {
+            auto rtInfo = node->get_rt_info();
+            auto it = rtInfo.find(ExecGraphInfoSerialization::OUTPUT_LAYOUTS);
+            IE_ASSERT(rtInfo.end() != it);
+            auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+            IE_ASSERT(nullptr != value);
+
+            return value->get();
+        };
+
+        if (getExecValue(ExecGraphInfoSerialization::LAYER_TYPE) == nodeType) {
+            ASSERT_LE(inputMemoryFormats.size(), node->get_input_size());
+            ASSERT_LE(outputMemoryFormats.size(), node->get_output_size());
             for (int i = 0; i < inputMemoryFormats.size(); i++) {
-                for (auto &parentNode : nodes) {
-                    for (int j = 0; j < parentNode->outData.size(); j++) {
-                        if (parentNode->outData[j]->getName() == node->insData[i].lock()->getName()) {
-                            auto actualInputMemoryFormat = parentNode->params.find("outputLayouts");
-                            ASSERT_NE(actualInputMemoryFormat, parentNode->params.end());
-                            ASSERT_EQ(inputMemoryFormats[i], cpu_str2fmt(actualInputMemoryFormat->second.c_str()));
+                for (const auto & parentPort : node->input_values()) {
+                    for (const auto & port : node->inputs()) {
+                        if (port.get_tensor_ptr() == parentPort.get_tensor_ptr()) {
+                            auto parentNode = parentPort.get_node_shared_ptr();
+                            auto actualInputMemoryFormat = getExecValueOutputsLayout(parentNode);
+                            ASSERT_EQ(inputMemoryFormats[i], cpu_str2fmt(actualInputMemoryFormat.c_str()));
                         }
                     }
                 }
             }
             for (int i = 0; i < outputMemoryFormats.size(); i++) {
-                auto actualOutputMemoryFormat = node->params.find("outputLayouts");
-                ASSERT_NE(actualOutputMemoryFormat, node->params.end());
-                ASSERT_EQ(outputMemoryFormats[i], cpu_str2fmt(actualOutputMemoryFormat->second.c_str()));
+                auto actualOutputMemoryFormat = getExecValue(ExecGraphInfoSerialization::OUTPUT_LAYOUTS);
+                ASSERT_EQ(outputMemoryFormats[i], cpu_str2fmt(actualOutputMemoryFormat.c_str()));
             }
 
-            auto primType = node->params.find("primitiveType");
-            ASSERT_NE(primType, node->params.end());
-            ASSERT_EQ(selectedType, primType->second);
+            auto primType = getExecValue(ExecGraphInfoSerialization::IMPL_TYPE);
+            ASSERT_EQ(selectedType, primType);
         }
     }
 }
index db102b5..7d377f4 100644 (file)
@@ -3,6 +3,7 @@
 //
 
 #include <memory>
+#include <ngraph/variant.hpp>
 #include "ie_extension.h"
 #include <condition_variable>
 #include "functional_test_utils/layer_test_utils.hpp"
@@ -55,26 +56,57 @@ TEST_P(ExecGraphTests, CheckExecGraphInfoBeforeExecution) {
             originalLayersMap[layer->get_friendly_name()] = 0;
         }
         int IteratorForLayersConstant = 0;
-        // Store all the layers from the executable graph information represented as CNNNetwork
-        IE_SUPPRESS_DEPRECATED_START
-        const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
-                InferenceEngine::details::CNNNetSortTopologically(execGraph);
-        for (const auto &execLayer : execGraphLayers) {
-            // Each layer from the execGraphInfo network must have PM data option set
-            ASSERT_EQ("not_executed", execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER]);
-            // Parse origin layer names (fused/merged layers) from the executable graph
-            // and compare with layers from the original model
-            auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
-            if (origFromExecLayer == "")
-                IteratorForLayersConstant++;
-            std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
-            std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
-                auto origLayer = originalLayersMap.find(layer);
-                ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
-                origLayer->second++;
-            });
+
+        if (auto function = execGraph.getFunction()) {
+            for (const auto & op : function->get_ops()) {
+                const auto & rtInfo = op->get_rt_info();
+
+                auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
+                    auto it = rtInfo.find(paramName);
+                    IE_ASSERT(rtInfo.end() != it);
+                    auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+                    IE_ASSERT(nullptr != value);
+
+                    return value->get();
+                };
+
+                // Each layer from the execGraphInfo network must have PM data option set
+                ASSERT_EQ("not_executed", getExecValue(ExecGraphInfoSerialization::PERF_COUNTER));
+                // Parse origin layer names (fused/merged layers) from the executable graph
+                // and compare with layers from the original model
+                auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES);
+                if (origFromExecLayer == "")
+                    IteratorForLayersConstant++;
+                std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+                std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+                    auto origLayer = originalLayersMap.find(layer);
+                    ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+                    origLayer->second++;
+                });
+            }
+        } else {
+            IE_SUPPRESS_DEPRECATED_START
+            // Store all the layers from the executable graph information represented as CNNNetwork
+            const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
+                    InferenceEngine::details::CNNNetSortTopologically(execGraph);
+            for (const auto &execLayer : execGraphLayers) {
+                // Each layer from the execGraphInfo network must have PM data option set
+                ASSERT_EQ("not_executed", execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER]);
+                // Parse origin layer names (fused/merged layers) from the executable graph
+                // and compare with layers from the original model
+                auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
+                if (origFromExecLayer == "")
+                    IteratorForLayersConstant++;
+                std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+                std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+                    auto origLayer = originalLayersMap.find(layer);
+                    ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+                    origLayer->second++;
+                });
+            }
+            IE_SUPPRESS_DEPRECATED_END
         }
-        IE_SUPPRESS_DEPRECATED_END
+
        // All layers from the original IR must be present within ExecGraphInfo
         for (auto &layer : originalLayersMap) {
             if ((layer.second == 0) && (IteratorForLayersConstant > 0)) {
@@ -110,32 +142,69 @@ TEST_P(ExecGraphTests, CheckExecGraphInfoAfterExecution) {
         }
         int IteratorForLayersConstant = 0;
         // Store all the layers from the executable graph information represented as CNNNetwork
-        IE_SUPPRESS_DEPRECATED_START
-        const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
-                InferenceEngine::details::CNNNetSortTopologically(execGraph);
         bool has_layer_with_valid_time = false;
-        for (const auto &execLayer : execGraphLayers) {
-            // At least one layer in the topology should be executed and have valid perf counter value
-            try {
-                float x = static_cast<float>(std::atof(
-                        execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER].c_str()));
-                ASSERT_GE(x, 0.0f);
-                has_layer_with_valid_time = true;
-            } catch (std::exception &) {}
-
-            // Parse origin layer names (fused/merged layers) from the executable graph
-            // and compare with layers from the original model
-            auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
-            std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
-            if (origFromExecLayer == "")
-                IteratorForLayersConstant++;
-            std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
-                auto origLayer = originalLayersMap.find(layer);
-                ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
-                origLayer->second++;
-            });
+
+        if (auto function = execGraph.getFunction()) {
+            for (const auto & op : function->get_ops()) {
+                const auto & rtInfo = op->get_rt_info();
+
+                auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
+                    auto it = rtInfo.find(paramName);
+                    IE_ASSERT(rtInfo.end() != it);
+                    auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+                    IE_ASSERT(nullptr != value);
+
+                    return value->get();
+                };
+
+                // At least one layer in the topology should be executed and have valid perf counter value
+                try {
+                    float x = static_cast<float>(std::atof(
+                            getExecValue(ExecGraphInfoSerialization::PERF_COUNTER).c_str()));
+                    ASSERT_GE(x, 0.0f);
+                    has_layer_with_valid_time = true;
+                } catch (std::exception &) {}
+
+                // Parse origin layer names (fused/merged layers) from the executable graph
+                // and compare with layers from the original model
+                auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES);
+                std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+                if (origFromExecLayer == "")
+                    IteratorForLayersConstant++;
+                std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+                    auto origLayer = originalLayersMap.find(layer);
+                    ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+                    origLayer->second++;
+                });
+            }
+        } else {
+            IE_SUPPRESS_DEPRECATED_START
+            const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
+                InferenceEngine::details::CNNNetSortTopologically(execGraph);
+            for (const auto &execLayer : execGraphLayers) {
+                // At least one layer in the topology should be executed and have valid perf counter value
+                try {
+                    float x = static_cast<float>(std::atof(
+                            execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER].c_str()));
+                    ASSERT_GE(x, 0.0f);
+                    has_layer_with_valid_time = true;
+                } catch (std::exception &) {}
+
+                // Parse origin layer names (fused/merged layers) from the executable graph
+                // and compare with layers from the original model
+                auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
+                std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+                if (origFromExecLayer == "")
+                    IteratorForLayersConstant++;
+                std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+                    auto origLayer = originalLayersMap.find(layer);
+                    ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+                    origLayer->second++;
+                });
+            }
+            IE_SUPPRESS_DEPRECATED_END
         }
-        IE_SUPPRESS_DEPRECATED_END
+
         ASSERT_TRUE(has_layer_with_valid_time);
 
         // All layers from the original IR must be present within ExecGraphInfo
index e458828..cd17acb 100644 (file)
@@ -6,6 +6,9 @@
 
 #include <ie_core.hpp>
 
+#include <ngraph/function.hpp>
+#include <ngraph/variant.hpp>
+
 #include "functional_test_utils/plugin_cache.hpp"
 #include "functional_test_utils/layer_test_utils.hpp"
 
@@ -52,19 +55,43 @@ TEST_P(ExecGraphInputsFusingBinConv, CheckNumInputsInBinConvFusingWithConv) {
     auto ie = PluginCache::get().ie();
     auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
 
-    IE_SUPPRESS_DEPRECATED_START
     InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo();
-    std::vector<InferenceEngine::CNNLayerPtr> nodes;
-    ASSERT_NO_THROW(nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo));
-    for (auto &node : nodes) {
-        if (node->type == "BinaryConvolution") {
-            std::string originalLayersNames = node->params["originalLayersNames"];
-            ASSERT_TRUE(originalLayersNames.find("BinaryConvolution") != std::string::npos);
-            ASSERT_TRUE(originalLayersNames.find("Add") != std::string::npos);
-            ASSERT_EQ(node->insData.size(), 1);
+
+    if (auto function = execGraphInfo.getFunction()) {
+        for (const auto & op : function->get_ops()) {
+            const auto & rtInfo = op->get_rt_info();
+
+            auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
+                auto it = rtInfo.find(paramName);
+                IE_ASSERT(rtInfo.end() != it);
+                auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+                IE_ASSERT(nullptr != value);
+
+                return value->get();
+            };
+
+            auto layerType = getExecValue("layerType");
+            if (layerType == "BinaryConvolution") {
+                auto originalLayersNames = getExecValue("originalLayersNames");
+                ASSERT_TRUE(originalLayersNames.find("BinaryConvolution") != std::string::npos);
+                ASSERT_TRUE(originalLayersNames.find("Add") != std::string::npos);
+                ASSERT_EQ(op->get_input_size(), 1);
+            }
+        }
+    } else {
+        IE_SUPPRESS_DEPRECATED_START
+        std::vector<InferenceEngine::CNNLayerPtr> nodes;
+        ASSERT_NO_THROW(nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo));
+        for (auto &node : nodes) {
+            if (node->type == "BinaryConvolution") {
+                std::string originalLayersNames = node->params["originalLayersNames"];
+                ASSERT_TRUE(originalLayersNames.find("BinaryConvolution") != std::string::npos);
+                ASSERT_TRUE(originalLayersNames.find("Add") != std::string::npos);
+                ASSERT_EQ(node->insData.size(), 1);
+            }
         }
+        IE_SUPPRESS_DEPRECATED_END
     }
-    IE_SUPPRESS_DEPRECATED_END
 
     fnPtr.reset();
 };
index 7a4b213..185e201 100644 (file)
@@ -10,6 +10,9 @@
 #include <functional>
 
 #include <ie_core.hpp>
+#include <ngraph/function.hpp>
+#include <exec_graph_info.hpp>
+#include <ngraph/variant.hpp>
 
 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
@@ -66,20 +69,41 @@ TEST_P(ExecGraphUniqueNodeNames, CheckUniqueNodeNames) {
     auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
 
     InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo();
-    auto nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo);
 
     int numReorders = 0;
     int expectedReorders = 2;
     std::unordered_set<std::string> names;
-    for (auto &node : nodes) {
-        IE_SUPPRESS_DEPRECATED_START
-        ASSERT_TRUE(names.find(node->name) == names.end()) << "Node with name " << node->name << "already exists";
-        names.insert(node->name);
-        if (node->type == "Reorder") {
-            numReorders++;
+
+    if (auto function = execGraphInfo.getFunction()) {
+        for (const auto & op : function->get_ops()) {
+            ASSERT_TRUE(names.find(op->get_friendly_name()) == names.end()) <<
+                "Node with name " << op->get_friendly_name() << "already exists";
+            names.insert(op->get_friendly_name());
+
+            const auto & rtInfo = op->get_rt_info();
+            auto it = rtInfo.find(ExecGraphInfoSerialization::LAYER_TYPE);
+            ASSERT_NE(rtInfo.end(), it);
+            auto opType = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+            ASSERT_NE(nullptr, opType);
+
+            if (opType->get() == "Reorder") {
+                numReorders++;
+            }
+        }
+    } else {
+        auto nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo);
+        for (auto &node : nodes) {
+            IE_SUPPRESS_DEPRECATED_START
+            ASSERT_TRUE(names.find(node->name) == names.end()) <<
+                "Node with name " << node->name << "already exists";
+            names.insert(node->name);
+            if (node->type == "Reorder") {
+                numReorders++;
+            }
+            IE_SUPPRESS_DEPRECATED_END
         }
-        IE_SUPPRESS_DEPRECATED_END
     }
+
     ASSERT_TRUE(numReorders == expectedReorders) << "Expected reorders: " << expectedReorders << ", actual reorders: " << numReorders;
 
     fnPtr.reset();
index 14a55ab..93a5518 100644 (file)
@@ -4,6 +4,7 @@
 //
 
 #include "ngraph_conversion_tests/conv_bias_fusion.hpp"
+#include <ngraph/variant.hpp>
 
 namespace NGraphConversionTestsDefinitions {
 
@@ -43,10 +44,24 @@ TEST_P(ConvBiasFusion, ConvBiasFusion) {
     InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(network, device);
     auto net = exeNetwork.GetExecGraphInfo();
 
-    IE_SUPPRESS_DEPRECATED_START
-    auto add_layer = net.getLayerByName(getOutputName().c_str());
-    ASSERT_EQ(add_layer->params["originalLayersNames"], "add,conv");
-    IE_SUPPRESS_DEPRECATED_END
+    if (auto function = net.getFunction()) {
+        for (const auto & op : function->get_ops()) {
+            if (op->get_friendly_name() ==  getOutputName()) {
+                auto rtInfo = op->get_rt_info();
+                auto it = rtInfo.find("originalLayersNames");
+                ASSERT_NE(rtInfo.end(), it);
+                auto variant = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+                ASSERT_NE(nullptr, variant);
+                ASSERT_EQ(variant->get(), "add,conv");
+                break;
+            }
+        }
+    } else {
+        IE_SUPPRESS_DEPRECATED_START
+        auto add_layer = net.getLayerByName(getOutputName().c_str());
+        ASSERT_EQ(add_layer->params["originalLayersNames"], "add,conv");
+        IE_SUPPRESS_DEPRECATED_END
+    }
 }
 
 }  // namespace NGraphConversionTestsDefinitions
\ No newline at end of file
index 590f03c..d9cf3f3 100644 (file)
@@ -67,8 +67,7 @@ void BehaviorPluginTestInferRequest::_createAndCheckInferRequest(
         ASSERT_NO_THROW(testEnv->core.AddExtension(extension));
     }
 
-    Core ie;
-    testEnv->network = ie.ReadNetwork(param.model_xml_str, param.weights_blob);
+    testEnv->network = testEnv->core.ReadNetwork(param.model_xml_str, param.weights_blob);
     /* Call conversion from CNNNetwork NgraphImpl to CNNNetwork */
     testEnv->network.begin();
 
@@ -82,8 +81,6 @@ void BehaviorPluginTestInferRequest::_createAndCheckInferRequest(
     full_config[PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT] = "behavior_tests_execution_graph_dump";
 #endif
 
-     ResponseDesc response;
-//     ASSERT_NO_THROW(testEnv->exeNetwork = testEnv->core.LoadNetwork(testEnv->network, param.device, full_config));
      try {
          testEnv->exeNetwork = testEnv->core.LoadNetwork(testEnv->network, param.device, full_config);
      } catch (InferenceEngineException ex) {