#include "ngraph_ops/eltwise.hpp"
#include "graph_tools.hpp"
+#include "exec_graph_info.hpp"
#include "graph_transformer.h"
#include "ie_util_internal.hpp"
#include "ie_ngraph_utils.hpp"
ResponseDesc* resp) const noexcept {
auto network = cnnNetwork;
if (!network) {
+ // TODO: once Serialization::Serialize supports true IR v10
+ // remove this conversion and WA for execution graph
+ try {
+ bool isExecutionGraph = true;
+ for (const auto & op : _ngraph_function->get_ops()) {
+ auto & rtInfo = op->get_rt_info();
+ if (rtInfo.find(ExecGraphInfoSerialization::PERF_COUNTER) == rtInfo.end()) {
+ isExecutionGraph = false;
+ break;
+ }
+ }
+ if (isExecutionGraph) {
+ Serialization::Serialize(xmlPath, binPath, (InferenceEngine::ICNNNetwork&)*this);
+ return OK;
+ }
+ } catch (const InferenceEngineException& e) {
+ return DescriptionBuffer(GENERAL_ERROR, resp) << e.what();
+ } catch (const std::exception& e) {
+ return DescriptionBuffer(UNEXPECTED, resp) << e.what();
+ } catch (...) {
+ return DescriptionBuffer(UNEXPECTED, resp);
+ }
+
auto graph = cloneFunction();
// Disable shape inference (WA for generic operations)
::ngraph::op::GenericIE::DisableReshape noReshape(graph);
#include <ie_parameter.hpp>
#include <ie_iextension.h>
#include <ie_extension.h>
+#include <exec_graph_info.hpp>
#include <ngraph/opsets/opset.hpp>
using namespace InferenceEngine;
//
+// exec_graph_info.hpp
+//
+// Out-of-line definition of the constexpr static member declared in
+// exec_graph_info.hpp (required for ODR-use of the symbol).
+constexpr ngraph::NodeTypeInfo ExecGraphInfoSerialization::ExecutionNode::type_info;
+
+// Returns the static type descriptor of ExecutionNode ({"ExecutionNode", 0}).
+const ngraph::NodeTypeInfo&
+ExecGraphInfoSerialization::ExecutionNode::get_type_info() const {
+ return type_info;
+}
+
+//
// ie_blob.h
//
#include "ngraph_ops/rnn_cell_ie.hpp"
#include "ngraph_ops/topk_ie.hpp"
#include "generic_ie.hpp"
+#include "exec_graph_info.hpp"
#include "ie_profiling.hpp"
#include "ie_cnn_layer_builder_ngraph.h"
// Set originalLayersNames from FusedNames
std::string originalNames = ::ngraph::getFusedNames(layer);
if (!originalNames.empty()) {
- cnnLayer->params["originalLayersNames"] = originalNames;
+ cnnLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES] = originalNames;
}
std::string primitivesPriority = ::ngraph::getPrimitivesPriority(layer);
<< " is not connected to any data";
}
}
- layer->validateLayer();
+
+ // execution ngraph is fake graph and should not be validated
+ if (layer->params.count(ExecGraphInfoSerialization::PERF_COUNTER) == 0) {
+ layer->validateLayer();
+ }
}
if (!cnnNetworkImpl) THROW_IE_EXCEPTION << "Cannot convert nGraph function to CNNNetworkImpl!";
#include <string>
#include <vector>
#include <unordered_set>
+#include <sstream>
#include "details/caseless.hpp"
#include "details/ie_cnn_network_tools.h"
#include "exec_graph_info.hpp"
#include "xml_parse_utils.h"
+#include "ie_ngraph_utils.hpp"
+#include <ngraph/variant.hpp>
namespace InferenceEngine {
namespace Serialization {
return ordered;
}
+namespace {
+
+// Serializes an execution-graph ngraph::Function (as produced by a plugin's
+// GetExecGraphInfo) into IR-like XML: a <net> element with <layers> and
+// <edges> children. Per-layer attributes are taken from each node's rt_info
+// (string variants only); the mandatory LAYER_TYPE entry becomes the layer
+// "type" attribute and is removed from the <data> attribute set.
+// Throws if the network holds no ngraph::Function or a node lacks LAYER_TYPE.
+void FillXmlDocWithExecutionNGraph(const InferenceEngine::ICNNNetwork& network,
+ pugi::xml_document& doc) {
+ std::shared_ptr<const ngraph::Function> function = network.getFunction();
+ if (function == nullptr) {
+ THROW_IE_EXCEPTION << network.getName() << " does not represent ngraph::Function";
+ }
+
+ std::vector<std::shared_ptr<ngraph::Node>> ordered = function->get_ordered_ops();
+ pugi::xml_node netXml = doc.append_child("net");
+ netXml.append_attribute("name").set_value(network.getName().c_str());
+
+ pugi::xml_node layers = netXml.append_child("layers");
+ // Maps each node to its layer id so edges can reference endpoints by index.
+ std::unordered_map<std::shared_ptr<ngraph::Node>, size_t> matching;
+
+ for (size_t i = 0; i < ordered.size(); ++i) {
+ matching[ordered[i]] = i;
+ const std::shared_ptr<ngraph::Node> node = ordered[i];
+ auto params = node->get_rt_info();
+
+ auto layerTypeVariant = params.find(ExecGraphInfoSerialization::LAYER_TYPE);
+ if (layerTypeVariant == params.end()) {
+ THROW_IE_EXCEPTION << node->get_friendly_name() << " does not define "
+ << ExecGraphInfoSerialization::LAYER_TYPE << " attribute.";
+ }
+ using VariantString = ngraph::VariantImpl<std::string>;
+ auto layerTypeValueStr = std::dynamic_pointer_cast<VariantString>(layerTypeVariant->second);
+ IE_ASSERT(layerTypeValueStr != nullptr);
+ // LAYER_TYPE goes into the "type" attribute, not into <data>.
+ params.erase(layerTypeVariant);
+
+ pugi::xml_node layer = layers.append_child("layer");
+ layer.append_attribute("name").set_value(node->get_friendly_name().c_str());
+ layer.append_attribute("type").set_value(layerTypeValueStr->get().c_str());
+ layer.append_attribute("id").set_value(i);
+
+ if (!params.empty()) {
+ pugi::xml_node data = layer.append_child("data");
+
+ // Only string-valued rt_info entries are serializable as attributes.
+ for (const auto& it : params) {
+ if (auto strValue = std::dynamic_pointer_cast<VariantString>(it.second))
+ data.append_attribute(it.first.c_str()).set_value(strValue->get().c_str());
+ }
+ }
+
+ if (node->get_input_size() > 0) {
+ pugi::xml_node input = layer.append_child("input");
+
+ for (size_t iport = 0; iport < node->get_input_size(); iport++) {
+ const ngraph::Shape & dims = node->get_input_shape(iport);
+ pugi::xml_node port = input.append_child("port");
+
+ port.append_attribute("id").set_value(iport);
+ for (auto dim : dims) {
+ port.append_child("dim").text().set(dim);
+ }
+ }
+ }
+ if (node->get_output_size() > 0 &&
+ // ngraph::op::Result still have single output while we should not print it
+ !std::dynamic_pointer_cast<ngraph::op::Result>(node)) {
+ pugi::xml_node output = layer.append_child("output");
+
+ for (size_t oport = 0; oport < node->get_output_size(); oport++) {
+ pugi::xml_node port = output.append_child("port");
+ Precision outputPrecision = details::convertPrecision(node->get_output_element_type(oport));
+
+ // Output port ids continue after input port ids within a layer.
+ port.append_attribute("id").set_value(node->get_input_size() + oport);
+ port.append_attribute("precision").set_value(outputPrecision.name());
+
+ for (const auto dim : node->get_output_shape(oport)) {
+ port.append_child("dim").text().set(dim);
+ }
+ }
+ }
+ }
+
+ pugi::xml_node edges = netXml.append_child("edges");
+
+ for (const auto& ord : ordered) {
+ const std::shared_ptr<ngraph::Node> parentNode = ord;
+
+ if (parentNode->get_output_size() > 0) {
+ auto itFrom = matching.find(parentNode);
+ if (itFrom == matching.end()) {
+ THROW_IE_EXCEPTION << "Internal error, cannot find " << parentNode->get_friendly_name()
+ << " in matching container during serialization of IR";
+ }
+ for (size_t oport = 0; oport < parentNode->get_output_size(); oport++) {
+ ngraph::Output<ngraph::Node> parentPort = parentNode->output(oport);
+ for (const auto& childPort : parentPort.get_target_inputs()) {
+ ngraph::Node * childNode = childPort.get_node();
+ // size_t to match get_input_size() (avoids signed/unsigned compare).
+ for (size_t iport = 0; iport < childNode->get_input_size(); iport++) {
+ if (childNode->input_value(iport).get_node() == parentPort.get_node()) {
+ auto itTo = matching.find(childNode->shared_from_this());
+ if (itTo == matching.end()) {
+ THROW_IE_EXCEPTION << "Broken edge from layer "
+ << parentNode->get_friendly_name() << " to layer "
+ << childNode->get_friendly_name()
+ << " during serialization of IR";
+ }
+ pugi::xml_node edge = edges.append_child("edge");
+ edge.append_attribute("from-layer").set_value(itFrom->second);
+ edge.append_attribute("from-port").set_value(oport + parentNode->get_input_size());
+
+ edge.append_attribute("to-layer").set_value(itTo->second);
+ edge.append_attribute("to-port").set_value(iport);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+} // namespace
std::size_t FillXmlDoc(const InferenceEngine::ICNNNetwork& network, pugi::xml_document& doc,
- const bool execGraphInfoSerialization, const bool dumpWeights) {
+ const bool execGraphInfoSerialization, const bool dumpWeights) {
const std::vector<CNNLayerPtr> ordered = TopologicalSort(network);
pugi::xml_node netXml = doc.append_child("net");
netXml.append_attribute("name").set_value(network.getName().c_str());
}
void Serialize(const std::string& xmlPath, const std::string& binPath,
- const InferenceEngine::ICNNNetwork& network) {
- const std::vector<CNNLayerPtr> ordered = TopologicalSort(network);
-
+ const InferenceEngine::ICNNNetwork& network) {
// A flag for serializing executable graph information (not complete IR)
bool execGraphInfoSerialization = false;
+ pugi::xml_document doc;
+
+ if (auto function = network.getFunction()) {
+ execGraphInfoSerialization = true;
+
+ // go over all operations and check whether performance stat is set
+ for (const auto & op : function->get_ops()) {
+ auto & rtInfo = op->get_rt_info();
+ if (rtInfo.find(ExecGraphInfoSerialization::PERF_COUNTER) == rtInfo.end()) {
+ execGraphInfoSerialization = false;
+ break;
+ }
+ }
+
+ if (execGraphInfoSerialization) {
+ FillXmlDocWithExecutionNGraph(network, doc);
+
+ if (!doc.save_file(xmlPath.c_str())) {
+ THROW_IE_EXCEPTION << "file '" << xmlPath << "' was not serialized";
+ }
+
+ return;
+ }
+ }
+
+ const std::vector<CNNLayerPtr> ordered = TopologicalSort(network);
// If first layer has perfCounter parameter set then it's executable graph info serialization.
// All other layers must also have this parameter set.
if (ordered[0]->params.find(ExecGraphInfoSerialization::PERF_COUNTER) != ordered[0]->params.end()) {
}
bool dumpWeights = !execGraphInfoSerialization & !binPath.empty();
-
- pugi::xml_document doc;
FillXmlDoc(network, doc, execGraphInfoSerialization, dumpWeights);
if (!doc.save_file(xmlPath.c_str())) {
}
+// Exposes the executed (optimized) graph for introspection/serialization.
InferenceEngine::ICNNNetwork::Ptr MKLDNNGraph::dump() const {
- return dump_graph_as_ie_net(*this);
+ // Switched to the ngraph-based representation: exec-graph metadata is
+ // carried in each node's rt_info instead of legacy CNNLayer::params.
+ return dump_graph_as_ie_ngraph_net(*this);
}
friend class MKLDNNInferRequest;
friend class MKLDNNGraphlessInferRequest;
friend std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph);
+ friend std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph);
private:
void dumpToDotFile(std::string file) const;
#include "mkldnn_graph_dumper.h"
#include "cnn_network_impl.hpp"
#include "ie_util_internal.hpp"
+#include "ie_ngraph_utils.hpp"
#include "exec_graph_info.hpp"
#include "mkldnn_debug.h"
+#include "generic_ie.hpp"
+#include <ngraph/variant.hpp>
#include <vector>
#include <string>
namespace MKLDNNPlugin {
-static void copy_node_metadata(const MKLDNNNodePtr &, CNNLayer::Ptr &);
-static void drawer_callback(const InferenceEngine::CNNLayerPtr, ordered_properties &, ordered_properties &);
+namespace {
-CNNLayer::Ptr convert_node(const MKLDNNNodePtr &node) {
- CNNLayer::Ptr layer(new CNNLayer({"name", "type", Precision::FP32}));
- copy_node_metadata(node, layer);
+std::map<std::string, std::string> extract_node_metadata(const MKLDNNNodePtr &);
+void drawer_callback(const InferenceEngine::CNNLayerPtr, ordered_properties &, ordered_properties &);
+
+} // namespace
+
+CNNLayer::Ptr create_cnnlayer(const MKLDNNNodePtr &node) {
+ CNNLayer::Ptr layer(new CNNLayer({node->getName(), "type", Precision::FP32}));
+
+ layer->params = extract_node_metadata(node);
+ layer->type = layer->params[ExecGraphInfoSerialization::LAYER_TYPE];
+ layer->params.erase(ExecGraphInfoSerialization::LAYER_TYPE);
auto &cfg = node->getSelectedPrimitiveDescriptor()->getConfig();
layer->insData.resize(cfg.inConfs.size());
return layer;
}
+// Builds an ngraph::Function mirror of the executed MKLDNN graph:
+// graph inputs become Parameter nodes, graph outputs become Result nodes,
+// and every other node becomes an ExecutionNode whose rt_info holds the
+// string metadata from extract_node_metadata (layer type, impl type, perf
+// counters, ...). Returns the result wrapped in a CNNNetwork.
std::shared_ptr<ICNNNetwork> dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph) {
+ std::map<MKLDNNNodePtr, std::shared_ptr<ngraph::Node> > node2layer;
+
+ ngraph::ResultVector results;
+ ngraph::ParameterVector params;
+
+ // Collects the ngraph outputs feeding `node`, indexed by the child port of
+ // each parent edge. Parents must already be in node2layer, which holds
+ // because graph.graphNodes is iterated in topological order below.
+ // NOTE(review): assumes child port numbers are dense and unique across a
+ // node's parent edges — confirm against MKLDNNEdge semantics.
+ auto get_inputs = [&] (const MKLDNNNodePtr & node) {
+ auto pr_edges = node->getParentEdges();
+ ngraph::OutputVector inputs(pr_edges.size());
+
+ for (int i = 0; i < pr_edges.size(); i++) {
+ auto edge = node->getParentEdgeAt(i);
+ int pr_port = edge->getInputNum();
+ int ch_port = edge->getOutputNum();
+ auto pr_node = edge->getParent();
+
+ IE_ASSERT(node2layer.count(pr_node) == 1);
+ auto pr = node2layer[pr_node];
+
+ inputs[ch_port] = pr->output(pr_port);
+ }
+
+ return inputs;
+ };
+
+ // Creates the ngraph counterpart of one MKLDNN node and attaches its
+ // serialization metadata as string variants in rt_info.
+ auto create_ngraph_node = [&](const MKLDNNNodePtr &node) {
+ bool is_input = false, is_output = false;
+ for (auto && kvp : graph.inputNodes) {
+ if (kvp.second == node) {
+ is_input = true;
+ break;
+ }
+ }
+
+ for (auto && onode : graph.outputNodes) {
+ if (onode == node) {
+ is_output = true;
+ break;
+ }
+ }
+
+ auto meta_data = extract_node_metadata(node);
+ std::shared_ptr<ngraph::Node> return_node;
+ if (is_input) {
+ auto desc = node->getChildEdgeAt(0)->getDesc();
+ auto param = std::make_shared<ngraph::op::Parameter>(
+ details::convertPrecision(desc.getPrecision()),
+ ngraph::PartialShape(desc.getDims()));
+ return_node = param;
+ params.push_back(param);
+ } else if (is_output) {
+ // NOTE(review): Result is built from the LAST collected input —
+ // presumably output nodes have a single parent; confirm.
+ results.emplace_back(std::make_shared<ngraph::op::Result>(get_inputs(node).back()));
+ return_node = results.back();
+ } else {
+ return_node = std::make_shared<ExecGraphInfoSerialization::ExecutionNode>(
+ get_inputs(node), node->getSelectedPrimitiveDescriptor()->getConfig().outConfs.size());
+
+ // Propagate per-output precision/shape from the selected primitive.
+ for (size_t port = 0; port < return_node->get_output_size(); ++port) {
+ auto desc = node->getChildEdgeAt(port)->getDesc();
+ return_node->set_output_type(port,
+ details::convertPrecision(desc.getPrecision()),
+ ngraph::PartialShape(desc.getDims()));
+ }
+ }
+
+ for (auto && kvp : meta_data)
+ return_node->get_rt_info()[kvp.first] = std::make_shared<::ngraph::VariantWrapper<std::string>>(kvp.second);
+ return_node->set_friendly_name(node->getName());
+
+ return return_node;
+ };
+
+ ngraph::NodeVector nodes;
+ nodes.reserve(graph.graphNodes.size());
+ for (auto &node : graph.graphNodes) { // important: graph.graphNodes are in topological order
+ nodes.emplace_back(create_ngraph_node(node));
+ node2layer[node] = nodes.back();
+ }
+
+ // The execution graph is synthetic: disable reshape/validation on it.
+ ngraph::op::GenericIE::DisableReshape reshape(nodes);
+ auto function = std::make_shared<ngraph::Function>(results, params, graph._name);
+ InferenceEngine::CNNNetwork net(function);
+ return net;
+}
+
std::shared_ptr<ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph) {
auto net = std::make_shared<details::CNNNetworkImpl>();
// Copy all nodes to network
for (auto &node : graph.graphNodes) {
- auto layer = convert_node(node);
+ auto layer = create_cnnlayer(node);
node2layer[node] = layer;
net->addLayer(layer);
}
// Special converters of meta data
//**********************************
-static const char BLUE[] = "#D8D9F1";
-static const char GREEN[] = "#D9EAD3";
+namespace {
+
+std::map<std::string, std::string> extract_node_metadata(const MKLDNNNodePtr &node) {
+ std::map<std::string, std::string> serialization_info;
-void copy_node_metadata(const MKLDNNNodePtr &node, CNNLayer::Ptr &layer) {
if (node->getType() == Input && node->isConstant()) {
// We need to separate Input and Const layers
- layer->type = "Const";
+ serialization_info[ExecGraphInfoSerialization::LAYER_TYPE] = "Const";
} else if (node->getType() == Generic) {
// Path to print actual name for extension layers
- layer->type = node->getTypeStr();
+ serialization_info[ExecGraphInfoSerialization::LAYER_TYPE] = node->getTypeStr();
} else {
- layer->type = NameFromType(node->getType());
+ serialization_info[ExecGraphInfoSerialization::LAYER_TYPE] = NameFromType(node->getType());
}
- layer->name = node->getName();
// Original layers
- layer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES] = node->getOriginalLayers();
+ serialization_info[ExecGraphInfoSerialization::ORIGINAL_NAMES] = node->getOriginalLayers();
// Implementation type name
- layer->params[ExecGraphInfoSerialization::IMPL_TYPE] = node->getPrimitiveDescriptorType();
+ serialization_info[ExecGraphInfoSerialization::IMPL_TYPE] = node->getPrimitiveDescriptorType();
std::string outputPrecisionsStr;
if (!node->getChildEdges().empty()) {
outputPrecisionsStr = node->getParentEdgeAt(0)->getDesc().getPrecision().name();
}
}
- layer->params[ExecGraphInfoSerialization::OUTPUT_PRECISIONS] = outputPrecisionsStr;
+ serialization_info[ExecGraphInfoSerialization::OUTPUT_PRECISIONS] = outputPrecisionsStr;
std::string outputLayoutsStr;
auto outLayouts = node->getSelectedPrimitiveDescriptor()->getOutputLayouts();
} else {
outputLayoutsStr = mkldnn_fmt2str(mkldnn_format_undef);
}
- layer->params[ExecGraphInfoSerialization::OUTPUT_LAYOUTS] = outputLayoutsStr;
+ serialization_info[ExecGraphInfoSerialization::OUTPUT_LAYOUTS] = outputLayoutsStr;
// Performance
if (node->PerfCounter().avg() != 0) {
- layer->params[ExecGraphInfoSerialization::PERF_COUNTER] = std::to_string(node->PerfCounter().avg());
+ serialization_info[ExecGraphInfoSerialization::PERF_COUNTER] = std::to_string(node->PerfCounter().avg());
} else {
- layer->params[ExecGraphInfoSerialization::PERF_COUNTER] = "not_executed"; // it means it was not calculated yet
+ serialization_info[ExecGraphInfoSerialization::PERF_COUNTER] = "not_executed"; // it means it was not calculated yet
}
- layer->params[ExecGraphInfoSerialization::EXECUTION_ORDER] = std::to_string(node->getExecIndex());
+ serialization_info[ExecGraphInfoSerialization::EXECUTION_ORDER] = std::to_string(node->getExecIndex());
+
+ return serialization_info;
}
+const char BLUE[] = "#D8D9F1";
+const char GREEN[] = "#D9EAD3";
+
void drawer_callback(const InferenceEngine::CNNLayerPtr layer,
- ordered_properties &printed_properties,
- ordered_properties &node_properties) {
+ ordered_properties &printed_properties,
+ ordered_properties &node_properties) {
const auto ¶ms = layer->params;
// Implementation
node_properties.push_back({"xlabel", (perf != layer->params.end()) ? perf->second : ""});
}
+} // namespace
+
} // namespace MKLDNNPlugin
namespace MKLDNNPlugin {
- void dump_graph_as_dot(const MKLDNNGraph &graph, std::ostream &out);
+void dump_graph_as_dot(const MKLDNNGraph &graph, std::ostream &out);
- std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph);
+std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph);
+std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph);
} // namespace MKLDNNPlugin
#pragma once
+#include <ie_api.h>
+#include <ie_parameter.hpp>
#include <string>
+#include <ngraph/node.hpp>
+#include <ngraph/function.hpp>
+
/**
* @brief A namespace with const values for Execution Graph parameters names.
*
namespace ExecGraphInfoSerialization {
/**
- * @brief A general key for CNNLayer::params map. Used to get a string of layer names separated by a comma
+ * @brief Used to get a string of layer names separated by a comma
* from the original IR, which were fused/merged to the current executable primitive.
*/
static const char ORIGINAL_NAMES[] = "originalLayersNames";
/**
- * @brief A general key for CNNLayer::params map. Used to get a type of the executable primitive.
+ * @brief Used to get a type of the executable primitive.
*/
static const char IMPL_TYPE[] = "primitiveType";
/**
- * @brief A general key for CNNLayer::params map. Used to get output precisions of the executable primitive.
+ * @brief Used to get output precisions of the executable primitive.
*/
static const char OUTPUT_PRECISIONS[] = "outputPrecisions";
/**
- * @brief A general key for CNNLayer::params map. Used to get value of execution time of the executable primitive.
+ * @brief Used to get a value of execution time of the executable primitive.
*/
static const char PERF_COUNTER[] = "execTimeMcs";
/**
- * @brief A general key for CNNLayer::params map. Used to get output layouts of primitive.
+ * @brief Used to get output layouts of primitive.
*/
static const char OUTPUT_LAYOUTS[] = "outputLayouts";
/**
- * @brief A general key for CNNLayer::params map. Used to get an execution order of primitive.
+ * @brief Used to get an execution order of primitive.
*/
static const char EXECUTION_ORDER[] = "execOrder";
+/**
+ * @brief Used to get a type of primitive.
+ */
+static const char LAYER_TYPE[] = "layerType";
+
+/**
+ * @brief A generic node used to represent an executed primitive in an
+ * execution graph. It carries no operation semantics of its own; all
+ * exec-graph metadata (layer type, impl type, perf counters, ...) is
+ * stored as string variants in the node's rt_info.
+ */
+class INFERENCE_ENGINE_API_CLASS(ExecutionNode) : public ngraph::Node {
+public:
+ static constexpr ngraph::NodeTypeInfo type_info { "ExecutionNode", 0 };
+ const ngraph::NodeTypeInfo& get_type_info() const override;
+
+ ExecutionNode() = default;
+
+ /**
+ * @brief Constructs a node with the given inputs and output count.
+ * Output element types/shapes must be set separately via set_output_type.
+ */
+ ExecutionNode(const ngraph::OutputVector& arguments, size_t output_size = 1) :
+ Node(arguments, output_size) { }
+
+ /**
+ * @brief Creates a copy with new inputs, preserving rt_info metadata and
+ * per-output element types / partial shapes of this node.
+ */
+ std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& inputs) const override {
+ auto cloned = std::make_shared<ExecutionNode>();
+
+ cloned->set_arguments(inputs);
+
+ // rt_info variants are shared (shallow copy), not deep-cloned.
+ for (auto kvp : get_rt_info())
+ cloned->get_rt_info()[kvp.first] = kvp.second;
+
+ for (size_t i = 0; i < get_output_size(); ++i)
+ cloned->set_output_type(i, get_output_element_type(i), get_output_partial_shape(i));
+
+ return cloned;
+ }
+};
+
} // namespace ExecGraphInfoSerialization
#include "network_serializer.h"
#include "ie_system_conf.h"
+#include <ngraph/function.hpp>
+#include <ngraph/variant.hpp>
+#include <exec_graph_info.hpp>
+
namespace CPUTestUtils {
typedef enum {
+// Verifies that every exec-graph node of the given layer type was executed
+// with the expected input/output memory formats and primitive impl type.
+// Metadata is now read from ngraph rt_info instead of CNNLayer::params.
void inline CheckCPUImpl(InferenceEngine::ExecutableNetwork &execNet, std::string nodeType, std::vector<cpu_memory_format_t> inputMemoryFormats,
std::vector<cpu_memory_format_t> outputMemoryFormats, std::string selectedType) {
InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo();
- auto nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo);
- for (auto &node : nodes) {
- if (node->type == nodeType) {
- ASSERT_LE(inputMemoryFormats.size(), node->insData.size());
- ASSERT_LE(outputMemoryFormats.size(), node->outData.size());
+ auto function = execGraphInfo.getFunction();
+ ASSERT_NE(nullptr, function);
+
+ for (const auto &node : function->get_ops()) {
+ const auto & rtInfo = node->get_rt_info();
+ // Fetches a mandatory string metadata entry from this node's rt_info.
+ auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
+ auto it = rtInfo.find(paramName);
+ IE_ASSERT(rtInfo.end() != it);
+ auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+ IE_ASSERT(nullptr != value);
+
+ return value->get();
+ };
+
+ // Same lookup but for an arbitrary node (used for parents below).
+ auto getExecValueOutputsLayout = [] (std::shared_ptr<ngraph::Node> node) -> std::string {
+ auto rtInfo = node->get_rt_info();
+ auto it = rtInfo.find(ExecGraphInfoSerialization::OUTPUT_LAYOUTS);
+ IE_ASSERT(rtInfo.end() != it);
+ auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+ IE_ASSERT(nullptr != value);
+
+ return value->get();
+ };
+
+ if (getExecValue(ExecGraphInfoSerialization::LAYER_TYPE) == nodeType) {
+ ASSERT_LE(inputMemoryFormats.size(), node->get_input_size());
+ ASSERT_LE(outputMemoryFormats.size(), node->get_output_size());
for (int i = 0; i < inputMemoryFormats.size(); i++) {
- for (auto &parentNode : nodes) {
- for (int j = 0; j < parentNode->outData.size(); j++) {
- if (parentNode->outData[j]->getName() == node->insData[i].lock()->getName()) {
- auto actualInputMemoryFormat = parentNode->params.find("outputLayouts");
- ASSERT_NE(actualInputMemoryFormat, parentNode->params.end());
- ASSERT_EQ(inputMemoryFormats[i], cpu_str2fmt(actualInputMemoryFormat->second.c_str()));
+ // NOTE(review): this iterates over ALL parent ports for each i, so
+ // inputMemoryFormats[i] is checked against every matching parent's
+ // layout rather than only input port i — confirm this is intended
+ // (it is equivalent only when all checked layouts coincide).
+ for (const auto & parentPort : node->input_values()) {
+ for (const auto & port : node->inputs()) {
+ if (port.get_tensor_ptr() == parentPort.get_tensor_ptr()) {
+ auto parentNode = parentPort.get_node_shared_ptr();
+ auto actualInputMemoryFormat = getExecValueOutputsLayout(parentNode);
+ ASSERT_EQ(inputMemoryFormats[i], cpu_str2fmt(actualInputMemoryFormat.c_str()));
}
}
}
}
for (int i = 0; i < outputMemoryFormats.size(); i++) {
- auto actualOutputMemoryFormat = node->params.find("outputLayouts");
- ASSERT_NE(actualOutputMemoryFormat, node->params.end());
- ASSERT_EQ(outputMemoryFormats[i], cpu_str2fmt(actualOutputMemoryFormat->second.c_str()));
+ auto actualOutputMemoryFormat = getExecValue(ExecGraphInfoSerialization::OUTPUT_LAYOUTS);
+ ASSERT_EQ(outputMemoryFormats[i], cpu_str2fmt(actualOutputMemoryFormat.c_str()));
}
- auto primType = node->params.find("primitiveType");
- ASSERT_NE(primType, node->params.end());
- ASSERT_EQ(selectedType, primType->second);
+ auto primType = getExecValue(ExecGraphInfoSerialization::IMPL_TYPE);
+ ASSERT_EQ(selectedType, primType);
}
}
}
//
#include <memory>
+#include <ngraph/variant.hpp>
#include "ie_extension.h"
#include <condition_variable>
#include "functional_test_utils/layer_test_utils.hpp"
originalLayersMap[layer->get_friendly_name()] = 0;
}
int IteratorForLayersConstant = 0;
- // Store all the layers from the executable graph information represented as CNNNetwork
- IE_SUPPRESS_DEPRECATED_START
- const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
- InferenceEngine::details::CNNNetSortTopologically(execGraph);
- for (const auto &execLayer : execGraphLayers) {
- // Each layer from the execGraphInfo network must have PM data option set
- ASSERT_EQ("not_executed", execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER]);
- // Parse origin layer names (fused/merged layers) from the executable graph
- // and compare with layers from the original model
- auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
- if (origFromExecLayer == "")
- IteratorForLayersConstant++;
- std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
- std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
- auto origLayer = originalLayersMap.find(layer);
- ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
- origLayer->second++;
- });
+
+ if (auto function = execGraph.getFunction()) {
+ for (const auto & op : function->get_ops()) {
+ const auto & rtInfo = op->get_rt_info();
+
+ auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
+ auto it = rtInfo.find(paramName);
+ IE_ASSERT(rtInfo.end() != it);
+ auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+ IE_ASSERT(nullptr != value);
+
+ return value->get();
+ };
+
+ // Each layer from the execGraphInfo network must have PM data option set
+ ASSERT_EQ("not_executed", getExecValue(ExecGraphInfoSerialization::PERF_COUNTER));
+ // Parse origin layer names (fused/merged layers) from the executable graph
+ // and compare with layers from the original model
+ auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES);
+ if (origFromExecLayer == "")
+ IteratorForLayersConstant++;
+ std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+ std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+ auto origLayer = originalLayersMap.find(layer);
+ ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+ origLayer->second++;
+ });
+ }
+ } else {
+ IE_SUPPRESS_DEPRECATED_START
+ // Store all the layers from the executable graph information represented as CNNNetwork
+ const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
+ InferenceEngine::details::CNNNetSortTopologically(execGraph);
+ for (const auto &execLayer : execGraphLayers) {
+ // Each layer from the execGraphInfo network must have PM data option set
+ ASSERT_EQ("not_executed", execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER]);
+ // Parse origin layer names (fused/merged layers) from the executable graph
+ // and compare with layers from the original model
+ auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
+ if (origFromExecLayer == "")
+ IteratorForLayersConstant++;
+ std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+ std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+ auto origLayer = originalLayersMap.find(layer);
+ ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+ origLayer->second++;
+ });
+ }
+ IE_SUPPRESS_DEPRECATED_END
}
- IE_SUPPRESS_DEPRECATED_END
+
// All layers from the original IR must be present with in ExecGraphInfo
for (auto &layer : originalLayersMap) {
if ((layer.second == 0) && (IteratorForLayersConstant > 0)) {
}
int IteratorForLayersConstant = 0;
// Store all the layers from the executable graph information represented as CNNNetwork
- IE_SUPPRESS_DEPRECATED_START
- const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
- InferenceEngine::details::CNNNetSortTopologically(execGraph);
bool has_layer_with_valid_time = false;
- for (const auto &execLayer : execGraphLayers) {
- // At least one layer in the topology should be executed and have valid perf counter value
- try {
- float x = static_cast<float>(std::atof(
- execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER].c_str()));
- ASSERT_GE(x, 0.0f);
- has_layer_with_valid_time = true;
- } catch (std::exception &) {}
-
- // Parse origin layer names (fused/merged layers) from the executable graph
- // and compare with layers from the original model
- auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
- std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
- if (origFromExecLayer == "")
- IteratorForLayersConstant++;
- std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
- auto origLayer = originalLayersMap.find(layer);
- ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
- origLayer->second++;
- });
+
+ if (auto function = execGraph.getFunction()) {
+ for (const auto & op : function->get_ops()) {
+ const auto & rtInfo = op->get_rt_info();
+
+ auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
+ auto it = rtInfo.find(paramName);
+ IE_ASSERT(rtInfo.end() != it);
+ auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+ IE_ASSERT(nullptr != value);
+
+ return value->get();
+ };
+
+ // At least one layer in the topology should be executed and have valid perf counter value
+ try {
+ float x = static_cast<float>(std::atof(
+ getExecValue(ExecGraphInfoSerialization::PERF_COUNTER).c_str()));
+ ASSERT_GE(x, 0.0f);
+ has_layer_with_valid_time = true;
+ } catch (std::exception &) {}
+
+ // Parse origin layer names (fused/merged layers) from the executable graph
+ // and compare with layers from the original model
+ auto origFromExecLayer = getExecValue(ExecGraphInfoSerialization::ORIGINAL_NAMES);
+ std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+ if (origFromExecLayer == "")
+ IteratorForLayersConstant++;
+ std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+ auto origLayer = originalLayersMap.find(layer);
+ ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+ origLayer->second++;
+ });
+ }
+ } else {
+ IE_SUPPRESS_DEPRECATED_START
+ const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
+ InferenceEngine::details::CNNNetSortTopologically(execGraph);
+ for (const auto &execLayer : execGraphLayers) {
+ // At least one layer in the topology should be executed and have valid perf counter value
+ try {
+ float x = static_cast<float>(std::atof(
+ execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER].c_str()));
+ ASSERT_GE(x, 0.0f);
+ has_layer_with_valid_time = true;
+ } catch (std::exception &) {}
+
+ // Parse origin layer names (fused/merged layers) from the executable graph
+ // and compare with layers from the original model
+ auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
+ std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+ if (origFromExecLayer == "")
+ IteratorForLayersConstant++;
+ std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+ auto origLayer = originalLayersMap.find(layer);
+ ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+ origLayer->second++;
+ });
+ }
+ IE_SUPPRESS_DEPRECATED_END
}
- IE_SUPPRESS_DEPRECATED_END
+
ASSERT_TRUE(has_layer_with_valid_time);
// All layers from the original IR must be present within ExecGraphInfo
#include <ie_core.hpp>
+#include <ngraph/function.hpp>
+#include <ngraph/variant.hpp>
+
#include "functional_test_utils/plugin_cache.hpp"
#include "functional_test_utils/layer_test_utils.hpp"
auto ie = PluginCache::get().ie();
auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
- IE_SUPPRESS_DEPRECATED_START
InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo();
- std::vector<InferenceEngine::CNNLayerPtr> nodes;
- ASSERT_NO_THROW(nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo));
- for (auto &node : nodes) {
- if (node->type == "BinaryConvolution") {
- std::string originalLayersNames = node->params["originalLayersNames"];
- ASSERT_TRUE(originalLayersNames.find("BinaryConvolution") != std::string::npos);
- ASSERT_TRUE(originalLayersNames.find("Add") != std::string::npos);
- ASSERT_EQ(node->insData.size(), 1);
+
+ if (auto function = execGraphInfo.getFunction()) {
+ for (const auto & op : function->get_ops()) {
+ const auto & rtInfo = op->get_rt_info();
+
+ auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string {
+ auto it = rtInfo.find(paramName);
+ IE_ASSERT(rtInfo.end() != it);
+ auto value = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+ IE_ASSERT(nullptr != value);
+
+ return value->get();
+ };
+
+ auto layerType = getExecValue("layerType");
+ if (layerType == "BinaryConvolution") {
+ auto originalLayersNames = getExecValue("originalLayersNames");
+ ASSERT_TRUE(originalLayersNames.find("BinaryConvolution") != std::string::npos);
+ ASSERT_TRUE(originalLayersNames.find("Add") != std::string::npos);
+ ASSERT_EQ(op->get_input_size(), 1);
+ }
+ }
+ } else {
+ IE_SUPPRESS_DEPRECATED_START
+ std::vector<InferenceEngine::CNNLayerPtr> nodes;
+ ASSERT_NO_THROW(nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo));
+ for (auto &node : nodes) {
+ if (node->type == "BinaryConvolution") {
+ std::string originalLayersNames = node->params["originalLayersNames"];
+ ASSERT_TRUE(originalLayersNames.find("BinaryConvolution") != std::string::npos);
+ ASSERT_TRUE(originalLayersNames.find("Add") != std::string::npos);
+ ASSERT_EQ(node->insData.size(), 1);
+ }
}
+ IE_SUPPRESS_DEPRECATED_END
}
- IE_SUPPRESS_DEPRECATED_END
fnPtr.reset();
};
#include <functional>
#include <ie_core.hpp>
+#include <ngraph/function.hpp>
+#include <exec_graph_info.hpp>
+#include <ngraph/variant.hpp>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/plugin_cache.hpp"
auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
InferenceEngine::CNNNetwork execGraphInfo = execNet.GetExecGraphInfo();
- auto nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo);
int numReorders = 0;
int expectedReorders = 2;
std::unordered_set<std::string> names;
- for (auto &node : nodes) {
- IE_SUPPRESS_DEPRECATED_START
- ASSERT_TRUE(names.find(node->name) == names.end()) << "Node with name " << node->name << "already exists";
- names.insert(node->name);
- if (node->type == "Reorder") {
- numReorders++;
+
+ if (auto function = execGraphInfo.getFunction()) {
+ for (const auto & op : function->get_ops()) {
+ ASSERT_TRUE(names.find(op->get_friendly_name()) == names.end()) <<
+ "Node with name " << op->get_friendly_name() << "already exists";
+ names.insert(op->get_friendly_name());
+
+ const auto & rtInfo = op->get_rt_info();
+ auto it = rtInfo.find(ExecGraphInfoSerialization::LAYER_TYPE);
+ ASSERT_NE(rtInfo.end(), it);
+ auto opType = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+ ASSERT_NE(nullptr, opType);
+
+ if (opType->get() == "Reorder") {
+ numReorders++;
+ }
+ }
+ } else {
+ auto nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo);
+ for (auto &node : nodes) {
+ IE_SUPPRESS_DEPRECATED_START
+ ASSERT_TRUE(names.find(node->name) == names.end()) <<
+ "Node with name " << node->name << "already exists";
+ names.insert(node->name);
+ if (node->type == "Reorder") {
+ numReorders++;
+ }
+ IE_SUPPRESS_DEPRECATED_END
}
- IE_SUPPRESS_DEPRECATED_END
}
+
ASSERT_TRUE(numReorders == expectedReorders) << "Expected reorders: " << expectedReorders << ", actual reorders: " << numReorders;
fnPtr.reset();
//
#include "ngraph_conversion_tests/conv_bias_fusion.hpp"
+#include <ngraph/variant.hpp>
namespace NGraphConversionTestsDefinitions {
InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(network, device);
auto net = exeNetwork.GetExecGraphInfo();
- IE_SUPPRESS_DEPRECATED_START
- auto add_layer = net.getLayerByName(getOutputName().c_str());
- ASSERT_EQ(add_layer->params["originalLayersNames"], "add,conv");
- IE_SUPPRESS_DEPRECATED_END
+ if (auto function = net.getFunction()) {
+ for (const auto & op : function->get_ops()) {
+ if (op->get_friendly_name() == getOutputName()) {
+ auto rtInfo = op->get_rt_info();
+ auto it = rtInfo.find("originalLayersNames");
+ ASSERT_NE(rtInfo.end(), it);
+ auto variant = std::dynamic_pointer_cast<ngraph::VariantImpl<std::string>>(it->second);
+ ASSERT_NE(nullptr, variant);
+ ASSERT_EQ(variant->get(), "add,conv");
+ break;
+ }
+ }
+ } else {
+ IE_SUPPRESS_DEPRECATED_START
+ auto add_layer = net.getLayerByName(getOutputName().c_str());
+ ASSERT_EQ(add_layer->params["originalLayersNames"], "add,conv");
+ IE_SUPPRESS_DEPRECATED_END
+ }
}
} // namespace NGraphConversionTestsDefinitions
\ No newline at end of file
ASSERT_NO_THROW(testEnv->core.AddExtension(extension));
}
- Core ie;
- testEnv->network = ie.ReadNetwork(param.model_xml_str, param.weights_blob);
+ testEnv->network = testEnv->core.ReadNetwork(param.model_xml_str, param.weights_blob);
/* Call conversion from CNNNetwork NgraphImpl to CNNNetwork */
testEnv->network.begin();
full_config[PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT] = "behavior_tests_execution_graph_dump";
#endif
- ResponseDesc response;
-// ASSERT_NO_THROW(testEnv->exeNetwork = testEnv->core.LoadNetwork(testEnv->network, param.device, full_config));
try {
testEnv->exeNetwork = testEnv->core.LoadNetwork(testEnv->network, param.device, full_config);
} catch (InferenceEngineException ex) {