cdef class DataPtr:
cdef C.DataPtr _ptr
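+ # Non-owning pointer to the IENetwork that produced this object; used to trigger conversion to the old representation on demand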
+ cdef C.IENetwork * _ptr_network
cdef class CDataPtr:
cdef C.CDataPtr _ptr
cdef class InputInfoPtr:
cdef InputInfo.Ptr _ptr
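+ # Same non-owning back-pointer; it is propagated to DataPtr objects returned by input_data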
+ cdef C.IENetwork * _ptr_network
cdef class InputInfoCPtr:
cdef InputInfo.CPtr _ptr
def input_data(self):
cdef C.DataPtr c_data_ptr = deref(self._ptr).getInputData()
data_ptr = DataPtr()
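+ # share the network pointer so the returned DataPtr can also convert the network lazily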
+ data_ptr._ptr_network = self._ptr_network
data_ptr._ptr = c_data_ptr
return data_ptr
## This class is the layer data representation.
cdef class DataPtr:
+ ## Default constructor
+ def __init__(self):
+ self._ptr_network = NULL
+
## Name of the data object
@property
def name(self):
@property
def creator_layer(self):
- cdef C.CNNLayerWeakPtr _l_ptr = C.getCreatorLayer(self._ptr)
+ cdef C.CNNLayerWeakPtr _l_ptr
cdef IENetLayer creator_layer
+
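+ # convert the network to the old representation before using the legacy creator-layer API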
+ if self._ptr_network != NULL:
+ deref(self._ptr_network).convertToOldRepresentation()
+ _l_ptr = C.getCreatorLayer(self._ptr)
+
creator_layer = IENetLayer()
if _l_ptr.lock() != NULL:
creator_layer._ptr = _l_ptr.lock()
@property
def input_to(self):
- cdef map[string, C.CNNLayerPtr] _l_ptr_map = C.getInputTo(self._ptr)
+ cdef map[string, C.CNNLayerPtr] _l_ptr_map
cdef IENetLayer input_to
+
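+ # convert the network to the old representation before querying the legacy input-to map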
+ if self._ptr_network != NULL:
+ deref(self._ptr_network).convertToOldRepresentation()
+ _l_ptr_map = C.getInputTo(self._ptr)
+
input_to_list = []
for layer in _l_ptr_map:
input_to = IENetLayer()
for input in c_inputs:
input_info_ptr = InputInfoPtr()
input_info_ptr._ptr = input.second
+ input_info_ptr._ptr_network = &self.impl
inputs[input.first.decode()] = input_info_ptr
return inputs
cdef DataPtr data_ptr
for input in c_inputs:
data_ptr = DataPtr()
+ data_ptr._ptr_network = &self.impl
data_ptr._ptr = input.second
inputs[input.first.decode()] = data_ptr
return inputs
cdef DataPtr data_ptr
for output in c_outputs:
data_ptr = DataPtr()
+ data_ptr._ptr_network = &self.impl
data_ptr._ptr = output.second
outputs[output.first.decode()] = data_ptr
return outputs
#include "hetero/hetero_plugin_config.hpp"
#include "ie_iinfer_request.hpp"
#include "details/ie_cnn_network_tools.h"
+#include "cnn_network_impl.hpp"
const std::string EXPORTED_NETWORK_NAME = "undefined";
std::map <std::string, InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
actual->serialize(path_to_xml, path_to_bin);
}
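+// Replaces an ngraph-based network with its legacy CNNNetworkImpl copy,
+// so that the deprecated layer-oriented API below keeps working.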
+void InferenceEnginePython::IENetwork::convertToOldRepresentation() {
+ if (actual->getFunction()) {
+ // convert to old representation
+ auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(*actual);
+ actual = std::make_shared<InferenceEngine::CNNNetwork>(convertedNetwork);
+ }
+}
+
const std::vector <InferenceEngine::CNNLayerPtr>
InferenceEnginePython::IENetwork::getLayers() {
+ convertToOldRepresentation();
IE_SUPPRESS_DEPRECATED_START
std::vector<InferenceEngine::CNNLayerPtr> result;
std::vector<InferenceEngine::CNNLayerPtr> sorted_layers = InferenceEngine::details::CNNNetSortTopologically(*actual);
IENetwork(PyObject* network);
IENetwork() = default;
+
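+ // converts the wrapped network to the legacy representation; no-op if it is already legacy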
+ void convertToOldRepresentation();
};
ctypedef weak_ptr[Data] DataWeakPtr
ctypedef shared_ptr[const Data] CDataPtr
-
cdef cppclass InputInfo:
ctypedef shared_ptr[InputInfo] Ptr
ctypedef shared_ptr[const InputInfo] CPtr
void reshape(map[string, vector[size_t]] input_shapes) except +
void load_from_buffer(const char*xml, size_t xml_size, uint8_t*bin, size_t bin_size) except +
object getFunction() except +
+ void convertToOldRepresentation() except +
cdef cppclass InferRequestWrap:
double exec_time;
assert len(input_to) == 1
assert input_to[0].name == '27'
+def test_input_to_via_input_info():
+ ie = IECore()
+ net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+ input_infos = net.input_info
+ assert len(input_infos) == 1
+ input_to = input_infos['data'].input_data.input_to
+ assert len(input_to) == 1
+ assert input_to[0].name == '19/Fused_Add_'
+
+def test_input_to_via_inputs():
+ ie = IECore()
+ net = ie.read_network(model=test_net_xml, weights=test_net_bin)
+ inputs = net.inputs
+ assert len(inputs) == 1
+ input_to = inputs['data'].input_to
+ assert len(input_to) == 1
+ assert input_to[0].name == '19/Fused_Add_'
+
def test_creator_layer():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert recwarn.pop(DeprecationWarning)
-def test_affinuty_getter():
+def test_affinity_getter():
ie = IECore()
net = ie.read_network(model=test_net_xml, weights=test_net_bin)
assert net.layers['27'].affinity == ""
// Verify device id
GetDeviceInfo(config);
+ if (network.getFunction()) {
+ THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str << " ngraph::Function is not supported natively";
+ }
+
std::vector<CNNLayerPtr> sortedLayers = CNNNetSortTopologically(network);
for (auto layer : sortedLayers) {
if (CaselessEq<std::string>()(layer->type, "DetectionOutput")) {
#include <map>
#include <vector>
-#include <net_pass.h>
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
#include "gna_infer_request.hpp"
#include "gna_plugin.hpp"
GNAExecutableNetwork(InferenceEngine::ICNNNetwork &network, std::shared_ptr<GNAPlugin> plg)
: plg(plg) {
- InferenceEngine::NetPass::ConvertPrecision(network, InferenceEngine::Precision::I64, InferenceEngine::Precision::I32);
- InferenceEngine::NetPass::ConvertPrecision(network, InferenceEngine::Precision::U64, InferenceEngine::Precision::I32);
- InferenceEngine::NetPass::ConvertPrecision(network, InferenceEngine::Precision::U32, InferenceEngine::Precision::I32);
plg->LoadNetwork(network);
}
#include <low_precision_transformations/blob_transformation.hpp>
#include <graph_tools.hpp>
+#include <net_pass.h>
#include <debug.h>
#include <gna/gna_config.hpp>
#include "gna_plugin_config.hpp"
graphCompiler.setGNAMemoryPtr(gnamem);
}
-void GNAPlugin::LoadNetwork(ICNNNetwork &network) {
+void GNAPlugin::LoadNetwork(ICNNNetwork & _network) {
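+ // GNA consumes the legacy representation, so convert ngraph-based networks first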
+ std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork;
+ if (_network.getFunction()) {
+ convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(_network);
+ }
+ InferenceEngine::ICNNNetwork &network = convertedNetwork ? *convertedNetwork : _network;
+
+ NetPass::ConvertPrecision(network, Precision::I64, Precision::I32);
+ NetPass::ConvertPrecision(network, Precision::U64, Precision::I32);
+ NetPass::ConvertPrecision(network, Precision::U32, Precision::I32);
+
// move blobs from Constant layers into the attributes of Convolution, Deconvolution and FullyConnected layers
BlobTransformation blobsTransformation;
blobsTransformation.transform(network, true);
void GNAPlugin::QueryNetwork(const InferenceEngine::ICNNNetwork& network,
const std::map<std::string, std::string>& config,
InferenceEngine::QueryNetworkResult& res) const {
+ if (network.getFunction()) {
+ THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str << " ngraph::Function is not supported natively";
+ }
+
std::unordered_set<CNNLayer *> allLayers;
InferenceEngine::InputsDataMap inputs;
res.supportedLayersMap.insert({ layer->name, GetName() });
}
}, false);
- }
+}
updated_config.UpdateFromMap(config);
auto plg = std::make_shared<GNAPlugin>(updated_config.key_config_map);
plgPtr = plg;
- return std::make_shared<GNAExecutableNetwork>(*cloneNet(network), plg);
+ return std::make_shared<GNAExecutableNetwork>(*cloneNetwork(network), plg);
}
void SetConfig(const std::map<std::string, std::string> &config) override {
std::unordered_set<std::string> devices;
NodeMap<std::string> affinities;
// Check that all nodes have user- or plugin-defined affinities
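+ // legacy copy of the network, created below only when the affinity dot file is dumped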
+ std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork;
for (auto&& node : orderedOps) {
auto itAffinity = queryNetworkResult.supportedLayersMap.find(node->get_friendly_name());
if (itAffinity != queryNetworkResult.supportedLayersMap.end()) {
affinities[node.get()] = itAffinity->second;
if (dumpDotFile) {
devices.insert(itAffinity->second);
- for (details::CNNNetworkIterator el(&network); el != details::CNNNetworkIterator(); el++) {
+ convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+ for (details::CNNNetworkIterator el(convertedNetwork.get()); el != details::CNNNetworkIterator(); el++) {
CNNLayer::Ptr layer = *el;
if (CaselessEq<std::string>()(layer->name, node->get_friendly_name())) {
layer->affinity = itAffinity->second;
if (dumpDotFile) {
std::ofstream ofstream{"hetero_affinity_" + _name + ".dot"};
- saveGraphToDot(network, ofstream, HeteroLayerColorer{{devices.begin(), devices.end()}});
+ saveGraphToDot(*convertedNetwork, ofstream, HeteroLayerColorer{{devices.begin(), devices.end()}});
}
NodeMap<InputSet> nodeInputDependencies;
}
if (dumpDotFile) {
std::ofstream ofstream{"hetero_subgraphs_" + _name + ".dot"};
- dumpGraph(network, subFunctions, ofstream);
+ dumpGraph(*convertedNetwork, subFunctions, ofstream);
}
for (auto&& network : networks) {
auto cfg = _config;
}
auto subnetworksNode = heteroNode.append_child("subnetworks");
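+ // cache legacy conversions per ngraph::Function so each subnetwork is converted at most once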
+ std::map<std::shared_ptr<const ngraph::Function>, ::CNNNetwork> convertedNetworks;
for (auto&& subnetwork : networks) {
+ auto subnet = subnetwork._clonedNetwork;
+ if (subnet.getFunction()) {
+ subnet = convertedNetworks[subnet.getFunction()] =
+ InferenceEngine::CNNNetwork(
+ std::make_shared<InferenceEngine::details::CNNNetworkImpl>(subnetwork._clonedNetwork));
+ }
auto subnetworkNode = subnetworksNode.append_child("subnetwork");
subnetworkNode.append_attribute("device").set_value(subnetwork._device.c_str());
auto subnetworkInputsNode = subnetworkNode.append_child("inputs");
- auto inputInfo = subnetwork._clonedNetwork.getInputsInfo();
+ auto inputInfo = subnet.getInputsInfo();
for (auto&& input : inputInfo) {
auto inputNode = subnetworkInputsNode.append_child("input");
inputNode.append_attribute("name").set_value(input.first.c_str());
inputNode.append_attribute("precision").set_value(input.second->getPrecision().name());
}
auto subnetworkOutputsNode = subnetworkNode.append_child("outputs");
- auto outputInfo = subnetwork._clonedNetwork.getOutputsInfo();
+ auto outputInfo = subnet.getOutputsInfo();
for (auto&& output : outputInfo) {
auto outputNode = subnetworkOutputsNode.append_child("output");
auto creator = getCreatorLayer(output.second).lock();
for (auto&& subnetwork : networks) {
try {
subnetwork._network.Export(heteroModel);
- } catch(InferenceEngine::details::InferenceEngineException& ie_ex) {
+ } catch (InferenceEngine::details::InferenceEngineException& ie_ex) {
if (std::string::npos != std::string{ie_ex.what()}.find(NOT_IMPLEMENTED_str)) {
pugi::xml_document doc;
- auto dataSize = static_cast<std::uint64_t>(InferenceEngine::Serialization::FillXmlDoc(subnetwork._clonedNetwork, doc));
+ auto subnet = subnetwork._clonedNetwork;
+ if (subnet.getFunction()) {
+ subnet = convertedNetworks[subnet.getFunction()];
+ }
+ auto dataSize = static_cast<std::uint64_t>(InferenceEngine::Serialization::FillXmlDoc(subnet, doc));
doc.save(heteroModel, nullptr, pugi::format_raw);
heteroModel << std::endl;
heteroModel.write(reinterpret_cast<char*>(&dataSize), sizeof(dataSize));
- InferenceEngine::Serialization::SerializeBlobs(heteroModel, subnetwork._clonedNetwork);
+ InferenceEngine::Serialization::SerializeBlobs(heteroModel, subnet);
} else {
throw;
}
}
DeviceMetaInformationMap metaDevices = GetDevicePlugins(it->second, tconfig);
- if (auto function = network.getFunction()) {
- auto anyDeviceDoNotSupportNgraph =
- std::any_of(std::begin(metaDevices), std::end(metaDevices),
- [&] (const DeviceMetaInformationMap::value_type& metaDevice) {
+ if (network.getFunction()) {
+ auto allSupportsNgraph =
+ std::all_of(std::begin(metaDevices), std::end(metaDevices),
+ [&] (const DeviceMetaInformationMap::value_type& metaDevice) -> bool {
auto& deviceName = metaDevice.first;
auto clonedNetwork = cloneNetwork(network);
- GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.second);
- return (clonedNetwork->getFunction() == nullptr);
+ try { GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.second); }
+ catch (const InferenceEngine::details::InferenceEngineException & ex) {
+ std::string message = ex.what();
+ return message.find(NOT_IMPLEMENTED_str) == std::string::npos;
+ }
+ return true;
});
- if (anyDeviceDoNotSupportNgraph) {
+ if (!allSupportsNgraph) {
auto cnnNetworkImpl = std::make_shared<details::CNNNetworkImpl>(network);
return std::make_shared<HeteroExecutableNetwork>(
- *cnnNetworkImpl,
- mergeConfigs(_config, config), this);
+ *cnnNetworkImpl, mergeConfigs(_config, config), this);
} else {
return std::make_shared<HeteroExecutableNetwork>(*cloneNetwork(network), mergeConfigs(_config, config), this);
}
DeviceMetaInformationMap metaDevices = GetDevicePlugins(fallbackDevicesStr, tconfig);
std::map<std::string, QueryNetworkResult> queryResults;
- // go over devices and call query network
- for (auto&& metaDevice : metaDevices) {
- auto& deviceName = metaDevice.first;
- auto clonedNetwork = cloneNetwork(network);
- queryResults[deviceName] = GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.second);
+ auto queryNetwork = [&] (const InferenceEngine::ICNNNetwork & networkObject) {
+ // go over devices and call query network
+ for (auto&& metaDevice : metaDevices) {
+ auto& deviceName = metaDevice.first;
+ auto clonedNetwork = cloneNetwork(networkObject);
+ queryResults[deviceName] = GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.second);
+ }
+ return queryResults;
+ };
+
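+ // probe all devices first: if any of them fails to query an ngraph-based network with NOT_IMPLEMENTED, fall back to the legacy representation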
+ if (network.getFunction()) {
+ auto allSupportsNgraph =
+ std::all_of(std::begin(metaDevices), std::end(metaDevices),
+ [&] (const DeviceMetaInformationMap::value_type& metaDevice) -> bool {
+ auto& deviceName = metaDevice.first;
+ auto clonedNetwork = cloneNetwork(network);
+ try { GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.second); }
+ catch (const InferenceEngine::details::InferenceEngineException & ex) {
+ std::string message = ex.what();
+ return message.find(NOT_IMPLEMENTED_str) == std::string::npos;
+ }
+ return true;
+ });
+ if (!allSupportsNgraph) {
+ auto cnnNetworkImpl = std::make_shared<details::CNNNetworkImpl>(network);
+ queryNetwork(*cnnNetworkImpl);
+ } else {
+ queryNetwork(network);
+ }
+ } else {
+ queryNetwork(network);
}
// WARNING: Here are devices with user-set priority
}
std::map<std::string, CNNLayerPtr>& InferenceEngine::getInputTo(const DataPtr & data) {
- if (auto ndata = std::dynamic_pointer_cast<details::NGraphData>(data)) {
- return ndata->getInputTo();
- } else {
- return data->_impl->inputTo;
- }
+ return data->_impl->inputTo;
}
std::map<std::string, CNNLayerPtr>& InferenceEngine::getInputTo(Data * data) {
- if (auto ndata = dynamic_cast<details::NGraphData *>(data)) {
- return ndata->getInputTo();
- } else {
- return data->_impl->inputTo;
- }
+ return data->_impl->inputTo;
}
CNNLayerWeakPtr& details::NGraphData::getCreatorLayer() {
- if (_impl->creatorLayer.lock() == nullptr && network != nullptr) {
- network->convertToCNNNetworkImpl();
- }
return _impl->creatorLayer;
}
std::map<std::string, CNNLayerPtr>& details::NGraphData::getInputTo() {
- if (_impl->inputTo.empty() && network != nullptr) {
- network->convertToCNNNetworkImpl();
- }
-
return _impl->inputTo;
}
#include <cnn_network_ngraph_impl.hpp>
#include <precision_utils.h>
#include <cpp/ie_cnn_network.h>
+#include <cnn_network_impl.hpp>
#include <limits>
#include <cmath>
std::map<std::string, std::vector<TensorDesc>> layer_name_to_tensor_desc;
{
auto tiBody = std::make_shared<details::TINGraphBody>(std::make_shared<ngraph::Function>(results, parameters));
- CNNNetwork net(tiBody);
+ CNNNetwork ngraphNet(tiBody);
+ CNNNetwork net(std::make_shared<InferenceEngine::details::CNNNetworkImpl>(ngraphNet));
// Paranoid check for cycles
bool res = CNNNetForestDFS(
CNNNetGetAllInputLayers(net), [](const CNNLayerPtr& layer) {}, false);
DeviceMap<DeviceInformation> metaDevices = ParseMetaDevices(priorities->second, fullConfig);
std::unordered_set<std::string> supportedLayers;
+
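+ // check whether every device can query an ngraph-based network; otherwise the queries below go through a legacy copy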
+ auto allSupportsNgraph =
+ std::all_of(std::begin(metaDevices), std::end(metaDevices),
+ [&] (const DeviceMap<DeviceInformation>::value_type & value) -> bool {
+ auto& deviceName = value.first;
+ auto& metaDevice = value.second;
+ auto clonedNetwork = cloneNetwork(network);
+ try { GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.config); }
+ catch (const InferenceEngine::details::InferenceEngineException & ex) {
+ std::string message = ex.what();
+ return message.find(NOT_IMPLEMENTED_str) == std::string::npos;
+ }
+ return true;
+ });
+
for (auto&& value : metaDevices) {
auto& deviceName = value.first;
auto& metaDevice = value.second;
- auto clonedNetwork = cloneNetwork(network);
- auto deviceQr = GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.config);
- std::unordered_set<std::string> deviceSupportedLayers;
- for (auto&& layerQr : deviceQr.supportedLayersMap) {
- deviceSupportedLayers.emplace(layerQr.first);
+
+ auto queryNetwork = [&] (const InferenceEngine::ICNNNetwork & networkObject) {
+ auto clonedNetwork = cloneNetwork(networkObject);
+ auto deviceQr = GetCore()->QueryNetwork(*clonedNetwork, deviceName, metaDevice.config);
+ std::unordered_set<std::string> deviceSupportedLayers;
+ for (auto&& layerQr : deviceQr.supportedLayersMap) {
+ deviceSupportedLayers.emplace(layerQr.first);
+ }
+ supportedLayers = supportedLayers.empty()
+ ? deviceSupportedLayers : (deviceSupportedLayers.empty()
+ ? supportedLayers : Intersection(supportedLayers, deviceSupportedLayers));
+ };
+
+ if (network.getFunction()) {
+ if (!allSupportsNgraph) {
+ auto cnnNetworkImpl = std::make_shared<details::CNNNetworkImpl>(network);
+ queryNetwork(*cnnNetworkImpl);
+ } else {
+ queryNetwork(network);
+ }
+ } else {
+ queryNetwork(network);
}
- supportedLayers = supportedLayers.empty()
- ? deviceSupportedLayers : (deviceSupportedLayers.empty()
- ? supportedLayers : Intersection(supportedLayers, deviceSupportedLayers));
}
+
for (auto&& supportedLayer : supportedLayers) {
queryResult.supportedLayersMap[supportedLayer] = GetName();
}
VPU_THROW_UNLESS(!(std::find(deviceIDs.begin(), deviceIDs.end(), deviceName) == deviceIDs.end()), "Myriad device: {} not found.", deviceName);
}
+ if (network.getFunction()) {
+ THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str << " ngraph::Function is not supported natively";
+ }
+
const auto log = std::make_shared<Logger>(
"GraphCompiler",
parsedConfigCopy.logLevel(),
InferenceEngine::CNNNetwork cnnNet(ngraph);
// convert to old representation
- getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
- cnnNet.addOutput(testLayerName);
+ InferenceEngine::CNNNetwork convertedNetwork(std::make_shared<details::CNNNetworkImpl>(cnnNet));
+ convertedNetwork.addOutput(testLayerName);
InferenceEngine::OutputsDataMap outs = cnnNet.getOutputsInfo();
- ASSERT_EQ(2, outs.size());
+ InferenceEngine::OutputsDataMap convertedOuts = convertedNetwork.getOutputsInfo();
+ ASSERT_EQ(1, outs.size());
+ ASSERT_EQ(2, convertedOuts.size());
}
TEST(CNNNGraphImplTests, TestSetCurrentBatch) {
}
InferenceEngine::CNNNetwork cnnNet(ngraph);
- auto cnnLayer = CommonTestUtils::getLayerByName(cnnNet, "testReLU");
+ auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet);
+ auto cnnLayer = CommonTestUtils::getLayerByName(convertedNetwork.get(), "testReLU");
ASSERT_NE(nullptr, cnnLayer);
ASSERT_EQ(cnnLayer->affinity, testAffinity);
}
ASSERT_NE(nullptr, cnnNet.getFunction());
ASSERT_EQ(5, cnnNet.layerCount());
// convert to old representation
- getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
- auto outputs = cnnNet.getOutputsInfo();
+ InferenceEngine::CNNNetwork convertedNetwork(std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
+ auto outputs = convertedNetwork.getOutputsInfo();
ASSERT_EQ(2, outputs.size());
ASSERT_TRUE(outputs.find("relu2") != outputs.end());
ASSERT_TRUE(outputs.find(testLayerName) != outputs.end());
}
InferenceEngine::CNNNetwork cnnNet(ngraph);
- // convert to old representation
- getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
- ASSERT_EQ(4, cnnNet.layerCount());
+ auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet);
+ ASSERT_EQ(4, convertedNetwork->layerCount());
}
TEST(CNNNGraphImplTests, SaveInputInfoAfterConversion) {
auto network = ie.ReadNetwork(model, weights);
auto inputInfo = network.getInputsInfo();
+ auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
auto cnnLayer = getCreatorLayer(inputInfo.begin()->second->getInputData()).lock();
- ASSERT_TRUE(cnnLayer);
+ ASSERT_NE(nullptr, cnnLayer);
ASSERT_NE(cnnLayer->params.find("PrimitivesPriority"), cnnLayer->params.end());
ASSERT_EQ("cpu:avx2", cnnLayer->params["PrimitivesPriority"]);
}
inputsInfo.at("input")->setPrecision(Precision::FP16);
}
+ InferenceEngine::CNNNetwork convertedNetwork;
{
SCOPED_TRACE("Convert to old format");
// convert to old representation
- getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
+ convertedNetwork = InferenceEngine::CNNNetwork(
+ std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
}
{
SCOPED_TRACE("After conversion");
- const auto inputsInfo = cnnNet.getInputsInfo();
+ const auto inputsInfo = convertedNetwork.getInputsInfo();
ASSERT_EQ(inputsInfo.at("input")->getPrecision(), Precision::FP16)
<< "Manually set presision should be left unchanged";
inputsInfo.at("input")->setLayout(Layout::NHWC);
}
+ InferenceEngine::CNNNetwork convertedNetwork;
{
SCOPED_TRACE("Convert to old format");
// convert to old representation
- getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
+ convertedNetwork = InferenceEngine::CNNNetwork(
+ std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
}
{
SCOPED_TRACE("After conversion");
- const auto inputsInfo = cnnNet.getInputsInfo();
+ const auto inputsInfo = convertedNetwork.getInputsInfo();
ASSERT_EQ(inputsInfo.at("input")->getLayout(), Layout::NHWC)
<< "Manually set layout should be left unchanged";
outputsInfo.at("output")->setPrecision(Precision::FP16);
}
+ InferenceEngine::CNNNetwork convertedNetwork;
{
SCOPED_TRACE("Convert to old format");
// convert to old representation
- getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
+ convertedNetwork = InferenceEngine::CNNNetwork(
+ std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
}
{
SCOPED_TRACE("After conversion");
- const auto outputsInfo = cnnNet.getOutputsInfo();
+ const auto outputsInfo = convertedNetwork.getOutputsInfo();
ASSERT_EQ(outputsInfo.at("output")->getPrecision(), Precision::FP16)
<< "Manually set presision should be left unchanged";
outputsInfo.at("output")->setLayout(Layout::NHWC);
}
+ InferenceEngine::CNNNetwork convertedNetwork;
{
SCOPED_TRACE("Convert to old format");
// convert to old representation
- getCreatorLayer(cnnNet.getInputsInfo().begin()->second->getInputData());
+ convertedNetwork = InferenceEngine::CNNNetwork(
+ std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNet));
}
{
SCOPED_TRACE("After conversion");
- const auto outputsInfo = cnnNet.getOutputsInfo();
+ const auto outputsInfo = convertedNetwork.getOutputsInfo();
ASSERT_EQ(outputsInfo.at("output")->getLayout(), Layout::NHWC)
<< "Manually set layout should be left unchanged";
void compareWithRef(const InferenceEngine::CNNNetwork &network,
const std::vector<InferenceEngine::CNNLayerPtr> &refLayersVec) {
IE_SUPPRESS_DEPRECATED_START
- ASSERT_NO_THROW(FuncTestUtils::compareLayerByLayer<std::vector<InferenceEngine::CNNLayerPtr>>(
- InferenceEngine::details::CNNNetSortTopologically(network),
+ auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+ ASSERT_NO_THROW(FuncTestUtils::compareLayerByLayer(
+ InferenceEngine::details::CNNNetSortTopologically(*convertedNetwork),
refLayersVec, false));
IE_SUPPRESS_DEPRECATED_END
}
{
IE_SUPPRESS_DEPRECATED_START
// convert to old representation
- getCreatorLayer(originalNetwork.getInputsInfo().begin()->second->getInputData());
+ originalNetwork = InferenceEngine::CNNNetwork(
+ std::make_shared<InferenceEngine::details::CNNNetworkImpl>(originalNetwork));
IE_SUPPRESS_DEPRECATED_END
}
originalNetwork.getInputsInfo().begin()->second->setPrecision(_netPrc);
TEST_P(CNNNetworkSerializerTest, TopoSortResultUnique) {
InferenceEngine::CNNNetwork network(ngraph::builder::subgraph::makeConvPoolRelu());
- auto sorted = InferenceEngine::Serialization::TopologicalSort(network);
+ auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+ auto sorted = InferenceEngine::Serialization::TopologicalSort(*convertedNetwork);
std::vector<std::string> actualLayerNames;
for (auto&& layer : sorted) {
IE_SUPPRESS_DEPRECATED_END
}
- std::vector<std::string> expectedLayerNames = {
- "Param_1", "Const_1", "Reshape_1", "Conv_1", "Pool_1", "Relu_1", "Const_2", "Reshape_2"
+ const std::vector<std::string> expectedLayerNames = {
+ "Param_1", "Const_1", "Reshape_1", "Conv_1",
+ "Pool_1", "Relu_1", "Const_2", "Reshape_2"
};
ASSERT_EQ(expectedLayerNames, actualLayerNames);
IE_SUPPRESS_DEPRECATED_START
// convert to old representation
- getCreatorLayer(cnn.getInputsInfo().begin()->second->getInputData());
+ auto convertedNetwork = std::make_shared<details::CNNNetworkImpl>(cnn);
+ (void)convertedNetwork;
IE_SUPPRESS_DEPRECATED_END
}
auto network = ie.ReadNetwork(modelV10, weights);
auto cnnNetwork = ie.ReadNetwork(oldModel, weights);
- FuncTestUtils::compareCNNNetworks(network, cnnNetwork, false);
IE_SUPPRESS_DEPRECATED_START
- for (auto it = details::CNNNetworkIterator(network); it != details::CNNNetworkIterator(); it++) {
+ auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+
+ FuncTestUtils::compareCNNNetworks(InferenceEngine::CNNNetwork(convertedNetwork), cnnNetwork, false);
+
+ for (auto it = details::CNNNetworkIterator(convertedNetwork.get()); it != details::CNNNetworkIterator(); it++) {
InferenceEngine::CNNLayerPtr layer = *it;
ASSERT_NE(nullptr, layer->getNode());
}
+ ASSERT_EQ(nullptr, cnnNetwork.getFunction());
for (auto it = details::CNNNetworkIterator(cnnNetwork); it != details::CNNNetworkIterator(); it++) {
InferenceEngine::CNNLayerPtr layer = *it;
ASSERT_EQ(nullptr, layer->getNode());
IE_SUPPRESS_DEPRECATED_START
// convert to old representation
- getCreatorLayer(cnn.getInputsInfo().begin()->second->getInputData());
+ auto convertedNetwork = std::make_shared<details::CNNNetworkImpl>(cnn);
+ (void)convertedNetwork;
IE_SUPPRESS_DEPRECATED_END
}
SizeVector outDims = output["activation"]->getTensorDesc().getDims();
ASSERT_EQ(outDims, refAfterReshape);
// Convert to CNNNetwork
- auto layer = CommonTestUtils::getLayerByName(network, "activation");
+ auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+ auto layer = CommonTestUtils::getLayerByName(convertedNetwork.get(), "activation");
ASSERT_EQ("CustomTestLayer", layer->type);
}
SizeVector outDims = output["activation"]->getTensorDesc().getDims();
ASSERT_EQ(outDims, refAfterReshape);
// Convert to CNNNetwork
- auto layer = CommonTestUtils::getLayerByName(network, "activation");
+ auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+ auto layer = CommonTestUtils::getLayerByName(convertedNetwork.get(), "activation");
ASSERT_EQ("CustomTestLayer", layer->type);
ASSERT_EQ("false", layer->params["test1"]);
ASSERT_EQ("3", layer->params["test2"]);
}
InferenceEngine::CNNNetwork LayerTransformation::transform(InferenceEngine::details::LayerTransformation::Params& params) {
- auto net1 = InferenceEngine::CNNNetwork(function);
- std::shared_ptr<InferenceEngine::ICNNNetwork> clonedNetwork = InferenceEngine::cloneNetwork(net1);
+ auto ngraphNetwork = InferenceEngine::CNNNetwork(function);
+ std::shared_ptr<InferenceEngine::ICNNNetwork> clonedNetwork = InferenceEngine::cloneNetwork(ngraphNetwork);
if (clonedNetwork->getFunction()) {
const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {
}
InferenceEngine::CNNNetwork LayerTransformation::transform(const InferenceEngine::details::LowPrecisionTransformations& transformations) {
- InferenceEngine::details::CNNNetworkImplPtr cnnNetworkImp = cloneNet(InferenceEngine::CNNNetwork(function));
+ // convert to old representation
+ InferenceEngine::CNNNetwork ngraphNetwork(function);
+ auto cnnNetworkImp = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(ngraphNetwork);
InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::I64, InferenceEngine::Precision::I32);
InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::U64, InferenceEngine::Precision::I32);
}
InferenceEngine::CNNNetwork LayerTransformation::transform(InferenceEngine::details::LayerTransformation::Params& params) {
- auto net1 = InferenceEngine::CNNNetwork(function);
- std::shared_ptr<InferenceEngine::ICNNNetwork> clonedNetwork = InferenceEngine::cloneNetwork(net1);
+ auto ngraphNetwork = InferenceEngine::CNNNetwork(function);
+ std::shared_ptr<InferenceEngine::ICNNNetwork> clonedNetwork = InferenceEngine::cloneNetwork(ngraphNetwork);
if (clonedNetwork->getFunction()) {
const auto transformations_callback = [](const std::shared_ptr<const ::ngraph::Node> &node) -> bool {
}
InferenceEngine::CNNNetwork LayerTransformation::transform(const InferenceEngine::details::LowPrecisionTransformations& transformations) {
- InferenceEngine::details::CNNNetworkImplPtr cnnNetworkImp = cloneNet(InferenceEngine::CNNNetwork(function));
+ // convert to old representation
+ InferenceEngine::CNNNetwork ngraphNetwork(function);
+ auto cnnNetworkImp = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(ngraphNetwork);
InferenceEngine::NetPass::ConvertPrecision(*cnnNetworkImp, InferenceEngine::Precision::FP16, InferenceEngine::Precision::FP32);
#include "ngraph/function.hpp"
#include "cpp/ie_cnn_network.h"
+#include "cnn_network_impl.hpp"
#include "ie_common.h"
#include "common_test_utils/test_common.hpp"
private:
void triggerConversionToCNNNetwork() {
- // convert to old representation
- getCreatorLayer(cnnNetwork.getInputsInfo().begin()->second->getInputData());
+ cnnNetwork = InferenceEngine::CNNNetwork(
+ std::make_shared<InferenceEngine::details::CNNNetworkImpl>(cnnNetwork));
}
static const char s_FriendlyName[];
#include <ie_plugin_config.hpp>
#include <memory>
#include <fstream>
+#include <ngraph/variant.hpp>
#include <hetero/hetero_plugin_config.hpp>
#include <graph_tools.hpp>
#include <functional_test_utils/plugin_cache.hpp>
#include <functional_test_utils/skip_tests_config.hpp>
#include <common_test_utils/common_utils.hpp>
+#include <common_test_utils/test_assertions.hpp>
#ifdef ENABLE_UNICODE_PATH_SUPPORT
#include <iostream>
}
}
void setHeteroNetworkAffinity(const std::string& targetDevice) {
- InferenceEngine::InputsDataMap networkInputs = actualNetwork.getInputsInfo();
-
- CNNLayerPtr layer;
- for (auto input : networkInputs) {
- InputInfo::Ptr q = input.second;
- DataPtr p = q->getInputData();
- layer = getInputTo(p).begin()->second;
- }
-
- std::map<std::string, std::string> deviceMapping = {
- {"Convololution_4", targetDevice},
- {"Convololution_7", CommonTestUtils::DEVICE_CPU},
+ const std::map<std::string, std::string> deviceMapping = {
+ {"Split_2", targetDevice},
+ {"Convolution_4", targetDevice},
+ {"Convolution_7", CommonTestUtils::DEVICE_CPU},
{"Relu_5", CommonTestUtils::DEVICE_CPU},
{"Relu_8", targetDevice},
{"Concat_9", CommonTestUtils::DEVICE_CPU}
};
- CNNNetDFS(layer, [&](const CNNLayerPtr &layer) {
- IE_SUPPRESS_DEPRECATED_START
- auto it = deviceMapping.find(layer->name);
+ for (const auto & op : actualNetwork.getFunction()->get_ops()) {
+ auto it = deviceMapping.find(op->get_friendly_name());
if (it != deviceMapping.end()) {
- layer->affinity = it->second;
- } else {
- layer->affinity = CommonTestUtils::DEVICE_CPU;
+ std::string affinity = it->second;
+ op->get_rt_info()["affinity"] = std::make_shared<ngraph::VariantWrapper<std::string>>(affinity);
}
- IE_SUPPRESS_DEPRECATED_END
- });
+ }
}
};
//
// QueryNetwork
//
+
TEST_P(IEClassNetworkTestP, QueryNetworkActualThrows) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
TEST_P(IEClassNetworkTestP, QueryNetworkActualNoThrow) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Core ie;
- ASSERT_NO_THROW(ie.QueryNetwork(actualNetwork, deviceName));
+
+ try {
+ ie.QueryNetwork(actualNetwork, deviceName);
+ } catch (const InferenceEngine::details::InferenceEngineException & ex) {
+ std::string message = ex.what();
+ ASSERT_STR_CONTAINS(message, "[NOT_IMPLEMENTED] ngraph::Function is not supported natively");
+ }
}
TEST_P(IEClassNetworkTestP, QueryNetworkHeteroActualNoThrow) {
Core ie;
if (supportsDeviceID(ie, deviceName)) {
- ASSERT_NO_THROW(ie.QueryNetwork(simpleNetwork, deviceName + ".0"));
+ try {
+ ie.QueryNetwork(simpleNetwork, deviceName + ".0");
+ } catch (const InferenceEngine::details::InferenceEngineException & ex) {
+ std::string message = ex.what();
+ ASSERT_STR_CONTAINS(message, "[NOT_IMPLEMENTED] ngraph::Function is not supported natively");
+ }
} else {
GTEST_SKIP();
}
{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices},
{"TARGET_FALLBACK", targetFallback}}));
+ auto convertedActualNetwork = std::make_shared<details::CNNNetworkImpl>(actualNetwork);
for (auto &&layer : result.supportedLayersMap) {
-// IE_SUPPRESS_DEPRECATED_START
- EXPECT_NO_THROW(CommonTestUtils::getLayerByName(actualNetwork, layer.first));
-// IE_SUPPRESS_DEPRECATED_END
+ EXPECT_NO_THROW(CommonTestUtils::getLayerByName(convertedActualNetwork.get(), layer.first));
}
} else {
GTEST_SKIP();
{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices},
{"TARGET_FALLBACK", deviceName + "," + CommonTestUtils::DEVICE_CPU}}));
+ auto convertedActualNetwork = std::make_shared<details::CNNNetworkImpl>(actualNetwork);
for (auto &&layer : result.supportedLayersMap) {
- IE_SUPPRESS_DEPRECATED_START
- EXPECT_NO_THROW(CommonTestUtils::getLayerByName(actualNetwork, layer.first));
- IE_SUPPRESS_DEPRECATED_END
+ EXPECT_NO_THROW(CommonTestUtils::getLayerByName(convertedActualNetwork.get(), layer.first));
}
} else {
GTEST_SKIP();
return l->name < r->name;
});
- compareLayerByLayer<std::vector<InferenceEngine::CNNLayerPtr>>(nodes_new, nodes_old, sameNetVersions);
+ compareLayerByLayer(nodes_new, nodes_old, sameNetVersions);
auto get_map = [](
const std::vector<InferenceEngine::DataPtr> &data) -> std::map<std::string, InferenceEngine::DataPtr> {
THROW_IE_EXCEPTION << "CNNNetworks have different batch size! " << std::to_string(network.getBatchSize())
<< " and " << std::to_string(refNetwork.getBatchSize());
- compareLayerByLayer<InferenceEngine::CNNNetwork>(network, refNetwork, sameNetVersions);
+ compareLayerByLayer(network, refNetwork, sameNetVersions);
InferenceEngine::InputsDataMap newInput = network.getInputsInfo();
InferenceEngine::InputsDataMap oldInput = refNetwork.getInputsInfo();
InferenceEngine::OutputsDataMap newOutput = network.getOutputsInfo();
compareInfo<InferenceEngine::OutputsDataMap>(newOutput, oldOutput, "CNNNetworks have different outputs!");
}
+IE_SUPPRESS_DEPRECATED_START
+
+void compareLayerByLayer(const std::vector<InferenceEngine::CNNLayerPtr>& network,
+ const std::vector<InferenceEngine::CNNLayerPtr>& refNetwork,
+ bool sameNetVersions) {
+ auto iterator = network.begin();
+ auto refIterator = refNetwork.begin();
+ if (network.size() != refNetwork.size())
+ THROW_IE_EXCEPTION << "CNNNetworks have different number of layers: " <<
+ network.size() << " vs " << refNetwork.size();
+ for (; iterator != network.end() && refIterator != refNetwork.end(); iterator++, refIterator++) {
+ InferenceEngine::CNNLayerPtr layer = *iterator;
+ InferenceEngine::CNNLayerPtr refLayer = *refIterator;
+ compareCNNNLayers(layer, refLayer, sameNetVersions);
+ }
+}
+
+void compareLayerByLayer(const InferenceEngine::CNNNetwork& network,
+ const InferenceEngine::CNNNetwork& refNetwork,
+ bool sameNetVersions) {
+ InferenceEngine::details::CNNNetworkIterator iterator, refIterator, end;
+ std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> convertedNetwork, convertedRefNetwork;
+
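+ // iterate over a legacy copy when the network is ngraph-based, over the network itself otherwise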
+ auto convertNetwork = [] (const InferenceEngine::CNNNetwork & net,
+ std::shared_ptr<InferenceEngine::details::CNNNetworkImpl> & convertedNet,
+ InferenceEngine::details::CNNNetworkIterator & it) {
+ if (net.getFunction()) {
+ convertedNet.reset(new InferenceEngine::details::CNNNetworkImpl(net));
+ it = InferenceEngine::details::CNNNetworkIterator(convertedNet.get());
+ } else {
+ it = InferenceEngine::details::CNNNetworkIterator(net);
+ }
+ };
+
+ convertNetwork(network, convertedNetwork, iterator);
+ convertNetwork(refNetwork, convertedRefNetwork, refIterator);
+
+ size_t layerCount = convertedNetwork ? convertedNetwork->layerCount() : network.layerCount();
+ size_t layerRefCount = convertedRefNetwork ? convertedRefNetwork->layerCount() : refNetwork.layerCount();
+
+ if (layerCount != layerRefCount)
+ THROW_IE_EXCEPTION << "CNNNetworks have different number of layers: " << layerCount << " vs " << layerRefCount;
+ for (; iterator != end && refIterator != end; iterator++, refIterator++) {
+ InferenceEngine::CNNLayerPtr layer = *iterator;
+ InferenceEngine::CNNLayerPtr refLayer = *refIterator;
+ compareCNNNLayers(layer, refLayer, sameNetVersions);
+ }
+}
+
+IE_SUPPRESS_DEPRECATED_END
+
} // namespace FuncTestUtils
#pragma once
#include "cpp/ie_cnn_network.h"
+#include "cnn_network_impl.hpp"
#include "details/ie_cnn_network_iterator.hpp"
namespace FuncTestUtils {
void compareCNNNLayers(const InferenceEngine::CNNLayerPtr &layer, const InferenceEngine::CNNLayerPtr &refLayer, bool sameNetVersions);
-IE_SUPPRESS_DEPRECATED_START
-template <class T>
-inline void compareLayerByLayer(const T& network, const T& refNetwork, bool sameNetVersions = true) {
- auto iterator = InferenceEngine::details::CNNNetworkIterator(network);
- auto refIterator = InferenceEngine::details::CNNNetworkIterator(refNetwork);
- auto end = InferenceEngine::details::CNNNetworkIterator();
- if (network.layerCount() != refNetwork.layerCount())
- THROW_IE_EXCEPTION << "CNNNetworks have different number of layers: " << network.layerCount() << " vs " << refNetwork.layerCount();
- for (; iterator != end && refIterator != end; iterator++, refIterator++) {
- InferenceEngine::CNNLayerPtr layer = *iterator;
- InferenceEngine::CNNLayerPtr refLayer = *refIterator;
- compareCNNNLayers(layer, refLayer, sameNetVersions);
- }
-}
-
-template <>
-inline void compareLayerByLayer(const std::vector<InferenceEngine::CNNLayerPtr>& network,
- const std::vector<InferenceEngine::CNNLayerPtr>& refNetwork,
- bool sameNetVersions) {
- auto iterator = network.begin();
- auto refIterator = refNetwork.begin();
- if (network.size() != refNetwork.size())
- THROW_IE_EXCEPTION << "CNNNetworks have different number of layers: " <<
- network.size() << " vs " << refNetwork.size();
- for (; iterator != network.end() && refIterator != refNetwork.end(); iterator++, refIterator++) {
- InferenceEngine::CNNLayerPtr layer = *iterator;
- InferenceEngine::CNNLayerPtr refLayer = *refIterator;
- compareCNNNLayers(layer, refLayer, sameNetVersions);
- }
-}
-
-IE_SUPPRESS_DEPRECATED_END
+void compareLayerByLayer(const InferenceEngine::CNNNetwork& network,
+ const InferenceEngine::CNNNetwork& refNetwork,
+ bool sameNetVersions = true);
+
+void compareLayerByLayer(const std::vector<InferenceEngine::CNNLayerPtr>& network,
+ const std::vector<InferenceEngine::CNNLayerPtr>& refNetwork,
+ bool sameNetVersions = true);
} // namespace FuncTestUtils
\ No newline at end of file
// PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE,
// transformationsParams.transformationsInPluginEnabled ? PluginConfigParams::YES : PluginConfigParams::NO);
- usedNetwork = cloneNet(network);
+ if (network.getFunction()) {
+ usedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+ } else {
+ usedNetwork = cloneNet(network);
+ }
ExecutableNetwork exeNetwork = ie.LoadNetwork(network, p.deviceName, config);
InferRequest inferRequest = exeNetwork.CreateInferRequest();
if (inputs.empty()) {
const SingleLayerTransformationsTestParams p = ::testing::WithParamInterface<SingleLayerTransformationsTestParams>::GetParam();
// TODO: enable ONNX
CNNNetwork network = createNetwork();
+ ASSERT_EQ(nullptr, network.getFunction());
const auto inputsInfo = network.getInputsInfo();
std::unordered_map<std::string, Blob::Ptr> inputBlobs;
#endif
#include <gtest/gtest.h>
+#include <cnn_network_impl.hpp>
#include <nodes/list.hpp>
#include <mkldnn_graph.h>
#include <mkldnn_memory.h>
void CreateGraph(InferenceEngine::ICNNNetwork &network, const MKLDNNPlugin::MKLDNNExtensionManager::Ptr& extMgr,
MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache = {}) {
- MKLDNNGraph::CreateGraph(network, extMgr, cache);
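+ // MKLDNNGraph consumes the legacy representation, so convert ngraph-based networks first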
+ if (network.getFunction()) {
+ auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+ MKLDNNGraph::CreateGraph(static_cast<InferenceEngine::ICNNNetwork&>(*convertedNetwork),
+ extMgr, cache);
+ } else {
+ MKLDNNGraph::CreateGraph(network, extMgr, cache);
+ }
}
void CreateGraph(InferenceEngine::ICNNNetwork &network) {
MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache;
- MKLDNNGraph::CreateGraph(network, extensionManager, cache);
+ if (network.getFunction()) {
+ auto convertedNetwork = std::make_shared<InferenceEngine::details::CNNNetworkImpl>(network);
+ MKLDNNGraph::CreateGraph(static_cast<InferenceEngine::ICNNNetwork&>(*convertedNetwork),
+ extensionManager, cache);
+ } else {
+ MKLDNNGraph::CreateGraph(network, extensionManager, cache);
+ }
}
void checkDynBatch(InferenceEngine::BlobMap& srcs, InferenceEngine::BlobMap& outputBlobs, int batch, size_t MB,