From: Vladislav Volkov Date: Wed, 30 Sep 2020 08:31:19 +0000 (+0300) Subject: MKLDNN nodes factory improvements (#2252) X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8f1ee05385f6163ff2aa64a79bfc484fb5422de9;p=platform%2Fupstream%2Fdldt.git MKLDNN nodes factory improvements (#2252) --- diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp index 6ee13c5..98ea729 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp @@ -133,7 +133,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx for (const auto layer : NetPass::TIBodySortTopologically(subgraph)) { CNNLayerPtr _layer = layer; - const MKLDNNNodePtr node(MKLDNNNode::CreateNode(_layer, getEngine(), extMgr, weightsCache)); + const MKLDNNNodePtr node(MKLDNNNode::factory().create(_layer, getEngine(), extMgr, weightsCache)); graphNodes.push_back(node); layer2node[layer] = node; @@ -162,7 +162,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx CNNLayerPtr layer(new CNNLayer({"out_" + output->getName(), "Output", output->getTensorDesc().getPrecision()})); layer->insData.push_back(output); - const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache)); + const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache)); MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(output), 0)); node->addEdge(edge); @@ -182,7 +182,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx CNNLayerPtr layer(new CNNLayer({"stub_" + parent_layer->name, "Output", to_stub_data->getTensorDesc().getPrecision()})); layer->insData.push_back(to_stub_data); - const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache)); + const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, 
weightsCache)); MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(to_stub_data), 0)); node->addEdge(edge); @@ -197,7 +197,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx CNNLayerPtr layer(new CNNLayer({"in_" + input->getName(), "Input", input->getTensorDesc().getPrecision()})); layer->outData.push_back(input); - const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache)); + const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache)); for (auto p : getInputTo(input)) { auto consumer = p.second; @@ -251,7 +251,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan _layer->outData = layer->outData; } - const MKLDNNNodePtr node(MKLDNNNode::CreateNode(_layer, getEngine(), extMgr, weightsCache)); + const MKLDNNNodePtr node(MKLDNNNode::factory().create(_layer, getEngine(), extMgr, weightsCache)); graphNodes.push_back(node); layer2node[layer] = node; @@ -289,7 +289,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan CNNLayerPtr layer(new CNNLayer({"out_" + output.first, "Output", data->getTensorDesc().getPrecision()})); layer->insData.push_back(data); - const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache)); + const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache)); MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(data), 0)); node->addEdge(edge); @@ -309,7 +309,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan CNNLayerPtr layer(new CNNLayer({"stub_" + parent_layer->name, "Output", to_stub_data->getTensorDesc().getPrecision()})); layer->insData.push_back(to_stub_data); - const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache)); + const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache)); 
MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(to_stub_data), 0)); node->addEdge(edge); diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp index 6f87753..d956431 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp @@ -5,12 +5,10 @@ #include "mkldnn_node.h" #include "mkldnn_extension_mngr.h" -#include "caseless.hpp" #include #include #include #include -#include #include #include @@ -150,13 +148,9 @@ Type TypeFromName(const std::string type) { } // namespace MKLDNNPlugin -std::shared_ptr MKLDNNNode::GetNodesHolder() { - static std::shared_ptr localHolder = std::make_shared(); - return localHolder; -} - -void MKLDNNNode::AddNode(const std::string& name, CreatorByLayerFunction factory) { - GetNodesHolder()->nodes[name] = factory; +MKLDNNNode::Factory & MKLDNNNode::factory() { + static Factory factoryInstance; + return factoryInstance; } MKLDNNNode::MKLDNNNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng, @@ -260,41 +254,6 @@ void MKLDNNNode::remove() { } } -MKLDNNNode* MKLDNNNode::CreateNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng, - const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) { - MKLDNNNode *newNode = nullptr; - auto nodesHolder = GetNodesHolder(); - - if (nodesHolder->nodes.find("Generic") != nodesHolder->nodes.end()) { - std::unique_ptr ol(nodesHolder->nodes["Generic"](layer, eng, w_cache)); - if (ol != nullptr && ol->created(extMgr)) - newNode = ol.release(); - } - if (newNode == nullptr) { - for (auto maker : nodesHolder->nodes) { - std::unique_ptr ol(maker.second(layer, eng, w_cache)); - if (ol != nullptr && ol->created(extMgr)) { - newNode = ol.release(); - break; - } - } - } - - // WA-start : TI node requires all attributes to construct internal subgpath - // including extManager, socket and mkldnn::eng. 
-#if defined (COMPILED_CPU_MKLDNN_TENSORITERATOR_NODE) - MKLDNNTensorIteratorNode *ti = dynamic_cast(newNode); - if (ti != nullptr) - ti->setExtManager(extMgr); -#endif - // WA-end - - if (!newNode) - THROW_IE_EXCEPTION << "Unsupported primitive of type: " << layer->type << " name: " << layer->name; - - return newNode; -} - bool MKLDNNNode::isEdgesEmpty(const std::vector& edges) const { for (auto &edge : edges) { if (edge.lock()) @@ -1157,3 +1116,44 @@ Layout MKLDNNNode::getWeightsLayoutByDims(SizeVector dims, bool isGrouped) { void MKLDNNNode::appendPostOps(mkldnn::post_ops& ops) { THROW_IE_EXCEPTION << "Fusing of " << this->getType() << " operation is not implemented"; } + +MKLDNNNode* MKLDNNNode::Factory::create(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng, + const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) { + MKLDNNNode *newNode = nullptr; + + auto builder = builders.find(Generic); + + if (builder != builders.end()) { + std::unique_ptr ol(builder->second(layer, eng, w_cache)); + if (ol != nullptr && ol->created(extMgr)) + newNode = ol.release(); + } + + if (newNode == nullptr) { + builder = builders.find(TypeFromName(layer->type)); + + if (builder != builders.end()) { + std::unique_ptr ol(builder->second(layer, eng, w_cache)); + if (ol != nullptr && ol->created(extMgr)) + newNode = ol.release(); + } + } + + // WA-start : TI node requires all attributes to construct internal subgraph + // including extManager, socket and mkldnn::eng. 
+#if defined (COMPILED_CPU_MKLDNN_TENSORITERATOR_NODE) + MKLDNNTensorIteratorNode *ti = dynamic_cast(newNode); + if (ti != nullptr) + ti->setExtManager(extMgr); +#endif + // WA-end + + if (!newNode) + THROW_IE_EXCEPTION << "Unsupported primitive of type: " << layer->type << " name: " << layer->name; + + return newNode; +} + +void MKLDNNNode::Factory::registerNode(Type type, builder_t builder) { + builders[type] = builder; +} diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_node.h b/inference-engine/src/mkldnn_plugin/mkldnn_node.h index be994f7..f5f6953 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_node.h +++ b/inference-engine/src/mkldnn_plugin/mkldnn_node.h @@ -8,9 +8,9 @@ #include #include #include -#include #include #include +#include #include #include "mkldnn_dims.h" #include "mkldnn_memory.h" @@ -28,12 +28,6 @@ namespace MKLDNNPlugin { using MKLDNNNodePtr = std::shared_ptr; using MKLDNNNodeWeakPtr = std::weak_ptr; -using CreatorByLayerFunction = std::function; -struct MKLDNNNodesHolder { - std::map nodes; -}; - enum Type { Unknown, Generic, @@ -266,11 +260,11 @@ private: class MKLDNNNode : public InferenceEngine::details::no_copy { public: - static void AddNode(const std::string& name, CreatorByLayerFunction factory); - static std::shared_ptr GetNodesHolder(); + class Factory; + template + class Registrar; - static MKLDNNNode* CreateNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng, - const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache); + static Factory & factory(); ~MKLDNNNode() override = default; @@ -483,20 +477,6 @@ public: return desc.outputNumbers(); } - template - class Register { - public: - explicit Register(const std::string& type) { - MKLDNNNode::AddNode(type, - [](const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng, - MKLDNNWeightsSharing::Ptr &w_cache) - -> MKLDNNNode* { - return new To(layer, eng, w_cache); - }); - } - }; - - protected: // TODO: It is necessary 
only in order to avoid modifications of cnnLayers and original topology std::vector outDims; @@ -610,8 +590,39 @@ private: ConstantType checkConstant(LOOK look, std::vector& checkNodes); }; +class MKLDNNNode::Factory : InferenceEngine::details::no_copy { +public: + using builder_t = std::function; + + MKLDNNNode* create(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng, + const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache); + + void registerNode(Type type, builder_t builder); + +private: + using map_t = std::unordered_map::type>>; + map_t builders; +}; + +template +class MKLDNNNode::Registrar { +public: + explicit Registrar(Type type) { + MKLDNNNode::factory().registerNode(type, + [type](const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng, + MKLDNNWeightsSharing::Ptr &w_cache) -> MKLDNNNode* { + MKLDNNNode *node = new To(layer, eng, w_cache); + return node; + }); + } +}; + +#define REG_MKLDNN_CONCAT2(X, Y) X ## Y +#define REG_MKLDNN_CONCAT(X, Y) REG_MKLDNN_CONCAT2(X, Y) #define REG_MKLDNN_PRIM_FOR(__prim, __type) \ -static MKLDNNNode::Register<__prim> __reg__##__type(#__type) +static MKLDNNNode::Registrar<__prim> REG_MKLDNN_CONCAT(_reg_, __LINE__)(__type) template inline T div_up(const T a, const U b) { diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp index f9e7fa5..57d23d3 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp @@ -294,7 +294,7 @@ void Engine::QueryNetwork(const ICNNNetwork& network, const std::map ptr; try { - ptr.reset(MKLDNNNode::CreateNode(*itLayer, {mkldnn::engine::kind::cpu, 0}, extensionManager, fake_w_cache)); + ptr.reset(MKLDNNNode::factory().create(*itLayer, {mkldnn::engine::kind::cpu, 0}, extensionManager, fake_w_cache)); } catch (InferenceEngine::details::InferenceEngineException&) { return false; } @@ -339,7 +339,7 @@ void 
Engine::QueryNetwork(const ICNNNetwork& network, const std::map(MKLDNNNode::CreateNode(*i, eng, extensionManager, fake_w_cache)); + std::unique_ptr (MKLDNNNode::factory().create(*i, eng, extensionManager, fake_w_cache)); res.supportedLayersMap.insert({ (*i)->name, GetName() }); } catch (InferenceEngine::details::InferenceEngineException&) { } diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp index b119863..c639fbf 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp @@ -655,4 +655,4 @@ void MKLDNNConcatNode::execute(mkldnn::stream strm) { } } -REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concat); +REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concatenation); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_def_conv_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_def_conv_node.h index f3f0193..7dcab5e 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_def_conv_node.h +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_def_conv_node.h @@ -29,7 +29,7 @@ public: } private: - static Register reg; + static Registrar reg; bool withBiases = false; bool isDW = false; bool isMerged = false; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.cpp index 8c1ef5d..56ca8bc 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.cpp @@ -200,3 +200,4 @@ void MKLDNNInputNode::execute(mkldnn::stream strm) { } REG_MKLDNN_PRIM_FOR(MKLDNNInputNode, Input); +REG_MKLDNN_PRIM_FOR(MKLDNNInputNode, Output); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp index 73d5060..7a1186e 100644 --- 
a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp @@ -92,4 +92,4 @@ void MKLDNNLrnNode::createDescriptor(const std::vector reg; + static Registrar reg; MKLDNNMemoryNodeVirtualEdge::Holder* holder = nullptr; }; @@ -106,7 +106,7 @@ public: MKLDNNMemoryPtr getStore(); private: MKLDNNMemoryPtr dataStore; - static Register reg; + static Registrar reg; MKLDNNMemoryNodeVirtualEdge::Holder* holder = nullptr; }; #endif diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.cpp index 535044e..9f8ec6c 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.cpp @@ -69,3 +69,4 @@ bool MKLDNNReshapeNode::created() const { return getType() == Reshape || getType() == Flatten; } REG_MKLDNN_PRIM_FOR(MKLDNNReshapeNode, Reshape); +REG_MKLDNN_PRIM_FOR(MKLDNNReshapeNode, Flatten); diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_rnn.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_rnn.cpp index 9fb34e6..d1ef1b2 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_rnn.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_rnn.cpp @@ -523,5 +523,6 @@ void MKLDNNRNN::execute(mkldnn::stream strm) { strm.submit({exec_after.begin(), exec_after.end()}); } -REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNN); +REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNNCell); +REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNNSeq); } // namespace MKLDNNPlugin diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_pooling_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_pooling_node.cpp index fafb5c8..3783060 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_pooling_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_pooling_node.cpp @@ -112,4 +112,4 @@ void MKLDNNROIPoolingNode::createDescriptor(const std::vectorinsData) 
{ @@ -50,7 +51,7 @@ static LayerConfig make_plain_config(const CNNLayerPtr &layer) { class PortIteratorHelper : public PortMapHelper { public: PortIteratorHelper(const MKLDNNMemoryPtr &from, const MKLDNNMemoryPtr &to, - bool as_input, const TensorIterator::PortMap &port_map, const mkldnn::engine& eng, int n_iter) : as_input(as_input) { + bool as_input, const InferenceEngine::TensorIterator::PortMap &port_map, const mkldnn::engine& eng, int n_iter) : as_input(as_input) { const auto &full_blob = as_input ? from : to; const auto &part_blob = !as_input ? from : to; @@ -147,7 +148,7 @@ MKLDNNTensorIteratorNode::MKLDNNTensorIteratorNode(InferenceEngine::CNNLayerPtr MKLDNNNode(layer, eng, cache) {} void MKLDNNTensorIteratorNode::getSupportedDescriptors() { - auto *ti = dynamic_cast(getCnnLayer().get()); + auto *ti = dynamic_cast(getCnnLayer().get()); if (ti == nullptr) THROW_IE_EXCEPTION << "Cannot convert to TensorIterator layer."; @@ -189,7 +190,7 @@ void MKLDNNTensorIteratorNode::initSupportedPrimitiveDescriptors() { void MKLDNNTensorIteratorNode::createPrimitive() { - auto ti = dynamic_cast(getCnnLayer().get()); + auto ti = dynamic_cast(getCnnLayer().get()); if (ti == nullptr) THROW_IE_EXCEPTION << "Cannot convert to TensorIterator layer."; diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp index eb47ac0..337abdf 100644 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp +++ b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp @@ -482,7 +482,7 @@ TEST_F(MKLDNNGraphGenericTests, canGetPrimitiveDescriptorsList) { mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0)); MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache; - node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layerPtr, eng, extMgr, cache)); 
+ node.reset(MKLDNNPlugin::MKLDNNNode::factory().create(layerPtr, eng, extMgr, cache)); ASSERT_EQ(MKLDNNPlugin::Type::Generic, node->getType()); ASSERT_NO_THROW(node->getSupportedDescriptors()); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp index 967a762..1aaf085 100644 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp +++ b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp @@ -24,7 +24,7 @@ TEST_F(MKLDNNGraphReorderTests, cannotCreatePrimitiveDescriprorsWithoutOtherLaye InferenceEngine::CNNLayerPtr layer(new InferenceEngine::CNNLayer({"TestReorder", "Reorder", InferenceEngine::Precision::FP32})); MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache; - node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layer, eng, {}, cache)); + node.reset(MKLDNNPlugin::MKLDNNNode::factory().create(layer, eng, {}, cache)); ASSERT_EQ(MKLDNNPlugin::Type::Reorder, node->getType()); ASSERT_THROW(node->getSupportedDescriptors(), InferenceEngine::details::InferenceEngineException); diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/mkldnn_primitive_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/mkldnn_primitive_test.cpp index 28d9861..75b6261 100644 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/mkldnn_primitive_test.cpp +++ b/inference-engine/tests_deprecated/unit/engines/mkldnn/mkldnn_primitive_test.cpp @@ -33,7 +33,7 @@ protected: TEST_F(MKLDNNPrimitiveTest, DISABLED_canDeleteWeightInweitableLayer) { //simulate how convlayer gets created engine e(engine::cpu, 0); - //auto node = MKLDNNPlugin::MKLDNNNodePtr(MKLDNNPlugin::MKLDNNNode::CreateNode(MKLDNNPlugin::Generic, InferenceEngine::Precision::FP32, "")); + //auto node = 
MKLDNNPlugin::MKLDNNNodePtr(MKLDNNPlugin::MKLDNNNode::factory().create(MKLDNNPlugin::Generic, InferenceEngine::Precision::FP32, "")); // ChildConv *conv = new ChildConv(e); // EXPECT_CALL(*conv, die()).Times(1);