MKLDNN nodes factory improvements (#2252)
author	Vladislav Volkov <vladislav.volkov@intel.com>
Wed, 30 Sep 2020 08:31:19 +0000 (11:31 +0300)
committer	GitHub <noreply@github.com>
Wed, 30 Sep 2020 08:31:19 +0000 (11:31 +0300)
17 files changed:
inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp
inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
inference-engine/src/mkldnn_plugin/mkldnn_node.h
inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_def_conv_node.h
inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_memory_node.hpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_rnn.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_pooling_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_tensoriterator_node.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/mkldnn_primitive_test.cpp

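This change replaces the string-keyed node registry (MKLDNNNode::AddNode / MKLDNNNode::CreateNode over a static MKLDNNNodesHolder) with a singleton MKLDNNNode::Factory whose builders are keyed by the Type enum. Call sites move from MKLDNNNode::CreateNode(...) to MKLDNNNode::factory().create(...), and node classes register themselves through a Registrar template behind the REG_MKLDNN_PRIM_FOR macro. A minimal, self-contained sketch of the pattern follows; Type, Node, and Factory here are simplified stand-ins, not the plugin's real classes:

    #include <functional>
    #include <iostream>
    #include <memory>
    #include <type_traits>
    #include <unordered_map>
    #include <utility>

    enum Type { Unknown, Generic, Concatenation };   // stand-in for MKLDNNPlugin::Type

    struct Node {                                    // stand-in for MKLDNNNode
        virtual ~Node() = default;
        virtual const char *name() const = 0;
    };

    class Factory {
    public:
        using builder_t = std::function<Node *()>;

        Node *create(Type type) const {
            auto it = builders.find(type);
            return it != builders.end() ? it->second() : nullptr;
        }
        void registerNode(Type type, builder_t builder) {
            builders[type] = std::move(builder);
        }

    private:
        // Hashing the enum through its underlying integer type mirrors
        // the map_t alias introduced in mkldnn_node.h below.
        std::unordered_map<Type, builder_t,
            std::hash<std::underlying_type<Type>::type>> builders;
    };

    Factory &factory() {       // Meyers singleton, as in MKLDNNNode::factory()
        static Factory instance;
        return instance;
    }

    template <typename To>
    struct Registrar {         // static instances self-register at load time
        explicit Registrar(Type type) {
            factory().registerNode(type, []() -> Node * { return new To(); });
        }
    };

    struct ConcatNode : Node {
        const char *name() const override { return "Concat"; }
    };
    static Registrar<ConcatNode> reg_concat(Concatenation);

    int main() {
        std::unique_ptr<Node> n(factory().create(Concatenation));
        std::cout << (n ? n->name() : "unsupported") << "\n";
    }
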
index 6ee13c5..98ea729 100644 (file)
@@ -133,7 +133,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
     for (const auto layer : NetPass::TIBodySortTopologically(subgraph)) {
         CNNLayerPtr _layer = layer;
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(_layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(_layer, getEngine(), extMgr, weightsCache));
         graphNodes.push_back(node);
         layer2node[layer] = node;
 
@@ -162,7 +162,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
         CNNLayerPtr layer(new CNNLayer({"out_" + output->getName(), "Output", output->getTensorDesc().getPrecision()}));
         layer->insData.push_back(output);
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
 
         MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(output), 0));
         node->addEdge(edge);
@@ -182,7 +182,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
         CNNLayerPtr layer(new CNNLayer({"stub_" + parent_layer->name, "Output", to_stub_data->getTensorDesc().getPrecision()}));
         layer->insData.push_back(to_stub_data);
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
 
         MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(to_stub_data), 0));
         node->addEdge(edge);
@@ -197,7 +197,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
         CNNLayerPtr layer(new CNNLayer({"in_" + input->getName(), "Input", input->getTensorDesc().getPrecision()}));
         layer->outData.push_back(input);
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
 
         for (auto p : getInputTo(input)) {
             auto consumer = p.second;
@@ -251,7 +251,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
             _layer->outData = layer->outData;
         }
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(_layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(_layer, getEngine(), extMgr, weightsCache));
         graphNodes.push_back(node);
         layer2node[layer] = node;
 
@@ -289,7 +289,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
         CNNLayerPtr layer(new CNNLayer({"out_" + output.first, "Output", data->getTensorDesc().getPrecision()}));
         layer->insData.push_back(data);
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
 
         MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(data), 0));
         node->addEdge(edge);
@@ -309,7 +309,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
         CNNLayerPtr layer(new CNNLayer({"stub_" + parent_layer->name, "Output", to_stub_data->getTensorDesc().getPrecision()}));
         layer->insData.push_back(to_stub_data);
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
 
         MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(to_stub_data), 0));
         node->addEdge(edge);
index 6f87753..d956431 100644 (file)
@@ -5,12 +5,10 @@
 #include "mkldnn_node.h"
 #include "mkldnn_extension_mngr.h"
 
-#include "caseless.hpp"
 #include <vector>
 #include <string>
 #include <limits>
 #include <cstdint>
-#include <unordered_map>
 
 #include <nodes/mkldnn_batchnorm_node.h>
 #include <nodes/mkldnn_concat_node.h>
@@ -150,13 +148,9 @@ Type TypeFromName(const std::string type) {
 
 }  //  namespace MKLDNNPlugin
 
-std::shared_ptr<MKLDNNNodesHolder> MKLDNNNode::GetNodesHolder() {
-    static std::shared_ptr<MKLDNNNodesHolder> localHolder = std::make_shared<MKLDNNNodesHolder>();
-    return localHolder;
-}
-
-void MKLDNNNode::AddNode(const std::string& name, CreatorByLayerFunction factory) {
-    GetNodesHolder()->nodes[name] = factory;
+MKLDNNNode::Factory & MKLDNNNode::factory() {
+    static Factory factoryInstance;
+    return factoryInstance;
 }
 
 MKLDNNNode::MKLDNNNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
@@ -260,41 +254,6 @@ void MKLDNNNode::remove() {
     }
 }
 
-MKLDNNNode* MKLDNNNode::CreateNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
-                                   const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) {
-    MKLDNNNode *newNode = nullptr;
-    auto nodesHolder = GetNodesHolder();
-
-    if (nodesHolder->nodes.find("Generic") != nodesHolder->nodes.end()) {
-        std::unique_ptr<MKLDNNNode> ol(nodesHolder->nodes["Generic"](layer, eng, w_cache));
-        if (ol != nullptr && ol->created(extMgr))
-            newNode = ol.release();
-    }
-    if (newNode == nullptr) {
-        for (auto maker : nodesHolder->nodes) {
-            std::unique_ptr<MKLDNNNode> ol(maker.second(layer, eng, w_cache));
-            if (ol != nullptr && ol->created(extMgr)) {
-                newNode = ol.release();
-                break;
-            }
-        }
-    }
-
-    //  WA-start : TI node requires all attributes to construct internal subgpath
-    //             including extManager, socket and mkldnn::eng.
-#if defined (COMPILED_CPU_MKLDNN_TENSORITERATOR_NODE)
-    MKLDNNTensorIteratorNode *ti = dynamic_cast<MKLDNNTensorIteratorNode*>(newNode);
-    if (ti != nullptr)
-        ti->setExtManager(extMgr);
-#endif
-    //  WA-end
-
-    if (!newNode)
-        THROW_IE_EXCEPTION << "Unsupported primitive of type: " << layer->type << " name: " << layer->name;
-
-    return newNode;
-}
-
 bool MKLDNNNode::isEdgesEmpty(const std::vector<MKLDNNEdgeWeakPtr>& edges) const {
     for (auto &edge : edges) {
         if (edge.lock())
@@ -1157,3 +1116,44 @@ Layout MKLDNNNode::getWeightsLayoutByDims(SizeVector dims, bool isGrouped) {
 void MKLDNNNode::appendPostOps(mkldnn::post_ops& ops) {
     THROW_IE_EXCEPTION << "Fusing of " << this->getType() << " operation is not implemented";
 }
+
+MKLDNNNode* MKLDNNNode::Factory::create(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
+                                        const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) {
+    MKLDNNNode *newNode = nullptr;
+
+    auto builder = builders.find(Generic);
+
+    if (builder != builders.end()) {
+        std::unique_ptr<MKLDNNNode> ol(builder->second(layer, eng, w_cache));
+        if (ol != nullptr && ol->created(extMgr))
+            newNode = ol.release();
+    }
+
+    if (newNode == nullptr) {
+        builder = builders.find(TypeFromName(layer->type));
+
+        if (builder != builders.end()) {
+            std::unique_ptr<MKLDNNNode> ol(builder->second(layer, eng, w_cache));
+            if (ol != nullptr && ol->created(extMgr))
+                newNode = ol.release();
+        }
+    }
+
+    //  WA-start : TI node requires all attributes to construct internal subgraph
+    //             including extManager, socket and mkldnn::eng.
+#if defined (COMPILED_CPU_MKLDNN_TENSORITERATOR_NODE)
+    MKLDNNTensorIteratorNode *ti = dynamic_cast<MKLDNNTensorIteratorNode*>(newNode);
+    if (ti != nullptr)
+        ti->setExtManager(extMgr);
+#endif
+    //  WA-end
+
+    if (!newNode)
+        THROW_IE_EXCEPTION << "Unsupported primitive of type: " << layer->type << " name: " << layer->name;
+
+    return newNode;
+}
+
+void MKLDNNNode::Factory::registerNode(Type type, builder_t builder) {
+    builders[type] = builder;
+}
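
Note the lookup order in Factory::create(): the Generic builder (extension-backed layers) gets the first chance to claim the layer, and only if it declines does the factory fall back to the builder registered for TypeFromName(layer->type). This preserves the old CreateNode() behaviour while replacing the linear scan over all makers with a single keyed lookup. A usage sketch mirroring the QueryNetwork call site further down (layer and extensionManager stand for values available there):

    MKLDNNWeightsSharing::Ptr cache;
    mkldnn::engine eng(mkldnn::engine::kind::cpu, 0);
    std::unique_ptr<MKLDNNNode> node(
        MKLDNNNode::factory().create(layer, eng, extensionManager, cache));
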
index be994f7..f5f6953 100644 (file)
@@ -8,9 +8,9 @@
 #include <memory>
 #include <vector>
 #include <string>
-#include <map>
 #include <cassert>
 #include <algorithm>
+#include <caseless.hpp>
 #include <ie_common.h>
 #include "mkldnn_dims.h"
 #include "mkldnn_memory.h"
@@ -28,12 +28,6 @@ namespace MKLDNNPlugin {
 using MKLDNNNodePtr = std::shared_ptr<MKLDNNNode>;
 using MKLDNNNodeWeakPtr = std::weak_ptr<MKLDNNNode>;
 
-using CreatorByLayerFunction = std::function<MKLDNNNode *(const InferenceEngine::CNNLayerPtr& layer,
-        const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &w_cache)>;
-struct MKLDNNNodesHolder {
-    std::map<std::string, CreatorByLayerFunction> nodes;
-};
-
 enum Type {
     Unknown,
     Generic,
@@ -266,11 +260,11 @@ private:
 
 class MKLDNNNode : public InferenceEngine::details::no_copy {
 public:
-    static void AddNode(const std::string& name, CreatorByLayerFunction factory);
-    static std::shared_ptr<MKLDNNNodesHolder> GetNodesHolder();
+    class Factory;
+    template<typename To>
+    class Registrar;
 
-    static MKLDNNNode* CreateNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
-                                  const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache);
+    static Factory & factory();
 
     ~MKLDNNNode() override = default;
 
@@ -483,20 +477,6 @@ public:
         return desc.outputNumbers();
     }
 
-    template<typename To>
-    class Register {
-    public:
-        explicit Register(const std::string& type) {
-            MKLDNNNode::AddNode(type,
-                    [](const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
-                            MKLDNNWeightsSharing::Ptr &w_cache)
-                    -> MKLDNNNode* {
-                        return new To(layer, eng, w_cache);
-                    });
-        }
-    };
-
-
 protected:
     // TODO: It is necessary only in order to avoid modifications of cnnLayers and original topology
     std::vector<MKLDNNDims> outDims;
@@ -610,8 +590,39 @@ private:
     ConstantType checkConstant(LOOK look, std::vector<MKLDNNNodePtr>& checkNodes);
 };
 
+class MKLDNNNode::Factory : InferenceEngine::details::no_copy {
+public:
+    using builder_t = std::function<MKLDNNNode *(const InferenceEngine::CNNLayerPtr& layer,
+        const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &w_cache)>;
+
+    MKLDNNNode* create(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
+                       const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache);
+
+    void registerNode(Type type, builder_t builder);
+
+private:
+    using map_t = std::unordered_map<Type, builder_t,
+        std::hash<std::underlying_type<MKLDNNPlugin::Type>::type>>;
+    map_t builders;
+};
+
+template<typename To>
+class MKLDNNNode::Registrar {
+public:
+    explicit Registrar(Type type) {
+        MKLDNNNode::factory().registerNode(type,
+                [type](const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
+                    MKLDNNWeightsSharing::Ptr &w_cache) -> MKLDNNNode* {
+                    MKLDNNNode *node = new To(layer, eng, w_cache);
+                    return node;
+                });
+    }
+};
+
+#define REG_MKLDNN_CONCAT2(X, Y) X ## Y
+#define REG_MKLDNN_CONCAT(X, Y) REG_MKLDNN_CONCAT2(X, Y)
 #define REG_MKLDNN_PRIM_FOR(__prim, __type) \
-static MKLDNNNode::Register<__prim> __reg__##__type(#__type)
+static MKLDNNNode::Registrar<__prim> REG_MKLDNN_CONCAT(_reg_, __LINE__)(__type)
 
 template <typename T, typename U>
 inline T div_up(const T a, const U b) {
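
With registration keyed by the Type enum rather than a string, the macro now derives the unique static's name from __LINE__; the two-level REG_MKLDNN_CONCAT indirection forces __LINE__ to expand before ## pastes the tokens. For illustration, a registration such as

    REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concatenation);

appearing on line 658 expands to roughly

    static MKLDNNNode::Registrar<MKLDNNConcatNode> _reg_658(Concatenation);

(the line number is illustrative); without the indirection the paste would yield the literal name _reg___LINE__.
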
index f9e7fa5..57d23d3 100644 (file)
@@ -294,7 +294,7 @@ void Engine::QueryNetwork(const ICNNNetwork& network, const std::map<std::string
             auto layerIsSupported = [&] {
                 std::unique_ptr<MKLDNNNode> ptr;
                 try {
-                    ptr.reset(MKLDNNNode::CreateNode(*itLayer, {mkldnn::engine::kind::cpu, 0}, extensionManager, fake_w_cache));
+                    ptr.reset(MKLDNNNode::factory().create(*itLayer, {mkldnn::engine::kind::cpu, 0}, extensionManager, fake_w_cache));
                 } catch (InferenceEngine::details::InferenceEngineException&) {
                      return false;
                 }
@@ -339,7 +339,7 @@ void Engine::QueryNetwork(const ICNNNetwork& network, const std::map<std::string
             try {
                 mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0));
                 // if we can create and have not thrown exception, then layer is supported
-                std::unique_ptr <MKLDNNNode>(MKLDNNNode::CreateNode(*i, eng, extensionManager, fake_w_cache));
+                std::unique_ptr <MKLDNNNode>(MKLDNNNode::factory().create(*i, eng, extensionManager, fake_w_cache));
                 res.supportedLayersMap.insert({ (*i)->name, GetName() });
             } catch (InferenceEngine::details::InferenceEngineException&) {
             }
index b119863..c639fbf 100644 (file)
@@ -655,4 +655,4 @@ void MKLDNNConcatNode::execute(mkldnn::stream strm) {
     }
 }
 
-REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concat);
+REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concatenation);
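
The remaining registration changes follow from the macro's second argument now being a Type enumerator rather than a string: each registration must name an existing enum value (hence Concatenation here, and Lrn, ROIPooling, SoftMax below), and node classes that handle several layer types now register once per type (Input/Output, Reshape/Flatten, RNNCell/RNNSeq).
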
index f3f0193..7dcab5e 100644 (file)
@@ -29,7 +29,7 @@ public:
     }
 
 private:
-    static Register<MKLDNNDeformableConvolutionNode> reg;
+    static Registrar<MKLDNNDeformableConvolutionNode> reg;
     bool withBiases = false;
     bool isDW = false;
     bool isMerged = false;
index 8c1ef5d..56ca8bc 100644 (file)
@@ -200,3 +200,4 @@ void MKLDNNInputNode::execute(mkldnn::stream strm) {
 }
 
 REG_MKLDNN_PRIM_FOR(MKLDNNInputNode, Input);
+REG_MKLDNN_PRIM_FOR(MKLDNNInputNode, Output);
index 73d5060..7a1186e 100644 (file)
@@ -92,4 +92,4 @@ void MKLDNNLrnNode::createDescriptor(const std::vector<InferenceEngine::TensorDe
             new lrn_forward::desc(prop_kind::forward_scoring, alg, in_candidate, size, alpha, beta, k)));
     descs.push_back(desc);
 }
-REG_MKLDNN_PRIM_FOR(MKLDNNLrnNode, LRN);
+REG_MKLDNN_PRIM_FOR(MKLDNNLrnNode, Lrn);
index 14a1c47..b96e145 100644 (file)
@@ -84,7 +84,7 @@ class MKLDNNMemoryOutputNode : public MKLDNNNode, public MKLDNNMemoryNode {
      * @brief keeps reference to input sibling node
      */
     MKLDNNNode* inputNode = nullptr;
-    static Register<MKLDNNMemoryOutputNode> reg;
+    static Registrar<MKLDNNMemoryOutputNode> reg;
     MKLDNNMemoryNodeVirtualEdge::Holder* holder = nullptr;
 };
 
@@ -106,7 +106,7 @@ public:
     MKLDNNMemoryPtr getStore();
  private:
     MKLDNNMemoryPtr dataStore;
-    static Register<MKLDNNMemoryInputNode> reg;
+    static Registrar<MKLDNNMemoryInputNode> reg;
     MKLDNNMemoryNodeVirtualEdge::Holder* holder = nullptr;
 };
 #endif
index 535044e..9f8ec6c 100644 (file)
@@ -69,3 +69,4 @@ bool MKLDNNReshapeNode::created() const {
     return getType() == Reshape || getType() == Flatten;
 }
 REG_MKLDNN_PRIM_FOR(MKLDNNReshapeNode, Reshape);
+REG_MKLDNN_PRIM_FOR(MKLDNNReshapeNode, Flatten);
index 9fb34e6..d1ef1b2 100644 (file)
@@ -523,5 +523,6 @@ void MKLDNNRNN::execute(mkldnn::stream strm) {
         strm.submit({exec_after.begin(), exec_after.end()});
 }
 
-REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNN);
+REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNNCell);
+REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNNSeq);
 }  // namespace MKLDNNPlugin
index fafb5c8..3783060 100644 (file)
@@ -112,4 +112,4 @@ void MKLDNNROIPoolingNode::createDescriptor(const std::vector<InferenceEngine::T
                                           spatial_scale)));
     descs.push_back(desc);
 }
-REG_MKLDNN_PRIM_FOR(MKLDNNROIPoolingNode, RoiPooling);
+REG_MKLDNN_PRIM_FOR(MKLDNNROIPoolingNode, ROIPooling);
index fa93bb5..db6d887 100644 (file)
@@ -123,4 +123,4 @@ void MKLDNNSoftMaxNode::createDescriptor(const std::vector<InferenceEngine::Tens
             new softmax_forward::desc(prop_kind::forward_scoring, in_candidate, axis)));
     descs.push_back(desc);
 }
-REG_MKLDNN_PRIM_FOR(MKLDNNSoftMaxNode, Softmax);
+REG_MKLDNN_PRIM_FOR(MKLDNNSoftMaxNode, SoftMax);
index cdb1a30..db88b0c 100644 (file)
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
-using namespace InferenceEngine;
 using namespace InferenceEngine::details;
 
 namespace MKLDNNPlugin {
 
-static LayerConfig make_plain_config(const CNNLayerPtr &layer) {
+static InferenceEngine::LayerConfig make_plain_config(const InferenceEngine::CNNLayerPtr &layer) {
+    using namespace InferenceEngine;
+
     LayerConfig config;
 
     for (const auto &in_w : layer->insData) {
@@ -50,7 +51,7 @@ static LayerConfig make_plain_config(const CNNLayerPtr &layer) {
 class PortIteratorHelper : public PortMapHelper {
 public:
     PortIteratorHelper(const MKLDNNMemoryPtr &from, const MKLDNNMemoryPtr &to,
-            bool as_input, const TensorIterator::PortMap &port_map, const mkldnn::engine& eng, int n_iter) : as_input(as_input) {
+            bool as_input, const InferenceEngine::TensorIterator::PortMap &port_map, const mkldnn::engine& eng, int n_iter) : as_input(as_input) {
         const auto &full_blob = as_input ? from : to;
         const auto &part_blob = !as_input ? from : to;
 
@@ -147,7 +148,7 @@ MKLDNNTensorIteratorNode::MKLDNNTensorIteratorNode(InferenceEngine::CNNLayerPtr
         MKLDNNNode(layer, eng, cache) {}
 
 void MKLDNNTensorIteratorNode::getSupportedDescriptors() {
-    auto *ti = dynamic_cast<class TensorIterator*>(getCnnLayer().get());
+    auto *ti = dynamic_cast<class InferenceEngine::TensorIterator*>(getCnnLayer().get());
     if (ti == nullptr)
         THROW_IE_EXCEPTION << "Cannot convert to TensorIterator layer.";
 
@@ -189,7 +190,7 @@ void MKLDNNTensorIteratorNode::initSupportedPrimitiveDescriptors() {
 
 
 void MKLDNNTensorIteratorNode::createPrimitive() {
-    auto ti = dynamic_cast<class TensorIterator*>(getCnnLayer().get());
+    auto ti = dynamic_cast<class InferenceEngine::TensorIterator*>(getCnnLayer().get());
     if (ti == nullptr)
         THROW_IE_EXCEPTION << "Cannot convert to TensorIterator layer.";
 
index eb47ac0..337abdf 100644 (file)
@@ -482,7 +482,7 @@ TEST_F(MKLDNNGraphGenericTests, canGetPrimitiveDescriptorsList) {
 
     mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0));
     MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache;
-    node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layerPtr, eng, extMgr, cache));
+    node.reset(MKLDNNPlugin::MKLDNNNode::factory().create(layerPtr, eng, extMgr, cache));
     ASSERT_EQ(MKLDNNPlugin::Type::Generic, node->getType());
 
     ASSERT_NO_THROW(node->getSupportedDescriptors());
index 967a762..1aaf085 100644 (file)
@@ -24,7 +24,7 @@ TEST_F(MKLDNNGraphReorderTests, cannotCreatePrimitiveDescriprorsWithoutOtherLaye
 
     InferenceEngine::CNNLayerPtr layer(new InferenceEngine::CNNLayer({"TestReorder", "Reorder", InferenceEngine::Precision::FP32}));
     MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache;
-    node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layer, eng, {}, cache));
+    node.reset(MKLDNNPlugin::MKLDNNNode::factory().create(layer, eng, {}, cache));
     ASSERT_EQ(MKLDNNPlugin::Type::Reorder, node->getType());
 
     ASSERT_THROW(node->getSupportedDescriptors(), InferenceEngine::details::InferenceEngineException);
index 28d9861..75b6261 100644 (file)
@@ -33,7 +33,7 @@ protected:
 TEST_F(MKLDNNPrimitiveTest, DISABLED_canDeleteWeightInweitableLayer) {
     //simulate how convlayer gets created
     engine e(engine::cpu, 0);
-    //auto node = MKLDNNPlugin::MKLDNNNodePtr(MKLDNNPlugin::MKLDNNNode::CreateNode(MKLDNNPlugin::Generic, InferenceEngine::Precision::FP32, ""));
+    //auto node = MKLDNNPlugin::MKLDNNNodePtr(MKLDNNPlugin::MKLDNNNode::factory().create(MKLDNNPlugin::Generic, InferenceEngine::Precision::FP32, ""));
 //    ChildConv *conv = new ChildConv(e);
 //    EXPECT_CALL(*conv, die()).Times(1);