MKLDNN nodes factory improvements (#2252)
author: Vladislav Volkov <vladislav.volkov@intel.com>
Wed, 30 Sep 2020 08:31:19 +0000 (11:31 +0300)
committer: GitHub <noreply@github.com>
Wed, 30 Sep 2020 08:31:19 +0000 (11:31 +0300)
17 files changed:
inference-engine/src/mkldnn_plugin/mkldnn_graph.cpp
inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
inference-engine/src/mkldnn_plugin/mkldnn_node.h
inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_concat_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_def_conv_node.h
inference-engine/src/mkldnn_plugin/nodes/mkldnn_input_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_lrn_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_memory_node.hpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_reshape_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_rnn.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_pooling_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_softmax_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_tensoriterator_node.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/graph_generic_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_reorder_test.cpp
inference-engine/tests_deprecated/unit/engines/mkldnn/mkldnn_primitive_test.cpp

index 6ee13c58c8ddd3d1e410dc6bca883a43aa077ce5..98ea729c71e7522cd560ca148169afd0217bd6f9 100644 (file)
@@ -133,7 +133,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
     for (const auto layer : NetPass::TIBodySortTopologically(subgraph)) {
         CNNLayerPtr _layer = layer;
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(_layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(_layer, getEngine(), extMgr, weightsCache));
         graphNodes.push_back(node);
         layer2node[layer] = node;
 
@@ -162,7 +162,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
         CNNLayerPtr layer(new CNNLayer({"out_" + output->getName(), "Output", output->getTensorDesc().getPrecision()}));
         layer->insData.push_back(output);
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
 
         MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(output), 0));
         node->addEdge(edge);
@@ -182,7 +182,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
         CNNLayerPtr layer(new CNNLayer({"stub_" + parent_layer->name, "Output", to_stub_data->getTensorDesc().getPrecision()}));
         layer->insData.push_back(to_stub_data);
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
 
         MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(to_stub_data), 0));
         node->addEdge(edge);
@@ -197,7 +197,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
         CNNLayerPtr layer(new CNNLayer({"in_" + input->getName(), "Input", input->getTensorDesc().getPrecision()}));
         layer->outData.push_back(input);
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
 
         for (auto p : getInputTo(input)) {
             auto consumer = p.second;
@@ -251,7 +251,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
             _layer->outData = layer->outData;
         }
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(_layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(_layer, getEngine(), extMgr, weightsCache));
         graphNodes.push_back(node);
         layer2node[layer] = node;
 
@@ -289,7 +289,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
         CNNLayerPtr layer(new CNNLayer({"out_" + output.first, "Output", data->getTensorDesc().getPrecision()}));
         layer->insData.push_back(data);
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
 
         MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(data), 0));
         node->addEdge(edge);
@@ -309,7 +309,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
         CNNLayerPtr layer(new CNNLayer({"stub_" + parent_layer->name, "Output", to_stub_data->getTensorDesc().getPrecision()}));
         layer->insData.push_back(to_stub_data);
 
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
 
         MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(to_stub_data), 0));
         node->addEdge(edge);
index 6f877538cdda8116ec9352c2b63cc90bd04d1a67..d95643116a4e3ab85beecfd6dc8c7977ab1c98a1 100644 (file)
@@ -5,12 +5,10 @@
 #include "mkldnn_node.h"
 #include "mkldnn_extension_mngr.h"
 
-#include "caseless.hpp"
 #include <vector>
 #include <string>
 #include <limits>
 #include <cstdint>
-#include <unordered_map>
 
 #include <nodes/mkldnn_batchnorm_node.h>
 #include <nodes/mkldnn_concat_node.h>
@@ -150,13 +148,9 @@ Type TypeFromName(const std::string type) {
 
 }  //  namespace MKLDNNPlugin
 
-std::shared_ptr<MKLDNNNodesHolder> MKLDNNNode::GetNodesHolder() {
-    static std::shared_ptr<MKLDNNNodesHolder> localHolder = std::make_shared<MKLDNNNodesHolder>();
-    return localHolder;
-}
-
-void MKLDNNNode::AddNode(const std::string& name, CreatorByLayerFunction factory) {
-    GetNodesHolder()->nodes[name] = factory;
+MKLDNNNode::Factory & MKLDNNNode::factory() {
+    static Factory factoryInstance;
+    return factoryInstance;
 }
 
 MKLDNNNode::MKLDNNNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
@@ -260,41 +254,6 @@ void MKLDNNNode::remove() {
     }
 }
 
-MKLDNNNode* MKLDNNNode::CreateNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
-                                   const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) {
-    MKLDNNNode *newNode = nullptr;
-    auto nodesHolder = GetNodesHolder();
-
-    if (nodesHolder->nodes.find("Generic") != nodesHolder->nodes.end()) {
-        std::unique_ptr<MKLDNNNode> ol(nodesHolder->nodes["Generic"](layer, eng, w_cache));
-        if (ol != nullptr && ol->created(extMgr))
-            newNode = ol.release();
-    }
-    if (newNode == nullptr) {
-        for (auto maker : nodesHolder->nodes) {
-            std::unique_ptr<MKLDNNNode> ol(maker.second(layer, eng, w_cache));
-            if (ol != nullptr && ol->created(extMgr)) {
-                newNode = ol.release();
-                break;
-            }
-        }
-    }
-
-    //  WA-start : TI node requires all attributes to construct internal subgpath
-    //             including extManager, socket and mkldnn::eng.
-#if defined (COMPILED_CPU_MKLDNN_TENSORITERATOR_NODE)
-    MKLDNNTensorIteratorNode *ti = dynamic_cast<MKLDNNTensorIteratorNode*>(newNode);
-    if (ti != nullptr)
-        ti->setExtManager(extMgr);
-#endif
-    //  WA-end
-
-    if (!newNode)
-        THROW_IE_EXCEPTION << "Unsupported primitive of type: " << layer->type << " name: " << layer->name;
-
-    return newNode;
-}
-
 bool MKLDNNNode::isEdgesEmpty(const std::vector<MKLDNNEdgeWeakPtr>& edges) const {
     for (auto &edge : edges) {
         if (edge.lock())
@@ -1157,3 +1116,44 @@ Layout MKLDNNNode::getWeightsLayoutByDims(SizeVector dims, bool isGrouped) {
 void MKLDNNNode::appendPostOps(mkldnn::post_ops& ops) {
     THROW_IE_EXCEPTION << "Fusing of " << this->getType() << " operation is not implemented";
 }
+
+MKLDNNNode* MKLDNNNode::Factory::create(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
+                                        const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) {
+    MKLDNNNode *newNode = nullptr;
+
+    auto builder = builders.find(Generic);
+
+    if (builder != builders.end()) {
+        std::unique_ptr<MKLDNNNode> ol(builder->second(layer, eng, w_cache));
+        if (ol != nullptr && ol->created(extMgr))
+            newNode = ol.release();
+    }
+
+    if (newNode == nullptr) {
+        builder = builders.find(TypeFromName(layer->type));
+
+        if (builder != builders.end()) {
+            std::unique_ptr<MKLDNNNode> ol(builder->second(layer, eng, w_cache));
+            if (ol != nullptr && ol->created(extMgr))
+                newNode = ol.release();
+        }
+    }
+
+    //  WA-start : TI node requires all attributes to construct internal subgraph
+    //             including extManager, socket and mkldnn::eng.
+#if defined (COMPILED_CPU_MKLDNN_TENSORITERATOR_NODE)
+    MKLDNNTensorIteratorNode *ti = dynamic_cast<MKLDNNTensorIteratorNode*>(newNode);
+    if (ti != nullptr)
+        ti->setExtManager(extMgr);
+#endif
+    //  WA-end
+
+    if (!newNode)
+        THROW_IE_EXCEPTION << "Unsupported primitive of type: " << layer->type << " name: " << layer->name;
+
+    return newNode;
+}
+
+void MKLDNNNode::Factory::registerNode(Type type, builder_t builder) {
+    builders[type] = builder;
+}
index be994f7b46fdee618837c3ead2f262e7749ad523..f5f695383f2429234da7226eb985224181a78837 100644 (file)
@@ -8,9 +8,9 @@
 #include <memory>
 #include <vector>
 #include <string>
-#include <map>
 #include <cassert>
 #include <algorithm>
+#include <caseless.hpp>
 #include <ie_common.h>
 #include "mkldnn_dims.h"
 #include "mkldnn_memory.h"
@@ -28,12 +28,6 @@ namespace MKLDNNPlugin {
 using MKLDNNNodePtr = std::shared_ptr<MKLDNNNode>;
 using MKLDNNNodeWeakPtr = std::weak_ptr<MKLDNNNode>;
 
-using CreatorByLayerFunction = std::function<MKLDNNNode *(const InferenceEngine::CNNLayerPtr& layer,
-        const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &w_cache)>;
-struct MKLDNNNodesHolder {
-    std::map<std::string, CreatorByLayerFunction> nodes;
-};
-
 enum Type {
     Unknown,
     Generic,
@@ -266,11 +260,11 @@ private:
 
 class MKLDNNNode : public InferenceEngine::details::no_copy {
 public:
-    static void AddNode(const std::string& name, CreatorByLayerFunction factory);
-    static std::shared_ptr<MKLDNNNodesHolder> GetNodesHolder();
+    class Factory;
+    template<typename To>
+    class Registrar;
 
-    static MKLDNNNode* CreateNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
-                                  const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache);
+    static Factory & factory();
 
     ~MKLDNNNode() override = default;
 
@@ -483,20 +477,6 @@ public:
         return desc.outputNumbers();
     }
 
-    template<typename To>
-    class Register {
-    public:
-        explicit Register(const std::string& type) {
-            MKLDNNNode::AddNode(type,
-                    [](const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
-                            MKLDNNWeightsSharing::Ptr &w_cache)
-                    -> MKLDNNNode* {
-                        return new To(layer, eng, w_cache);
-                    });
-        }
-    };
-
-
 protected:
     // TODO: It is necessary only in order to avoid modifications of cnnLayers and original topology
     std::vector<MKLDNNDims> outDims;
@@ -610,8 +590,39 @@ private:
     ConstantType checkConstant(LOOK look, std::vector<MKLDNNNodePtr>& checkNodes);
 };
 
+class MKLDNNNode::Factory : InferenceEngine::details::no_copy {
+public:
+    using builder_t = std::function<MKLDNNNode *(const InferenceEngine::CNNLayerPtr& layer,
+        const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &w_cache)>;
+
+    MKLDNNNode* create(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
+                       const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache);
+
+    void registerNode(Type type, builder_t builder);
+
+private:
+    using map_t = std::unordered_map<Type, builder_t,
+        std::hash<std::underlying_type<MKLDNNPlugin::Type>::type>>;
+    map_t builders;
+};
+
+template<typename To>
+class MKLDNNNode::Registrar {
+public:
+    explicit Registrar(Type type) {
+        MKLDNNNode::factory().registerNode(type,
+                [type](const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
+                    MKLDNNWeightsSharing::Ptr &w_cache) -> MKLDNNNode* {
+                    MKLDNNNode *node = new To(layer, eng, w_cache);
+                    return node;
+                });
+    }
+};
+
+#define REG_MKLDNN_CONCAT2(X, Y) X ## Y
+#define REG_MKLDNN_CONCAT(X, Y) REG_MKLDNN_CONCAT2(X, Y)
 #define REG_MKLDNN_PRIM_FOR(__prim, __type) \
-static MKLDNNNode::Register<__prim> __reg__##__type(#__type)
+static MKLDNNNode::Registrar<__prim> REG_MKLDNN_CONCAT(_reg_, __LINE__)(__type)
 
 template <typename T, typename U>
 inline T div_up(const T a, const U b) {
index f9e7fa58e740a7617bff71970b71592e7f6da548..57d23d3c6b172b460f310c05821748c1e893b4d6 100644 (file)
@@ -294,7 +294,7 @@ void Engine::QueryNetwork(const ICNNNetwork& network, const std::map<std::string
             auto layerIsSupported = [&] {
                 std::unique_ptr<MKLDNNNode> ptr;
                 try {
-                    ptr.reset(MKLDNNNode::CreateNode(*itLayer, {mkldnn::engine::kind::cpu, 0}, extensionManager, fake_w_cache));
+                    ptr.reset(MKLDNNNode::factory().create(*itLayer, {mkldnn::engine::kind::cpu, 0}, extensionManager, fake_w_cache));
                 } catch (InferenceEngine::details::InferenceEngineException&) {
                      return false;
                 }
@@ -339,7 +339,7 @@ void Engine::QueryNetwork(const ICNNNetwork& network, const std::map<std::string
             try {
                 mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0));
                 // if we can create and have not thrown exception, then layer is supported
-                std::unique_ptr <MKLDNNNode>(MKLDNNNode::CreateNode(*i, eng, extensionManager, fake_w_cache));
+                std::unique_ptr <MKLDNNNode>(MKLDNNNode::factory().create(*i, eng, extensionManager, fake_w_cache));
                 res.supportedLayersMap.insert({ (*i)->name, GetName() });
             } catch (InferenceEngine::details::InferenceEngineException&) {
             }
index b1198636ccebcc2ea18ab96ee31e20e1cb318460..c639fbfa7b70a85d3afcb2e263ccf3c150acb440 100644 (file)
@@ -655,4 +655,4 @@ void MKLDNNConcatNode::execute(mkldnn::stream strm) {
     }
 }
 
-REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concat);
+REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concatenation);
index f3f019314db7569a64e1e9c329489dcfdf29d342..7dcab5e81ffef6126a18518e0fdb50fa200394a7 100644 (file)
@@ -29,7 +29,7 @@ public:
     }
 
 private:
-    static Register<MKLDNNDeformableConvolutionNode> reg;
+    static Registrar<MKLDNNDeformableConvolutionNode> reg;
     bool withBiases = false;
     bool isDW = false;
     bool isMerged = false;
index 8c1ef5d1c8f768c58a5ec9748edf74ff73986926..56ca8bc9174ca4b297abf693c45d19b36b9afa3b 100644 (file)
@@ -200,3 +200,4 @@ void MKLDNNInputNode::execute(mkldnn::stream strm) {
 }
 
 REG_MKLDNN_PRIM_FOR(MKLDNNInputNode, Input);
+REG_MKLDNN_PRIM_FOR(MKLDNNInputNode, Output);
index 73d506011ba8473dcc1c0a85467aae73709e1673..7a1186e9414a41e7aa363c1d4a87945035e06981 100644 (file)
@@ -92,4 +92,4 @@ void MKLDNNLrnNode::createDescriptor(const std::vector<InferenceEngine::TensorDe
             new lrn_forward::desc(prop_kind::forward_scoring, alg, in_candidate, size, alpha, beta, k)));
     descs.push_back(desc);
 }
-REG_MKLDNN_PRIM_FOR(MKLDNNLrnNode, LRN);
+REG_MKLDNN_PRIM_FOR(MKLDNNLrnNode, Lrn);
index 14a1c4792f652c5b4c5f13768bd2f9462b3b1700..b96e14545fcc3390c789e4b0802c433524d23ac7 100644 (file)
@@ -84,7 +84,7 @@ class MKLDNNMemoryOutputNode : public MKLDNNNode, public MKLDNNMemoryNode {
      * @brief keeps reference to input sibling node
      */
     MKLDNNNode* inputNode = nullptr;
-    static Register<MKLDNNMemoryOutputNode> reg;
+    static Registrar<MKLDNNMemoryOutputNode> reg;
     MKLDNNMemoryNodeVirtualEdge::Holder* holder = nullptr;
 };
 
@@ -106,7 +106,7 @@ public:
     MKLDNNMemoryPtr getStore();
  private:
     MKLDNNMemoryPtr dataStore;
-    static Register<MKLDNNMemoryInputNode> reg;
+    static Registrar<MKLDNNMemoryInputNode> reg;
     MKLDNNMemoryNodeVirtualEdge::Holder* holder = nullptr;
 };
 #endif
index 535044e870863aa6f93176487609b840b28379a3..9f8ec6c51747779c4eee2725686c24deba5f8729 100644 (file)
@@ -69,3 +69,4 @@ bool MKLDNNReshapeNode::created() const {
     return getType() == Reshape || getType() == Flatten;
 }
 REG_MKLDNN_PRIM_FOR(MKLDNNReshapeNode, Reshape);
+REG_MKLDNN_PRIM_FOR(MKLDNNReshapeNode, Flatten);
index 9fb34e6fff39c9ea674472bb2b1d53353c047728..d1ef1b28595943d9771a2f7a6be0a04f6904f61a 100644 (file)
@@ -523,5 +523,6 @@ void MKLDNNRNN::execute(mkldnn::stream strm) {
         strm.submit({exec_after.begin(), exec_after.end()});
 }
 
-REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNN);
+REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNNCell);
+REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNNSeq);
 }  // namespace MKLDNNPlugin
index fafb5c89515321f6891a4f1cf9f77126cda8a6b1..3783060b9d617a0e7502a7c0948383f07132d524 100644 (file)
@@ -112,4 +112,4 @@ void MKLDNNROIPoolingNode::createDescriptor(const std::vector<InferenceEngine::T
                                           spatial_scale)));
     descs.push_back(desc);
 }
-REG_MKLDNN_PRIM_FOR(MKLDNNROIPoolingNode, RoiPooling);
+REG_MKLDNN_PRIM_FOR(MKLDNNROIPoolingNode, ROIPooling);
index fa93bb5857ff3426b14487b2792391b11696153c..db6d8874c31f35d7d57a1994b2cde7553c53e6cb 100644 (file)
@@ -123,4 +123,4 @@ void MKLDNNSoftMaxNode::createDescriptor(const std::vector<InferenceEngine::Tens
             new softmax_forward::desc(prop_kind::forward_scoring, in_candidate, axis)));
     descs.push_back(desc);
 }
-REG_MKLDNN_PRIM_FOR(MKLDNNSoftMaxNode, Softmax);
+REG_MKLDNN_PRIM_FOR(MKLDNNSoftMaxNode, SoftMax);
index cdb1a30ca1828c90100c64fa4d7da7902ae7f5d6..db88b0c2d287d3c7d53add79bf8fd398ff7736f5 100644 (file)
 
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
-using namespace InferenceEngine;
 using namespace InferenceEngine::details;
 
 namespace MKLDNNPlugin {
 
-static LayerConfig make_plain_config(const CNNLayerPtr &layer) {
+static InferenceEngine::LayerConfig make_plain_config(const InferenceEngine::CNNLayerPtr &layer) {
+    using namespace InferenceEngine;
+
     LayerConfig config;
 
     for (const auto &in_w : layer->insData) {
@@ -50,7 +51,7 @@ static LayerConfig make_plain_config(const CNNLayerPtr &layer) {
 class PortIteratorHelper : public PortMapHelper {
 public:
     PortIteratorHelper(const MKLDNNMemoryPtr &from, const MKLDNNMemoryPtr &to,
-            bool as_input, const TensorIterator::PortMap &port_map, const mkldnn::engine& eng, int n_iter) : as_input(as_input) {
+            bool as_input, const InferenceEngine::TensorIterator::PortMap &port_map, const mkldnn::engine& eng, int n_iter) : as_input(as_input) {
         const auto &full_blob = as_input ? from : to;
         const auto &part_blob = !as_input ? from : to;
 
@@ -147,7 +148,7 @@ MKLDNNTensorIteratorNode::MKLDNNTensorIteratorNode(InferenceEngine::CNNLayerPtr
         MKLDNNNode(layer, eng, cache) {}
 
 void MKLDNNTensorIteratorNode::getSupportedDescriptors() {
-    auto *ti = dynamic_cast<class TensorIterator*>(getCnnLayer().get());
+    auto *ti = dynamic_cast<class InferenceEngine::TensorIterator*>(getCnnLayer().get());
     if (ti == nullptr)
         THROW_IE_EXCEPTION << "Cannot convert to TensorIterator layer.";
 
@@ -189,7 +190,7 @@ void MKLDNNTensorIteratorNode::initSupportedPrimitiveDescriptors() {
 
 
 void MKLDNNTensorIteratorNode::createPrimitive() {
-    auto ti = dynamic_cast<class TensorIterator*>(getCnnLayer().get());
+    auto ti = dynamic_cast<class InferenceEngine::TensorIterator*>(getCnnLayer().get());
     if (ti == nullptr)
         THROW_IE_EXCEPTION << "Cannot convert to TensorIterator layer.";
 
index eb47ac0546f2fa36e21fd5c272c79f947a3fad42..337abdf7440143c5ac8ba7eefe4c0cbf34b74f19 100644 (file)
@@ -482,7 +482,7 @@ TEST_F(MKLDNNGraphGenericTests, canGetPrimitiveDescriptorsList) {
 
     mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0));
     MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache;
-    node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layerPtr, eng, extMgr, cache));
+    node.reset(MKLDNNPlugin::MKLDNNNode::factory().create(layerPtr, eng, extMgr, cache));
     ASSERT_EQ(MKLDNNPlugin::Type::Generic, node->getType());
 
     ASSERT_NO_THROW(node->getSupportedDescriptors());
index 967a762e8b9e2d8bfbc4e0b7fbf4e138767f118b..1aaf08537a0479be5470915f2c984dc7ef61ad4a 100644 (file)
@@ -24,7 +24,7 @@ TEST_F(MKLDNNGraphReorderTests, cannotCreatePrimitiveDescriprorsWithoutOtherLaye
 
     InferenceEngine::CNNLayerPtr layer(new InferenceEngine::CNNLayer({"TestReorder", "Reorder", InferenceEngine::Precision::FP32}));
     MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache;
-    node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layer, eng, {}, cache));
+    node.reset(MKLDNNPlugin::MKLDNNNode::factory().create(layer, eng, {}, cache));
     ASSERT_EQ(MKLDNNPlugin::Type::Reorder, node->getType());
 
     ASSERT_THROW(node->getSupportedDescriptors(), InferenceEngine::details::InferenceEngineException);
index 28d9861d4df6cdb3d2e1cc7576b1ae0809b9e70c..75b6261f3aae5c7cb83e05f79c3811507233f0e2 100644 (file)
@@ -33,7 +33,7 @@ protected:
 TEST_F(MKLDNNPrimitiveTest, DISABLED_canDeleteWeightInweitableLayer) {
     //simulate how convlayer gets created
     engine e(engine::cpu, 0);
-    //auto node = MKLDNNPlugin::MKLDNNNodePtr(MKLDNNPlugin::MKLDNNNode::CreateNode(MKLDNNPlugin::Generic, InferenceEngine::Precision::FP32, ""));
+    //auto node = MKLDNNPlugin::MKLDNNNodePtr(MKLDNNPlugin::MKLDNNNode::factory().create(MKLDNNPlugin::Generic, InferenceEngine::Precision::FP32, ""));
 //    ChildConv *conv = new ChildConv(e);
 //    EXPECT_CALL(*conv, die()).Times(1);