for (const auto layer : NetPass::TIBodySortTopologically(subgraph)) {
CNNLayerPtr _layer = layer;
- const MKLDNNNodePtr node(MKLDNNNode::CreateNode(_layer, getEngine(), extMgr, weightsCache));
+ const MKLDNNNodePtr node(MKLDNNNode::factory().create(_layer, getEngine(), extMgr, weightsCache));
graphNodes.push_back(node);
layer2node[layer] = node;
CNNLayerPtr layer(new CNNLayer({"out_" + output->getName(), "Output", output->getTensorDesc().getPrecision()}));
layer->insData.push_back(output);
- const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+ const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(output), 0));
node->addEdge(edge);
CNNLayerPtr layer(new CNNLayer({"stub_" + parent_layer->name, "Output", to_stub_data->getTensorDesc().getPrecision()}));
layer->insData.push_back(to_stub_data);
- const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+ const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(to_stub_data), 0));
node->addEdge(edge);
CNNLayerPtr layer(new CNNLayer({"in_" + input->getName(), "Input", input->getTensorDesc().getPrecision()}));
layer->outData.push_back(input);
- const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+ const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
for (auto p : getInputTo(input)) {
auto consumer = p.second;
_layer->outData = layer->outData;
}
- const MKLDNNNodePtr node(MKLDNNNode::CreateNode(_layer, getEngine(), extMgr, weightsCache));
+ const MKLDNNNodePtr node(MKLDNNNode::factory().create(_layer, getEngine(), extMgr, weightsCache));
graphNodes.push_back(node);
layer2node[layer] = node;
CNNLayerPtr layer(new CNNLayer({"out_" + output.first, "Output", data->getTensorDesc().getPrecision()}));
layer->insData.push_back(data);
- const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+ const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(data), 0));
node->addEdge(edge);
CNNLayerPtr layer(new CNNLayer({"stub_" + parent_layer->name, "Output", to_stub_data->getTensorDesc().getPrecision()}));
layer->insData.push_back(to_stub_data);
- const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+ const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(to_stub_data), 0));
node->addEdge(edge);
#include "mkldnn_node.h"
#include "mkldnn_extension_mngr.h"
-#include "caseless.hpp"
#include <vector>
#include <string>
#include <limits>
#include <cstdint>
-#include <unordered_map>
#include <nodes/mkldnn_batchnorm_node.h>
#include <nodes/mkldnn_concat_node.h>
} // namespace MKLDNNPlugin
-std::shared_ptr<MKLDNNNodesHolder> MKLDNNNode::GetNodesHolder() {
- static std::shared_ptr<MKLDNNNodesHolder> localHolder = std::make_shared<MKLDNNNodesHolder>();
- return localHolder;
-}
-
-void MKLDNNNode::AddNode(const std::string& name, CreatorByLayerFunction factory) {
- GetNodesHolder()->nodes[name] = factory;
+MKLDNNNode::Factory & MKLDNNNode::factory() {
+ static Factory factoryInstance;
+ return factoryInstance;
}
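
The registry accessor also changes shape here: instead of handing out a shared_ptr to a mutable MKLDNNNodesHolder, factory() returns a reference to a function-local static, so the table is constructed on first use and its initialization is thread-safe under C++11. A minimal sketch of the idiom, with illustrative names rather than the patch's own:

    class Registry {
    public:
        static Registry& instance() {
            // Constructed on the first call; C++11 guarantees the
            // initialization runs exactly once, even with concurrent callers.
            static Registry r;
            return r;
        }
    private:
        Registry() = default;  // force all access through instance()
    };
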
MKLDNNNode::MKLDNNNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
}
}
-MKLDNNNode* MKLDNNNode::CreateNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
- const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) {
- MKLDNNNode *newNode = nullptr;
- auto nodesHolder = GetNodesHolder();
-
- if (nodesHolder->nodes.find("Generic") != nodesHolder->nodes.end()) {
- std::unique_ptr<MKLDNNNode> ol(nodesHolder->nodes["Generic"](layer, eng, w_cache));
- if (ol != nullptr && ol->created(extMgr))
- newNode = ol.release();
- }
- if (newNode == nullptr) {
- for (auto maker : nodesHolder->nodes) {
- std::unique_ptr<MKLDNNNode> ol(maker.second(layer, eng, w_cache));
- if (ol != nullptr && ol->created(extMgr)) {
- newNode = ol.release();
- break;
- }
- }
- }
-
- // WA-start : TI node requires all attributes to construct internal subgpath
- // including extManager, socket and mkldnn::eng.
-#if defined (COMPILED_CPU_MKLDNN_TENSORITERATOR_NODE)
- MKLDNNTensorIteratorNode *ti = dynamic_cast<MKLDNNTensorIteratorNode*>(newNode);
- if (ti != nullptr)
- ti->setExtManager(extMgr);
-#endif
- // WA-end
-
- if (!newNode)
- THROW_IE_EXCEPTION << "Unsupported primitive of type: " << layer->type << " name: " << layer->name;
-
- return newNode;
-}
-
bool MKLDNNNode::isEdgesEmpty(const std::vector<MKLDNNEdgeWeakPtr>& edges) const {
for (auto &edge : edges) {
if (edge.lock())
void MKLDNNNode::appendPostOps(mkldnn::post_ops& ops) {
THROW_IE_EXCEPTION << "Fusing of " << this->getType() << " operation is not implemented";
}
+
+MKLDNNNode* MKLDNNNode::Factory::create(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
+ const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) {
+ MKLDNNNode *newNode = nullptr;
+
+ auto builder = builders.find(Generic);
+
+ if (builder != builders.end()) {
+ std::unique_ptr<MKLDNNNode> ol(builder->second(layer, eng, w_cache));
+ if (ol != nullptr && ol->created(extMgr))
+ newNode = ol.release();
+ }
+
+ if (newNode == nullptr) {
+ builder = builders.find(TypeFromName(layer->type));
+
+ if (builder != builders.end()) {
+ std::unique_ptr<MKLDNNNode> ol(builder->second(layer, eng, w_cache));
+ if (ol != nullptr && ol->created(extMgr))
+ newNode = ol.release();
+ }
+ }
+
+ // WA-start : TI node requires all attributes to construct internal subgraph
+ // including extManager, socket and mkldnn::eng.
+#if defined (COMPILED_CPU_MKLDNN_TENSORITERATOR_NODE)
+ MKLDNNTensorIteratorNode *ti = dynamic_cast<MKLDNNTensorIteratorNode*>(newNode);
+ if (ti != nullptr)
+ ti->setExtManager(extMgr);
+#endif
+ // WA-end
+
+ if (!newNode)
+ THROW_IE_EXCEPTION << "Unsupported primitive of type: " << layer->type << " name: " << layer->name;
+
+ return newNode;
+}
+
+void MKLDNNNode::Factory::registerNode(Type type, builder_t builder) {
+ builders[type] = builder;
+}
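
The rewritten create() keeps the old dispatch order: a registered Generic builder (extension-backed nodes) gets first refusal, and only if it declines does lookup fall through to the builder keyed by the layer's resolved Type. A condensed, self-contained sketch of that two-step dispatch, using hypothetical Node/Builder stand-ins rather than the real MKLDNN types:

    #include <functional>
    #include <memory>
    #include <stdexcept>
    #include <string>
    #include <unordered_map>

    enum Type { Generic, Concatenation };                       // illustrative subset
    struct Node {
        virtual ~Node() = default;
        virtual bool created() const { return true; }           // mimics MKLDNNNode::created()
    };
    using Builder = std::function<Node*(const std::string&)>;   // stand-in for builder_t
    using Builders = std::unordered_map<Type, Builder, std::hash<int>>;

    Node* create(const Builders& builders, Type type, const std::string& name) {
        // 1) Give the Generic (extension) builder first refusal.
        auto it = builders.find(Generic);
        if (it != builders.end()) {
            std::unique_ptr<Node> n(it->second(name));
            if (n && n->created()) return n.release();
        }
        // 2) Fall back to the builder registered for the concrete type.
        it = builders.find(type);
        if (it != builders.end()) {
            std::unique_ptr<Node> n(it->second(name));
            if (n && n->created()) return n.release();
        }
        throw std::runtime_error("Unsupported primitive: " + name);
    }
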
#include <memory>
#include <vector>
#include <string>
-#include <map>
+#include <unordered_map>
+#include <type_traits>
#include <cassert>
#include <algorithm>
+#include <caseless.hpp>
#include <ie_common.h>
#include "mkldnn_dims.h"
#include "mkldnn_memory.h"
using MKLDNNNodePtr = std::shared_ptr<MKLDNNNode>;
using MKLDNNNodeWeakPtr = std::weak_ptr<MKLDNNNode>;
-using CreatorByLayerFunction = std::function<MKLDNNNode *(const InferenceEngine::CNNLayerPtr& layer,
- const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &w_cache)>;
-struct MKLDNNNodesHolder {
- std::map<std::string, CreatorByLayerFunction> nodes;
-};
-
enum Type {
Unknown,
Generic,
class MKLDNNNode : public InferenceEngine::details::no_copy {
public:
- static void AddNode(const std::string& name, CreatorByLayerFunction factory);
- static std::shared_ptr<MKLDNNNodesHolder> GetNodesHolder();
+ class Factory;
+ template<typename To>
+ class Registrar;
- static MKLDNNNode* CreateNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
- const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache);
+ static Factory & factory();
~MKLDNNNode() override = default;
return desc.outputNumbers();
}
- template<typename To>
- class Register {
- public:
- explicit Register(const std::string& type) {
- MKLDNNNode::AddNode(type,
- [](const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
- MKLDNNWeightsSharing::Ptr &w_cache)
- -> MKLDNNNode* {
- return new To(layer, eng, w_cache);
- });
- }
- };
-
-
protected:
// TODO: It is necessary only in order to avoid modifications of cnnLayers and original topology
std::vector<MKLDNNDims> outDims;
ConstantType checkConstant(LOOK look, std::vector<MKLDNNNodePtr>& checkNodes);
};
+class MKLDNNNode::Factory : InferenceEngine::details::no_copy {
+public:
+ using builder_t = std::function<MKLDNNNode *(const InferenceEngine::CNNLayerPtr& layer,
+ const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &w_cache)>;
+
+ MKLDNNNode* create(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
+ const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache);
+
+ void registerNode(Type type, builder_t builder);
+
+private:
+ using map_t = std::unordered_map<Type, builder_t,
+ std::hash<std::underlying_type<MKLDNNPlugin::Type>::type>>;
+ map_t builders;
+};
+
+template<typename To>
+class MKLDNNNode::Registrar {
+public:
+ explicit Registrar(Type type) {
+ MKLDNNNode::factory().registerNode(type,
+ [](const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
+ MKLDNNWeightsSharing::Ptr &w_cache) -> MKLDNNNode* {
+ return new To(layer, eng, w_cache);
+ });
+ }
+};
+
+#define REG_MKLDNN_CONCAT2(X, Y) X ## Y
+#define REG_MKLDNN_CONCAT(X, Y) REG_MKLDNN_CONCAT2(X, Y)
#define REG_MKLDNN_PRIM_FOR(__prim, __type) \
-static MKLDNNNode::Register<__prim> __reg__##__type(#__type)
+static MKLDNNNode::Registrar<__prim> REG_MKLDNN_CONCAT(_reg_, __LINE__)(__type)
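
Keying registration by the Type enum means the registrar variable can no longer borrow a stringified class name for uniqueness, so the macro pastes __LINE__ into the identifier instead (the two-level REG_MKLDNN_CONCAT indirection is needed so __LINE__ expands before pasting). This is what lets a single node class register under several types later in this patch (MKLDNNInputNode for Input and Output, MKLDNNReshapeNode for Reshape and Flatten, MKLDNNRNN for RNNCell and RNNSeq). For example, two invocations on, say, lines 120 and 121:

    REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNNCell);
    REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNNSeq);

expand to two distinctly named statics:

    static MKLDNNNode::Registrar<MKLDNNRNN> _reg_120(RNNCell);
    static MKLDNNNode::Registrar<MKLDNNRNN> _reg_121(RNNSeq);

The explicit std::hash<std::underlying_type<Type>::type> in the Factory's map declaration, meanwhile, is a portability workaround for standard libraries that predate std::hash support for enumeration types.
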
template <typename T, typename U>
inline T div_up(const T a, const U b) {
auto layerIsSupported = [&] {
std::unique_ptr<MKLDNNNode> ptr;
try {
- ptr.reset(MKLDNNNode::CreateNode(*itLayer, {mkldnn::engine::kind::cpu, 0}, extensionManager, fake_w_cache));
+ ptr.reset(MKLDNNNode::factory().create(*itLayer, {mkldnn::engine::kind::cpu, 0}, extensionManager, fake_w_cache));
} catch (InferenceEngine::details::InferenceEngineException&) {
return false;
}
try {
mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0));
// if we can create and have not thrown exception, then layer is supported
- std::unique_ptr <MKLDNNNode>(MKLDNNNode::CreateNode(*i, eng, extensionManager, fake_w_cache));
+ std::unique_ptr <MKLDNNNode>(MKLDNNNode::factory().create(*i, eng, extensionManager, fake_w_cache));
res.supportedLayersMap.insert({ (*i)->name, GetName() });
} catch (InferenceEngine::details::InferenceEngineException&) {
}
}
}
-REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concat);
+REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concatenation);
}
private:
- static Register<MKLDNNDeformableConvolutionNode> reg;
+ static Registrar<MKLDNNDeformableConvolutionNode> reg;
bool withBiases = false;
bool isDW = false;
bool isMerged = false;
}
REG_MKLDNN_PRIM_FOR(MKLDNNInputNode, Input);
+REG_MKLDNN_PRIM_FOR(MKLDNNInputNode, Output);
new lrn_forward::desc(prop_kind::forward_scoring, alg, in_candidate, size, alpha, beta, k)));
descs.push_back(desc);
}
-REG_MKLDNN_PRIM_FOR(MKLDNNLrnNode, LRN);
+REG_MKLDNN_PRIM_FOR(MKLDNNLrnNode, Lrn);
* @brief keeps reference to input sibling node
*/
MKLDNNNode* inputNode = nullptr;
- static Register<MKLDNNMemoryOutputNode> reg;
+ static Registrar<MKLDNNMemoryOutputNode> reg;
MKLDNNMemoryNodeVirtualEdge::Holder* holder = nullptr;
};
MKLDNNMemoryPtr getStore();
private:
MKLDNNMemoryPtr dataStore;
- static Register<MKLDNNMemoryInputNode> reg;
+ static Registrar<MKLDNNMemoryInputNode> reg;
MKLDNNMemoryNodeVirtualEdge::Holder* holder = nullptr;
};
#endif
return getType() == Reshape || getType() == Flatten;
}
REG_MKLDNN_PRIM_FOR(MKLDNNReshapeNode, Reshape);
+REG_MKLDNN_PRIM_FOR(MKLDNNReshapeNode, Flatten);
strm.submit({exec_after.begin(), exec_after.end()});
}
-REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNN);
+REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNNCell);
+REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNNSeq);
} // namespace MKLDNNPlugin
spatial_scale)));
descs.push_back(desc);
}
-REG_MKLDNN_PRIM_FOR(MKLDNNROIPoolingNode, RoiPooling);
+REG_MKLDNN_PRIM_FOR(MKLDNNROIPoolingNode, ROIPooling);
new softmax_forward::desc(prop_kind::forward_scoring, in_candidate, axis)));
descs.push_back(desc);
}
-REG_MKLDNN_PRIM_FOR(MKLDNNSoftMaxNode, Softmax);
+REG_MKLDNN_PRIM_FOR(MKLDNNSoftMaxNode, SoftMax);
using namespace mkldnn;
using namespace MKLDNNPlugin;
-using namespace InferenceEngine;
using namespace InferenceEngine::details;
namespace MKLDNNPlugin {
-static LayerConfig make_plain_config(const CNNLayerPtr &layer) {
+static InferenceEngine::LayerConfig make_plain_config(const InferenceEngine::CNNLayerPtr &layer) {
+ using namespace InferenceEngine;
+
LayerConfig config;
for (const auto &in_w : layer->insData) {
class PortIteratorHelper : public PortMapHelper {
public:
PortIteratorHelper(const MKLDNNMemoryPtr &from, const MKLDNNMemoryPtr &to,
- bool as_input, const TensorIterator::PortMap &port_map, const mkldnn::engine& eng, int n_iter) : as_input(as_input) {
+ bool as_input, const InferenceEngine::TensorIterator::PortMap &port_map, const mkldnn::engine& eng, int n_iter) : as_input(as_input) {
const auto &full_blob = as_input ? from : to;
const auto &part_blob = !as_input ? from : to;
MKLDNNNode(layer, eng, cache) {}
void MKLDNNTensorIteratorNode::getSupportedDescriptors() {
- auto *ti = dynamic_cast<class TensorIterator*>(getCnnLayer().get());
+ auto *ti = dynamic_cast<class InferenceEngine::TensorIterator*>(getCnnLayer().get());
if (ti == nullptr)
THROW_IE_EXCEPTION << "Cannot convert to TensorIterator layer.";
void MKLDNNTensorIteratorNode::createPrimitive() {
- auto ti = dynamic_cast<class TensorIterator*>(getCnnLayer().get());
+ auto ti = dynamic_cast<class InferenceEngine::TensorIterator*>(getCnnLayer().get());
if (ti == nullptr)
THROW_IE_EXCEPTION << "Cannot convert to TensorIterator layer.";
mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0));
MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache;
- node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layerPtr, eng, extMgr, cache));
+ node.reset(MKLDNNPlugin::MKLDNNNode::factory().create(layerPtr, eng, extMgr, cache));
ASSERT_EQ(MKLDNNPlugin::Type::Generic, node->getType());
ASSERT_NO_THROW(node->getSupportedDescriptors());
InferenceEngine::CNNLayerPtr layer(new InferenceEngine::CNNLayer({"TestReorder", "Reorder", InferenceEngine::Precision::FP32}));
MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache;
- node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layer, eng, {}, cache));
+ node.reset(MKLDNNPlugin::MKLDNNNode::factory().create(layer, eng, {}, cache));
ASSERT_EQ(MKLDNNPlugin::Type::Reorder, node->getType());
ASSERT_THROW(node->getSupportedDescriptors(), InferenceEngine::details::InferenceEngineException);
TEST_F(MKLDNNPrimitiveTest, DISABLED_canDeleteWeightInweitableLayer) {
//simulate how convlayer gets created
engine e(engine::cpu, 0);
- //auto node = MKLDNNPlugin::MKLDNNNodePtr(MKLDNNPlugin::MKLDNNNode::CreateNode(MKLDNNPlugin::Generic, InferenceEngine::Precision::FP32, ""));
+ //auto node = MKLDNNPlugin::MKLDNNNodePtr(MKLDNNPlugin::MKLDNNNode::factory().create(MKLDNNPlugin::Generic, InferenceEngine::Precision::FP32, ""));
// ChildConv *conv = new ChildConv(e);
// EXPECT_CALL(*conv, die()).Times(1);