// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
5 #include "mkldnn_lrn_node.h"
6 #include "desc_iterator.hpp"
9 #include <mkldnn_extension_utils.h>
11 using namespace mkldnn;
12 using namespace MKLDNNPlugin;
13 using namespace InferenceEngine;
// Trivial constructor: all LRN-specific state (alpha, beta, size, kind) is
// read later in getSupportedDescriptors(); this only forwards to the base node.
MKLDNNLrnNode::MKLDNNLrnNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng) : MKLDNNNode(layer, eng) {}
17 void MKLDNNLrnNode::getSupportedDescriptors() {
20 InferenceEngine::Precision precision = getCnnLayer()->insData[0].lock()->getPrecision();
21 if (precision != InferenceEngine::Precision::FP32)
22 precision = InferenceEngine::Precision::FP32;
23 auto inputDataType = MKLDNNExtensionUtils::IEPrecisionToDataType(precision);
24 auto * lrnLayer = dynamic_cast<NormLayer*>(getCnnLayer().get());
26 if (lrnLayer == nullptr)
27 THROW_IE_EXCEPTION << "Cannot convert lrn layer.";
29 if (getParentEdges().size() != 1)
30 THROW_IE_EXCEPTION << "Incorrect number of input edges for layer " << getName();
31 if (getChildEdges().empty())
32 THROW_IE_EXCEPTION << "Incorrect number of output edges for layer " << getName();
34 isAcrossMaps = lrnLayer->_isAcrossMaps;
35 alpha = lrnLayer->_alpha;
36 beta = lrnLayer->_beta;
37 size = lrnLayer->_size;
40 auto parentDims = getParentEdgeAt(0)->getDims();
42 for (auto format : getAvailableFormatsForDims(parentDims)) {
43 MKLDNNMemoryDesc in_candidate(parentDims, inputDataType, format);
44 createDescriptor({in_candidate}, {});
48 void MKLDNNLrnNode::createPrimitive() {
52 auto prim_desc = createPrimitiveDescriptor<lrn_forward::primitive_desc, lrn_forward::desc>();
54 prim.reset(new lrn_forward(prim_desc, getParentEdgeAt(0)->getMemory().GetPrimitive(),
55 getChildEdgeAt(0)->getMemory().GetPrimitive()));
58 bool MKLDNNLrnNode::created() const {
59 return getType() == Lrn;
62 void MKLDNNLrnNode::initOptimalPrimitiveDescriptor() {
63 auto config = getSelectedPrimitiveDescriptor()->getConfig();
64 if (isInitConfig(config))
67 if (config.inConfs.size() != 1 || config.outConfs.size() != 1 ||
68 (!isUninitTensorDesc(config.inConfs[0].desc) &&
69 !isUninitTensorDesc(config.outConfs[0].desc) && config.inConfs[0].desc != config.outConfs[0].desc))
70 THROW_IE_EXCEPTION << "Layer " << getName() << " has incorrect selected config!";
72 if (!isUninitTensorDesc(config.inConfs[0].desc)) {
73 config.outConfs[0].desc = config.inConfs[0].desc;
74 } else if (!isUninitTensorDesc(config.outConfs[0].desc)) {
75 config.inConfs[0].desc = config.outConfs[0].desc;
77 config.outConfs[0].desc = config.inConfs[0].desc = getConfiguredInputDesc(config, 0);
80 initDescriptor(config);
83 void MKLDNNLrnNode::createDescriptor(const std::vector<InferenceEngine::TensorDesc> &inputDesc,
84 const std::vector<InferenceEngine::TensorDesc> &outputDesc) {
85 algorithm alg = (isAcrossMaps) ? lrn_across_channels : lrn_within_channel;
86 MKLDNNMemoryDesc in_candidate(inputDesc[0]);
87 MKLDNNDescriptor desc(std::shared_ptr<lrn_forward::desc>(
88 new lrn_forward::desc(prop_kind::forward_scoring, alg, in_candidate, size, alpha, beta, k)));
89 descs.push_back(desc);