virtual std::shared_ptr<const ngraph::Function> getFunction() const noexcept = 0;
/**
- * @deprecated Network precision does not make sence, use precision on egdes. The method will be removed in 2021.1
- * @brief Returns the main network operating precision.
- *
- * This may be MIXED if not homogeneous.
- *
- * @return A precision type
- */
- INFERENCE_ENGINE_DEPRECATED("Network precision does not make sence, use precision on egdes. The method will be removed in 2021.1")
- virtual Precision getPrecision() const noexcept = 0;
-
- /**
* @brief Gets the network output Data node information. The received info is stored in the given Data node.
*
* For single and multiple outputs networks.
virtual InputInfo::Ptr getInput(const std::string& inputName) const noexcept = 0;
/**
- * @deprecated Use ICNNNetwork::getName() instead. The method will be removed in 2021.1
- * @brief Gets the network name. The name is stored in the given pName string.
- *
- * @param pName - will receive actual network name, specified in IR file,
- * pName should point to valid memory address before invoking this function
- * @param len - size in bytes of pName buffer, actual name is trimmed by this size
- */
- INFERENCE_ENGINE_DEPRECATED("Use ICNNNetwork::getName() instead. The method will be removed in 2021.1")
- virtual void getName(char* pName, size_t len) const noexcept = 0;
-
- /**
* @brief Returns the network name.
*
* @return Network name
InferenceEngine::ICNNNetwork::Ptr CLDNNGraph::GetExecGraphInfoByPrimitivesInfo(std::vector<cldnn::primitive_info>& primitives_info,
bool filter_const_primitives) {
auto net = std::make_shared<details::CNNNetworkImpl>();
- net->setPrecision(Precision::FP32);
net->setName("runtime_gpu_graph");
if (m_config.useProfiling) {
try {
// one of solution is to create not copyNet overloads, that accepts 2 functors, one for layer copy
// and another one for net copy
auto rawNet = dynamic_cast<InferenceEngine::details::CNNNetworkImpl *>(copiedNet.get());
- if (rawNet != nullptr) {
- rawNet->setPrecision(T::mandatory().getNetPrecision());
- }
// allow client code to access copied topology, to avoid copies if user would like to chain quantisation with
// another preprocessing
InferenceEngine::CNNLayerSet inputLayers;
InferenceEngine::InputsDataMap inputs;
std::unordered_set<InferenceEngine::CNNLayer *> allLayers;
- auto network_precision = network.getPrecision();
network.getInputsInfo(inputs);
IE_ASSERT(!inputs.empty());
auto network_input_precision = inputs.begin()->second->getPrecision();
auto batch_size = network.getBatchSize();
- if (network_precision != InferenceEngine::Precision::FP32 &&
- network_precision != InferenceEngine::Precision::FP16 &&
- network_precision != InferenceEngine::Precision::MIXED) {
- errMessage = "The plugin does not support networks with " +
- std::string(network_precision.name()) + " format. Supported network precisions are FP32, FP16, MIXED\n";
- return false;
- }
if (network_input_precision != InferenceEngine::Precision::FP32 &&
network_input_precision != InferenceEngine::Precision::I16 &&
network_input_precision != InferenceEngine::Precision::U8) {
}
}
- tempNetwork->setPrecision(network.getPrecision());
-
// update of pre-processing info
InputsDataMap clonedInputs;
tempNetwork->getInputsInfo(clonedInputs);
IE_SUPPRESS_DEPRECATED_END
}
-void CNNNetworkNGraphImpl::getName(char* pName, size_t len) const noexcept {
- if (cnnNetwork) {
- cnnNetwork->getName(pName, len);
- return;
- }
- // Description buffer will preserve garbage if external pointer not initialized
- if (len < 1) return;
- memset(pName, 0, len);
- DescriptionBuffer(pName, len) << _ngraph_function->get_friendly_name();
-}
-
const std::string& CNNNetworkNGraphImpl::getName() const noexcept {
if (cnnNetwork) {
return cnnNetwork->getName();
return it->second;
}
-Precision CNNNetworkNGraphImpl::getPrecision() const noexcept {
- return Precision::MIXED;
-}
-
void CNNNetworkNGraphImpl::getOutputsInfo(OutputsDataMap& out) const noexcept {
if (cnnNetwork) {
cnnNetwork->getOutputsInfo(out);
CNNNetworkNGraphImpl(const std::shared_ptr<::ngraph::Function>& nGraph);
~CNNNetworkNGraphImpl() override;
- INFERENCE_ENGINE_DEPRECATED("Use ngraph::Function directly")
- Precision getPrecision() const noexcept override;
void getOutputsInfo(std::map<std::string, DataPtr>& out) const noexcept override;
void getInputsInfo(InputsDataMap& inputs) const noexcept override;
InputInfo::Ptr getInput(const std::string& inputName) const noexcept override;
-
- INFERENCE_ENGINE_DEPRECATED("Use CNNNetworkNGraphImpl::getName() returning std::string")
- void getName(char* pName, size_t len) const noexcept override;
-
const std::string& getName() const noexcept override;
size_t layerCount() const noexcept override;
public:
CNNNetworkImpl();
~CNNNetworkImpl() override;
- Precision getPrecision() const noexcept override {
- return precision;
- }
-
- void setPrecision(Precision::ePrecision prec) {
- precision = prec;
- }
std::shared_ptr<::ngraph::Function> getFunction() noexcept override {
return nullptr;
_inputData.erase(name);
}
- void getName(char* pName, size_t len) const noexcept override {
- // Description buffer will preserve garbage if external pointer not initialized
- if (len < 1) return;
- memset(pName, 0, len);
- DescriptionBuffer(pName, len) << _name;
- }
-
const std::string& getName() const noexcept override {
return _name;
}
noexcept override;
protected:
- Precision precision {Precision::MIXED};
std::map<std::string, DataPtr> _data;
std::map<std::string, CNNLayerPtr> _layers;
InferenceEngine::InputsDataMap _inputData;
inline CNNNetPtr CNNNetCopy(const ICNNNetwork& input, const Copier& cp) {
auto net = std::make_shared<details::CNNNetworkImpl>();
- // setting base args
- IE_SUPPRESS_DEPRECATED_START
- net->setPrecision(input.getPrecision());
- IE_SUPPRESS_DEPRECATED_END
-
net->setName(input.getName());
// rest info is layer dependent so have to create graph clone
// Create network
auto cnnNetworkImpl = std::make_shared<details::CNNNetworkImpl>();
cnnNetworkImpl->setName(graph->get_friendly_name());
- // In generic case all nGraph functions have MIXED precision
- // Network precision should be deprecated
- cnnNetworkImpl->setPrecision(Precision::MIXED);
// Collect all names from current graph
// It is necessary in order to differentiate outputs from constant layers when we share constants
for (auto o : outputInfo) {
net->removeOutput(o.first);
}
- IE_SUPPRESS_DEPRECATED_START
- net->setPrecision(network.getPrecision());
- IE_SUPPRESS_DEPRECATED_END
net->setName(network.getName());
InputsDataMap externalInputsData;
outData = parent->outData[l1_out_i];
precision = context.getOriginalLayerPrecision(parent->name, outData->getName());
- IE_SUPPRESS_DEPRECATED_START
if (precision == Precision::UNSPECIFIED) {
if (child != nullptr)
precision = child->precision;
- else if (context.network.getPrecision() != Precision::MIXED)
- precision = context.network.getPrecision();
else
precision = Precision::FP32;
}
- IE_SUPPRESS_DEPRECATED_END
} else {
// TODO: FIXME
precision = Precision::FP32;
: name;
Precision ssPrecision = context.getOriginalLayerPrecision(parent->name, outData->getName());
- IE_SUPPRESS_DEPRECATED_START
if (ssPrecision == Precision::UNSPECIFIED) {
if (child != nullptr)
ssPrecision = child->precision;
- else if (context.network.getPrecision() != Precision::MIXED)
- ssPrecision = context.network.getPrecision();
else
ssPrecision = Precision::FP32;
}
- IE_SUPPRESS_DEPRECATED_END
LayerParams ssCnnLayerParams {layerName, "ScaleShift", ssPrecision};
CNNLayerPtr ssCnnLayer(new ScaleShiftLayer(ssCnnLayerParams));
// we are cloning network if we have statistics and we can transform network.
_clonedNetwork = cloneNet(network);
- IE_SUPPRESS_DEPRECATED_START
- if (Precision::FP16 == network.getPrecision()) {
- _clonedNetwork->setPrecision(Precision::FP32);
- }
- IE_SUPPRESS_DEPRECATED_END
-
// CPU Plugin doesn't natively support some precision like int64/fp16/bool
// so will convert all layer/tensors fp16->fp32 , bool->u8.
// Default int64->int32 conversion is already applied in IE common module.
std::shared_ptr<ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph) {
auto net = std::make_shared<details::CNNNetworkImpl>();
- net->setPrecision(Precision::FP32);
net->setName(graph._name);
std::map<MKLDNNNodePtr, CNNLayerPtr> node2layer;
weights_blob = transformer.convertBF16ToFloat(tweights);
} else {
// Unknown non supported data type, return an error
- THROW_IE_EXCEPTION << layer->name << "Weights for layer Normalize wiht name '" << layer->name <<
+ THROW_IE_EXCEPTION << "Weights for layer Normalize with name '" << layer->name <<
"' has unsupported data type " << tweights->getTensorDesc().getPrecision();
}
}
_network.reset(new CNNNetworkImpl());
_network->setName(GetStrAttr(root, "name", ""));
_defPrecision = Precision::FromStr(GetStrAttr(root, "precision", "UNSPECIFIED"));
- _network->setPrecision(_defPrecision);
// parse the input Data
DataPtr inputData;
// parse the graph layers
std::vector<CNNLayer::Ptr> inputLayers;
int nodeCnt = 0;
std::map<int, CNNLayer::Ptr> layerById;
- bool identifyNetworkPrecision = _defPrecision == Precision::UNSPECIFIED;
for (auto node = allLayersNode.child("layer"); !node.empty(); node = node.next_sibling("layer")) {
LayerParseParameters lprms;
ParseGenericParams(node, lprms);
inputLayers.push_back(layer);
}
- IE_SUPPRESS_DEPRECATED_START
-
- if (identifyNetworkPrecision) {
- if (!_network->getPrecision()) {
- _network->setPrecision(lprms.prms.precision);
- }
- if (_network->getPrecision() != lprms.prms.precision) {
- _network->setPrecision(Precision::MIXED);
- identifyNetworkPrecision = false;
- }
- }
-
- IE_SUPPRESS_DEPRECATED_END
-
for (int i = 0; i < lprms.outputPorts.size(); i++) {
const auto& outPort = lprms.outputPorts[i];
const auto outId = gen_id(lprms.layerId, outPort.portId);
return found->second;
}
-DataPtr FormatParser::ParseInputData(pugi::xml_node& root) const {
- auto inputNode = root.child("input");
- if (inputNode.empty()) {
- THROW_IE_EXCEPTION << "No input node in network, missing <input>";
- }
-
- auto inputName = GetStrAttr(inputNode, "name", "input");
- SizeVector inputDims;
-
- ParseDims(inputDims, inputNode);
-
- DataPtr& inputData = _network->getData(inputName);
- inputData.reset(new Data(inputName, {_network->getPrecision(), inputDims, TensorDesc::getLayoutByDims(inputDims)}));
- return inputData;
-}
-
void FormatParser::ParsePreProcess(pugi::xml_node& root) {
/*
<pre-process mean-precision="FP32">
private:
size_t _version;
- Precision _defPrecision;
+ Precision _defPrecision = Precision::UNSPECIFIED;
std::vector<std::shared_ptr<BaseCreator>> creators;
std::map<std::string, DataPtr> _portsToData;
void SetLayerInput(CNNNetworkImpl& network, const std::string& data, CNNLayerPtr& targetLayer, int inputPort);
- DataPtr ParseInputData(pugi::xml_node& root) const;
-
void ParsePreProcess(pugi::xml_node& node);
// Generate different set of creators depending on required IR version
InferenceEngine::ICNNNetwork::Ptr buildRuntimeGraph(GraphMetaInfo &graphMetaInfo, const std::vector<float>& perfInfo) {
auto net = std::make_shared<InferenceEngine::details::CNNNetworkImpl>();
- net->setPrecision(Precision::FP16);
net->setName(graphMetaInfo.graphName);
std::map<size_t, CNNLayerPtr> stageMetaIndexToLayer;
static constexpr const char* OUTPUT_BLOB_NAME = "first_output";
const SizeVector OUTPUT_DIMENTIONS = { 1, 3, 299, 299 };
const std::string name = "test";
- Precision getPrecision() const noexcept override {
- return Precision::FP32;
- }
const std::string& getName() const noexcept override {
return name;
}
ASSERT_THROW(network.validateNetwork(), InferenceEngineException);
}
-TEST_F(CNNNetworkImplTest, canGetName) {
- InferenceEngine::details::CNNNetworkImpl net;
- net.setName("myName");
- const char* p = "33333333333";
- char name[20];
- net.getName(name, sizeof(name));
- ASSERT_STREQ(name, "myName");
-}
-
TEST_F(CNNNetworkImplTest, canGetNameStr) {
InferenceEngine::details::CNNNetworkImpl net;
net.setName("myName");
<layer name="normalize" id="2" type="Normalize">
<data across_spatial="_AS_" channel_shared="_CS_" eps="_EPS_" />
<weights offset="0" size="_WS_" />
-
<input>
<port id="3">
<dim>_IN_</dim>
}
};
-TEST_F(GraphCopyTests, copyNetworkPreserveBasicParams) {
+TEST_F(GraphCopyTests, canPreserveBatchWhenCopyNetwork) {
auto clone = CNNNetCopy<MockCopier>(*mockNet, mc);
//network was copied not just assigned
ASSERT_NE(clone.get(), mockNet.get());
- ASSERT_EQ(clone->getPrecision(), Precision::FP16);
-
- char name[20];
- clone->getName(name, sizeof(name));
- ASSERT_STREQ(name, "nm");
-}
-TEST_F(GraphCopyTests, canPreserveBatchWhenCopyNetwork) {
- auto clone = CNNNetCopy<MockCopier>(*mockNet, mc);
ASSERT_EQ(clone->getBatchSize(), 12);
}
.finalize();
- net->setPrecision(IE::Precision::Q78);
InferenceEngine::ResponseDesc resp;
net->setName("net");
}
{
auto cloned = IE::cloneNet(*net);
- EXPECT_TRUE(IE::Precision::Q78 == cloned->getPrecision());
EXPECT_EQ("net", cloned->getName());
}
{