static const std::string kDefaultInpLayerName = "opencv_ngraph_empty_inp_layer_name";
static constexpr const char* kOpenCVLayersType = "opencv_ngraph_layer";
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2022_1)
static std::string shapesToStr(const std::vector<Mat>& mats)
{
std::ostringstream shapes;
    shapes << mats.size() << " ";
    for (const Mat& mat : mats)
    {
        shapes << mat.dims << " ";
        for (int i = 0; i < mat.dims; ++i)
            shapes << mat.size[i] << " ";
    }
    return shapes.str();
}
+#endif // OpenVINO < 2022.1
static std::vector<Ptr<NgraphBackendWrapper> >
ngraphWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
return wrappers;
}
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+
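+// A custom OpenVINO operation that wraps an OpenCV dnn::Layer, letting layers with no
+// native OpenVINO implementation run inside the compiled model via evaluate().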
+class NgraphCustomOp: public ov::op::Op {
+public:
+ OPENVINO_OP(kOpenCVLayersType);
+
+ NgraphCustomOp(const ngraph::OutputVector& inputs, Ptr<Layer>& cvLayer, const std::vector<Mat>& outputs, const std::vector<Mat>& internals):
+ Op(inputs), cvLayer(cvLayer), outputs(outputs), internals(internals)
+ {
+ constructor_validate_and_infer_types();
+ }
+
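+    // Report the output shapes that OpenCV has already computed for this layer.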
+ void validate_and_infer_types() override
+ {
+ set_output_size(outputs.size());
+ for (int i = 0; i < outputs.size(); ++i)
+ {
+ ov::PartialShape shape;
+ for (int j = 0; j < outputs[i].dims; ++j) {
+ shape.push_back(outputs[i].size[j]);
+ }
+ set_output_type(i, get_input_element_type(0), shape);
+ }
+ }
+
+ std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& new_args) const override
+ {
+ return std::make_shared<NgraphCustomOp>(new_args, cvLayer, outputs, internals);
+ }
+
+    bool has_evaluate() const override {
+ return true;
+ }
+
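+    // Execute the wrapped OpenCV layer on OpenVINO's tensors. infEngineBlobsToMats
+    // creates Mat headers over the tensor buffers, so forward() fills the outputs in place.
+    // Note: the 'outputs' parameter deliberately shadows the member, which only stores shapes.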
+ bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override {
+ std::vector<Mat> inpMats, outMats;
+ infEngineBlobsToMats(inputs, inpMats);
+ infEngineBlobsToMats(outputs, outMats);
+ try
+ {
+ cvLayer->forward(inpMats, outMats, internals);
+ return true;
+ }
+ catch (...)
+ {
+ return false;
+ }
+ }
+
+ Ptr<Layer>& cvLayer;
+ std::vector<Mat> outputs, internals;
+};
+
+#else
+
class NgraphCustomOp: public ngraph::op::Op {
public:
const ngraph::NodeTypeInfo& get_type_info() const override
#endif
};
-
+#endif // OpenVINO >= 2022.1
InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(std::move(_node)) {}
std::vector<Mat>& outputs, std::vector<Mat>& internals)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), cvLayer(cvLayer_)
{
- std::ostringstream oss;
- oss << (size_t)cvLayer.get();
-
- std::map<std::string, InferenceEngine::Parameter> params = {
- {"impl", oss.str()},
- {"outputs", shapesToStr(outputs)},
- {"internals", shapesToStr(internals)}
- };
-
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_3)
ngraph::OutputVector inp_nodes;
#else
#endif
for (const auto& node : nodes)
inp_nodes.emplace_back(node.dynamicCast<InfEngineNgraphNode>()->node);
+
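+    // Since OpenVINO 2022.1 the OpenCV layer is handed to the custom op directly;
+    // older releases round-trip an implementation pointer and the shapes as string parameters.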
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+ node = std::make_shared<NgraphCustomOp>(inp_nodes, cvLayer, outputs, internals);
+#else
+ std::ostringstream oss;
+ oss << (size_t)cvLayer.get();
+ std::map<std::string, InferenceEngine::Parameter> params = {
+ {"impl", oss.str()},
+ {"outputs", shapesToStr(outputs)},
+ {"internals", shapesToStr(internals)}
+ };
node = std::make_shared<NgraphCustomOp>(inp_nodes, params);
+#endif
CV_Assert(!cvLayer->name.empty());
setName(cvLayer->name);
CV_Assert(node);
CV_Assert(node->node);
const std::string& name = node->node->get_friendly_name();
- requestedOutputs.insert({name, node});
+ requestedOutputs.insert({name, node.get()});
}
void InfEngineNgraphNet::setNodePtr(std::shared_ptr<ngraph::Node>* ptr) {
CV_LOG_DEBUG(NULL, "DNN/NGRAPH: Add 'Result' output: " << output_node_it->first);
CV_Assert(output_node_it->second);
auto out = std::make_shared<ngraph::op::Result>(output_node_it->second->node);
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
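+    // OpenVINO 2022.1 appends ".0" to tensor names of multi-output nodes;
+    // mirror that in the Result's friendly name so lookups by name keep working.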
+ out->set_friendly_name(output_node_it->first + (output_node_it->second->node->get_output_size() == 1 ? "" : ".0"));
+#endif
outs.push_back(out);
}
CV_Assert_N(!inputs_vec.empty(), !outs.empty());
}
}
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2022_1)
+static inline
+InferenceEngine::Layout estimateLayout(size_t dims);
+#endif
+
void InfEngineNgraphNet::init(Target targetId)
{
if (!hasNetOwner)
{
if (targetId == DNN_TARGET_OPENCL_FP16)
{
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
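+            // One transformation pass instead of patching nodes one by one (cf. the #else branch).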
+ ov::pass::ConvertFP32ToFP16().run_on_model(ngraph_function);
+#else
auto nodes = ngraph_function->get_ordered_ops();
for (auto& node : nodes)
{
}
}
ngraph_function->validate_nodes_and_infer_types();
+#endif // OpenVINO >= 2022.1
}
cnn = InferenceEngine::CNNNetwork(ngraph_function);
CV_Error(Error::StsNotImplemented, "Unknown target");
};
- if (!hasNetOwner) {
- for (size_t i = 0; i < ngraph_function->get_output_size(); ++i) {
- auto node = ngraph_function->output(i).get_node();
- for (size_t j = 0; j < node->get_input_size(); ++j) {
- std::string name = node->input_value(j).get_node()->get_friendly_name();
- auto iter = requestedOutputs.find(name);
- if (iter != requestedOutputs.end()) {
- requestedOutputs.erase(iter);
- cnn.addOutput(name);
- }
- }
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+ auto model = cnn.getFunction();
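+    // Align the model's I/O tensor types with the Mats OpenCV allocated:
+    // inputs keep their source element type, outputs are always exposed as FP32.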
+ ov::preprocess::PrePostProcessor ppp(model);
+ int i = 0;
+ for (const auto& inp : model->inputs()) { // TODO: not sure why but ngraph_function->inputs() here causes segfault.
+ const std::string& name = inp.get_node()->get_friendly_name();
+ auto blobIt = allBlobs.find(name);
+ CV_Assert(blobIt != allBlobs.end());
+
+ auto srcT = blobIt->second.get_element_type();
+        if (srcT != inp.get_node()->get_element_type()) {
+            ppp.input(i).tensor().set_element_type(srcT);
+        }
+        i++;  // advance the port index for every input, overridden or not
+ }
+
+ i = 0;
+ for (const auto& it : model->outputs())
+ {
+ const std::string& name = it.get_node()->get_friendly_name();
+ auto blobIt = allBlobs.find(name);
+ CV_Assert(blobIt != allBlobs.end());
+ const auto& src = blobIt->second;
+
+        // A workaround for single-dimensional outputs, for which OpenCV allocates a 2D Mat.
+        // For example, face-detection-0105 has a Result of shape {200} while the output blob is {200, 1}.
+ auto outShape = it.get_partial_shape().get_max_shape();
+ if (outShape != src.get_shape()) {
+            size_t sz = std::accumulate(outShape.begin(), outShape.end(), (size_t)1, std::multiplies<size_t>());
+ CV_Assert(sz == src.get_size());
+ allBlobs[name] = ov::Tensor(src.get_element_type(), outShape, src.data());
}
+
+ ppp.output(i++).tensor().set_element_type(ov::element::f32); // Should be always FP32
}
+ ppp.build();
+
+#else
+
for (const auto& it : cnn.getInputsInfo())
{
const std::string& name = it.first;
        auto blobIt = allBlobs.find(name);
        CV_Assert(blobIt != allBlobs.end());
        it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision());
    }

    for (const auto& it : cnn.getOutputsInfo())
    {
        const std::string& name = it.first;
auto blobIt = allBlobs.find(name);
CV_Assert(blobIt != allBlobs.end());
+ InferenceEngine::TensorDesc& desc = blobIt->second->getTensorDesc();
+
+ auto outShape = it.second->getDims();
+ if (outShape != desc.getDims()) {
+ desc.reshape(outShape, estimateLayout(outShape.size()));
+ }
+
        it.second->setPrecision(InferenceEngine::Precision::FP32);  // Should be always FP32
}
+#endif // OpenVINO >= 2022.1
initPlugin(cnn);
}
const std::string& libName = candidates[i];
try
{
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+ ie.add_extension(libName);
+#else
InferenceEngine::IExtensionPtr extension =
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_4)
std::make_shared<InferenceEngine::Extension>(libName);
#endif
ie.AddExtension(extension, "CPU");
+#endif
CV_LOG_INFO(NULL, "DNN-IE: Loaded extension plugin: " << libName);
found = true;
break;
{
CV_LOG_WARNING(NULL, "DNN-IE: Can't load extension plugin (extra layers for some networks). Specify path via OPENCV_DNN_IE_EXTRA_PLUGIN_PATH parameter");
}
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2022_1)
    // Some networks can work without the library of extra layers.
    // OpenCV's own layer implementations are registered as a fallback extension.
try
{
CV_LOG_INFO(NULL, "DNN-IE: Can't register OpenCV custom layers nGraph extension: " << e.what());
}
+#endif // OpenVINO < 2022.1
#ifndef _WIN32
// Limit the number of CPU threads.
if (device_name == "CPU")
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+ ie.set_property(device_name, ov::inference_num_threads(getNumThreads()));
+#else
ie.SetConfig({{
InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
}}, device_name);
+#endif // OpenVINO >= 2022.1
#endif
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_2)
if (device_name.find("GPU") == 0)
if (!cache_path.empty() && cache_path != "disabled")
{
CV_LOG_INFO(NULL, "OpenCV/nGraph: using GPU kernels cache: " << cache_path);
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+ ie.set_property(device_name, ov::cache_dir(cache_path));
+#else
ie.SetConfig({{
InferenceEngine::PluginConfigParams::KEY_CACHE_DIR, cache_path,
}}, device_name);
+#endif // OpenVINO >= 2022.1
}
}
#endif
std::map<std::string, std::string> config;
if (device_name == "MYRIAD" || device_name == "HDDL") {
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
- config.emplace("MYRIAD_DETECT_NETWORK_BATCH", CONFIG_VALUE(NO));
+ config.emplace("MYRIAD_DETECT_NETWORK_BATCH", "NO");
#else
- config.emplace("VPU_DETECT_NETWORK_BATCH", CONFIG_VALUE(NO));
+ config.emplace("VPU_DETECT_NETWORK_BATCH", "NO");
#endif
}
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
- InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes();
- InferenceEngine::ICNNNetwork::InputShapes::iterator itr;
+ auto ngraphFunction = t_net.getFunction();
bool equal_flag = true;
- size_t i = 0;
- for (itr = inShapes.begin(); itr != inShapes.end(); ++itr)
+ std::map<std::string, std::vector<size_t> > inShapes;
+ int i = 0;
+ for (const auto& inp : ngraphFunction->get_parameters())
{
- InferenceEngine::SizeVector currentInShape(inputs[i].begin(), inputs[i].end());
- if (itr->second != currentInShape)
+ std::vector<size_t> oldShape = inp->get_shape();
+ std::vector<size_t> newShape(inputs[i].begin(), inputs[i].end());
+ inShapes.insert({inp->get_friendly_name(), newShape});
+ if (oldShape != newShape)
{
- itr->second = currentInShape;
equal_flag = false;
}
i++;
InferenceEngine::CNNNetwork curr_t_net(t_net);
curr_t_net.reshape(inShapes);
}
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
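+    // Find the requested output among the model's Results and take its maximum shape.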
+ std::vector<size_t> dims;
+ for (const auto& it : ngraphFunction->outputs()) {
+ if (it.get_node()->get_friendly_name() == name) {
+ dims = it.get_partial_shape().get_max_shape();
+ }
+ }
+ if (dims.empty())
+        CV_Error(Error::StsError, format("Unable to find result with name %s", name.c_str()));
+#else
std::vector<size_t> dims = t_net.getOutputsInfo()[name]->getDims();
+#endif
outputs.push_back(MatShape(dims.begin(), dims.end()));
return false;
}
CV_Error(Error::StsInternal, "Choose Inference Engine as a preferable backend.");
}
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+
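+// Wrap a Mat into an ov::Tensor without copying data; the Mat must outlive the tensor.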
+ov::Tensor wrapToNgraphBlob(const Mat& m) {
+ std::vector<size_t> shape = getShape<size_t>(m);
+ if (m.type() == CV_32F)
+ return ov::Tensor(ov::element::f32, shape, m.data);
+ else if (m.type() == CV_8U)
+ return ov::Tensor(ov::element::u8, shape, m.data);
+ else if (m.type() == CV_32SC1)
+ return ov::Tensor(ov::element::i32, shape, m.data);
+ else
+ CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
+}
+
+#else
static inline
InferenceEngine::Layout estimateLayout(const Mat& m)
{
return estimateLayout(m.dims);
}
-static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std::string& name = "")
-{
- std::vector<size_t> shape = getShape<size_t>(m);
- if (m.type() == CV_32F)
- return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
- {InferenceEngine::Precision::FP32, shape, estimateLayout(m)}));
- else if (m.type() == CV_8U)
- return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
- {InferenceEngine::Precision::U8, shape, estimateLayout(m)}));
- else
- CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
-}
-
InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m, const std::vector<size_t>& shape,
InferenceEngine::Layout layout)
{
else if (m.type() == CV_8U)
return InferenceEngine::make_shared_blob<uint8_t>(
{InferenceEngine::Precision::U8, shape, layout}, (uint8_t*)m.data);
+ else if (m.type() == CV_32SC1)
+ return InferenceEngine::make_shared_blob<int32_t>(
+ {InferenceEngine::Precision::I32, shape, layout}, (int32_t*)m.data);
else
CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
}
return wrapToNgraphBlob(m, shape, layout);
}
+InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m) { return wrapToNgraphBlob(m, estimateLayout(m)); }
+
+#endif // OpenVINO >= 2022.1
+
NgraphBackendWrapper::NgraphBackendWrapper(int targetId, const cv::Mat& m)
: BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, targetId)
, host((Mat*)&m)
{
- dataPtr = wrapToInfEngineDataNode(m);
- blob = wrapToNgraphBlob(m, estimateLayout(m));
+ blob = wrapToNgraphBlob(m);
}
NgraphBackendWrapper::NgraphBackendWrapper(Ptr<BackendWrapper> wrapper)
{
Ptr<NgraphBackendWrapper> ieWrapper = wrapper.dynamicCast<NgraphBackendWrapper>();
CV_Assert(!ieWrapper.empty());
- InferenceEngine::DataPtr srcData = ieWrapper->dataPtr;
- dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(srcData->getName(), srcData->getTensorDesc()));
+ name = ieWrapper->name;
blob = ieWrapper->blob;
}
//CV_Error(Error::StsNotImplemented, "");
}
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
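+// Allocate a tensor with the same element type and shape; the contents are not copied.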
+ov::Tensor copyBlob(const ov::Tensor& blob)
+{
+ return ov::Tensor(blob.get_element_type(), blob.get_shape());
+}
+#else
InferenceEngine::Blob::Ptr copyBlob(const InferenceEngine::Blob::Ptr& blob)
{
InferenceEngine::Blob::Ptr copy;
return copy;
}
-InferenceEngine::DataPtr ngraphDataNode(const Ptr<BackendWrapper>& ptr)
-{
- CV_Assert(!ptr.empty());
- Ptr<NgraphBackendWrapper> p = ptr.dynamicCast<NgraphBackendWrapper>();
- CV_Assert(!p.empty());
- return p->dataPtr;
-}
-
-static
-InferenceEngine::Blob::Ptr reallocateBlob(Mat &m, const InferenceEngine::TensorDesc& description)
-{
- auto dims = description.getDims();
- auto layout = estimateLayout(dims.size());
- MatShape matShape(dims.begin(), dims.end());
- if (description.getPrecision() == InferenceEngine::Precision::FP32)
- {
- m.create(matShape, CV_32FC1);
- return InferenceEngine::make_shared_blob<float>(
- {description.getPrecision(), dims, layout}, (float*)m.data);
- }
- else if (description.getPrecision() == InferenceEngine::Precision::I32)
- {
- m.create(matShape, CV_32SC1);
- return InferenceEngine::make_shared_blob<int>(
- {description.getPrecision(), dims, layout}, (int*)m.data);
- }
- else if (description.getPrecision() == InferenceEngine::Precision::U8)
- {
- m.create(matShape, CV_8UC1);
- return InferenceEngine::make_shared_blob<uchar>(
- {description.getPrecision(), dims, layout}, (uchar*)m.data);
- }
- std::ostringstream msg;
- msg << "Unsupported IE precision: " << description.getPrecision();
- CV_Error(Error::StsNotImplemented, msg.str());
-}
-
-InferenceEngine::DataPtr ngraphDataOutputNode(
- const Ptr<BackendWrapper>& ptr,
- const InferenceEngine::TensorDesc& description,
- const std::string name)
-{
- CV_Assert(!ptr.empty());
- Ptr<NgraphBackendWrapper> p = ptr.dynamicCast<NgraphBackendWrapper>();
- CV_Assert(!p.empty());
- NgraphBackendWrapper& w = *p;
- const InferenceEngine::TensorDesc& blobDesc = w.blob.get()->getTensorDesc();
- auto dims = description.getDims();
- bool reallocate = false;
- if (blobDesc.getPrecision() != description.getPrecision())
- {
- reallocate = true;
- CV_LOG_WARNING(NULL, "Reallocate output '" << name << "' blob due to wrong precision: " << blobDesc.getPrecision() << " => " << description.getPrecision() << " ndims=" << dims.size());
- }
- if (dims.size() != blobDesc.getDims().size())
- {
- reallocate = true;
- CV_LOG_WARNING(NULL, "Reallocate output '" << name << "' blob due to wrong dims: " << blobDesc.getDims().size() << " => " << dims.size());
- }
- if (reallocate)
- {
- auto layout = estimateLayout(dims.size());
- w.dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(name,
- {description.getPrecision(), dims, layout}));
- w.blob = reallocateBlob(*w.host, description);
- }
- return w.dataPtr;
-}
-
+#endif // OpenVINO < 2022.1
void InfEngineNgraphNet::reset()
{
allBlobs.clear();
infRequests.clear();
isInit = false;
-
- outputsDesc.clear();
- for (const auto& it : cnn.getOutputsInfo())
- {
- const std::string& name = it.first;
- outputsDesc.insert({name, it.second->getTensorDesc()});
- }
}
void InfEngineNgraphNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs)
auto wrappers = ngraphWrappers(ptrs);
for (const auto& wrapper : wrappers)
{
- std::string name = wrapper->dataPtr->getName();
+ std::string name = wrapper->name;
name = name.empty() ? kDefaultInpLayerName : name;
allBlobs.insert({name, wrapper->blob});
}
for (int i = 0; i < outs.size(); ++i)
{
outs[i]->futureMat = outProms[i].getArrayResult();
- outsNames[i] = outs[i]->dataPtr->getName();
- }
-}
-
-Mat ngraphBlobToMat(const InferenceEngine::Blob::Ptr& blob)
-{
- std::vector<size_t> dims = blob->getTensorDesc().getDims();
- std::vector<int> size(dims.begin(), dims.end());
- auto precision = blob->getTensorDesc().getPrecision();
-
- int type = -1;
- switch (precision)
- {
- case InferenceEngine::Precision::FP32: type = CV_32F; break;
- case InferenceEngine::Precision::U8: type = CV_8U; break;
- default:
- CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
+ outsNames[i] = outs[i]->name;
}
- return Mat(size, type, (void*)blob->buffer());
}
void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers, bool isAsync)
}
infRequests.push_back(reqWrapper);
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
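+    // Bind I/O tensors by position: synchronous requests alias the user blobs directly,
+    // asynchronous ones get private copies (see copyBlob) synced around start_async().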
+ int i = 0;
+ for (const auto& it : netExec.inputs())
+ {
+ const std::string& name = it.get_node()->get_friendly_name();
+ auto blobIt = allBlobs.find(name);
+ CV_Assert(blobIt != allBlobs.end());
+ reqWrapper->req.set_input_tensor(i++, isAsync ? copyBlob(blobIt->second) : blobIt->second);
+ }
+
+ i = 0;
+ for (const auto& it : netExec.outputs())
+ {
+ const std::string& name = it.get_node()->get_friendly_name();
+ auto blobIt = allBlobs.find(name);
+ CV_Assert(blobIt != allBlobs.end());
+ reqWrapper->req.set_output_tensor(i++, isAsync ? copyBlob(blobIt->second) : blobIt->second);
+ }
+#else
InferenceEngine::BlobMap inpBlobs, outBlobs;
for (const auto& it : cnn.getInputsInfo())
{
}
reqWrapper->req.SetInput(inpBlobs);
reqWrapper->req.SetOutput(outBlobs);
+#endif
+
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+ if (isAsync) {
+ bool* isReady = &reqWrapper->isReady;
+ auto* promises = &reqWrapper->outProms;
+ auto* req = &reqWrapper->req;
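+        // Completion callback: publish every output tensor to its promise, propagating
+        // any exception to the waiting AsyncArray instead of throwing from the callback.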
+ reqWrapper->req.set_callback([isReady, promises, req](std::exception_ptr ex) {
+            CV_LOG_DEBUG(NULL, "DNN(nGraph): completionCallback()");
+
+ size_t processedOutputs = 0;
+ try
+ {
+ for (; processedOutputs < promises->size(); ++processedOutputs)
+ {
+ Mat m = infEngineBlobToMat(req->get_output_tensor(processedOutputs));
+
+ try
+ {
+ (*promises)[processedOutputs].setValue(m.clone());
+ }
+ catch (...)
+ {
+ try {
+ (*promises)[processedOutputs].setException(std::current_exception());
+ } catch(...) {
+ CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
+ }
+ }
+ }
+ }
+ catch (...)
+ {
+ std::exception_ptr e = std::current_exception();
+ for (; processedOutputs < promises->size(); ++processedOutputs)
+ {
+ try {
+ (*promises)[processedOutputs].setException(e);
+ } catch(...) {
+ CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
+ }
+ }
+ }
+ *isReady = true;
+ });
+ }
+#else  // OpenVINO < 2022.1
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2021_4)
InferenceEngine::InferRequest infRequest = reqWrapper->req;
for (; processedOutputs < wrapper.outProms.size(); ++processedOutputs)
{
const std::string& name = wrapper.outsNames[processedOutputs];
- Mat m = ngraphBlobToMat(wrapper.req.GetBlob(name));
+ Mat m = infEngineBlobToMat(wrapper.req.GetBlob(name));
try
{
wrapper.isReady = true;
}
);
+#endif // OpenVINO >= 2022.1
}
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+ if (isAsync)
+ {
+ // Copy actual data to infer request's input blobs.
+ int i = 0;
+ for (const auto& it : cnn.getFunction()->get_parameters())
+ {
+ const std::string& name = it->get_friendly_name();
+ auto blobIt = allBlobs.find(name);
+ Mat srcMat = infEngineBlobToMat(blobIt->second);
+ Mat dstMat = infEngineBlobToMat(reqWrapper->req.get_input_tensor(i++));
+ srcMat.copyTo(dstMat);
+ }
+
+ // Set promises to output blobs wrappers.
+ reqWrapper->makePromises(outBlobsWrappers);
+
+ reqWrapper->isReady = false;
+ reqWrapper->req.start_async();
+ }
+ else
+ {
+ reqWrapper->req.infer();
+ }
+#else
if (isAsync)
{
// Copy actual data to infer request's input blobs.
{
const std::string& name = it.first;
auto blobIt = allBlobs.find(name);
- Mat srcMat = ngraphBlobToMat(blobIt->second);
- Mat dstMat = ngraphBlobToMat(reqWrapper->req.GetBlob(name));
+ Mat srcMat = infEngineBlobToMat(blobIt->second);
+ Mat dstMat = infEngineBlobToMat(reqWrapper->req.GetBlob(name));
srcMat.copyTo(dstMat);
}
{
reqWrapper->req.Infer();
}
+#endif // OpenVINO >= 2022.1
}
#endif
(netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
- InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
std::string outputName = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
outputName = ld.outputBlobsWrappers.size() > 1 ? (outputName + "." + std::to_string(i)) : outputName;
- dataPtr->setName(outputName);
+ ld.outputBlobsWrappers[i].dynamicCast<NgraphBackendWrapper>()->name = outputName;
}
}
else
{
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
- InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
std::string outputName = ld.outputBlobsWrappers.size() > 1 ? (ld.name + "." + std::to_string(i)) : ld.name;
- dataPtr->setName(outputName);
+ ld.outputBlobsWrappers[i].dynamicCast<NgraphBackendWrapper>()->name = outputName;
}
}
}
{
for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
{
- InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.inputBlobsWrappers[i]);
- dataPtr->setName(netInputLayer->outNames[i]);
- }
- }
- else
- {
- for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
- {
- auto it = ienet.outputsDesc.find(ld.name);
- if (it != ienet.outputsDesc.end())
- {
- const InferenceEngine::TensorDesc& descriptor = it->second;
- InferenceEngine::DataPtr dataPtr = ngraphDataOutputNode(ld.outputBlobsWrappers[i], descriptor, ld.name);
- dataPtr->setName(ld.name);
- }
- else
- {
- InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
- dataPtr->setName(ld.name);
- }
+ ld.inputBlobsWrappers[i].dynamicCast<NgraphBackendWrapper>()->name = netInputLayer->outNames[i];
}
}
ienet.addBlobs(ld.inputBlobsWrappers);
dynamicCast<NgraphBackendWrapper>();
CV_Assert(!inpWrapper.empty());
auto iter = std::find(inputNames.begin(), inputNames.end(),
- inpWrapper->dataPtr->getName());
+ inpWrapper->name);
if (iter == inputNames.end())
{
- inputNames.push_back(inpWrapper->dataPtr->getName());
+ inputNames.push_back(inpWrapper->name);
inputs.push_back(inpLd.outputBlobs[cons_inp]);
}
curr_pos = cons + 1;
CV_LOG_DEBUG(NULL, "DNN/IE: bind output port " << lid << ":" << oid << " (" << ngraph_input_node->get_friendly_name() << ":" << ngraph_input_node->get_type_info().name << ")");
// Handle parameters from other subnets. Output port is not used in this case
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
if ((ngraph::op::is_parameter(ngraph_input_node) || ngraph::op::is_constant(ngraph_input_node)) &&
+#else
+ if ((ngraph_input_node->is_parameter() || ngraph_input_node->is_constant()) &&
+#endif
ngraph_input_node->get_output_size() == 1)
{
inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(ngraph_input_node));
CV_TRACE_REGION("register_inputs");
+ auto ngraphFunction = ieNet.getFunction();
+ CV_Assert(ngraphFunction);
+
std::vector<String> inputsNames;
std::vector<MatShape> inp_shapes;
- for (auto& it : ieNet.getInputsInfo())
+ for (auto& it : ngraphFunction->get_parameters())
{
- inputsNames.push_back(it.first);
- std::vector<size_t> dims = it.second->getTensorDesc().getDims();
+ inputsNames.push_back(it->get_friendly_name());
+ std::vector<size_t> dims = it->get_shape();
inp_shapes.push_back(std::vector<int>(dims.begin(), dims.end()));
}
+    // nGraph models expose "Result" output layers whose names carry a "/sink_port" suffix.
+    // Each Result's input is the actual model output, so rename the Result after its input.
+    // This workaround keeps output names consistent with ieNet.getOutputsInfo().
+ for (int i = 0; i < ngraphFunction->get_output_size(); ++i) {
+ auto res = ngraphFunction->output(i);
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+ const std::string& name = res.get_any_name();
+#else
+ auto out = res.get_node()->input(0).get_source_output();
+ std::string name = out.get_node()->get_friendly_name();
+ if (out.get_node()->get_output_size() > 1)
+ name += "." + std::to_string(out.get_index());
+#endif
+ if (res.get_node()->get_friendly_name() != name)
+ res.get_node()->set_friendly_name(name);
+ }
Net cvNet;
Ptr<NetImplOpenVINO> openvino_impl_ptr = makePtr<NetImplOpenVINO>();
CV_TRACE_REGION_NEXT("register_outputs");
- auto ngraphFunction = ieNet.getFunction();
- CV_Assert(ngraphFunction);
std::vector<std::shared_ptr<ngraph::Node>> ngraphOperations = ngraphFunction->get_ops();
- for (auto& it : ieNet.getOutputsInfo())
+ for (auto& it : ngraphFunction->get_results())
{
CV_TRACE_REGION("output");
- const auto& outputName = it.first;
+ const auto& outputName = it->get_friendly_name();
LayerParams lp;
- int lid = cvNet.addLayer(it.first, "", lp);
+ int lid = cvNet.addLayer(outputName, "", lp);
LayerData& ld = openvino_impl.layers[lid];
InferenceEngine::CNNNetwork ieNet;
try
{
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
+ ov::Tensor weights_blob(ov::element::u8, {bufferWeightsSize}, (void*)bufferWeightsPtr);
+ ieNet = ie.read_model(model, weights_blob);
+#else
InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bufferWeightsSize }, InferenceEngine::Layout::C);
InferenceEngine::Blob::CPtr weights_blob = InferenceEngine::make_shared_blob<uint8_t>(tensorDesc, (uint8_t*)bufferWeightsPtr, bufferWeightsSize);
ieNet = ie.ReadNetwork(model, weights_blob);
+#endif
}
catch (const std::exception& e)
{