1 // This file is part of OpenCV project.
2 // It is subject to the license terms in the LICENSE file found in the top-level directory
3 // of this distribution and at http://opencv.org/license.html.
5 // Copyright (C) 2018-2020 Intel Corporation
9 // needs to be included regardless if IE is present or not
10 // (cv::gapi::ie::backend() is still there and is defined always)
11 #include "backends/ie/giebackend.hpp"
13 #ifdef HAVE_INF_ENGINE
15 #if INF_ENGINE_RELEASE <= 2019010000
16 # error G-API IE module supports only OpenVINO IE >= 2019 R1
20 #include <unordered_set>
22 #include <ade/util/algorithm.hpp>
24 #include <ade/util/range.hpp>
25 #include <ade/util/zip_range.hpp>
26 #include <ade/util/chain_range.hpp>
27 #include <ade/typed_graph.hpp>
29 #include <opencv2/core/utility.hpp>
30 #include <opencv2/core/utils/logger.hpp>
32 #include <opencv2/gapi/gcommon.hpp>
33 #include <opencv2/gapi/garray.hpp>
34 #include <opencv2/gapi/gopaque.hpp>
35 #include <opencv2/gapi/util/any.hpp>
36 #include <opencv2/gapi/gtype_traits.hpp>
37 #include <opencv2/gapi/infer.hpp>
38 #include <opencv2/gapi/own/convert.hpp>
40 #include "compiler/gobjref.hpp"
41 #include "compiler/gmodel.hpp"
43 #include "backends/ie/util.hpp"
44 #include "backends/ie/giebackend/giewrapper.hpp"
46 #include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK!
48 namespace IE = InferenceEngine;
// Convert a cv::Rect into an InferenceEngine ROI descriptor.
// All rectangle fields are widened to std::size_t, as IE::ROI stores
// unsigned extents.
// NOTE(review): interior lines of this definition are elided in this view
// (the ROI's leading field / return expression) -- tokens kept as-is.
52 inline IE::ROI toIE(const cv::Rect &rc) {
55 , static_cast<std::size_t>(rc.x)
56 , static_cast<std::size_t>(rc.y)
57 , static_cast<std::size_t>(rc.width)
58 , static_cast<std::size_t>(rc.height)
// Convert a cv::MatSize (per-dimension extents) into an IE dimension vector.
62 inline IE::SizeVector toIE(const cv::MatSize &sz) {
63 return cv::to_own<IE::SizeVector::value_type>(sz);
// Convert IE dimensions (vector of size_t) into OpenCV's int-based dims.
// checked_cast guards against narrowing overflow on each element.
// NOTE(review): the loop header over `vsz` is elided in this view.
65 inline std::vector<int> toCV(const IE::SizeVector &vsz) {
66 std::vector<int> result;
67 result.reserve(vsz.size());
69 result.push_back(ade::util::checked_cast<int>(sz));
// Pick an IE::Layout purely from the number of dimensions via a static
// lookup table (table contents elided in this view). The assert below
// rejects ranks the table does not cover.
74 inline IE::Layout toIELayout(const std::size_t ndims) {
75 static const IE::Layout lts[] = {
83 // FIXME: This is not really a good conversion,
84 // since it may also stand for NHWC/HW/CN/NDHWC data
85 CV_Assert(ndims < sizeof(lts) / sizeof(lts[0]));
// Map an OpenCV depth to an IE precision. Only CV_8U and CV_32F are
// supported; anything else trips the assert. The trailing return is
// unreachable fallback to keep compilers happy.
// NOTE(review): the switch header is elided in this view.
89 inline IE::Precision toIE(int depth) {
91 case CV_8U: return IE::Precision::U8;
92 case CV_32F: return IE::Precision::FP32;
93 default: GAPI_Assert(false && "Unsupported data type");
95 return IE::Precision::UNSPECIFIED;
// Inverse of toIE(int): map an IE precision back to an OpenCV depth.
// Same restriction -- only U8 and FP32 are handled.
97 inline int toCV(IE::Precision prec) {
99 case IE::Precision::U8: return CV_8U;
100 case IE::Precision::FP32: return CV_32F;
101 default: GAPI_Assert(false && "Unsupported data type");
// Build an IE::TensorDesc for a cv::Mat.
// When the Mat is 2D and the caller hints IMAGE, it is treated as an
// interleaved HxWxC image and described as a dense NCHW {1,C,H,W} tensor;
// non-dense (strided) images are rejected. Otherwise the Mat's own dims
// drive the descriptor via toIE(sz)/toIELayout.
106 inline IE::TensorDesc toIE(const cv::Mat &mat, cv::gapi::ie::TraitAs hint) {
107 const auto &sz = mat.size;
109 // NB: For some reason RGB image is 2D image
110 // (since channel component is not counted here).
111 // Note: regular 2D vectors also fall into this category
112 if (sz.dims() == 2 && hint == cv::gapi::ie::TraitAs::IMAGE)
114 // NB: This logic is mainly taken from IE samples
115 const size_t pixsz = CV_ELEM_SIZE1(mat.type());
116 const size_t channels = mat.channels();
117 const size_t height = mat.size().height;
118 const size_t width = mat.size().width;
120 const size_t strideH = mat.step.buf[0];
121 const size_t strideW = mat.step.buf[1];
// Dense == no padding: a pixel step equals elem size * channels and a
// row step equals pixel step * width.
123 const bool is_dense =
124 strideW == pixsz * channels &&
125 strideH == strideW * width;
128 cv::util::throw_error(std::logic_error("Doesn't support conversion"
129 " from non-dense cv::Mat"));
131 return IE::TensorDesc(toIE(mat.depth()),
132 IE::SizeVector{1, channels, height, width},
// Fallback (non-IMAGE or non-2D): describe the Mat by its own dims.
136 return IE::TensorDesc(toIE(mat.depth()), toIE(sz), toIELayout(sz.dims()));
// Wrap a cv::Mat into an IE blob WITHOUT copying pixel data -- the blob
// aliases the Mat's buffer, so the Mat must outlive the blob.
// NOTE(review): the HANDLE() instantiations for concrete depths are elided
// in this view; the trailing return is unreachable fallback.
139 inline IE::Blob::Ptr wrapIE(const cv::Mat &mat, cv::gapi::ie::TraitAs hint) {
140 const auto tDesc = toIE(mat, hint);
141 switch (mat.depth()) {
142 // NB: Seems there's no way to create an untyped (T-less) Blob::Ptr
143 // in IE given only precision via TensorDesc. So we have to do this:
144 #define HANDLE(E,T) \
145 case CV_##E: return IE::make_shared_blob<T>(tDesc, const_cast<T*>(mat.ptr<T>()))
149 default: GAPI_Assert(false && "Unsupported data type");
151 return IE::Blob::Ptr{};
// Copy an IE blob's contents into a pre-allocated Mat-like destination,
// dispatching on the blob's precision. Caller is responsible for `mat`
// being large enough -- std::copy_n writes straight into mat.data.
// NOTE(review): the copy count argument and HANDLE() instantiations are
// elided in this view.
154 template<class MatType>
155 inline void copyFromIE(const IE::Blob::Ptr &blob, MatType &mat) {
156 switch (blob->getTensorDesc().getPrecision()) {
157 #define HANDLE(E,T) \
158 case IE::Precision::E: std::copy_n(blob->buffer().as<T*>(), \
160 reinterpret_cast<T*>(mat.data)); \
165 default: GAPI_Assert(false && "Unsupported data type");
169 // IE-specific metadata, represents a network with its parameters
// (graph-metadata struct; its header line is elided in this view).
// Holds the user-supplied ParamDesc plus the network's input/output maps,
// resolved once at construction.
171 static const char *name() { return "IEModelConfig"; }
173 cv::gapi::ie::detail::ParamDesc params;
175 IE::InputsDataMap inputs;
176 IE::OutputsDataMap outputs;
// Reads the network and validates/auto-fills layer names:
// multi-input/output nets require explicit names; single-port nets
// pick the first layer automatically.
178 explicit IEUnit(const cv::gapi::ie::detail::ParamDesc &pp)
180 net = cv::gimpl::ie::wrap::readNetwork(params);
181 inputs = net.getInputsInfo();
182 outputs = net.getOutputsInfo();
183 // The practice shows that not all inputs and not all outputs
184 // are mandatory to specify in IE model.
185 // So what we're concerned here about is:
186 // if operation's (not topology's) input/output number is
187 // greater than 1, then we do care about input/output layer
188 // names. Otherwise, names are picked up automatically.
189 // TODO: Probably this check could be done at the API entry point? (gnet)
190 if (params.num_in > 1u && params.num_in != params.input_names.size()) {
191 cv::util::throw_error(std::logic_error("Please specify input layer names for "
192 + params.model_path));
194 if (params.num_out > 1u && params.num_out != params.output_names.size()) {
195 cv::util::throw_error(std::logic_error("Please specify output layer names for "
196 + params.model_path));
198 if (params.num_in == 1u && params.input_names.empty()) {
199 params.input_names = { inputs.begin()->first };
201 if (params.num_out == 1u && params.output_names.empty()) {
202 params.output_names = { outputs.begin()->first };
206 // This method is [supposed to be] called at Island compilation stage
// Loads the network onto the plugin, creates one InferRequest, and binds
// all constant inputs to it up-front (they never change between runs).
207 cv::gimpl::ie::IECompiled compile() const {
208 auto plugin = cv::gimpl::ie::wrap::getPlugin(params);
209 auto this_network = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params);
210 auto this_request = this_network.CreateInferRequest();
212 // Bind const data to infer request
213 for (auto &&p : params.const_inputs) {
214 // FIXME: SetBlob is known to be inefficient,
215 // it is worth to make a customizable "initializer" and pass the
216 // cv::Mat-wrapped blob there to support IE's optimal "GetBlob idiom"
217 // Still, constant data is to set only once.
218 this_request.SetBlob(p.first, wrapIE(p.second.first, p.second.second));
220 return {plugin, this_network, this_request};
// Per-invocation execution context for an inference kernel: packed input
// GArgs plus output placeholders keyed by output port index.
// (Struct header line is elided in this view.)
226 // Input parameters passed to an inference operation.
227 std::vector<cv::GArg> args;
229 //FIXME: avoid conversion of arguments from internal representation to OpenCV one on each call
230 //to OCV kernel. (This can be achieved by a two single time conversions in GCPUExecutable::run,
231 //once on enter for input and output arguments, and once before return for output arguments only
232 //FIXME: check if the above applies to this backend (taken from CPU)
233 std::unordered_map<std::size_t, cv::GRunArgP> results;
235 // Generic accessor API
// Typed read access to the input argument at the given position.
237 const T& inArg(std::size_t input) { return args.at(input).get<T>(); }
// Convenience accessor: input as a cv::Mat.
240 const cv::Mat& inMat(std::size_t input) {
241 return inArg<cv::Mat>(input);
// Writable reference to an output cv::Mat.
243 cv::Mat& outMatR(std::size_t output) {
244 return *cv::util::get<cv::Mat*>(results.at(output));
// Writable reference to an output vector (through its VectorRef).
247 template<typename T> std::vector<T>& outVecR(std::size_t output) { // FIXME: the same issue
248 return outVecRef(output).wref<T>();
250 cv::detail::VectorRef& outVecRef(std::size_t output) {
251 return cv::util::get<cv::detail::VectorRef>(results.at(output));
// Graph-metadata entry carrying the kernel's run callback and the custom
// meta function. (Struct header line is elided in this view.)
256 static const char *name() { return "IERequestCallable"; }
257 // FIXME: Make IECallContext manage them all? (3->1)
258 using Run = std::function<void(cv::gimpl::ie::IECompiled &, const IEUnit &, IECallContext &)>;
263 cv::gimpl::CustomMetaFunction::CM customMetaFunc;
267 // FIXME: Is there a way to take a typed graph (our GModel),
268 // and create a new typed graph _ATOP_ of that (by extending with a couple of
270 // Alternatively, is there a way to compose types graphs?
272 // If not, we need to introduce that!
// Mutable typed-graph view over the graph metadata this backend reads/writes.
// NOTE(review): some metadata type parameters are elided in this view.
273 using GIEModel = ade::TypedGraph
274 < cv::gimpl::Protocol
276 , cv::gimpl::NetworkParams
277 , cv::gimpl::CustomMetaFunction
282 // FIXME: Same issue with Typed and ConstTyped
// Read-only counterpart of GIEModel, used at execution time.
283 using GConstGIEModel = ade::ConstTypedGraph
284 < cv::gimpl::Protocol
286 , cv::gimpl::NetworkParams
287 , cv::gimpl::CustomMetaFunction
291 } // anonymous namespace
293 // GIEExecutable implementation ////////////////////////////////////////////////
// Constructor: walk the island's nodes, compile the single OP node's IEUnit
// into this_iec, and collect DATA nodes -- rejecting const/internal data and
// any second OP node.
294 cv::gimpl::ie::GIEExecutable::GIEExecutable(const ade::Graph &g,
295 const std::vector<ade::NodeHandle> &nodes)
296 : m_g(g), m_gm(m_g) {
297 // FIXME: Currently this backend is capable to run a single inference node only.
298 // Need to extend our island fusion with merge/not-to-merge decision making parametrization
299 GConstGIEModel iem(g);
301 for (auto &nh : nodes) {
302 switch (m_gm.metadata(nh).get<NodeType>().t) {
// OP case: first OP node seen becomes this_nh / this_iec; a second one
// is an error. (The case label and this_nh assignment are elided here.)
304 if (this_nh == nullptr) {
306 this_iec = iem.metadata(this_nh).get<IEUnit>().compile();
309 util::throw_error(std::logic_error("Multi-node inference is not supported!"));
312 case NodeType::DATA: {
313 m_dataNodes.push_back(nh);
314 const auto &desc = m_gm.metadata(nh).get<Data>();
315 if (desc.storage == Data::Storage::CONST_VAL) {
316 util::throw_error(std::logic_error("No const data please!"));
318 if (desc.storage == Data::Storage::INTERNAL) {
319 util::throw_error(std::logic_error("No internal data please!"));
323 default: util::throw_error(std::logic_error("Unsupported NodeType type"));
328 // FIXME: Document what it does
// packArg: translate a graph-level argument (a GOBJREF into the object
// "magazine") into a runtime GArg holding a reference to the actual
// Mat/VectorRef/OpaqueRef object. Only GOBJREF args are accepted here;
// raw G-API placeholders must have been resolved at compile time.
329 cv::GArg cv::gimpl::ie::GIEExecutable::packArg(const cv::GArg &arg) {
330 // No API placeholders allowed at this point
331 // FIXME: this check has to be done somewhere in compilation stage.
332 GAPI_Assert( arg.kind != cv::detail::ArgKind::GMAT
333 && arg.kind != cv::detail::ArgKind::GSCALAR
334 && arg.kind != cv::detail::ArgKind::GARRAY);
336 if (arg.kind != cv::detail::ArgKind::GOBJREF) {
337 util::throw_error(std::logic_error("Inference supports G-types ONLY!"));
339 GAPI_Assert(arg.kind == cv::detail::ArgKind::GOBJREF);
341 // Wrap associated CPU object (either host or an internal one)
342 // FIXME: object can be moved out!!! GExecutor faced that.
343 const cv::gimpl::RcDesc &ref = arg.get<cv::gimpl::RcDesc>();
// Dispatch on the object's shape (switch header elided in this view).
346 case GShape::GMAT: return GArg(m_res.slot<cv::Mat>()[ref.id]);
348 // Note: .at() is intentional for GArray as object MUST be already there
349 // (and constructed by either bindIn/Out or resetInternal)
350 case GShape::GARRAY: return GArg(m_res.slot<cv::detail::VectorRef>().at(ref.id));
352 // Note: .at() is intentional for GOpaque as object MUST be already there
353 // (and constructed by either bindIn/Out or resetInternal)
354 case GShape::GOPAQUE: return GArg(m_res.slot<cv::detail::OpaqueRef>().at(ref.id));
357 util::throw_error(std::logic_error("Unsupported GShape type"));
// run: bind run-time inputs/outputs into the magazine, build an
// IECallContext for the single OP node, invoke the kernel's callback
// (IECallable::Run) on the pre-compiled request, then write results back.
362 void cv::gimpl::ie::GIEExecutable::run(std::vector<InObj> &&input_objs,
363 std::vector<OutObj> &&output_objs) {
364 // Update resources with run-time information - what this Island
365 // has received from user (or from another Island, or mix...)
366 // FIXME: Check input/output objects against GIsland protocol
368 for (auto& it : input_objs) magazine::bindInArg (m_res, it.first, it.second);
369 for (auto& it : output_objs) magazine::bindOutArg(m_res, it.first, it.second);
371 // FIXME: Running just a single node now.
372 // Not sure if need to support many of them, though
373 // FIXME: Make this island-unmergeable?
374 const auto &op = m_gm.metadata(this_nh).get<Op>();
376 // Initialize kernel's execution context:
377 // - Input parameters
378 IECallContext context;
379 context.args.reserve(op.args.size());
380 using namespace std::placeholders;
// Pack every graph arg into a runtime GArg via packArg (member-fn bind).
381 ade::util::transform(op.args,
382 std::back_inserter(context.args),
383 std::bind(&GIEExecutable::packArg, this, _1));
385 // - Output parameters.
386 for (const auto &out_it : ade::util::indexed(op.outs)) {
387 // FIXME: Can the same GArg type resolution mechanism be reused here?
388 const auto out_port = ade::util::index(out_it);
389 const auto out_desc = ade::util::value(out_it);
390 context.results[out_port] = magazine::getObjPtr(m_res, out_desc);
393 // And now trigger the execution
394 GConstGIEModel giem(m_g);
395 const auto &uu = giem.metadata(this_nh).get<IEUnit>();
396 const auto &kk = giem.metadata(this_nh).get<IECallable>();
397 kk.run(this_iec, uu, context);
// Propagate produced outputs back to the caller-visible objects.
399 for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second);
// Kernel implementation for the plain GInfer case: one full-frame inference.
// outMeta configures IE input precision/resize and reports GMatDescs for
// every network output; run sets all inputs, infers once, copies outputs.
406 struct Infer: public cv::detail::KernelTag {
407 using API = cv::GInferBase;
408 static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); }
409 static KImpl kernel() { return KImpl{outMeta, run}; }
411 static cv::GMetaArgs outMeta(const ade::Graph &gr,
412 const ade::NodeHandle &nh,
413 const cv::GMetaArgs &in_metas,
414 const cv::GArgs &/*in_args*/) {
415 // Specify network's output layer metadata to the framework
416 // Also specify the input information to the IE from the framework
417 // NB: Have no clue if network's input [dimensions] may ever define
418 // its output dimensions. It seems possible with OpenCV DNN APIs
420 cv::GMetaArgs result;
422 GConstGIEModel gm(gr);
423 const auto &uu = gm.metadata(nh).get<IEUnit>();
425 // Initialize input information
426 // Note our input layers list order matches the API order and so
428 GAPI_Assert(uu.params.input_names.size() == in_metas.size()
429 && "Known input layers count doesn't match input meta count");
// Pair every input layer with its incoming meta and configure IE
// preprocessing accordingly (precision from the GMatDesc, bilinear resize).
431 for (auto &&it : ade::util::zip(ade::util::toRange(uu.params.input_names),
432 ade::util::toRange(in_metas))) {
433 auto &&ii = uu.inputs.at(std::get<0>(it));
434 const auto & mm = std::get<1>(it);
436 GAPI_Assert(util::holds_alternative<cv::GMatDesc>(mm)
437 && "Non-GMat inputs are not supported");
439 const auto &meta = util::get<cv::GMatDesc>(mm);
440 ii->setPrecision(toIE(meta.depth));
441 ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
444 // FIXME: It would be nice here to have an exact number of network's
445 // input/output parameters. Probably GCall should store it here for us.
446 // It doesn't, as far as I know..
447 for (const auto &out_name : uu.params.output_names) {
448 // NOTE: our output_names vector follows the API order
449 // of this operation's outputs
450 const IE::DataPtr& ie_out = uu.outputs.at(out_name);
451 const IE::SizeVector dims = ie_out->getTensorDesc().getDims();
// Output meta is derived straight from the network's own descriptor.
453 cv::GMatDesc outm(toCV(ie_out->getPrecision()),
454 toCV(ie_out->getTensorDesc().getDims()));
455 result.emplace_back(outm);
460 static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) {
461 // non-generic version for now:
462 // - assumes all inputs/outputs are always Mats
463 for (auto i : ade::util::iota(uu.params.num_in)) {
464 // TODO: Ideally we shouldn't do SetBlob() but GetBlob() instead,
465 // and redirect our data producers to this memory
466 // (A memory dialog comes to the picture again)
468 const cv::Mat this_mat = ctx.inMat(i);
469 // FIXME: By default here we trait our inputs as images.
470 // May be we need to make some more intelligence here about it
471 IE::Blob::Ptr this_blob = wrapIE(this_mat, cv::gapi::ie::TraitAs::IMAGE);
472 iec.this_request.SetBlob(uu.params.input_names[i], this_blob);
// Synchronous inference on the single pre-created request.
474 iec.this_request.Infer();
475 for (auto i : ade::util::iota(uu.params.num_out)) {
476 // TODO: Think on avoiding copying here.
477 // Either we should ask IE to use our memory (what is not always the
478 // best policy) or use IE-allocated buffer inside (and pass it to the graph).
479 // Not a <very> big deal for classifiers and detectors,
480 // but may be critical to segmentation.
482 cv::Mat& out_mat = ctx.outMatR(i);
483 IE::Blob::Ptr this_blob = iec.this_request.GetBlob(uu.params.output_names[i]);
484 copyFromIE(this_blob, out_mat);
// Kernel implementation for GInferList: run the network once per ROI from
// an input cv::Rect list, accumulating per-ROI output Mats into vectors.
// Input meta layout: [0] = ROI list, [1..] = network inputs.
489 struct InferList: public cv::detail::KernelTag {
490 using API = cv::GInferListBase;
491 static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); }
492 static KImpl kernel() { return KImpl{outMeta, run}; }
494 static cv::GMetaArgs outMeta(const ade::Graph &gr,
495 const ade::NodeHandle &nh,
496 const cv::GMetaArgs &in_metas,
497 const cv::GArgs &/*in_args*/) {
498 // Specify the input information to the IE from the framework
499 // NB: Have no clue if network's input [dimensions] may ever define
500 // its output dimensions. It seems possible with OpenCV DNN APIs
502 GConstGIEModel gm(gr);
503 const auto &uu = gm.metadata(nh).get<IEUnit>();
505 // Initialize input information
506 // Note our input layers list order matches the API order and so
// The "- 1u" accounts for the ROI-list argument, which is not a net input.
508 GAPI_Assert(uu.params.input_names.size() == (in_metas.size() - 1u)
509 && "Known input layers count doesn't match input meta count");
511 std::size_t idx = 1u;
512 for (auto &&input_name : uu.params.input_names) {
513 auto &&ii = uu.inputs.at(input_name);
514 const auto & mm = in_metas[idx++];
516 GAPI_Assert(util::holds_alternative<cv::GMatDesc>(mm)
517 && "Non-GMat inputs are not supported");
519 const auto &meta = util::get<cv::GMatDesc>(mm);
520 ii->setPrecision(toIE(meta.depth));
521 ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
524 // roi-list version is much easier at the moment.
525 // All our outputs are vectors which don't have
526 // metadata at the moment - so just create a vector of
527 // "empty" array metadatas of the required size.
528 return cv::GMetaArgs(uu.params.output_names.size(),
529 cv::GMetaArg{cv::empty_array_desc()});
532 static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) {
533 // non-generic version for now:
534 // - assumes zero input is always ROI list
535 // - assumes all inputs/outputs are always Mats
536 GAPI_Assert(uu.params.num_in == 1); // roi list is not counted in net's inputs
538 const auto& in_roi_vec = ctx.inArg<cv::detail::VectorRef>(0u).rref<cv::Rect>();
539 const cv::Mat this_mat = ctx.inMat(1u);
540 // Since we do a ROI list inference, always assume our input buffer is image
541 IE::Blob::Ptr this_blob = wrapIE(this_mat, cv::gapi::ie::TraitAs::IMAGE);
543 // FIXME: This could be done ONCE at graph compile stage!
// Cache output dims and reset output vectors before the ROI loop.
544 std::vector< std::vector<int> > cached_dims(uu.params.num_out);
545 for (auto i : ade::util::iota(uu.params.num_out)) {
546 const IE::DataPtr& ie_out = uu.outputs.at(uu.params.output_names[i]);
547 cached_dims[i] = toCV(ie_out->getTensorDesc().getDims());
548 ctx.outVecR<cv::Mat>(i).clear();
549 // FIXME: Isn't this should be done automatically
550 // by some resetInternalData(), etc? (Probably at the GExecutor level)
// One synchronous inference per ROI: the ROI blob shares storage with
// the full-frame blob (no pixel copy on input).
553 for (const auto &rc : in_roi_vec) {
554 // FIXME: Assumed only 1 input
555 IE::Blob::Ptr roi_blob = IE::make_shared_blob(this_blob, toIE(rc));
556 iec.this_request.SetBlob(uu.params.input_names[0u], roi_blob);
557 iec.this_request.Infer();
559 // While input is fixed to be 1,
560 // there may be still multiple outputs
561 for (auto i : ade::util::iota(uu.params.num_out)) {
562 std::vector<cv::Mat> &out_vec = ctx.outVecR<cv::Mat>(i);
564 IE::Blob::Ptr out_blob = iec.this_request.GetBlob(uu.params.output_names[i]);
566 cv::Mat out_mat(cached_dims[i], toCV(out_blob->getTensorDesc().getPrecision()));
567 copyFromIE(out_blob, out_mat); // FIXME: Avoid data copy. Not sure if it is possible though
568 out_vec.push_back(std::move(out_mat));
// Kernel implementation for GInferList2: one full-frame image argument
// followed by equally-sized vectors of either cv::Rect (ROIs into that
// frame) or cv::Mat (pre-made blobs), one inference per list element.
574 struct InferList2: public cv::detail::KernelTag {
575 using API = cv::GInferList2Base;
576 static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); }
577 static KImpl kernel() { return KImpl{outMeta, run}; }
579 static cv::GMetaArgs outMeta(const ade::Graph &gr,
580 const ade::NodeHandle &nh,
581 const cv::GMetaArgs &in_metas,
582 const cv::GArgs &/*in_args*/) {
583 // Specify the input information to the IE from the framework
584 // NB: Have no clue if network's input [dimensions] may ever define
585 // its output dimensions. It seems possible with OpenCV DNN APIs
587 GConstGIEModel gm(gr);
588 const auto &uu = gm.metadata(nh).get<IEUnit>();
590 // Initialize input information
591 // Note our input layers list order matches the API order and so
// The "- 1u" accounts for the 0th full-frame image argument.
593 GAPI_Assert(uu.params.input_names.size() == (in_metas.size() - 1u)
594 && "Known input layers count doesn't match input meta count");
596 const auto &op = gm.metadata(nh).get<Op>();
598 // In contrast to InferList, the InferList2 has only one
599 // "full-frame" image argument, and all the rest are arrays of
600 // ether ROI or blobs. So here we set the 0th arg image format
601 // to all inputs which are ROI-based (skipping the
602 // "blob"-based ones)
603 // FIXME: this is filtering not done, actually! GArrayDesc has
604 // no hint for its underlying type!
605 const auto &mm_0 = in_metas[0u];
606 const auto &meta_0 = util::get<cv::GMatDesc>(mm_0);
607 GAPI_Assert( !meta_0.isND()
609 && "Only images are supported as the 0th argument");
610 std::size_t idx = 1u;
611 for (auto &&input_name : uu.params.input_names) {
612 auto &ii = uu.inputs.at(input_name);
613 const auto &mm = in_metas[idx];
614 GAPI_Assert(util::holds_alternative<cv::GArrayDesc>(mm)
615 && "Non-array inputs are not supported");
// The op's argument spec tells whether this array holds Rects or Mats.
617 if (op.k.inSpecs[idx] == cv::detail::ArgSpec::RECT) {
618 // This is a cv::Rect -- configure the IE preprocessing
619 ii->setPrecision(toIE(meta_0.depth));
620 ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
622 // This is a cv::GMat (equals to: cv::Mat)
623 // Just validate that it is really the type
624 // (other types are prohibited here)
625 GAPI_Assert(op.k.inSpecs[idx] == cv::detail::ArgSpec::GMAT);
627 idx++; // NB: Never forget to increment the counter
630 // roi-list version is much easier at the moment.
631 // All our outputs are vectors which don't have
632 // metadata at the moment - so just create a vector of
633 // "empty" array metadatas of the required size.
634 return cv::GMetaArgs(uu.params.output_names.size(),
635 cv::GMetaArg{cv::empty_array_desc()});
638 static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) {
639 GAPI_Assert(ctx.args.size() > 1u
640 && "This operation must have at least two arguments");
642 // Since we do a ROI list inference, always assume our input buffer is image
643 const cv::Mat mat_0 = ctx.inMat(0u);
644 IE::Blob::Ptr blob_0 = wrapIE(mat_0, cv::gapi::ie::TraitAs::IMAGE);
646 // Take the next argument, which must be vector (of any kind).
647 // Use it only to obtain the ROI list size (sizes of all other
648 // vectors must be equal to this one)
649 const auto list_size = ctx.inArg<cv::detail::VectorRef>(1u).size();
651 // FIXME: This could be done ONCE at graph compile stage!
// Cache output dims and reset output vectors before iterating the list.
652 std::vector< std::vector<int> > cached_dims(uu.params.num_out);
653 for (auto i : ade::util::iota(uu.params.num_out)) {
654 const IE::DataPtr& ie_out = uu.outputs.at(uu.params.output_names[i]);
655 cached_dims[i] = toCV(ie_out->getTensorDesc().getDims());
656 ctx.outVecR<cv::Mat>(i).clear();
657 // FIXME: Isn't this should be done automatically
658 // by some resetInternalData(), etc? (Probably at the GExecutor level)
661 // For every ROI in the list {{{
662 for (const auto &list_idx : ade::util::iota(list_size)) {
663 // For every input of the net {{{
664 for (auto in_idx : ade::util::iota(uu.params.num_in)) {
665 const auto &this_vec = ctx.inArg<cv::detail::VectorRef>(in_idx+1u);
666 GAPI_Assert(this_vec.size() == list_size);
668 IE::Blob::Ptr this_blob;
// Runtime dispatch: Rect vectors become ROI views into blob_0,
// Mat vectors are wrapped as standalone TENSOR blobs.
669 if (this_vec.spec() == cv::detail::TypeSpec::RECT) {
670 // ROI case - create an ROI blob
671 const auto &vec = this_vec.rref<cv::Rect>();
672 this_blob = IE::make_shared_blob(blob_0, toIE(vec[list_idx]));
673 } else if (this_vec.spec() == cv::detail::TypeSpec::MAT) {
674 // Mat case - create a regular blob
675 // FIXME: NOW Assume Mats are always BLOBS (not
677 const auto &vec = this_vec.rref<cv::Mat>();
678 const auto &mat = vec[list_idx];
679 this_blob = wrapIE(mat, cv::gapi::ie::TraitAs::TENSOR);
681 GAPI_Assert(false && "Only Rect and Mat types are supported for infer list 2!");
683 iec.this_request.SetBlob(uu.params.input_names[in_idx], this_blob);
684 // }}} (Preapre input)
685 } // }}} (For every input of the net)
687 // Run infer request {{{
688 iec.this_request.Infer();
689 // }}} (Run infer request)
691 // For every output of the net {{{
692 for (auto i : ade::util::iota(uu.params.num_out)) {
693 // Push results to the list {{{
694 std::vector<cv::Mat> &out_vec = ctx.outVecR<cv::Mat>(i);
695 IE::Blob::Ptr out_blob = iec.this_request.GetBlob(uu.params.output_names[i]);
696 cv::Mat out_mat(cached_dims[i], toCV(out_blob->getTensorDesc().getPrecision()));
697 copyFromIE(out_blob, out_mat); // FIXME: Avoid data copy. Not sure if it is possible though
698 out_vec.push_back(std::move(out_mat));
699 // }}} (Push results to the list)
700 } // }}} (For every output of the net)
701 } // }}} (For every ROI in the list)
710 // IE backend implementation of GBackend::Priv ///////////////////////
// Glue between the generic G-API backend interface and this IE backend:
// unpackKernel stores per-node IE metadata; compile builds the executable;
// auxiliaryKernels exposes the three infer kernel implementations above.
712 class GIEBackendImpl final: public cv::gapi::GBackend::Priv {
713 virtual void unpackKernel(ade::Graph &gr,
714 const ade::NodeHandle &nh,
715 const cv::GKernelImpl &ii) override {
716 using namespace cv::gimpl;
717 // FIXME: Introduce a DNNBackend interface which'd specify
718 // the framework for this???
// Extract the user's ParamDesc and the kernel impl from opaque any's,
// then attach IEUnit/IECallable/CustomMetaFunction metadata to the node.
720 const auto &np = gm.metadata(nh).get<NetworkParams>();
721 const auto &pp = cv::util::any_cast<cv::gapi::ie::detail::ParamDesc>(np.opaque);
722 const auto &ki = cv::util::any_cast<KImpl>(ii.opaque);
723 gm.metadata(nh).set(IEUnit{pp});
724 gm.metadata(nh).set(IECallable{ki.run});
725 gm.metadata(nh).set(CustomMetaFunction{ki.customMetaFunc});
728 virtual EPtr compile(const ade::Graph &graph,
729 const cv::GCompileArgs &,
730 const std::vector<ade::NodeHandle> &nodes) const override {
731 return EPtr{new cv::gimpl::ie::GIEExecutable(graph, nodes)};
734 virtual cv::gapi::GKernelPackage auxiliaryKernels() const override {
735 return cv::gapi::kernels< cv::gimpl::ie::Infer
736 , cv::gimpl::ie::InferList
737 , cv::gimpl::ie::InferList2
// Public entry point: returns the process-wide IE backend singleton
// (function-local static ensures single, thread-safe initialization).
// NOTE(review): the return statement is elided in this view.
743 cv::gapi::GBackend cv::gapi::ie::backend() {
744 static cv::gapi::GBackend this_backend(std::make_shared<GIEBackendImpl>());
// Test/utility helper: view an IE blob as a cv::Mat WITHOUT copying --
// the Mat aliases the blob's buffer, so the blob must outlive the Mat.
748 cv::Mat cv::gapi::ie::util::to_ocv(IE::Blob::Ptr blob) {
749 const auto& tdesc = blob->getTensorDesc();
750 return cv::Mat(toCV(tdesc.getDims()),
751 toCV(tdesc.getPrecision()),
752 blob->buffer().as<uint8_t*>());
755 std::vector<int> cv::gapi::ie::util::to_ocv(const IE::SizeVector &dims) {
// Test/utility helper: wrap a cv::Mat as an IE blob (IMAGE trait, no copy).
759 IE::Blob::Ptr cv::gapi::ie::util::to_ie(cv::Mat &blob) {
760 return wrapIE(blob, cv::gapi::ie::TraitAs::IMAGE);
763 #else // HAVE_INF_ENGINE
// Stub used when G-API is built without OpenVINO: keeps the symbol so
// linking succeeds, but any attempt to use the backend throws at runtime.
765 cv::gapi::GBackend cv::gapi::ie::backend() {
766 // Still provide this symbol to avoid linking issues
767 util::throw_error(std::runtime_error("G-API has been compiled without OpenVINO IE support"));
769 #endif // HAVE_INF_ENGINE