cv::optional<cv::gapi::wip::onevpl::Device> vpl_preproc_device;
cv::optional<cv::gapi::wip::onevpl::Context> vpl_preproc_ctx;
+
+ enum InferMode {Sync, Async};
+ InferMode mode;
};
} // namespace detail
, {}
, {}
, {}
- , {}} {
+ , {}
+ , detail::ParamDesc::InferMode::Async} {
};
/** @overload
, {}
, {}
, {}
- , {}} {
+ , {}
+ , detail::ParamDesc::InferMode::Async} {
};
/** @brief Specifies sequence of network input layers names for inference.
return *this;
}
+ /** @brief Specifies which API will be used to run inference.
+
+ The function is used to specify the inference mode for the OpenVINO backend.
+ OpenVINO provides two ways to run inference:
+ 1. Asynchronous (using StartAsync: https://docs.openvino.ai/latest/classInferenceEngine_1_1InferRequest.html#doxid-class-inference-engine-1-1-infer-request-1a405293e8423d82a5b45f642a3bef0d24)
+ 2. Synchronous (using Infer: https://docs.openvino.ai/latest/classInferenceEngine_1_1InferRequest.html#doxid-class-inference-engine-1-1-infer-request-1a3391ce30894abde730523e9ca9371ce8)
+ By default, the asynchronous mode is used.
+
+ @param mode The inference mode to be used.
+ @return reference to this parameter structure.
+ */
+ Params<Net>& cfgInferMode(detail::ParamDesc::InferMode mode) {
+ desc.mode = mode;
+ return *this;
+ }
+
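A minimal usage sketch of the new option (the network type MyNet, model/weights paths, and device below are illustrative assumptions, not part of this change):

    #include <opencv2/gapi.hpp>
    #include <opencv2/gapi/infer.hpp>
    #include <opencv2/gapi/infer/ie.hpp>

    // Hypothetical network declaration for illustration only.
    G_API_NET(MyNet, <cv::GMat(cv::GMat)>, "sample.my-net");

    int main() {
        // Run this network synchronously; Sync mode works only with a single
        // infer request, so keep nireq at its default of 1 (set explicitly here).
        auto net = cv::gapi::ie::Params<MyNet>{"model.xml", "model.bin", "CPU"}
                       .cfgInferMode(cv::gapi::ie::detail::ParamDesc::InferMode::Sync)
                       .cfgNumRequests(1u);
        // `net` is then passed to the graph via cv::compile_args(cv::gapi::networks(net)).
        return 0;
    }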
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::ie::backend(); }
std::string tag() const { return Net::tag(); }
const std::string &device)
: desc{ model, weights, device, {}, {}, {}, 0u, 0u,
detail::ParamDesc::Kind::Load, true, {}, {}, {}, 1u,
- {}, {}, {}, {}},
+ {}, {}, {}, {},
+ detail::ParamDesc::InferMode::Async },
m_tag(tag) {
};
const std::string &device)
: desc{ model, {}, device, {}, {}, {}, 0u, 0u,
detail::ParamDesc::Kind::Import, true, {}, {}, {}, 1u,
- {}, {}, {}, {}},
+ {}, {}, {}, {},
+ detail::ParamDesc::InferMode::Async },
m_tag(tag) {
};
return *this;
}
+ /** @see ie::Params::cfgInferMode */
+ Params& cfgInferMode(detail::ParamDesc::InferMode mode) {
+ desc.mode = mode;
+ return *this;
+ }
+
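A similar sketch for the generic (tag-based) parameters; the tag and file paths are illustrative assumptions:

    #include <opencv2/gapi/infer/ie.hpp>

    // Keep the default asynchronous mode, set explicitly for a generic network.
    auto generic_net = cv::gapi::ie::Params<cv::gapi::Generic>{
        "sample.my-net", "model.xml", "model.bin", "CPU"
    }.cfgInferMode(cv::gapi::ie::detail::ParamDesc::InferMode::Async);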
// BEGIN(G-API's network parametrization API)
GBackend backend() const { return cv::gapi::ie::backend(); }
std::string tag() const { return m_tag; }
params.vpl_preproc_ctx.value());
        GAPI_LOG_INFO(nullptr, "VPP preproc created successfully");
}
+
+ if (params.mode == cv::gapi::ie::detail::ParamDesc::InferMode::Sync &&
+ params.nireq != 1u) {
+ throw std::logic_error(
+ "Failed: ParamDesc::InferMode::Sync works only with nireq equal to 1.");
+ }
}
// This method is [supposed to be] called at Island compilation stage
class cv::gimpl::ie::RequestPool {
public:
- explicit RequestPool(std::vector<InferenceEngine::InferRequest>&& requests);
+ explicit RequestPool(cv::gapi::ie::detail::ParamDesc::InferMode mode,
+ std::vector<InferenceEngine::InferRequest>&& requests);
IInferExecutor::Ptr getIdleRequest();
void waitAll();
}
// RequestPool implementation //////////////////////////////////////////////
-cv::gimpl::ie::RequestPool::RequestPool(std::vector<InferenceEngine::InferRequest>&& requests) {
+cv::gimpl::ie::RequestPool::RequestPool(cv::gapi::ie::detail::ParamDesc::InferMode mode,
+ std::vector<InferenceEngine::InferRequest>&& requests) {
for (size_t i = 0; i < requests.size(); ++i) {
- m_requests.emplace_back(
- std::make_shared<AsyncInferExecutor>(std::move(requests[i]),
- std::bind(&RequestPool::release, this, i)));
+ IInferExecutor::Ptr iexec = nullptr;
+ switch (mode) {
+ case cv::gapi::ie::detail::ParamDesc::InferMode::Async:
+ iexec = std::make_shared<AsyncInferExecutor>(std::move(requests[i]),
+ std::bind(&RequestPool::release, this, i));
+ break;
+ case cv::gapi::ie::detail::ParamDesc::InferMode::Sync:
+ iexec = std::make_shared<SyncInferExecutor>(std::move(requests[i]),
+ std::bind(&RequestPool::release, this, i));
+ break;
+ default:
+ GAPI_Assert(false && "Unsupported ParamDesc::InferMode");
+ }
+ m_requests.emplace_back(std::move(iexec));
}
setup();
}
if (this_nh == nullptr) {
this_nh = nh;
this_iec = iem.metadata(this_nh).get<IEUnit>().compile();
- m_reqPool.reset(new RequestPool(this_iec.createInferRequests()));
+ m_reqPool.reset(new RequestPool(this_iec.params.mode, this_iec.createInferRequests()));
}
else
util::throw_error(std::logic_error("Multi-node inference is not supported!"));