// ! [executable_network:create_infer_request_impl]
// ! [executable_network:create_infer_request]
-void TemplatePlugin::ExecutableNetwork::CreateInferRequest(IInferRequest::Ptr& asyncRequest) {
+IInferRequest::Ptr TemplatePlugin::ExecutableNetwork::CreateInferRequest() {
+ IInferRequest::Ptr asyncRequest;
auto internalRequest = CreateInferRequestImpl(_networkInputs, _networkOutputs);
auto asyncThreadSafeImpl = std::make_shared<TemplateAsyncInferRequest>(std::static_pointer_cast<TemplateInferRequest>(internalRequest),
_taskExecutor, _plugin->_waitExecutor, _callbackExecutor);
asyncRequest.reset(new InferenceEngine::InferRequestBase<TemplateAsyncInferRequest>(asyncThreadSafeImpl),
[](InferenceEngine::IInferRequest *p) { p->Release(); });
asyncThreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
+ return asyncRequest;
}
// ! [executable_network:create_infer_request]
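// Usage sketch (illustrative, not part of the patch): callers now receive the request
// directly instead of passing an output argument, e.g.
//   InferenceEngine::IInferRequest::Ptr request = exeNetworkInternal->CreateInferRequest();
// where exeNetworkInternal stands for any implementation of the internal executable
// network interface; errors are reported by throwing instead of via ResponseDesc.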
// ! [executable_network:get_config]
-void TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name, Parameter &result, ResponseDesc *resp) const {
- result = _cfg.Get(name);
+Parameter TemplatePlugin::ExecutableNetwork::GetConfig(const std::string &name) const {
+ return _cfg.Get(name);
}
// ! [executable_network:get_config]
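// Usage sketch (illustrative, not part of the patch): the returned Parameter is
// converted by the caller, e.g.
//   std::string deviceId = exeNetwork->GetConfig(CONFIG_KEY(DEVICE_ID)).as<std::string>();
// where exeNetwork is any executable network object and CONFIG_KEY(DEVICE_ID) stands
// in for whichever keys the plugin Configuration actually supports.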
// ! [executable_network:get_metric]
-void TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *) const {
+InferenceEngine::Parameter TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name) const {
// TODO: return more supported values for metrics
if (METRIC_KEY(SUPPORTED_METRICS) == name) {
- result = IE_SET_METRIC(SUPPORTED_METRICS, std::vector<std::string>{
+ IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector<std::string>{
METRIC_KEY(NETWORK_NAME),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(SUPPORTED_CONFIG_KEYS),
for (auto&& configKey : streamExecutorConfigKeys) {
configKeys.emplace_back(configKey);
}
- result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, configKeys);
+ IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
} else if (METRIC_KEY(NETWORK_NAME) == name) {
auto networkName = _function->get_friendly_name();
- result = IE_SET_METRIC(NETWORK_NAME, networkName);
+ IE_SET_METRIC_RETURN(NETWORK_NAME, networkName);
} else if (METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS) == name) {
unsigned int value = _cfg._streamsExecutorConfig._streams;
- result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, value);
+ IE_SET_METRIC_RETURN(OPTIMAL_NUMBER_OF_INFER_REQUESTS, value);
} else {
THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork metric: " << name;
}
void ExportImpl(std::ostream& model) override;
InferenceEngine::InferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
- void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override;
- void GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
- void GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
+ InferenceEngine::IInferRequest::Ptr CreateInferRequest() override;
+ InferenceEngine::Parameter GetMetric(const std::string &name) const override;
+ InferenceEngine::Parameter GetConfig(const std::string &name) const override;
private:
friend class TemplateInferRequest;
// ! [plugin:import_network_impl]
// ! [plugin:query_network]
-void Plugin::QueryNetwork(const ICNNNetwork &network, const ConfigMap& config, QueryNetworkResult &res) const {
+QueryNetworkResult Plugin::QueryNetwork(const ICNNNetwork &network, const ConfigMap& config) const {
+ QueryNetworkResult res;
Configuration cfg{config, _cfg, false};
+
auto function = network.getFunction();
if (function == nullptr) {
THROW_IE_EXCEPTION << "Template Plugin supports only ngraph cnn network representation";
res.supportedLayersMap.emplace(layerName, GetName());
}
}
+
+ return res;
}
// ! [plugin:query_network]
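// Usage sketch (illustrative, not part of the patch): the result is now returned by
// value, so a caller can inspect the supported layers directly, e.g.
//   InferenceEngine::QueryNetworkResult res = plugin->QueryNetwork(network, config);
//   for (auto&& entry : res.supportedLayersMap) {
//       std::cout << entry.first << " is supported by " << entry.second << std::endl;
//   }
// where plugin, network and config stand for any plugin instance and its inputs.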
~Plugin() override;
void SetConfig(const std::map<std::string, std::string> &config) override;
- void QueryNetwork(const InferenceEngine::ICNNNetwork &network,
- const std::map<std::string, std::string>& config,
- InferenceEngine::QueryNetworkResult &res) const override;
+ InferenceEngine::QueryNetworkResult
+ QueryNetwork(const InferenceEngine::ICNNNetwork &network,
+ const std::map<std::string, std::string>& config) const override;
InferenceEngine::ExecutableNetworkInternal::Ptr
LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork &network,
const std::map<std::string, std::string> &config) override;
_impl->m_config.UpdateFromMap(config);
}
-void clDNNEngine::QueryNetwork(const ICNNNetwork& network,
- const std::map<std::string,
- std::string>& config,
- QueryNetworkResult& res) const {
+QueryNetworkResult clDNNEngine::QueryNetwork(const ICNNNetwork& network,
+ const std::map<std::string, std::string>& config) const {
+ QueryNetworkResult res;
GetDeviceInfo(config); // Verify device id
auto function = network.getFunction();
if (function != nullptr) {
}
}
}
+
+ return res;
}
Parameter clDNNEngine::GetConfig(const std::string& name, const std::map<std::string, Parameter>& /*options*/) const {
void SetConfig(const std::map<std::string, std::string> &config) override;
InferenceEngine::Parameter GetConfig(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const override;
InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const override;
- void QueryNetwork(const InferenceEngine::ICNNNetwork& network,
- const std::map<std::string, std::string>& config, InferenceEngine::QueryNetworkResult& res) const override;
+ InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::ICNNNetwork& network,
+ const std::map<std::string, std::string>& config) const override;
InferenceEngine::RemoteContext::Ptr CreateContext(const InferenceEngine::ParamMap& params) override;
InferenceEngine::RemoteContext::Ptr GetDefaultContext() override;
return ptr;
}
-void CLDNNExecNetwork::CreateInferRequest(IInferRequest::Ptr &asyncRequest) {
- auto syncRequestImpl = this->CreateInferRequestImpl(_networkInputs, _networkOutputs);
- syncRequestImpl->setPointerToExecutableNetworkInternal(shared_from_this());
-
- auto asyncTreadSafeImpl = std::make_shared<CLDNNAsyncInferRequest>(syncRequestImpl, _taskExecutor, _callbackExecutor);
-
- asyncRequest.reset(new InferRequestBase<CLDNNAsyncInferRequest>(asyncTreadSafeImpl), [](IInferRequest *p) { p->Release(); });
- asyncTreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
+IInferRequest::Ptr CLDNNExecNetwork::CreateInferRequest() {
+ return CreateAsyncInferRequestFromSync<CLDNNAsyncInferRequest>();
}
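// CreateAsyncInferRequestFromSync<CLDNNAsyncInferRequest>() is the shared helper on
// ExecutableNetworkThreadSafeDefault (defined later in this patch): it builds the synchronous
// request via CreateInferRequestImpl, wraps it in the given asynchronous request type and
// exposes it through InferRequestBase.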
-void CLDNNExecNetwork::GetExecGraphInfo(InferenceEngine::ICNNNetwork::Ptr &graphPtr) {
+InferenceEngine::CNNNetwork CLDNNExecNetwork::GetExecGraphInfo() {
if (m_graphs.empty())
THROW_IE_EXCEPTION << NETWORK_NOT_LOADED_str;
- m_graphs.front()->GetExecGraphInfo(graphPtr);
+ return m_graphs.front()->GetExecGraphInfo();
}
-void CLDNNExecNetwork::GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const {
- auto option = m_config.key_config_map.find(name);
- if (option != m_config.key_config_map.end()) {
- result = option->second;
+InferenceEngine::Parameter CLDNNExecNetwork::GetConfig(const std::string &name) const {
+ auto it = m_config.key_config_map.find(name);
+ if (it != m_config.key_config_map.end()) {
+ return it->second;
} else {
THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork config key: " << name;
}
}
-void CLDNNExecNetwork::GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const {
+InferenceEngine::Parameter CLDNNExecNetwork::GetMetric(const std::string &name) const {
if (name == METRIC_KEY(NETWORK_NAME)) {
IE_ASSERT(!m_graphs.empty());
- result = IE_SET_METRIC(NETWORK_NAME, m_graphs[0]->getName());
+ IE_SET_METRIC_RETURN(NETWORK_NAME, m_graphs[0]->getName());
} else if (name == METRIC_KEY(SUPPORTED_METRICS)) {
std::vector<std::string> metrics;
metrics.push_back(METRIC_KEY(NETWORK_NAME));
metrics.push_back(METRIC_KEY(SUPPORTED_METRICS));
metrics.push_back(METRIC_KEY(SUPPORTED_CONFIG_KEYS));
metrics.push_back(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS));
- result = IE_SET_METRIC(SUPPORTED_METRICS, metrics);
+ IE_SET_METRIC_RETURN(SUPPORTED_METRICS, metrics);
} else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
std::vector<std::string> configKeys;
for (auto && value : m_config.key_config_map)
configKeys.push_back(value.first);
- result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, configKeys);
+ IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
} else if (name == METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)) {
unsigned int nr = m_config.throughput_streams * 2u;
- result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, nr);
+ IE_SET_METRIC_RETURN(OPTIMAL_NUMBER_OF_INFER_REQUESTS, nr);
} else {
THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork metric: " << name;
}
}
-void CLDNNExecNetwork::GetContext(RemoteContext::Ptr &pContext, ResponseDesc *resp) const {
- pContext = m_context;
+RemoteContext::Ptr CLDNNExecNetwork::GetContext() const {
+ return m_context;
}
}; // namespace CLDNNPlugin
explicit CLDNNExecNetwork(InferenceEngine::ICNNNetwork &network, RemoteContext::Ptr context, Config config);
- void GetExecGraphInfo(InferenceEngine::ICNNNetwork::Ptr &graphPtr) override;
- void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override;
+ InferenceEngine::CNNNetwork GetExecGraphInfo() override;
+ InferenceEngine::IInferRequest::Ptr CreateInferRequest() override;
InferenceEngine::InferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
- void GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
- void GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
- void GetContext(RemoteContext::Ptr &pContext, ResponseDesc *resp) const override;
+ InferenceEngine::Parameter GetMetric(const std::string &name) const override;
+ InferenceEngine::Parameter GetConfig(const std::string &name) const override;
+ RemoteContext::Ptr GetContext() const override;
std::vector<std::shared_ptr<CLDNNGraph>> m_graphs;
return network;
}
-InferenceEngine::ICNNNetwork::Ptr CLDNNGraph::GetExecGraphInfoByPrimitivesInfo(std::vector<cldnn::primitive_info>& primitives_info,
+InferenceEngine::CNNNetwork CLDNNGraph::GetExecGraphInfoByPrimitivesInfo(std::vector<cldnn::primitive_info>& primitives_info,
bool filter_const_primitives) {
if (m_config.useProfiling) {
try {
return net;
}
-void CLDNNGraph::GetExecGraphInfo(InferenceEngine::ICNNNetwork::Ptr &graphPtr) {
+InferenceEngine::CNNNetwork CLDNNGraph::GetExecGraphInfo() {
auto primitives_info = GetNetwork()->get_primitives_info();
- graphPtr = GetExecGraphInfoByPrimitivesInfo(primitives_info, true);
+ return GetExecGraphInfoByPrimitivesInfo(primitives_info, true);
}
explicit CLDNNGraph(InferenceEngine::ICNNNetwork& network, gpu::ClContext::Ptr context, Config config, uint16_t stream_id = 0);
explicit CLDNNGraph(std::shared_ptr<CLDNNGraph> graph, uint16_t stream_id = 0);
- void GetExecGraphInfo(InferenceEngine::ICNNNetwork::Ptr& graphPtr);
+ InferenceEngine::CNNNetwork GetExecGraphInfo();
bool IsLoaded() const;
void Build();
void UpdateLayersMaps();
void UpdateImplementationsMap();
- InferenceEngine::ICNNNetwork::Ptr GetExecGraphInfoByPrimitivesInfo(std::vector<cldnn::primitive_info>& pi,
- bool filter_const_primitives = true);
+ InferenceEngine::CNNNetwork GetExecGraphInfoByPrimitivesInfo(std::vector<cldnn::primitive_info>& pi,
+ bool filter_const_primitives = true);
};
} // namespace CLDNNPlugin
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
}
- void SetConfig(const std::map<std::string, InferenceEngine::Parameter>& config,
- InferenceEngine::ResponseDesc* /* resp */) override {
+ void SetConfig(const std::map<std::string, InferenceEngine::Parameter>& config) override {
using namespace InferenceEngine::GNAConfigParams;
if (config.empty()) {
THROW_IE_EXCEPTION << "The list of configuration values is empty";
}
}
- InferenceEngine::Parameter old_mode_parameter;
- GetConfig(KEY_GNA_DEVICE_MODE, old_mode_parameter, {});
+ InferenceEngine::Parameter old_mode_parameter = GetConfig(KEY_GNA_DEVICE_MODE);
auto old_mode = old_mode_parameter.as<std::string>();
if (old_mode == InferenceEngine::GNAConfigParams::GNA_SW_FP32) {
THROW_IE_EXCEPTION << "Dynamic switching from GNA_SW_FP32 mode is not supported for ExecutableNetwork.";
plg->SetConfig(configForPlugin);
}
- void GetConfig(const std::string &name,
- InferenceEngine::Parameter &result,
- InferenceEngine::ResponseDesc* /*resp*/) const override {
- result = plg->GetConfig(name, {});
+ InferenceEngine::Parameter GetConfig(const std::string &name) const override {
+ return plg->GetConfig(name, {});
}
- void GetMetric(const std::string& name,
- InferenceEngine::Parameter& result,
- InferenceEngine::ResponseDesc* /* resp */) const override {
- result = plg->GetMetric(name, {});
+ InferenceEngine::Parameter GetMetric(const std::string& name) const override {
+ return plg->GetMetric(name, {});
}
};
}
/**
- * @brief methods with _ThreadUnsafe prefix are to implement in plugins
- * or in default wrapper (e.g. AsyncInferRequestThreadSafeDefault)
- */
+ * @brief Methods with the _ThreadUnsafe suffix are to be implemented in plugins
+ * or in the default wrapper (e.g. AsyncInferRequestThreadSafeDefault)
+ */
void StartAsyncImpl() override {
// execute input pre-processing.
execDataPreprocessing(_inputs);
_pluginName = pluginName;
}
-InferenceEngine::IExecutableNetwork::Ptr GNAPlugin::ImportNetwork(std::istream& networkModel) {
+InferenceEngine::ExecutableNetwork GNAPlugin::ImportNetwork(std::istream& networkModel) {
auto header = GNAModelSerial::ReadHeader(networkModel);
InitGNADevice();
#if GNA_LIB_VER == 2
createRequestConfigsForGnaModels();
#endif
- return nullptr;
+ return {};
}
void GNAPlugin::Export(const std::string &fileName) {
*gnaFlags = config.gnaFlags;
}
-void GNAPlugin::QueryNetwork(const InferenceEngine::ICNNNetwork& network,
- const std::map<std::string, std::string>& config,
- InferenceEngine::QueryNetworkResult& res) const {
+InferenceEngine::QueryNetworkResult GNAPlugin::QueryNetwork(const InferenceEngine::ICNNNetwork& network,
+ const std::map<std::string, std::string>& config) const {
+ InferenceEngine::QueryNetworkResult res;
+
if (network.getFunction()) {
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str << " ngraph::Function is not supported natively";
}
res.supportedLayersMap.insert({ layer->name, GetName() });
}
}, false);
+
+ return res;
}
void SetConfig(const std::map<std::string, std::string> &config) override;
InferenceEngine::ExecutableNetwork LoadNetwork(const InferenceEngine::ICNNNetwork &network,
- const std::map<std::string, std::string> &config_map) override {
- THROW_GNA_EXCEPTION << "Not implemented";
- }
+ const std::map<std::string, std::string> &config_map) override { THROW_GNA_EXCEPTION << "Not implemented"; }
InferenceEngine::ExecutableNetwork LoadNetwork(const InferenceEngine::ICNNNetwork &network,
const std::map<std::string, std::string> &config_map,
InferenceEngine::RemoteContext::Ptr context) override { THROW_GNA_EXCEPTION << "Not implemented"; }
void SetCore(InferenceEngine::ICore*) noexcept override {}
InferenceEngine::ICore* GetCore() const noexcept override {return nullptr;}
void Reset();
- void QueryNetwork(const InferenceEngine::ICNNNetwork &network,
- const std::map<std::string, std::string>& config,
- InferenceEngine::QueryNetworkResult &res) const override;
+ InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::ICNNNetwork &network,
+ const std::map<std::string, std::string>& config) const override;
uint32_t QueueInference(const InferenceEngine::BlobMap &input, InferenceEngine::BlobMap &result);
bool Wait(uint32_t idx);
GnaWaitStatus WaitFor(uint32_t idx, int64_t millisTimeout);
void Wait(uint32_t sync, InferenceEngine::Blob &result) { THROW_GNA_EXCEPTION << "Not implemented"; }
void Export(const std::string &fileName);
- InferenceEngine::IExecutableNetwork::Ptr ImportNetwork(const std::string &modelFileName,
- const std::map<std::string, std::string> &config) override {
+ InferenceEngine::ExecutableNetwork ImportNetwork(const std::string &modelFileName,
+ const std::map<std::string, std::string> &config) override {
THROW_GNA_EXCEPTION << "Not implemented";
}
InferenceEngine::ExecutableNetwork ImportNetwork(std::istream& networkModel,
THROW_GNA_EXCEPTION << "Not implemented";
}
- InferenceEngine::IExecutableNetwork::Ptr ImportNetwork(std::istream& networkModel);
+ InferenceEngine::ExecutableNetwork ImportNetwork(std::istream& networkModel);
/**
* utility to provide input and output blobs externally to be used by InferenceEngine request API clients
defaultConfig.UpdateFromMap(config);
}
- InferenceEngine::IExecutableNetwork::Ptr ImportNetwork(
+ InferenceEngine::ExecutableNetwork ImportNetwork(
const std::string &modelFileName,
const std::map<std::string, std::string> &config) override {
Config updated_config(defaultConfig);
return GetCurrentPlugin()->GetName();
}
- void QueryNetwork(const InferenceEngine::ICNNNetwork& network,
- const std::map<std::string, std::string>& config,
- InferenceEngine::QueryNetworkResult& res) const override {
+ InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::ICNNNetwork& network,
+ const std::map<std::string, std::string>& config) const override {
auto plg = GetCurrentPlugin();
try {
plg->SetConfig(config);
} catch (InferenceEngine::details::InferenceEngineException) {}
- plg->QueryNetwork(network, config, res);
+ return plg->QueryNetwork(network, config);
}
InferenceEngine::Parameter GetMetric(const std::string& name,
using namespace HeteroPlugin;
using namespace InferenceEngine;
-HeteroAsyncInferRequest::HeteroAsyncInferRequest(const HeteroInferRequest::Ptr& request,
- const ITaskExecutor::Ptr& taskExecutor,
- const ITaskExecutor::Ptr& callbackExecutor) :
+HeteroAsyncInferRequest::HeteroAsyncInferRequest(const InferRequestInternal::Ptr& request,
+ const ITaskExecutor::Ptr& taskExecutor,
+ const ITaskExecutor::Ptr& callbackExecutor) :
AsyncInferRequestThreadSafeDefault(request, taskExecutor, callbackExecutor),
- _heteroInferRequest(request),
+ _heteroInferRequest(std::static_pointer_cast<HeteroInferRequest>(request)),
_statusCodes{_heteroInferRequest->_inferRequests.size(), StatusCode::OK} {
_pipeline.clear();
for (std::size_t requestId = 0; requestId < _heteroInferRequest->_inferRequests.size(); ++requestId) {
class HeteroAsyncInferRequest : public InferenceEngine::AsyncInferRequestThreadSafeDefault {
public:
using Ptr = std::shared_ptr<HeteroAsyncInferRequest>;
- HeteroAsyncInferRequest(const HeteroInferRequest::Ptr& request,
- const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
- const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
+ HeteroAsyncInferRequest(const InferenceEngine::InferRequestInternal::Ptr& request,
+ const InferenceEngine::ITaskExecutor::Ptr& taskExecutor,
+ const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor);
~HeteroAsyncInferRequest() override;
void StartAsync_ThreadUnsafe() override;
InferenceEngine::StatusCode Wait(int64_t millis_timeout) override;
if (queryNetworkResult.supportedLayersMap.empty()) {
auto it = _config.find("TARGET_FALLBACK");
if (it != _config.end()) {
- _heteroPlugin->QueryNetwork(network_, _config, queryNetworkResult);
+ queryNetworkResult = _heteroPlugin->QueryNetwork(network_, _config);
} else {
THROW_IE_EXCEPTION << "The 'TARGET_FALLBACK' option was not defined for heterogeneous plugin";
}
_blobNameMap);
}
-void HeteroExecutableNetwork::CreateInferRequest(IInferRequest::Ptr &asyncRequest) {
- auto heteroInferRequest = std::dynamic_pointer_cast<HeteroInferRequest>(
- CreateInferRequestImpl(_networkInputs, _networkOutputs));
- heteroInferRequest->setPointerToExecutableNetworkInternal(shared_from_this());
- auto asyncThreadSafeImpl = std::make_shared<HeteroAsyncInferRequest>(heteroInferRequest, _taskExecutor, _callbackExecutor);
- asyncRequest.reset(new InferRequestBase<HeteroAsyncInferRequest>(asyncThreadSafeImpl),
- [](IInferRequest *p) { p->Release(); });
- asyncThreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
+IInferRequest::Ptr HeteroExecutableNetwork::CreateInferRequest() {
+ return CreateAsyncInferRequestFromSync<HeteroAsyncInferRequest>();
}
-void HeteroExecutableNetwork::GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *) const {
+InferenceEngine::Parameter HeteroExecutableNetwork::GetConfig(const std::string &name) const {
+ InferenceEngine::Parameter result;
if (name == "TARGET_FALLBACK") {
auto it = _config.find(name);
if (it != _config.end()) {
auto param = execNetwork.GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS));
for (auto && configKey : param.as<std::vector<std::string>>()) {
if (configKey == name) {
- result = execNetwork.GetConfig(configKey);
- return;
+ return execNetwork.GetConfig(configKey);
}
}
}
THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork config key: " << name;
}
+
+ return result;
}
using Metrics = std::map<std::string, Parameter>;
} // namespace
-void HeteroExecutableNetwork::GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *) const {
+InferenceEngine::Parameter HeteroExecutableNetwork::GetMetric(const std::string &name) const {
if (METRIC_KEY(SUPPORTED_METRICS) == name) {
std::vector<std::string> heteroMetrics = {
METRIC_KEY(NETWORK_NAME),
collectPluginMetrics(heteroMetrics, pluginMetrics);
}
- result = IE_SET_METRIC(SUPPORTED_METRICS, heteroMetrics);
+ IE_SET_METRIC_RETURN(SUPPORTED_METRICS, heteroMetrics);
} else if (METRIC_KEY(SUPPORTED_CONFIG_KEYS) == name) {
std::vector<std::string> heteroConfigKeys = {
"TARGET_FALLBACK",
collectPluginMetrics(heteroConfigKeys, pluginConfigKeys);
}
- result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, heteroConfigKeys);
+ IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, heteroConfigKeys);
} else if (METRIC_KEY(NETWORK_NAME) == name) {
- result = IE_SET_METRIC(NETWORK_NAME, _name);
+ IE_SET_METRIC_RETURN(NETWORK_NAME, _name);
} else if (METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS) == name) {
unsigned int value = 0u;
for (auto&& desc : networks) {
value = std::max(value, desc._network.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>());
}
- result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, value);
+ IE_SET_METRIC_RETURN(OPTIMAL_NUMBER_OF_INFER_REQUESTS, value);
} else {
// find metric key among plugin metrics
for (auto&& desc : networks) {
auto param = execNetwork.GetMetric(METRIC_KEY(SUPPORTED_METRICS));
for (auto && metricKey : param.as<std::vector<std::string>>()) {
if (metricKey == name) {
- result = execNetwork.GetMetric(metricKey);
- return;
+ return execNetwork.GetMetric(metricKey);
}
}
}
InferenceEngine::InferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
- void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override;
- void GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
- void GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
+ InferenceEngine::IInferRequest::Ptr CreateInferRequest() override;
+ InferenceEngine::Parameter GetConfig(const std::string &name) const override;
+ InferenceEngine::Parameter GetMetric(const std::string &name) const override;
void ExportImpl(std::ostream& modelFile) override;
}
void Engine::SetAffinity(InferenceEngine::ICNNNetwork &network, const Configs &config) {
- QueryNetworkResult qr;
- QueryNetwork(network, config, qr);
+ QueryNetworkResult qr = QueryNetwork(network, config);
details::CNNNetworkIterator i(&network);
while (i != details::CNNNetworkIterator()) {
}
}
-void Engine::QueryNetwork(const ICNNNetwork &network, const Configs& config, QueryNetworkResult &qr) const {
+QueryNetworkResult Engine::QueryNetwork(const ICNNNetwork &network, const Configs& config) const {
+ QueryNetworkResult qr;
+
if (GetCore() == nullptr) {
THROW_IE_EXCEPTION << "Please, work with HETERO device via InferencEngine::Core object";
}
// set OK status
qr.rc = StatusCode::OK;
+
+ return qr;
}
Parameter Engine::GetMetric(const std::string& name, const std::map<std::string, Parameter> & /*options*/) const {
void SetConfig(const Configs &config) override;
- void QueryNetwork(const InferenceEngine::ICNNNetwork &network,
- const Configs& config, InferenceEngine::QueryNetworkResult &res) const override;
+ InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::ICNNNetwork &network,
+ const Configs& config) const override;
InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string,
InferenceEngine::Parameter> & options) const override;
QueryNetworkResult QueryNetwork(const ICNNNetwork& network, const std::string& deviceName,
const std::map<std::string, std::string>& config) const override {
- QueryNetworkResult res;
auto parsed = parseDeviceNameIntoConfig(deviceName, config);
- GetCPPPluginByName(parsed._deviceName).QueryNetwork(network, parsed._config, res);
- return res;
+ return GetCPPPluginByName(parsed._deviceName).QueryNetwork(network, parsed._config);
}
Parameter GetMetric(const std::string& deviceName, const std::string& name) const override {
CALL_STATEMENT(return ExecutableNetwork(actual->ImportNetwork(modelFileName, config), actual));
}
- void QueryNetwork(const ICNNNetwork& network, const std::map<std::string, std::string>& config,
- QueryNetworkResult& res) const {
- CALL_STATEMENT(actual->QueryNetwork(network, config, res));
+ QueryNetworkResult QueryNetwork(const ICNNNetwork& network,
+ const std::map<std::string, std::string>& config) const {
+ QueryNetworkResult res;
+ CALL_STATEMENT(res = actual->QueryNetwork(network, config));
if (res.rc != OK) THROW_IE_EXCEPTION << res.resp.msg;
+ return res;
}
ExecutableNetwork ImportNetwork(std::istream& networkModel,
}
}
-void MKLDNNExecNetwork::CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) {
- auto syncRequestImpl = CreateInferRequestImpl(_networkInputs, _networkOutputs);
- syncRequestImpl->setPointerToExecutableNetworkInternal(shared_from_this());
- auto asyncRequestImpl = std::make_shared<MKLDNNAsyncInferRequest>(syncRequestImpl, _taskExecutor, _callbackExecutor);
- asyncRequest.reset(new InferRequestBase<MKLDNNAsyncInferRequest>(asyncRequestImpl),
- [](IInferRequest *p) { p->Release(); });
-
- asyncRequestImpl->SetPointerToPublicInterface(asyncRequest);
+InferenceEngine::IInferRequest::Ptr MKLDNNExecNetwork::CreateInferRequest() {
+ return CreateAsyncInferRequestFromSync<MKLDNNAsyncInferRequest>();
}
-void MKLDNNExecNetwork::GetExecGraphInfo(InferenceEngine::ICNNNetwork::Ptr &graphPtr) {
+InferenceEngine::CNNNetwork MKLDNNExecNetwork::GetExecGraphInfo() {
if (_graphs.size() == 0)
THROW_IE_EXCEPTION << "No graph was found";
- graphPtr = _graphs.begin()->get()->dump();
+ return _graphs.begin()->get()->dump();
}
-void MKLDNNExecNetwork::GetConfig(const std::string &name, Parameter &result, ResponseDesc *resp) const {
+Parameter MKLDNNExecNetwork::GetConfig(const std::string &name) const {
if (_graphs.size() == 0)
THROW_IE_EXCEPTION << "No graph was found";
Config engConfig = _graphs.begin()->get()->getProperty();
- auto option = engConfig._config.find(name);
- if (option != engConfig._config.end()) {
- result = option->second;
+ auto it = engConfig._config.find(name);
+ if (it != engConfig._config.end()) {
+ return it->second;
} else {
THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork config key: " << name;
}
}
-void MKLDNNExecNetwork::GetMetric(const std::string &name, Parameter &result, ResponseDesc *resp) const {
+InferenceEngine::Parameter MKLDNNExecNetwork::GetMetric(const std::string &name) const {
if (_graphs.size() == 0)
THROW_IE_EXCEPTION << "No graph was found";
if (name == METRIC_KEY(NETWORK_NAME)) {
- if (_graphs.begin()->get()->dump() == nullptr)
- THROW_IE_EXCEPTION << "Invalid graph dump";
- result = IE_SET_METRIC(NETWORK_NAME, _graphs.begin()->get()->dump()->getName());
+ IE_SET_METRIC_RETURN(NETWORK_NAME, _graphs.begin()->get()->GetName());
} else if (name == METRIC_KEY(SUPPORTED_METRICS)) {
std::vector<std::string> metrics;
metrics.push_back(METRIC_KEY(NETWORK_NAME));
metrics.push_back(METRIC_KEY(SUPPORTED_METRICS));
metrics.push_back(METRIC_KEY(SUPPORTED_CONFIG_KEYS));
metrics.push_back(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS));
- result = IE_SET_METRIC(SUPPORTED_METRICS, metrics);
+ IE_SET_METRIC_RETURN(SUPPORTED_METRICS, metrics);
} else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
std::vector<std::string> configKeys;
for (auto && key : _graphs.begin()->get()->getProperty()._config) {
configKeys.push_back(key.first);
}
- result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, configKeys);
+ IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
} else if (name == METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)) {
Config engConfig = _graphs.begin()->get()->getProperty();
auto option = engConfig._config.find(CONFIG_KEY(CPU_THROUGHPUT_STREAMS));
IE_ASSERT(option != engConfig._config.end());
auto streams = std::stoi(option->second);
- result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, static_cast<unsigned int>(
+ IE_SET_METRIC_RETURN(OPTIMAL_NUMBER_OF_INFER_REQUESTS, static_cast<unsigned int>(
streams ? streams : 1));
} else {
THROW_IE_EXCEPTION << "Unsupported ExecutableNetwork metric: " << name;
CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
- void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override;
+ InferenceEngine::IInferRequest::Ptr CreateInferRequest() override;
MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network, const Config &cfg,
const MKLDNNExtensionManager::Ptr &extMgr, NumaNodesWeights &weightsSharing);
void setProperty(const std::map<std::string, std::string> &properties);
- void GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
- void GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
- void GetExecGraphInfo(InferenceEngine::ICNNNetwork::Ptr &graphPtr) override;
+ InferenceEngine::Parameter GetConfig(const std::string &name) const override;
+ InferenceEngine::Parameter GetMetric(const std::string &name) const override;
+ InferenceEngine::CNNNetwork GetExecGraphInfo() override;
std::vector<InferenceEngine::IMemoryStateInternal::Ptr> QueryState() override;
}
}
-InferenceEngine::ICNNNetwork::Ptr MKLDNNGraph::dump() const {
+InferenceEngine::CNNNetwork MKLDNNGraph::dump() const {
return dump_graph_as_ie_ngraph_net(*this);
}
#pragma once
#include "ie_parallel.hpp"
-#include "ie_icnn_network.hpp"
+#include "cpp/ie_cnn_network.h"
#include "config.h"
#include "mkldnn_memory.h"
#include "mean_image.h"
return graphNodes;
}
+ std::string GetName() {
+ return _name;
+ }
+
std::vector<MKLDNNEdgePtr>& GetEdges() {
return graphEdges;
}
void DropNode(const MKLDNNNodePtr& node);
void DropDWConvNode(const MKLDNNNodePtr& node);
- InferenceEngine::ICNNNetwork::Ptr dump() const;
+ InferenceEngine::CNNNetwork dump() const;
template<typename NET>
static void ApplyUnrollPasses(NET &net);
friend class MKLDNNInferRequest;
friend class MKLDNNGraphlessInferRequest;
- friend std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph);
- friend std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph);
+ friend InferenceEngine::CNNNetwork dump_graph_as_ie_net(const MKLDNNGraph &graph);
+ friend InferenceEngine::CNNNetwork dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph);
private:
void dumpToDotFile(std::string file) const;
return layer;
}
-std::shared_ptr<ICNNNetwork> dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph) {
+InferenceEngine::CNNNetwork dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph) {
std::map<MKLDNNNodePtr, std::shared_ptr<ngraph::Node> > node2layer;
ngraph::ResultVector results;
return net;
}
-std::shared_ptr<ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph) {
+InferenceEngine::CNNNetwork dump_graph_as_ie_net(const MKLDNNGraph &graph) {
auto net = std::make_shared<details::CNNNetworkImpl>();
net->setName(graph._name);
net->setInputInfo(in_info);
}
- return net;
+ return InferenceEngine::CNNNetwork{net};
}
void dump_graph_as_dot(const MKLDNNGraph &graph, std::ostream &out) {
- auto dump_net = dump_graph_as_ie_net(graph);
- if (dump_net == nullptr)
- THROW_IE_EXCEPTION << "Nullable net dump";
- InferenceEngine::saveGraphToDot(*dump_net, out, drawer_callback);
+ InferenceEngine::CNNNetwork dump_net = dump_graph_as_ie_net(graph);
+ InferenceEngine::saveGraphToDot(dump_net, out, drawer_callback);
}
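// Usage sketch (illustrative, not part of the patch): the dump helpers are typically
// driven from debugging code, e.g.
//   std::ofstream dotFile("mkldnn_graph.dot");
//   dump_graph_as_dot(graph, dotFile);
// where graph is an MKLDNNGraph instance and the file name is arbitrary.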
//**********************************
#pragma once
-#include "ie_icnn_network.hpp"
+#include "cpp/ie_cnn_network.h"
#include "mkldnn_graph.h"
#include <memory>
void dump_graph_as_dot(const MKLDNNGraph &graph, std::ostream &out);
-std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph);
-std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph);
+InferenceEngine::CNNNetwork dump_graph_as_ie_net(const MKLDNNGraph &graph);
+InferenceEngine::CNNNetwork dump_graph_as_ie_ngraph_net(const MKLDNNGraph &graph);
} // namespace MKLDNNPlugin
extensionManager->AddExtension(extension);
}
-void Engine::QueryNetwork(const ICNNNetwork& network, const std::map<std::string, std::string>& config, QueryNetworkResult& res) const {
+QueryNetworkResult Engine::QueryNetwork(const ICNNNetwork& network, const std::map<std::string, std::string>& config) const {
+ QueryNetworkResult res;
MKLDNNWeightsSharing::Ptr fake_w_cache;
auto function = network.getFunction();
if (function != nullptr) {
i++;
}
}
+
+ return res;
}
static const Version version = {{2, 1}, CI_BUILD_NUMBER, "MKLDNNPlugin"};
InferenceEngine::Parameter GetMetric(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const override;
- void QueryNetwork(const InferenceEngine::ICNNNetwork& network,
- const std::map<std::string, std::string>& config, InferenceEngine::QueryNetworkResult& res) const override;
+ InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::ICNNNetwork& network,
+ const std::map<std::string, std::string>& config) const override;
private:
Config engConfig;
return std::make_shared<MultiDeviceInferRequest>(networkInputs, networkOutputs);
}
-void MultiDeviceExecutableNetwork::CreateInferRequest(IInferRequest::Ptr& asyncRequest) {
+IInferRequest::Ptr MultiDeviceExecutableNetwork::CreateInferRequest() {
+ IInferRequest::Ptr asyncRequest;
auto syncRequestImpl = CreateInferRequestImpl(_networkInputs, _networkOutputs);
syncRequestImpl->setPointerToExecutableNetworkInternal(shared_from_this());
auto asyncTreadSafeImpl = std::make_shared<MultiDeviceAsyncInferRequest>(std::static_pointer_cast<MultiDeviceInferRequest>(syncRequestImpl),
_callbackExecutor);
asyncRequest.reset(new InferRequestBase<MultiDeviceAsyncInferRequest>(asyncTreadSafeImpl), [](IInferRequest *p) { p->Release(); });
asyncTreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
+ return asyncRequest;
}
-void MultiDeviceExecutableNetwork::SetConfig(const std::map<std::string, InferenceEngine::Parameter> &config,
- InferenceEngine::ResponseDesc * /* resp */) {
+void MultiDeviceExecutableNetwork::SetConfig(const std::map<std::string, InferenceEngine::Parameter> &config) {
auto priorities = config.find(MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES);
if (priorities == config.end() || config.size() > 1) {
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str <<
}
}
-void MultiDeviceExecutableNetwork::GetConfig(const std::string &name, InferenceEngine::Parameter &result,
- InferenceEngine::ResponseDesc * /* resp */) const {
- auto res = _config.find(name);
- if (res != _config.end()) {
- result = res->second;
+InferenceEngine::Parameter MultiDeviceExecutableNetwork::GetConfig(const std::string &name) const {
+ auto it = _config.find(name);
+ if (it != _config.end()) {
+ return it->second;
} else {
THROW_IE_EXCEPTION << NOT_FOUND_str << name <<" not found in the ExecutableNetwork config";
}
}
-void MultiDeviceExecutableNetwork::GetMetric(const std::string &name, Parameter &result, ResponseDesc *resp) const {
+InferenceEngine::Parameter MultiDeviceExecutableNetwork::GetMetric(const std::string &name) const {
if (name == METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)) {
unsigned int res = 0u;
for (auto n : _networksPerDevice) {
<< "Failed to query the metric for the " << n.first << " with error:" << iie.what();
}
}
- result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, res);
+ IE_SET_METRIC_RETURN(OPTIMAL_NUMBER_OF_INFER_REQUESTS, res);
} else if (name == METRIC_KEY(NETWORK_NAME)) {
auto it = _networksPerDevice.begin();
IE_ASSERT(it != _networksPerDevice.end());
- result = IE_SET_METRIC(NETWORK_NAME, it->second.GetMetric(
+ IE_SET_METRIC_RETURN(NETWORK_NAME, it->second.GetMetric(
METRIC_KEY(NETWORK_NAME)).as<std::string>());
} else if (name == METRIC_KEY(SUPPORTED_METRICS)) {
- result = IE_SET_METRIC(SUPPORTED_METRICS, {
+ IE_SET_METRIC_RETURN(SUPPORTED_METRICS, {
METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS),
METRIC_KEY(SUPPORTED_METRICS),
METRIC_KEY(NETWORK_NAME),
});
} else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
std::vector<std::string> configKeys = { MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES };
- result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, configKeys);
+ IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
} else {
THROW_IE_EXCEPTION << "Unsupported Network metric: " << name;
}
enablePerfCounters);
}
-void MultiDeviceInferencePlugin::QueryNetwork(const ICNNNetwork& network,
- const std::map<std::string, std::string>& config,
- QueryNetworkResult& queryResult) const {
+QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const ICNNNetwork& network,
+ const std::map<std::string, std::string>& config) const {
+ QueryNetworkResult queryResult;
+
if (GetCore() == nullptr) {
THROW_IE_EXCEPTION << "Please, work with MULTI device via InferencEngine::Core object";
}
for (auto&& supportedLayer : supportedLayers) {
queryResult.supportedLayersMap[supportedLayer] = GetName();
}
+
+ return queryResult;
}
} // namespace MultiDevicePlugin
const std::unordered_map<std::string, InferenceEngine::Parameter>& config,
const bool needPerfCounters = false);
- void SetConfig(const std::map<std::string, InferenceEngine::Parameter> &config, InferenceEngine::ResponseDesc *resp) override;
- void GetConfig(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
- void GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
+ void SetConfig(const std::map<std::string, InferenceEngine::Parameter> &config) override;
+ InferenceEngine::Parameter GetConfig(const std::string &name) const override;
+ InferenceEngine::Parameter GetMetric(const std::string &name) const override;
void run(Task inferTask) override;
- void CreateInferRequest(InferenceEngine::IInferRequest::Ptr& asyncRequest) override;
+ InferenceEngine::IInferRequest::Ptr CreateInferRequest() override;
InferenceEngine::InferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
~MultiDeviceExecutableNetwork() override;
void SetConfig(const std::map<std::string, std::string>& config) override;
Parameter GetConfig(const std::string& name,
const std::map<std::string, Parameter> & options) const override;
- void QueryNetwork(const InferenceEngine::ICNNNetwork& network,
- const std::map<std::string, std::string>& config,
- InferenceEngine::QueryNetworkResult& res) const override;
+ InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::ICNNNetwork& network,
+ const std::map<std::string, std::string>& config) const override;
InferenceEngine::Parameter GetMetric(const std::string& name,
const std::map<std::string, InferenceEngine::Parameter>& options) const override;
}
StatusCode CreateInferRequest(IInferRequest::Ptr& req, ResponseDesc* resp) noexcept override {
- TO_STATUS(_impl->CreateInferRequest(req));
+ TO_STATUS(req = _impl->CreateInferRequest());
}
StatusCode Export(const std::string& modelFileName, ResponseDesc* resp) noexcept override {
}
StatusCode GetExecGraphInfo(ICNNNetwork::Ptr& graphPtr, ResponseDesc* resp) noexcept override {
- TO_STATUS(_impl->GetExecGraphInfo(graphPtr));
+ TO_STATUS(graphPtr = _impl->GetExecGraphInfo());
}
StatusCode QueryState(IMemoryState::Ptr& pState, size_t idx, ResponseDesc* resp) noexcept override {
}
StatusCode SetConfig(const std::map<std::string, Parameter>& config, ResponseDesc* resp) noexcept override {
- TO_STATUS(_impl->SetConfig(config, resp));
+ TO_STATUS(_impl->SetConfig(config));
}
StatusCode GetConfig(const std::string& name, Parameter& result, ResponseDesc* resp) const noexcept override {
- TO_STATUS(_impl->GetConfig(name, result, resp));
+ TO_STATUS(result = _impl->GetConfig(name));
}
StatusCode GetMetric(const std::string& name, Parameter& result, ResponseDesc* resp) const noexcept override {
- TO_STATUS(_impl->GetMetric(name, result, resp));
+ TO_STATUS(result = _impl->GetMetric(name));
}
StatusCode GetContext(RemoteContext::Ptr& pContext, ResponseDesc* resp) const noexcept override {
- TO_STATUS(_impl->GetContext(pContext, resp));
+ TO_STATUS(pContext = _impl->GetContext());
}
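// Note: TO_STATUS behaves like the TO_STATUSVAR helper removed below - it evaluates the
// expression inside a try/catch, returns OK on success and converts a thrown exception
// into the corresponding StatusCode with its message written to the ResponseDesc.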
private:
}
/**
- * @def TO_STATUSVAR(x, statusVar, descBufferVar)
- * @brief Converts C++ exceptioned function call to a status variable
- * @ingroup ie_dev_api_error_debug
- */
-#define TO_STATUSVAR(x, statusVar, descBufferVar) \
- do { \
- try { \
- x; \
- statusVar = OK; \
- } catch (const InferenceEngine::details::InferenceEngineException& iex) { \
- statusVar = \
- InferenceEngine::DescriptionBuffer((iex.hasStatus() ? iex.getStatus() : GENERAL_ERROR), descBufferVar) \
- << iex.what(); \
- } catch (const std::exception& ex) { \
- statusVar = InferenceEngine::DescriptionBuffer(GENERAL_ERROR, descBufferVar) << ex.what(); \
- } catch (...) { \
- statusVar = InferenceEngine::DescriptionBuffer(UNEXPECTED); \
- } \
- } while (false)
-
-/**
* @def TO_STATUS_NO_RESP(x)
* @brief Converts C++ exceptioned function call into a status code. Does not work with a ResponseDesc object
* @ingroup ie_dev_api_error_debug
networkModel << strm.rdbuf();
}
- void GetExecGraphInfo(ICNNNetwork::Ptr& graphPtr) override {
- (void)graphPtr;
+ CNNNetwork GetExecGraphInfo() override {
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
}
}
std::vector<IMemoryStateInternal::Ptr> QueryState() override {
- // meaning base plugin reports as no state available - plugin owners need to create proper override of this
- return {};
+ THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
}
- void SetConfig(const std::map<std::string, Parameter>& config, ResponseDesc* /* resp */) override {
+ void SetConfig(const std::map<std::string, Parameter>& config) override {
if (config.empty()) {
THROW_IE_EXCEPTION << "The list of configuration values is empty";
}
<< config.begin()->first;
}
- void GetConfig(const std::string& /* name */, Parameter& /* result */, ResponseDesc* /* resp */) const override {
+ Parameter GetConfig(const std::string& name) const override {
+ (void)name;
THROW_IE_EXCEPTION << "GetConfig for executable network is not supported by this device";
}
- void GetMetric(const std::string& /* name */, Parameter& /* result */, ResponseDesc* /* resp */) const override {
+ Parameter GetMetric(const std::string& name) const override {
+ (void)name;
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
}
- void GetContext(RemoteContext::Ptr& /* pContext */, ResponseDesc* /* resp */) const override {
+ RemoteContext::Ptr GetContext() const override {
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
}
/**
* @brief Creates an asynchronous inference request public implementation.
- * @param asyncRequest The asynchronous request public implementation
+ * @return The asynchronous request public implementation
*/
- void CreateInferRequest(IInferRequest::Ptr& asyncRequest) override {
+ IInferRequest::Ptr CreateInferRequest() override {
+ IInferRequest::Ptr asyncRequest;
auto asyncRequestImpl = this->CreateAsyncInferRequestImpl(_networkInputs, _networkOutputs);
asyncRequestImpl->setPointerToExecutableNetworkInternal(shared_from_this());
+
asyncRequest.reset(new InferRequestBase<AsyncInferRequestInternal>(asyncRequestImpl), [](IInferRequest* p) {
p->Release();
});
- asyncRequestImpl->SetPublicInterfacePtr(asyncRequest);
+ asyncRequestImpl->SetPointerToPublicInterface(asyncRequest);
+ return asyncRequest;
}
protected:
}
/**
- * @brief Given optional implementation of creating asynchnous inference request to avoid
+ * @brief Given optional implementation of creating asynchronous inference request to avoid
* need for it to be implemented by plugin
- * @param asyncRequest shared_ptr for the created asynchnous inference request
+ * @return shared_ptr for the created asynchronous inference request
*/
- void CreateInferRequest(IInferRequest::Ptr& asyncRequest) override {
+ IInferRequest::Ptr CreateInferRequest() override {
+ return CreateAsyncInferRequestFromSync();
+ }
+
+protected:
+ template <typename AsyncInferRequestType = AsyncInferRequestThreadSafeDefault>
+ IInferRequest::Ptr CreateAsyncInferRequestFromSync() {
+ IInferRequest::Ptr asyncRequest;
+
auto syncRequestImpl = this->CreateInferRequestImpl(_networkInputs, _networkOutputs);
syncRequestImpl->setPointerToExecutableNetworkInternal(shared_from_this());
- auto asyncTreadSafeImpl =
- std::make_shared<AsyncInferRequestThreadSafeDefault>(syncRequestImpl, _taskExecutor, _callbackExecutor);
- asyncRequest.reset(new InferRequestBase<AsyncInferRequestThreadSafeDefault>(asyncTreadSafeImpl),
- [](IInferRequest* p) {
- p->Release();
- });
- asyncTreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
- }
- /**
- * @brief Gets the executor.
- * @return The executor.
- */
- ITaskExecutor::Ptr& GetExecutor() {
- return _taskExecutor;
+ auto asyncThreadSafeImpl = std::make_shared<AsyncInferRequestType>(
+ syncRequestImpl, _taskExecutor, _callbackExecutor);
+ asyncRequest.reset(new InferRequestBase<AsyncInferRequestType>(asyncThreadSafeImpl),
+ [](IInferRequest *p) { p->Release(); });
+ asyncThreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
+
+ return asyncRequest;
}
-protected:
/**
- * @brief Create a synchronous inference request object used to infer the network
+ * @brief Creates a synchronous inference request object used to infer the network
* @note Used by ExecutableNetworkThreadSafeDefault::CreateInferRequest as a plugin-specific implementation
* @param networkInputs An input info map needed to create input blobs
* @param networkOutputs An output data map needed to create output blobs
* IInferRequest::CompletionCallback
* @param ptr A weak pointer to InferRequestBase
*/
- void SetPublicInterfacePtr(IInferRequest::Ptr ptr) {
+ void SetPointerToPublicInterface(IInferRequest::Ptr ptr) {
_publicInterface = ptr;
}
}
}
+ void SetBatch(int batch) override {
+ (void)batch;
+ THROW_IE_EXCEPTION << "Dynamic batch is not supported";
+ };
+
/**
* @brief Sets the pointer to executable network internal.
* @note Needed to correctly handle ownership between objects.
}
}
- void SetBatch(int batch) override {
- (void)batch;
- THROW_IE_EXCEPTION << "Dynamic batch is not supported";
- };
+protected:
+ InferenceEngine::InputsDataMap _networkInputs; //!< Holds information about network inputs info
+ InferenceEngine::OutputsDataMap _networkOutputs; //!< Holds information about network outputs data
+ InferenceEngine::BlobMap _inputs; //!< A map of network input blobs
+ InferenceEngine::BlobMap _outputs; //!< A map of network output blobs
+ std::map<std::string, PreProcessDataPtr> _preProcData; //!< A map of pre-process data per input
+ int m_curBatch; //!< Current batch value used in dynamic batching
+
+ /**
+ * @brief A shared pointer to ExecutableNetworkInternal interface
+ * @note Needed to correctly handle ownership between objects.
+ */
+ std::shared_ptr<ExecutableNetworkInternal> _exeNetwork;
/**
* @brief Checks and executes input data pre-processing if needed.
}
}
-protected:
- InferenceEngine::InputsDataMap _networkInputs; //!< Holds information about network inputs info
- InferenceEngine::OutputsDataMap _networkOutputs; //!< Holds information about network outputs data
- InferenceEngine::BlobMap _inputs; //!< A map of network input blobs
- InferenceEngine::BlobMap _outputs; //!< A map of network output blobs
- std::map<std::string, PreProcessDataPtr> _preProcData; //!< A map of pre-process data per input
- int m_curBatch; //!< Current batch value used in dynamic batching
-
- /**
- * @brief A shared pointer to ExecutableNetworkInternal interface
- * @note Needed to correctly handle ownership between objects.
- */
- std::shared_ptr<ExecutableNetworkInternal> _exeNetwork;
-
/**
* @brief Helper function to find input or output blob by name
* @param name A name of input or output blob.
public:
ExecutableNetwork LoadNetwork(const ICNNNetwork& network,
const std::map<std::string, std::string>& config) override {
- return LoadNetworkImplPrivate(network, config);
+ return LoadNetwork(network, config, nullptr);
}
ExecutableNetwork LoadNetwork(const ICNNNetwork& network, const std::map<std::string, std::string>& config,
RemoteContext::Ptr context) override {
- return LoadNetworkImplPrivate(network, config, context);;
+ InputsDataMap networkInputs, networkInputsCloned;
+ OutputsDataMap networkOutputs, networkOutputsCloned;
+ network.getInputsInfo(networkInputs);
+ network.getOutputsInfo(networkOutputs);
+ copyInputOutputInfo(networkInputs, networkOutputs, networkInputsCloned, networkOutputsCloned);
+
+ ExecutableNetworkInternal::Ptr impl;
+ if (nullptr == context) {
+ impl = LoadExeNetworkImpl(network, config);
+ } else {
+ impl = LoadExeNetworkImpl(network, context, config);
+ }
+
+ impl->setNetworkInputs(networkInputsCloned);
+ impl->setNetworkOutputs(networkOutputsCloned);
+ impl->SetPointerToPlugin(shared_from_this());
+
+ auto executableNetwork = make_executable_network(impl);
+ return ExecutableNetwork(executableNetwork);
}
- IExecutableNetwork::Ptr ImportNetwork(const std::string& modelFileName,
- const std::map<std::string, std::string>& config) override {
+ ExecutableNetwork ImportNetwork(const std::string& modelFileName,
+ const std::map<std::string, std::string>& config) override {
(void)modelFileName;
(void)config;
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
}
- void QueryNetwork(const ICNNNetwork& /*network*/, const std::map<std::string, std::string>& /*config*/,
- QueryNetworkResult& /*res*/) const override {
+ QueryNetworkResult QueryNetwork(const ICNNNetwork& /*network*/, const std::map<std::string, std::string>& /*config*/) const override {
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
}
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
}
-private:
- /**
- * @brief A helper method which clones a ICNNNetwork object, keeps InputsDataMap and OutputsDataMap data maps,
- * and creates an IExecutableNetwork object
- * @param network An input ICNNNetwork object used to create an executable network object
- * @param config A map of string -> string configuration options.
- * @param context An optional pointer to RemoteContext
- * @return An output executable network object
- */
- ExecutableNetwork LoadNetworkImplPrivate(const ICNNNetwork& network,
- const std::map<std::string, std::string>& config,
- RemoteContext::Ptr context = nullptr) {
- InputsDataMap networkInputs, networkInputsCloned;
- OutputsDataMap networkOutputs, networkOutputsCloned;
- network.getInputsInfo(networkInputs);
- network.getOutputsInfo(networkOutputs);
- copyInputOutputInfo(networkInputs, networkOutputs, networkInputsCloned, networkOutputsCloned);
-
- ExecutableNetworkInternal::Ptr impl;
- if (nullptr == context) {
- impl = LoadExeNetworkImpl(network, config);
- } else {
- impl = LoadExeNetworkImpl(network, context, config);
- }
-
- impl->setNetworkInputs(networkInputsCloned);
- impl->setNetworkOutputs(networkOutputsCloned);
- impl->SetPointerToPlugin(shared_from_this());
-
- auto executableNetwork = make_executable_network(impl);
- return ExecutableNetwork(executableNetwork);
- }
-
protected:
/**
* @brief Creates an executable network from a parsed network object, users can create as many networks as they need
/**
* @brief Create an inference request object used to infer the network
* Note: the returned request will have allocated input and output blobs (that can be changed later)
- * @param req - shared_ptr for the created request
+ * @return shared_ptr for the created request
*/
- virtual void CreateInferRequest(IInferRequest::Ptr& req) = 0;
+ virtual IInferRequest::Ptr CreateInferRequest() = 0;
/**
+ * @deprecated Use IExecutableNetworkInternal::Export(std::ostream& networkModel)
* @brief Export the current created executable network so it can be used later in the Import() main API
* @param modelFileName - path to the location of the exported file
*/
/**
* @brief Get executable graph information from a device
- * @param graphPtr network ptr to store executable graph information
+ * @return A network object to store executable graph information
*/
- virtual void GetExecGraphInfo(ICNNNetwork::Ptr& graphPtr) = 0;
+ virtual CNNNetwork GetExecGraphInfo() = 0;
/**
+ * @deprecated Need to implement GetVariablesInfo for ExecutableNetwork
* @brief Queries memory states.
* @return Returns memory states
*/
/**
* @brief Sets configuration for current executable network
* @param config Map of pairs: (config parameter name, config parameter value)
- * @param resp Pointer to the response message that holds a description of an error if any occurred
*/
- virtual void SetConfig(const std::map<std::string, Parameter>& config, ResponseDesc* resp) = 0;
+ virtual void SetConfig(const std::map<std::string, Parameter>& config) = 0;
/**
* @brief Gets configuration dedicated to plugin behaviour
- * @param name - config key, can be found in ie_plugin_config.hpp
- * @param result - value of config corresponding to config key
- * @param resp Pointer to the response message that holds a description of an error if any occurred
+ * @param name A config key, can be found in ie_plugin_config.hpp
+ * @return A value of config corresponding to config key
*/
- virtual void GetConfig(const std::string& name, Parameter& result, ResponseDesc* resp) const = 0;
+ virtual Parameter GetConfig(const std::string& name) const = 0;
/**
* @brief Gets general runtime metric for dedicated hardware
- * @param name - metric name to request
- * @param result - metric value corresponding to metric key
- * @param resp - Pointer to the response message that holds a description of an error if any
- * occurred
+ * @param name A metric name to request
+ * @return A metric value corresponding to metric key
*/
- virtual void GetMetric(const std::string& name, Parameter& result, ResponseDesc* resp) const = 0;
+ virtual Parameter GetMetric(const std::string& name) const = 0;
/**
* @brief Gets the remote context.
- * @param pContext A reference to a context
- * @param resp A response
+ * @return A reference to a context
*/
- virtual void GetContext(RemoteContext::Ptr& pContext, ResponseDesc* resp) const = 0;
+ virtual RemoteContext::Ptr GetContext() const = 0;
};
} // namespace InferenceEngine
virtual RemoteContext::Ptr GetDefaultContext() = 0;
/**
+ * @deprecated Use ImportNetwork(std::istream& networkModel, const std::map<std::string, std::string>& config)
* @brief Creates an executable network from a previously exported network
* @param modelFileName - path to the location of the exported file
* @param config A string -> string map of parameters
- * @return A reference to a shared ptr of the returned network interface
+ * @return An Executable network
*/
- virtual IExecutableNetwork::Ptr ImportNetwork(const std::string& modelFileName,
- const std::map<std::string, std::string>& config) = 0;
+ virtual ExecutableNetwork ImportNetwork(const std::string& modelFileName,
+ const std::map<std::string, std::string>& config) = 0;
/**
* @brief Creates an executable network from a previously exported network using plugin implementation
* and removes Inference Engine magic and plugin name
* @param networkModel Reference to network model output stream
- * @param context - a pointer to plugin context derived from RemoteContext class used to
+ * @param context A pointer to plugin context derived from RemoteContext class used to
* execute the network
* @param config A string -> string map of parameters
* @return An Executable network
* @brief Queries a plugin about supported layers in the network
* @param[in] network The network object to query
* @param[in] config The map of configuration parameters
- * @param res The result of query operator containing supported layers map
+ * @return The result of query operator containing supported layers map
*/
- virtual void QueryNetwork(const ICNNNetwork& network, const std::map<std::string, std::string>& config,
- QueryNetworkResult& res) const = 0;
+ virtual QueryNetworkResult QueryNetwork(const ICNNNetwork& network, const std::map<std::string, std::string>& config) const = 0;
};
} // namespace InferenceEngine
/**
* @brief A namespace with const values for Execution Graph parameters names.
*
- * Executable Graph Info is represented in ICNNNetwork format with general CNNLayer nodes inside
+ * Executable Graph Info is represented in CNNNetwork format with general ExecutionNode nodes inside
* including connections between the nodes. Each node describes an executable hardware-specific
- * primitive and stores its parameters within CNNLayer::params map.
+ * primitive and stores its parameters within the map returned by ExecutionNode::get_rt_info().
* There is a list of general keys for the parameters map.
*/
namespace ExecGraphInfoSerialization {
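// Illustrative sketch (assumption: `execGraph` is the CNNNetwork returned by
// GetExecGraphInfo()): the executable-graph nodes are exposed via the underlying
// ngraph::Function, and each node keeps its hardware-specific parameters in its
// rt_info map, keyed by the constants declared in this namespace.
void DumpExecGraphParams(InferenceEngine::CNNNetwork execGraph) {
    auto function = execGraph.getFunction();
    for (const auto& node : function->get_ordered_ops()) {
        const auto& rtInfo = node->get_rt_info();
        auto it = rtInfo.find(ExecGraphInfoSerialization::PERF_COUNTER);
        if (it != rtInfo.end()) {
            // it->second holds this node's performance-counter parameter
        }
    }
}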
auto meanSegmentPrecision = GetPrecisionAttr(ppNode, "mean-precision", Precision::UNSPECIFIED);
- ResponseDesc resp;
InferenceEngine::PreProcessChannel::Ptr preProcessChannel;
int lastChanNo = -1;
#pragma once
#include <vpu/utils/perf_report.hpp>
-#include <ie_icnn_network.hpp>
+#include <cpp/ie_cnn_network.h>
#include <vector>
namespace vpu {
-InferenceEngine::ICNNNetwork::Ptr buildRuntimeGraphAsIeNet(
+InferenceEngine::CNNNetwork buildRuntimeGraphAsIeNet(
GraphMetaInfo& graphMetaInfo,
const std::vector<float>& perfInfo);
-InferenceEngine::ICNNNetwork::Ptr buildRuntimeGraph(
+InferenceEngine::CNNNetwork buildRuntimeGraph(
GraphMetaInfo& graphMetaInfo,
const std::vector<float>& perfInfo);
} // namespace
-InferenceEngine::ICNNNetwork::Ptr buildRuntimeGraph(GraphMetaInfo& graphMetaInfo, const std::vector<float>& perfInfo) {
+InferenceEngine::CNNNetwork buildRuntimeGraph(GraphMetaInfo& graphMetaInfo, const std::vector<float>& perfInfo) {
std::map<size_t, std::shared_ptr<ngraph::Node>> stageMetaIndexToNode;
std::function<void(size_t)> createNodeFromMeta;
return net;
}
-InferenceEngine::ICNNNetwork::Ptr buildRuntimeGraphAsIeNet(GraphMetaInfo& graphMetaInfo, const std::vector<float>& perfInfo) {
+InferenceEngine::CNNNetwork buildRuntimeGraphAsIeNet(GraphMetaInfo& graphMetaInfo, const std::vector<float>& perfInfo) {
auto net = std::make_shared<InferenceEngine::details::CNNNetworkImpl>();
net->setName(graphMetaInfo.graphName);
net->setInputInfo(inputInfo);
}
- return net;
+ return InferenceEngine::CNNNetwork{net};
}
namespace {
Import(blobFile, devicePool, config);
}
-void ExecutableNetwork::GetMetric(const std::string &name, Parameter &result, ResponseDesc *resp) const {
+InferenceEngine::Parameter ExecutableNetwork::GetMetric(const std::string &name) const {
if (name == METRIC_KEY(NETWORK_NAME)) {
- result = IE_SET_METRIC(NETWORK_NAME, _graphDesc._name);
+ IE_SET_METRIC_RETURN(NETWORK_NAME, _graphDesc._name);
} else if (name == METRIC_KEY(SUPPORTED_METRICS)) {
- result = IE_SET_METRIC(SUPPORTED_METRICS, _supportedMetrics);
+ IE_SET_METRIC_RETURN(SUPPORTED_METRICS, _supportedMetrics);
} else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
- result = IE_SET_METRIC(SUPPORTED_CONFIG_KEYS, std::vector<std::string>());
+ IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, std::vector<std::string>());
} else if (name == METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)) {
- result = IE_SET_METRIC(OPTIMAL_NUMBER_OF_INFER_REQUESTS, static_cast<unsigned int>(2u * _actualNumExecutors));
+ IE_SET_METRIC_RETURN(OPTIMAL_NUMBER_OF_INFER_REQUESTS, static_cast<unsigned int>(2u * _actualNumExecutors));
} else if (name == METRIC_KEY(DEVICE_THERMAL)) {
- result = IE_SET_METRIC(DEVICE_THERMAL, _executor->GetThermal(_device));
+ IE_SET_METRIC_RETURN(DEVICE_THERMAL, _executor->GetThermal(_device));
} else {
THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
}
}
-void ExecutableNetwork::GetExecGraphInfo(InferenceEngine::ICNNNetwork::Ptr& graphPtr) {
+InferenceEngine::CNNNetwork ExecutableNetwork::GetExecGraphInfo() {
auto perfInfo = _executor->getPerfTimeInfo(_graphDesc._graphHandle);
- graphPtr = buildRuntimeGraph(_graphMetaData, perfInfo);
+ return buildRuntimeGraph(_graphMetaData, perfInfo);
}
} // namespace MyriadPlugin
_graphMetaData.stagesMeta, _config, _log, _executor);
}
- void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override {
+ InferenceEngine::IInferRequest::Ptr CreateInferRequest() override {
+ InferenceEngine::IInferRequest::Ptr asyncRequest;
if (_device == nullptr || !_device->isBooted()) {
THROW_IE_EXCEPTION << "Can not create infer request: there is no available devices with platform "
<< _device->_platform;
_executor);
syncRequestImpl->setPointerToExecutableNetworkInternal(shared_from_this());
auto taskExecutorGetResult = getNextTaskExecutor();
- auto asyncTreadSafeImpl = std::make_shared<MyriadAsyncInferRequest>(
+ auto asyncThreadSafeImpl = std::make_shared<MyriadAsyncInferRequest>(
syncRequestImpl, _taskExecutor, _callbackExecutor, taskExecutorGetResult);
asyncRequest.reset(new InferenceEngine::InferRequestBase<InferenceEngine::AsyncInferRequestThreadSafeDefault>(
- asyncTreadSafeImpl),
+ asyncThreadSafeImpl),
[](InferenceEngine::IInferRequest *p) { p->Release(); });
- asyncTreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
+ asyncThreadSafeImpl->SetPointerToPublicInterface(asyncRequest);
+ return asyncRequest;
}
void Export(std::ostream& model) override {
}
}
- void GetMetric(const std::string &name, InferenceEngine::Parameter &result, InferenceEngine::ResponseDesc *resp) const override;
+ InferenceEngine::Parameter GetMetric(const std::string &name) const override;
- void GetExecGraphInfo(InferenceEngine::ICNNNetwork::Ptr &graphPtr) override;
+ InferenceEngine::CNNNetwork GetExecGraphInfo() override;
void Import(std::istream& strm,
std::vector<DevicePtr> &devicePool,
return result;
}
-void Engine::QueryNetwork(
+QueryNetworkResult Engine::QueryNetwork(
const ICNNNetwork& network,
- const std::map<std::string, std::string>& config,
- QueryNetworkResult& res) const {
+ const std::map<std::string, std::string>& config) const {
VPU_PROFILE(QueryNetwork);
+ QueryNetworkResult res;
auto parsedConfigCopy = _parsedConfig;
parsedConfigCopy.update(config);
res.supportedLayersMap.insert({ layerName, GetName() });
}
}
+
+ return res;
}
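// Illustrative caller-side sketch (hypothetical helper; assumes the usual Inference
// Engine headers): the returned QueryNetworkResult carries the supported-layers map
// directly, with no out-parameter or ResponseDesc involved.
void ReportSupportedLayers(const std::shared_ptr<InferenceEngine::IInferencePlugin>& plugin,
                           const InferenceEngine::ICNNNetwork& network) {
    InferenceEngine::QueryNetworkResult res = plugin->QueryNetwork(network, {});
    for (const auto& entry : res.supportedLayersMap) {
        // entry.first is the layer name, entry.second the name of the device that supports it
    }
}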
Engine::Engine(std::shared_ptr<IMvnc> mvnc) :
return make_executable_network(executableNetwork);
}
-IExecutableNetwork::Ptr Engine::ImportNetwork(
+InferenceEngine::ExecutableNetwork Engine::ImportNetwork(
const std::string& modelFileName,
const std::map<std::string, std::string>& config) {
VPU_PROFILE(ImportNetwork);
const ie::ICNNNetwork& network,
const std::map<std::string, std::string>& config) override;
- void QueryNetwork(
+ ie::QueryNetworkResult QueryNetwork(
const ie::ICNNNetwork& network,
- const std::map<std::string, std::string>& config,
- ie::QueryNetworkResult& res) const override;
+ const std::map<std::string, std::string>& config) const override;
using ie::InferencePluginInternal::ImportNetwork;
- ie::IExecutableNetwork::Ptr ImportNetwork(
+ ie::ExecutableNetwork ImportNetwork(
const std::string& modelFileName,
const std::map<std::string, std::string>& config) override;
#include <vector>
#include "ie_input_info.hpp"
-#include "ie_icnn_network.hpp"
+#include "cpp/ie_cnn_network.h"
#include "ie_iexecutable_network.hpp"
#include <cpp_interfaces/impl/ie_executable_network_internal.hpp>
public:
MOCK_METHOD1(setNetworkInputs, void(InputsDataMap));
MOCK_METHOD1(setNetworkOutputs, void(OutputsDataMap));
- MOCK_METHOD1(CreateInferRequest, void(IInferRequest::Ptr &));
+ MOCK_METHOD0(CreateInferRequest, IInferRequest::Ptr(void));
MOCK_METHOD1(Export, void(const std::string &));
- MOCK_METHOD1(GetExecGraphInfo, void(ICNNNetwork::Ptr &));
+ MOCK_METHOD0(GetExecGraphInfo, CNNNetwork(void));
void WrapOstreamExport(std::ostream& networkModel) {
ExecutableNetworkInternal::Export(networkModel);
}
class MockIExecutableNetworkInternal : public IExecutableNetworkInternal {
public:
- MOCK_CONST_METHOD0(GetOutputsInfo, ConstOutputsDataMap());
- MOCK_CONST_METHOD0(GetInputsInfo, ConstInputsDataMap());
- MOCK_METHOD1(CreateInferRequest, void(IInferRequest::Ptr &));
+ MOCK_CONST_METHOD0(GetOutputsInfo, ConstOutputsDataMap(void));
+ MOCK_CONST_METHOD0(GetInputsInfo, ConstInputsDataMap(void));
+ MOCK_METHOD0(CreateInferRequest, IInferRequest::Ptr(void));
MOCK_METHOD1(Export, void(const std::string &));
void Export(std::ostream &) override {};
- MOCK_METHOD0(QueryState, std::vector<IMemoryStateInternal::Ptr>());
- MOCK_METHOD1(GetExecGraphInfo, void(ICNNNetwork::Ptr &));
+ MOCK_METHOD0(QueryState, std::vector<IMemoryStateInternal::Ptr>(void));
+ MOCK_METHOD0(GetExecGraphInfo, CNNNetwork(void));
- MOCK_METHOD2(SetConfig, void(const std::map<std::string, Parameter> &config, ResponseDesc *resp));
- MOCK_CONST_METHOD3(GetConfig, void(const std::string &name, Parameter &result, ResponseDesc *resp));
- MOCK_CONST_METHOD3(GetMetric, void(const std::string &name, Parameter &result, ResponseDesc *resp));
- MOCK_CONST_METHOD2(GetContext, void(RemoteContext::Ptr &pContext, ResponseDesc *resp));
+ MOCK_METHOD1(SetConfig, void(const std::map<std::string, Parameter> &config));
+ MOCK_CONST_METHOD1(GetConfig, Parameter(const std::string &name));
+ MOCK_CONST_METHOD1(GetMetric, Parameter(const std::string &name));
+ MOCK_CONST_METHOD0(GetContext, RemoteContext::Ptr(void));
};
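// Illustrative test sketch (hypothetical test; assumes gtest/gmock and the usual
// using-declarations for Return and _): with the zero-argument mock methods,
// expected results are supplied through actions rather than out-parameters.
TEST(MockIExecutableNetworkInternalSketch, returnsValuesViaActions) {
    auto mockExeNetwork = std::make_shared<MockIExecutableNetworkInternal>();
    InferenceEngine::IInferRequest::Ptr stubRequest;  // may stay null for a negative-path test
    EXPECT_CALL(*mockExeNetwork, CreateInferRequest()).WillOnce(Return(stubRequest));
    EXPECT_CALL(*mockExeNetwork, GetConfig(_)).WillRepeatedly(Return(InferenceEngine::Parameter{}));
    ASSERT_FALSE(mockExeNetwork->CreateInferRequest());  // the stubbed request is null
}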
class MockIInferencePlugin : public InferenceEngine::IInferencePlugin {
public:
MOCK_METHOD1(AddExtension, void(InferenceEngine::IExtensionPtr));
- MOCK_METHOD3(LoadNetwork, void(IExecutableNetwork::Ptr&,
+ MOCK_METHOD2(LoadNetwork, InferenceEngine::ExecutableNetwork(
const ICNNNetwork&, const std::map<std::string, std::string>&));
- MOCK_METHOD2(ImportNetwork, IExecutableNetwork::Ptr(
+ MOCK_METHOD2(ImportNetwork, InferenceEngine::ExecutableNetwork(
const std::string&, const std::map<std::string, std::string>&));
MOCK_METHOD1(SetConfig, void(const std::map<std::string, std::string> &));
// CreateInferRequest
TEST_F(ExecutableNetworkBaseTests, canForwardCreateInferRequest) {
IInferRequest::Ptr req;
- EXPECT_CALL(*mock_impl.get(), CreateInferRequest(Ref(req))).Times(1);
+ EXPECT_CALL(*mock_impl.get(), CreateInferRequest()).WillOnce(Return(req));
ASSERT_EQ(OK, exeNetwork->CreateInferRequest(req, &dsc));
}
TEST_F(ExecutableNetworkBaseTests, canReportErrorInCreateInferRequest) {
- EXPECT_CALL(*mock_impl.get(), CreateInferRequest(_)).WillOnce(Throw(std::runtime_error("compare")));
+ EXPECT_CALL(*mock_impl.get(), CreateInferRequest()).WillOnce(Throw(std::runtime_error("compare")));
IInferRequest::Ptr req;
- ASSERT_NE(exeNetwork->CreateInferRequest(req, &dsc), OK);
+ ASSERT_NE(OK, exeNetwork->CreateInferRequest(req, &dsc));
ASSERT_STREQ(dsc.msg, "compare");
}
TEST_F(ExecutableNetworkBaseTests, canCatchUnknownErrorInCreateInferRequest) {
- EXPECT_CALL(*mock_impl.get(), CreateInferRequest(_)).WillOnce(Throw(5));
+ EXPECT_CALL(*mock_impl.get(), CreateInferRequest()).WillOnce(Throw(5));
IInferRequest::Ptr req;
ASSERT_EQ(UNEXPECTED, exeNetwork->CreateInferRequest(req, nullptr));
}
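// Illustrative sketch (assumption: a TO_STATUS-like helper from exception2status.hpp
// converts exceptions into a StatusCode and fills the ResponseDesc): roughly how a
// base/wrapper class can bridge the zero-argument implementation back to the legacy
// C-style entry point that the tests above exercise. `_impl` stands for the wrapped
// IExecutableNetworkInternal implementation.
StatusCode CreateInferRequest(IInferRequest::Ptr& req, ResponseDesc* resp) noexcept override {
    TO_STATUS(req = _impl->CreateInferRequest());
}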
graph.CreateGraph(net, extMgr, cache);
auto dump_net = dump_graph_as_ie_net(graph);
- auto layers = details::CNNNetSortTopologically(*dump_net);
+ auto layers = details::CNNNetSortTopologically(dump_net);
ASSERT_EQ(layers.size(), 4);
ASSERT_EQ(layers[0]->type, "Input");
InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo();
execNetwork->setNetworkInputs(_networkInputs);
execNetwork->setNetworkOutputs(_networkOutputs);
- InferenceEngine::IInferRequest::Ptr inferRequest;
- execNetwork->CreateInferRequest(inferRequest);
+ InferenceEngine::IInferRequest::Ptr inferRequest = execNetwork->CreateInferRequest();
InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 3, 2, 2}, InferenceEngine::NCHW);
InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(desc);
InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo();
execNetwork->setNetworkInputs(_networkInputs);
execNetwork->setNetworkOutputs(_networkOutputs);
- InferenceEngine::IInferRequest::Ptr inferRequest;
- execNetwork->CreateInferRequest(inferRequest);
+ InferenceEngine::IInferRequest::Ptr inferRequest = execNetwork->CreateInferRequest();
InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32, {1, 3, 224, 224}, InferenceEngine::NCHW);
InferenceEngine::Blob::Ptr src = InferenceEngine::make_shared_blob<float>(desc);
InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo();
execNetwork->setNetworkInputs(_networkInputs);
execNetwork->setNetworkOutputs(_networkOutputs);
- InferenceEngine::IInferRequest::Ptr inferRequest;
- execNetwork->CreateInferRequest(inferRequest);
+ InferenceEngine::IInferRequest::Ptr inferRequest = execNetwork->CreateInferRequest();
InferenceEngine::TensorDesc desc1(InferenceEngine::Precision::FP32, {1, 3, 20, 20}, InferenceEngine::NCHW);
InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float>(desc1);
InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo();
execNetwork->setNetworkInputs(_networkInputs);
execNetwork->setNetworkOutputs(_networkOutputs);
- InferenceEngine::IInferRequest::Ptr inferRequest;
- execNetwork->CreateInferRequest(inferRequest);
+ InferenceEngine::IInferRequest::Ptr inferRequest = execNetwork->CreateInferRequest();
InferenceEngine::TensorDesc desc1(InferenceEngine::Precision::FP32, {1, 3, 2, 2}, InferenceEngine::NCHW);
InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float>(desc1);
InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo();
execNetwork->setNetworkInputs(_networkInputs);
execNetwork->setNetworkOutputs(_networkOutputs);
- InferenceEngine::IInferRequest::Ptr inferRequest;
- execNetwork->CreateInferRequest(inferRequest);
+ InferenceEngine::IInferRequest::Ptr inferRequest = execNetwork->CreateInferRequest();
InferenceEngine::TensorDesc desc1(InferenceEngine::Precision::FP32, {1, 3, 20, 20}, InferenceEngine::NCHW);
InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float>(desc1);
InferenceEngine::OutputsDataMap _networkOutputs = network.getOutputsInfo();
execNetwork->setNetworkInputs(_networkInputs);
execNetwork->setNetworkOutputs(_networkOutputs);
- InferenceEngine::IInferRequest::Ptr inferRequest;
- execNetwork->CreateInferRequest(inferRequest);
+ InferenceEngine::IInferRequest::Ptr inferRequest = execNetwork->CreateInferRequest();
InferenceEngine::TensorDesc desc1(InferenceEngine::Precision::FP32, {1, 3, 20, 20}, InferenceEngine::NCHW);
InferenceEngine::Blob::Ptr src1 = InferenceEngine::make_shared_blob<float>(desc1);