InfEngineBackendNet::InfEngineBackendNet() : netBuilder("")
{
hasNetOwner = false;
- targetDevice = InferenceEngine::TargetDevice::eCPU;
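+ // Newer Inference Engine releases (2019R2+) select plugins by device-name string; the TargetDevice enum is gone.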
+ device_name = "CPU";
}
InfEngineBackendNet::InfEngineBackendNet(InferenceEngine::CNNNetwork& net) : netBuilder(""), cnn(net)
{
hasNetOwner = true;
- targetDevice = InferenceEngine::TargetDevice::eCPU;
+ device_name = "CPU";
}
void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& inputs,
                                  const std::vector<Ptr<BackendWrapper> >& outputs,
                                  const std::string& layerName)
for (size_t i = 0; i < inpWrappers.size(); ++i)
{
const auto& inp = inpWrappers[i];
- const std::string& inpName = inp->dataPtr->name;
+ const std::string& inpName = inp->dataPtr->getName();
int inpId;
it = layers.find(inpName);
if (it == layers.end())
{
InferenceEngine::Builder::InputLayer inpLayer(!inpName.empty() ? inpName : kDefaultInpLayerName);
-
- std::vector<size_t> shape(inp->blob->dims());
- std::reverse(shape.begin(), shape.end());
-
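+ // getTensorDesc().getDims() returns the shape in natural order; the old Blob::dims() was reversed.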
+ std::vector<size_t> shape(inp->blob->getTensorDesc().getDims());
inpLayer.setPort(InferenceEngine::Port(shape));
inpId = netBuilder.addLayer(inpLayer);
}
CV_Assert(!outputs.empty());
InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[0]);
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
dataPtr->name = layerName;
+#else
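+ // Data's name is no longer a public field in newer releases; use the setName()/getName() accessors.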
+ dataPtr->setName(layerName);
+#endif
}
void InfEngineBackendNet::init(int targetId)
switch (targetId)
{
- case DNN_TARGET_CPU:
- targetDevice = InferenceEngine::TargetDevice::eCPU;
- break;
- case DNN_TARGET_OPENCL: case DNN_TARGET_OPENCL_FP16:
- targetDevice = InferenceEngine::TargetDevice::eGPU;
- break;
- case DNN_TARGET_MYRIAD:
- targetDevice = InferenceEngine::TargetDevice::eMYRIAD;
- break;
- case DNN_TARGET_FPGA:
- targetDevice = InferenceEngine::TargetDevice::eFPGA;
- break;
- default:
- CV_Error(Error::StsError, format("Unknown target identifier: %d", targetId));
- }
+ case DNN_TARGET_CPU:
+ device_name = "CPU";
+ break;
+ case DNN_TARGET_OPENCL:
+ case DNN_TARGET_OPENCL_FP16:
+ device_name = "GPU";
+ break;
+ case DNN_TARGET_MYRIAD:
+ device_name = "MYRIAD";
+ break;
+ case DNN_TARGET_FPGA:
+ device_name = "FPGA";
+ break;
+ default:
+ CV_Error(Error::StsNotImplemented, "Unknown target");
+ };
for (const auto& it : cnn.getInputsInfo())
{
const std::string& name = it.first;
auto blobIt = allBlobs.find(name);
CV_Assert(blobIt != allBlobs.end());
- it.second->setPrecision(blobIt->second->precision());
+ it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision());
}
for (const auto& it : cnn.getOutputsInfo())
{
const std::string& name = it.first;
auto blobIt = allBlobs.find(name);
CV_Assert(blobIt != allBlobs.end());
- it.second->setPrecision(blobIt->second->precision()); // Should be always FP32
+ it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision()); // Should always be FP32
}
initPlugin(cnn);
static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std::string& name = "")
{
- std::vector<size_t> reversedShape(&m.size[0], &m.size[0] + m.dims);
- std::reverse(reversedShape.begin(), reversedShape.end());
+ std::vector<size_t> shape(&m.size[0], &m.size[0] + m.dims);
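+ // Data is now constructed from a TensorDesc aggregate: {precision, dims, layout}.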
if (m.type() == CV_32F)
- return InferenceEngine::DataPtr(
- new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::FP32, estimateLayout(m))
- );
+ return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
+ {InferenceEngine::Precision::FP32, shape, estimateLayout(m)}));
else if (m.type() == CV_8U)
- return InferenceEngine::DataPtr(
- new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::U8, estimateLayout(m))
- );
+ return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
+ {InferenceEngine::Precision::U8, shape, estimateLayout(m)}));
else
CV_Error(Error::StsNotImplemented, format("Unsupported data type %d", m.type()));
}
InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape,
                                               InferenceEngine::Layout layout)
{
if (m.type() == CV_32F)
- return InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
- layout, shape, (float*)m.data);
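+ // make_shared_blob now takes a TensorDesc instead of separate precision/layout/dims arguments.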
+ return InferenceEngine::make_shared_blob<float>(
+ {InferenceEngine::Precision::FP32, shape, layout}, (float*)m.data);
else if (m.type() == CV_8U)
- return InferenceEngine::make_shared_blob<uint8_t>(InferenceEngine::Precision::U8,
- layout, shape, (uint8_t*)m.data);
+ return InferenceEngine::make_shared_blob<uint8_t>(
+ {InferenceEngine::Precision::U8, shape, layout}, (uint8_t*)m.data);
else
CV_Error(Error::StsNotImplemented, format("Unsupported data type %d", m.type()));
}
InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout)
{
- std::vector<size_t> reversedShape(&m.size[0], &m.size[0] + m.dims);
- std::reverse(reversedShape.begin(), reversedShape.end());
- return wrapToInfEngineBlob(m, reversedShape, layout);
+ std::vector<size_t> shape(&m.size[0], &m.size[0] + m.dims);
+ return wrapToInfEngineBlob(m, shape, layout);
}
InferenceEngine::Blob::Ptr cloneBlob(const InferenceEngine::Blob::Ptr& blob)
{
- InferenceEngine::Precision precision = blob->precision();
InferenceEngine::Blob::Ptr copy;
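+ // The source blob's TensorDesc carries precision, dims and layout, so it can seed the copy directly.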
+ auto description = blob->getTensorDesc();
+ InferenceEngine::Precision precision = description.getPrecision();
if (precision == InferenceEngine::Precision::FP32)
{
- copy = InferenceEngine::make_shared_blob<float>(precision, blob->layout(), blob->dims());
+ copy = InferenceEngine::make_shared_blob<float>(description);
}
else if (precision == InferenceEngine::Precision::U8)
{
- copy = InferenceEngine::make_shared_blob<uint8_t>(precision, blob->layout(), blob->dims());
+ copy = InferenceEngine::make_shared_blob<uint8_t>(description);
}
else
CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
Ptr<InfEngineBackendWrapper> ieWrapper = wrapper.dynamicCast<InfEngineBackendWrapper>();
CV_Assert(!ieWrapper.empty());
InferenceEngine::DataPtr srcData = ieWrapper->dataPtr;
- dataPtr = InferenceEngine::DataPtr(
- new InferenceEngine::Data(srcData->name, srcData->dims, srcData->precision,
- srcData->layout)
- );
+
+ dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(srcData->getName(), srcData->getTensorDesc()));
blob = ieWrapper->blob;
}
}
-static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEnginePluginPtr>& getSharedPlugins()
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
+static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr>& getSharedPlugins()
{
- static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
+ static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
return sharedPlugins;
}
-
+#else
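+// A single process-wide Core instance manages every device plugin from 2019R2 onward.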
+static InferenceEngine::Core& getCore()
+{
+ static InferenceEngine::Core core;
+ return core;
+}
+#endif
#if !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
static bool detectMyriadX_()
InferenceEngine::CNNNetwork cnn = InferenceEngine::CNNNetwork(
InferenceEngine::Builder::convertToICNNNetwork(builder.build()));
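+ // Probe: VPU_PLATFORM=VPU_2480 targets Myriad X, so this load is expected to fail on other VPUs.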
- InferenceEngine::TargetDevice device = InferenceEngine::TargetDevice::eMYRIAD;
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
InferenceEngine::InferenceEnginePluginPtr enginePtr;
{
AutoLock lock(getInitializationMutex());
auto& sharedPlugins = getSharedPlugins();
- auto pluginIt = sharedPlugins.find(device);
+ auto pluginIt = sharedPlugins.find("MYRIAD");
if (pluginIt != sharedPlugins.end()) {
enginePtr = pluginIt->second;
} else {
auto dispatcher = InferenceEngine::PluginDispatcher({""});
- enginePtr = dispatcher.getSuitablePlugin(device);
- sharedPlugins[device] = enginePtr;
+ enginePtr = dispatcher.getPluginByDevice("MYRIAD");
+ sharedPlugins["MYRIAD"] = enginePtr;
}
}
auto plugin = InferenceEngine::InferencePlugin(enginePtr);
try
{
auto netExec = plugin.LoadNetwork(cnn, {{"VPU_PLATFORM", "VPU_2480"}});
+#else
+ try
+ {
+ auto netExec = getCore().LoadNetwork(cnn, "MYRIAD", {{"VPU_PLATFORM", "VPU_2480"}});
+#endif
auto infRequest = netExec.CreateInferRequest();
} catch(...) {
return false;
}
#endif // !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
-void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
+void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
{
CV_Assert(!isInitialized());
try
{
AutoLock lock(getInitializationMutex());
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
auto& sharedPlugins = getSharedPlugins();
- auto pluginIt = sharedPlugins.find(targetDevice);
+ auto pluginIt = sharedPlugins.find(device_name);
if (pluginIt != sharedPlugins.end())
{
enginePtr = pluginIt->second;
}
else
+#endif
{
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
auto dispatcher = InferenceEngine::PluginDispatcher({""});
- if (targetDevice == InferenceEngine::TargetDevice::eFPGA)
+ if (device_name == "FPGA")
enginePtr = dispatcher.getPluginByDevice("HETERO:FPGA,CPU");
else
- enginePtr = dispatcher.getSuitablePlugin(targetDevice);
- sharedPlugins[targetDevice] = enginePtr;
-
+ enginePtr = dispatcher.getPluginByDevice(device_name);
+ sharedPlugins[device_name] = enginePtr;
+#else
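+ // The Core API exposes no plugin handle to cache, so a flag records initialization instead.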
+ isInit = true;
+#endif
std::vector<std::string> candidates;
-
std::string param_pluginPath = utils::getConfigurationParameterString("OPENCV_DNN_IE_EXTRA_PLUGIN_PATH", "");
if (!param_pluginPath.empty())
{
candidates.push_back(param_pluginPath);
}
- if (targetDevice == InferenceEngine::TargetDevice::eCPU ||
- targetDevice == InferenceEngine::TargetDevice::eFPGA)
+ if (device_name == "CPU" || device_name == "FPGA")
{
std::string suffixes[] = {"_avx2", "_sse4", ""};
bool haveFeature[] = {
{
InferenceEngine::IExtensionPtr extension =
InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(libName);
+
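+ // Older releases attach extensions to the plugin handle; 2019R2+ registers them with the Core per device.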
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
enginePtr->AddExtension(extension, 0);
+#else
+ getCore().AddExtension(extension, "CPU");
+#endif
CV_LOG_INFO(NULL, "DNN-IE: Loaded extension plugin: " << libName);
found = true;
break;
// Some networks can work without a library of extra layers.
#ifndef _WIN32
// Limit the number of CPU threads.
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
enginePtr->SetConfig({{
InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
}}, 0);
+#else
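+ // Core::SetConfig applies per device, so only the CPU plugin gets the thread limit.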
+ if (device_name == "CPU")
+ getCore().SetConfig({{
+ InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
+ }}, device_name);
+#endif
#endif
}
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
plugin = InferenceEngine::InferencePlugin(enginePtr);
-
netExec = plugin.LoadNetwork(net, {});
+#else
+ netExec = getCore().LoadNetwork(net, device_name);
+#endif
}
catch (const std::exception& ex)
{
bool InfEngineBackendNet::isInitialized()
{
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
return (bool)enginePtr;
+#else
+ return isInit;
+#endif
}
void InfEngineBackendNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs)
auto wrappers = infEngineWrappers(ptrs);
for (const auto& wrapper : wrappers)
{
- std::string name = wrapper->dataPtr->name;
+ std::string name = wrapper->dataPtr->getName();
name = name.empty() ? kDefaultInpLayerName : name;
allBlobs.insert({name, wrapper->blob});
}
for (int i = 0; i < outs.size(); ++i)
{
outs[i]->futureMat = outProms[i].getArrayResult();
- outsNames[i] = outs[i]->dataPtr->name;
+ outsNames[i] = outs[i]->dataPtr->getName();
}
}
Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
{
- // NOTE: Inference Engine sizes are reversed.
- std::vector<size_t> dims = blob->dims();
- std::vector<int> size(dims.rbegin(), dims.rend());
+ std::vector<size_t> dims = blob->getTensorDesc().getDims();
+ std::vector<int> size(dims.begin(), dims.end());
+ auto precision = blob->getTensorDesc().getPrecision();
int type = -1;
- switch (blob->precision())
+ switch (precision)
{
case InferenceEngine::Precision::FP32: type = CV_32F; break;
case InferenceEngine::Precision::U8: type = CV_8U; break;
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
{
- auto halfs = InferenceEngine::make_shared_blob<int16_t>(InferenceEngine::Precision::FP16, blob->layout(), blob->dims());
+ auto halfs = InferenceEngine::make_shared_blob<int16_t>({
+ InferenceEngine::Precision::FP16, blob->getTensorDesc().getDims(),
+ blob->getTensorDesc().getLayout()
+ });
halfs->allocate();
Mat floatsData(1, blob->size(), CV_32F, blob->buffer());
Mat halfsData(1, blob->size(), CV_16SC1, halfs->buffer());
{
#ifdef HAVE_INF_ENGINE
AutoLock lock(getInitializationMutex());
- getSharedPlugins().erase(InferenceEngine::TargetDevice::eMYRIAD);
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
+ getSharedPlugins().erase("MYRIAD");
+#else
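+ // Unregistering evicts the cached MYRIAD plugin so the next LoadNetwork reinitializes the device.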
+ getCore().UnregisterPlugin("MYRIAD");
+#endif
#endif // HAVE_INF_ENGINE
}
static inline void genData(const std::vector<size_t>& dims, Mat& m, Blob::Ptr& dataPtr)
{
- std::vector<int> reversedDims(dims.begin(), dims.end());
- std::reverse(reversedDims.begin(), reversedDims.end());
-
- m.create(reversedDims, CV_32F);
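+ // getDims() already matches Mat's dimension order, so no reversal is needed here either.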
+ m.create(std::vector<int>(dims.begin(), dims.end()), CV_32F);
randu(m, -1, 1);
- dataPtr = make_shared_blob<float>(Precision::FP32, dims, (float*)m.data);
+ dataPtr = make_shared_blob<float>({Precision::FP32, dims, Layout::ANY}, (float*)m.data);
}
void runIE(Target target, const std::string& xmlPath, const std::string& binPath,
CNNNetwork net = reader.getNetwork();
+ std::string device_name;
+
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
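+ // One Core object replaces the dispatcher/plugin pair on 2019R2 and newer.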
+ Core ie;
+#else
InferenceEnginePluginPtr enginePtr;
InferencePlugin plugin;
+#endif
ExecutableNetwork netExec;
InferRequest infRequest;
+
try
{
- auto dispatcher = InferenceEngine::PluginDispatcher({""});
switch (target)
{
case DNN_TARGET_CPU:
- enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eCPU);
+ device_name = "CPU";
break;
case DNN_TARGET_OPENCL:
case DNN_TARGET_OPENCL_FP16:
- enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eGPU);
+ device_name = "GPU";
break;
case DNN_TARGET_MYRIAD:
- enginePtr = dispatcher.getSuitablePlugin(TargetDevice::eMYRIAD);
+ device_name = "MYRIAD";
break;
case DNN_TARGET_FPGA:
- enginePtr = dispatcher.getPluginByDevice("HETERO:FPGA,CPU");
+ device_name = "FPGA";
break;
default:
CV_Error(Error::StsNotImplemented, "Unknown target");
};
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
+ auto dispatcher = InferenceEngine::PluginDispatcher({""});
+ enginePtr = dispatcher.getPluginByDevice(device_name);
+#endif
if (target == DNN_TARGET_CPU || target == DNN_TARGET_FPGA)
{
std::string suffixes[] = {"_avx2", "_sse4", ""};
try
{
IExtensionPtr extension = make_so_pointer<IExtension>(libName);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+ ie.AddExtension(extension, device_name);
+#else
enginePtr->AddExtension(extension, 0);
+#endif
break;
}
catch(...) {}
}
// Some networks can work without a library of extra layers.
}
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GT(2019010000)
+ netExec = ie.LoadNetwork(net, device_name);
+#else
plugin = InferencePlugin(enginePtr);
-
netExec = plugin.LoadNetwork(net, {});
+#endif
infRequest = netExec.CreateInferRequest();
}
catch (const std::exception& ex)
BlobMap inputBlobs;
for (auto& it : net.getInputsInfo())
{
- genData(it.second->getDims(), inputsMap[it.first], inputBlobs[it.first]);
+ genData(it.second->getTensorDesc().getDims(), inputsMap[it.first], inputBlobs[it.first]);
}
infRequest.SetInput(inputBlobs);
BlobMap outputBlobs;
for (auto& it : net.getOutputsInfo())
{
- genData(it.second->dims, outputsMap[it.first], outputBlobs[it.first]);
+ genData(it.second->getTensorDesc().getDims(), outputsMap[it.first], outputBlobs[it.first]);
}
infRequest.SetOutput(outputBlobs);