# ======================
-macro(ocv_ie_find_extra_libraries find_prefix find_suffix)
- file(GLOB libraries "${INF_ENGINE_LIB_DIRS}/${find_prefix}inference_engine*${find_suffix}")
- foreach(full_path IN LISTS libraries)
- get_filename_component(library "${full_path}" NAME_WE)
- string(REPLACE "${find_prefix}" "" library "${library}")
- if(library STREQUAL "inference_engine" OR library STREQUAL "inference_engined")
- # skip
- else()
- add_library(${library} UNKNOWN IMPORTED)
- set_target_properties(${library} PROPERTIES
- IMPORTED_LOCATION "${full_path}")
- list(APPEND custom_libraries ${library})
- endif()
- endforeach()
-endmacro()
-
-function(add_custom_ie_build _inc _lib _lib_rel _lib_dbg _msg)
- if(NOT _inc OR NOT (_lib OR _lib_rel OR _lib_dbg))
- return()
- endif()
- if(NOT _lib)
- if(_lib_rel)
- set(_lib "${_lib_rel}")
- else()
- set(_lib "${_lib_dbg}")
- endif()
- endif()
- add_library(inference_engine UNKNOWN IMPORTED)
- set_target_properties(inference_engine PROPERTIES
- IMPORTED_LOCATION "${_lib}"
- IMPORTED_IMPLIB_RELEASE "${_lib_rel}"
- IMPORTED_IMPLIB_DEBUG "${_lib_dbg}"
- INTERFACE_INCLUDE_DIRECTORIES "${_inc}"
- )
-
- set(custom_libraries "")
- set(__prefixes "${CMAKE_FIND_LIBRARY_PREFIXES}")
- if(NOT __prefixes)
- set(__prefixes "_empty_")
- endif()
- foreach(find_prefix ${__prefixes})
- if(find_prefix STREQUAL "_empty_") # foreach doesn't iterate over empty elements
- set(find_prefix "")
- endif()
- if(NOT DEFINED INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES) # allow custom override
- set(INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
- if(APPLE)
- ocv_list_filterout(INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES "^.so$") # skip plugins (can't be linked)
- endif()
- endif()
- foreach(find_suffix ${INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES})
- ocv_ie_find_extra_libraries("${find_prefix}" "${find_suffix}")
- endforeach()
- if(NOT CMAKE_FIND_LIBRARY_SUFFIXES)
- ocv_ie_find_extra_libraries("${find_prefix}" "")
- endif()
- endforeach()
-
- if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
- find_library(INF_ENGINE_OMP_LIBRARY iomp5 PATHS "${INF_ENGINE_OMP_DIR}" NO_DEFAULT_PATH)
- if(NOT INF_ENGINE_OMP_LIBRARY)
- message(WARNING "OpenMP for IE have not been found. Set INF_ENGINE_OMP_DIR variable if you experience build errors.")
- endif()
- endif()
- if(EXISTS "${INF_ENGINE_OMP_LIBRARY}")
- set_target_properties(inference_engine PROPERTIES IMPORTED_LINK_INTERFACE_LIBRARIES "${INF_ENGINE_OMP_LIBRARY}")
- endif()
- set(INF_ENGINE_VERSION "Unknown" CACHE STRING "")
- set(INF_ENGINE_TARGET "inference_engine;${custom_libraries}" PARENT_SCOPE)
- message(STATUS "Detected InferenceEngine: ${_msg}")
-endfunction()
-
-# ======================
-
find_package(InferenceEngine QUIET)
if(InferenceEngine_FOUND)
set(INF_ENGINE_TARGET ${InferenceEngine_LIBRARIES})
endif()
set(INF_ENGINE_RELEASE "${INF_ENGINE_RELEASE_INIT}" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
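# Configure-time sketch (illustrative): to force the encoding from the example
# above, OpenVINO 2020.1.0.2 -> YYYYAABBCC = 2020010002:
#   cmake -DINF_ENGINE_RELEASE=2020010002 <opencv-source-dir>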
-if(NOT INF_ENGINE_TARGET AND INF_ENGINE_LIB_DIRS AND INF_ENGINE_INCLUDE_DIRS)
- find_path(ie_custom_inc "inference_engine.hpp" PATHS "${INF_ENGINE_INCLUDE_DIRS}" NO_DEFAULT_PATH)
- if(CMAKE_BUILD_TYPE STREQUAL "Debug")
- find_library(ie_custom_lib_dbg "inference_engined" PATHS "${INF_ENGINE_LIB_DIRS}" NO_DEFAULT_PATH) # Win32 and MacOSX
- endif()
- find_library(ie_custom_lib "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}" NO_DEFAULT_PATH)
- find_library(ie_custom_lib_rel "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}/Release" NO_DEFAULT_PATH)
- find_library(ie_custom_lib_dbg "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}/Debug" NO_DEFAULT_PATH)
- add_custom_ie_build("${ie_custom_inc}" "${ie_custom_lib}" "${ie_custom_lib_rel}" "${ie_custom_lib_dbg}" "INF_ENGINE_{INCLUDE,LIB}_DIRS")
-endif()
-
-set(_loc "$ENV{INTEL_OPENVINO_DIR}")
-if(NOT _loc AND DEFINED ENV{INTEL_CVSDK_DIR})
- set(_loc "$ENV{INTEL_CVSDK_DIR}") # OpenVINO 2018.x
-endif()
-if(NOT INF_ENGINE_TARGET AND _loc)
- if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
- set(INF_ENGINE_PLATFORM_DEFAULT "ubuntu_16.04")
- else()
- set(INF_ENGINE_PLATFORM_DEFAULT "")
- endif()
- set(INF_ENGINE_PLATFORM "${INF_ENGINE_PLATFORM_DEFAULT}" CACHE STRING "InferenceEngine platform (library dir)")
- find_path(ie_custom_env_inc "inference_engine.hpp" PATHS "${_loc}/deployment_tools/inference_engine/include" NO_DEFAULT_PATH)
- if(CMAKE_BUILD_TYPE STREQUAL "Debug")
- find_library(ie_custom_env_lib_dbg "inference_engined" PATHS "${_loc}/deployment_tools/inference_engine/lib/${INF_ENGINE_PLATFORM}/intel64" NO_DEFAULT_PATH)
- endif()
- find_library(ie_custom_env_lib "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/${INF_ENGINE_PLATFORM}/intel64" NO_DEFAULT_PATH)
- find_library(ie_custom_env_lib_rel "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/intel64/Release" NO_DEFAULT_PATH)
- find_library(ie_custom_env_lib_dbg "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/intel64/Debug" NO_DEFAULT_PATH)
- add_custom_ie_build("${ie_custom_env_inc}" "${ie_custom_env_lib}" "${ie_custom_env_lib_rel}" "${ie_custom_env_lib_dbg}" "OpenVINO (${_loc})")
-endif()
-
set(tgts)
set(defs)
endif()
set(dnn_runtime_libs "")
-if(TARGET ocv.3rdparty.openvino)
+
+ocv_option(OPENCV_DNN_OPENVINO "Build with OpenVINO support (2021.4+)" (TARGET ocv.3rdparty.openvino))
+if(TARGET ocv.3rdparty.openvino AND OPENCV_DNN_OPENVINO)
+ if(NOT HAVE_OPENVINO AND NOT HAVE_NGRAPH)
+ message(FATAL_ERROR "DNN: Inference Engine is not supported without 'nGraph'. Check the build configuration.")
+ endif()
list(APPEND dnn_runtime_libs ocv.3rdparty.openvino)
endif()
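# A hypothetical configure sketch (not part of this change): the new option lets
# builders exclude OpenVINO from the DNN runtime even when the imported target exists:
#   cmake -DWITH_OPENVINO=ON -DOPENCV_DNN_OPENVINO=OFF <opencv-source-dir>
# In that case dnn_runtime_libs is left without ocv.3rdparty.openvino.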
*/
virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs);
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs);
-
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs, const std::vector<Ptr<BackendNode> >& nodes);
/**
/* Values for 'OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE' parameter */
+/// @deprecated
#define CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API "NN_BUILDER"
+/// @deprecated
#define CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH "NGRAPH"
/** @brief Returns Inference Engine internal backend API.
*
* See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
*
- * Default value is controlled through `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable).
+ * The `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable) is ignored since 4.6.0.
+ *
+ * @deprecated
*/
CV_EXPORTS_W cv::String getInferenceEngineBackendType();
* See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
*
* @returns previous value of internal backend API
+ *
+ * @deprecated
*/
CV_EXPORTS_W cv::String setInferenceEngineBackendType(const cv::String& newBackendType);
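// Illustrative sketch of the now-deprecated pair (assumes an OpenVINO-enabled
// build; since 4.6.0 the backend is fixed to nGraph, so the getter is expected
// to report CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH):
//
//   cv::String prev = cv::dnn::setInferenceEngineBackendType(
//       CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);  // returns the previous value
//   CV_Assert(cv::dnn::getInferenceEngineBackendType()
//             == CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);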
#ifdef HAVE_INF_ENGINE
if (checkIETarget(DNN_TARGET_CPU)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_CPU));
-#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_CPU));
#endif
}
if (checkIETarget(DNN_TARGET_MYRIAD)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_MYRIAD));
-#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_MYRIAD));
#endif
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- if (checkIETarget(DNN_TARGET_FPGA))
- backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_FPGA));
-#endif
#ifdef HAVE_OPENCL
if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
{
if (checkIETarget(DNN_TARGET_OPENCL)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL));
-#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL));
#endif
}
if (checkIETarget(DNN_TARGET_OPENCL_FP16)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL_FP16));
-#endif
#ifdef HAVE_DNN_NGRAPH
backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL_FP16));
#endif
be = (Backend)PARAM_DNN_BACKEND_DEFAULT;
#ifdef HAVE_INF_ENGINE
if (be == DNN_BACKEND_INFERENCE_ENGINE)
- be = getInferenceEngineBackendTypeParam();
+ be = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
#endif
std::vector<Target> result;
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && inputsData.size() == 1);
+ return backendId == DNN_BACKEND_OPENCV;
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
}
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- CV_CheckEQ(inputsData.size(), (size_t)1, "");
- CV_CheckEQ(inputsData[0].dims, 4, "");
- const size_t numChannels = inputsData[0].size[1];
- CV_Assert(numChannels <= 4);
-
- // Scale
- InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32, {numChannels},
- InferenceEngine::Layout::C);
- auto weights = InferenceEngine::make_shared_blob<float>(td);
- weights->allocate();
-
- float* weight_buf = weights->buffer().as<float*>();
- std::fill(weight_buf, weight_buf + numChannels, scaleFactors[0]);
-
- // Mean subtraction
- auto biases = InferenceEngine::make_shared_blob<float>(td);
- biases->allocate();
- float* bias_buf = biases->buffer().as<float*>();
-
- for (int i = 0; i < numChannels; ++i)
- {
- bias_buf[i] = -means[0][i] * scaleFactors[0];
- }
-
- InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
- addConstantData("weights", weights, ieLayer);
- addConstantData("biases", biases, ieLayer);
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
std::vector<String> outNames;
std::vector<MatShape> shapes;
}
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- return Ptr<BackendWrapper>(new InfEngineBackendWrapper(targetId, m));
-#else
- CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
+ CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
}
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
#ifdef HAVE_DNN_NGRAPH
return Ptr<BackendWrapper>(new NgraphBackendWrapper(targetId, m));
#else
- CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
+ CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of OpenVINO / Inference Engine + nGraph");
#endif
}
else
}
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
- return wrapMat(preferableBackend, preferableTarget, host);
+ CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
}
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
preferableBackend = (Backend)PARAM_DNN_BACKEND_DEFAULT;
#ifdef HAVE_INF_ENGINE
if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
- preferableBackend = getInferenceEngineBackendTypeParam();
+ preferableBackend = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; // = getInferenceEngineBackendTypeParam();
#endif
CV_Assert(preferableBackend != DNN_BACKEND_OPENCV ||
preferableTarget == DNN_TARGET_CPU ||
preferableTarget == DNN_TARGET_OPENCL);
#ifdef HAVE_INF_ENGINE
- if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
- preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
CV_Assert(
(preferableTarget == DNN_TARGET_CPU && (!isArmComputePlugin() || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)) ||
CV_Assert(preferableTarget == DNN_TARGET_CPU || IS_DNN_OPENCL_TARGET(preferableTarget));
else if (preferableBackend == DNN_BACKEND_HALIDE)
initHalideBackend();
- else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- initInfEngineBackend(blobsToKeep_);
-#else
- CV_Assert(false && "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
- }
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
#ifdef HAVE_DNN_NGRAPH
#endif
}
else
- CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
+ CV_Error(Error::StsNotImplemented, cv::format("Unknown backend identifier: %d", preferableBackend));
}
void initHalideBackend()
}
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- // Before launching Inference Engine graph we need to specify output blobs.
- // This function requests output blobs based on inputs references of
- // layers from default backend or layers from different graphs.
- void addInfEngineNetOutputs(LayerData &ld)
- {
- CV_TRACE_FUNCTION();
- Ptr<InfEngineBackendNet> layerNet;
- if (ld.backendNodes.find(preferableBackend) != ld.backendNodes.end())
- {
- Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
- if (!node.empty())
- {
- Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
- CV_Assert(!ieNode.empty()); CV_Assert(!ieNode->net.empty());
- layerNet = ieNode->net;
- }
- }
- // For an every input reference we check that it belongs to one of
- // the Inference Engine backend graphs. Request an output blob if it is.
- // Do nothing if layer's input is from the same graph.
- for (int i = 0; i < ld.inputBlobsId.size(); ++i)
- {
- LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
- Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
- if (!inpNode.empty())
- {
- Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
- CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
- if (layerNet != ieInpNode->net)
- {
- // layerNet is empty or nodes are from different graphs.
- ieInpNode->net->addOutput(ieInpNode->layer.getName());
- }
- }
- }
- }
-
- void initInfEngineBackend(const std::vector<LayerPin>& blobsToKeep_)
- {
- CV_TRACE_FUNCTION();
- CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, haveInfEngine());
- MapIdToLayerData::iterator it;
- Ptr<InfEngineBackendNet> net;
-
- for (it = layers.begin(); it != layers.end(); ++it)
- {
- LayerData &ld = it->second;
- if (ld.id == 0)
- {
- CV_Assert((netInputLayer->outNames.empty() && ld.outputBlobsWrappers.size() == 1) ||
- (netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
- for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
- {
- InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
- dataPtr->name = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
-#else
- dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
-#endif
- }
- }
- else
- {
- for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
- {
- InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
- dataPtr->name = ld.name;
-#else
- dataPtr->setName(ld.name);
-#endif
- }
- }
- }
-
- if (skipInfEngineInit)
- {
- Ptr<BackendNode> node = layers[lastLayerId].backendNodes[preferableBackend];
- CV_Assert(!node.empty());
-
- Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
- CV_Assert(!ieNode.empty());
- ieNode->net->reset();
-
- for (it = layers.begin(); it != layers.end(); ++it)
- {
- LayerData &ld = it->second;
- if (ld.id == 0)
- {
- for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
- {
- InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.inputBlobsWrappers[i]);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
- dataPtr->name = netInputLayer->outNames[i];
-#else
- dataPtr->setName(netInputLayer->outNames[i]);
-#endif
- }
- }
- else
- {
- for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
- {
- InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
- dataPtr->name = ld.name;
-#else
- dataPtr->setName(ld.name);
-#endif
- }
- }
- ieNode->net->addBlobs(ld.inputBlobsWrappers);
- ieNode->net->addBlobs(ld.outputBlobsWrappers);
- ld.skip = true;
- }
- layers[lastLayerId].skip = false;
- ieNode->net->init((Target)preferableTarget);
- return;
- }
-
- // Build Inference Engine networks from sets of layers that support this
- // backend. Split a whole model on several Inference Engine networks if
- // some of layers are not implemented.
-
- bool supportsCPUFallback = preferableTarget == DNN_TARGET_CPU ||
- BackendRegistry::checkIETarget(DNN_TARGET_CPU);
-
- // Set of all input and output blobs wrappers for current network.
- std::map<LayerPin, Ptr<BackendWrapper> > netBlobsWrappers;
- for (it = layers.begin(); it != layers.end(); ++it)
- {
- LayerData &ld = it->second;
- if (ld.id == 0 && ld.skip)
- continue;
- bool fused = ld.skip;
-
- Ptr<Layer> layer = ld.layerInstance;
- if (!fused && !layer->supportBackend(preferableBackend))
- {
- bool customizable = ld.id != 0 &&
- INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2) &&
- supportsCPUFallback;
- // TODO: there is a bug in Myriad plugin with custom layers shape infer.
- if (preferableTarget == DNN_TARGET_MYRIAD)
- {
- for (int i = 0; customizable && i < ld.inputBlobs.size(); ++i)
- {
- customizable = ld.inputBlobs[i]->size[0] == 1;
- }
- }
-
- // TODO: fix these workarounds
- if (preferableTarget == DNN_TARGET_MYRIAD ||
- preferableTarget == DNN_TARGET_OPENCL ||
- preferableTarget == DNN_TARGET_OPENCL_FP16)
- customizable &= ld.type != "Concat";
-
- if (preferableTarget == DNN_TARGET_OPENCL ||
- preferableTarget == DNN_TARGET_OPENCL_FP16)
- customizable &= ld.type != "Power";
-
- if (preferableTarget == DNN_TARGET_OPENCL)
- customizable &= ld.type != "Eltwise";
-
- if (!customizable)
- {
- addInfEngineNetOutputs(ld);
- net = Ptr<InfEngineBackendNet>();
- netBlobsWrappers.clear(); // Is not used for R5 release but we don't wrap it to #ifdef.
- layer->preferableTarget = DNN_TARGET_CPU;
- continue;
- }
- }
- ld.skip = true; // Initially skip all Inference Engine supported layers.
-
- // Create a new network if one of inputs from different Inference Engine graph.
- for (int i = 0; i < ld.inputBlobsId.size(); ++i)
- {
- LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
- Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
- if (!inpNode.empty())
- {
- Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
- CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
- if (ieInpNode->net != net)
- {
- net = Ptr<InfEngineBackendNet>();
- netBlobsWrappers.clear(); // Is not used for R5 release but we don't wrap it to #ifdef.
- break;
- }
- }
- }
-
- Ptr<BackendNode> node;
- if (!net.empty())
- {
- if (fused)
- {
- bool inPlace = ld.inputBlobsId.size() == 1 && ld.outputBlobs.size() == 1 &&
- ld.inputBlobs[0]->data == ld.outputBlobs[0].data;
- CV_Assert(inPlace);
- node = layers[ld.inputBlobsId[0].lid].backendNodes[preferableBackend];
- ld.inputBlobsWrappers = layers[ld.inputBlobsId[0].lid].inputBlobsWrappers;
- }
- }
- else
- net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet());
-
- if (!fused)
- {
- if (layer->supportBackend(preferableBackend))
- node = layer->initInfEngine(ld.inputBlobsWrappers);
- else
- {
- node = Ptr<BackendNode>(new InfEngineBackendNode(
- ld.layerInstance, ld.inputBlobs, ld.outputBlobs, ld.internals));
- }
- }
- else if (node.empty())
- continue;
-
- CV_Assert(!node.empty());
- ld.backendNodes[preferableBackend] = node;
-
- Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
- CV_Assert(!ieNode.empty());
- ieNode->net = net;
-
- for (const auto& pin : blobsToKeep_)
- {
- if (pin.lid == ld.id)
- {
- ieNode->net->addOutput(ieNode->layer.getName());
- break;
- }
- }
-
- // Convert weights in FP16 for specific targets.
- if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
- preferableTarget == DNN_TARGET_MYRIAD ||
- preferableTarget == DNN_TARGET_FPGA) && !fused)
- {
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
- for (const std::string& name : {"weights", "biases"})
- {
- auto it = ieNode->layer.getParameters().find(name);
- if (it != ieNode->layer.getParameters().end())
- {
- InferenceEngine::Blob::Ptr bp = it->second.as<InferenceEngine::Blob::Ptr>();
- it->second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
- }
- }
-#else
- auto& blobs = ieNode->layer.getConstantData();
- if (blobs.empty())
- {
- // In case of non weightable layer we have to specify
- // it's precision adding dummy blob.
- auto blob = InferenceEngine::make_shared_blob<int16_t>(
- InferenceEngine::Precision::FP16,
- InferenceEngine::Layout::C, {1});
- blob->allocate();
- blobs[""] = blob;
- }
- else
- {
- for (auto& it : blobs)
- it.second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(it.second));
- }
-#endif
- }
-
- if (!fused)
- net->addLayer(ieNode->layer);
-
- net->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers, ieNode->layer.getName());
- net->addBlobs(ld.inputBlobsWrappers);
- net->addBlobs(ld.outputBlobsWrappers);
- addInfEngineNetOutputs(ld);
- }
-
- // Initialize all networks.
- for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
- {
- LayerData &ld = it->second;
- if (ld.backendNodes.find(preferableBackend) == ld.backendNodes.end())
- continue;
-
- Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
- if (node.empty())
- continue;
-
- Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
- if (ieNode.empty())
- continue;
-
- CV_Assert(!ieNode->net.empty());
-
- if (!ieNode->net->isInitialized())
- {
- ieNode->net->init((Target)preferableTarget);
- ld.skip = false;
- }
- }
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
/** mark input pins as outputs from other subnetworks
void initNgraphBackend(const std::vector<LayerPin>& blobsToKeep_)
{
CV_TRACE_FUNCTION();
- CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, haveInfEngine());
+ CV_CheckEQ(preferableBackend, DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, "");
Ptr<InfEngineNgraphNet> net;
CV_TRACE_FUNCTION();
if(!fusion || (preferableBackend != DNN_BACKEND_OPENCV &&
- preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH))
return;
{
forwardHalide(ld.outputBlobsWrappers, node);
}
- else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- {
- forwardInfEngine(ld.outputBlobsWrappers, node, isAsync);
- }
+#ifdef HAVE_INF_ENGINE
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
forwardNgraph(ld.outputBlobsWrappers, node, isAsync);
}
+#endif
else
{
CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
// Transfer data to CPU if it's required.
ld.outputBlobsWrappers[pin.oid]->copyToHost();
}
- CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+ CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
- if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- Ptr<InfEngineBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<InfEngineBackendWrapper>();
- return std::move(wrapper->futureMat);
-#else
- CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
- }
- else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- {
-#ifdef HAVE_DNN_NGRAPH
- Ptr<NgraphBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<NgraphBackendWrapper>();
- return std::move(wrapper->futureMat);
+ Ptr<NgraphBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<NgraphBackendWrapper>();
+ return std::move(wrapper->futureMat);
#else
- CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
-#endif
- }
+ CV_Error(Error::StsNotImplemented, "DNN: OpenVINO/nGraph backend is required");
#endif // HAVE_INF_ENGINE
- CV_Error(Error::StsNotImplemented, "DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 backend is required");
}
AsyncArray getBlobAsync(String outputName)
CV_TRACE_REGION_NEXT("backendNode");
Ptr<BackendNode> backendNode;
-#ifdef HAVE_DNN_NGRAPH
- if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
{
auto fake_node = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{});
Ptr<InfEngineNgraphNode> backendNodeNGraph(new InfEngineNgraphNode(fake_node));
backendNodeNGraph->net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet(*(cvNet.impl), ieNet));
backendNode = backendNodeNGraph;
}
- else
-#endif
- {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- Ptr<InfEngineBackendNode> backendNodeNN(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
- backendNodeNN->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
- backendNode = backendNodeNN;
-#else
- CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
- }
CV_TRACE_REGION_NEXT("register_outputs");
-#ifdef HAVE_DNN_NGRAPH
auto ngraphFunction = ieNet.getFunction();
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
- std::list< std::shared_ptr<ngraph::Node> > ngraphOperations;
-#else
- std::vector< std::shared_ptr<ngraph::Node> > ngraphOperations;
-#endif
- if (ngraphFunction)
- {
- ngraphOperations = ngraphFunction->get_ops();
- }
-#endif
+ CV_Assert(ngraphFunction);
+ std::vector< std::shared_ptr<ngraph::Node> > ngraphOperations = ngraphFunction->get_ops();
for (auto& it : ieNet.getOutputsInfo())
{
LayerData& ld = cvNet.impl->layers[lid];
-#ifdef HAVE_DNN_NGRAPH
- if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
{
Ptr<Layer> cvLayer(new NgraphBackendLayer(ieNet));
cvLayer->name = outputName;
auto process_layer = [&](const std::string& name) -> bool
{
- if (ngraphFunction)
- {
- CV_TRACE_REGION("ngraph_function");
- for (const auto& op : ngraphOperations)
- {
- CV_Assert(op);
- if (op->get_friendly_name() == name)
- {
- const std::string typeName = op->get_type_info().name;
- cvLayer->type = typeName;
- return true;
- }
- }
- return false;
- }
- else
+ CV_TRACE_REGION("ngraph_function");
+ for (const auto& op : ngraphOperations)
{
-#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
- CV_Error(Error::StsNotImplemented, "This OpenCV version is built with Inference Engine which has dropped IR v7 support");
-#else
- CV_TRACE_REGION("legacy_cnn_layer");
- try
+ CV_Assert(op);
+ if (op->get_friendly_name() == name)
{
- InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(name.c_str());
- CV_Assert(ieLayer);
-
- cvLayer->type = ieLayer->type;
+ const std::string typeName = op->get_type_info().name;
+ cvLayer->type = typeName;
return true;
}
- catch (const std::exception& e)
- {
- CV_UNUSED(e);
- CV_LOG_DEBUG(NULL, "IE layer extraction failure: '" << name << "' - " << e.what());
- return false;
- }
-#endif
-
}
+ return false;
};
bool found = process_layer(outputName);
ld.layerInstance = cvLayer;
ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NGRAPH] = backendNode;
}
- else
-#endif
- {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
-
- InferenceEngine::CNNLayerPtr ieLayer;
- try
- {
- ieLayer = ieNet.getLayerByName(outputName.c_str());
- }
- catch (...)
- {
- auto pos = outputName.rfind('.'); // cut port number: ".0"
- if (pos != std::string::npos)
- {
- std::string layerName = outputName.substr(0, pos);
- ieLayer = ieNet.getLayerByName(layerName.c_str());
- }
- }
- CV_Assert(ieLayer);
-
- cvLayer->name = outputName;
- cvLayer->type = ieLayer->type;
- ld.layerInstance = cvLayer;
-
- ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019] = backendNode;
-#else
- CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
- }
for (int i = 0; i < inputsNames.size(); ++i)
cvNet.connect(0, i, lid, i);
CV_TRACE_REGION_NEXT("finalize");
- cvNet.setPreferableBackend(getInferenceEngineBackendTypeParam());
+ cvNet.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
cvNet.impl->skipInfEngineInit = true;
return cvNet;
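// Usage sketch for this path (hedged; "model.xml"/"model.bin" are placeholder
// IR file names): reading a Model Optimizer network now always lands on nGraph.
//
//   cv::dnn::Net net = cv::dnn::readNetFromModelOptimizer("model.xml", "model.bin");
//   // the returned net already has DNN_BACKEND_INFERENCE_ENGINE_NGRAPH preferred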
FPDenormalsIgnoreHintScope fp_denormals_ignore_scope;
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
- InferenceEngine::CNNNetReader reader;
- reader.ReadNetwork(xml);
- reader.ReadWeights(bin);
-
- InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
-#else
InferenceEngine::Core& ie = getCore("");
InferenceEngine::CNNNetwork ieNet = ie.ReadNetwork(xml, bin);
-#endif
return Impl::createNetworkFromModelOptimizer(ieNet);
#endif // HAVE_INF_ENGINE
FPDenormalsIgnoreHintScope fp_denormals_ignore_scope;
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
- InferenceEngine::CNNNetReader reader;
-
- try
- {
- reader.ReadNetwork(bufferModelConfigPtr, bufferModelConfigSize);
-
- InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bufferWeightsSize }, InferenceEngine::Layout::C);
- InferenceEngine::TBlob<uint8_t>::Ptr weightsBlobPtr(new InferenceEngine::TBlob<uint8_t>(tensorDesc));
- weightsBlobPtr->allocate();
- std::memcpy(weightsBlobPtr->buffer(), (uchar*)bufferWeightsPtr, bufferWeightsSize);
- reader.SetWeights(weightsBlobPtr);
- }
- catch (const std::exception& e)
- {
- CV_Error(Error::StsError, std::string("DNN: IE failed to load model: ") + e.what());
- }
-
- InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
-#else
InferenceEngine::Core& ie = getCore("");
std::string model; model.assign((char*)bufferModelConfigPtr, bufferModelConfigSize);
{
CV_Error(Error::StsError, std::string("DNN: IE failed to load model: ") + e.what());
}
-#endif
return Impl::createNetworkFromModelOptimizer(ieNet);
#endif // HAVE_INF_ENGINE
std::vector<LayerPin> pins(1, impl->getPinByAlias(layerName));
impl->setUpNet(pins);
- if (!(impl->preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || impl->preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH))
- CV_Error(Error::StsNotImplemented, "DNN: Asynchronous forward is supported for Inference Engine backends only");
+ if (impl->preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ CV_Error(Error::StsNotImplemented, "DNN: Asynchronous forward is supported for the Inference Engine backend only");
impl->isAsync = true;
impl->forwardToLayer(impl->getLayerData(layerName));
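// Minimal caller-side sketch (assumes an OpenVINO-enabled build and a loaded net):
//
//   net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE); // resolves to nGraph
//   net.setInput(blob);
//   cv::AsyncArray out = net.forwardAsync();
//   cv::Mat result;
//   out.get(result); // blocks until the inference request completes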
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
- backendId = getInferenceEngineBackendTypeParam();
+ backendId = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
#endif
if( impl->preferableBackend != backendId )
case DNN_BACKEND_DEFAULT: backend = "DEFAULT/"; break;
case DNN_BACKEND_HALIDE: backend = "HALIDE/"; break;
case DNN_BACKEND_INFERENCE_ENGINE: // fallthru
- case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: backend = "DLIE/"; break;
- case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "NGRAPH/"; break;
+ case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: // fallthru
+ case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "OpenVINO/"; break;
case DNN_BACKEND_OPENCV: backend = "OCV/"; break;
// don't use default:
}
return Ptr<BackendNode>();
}
-Ptr<BackendNode> Layer::initInfEngine(const std::vector<Ptr<BackendWrapper> > &)
-{
- CV_Error(Error::StsNotImplemented, "Inference Engine pipeline of " + type +
- " layers is not defined.");
- return Ptr<BackendNode>();
-}
-
Ptr<BackendNode> Layer::initNgraph(const std::vector<Ptr<BackendWrapper> > & inputs, const std::vector<Ptr<BackendNode> >& nodes)
{
CV_Error(Error::StsNotImplemented, "Inference Engine pipeline of " + type +
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return preferableTarget == DNN_TARGET_CPU || dims == 4;
+#endif
return (backendId == DNN_BACKEND_OPENCV) ||
- (backendId == DNN_BACKEND_HALIDE && haveHalide()) ||
- ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && (preferableTarget == DNN_TARGET_CPU || dims == 4));
+ (backendId == DNN_BACKEND_HALIDE && haveHalide());
}
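// The guard above is the replacement idiom applied throughout this patch: answer
// the nGraph query first under HAVE_INF_ENGINE, then fall through to the
// remaining backends, e.g. (schematic):
//
//   #ifdef HAVE_INF_ENGINE
//       if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
//           return /* layer-specific condition */ true;
//   #endif
//       return backendId == DNN_BACKEND_OPENCV;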
#ifdef HAVE_OPENCL
}
#endif // HAVE_HALIDE
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
- const size_t numChannels = weights_.total();
- addConstantData("weights", wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
- addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV ||
- ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
+ return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
inputs[i].copyTo(outputs[i]);
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
- {
- InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
- std::vector<size_t> dims = input->getDims();
- CV_Assert(!dims.empty());
-
- InferenceEngine::Builder::Layer ieLayer(name);
- ieLayer.setName(name);
- if (preferableTarget == DNN_TARGET_MYRIAD)
- {
- ieLayer.setType("Copy");
- }
- else
- {
- ieLayer.setType("Split");
- ieLayer.getParameters()["axis"] = dims.size() - 1;
- ieLayer.getParameters()["out_sizes"] = dims[0];
- }
- ieLayer.setInputPorts({InferenceEngine::Port(dims)});
- ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
return backendId == DNN_BACKEND_OPENCV ||
- (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding) || // By channels
- (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() && !padding) ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+ (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding);
}
class ChannelConcatInvoker : public ParallelLoopBody
return Ptr<BackendNode>();
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
- {
- InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-
- InferenceEngine::Builder::ConcatLayer ieLayer(name);
- ieLayer.setAxis(normalize_axis(axis, input->getDims().size()));
- ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
+ return backendId == DNN_BACKEND_OPENCV;
}
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
blobs[0].copyTo(outputs[0]);
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- InferenceEngine::Builder::ConstLayer ieLayer(name);
- ieLayer.setData(wrapToInfEngineBlob(blobs[0]));
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
{
size_t ksize = kernel_size.size();
#ifdef HAVE_INF_ENGINE
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
bool isArmTarget = preferableTarget == DNN_TARGET_CPU && isArmComputePlugin();
if (isArmTarget && blobs.empty())
return isArmTarget;
if (ksize == 3)
return preferableTarget != DNN_TARGET_MYRIAD && !isArmTarget;
- if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableTarget != DNN_TARGET_MYRIAD) && blobs.empty())
+ bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD;
+ if (!isMyriad && blobs.empty())
return false;
return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
}
return Ptr<BackendNode>();
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
- {
- InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
- std::vector<size_t> dims = input->getDims();
- CV_Assert(dims.size() == 4 || dims.size() == 5);
- const int inpCn = dims[1];
- const int outCn = blobs[0].size[0];
- const int inpGroupCn = blobs[0].size[1];
- const int group = inpCn / inpGroupCn;
- InferenceEngine::Layout layout = (dims.size() == 4) ? InferenceEngine::Layout::OIHW :
- InferenceEngine::Layout::NCDHW;
-
- auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
- if (fusedWeights)
- {
- if (weightsMat.isContinuous())
- {
- Mat cvWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
- ieWeights = wrapToInfEngineBlob(cvWeights, layout);
- }
- else
- {
- ieWeights = InferenceEngine::make_shared_blob<float>({
- InferenceEngine::Precision::FP32,
- ieWeights->getTensorDesc().getDims(), layout
- });
- ieWeights->allocate();
-
- Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
- Mat cvWeights = weightsMat.colRange(0, newWeights.cols);
- cvWeights.copyTo(newWeights);
- }
- }
- InferenceEngine::Blob::Ptr ieBiases;
- if (hasBias() || fusedBias)
- {
- Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
- ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
- }
-
- InferenceEngine::Builder::ConvolutionLayer ieLayer(name);
-
- ieLayer.setKernel(kernel_size);
- ieLayer.setStrides(strides);
- ieLayer.setDilation(dilations);
- ieLayer.setPaddingsBegin(pads_begin);
- ieLayer.setPaddingsEnd(pads_end);
- ieLayer.setGroup((size_t)group);
- ieLayer.setOutDepth((size_t)outCn);
-
- InferenceEngine::Builder::Layer l = ieLayer;
- addConstantData("weights", ieWeights, l);
- if (ieBiases)
- addConstantData("biases", ieBiases, l);
-
- if (!padMode.empty())
- l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
-
- return Ptr<BackendNode>(new InfEngineBackendNode(l));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
return group == 1;
}
-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- {
- if (kernel_size.size() == 3 && preferableTarget != DNN_TARGET_CPU) {
- return false;
- }
-
- if (std::accumulate(adjust_pads.begin(), adjust_pads.end(), 0, std::plus<size_t>()) > 0)
- {
- if (padMode.empty())
- {
- if (preferableTarget != DNN_TARGET_CPU && group != 1)
- {
- for (int i = 0; i < adjust_pads.size(); i++) {
- if (adjust_pads[i] && pads_begin[i])
- return false;
- }
- }
- for (int i = 0; i < adjust_pads.size(); i++) {
- if (pads_end[i] < adjust_pads[i])
- return false;
- }
- return true;
- }
- else if (padMode == "SAME")
- {
- for (int i = 0; i < adjust_pads.size(); i++) {
- if (kernel_size[i] < pads_begin[i] + 1 + adjust_pads[i])
- return false;
- }
- return true;
- }
- else if (padMode == "VALID")
- return false;
- }
-
- if (group != 1)
- {
- return preferableTarget == DNN_TARGET_CPU;
- }
- if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
- return std::accumulate(dilations.begin(), dilations.end(), 1, std::multiplies<size_t>()) == 1;
- return true;
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#endif // HAVE_INF_ENGINE
{
return kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE);
return Ptr<BackendNode>();
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
- {
- InferenceEngine::Layout layout = blobs[0].dims == 5? InferenceEngine::Layout::NCDHW :
- InferenceEngine::Layout::OIHW;
-
- auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
- if (fusedWeights)
- {
- ieWeights = InferenceEngine::make_shared_blob<float>({
- InferenceEngine::Precision::FP32,
- ieWeights->getTensorDesc().getDims(), layout
- });
- ieWeights->allocate();
-
- int inpCn = blobs[0].size[0];
- Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, inpCn);
- transpose(weightsMat, newWeights);
- }
-
- const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW or OIDHW layout
- const int group = numOutput / outGroupCn;
-
- InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);
-
- ieLayer.setKernel(kernel_size);
- ieLayer.setStrides(strides);
- ieLayer.setDilation(dilations);
- ieLayer.setPaddingsBegin(pads_begin);
-
- if (padMode.empty())
- {
- std::vector<size_t> paddings_end;
- for (int i = 0; i < pads_end.size(); i++) {
- paddings_end.push_back(pads_end[i] - adjust_pads[i]);
- }
- ieLayer.setPaddingsEnd(paddings_end);
- }
- else if (padMode == "SAME")
- {
- std::vector<size_t> paddings_end;
- for (int i = 0; i < pads_begin.size(); i++) {
- paddings_end.push_back(kernel_size[i] - pads_begin[i] - 1 - adjust_pads[i]);
- }
- ieLayer.setPaddingsEnd(paddings_end);
- }
- ieLayer.setGroup((size_t)group);
- ieLayer.setOutDepth((size_t)numOutput);
-
- InferenceEngine::Builder::Layer l = ieLayer;
- addConstantData("weights", ieWeights, l);
- if (hasBias())
- addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l);
- return Ptr<BackendNode>(new InfEngineBackendNode(l));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && !_locPredTransposed && _bboxesNormalized);
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && !_locPredTransposed && _bboxesNormalized);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
}
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- InferenceEngine::Builder::DetectionOutputLayer ieLayer(name);
-
- ieLayer.setNumClasses(_numClasses);
- ieLayer.setShareLocation(_shareLocation);
- ieLayer.setBackgroudLabelId(_backgroundLabelId);
- ieLayer.setNMSThreshold(_nmsThreshold);
- ieLayer.setTopK(_topK > 0 ? _topK : _keepTopK);
- ieLayer.setKeepTopK(_keepTopK);
- ieLayer.setConfidenceThreshold(_confidenceThreshold);
- ieLayer.setVariantEncodedInTarget(_varianceEncodedInTarget);
- ieLayer.setCodeType("caffe.PriorBoxParameter." + _codeType);
- ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(3));
-
- InferenceEngine::Builder::Layer l = ieLayer;
- l.getParameters()["eta"] = std::string("1.0");
- l.getParameters()["clip"] = _clip;
-
- return Ptr<BackendNode>(new InfEngineBackendNode(l));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
return Ptr<BackendNode>();
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- InferenceEngine::Builder::Layer ieLayer = func.initInfEngineBuilderAPI();
- ieLayer.setName(this->name);
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
bool supportBackend(int backendId, int)
{
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
-#endif
#ifdef HAVE_DNN_NGRAPH
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
}
#endif // HAVE_HALIDE
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
- {
- return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(slope);
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_HALIDE;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
}
#endif // HAVE_HALIDE
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
- {
- return InferenceEngine::Builder::ClampLayer("").setMinValue(minValue).setMaxValue(maxValue);
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
inline void setKernelParams(ocl::Kernel& kernel) const {}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
- {
- CV_Error(Error::StsNotImplemented, "");
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
}
#endif // HAVE_HALIDE
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
- {
- return InferenceEngine::Builder::TanHLayer("");
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
}
#endif // HAVE_HALIDE
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
- {
- return InferenceEngine::Builder::SigmoidLayer("");
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
}
#endif // HAVE_HALIDE
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
- {
- return InferenceEngine::Builder::ELULayer("");
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- return !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
#endif
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
}
}
#endif // HAVE_HALIDE
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
- {
- return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(-0.999999f);
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
bool supportBackend(int backendId, int targetId)
{
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- return (targetId != DNN_TARGET_OPENCL && targetId != DNN_TARGET_OPENCL_FP16) || power == 1.0 || power == 0.5;
- else
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_HALIDE;
}
void finalize()
}
#endif // HAVE_HALIDE
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
- {
- return InferenceEngine::Builder::PowerLayer("").setPower(power)
- .setScale(scale)
- .setShift(shift);
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_HALIDE;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
}
#endif // HAVE_HALIDE
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
- {
- InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer("");
- const size_t numChannels = scale.total();
- addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l);
- return l;
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
if (hasVecInput && ELTWISE_CHANNNELS_SAME)
return backendId == DNN_BACKEND_OPENCV;
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return channelsMode == ELTWISE_CHANNNELS_SAME;
+#endif
+
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_HALIDE ||
- ((((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty()))
- || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && channelsMode == ELTWISE_CHANNNELS_SAME));
+ backendId == DNN_BACKEND_HALIDE;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
return Ptr<BackendNode>();
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
- {
- InferenceEngine::Builder::EltwiseLayer ieLayer(name);
-
- ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
-
- if (op == SUM)
- ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::SUM);
- else if (op == PROD)
- ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MUL);
- else if (op == DIV)
- ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::DIV);
- else if (op == MAX)
- ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MAX);
- else
- CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
-
- InferenceEngine::Builder::Layer l = ieLayer;
- if (!coeffs.empty())
- l.getParameters()["coeff"] = coeffs;
-
- return Ptr<BackendNode>(new InfEngineBackendNode(l));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV ||
- ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
+ return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
}
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
- {
- InferenceEngine::Builder::Layer ieLayer(name);
- ieLayer.setName(name);
- ieLayer.setType("Flatten");
- ieLayer.getParameters()["axis"] = (size_t)_startAxis;
- ieLayer.getParameters()["end_axis"] = _endAxis; // Do not cast to size_t because it might be negative.
- ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
- ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
-virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
- const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
-{
+ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
+ const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+ {
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
std::vector<size_t> dims = ieInpNode->get_shape();
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return axis == 1;
+#endif
+
return backendId == DNN_BACKEND_OPENCV ||
- (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1) ||
- (((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && !blobs.empty()) ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && axis == 1);
+ (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1);
}
virtual bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
return Ptr<BackendNode>();
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- InferenceEngine::Builder::FullyConnectedLayer ieLayer(name);
-
- const int outNum = blobs[0].size[0];
- ieLayer.setOutputNum(outNum);
-
- InferenceEngine::Builder::Layer l = ieLayer;
- addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW), l);
- if (bias)
- addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C), l);
-
- return Ptr<BackendNode>(new InfEngineBackendNode(l));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return bias == (int)bias;
- }
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
- return bias == (int)bias;
- }
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
+#endif
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_HALIDE;
}
#ifdef HAVE_OPENCL
#endif // HAVE_HALIDE
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- float alphaSize = alpha;
- if (!normBySize)
- alphaSize *= (type == SPATIAL_NRM ? size*size : size);
-
- InferenceEngine::Builder::NormLayer ieLayer(name);
- ieLayer.setSize(size);
- ieLayer.setAlpha(alphaSize);
- ieLayer.setBeta(beta);
- ieLayer.setAcrossMaps(type == CHANNEL_NRM);
-
- InferenceEngine::Builder::Layer l = ieLayer;
- l.getParameters()["k"] = bias;
- return Ptr<BackendNode>(new InfEngineBackendNode(l));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- return !zeroDev && (preferableTarget != DNN_TARGET_MYRIAD || eps <= 1e-7f);
-#endif
-#ifdef HAVE_DNN_NGRAPH
+#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
}
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- InferenceEngine::Builder::MVNLayer ieLayer(name);
- ieLayer.setAcrossChannels(acrossChannels);
- ieLayer.setNormalize(normVariance);
- ieLayer.setEpsilon(eps);
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
if (pnorm != 2)
return false;
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && preferableTarget == DNN_TARGET_MYRIAD)
- return !acrossSpatial;
-
return startAxis == 1;
}
+#endif
return backendId == DNN_BACKEND_OPENCV;
}
}
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
- {
- InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
- std::vector<size_t> dims = input->getDims();
- if (dims.size() == 4)
- {
- InferenceEngine::Builder::NormalizeLayer ieLayer(name);
-
- ieLayer.setChannelShared(false);
- ieLayer.setAcrossMaps(acrossSpatial);
- ieLayer.setEpsilon(epsilon);
-
- InferenceEngine::Builder::Layer l = ieLayer;
- const int numChannels = dims[1];
- InferenceEngine::Blob::Ptr weights;
- if (blobs.empty())
- {
- weights = InferenceEngine::make_shared_blob<float>({
- InferenceEngine::Precision::FP32,
- {(size_t)numChannels}, InferenceEngine::Layout::C
- });
- weights->allocate();
-
- Mat weightsMat = infEngineBlobToMat(weights).reshape(1, numChannels);
- Mat(numChannels, 1, CV_32F, Scalar(1)).copyTo(weightsMat);
- l.getParameters()["channel_shared"] = false;
- }
- else
- {
- CV_Assert(numChannels == blobs[0].total());
- weights = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C);
- l.getParameters()["channel_shared"] = blobs[0].total() == 1;
- }
- addConstantData("weights", weights, l);
- l.getParameters()["across_spatial"] = acrossSpatial;
- return Ptr<BackendNode>(new InfEngineBackendNode(l));
- }
- else
- {
- InferenceEngine::Builder::GRNLayer ieLayer(name);
- ieLayer.setBeta(epsilon);
-
- InferenceEngine::Builder::Layer l = ieLayer;
- l.getParameters()["bias"] = epsilon;
-
- return Ptr<BackendNode>(new InfEngineBackendNode(l));
- }
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
- if (INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && preferableTarget == DNN_TARGET_MYRIAD)
+ bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD;
+ if (isMyriad)
return dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0;
return (dstRanges.size() <= 4 || !isArmComputePlugin());
return Ptr<BackendNode>();
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- InferenceEngine::Builder::Layer ieLayer(name);
- ieLayer.setName(name);
- ieLayer.setType("Pad");
-
- std::vector<int> begins(paddings.size(), 0), ends(paddings.size(), 0);
- for (int i = 0; i < paddings.size(); ++i)
- {
- begins[i] = paddings[i].first;
- ends[i] = paddings[i].second;
- }
- ieLayer.getParameters()["pads_begin"] = begins;
- ieLayer.getParameters()["pads_end"] = ends;
- ieLayer.getParameters()["pad_mode"] = paddingType;
- if (paddingType == "constant")
- ieLayer.getParameters()["pad_value"] = paddingValue;
-
- ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
- ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && preferableTarget == DNN_TARGET_CPU)
- return _order.size() <= 4 || !isArmComputePlugin();
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ {
+ if (preferableTarget == DNN_TARGET_CPU)
+ return _order.size() <= 4 || !isArmComputePlugin();
+ return true;
+ }
#endif
- return backendId == DNN_BACKEND_OPENCV ||
- ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
+ return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
}
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- InferenceEngine::Builder::PermuteLayer ieLayer(name);
- ieLayer.setOrder(_order);
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- {
- if (computeMaxIdx)
- return false;
- if (kernel_size.size() == 3)
- return preferableTarget == DNN_TARGET_CPU;
- if (kernel_size.size() == 1)
- return false;
- if (preferableTarget == DNN_TARGET_MYRIAD) {
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
- if (type == MAX && (pads_begin[1] == 1 && pads_begin[0] == 1) && (strides[0] == 2 && strides[1] == 2)) {
- return !isMyriadX();
- }
-#endif
- return type == MAX || type == AVE;
- }
- else
- return type != STOCHASTIC && type != SUM;
- }
-#endif
+#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
-#ifdef HAVE_DNN_NGRAPH
return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1 && (kernel_size.size() != 3 || !isArmComputePlugin());
-#endif
}
- else if (backendId == DNN_BACKEND_OPENCV)
+#endif
+ if (backendId == DNN_BACKEND_OPENCV)
{
if (kernel_size.size() == 3)
return preferableTarget == DNN_TARGET_CPU;
return Ptr<BackendNode>();
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- if (type == MAX || type == AVE)
- {
- InferenceEngine::Builder::PoolingLayer ieLayer(name);
-
- ieLayer.setKernel(kernel_size);
- ieLayer.setStrides(strides);
- ieLayer.setPaddingsBegin(pads_begin);
- ieLayer.setPaddingsEnd(pads_end);
-
- ieLayer.setPoolingType(type == MAX ?
- InferenceEngine::Builder::PoolingLayer::PoolingType::MAX :
- InferenceEngine::Builder::PoolingLayer::PoolingType::AVG);
- ieLayer.setRoundingType(ceilMode ?
- InferenceEngine::Builder::PoolingLayer::RoundingType::CEIL :
- InferenceEngine::Builder::PoolingLayer::RoundingType::FLOOR);
- ieLayer.setExcludePad(!avePoolPaddedArea);
-
- InferenceEngine::Builder::Layer l = ieLayer;
- if (!padMode.empty())
- l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
- return Ptr<BackendNode>(new InfEngineBackendNode(l));
- }
- else if (type == ROI)
- {
- InferenceEngine::Builder::ROIPoolingLayer ieLayer(name);
- ieLayer.setSpatialScale(spatialScale);
- ieLayer.setPooled({pooledSize.height, pooledSize.width});
- ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
- else if (type == PSROI)
- {
- InferenceEngine::Builder::PSROIPoolingLayer ieLayer(name);
- ieLayer.setSpatialScale(spatialScale);
- ieLayer.setOutputDim(psRoiOutChannels);
- ieLayer.setGroupSize(pooledSize.width);
- ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
- else
- CV_Error(Error::StsNotImplemented, "Unsupported pooling type");
- return Ptr<BackendNode>();
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return _explicitSizes || _stepX == _stepY;
#endif
- return backendId == DNN_BACKEND_OPENCV ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() &&
- ( _explicitSizes || (_minSize.size() == 1 && _maxSize.size() <= 1)));
+ return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
}
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- if (_explicitSizes)
- {
- InferenceEngine::Builder::PriorBoxClusteredLayer ieLayer(name);
- ieLayer.setSteps({_stepY, _stepX});
-
- CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
- ieLayer.setOffset(_offsetsX[0]);
-
- ieLayer.setClip(_clip);
- ieLayer.setFlip(false); // We already flipped aspect ratios.
-
- InferenceEngine::Builder::Layer l = ieLayer;
-
- CV_Assert_N(!_boxWidths.empty(), !_boxHeights.empty(), !_variance.empty());
- CV_Assert(_boxWidths.size() == _boxHeights.size());
- l.getParameters()["width"] = _boxWidths;
- l.getParameters()["height"] = _boxHeights;
- l.getParameters()["variance"] = _variance;
- return Ptr<BackendNode>(new InfEngineBackendNode(l));
- }
- else
- {
- InferenceEngine::Builder::PriorBoxLayer ieLayer(name);
-
- CV_Assert(!_explicitSizes);
- ieLayer.setMinSize(_minSize[0]);
- if (!_maxSize.empty())
- ieLayer.setMaxSize(_maxSize[0]);
-
- CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
- ieLayer.setOffset(_offsetsX[0]);
-
- ieLayer.setClip(_clip);
- ieLayer.setFlip(false); // We already flipped aspect ratios.
-
- InferenceEngine::Builder::Layer l = ieLayer;
- if (_stepX == _stepY)
- {
- l.getParameters()["step"] = _stepX;
- l.getParameters()["step_h"] = 0.0f;
- l.getParameters()["step_w"] = 0.0f;
- }
- else
- {
- l.getParameters()["step"] = 0.0f;
- l.getParameters()["step_h"] = _stepY;
- l.getParameters()["step_w"] = _stepX;
- }
- if (!_aspectRatios.empty())
- {
- l.getParameters()["aspect_ratio"] = _aspectRatios;
- }
- CV_Assert(!_variance.empty());
- l.getParameters()["variance"] = _variance;
- return Ptr<BackendNode>(new InfEngineBackendNode(l));
- }
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV ||
- ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && preferableTarget != DNN_TARGET_MYRIAD);
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ {
+ bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD;
+ return !isMyriad;
+ }
+#endif
+ return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
layerOutputs[0].col(2).copyTo(dst);
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- InferenceEngine::Builder::ProposalLayer ieLayer(name);
-
- ieLayer.setBaseSize(baseSize);
- ieLayer.setFeatStride(featStride);
- ieLayer.setMinSize(16);
- ieLayer.setNMSThresh(nmsThreshold);
- ieLayer.setPostNMSTopN(keepTopAfterNMS);
- ieLayer.setPreNMSTopN(keepTopBeforeNMS);
-
- std::vector<float> scalesVec(scales.size());
- for (int i = 0; i < scales.size(); ++i)
- scalesVec[i] = scales.get<float>(i);
- ieLayer.setScale(scalesVec);
-
- std::vector<float> ratiosVec(ratios.size());
- for (int i = 0; i < ratios.size(); ++i)
- ratiosVec[i] = ratios.get<float>(i);
- ieLayer.setRatio(ratiosVec);
-
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
+ return backendId == DNN_BACKEND_OPENCV;
}
#ifdef HAVE_OPENCL
permute->forward(inputs, outputs, internals_arr);
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- InferenceEngine::Builder::ReorgYoloLayer ieLayer(name);
- ieLayer.setStride(reorgStride);
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV ||
- ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
+ return backendId == DNN_BACKEND_OPENCV;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
}
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
- {
- InferenceEngine::Builder::ReshapeLayer ieLayer(name);
- CV_Assert(outShapes.size() == 1);
- ieLayer.setDims(outShapes[0]);
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
return (interpolation == "nearest" && scaleWidth == scaleHeight) ||
(interpolation == "bilinear");
CV_Error(Error::StsNotImplemented, "Unknown interpolation: " + interpolation);
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- InferenceEngine::Builder::Layer ieLayer(name);
- ieLayer.setName(name);
- if (interpolation == "nearest")
- {
- ieLayer.setType("Resample");
- ieLayer.getParameters()["type"] = std::string("caffe.ResampleParameter.NEAREST");
- ieLayer.getParameters()["antialias"] = false;
- if (scaleWidth != scaleHeight)
- CV_Error(Error::StsNotImplemented, "resample with sw != sh");
- ieLayer.getParameters()["factor"] = 1.0f / scaleWidth;
- }
- else if (interpolation == "bilinear")
- {
- ieLayer.setType("Interp");
- ieLayer.getParameters()["pad_beg"] = 0;
- ieLayer.getParameters()["pad_end"] = 0;
- ieLayer.getParameters()["align_corners"] = alignCorners;
- }
- else
- CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation);
- ieLayer.getParameters()["width"] = outWidth;
- ieLayer.getParameters()["height"] = outHeight;
- ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
- ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && axis == 1 && !blobs.empty()) ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && axis > 0);
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return axis > 0;
+#endif
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_HALIDE;
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
}
#endif // HAVE_HALIDE
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
- {
- InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);
-
- CV_Assert(!blobs.empty());
- const size_t numChannels = blobs[0].total();
- if (hasWeights)
- {
- addConstantData("weights", wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C), l);
- }
- else
- {
- auto weights = InferenceEngine::make_shared_blob<float>({
- InferenceEngine::Precision::FP32, {(size_t)numChannels},
- InferenceEngine::Layout::C
- });
- weights->allocate();
- float* buf = weights->buffer().as<float*>();
- std::fill(buf, buf + numChannels, 1);
- addConstantData("weights", weights, l);
- }
- if (hasBias)
- addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l);
- return Ptr<BackendNode>(new InfEngineBackendNode(l));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
- sliceRanges.size() == 1 && sliceRanges[0].size() == 4 && !hasSteps;
-#endif
-#ifdef HAVE_DNN_NGRAPH
+#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return sliceRanges.size() == 1 && !hasSteps;
#endif
}
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
- {
- CV_Assert_N(finalSliceRanges.size() == 1, inputs.size() <= 2);
-
- std::vector<size_t> axes, offsets, dims;
- int from, to, step;
- int numDims = finalSliceRanges[0].size();
- if (preferableTarget == DNN_TARGET_MYRIAD)
- {
- from = axis;
- to = numDims;
- step = 1;
- }
- else
- {
- from = numDims - 1;
- to = axis - 1;
- step = -1;
- }
- for (int i = from; i != to; i += step)
- {
- axes.push_back(i);
- offsets.push_back(finalSliceRanges[0][i].start);
- dims.push_back(finalSliceRanges[0][i].size());
- }
-
- InferenceEngine::Builder::Layer ieLayer(name);
- ieLayer.setName(name);
- ieLayer.setType("Crop");
- ieLayer.getParameters()["axis"] = axes;
- ieLayer.getParameters()["dim"] = dims;
- ieLayer.getParameters()["offset"] = offsets;
- ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
- ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
-
- if (inputs.size() != 2)
- {
- std::vector<size_t> outShape(numDims);
- for (int i = 0; i < numDims; ++i)
- outShape[i] = finalSliceRanges[0][i].size();
-
- ieLayer.getInputPorts()[1].setParameter("type", "weights");
-
- auto shapeSource = InferenceEngine::make_shared_blob<float>({
- InferenceEngine::Precision::FP32, outShape,
- InferenceEngine::Layout::ANY
- });
- shapeSource->allocate();
- addConstantData("weights", shapeSource, ieLayer);
- }
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif
-#endif
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
+#ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+#endif
return backendId == DNN_BACKEND_OPENCV ||
- (backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1) ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() && !logSoftMax);
+ (backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1);
}
#ifdef HAVE_OPENCL
return Ptr<BackendNode>();
}
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
- {
- InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-
- InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
- ieLayer.setAxis(normalize_axis(axisRaw, input->getDims().size()));
-
- return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
- }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
#ifdef HAVE_INF_ENGINE
-static Backend parseInferenceEngineBackendType(const cv::String& backend)
-{
- CV_Assert(!backend.empty());
- if (backend == CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- return DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
- if (backend == CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API)
- return DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
- CV_Error(Error::StsBadArg, cv::format("Unknown IE backend: %s", backend.c_str()));
-}
-static const char* dumpInferenceEngineBackendType(Backend backend)
-{
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- return CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- return CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API;
- CV_Error(Error::StsBadArg, cv::format("Invalid backend ID for IE: %d", backend));
-}
-Backend& getInferenceEngineBackendTypeParam()
-{
- static Backend param = parseInferenceEngineBackendType(
- utils::getConfigurationParameterString("OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE",
-#ifdef HAVE_DNN_NGRAPH
- CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH
-#elif defined(HAVE_DNN_IE_NN_BUILDER_2019)
- CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API
-#else
-#error "Build configuration error: nGraph or NN Builder API backend should be enabled"
-#endif
- )
- );
- return param;
-}
-
CV__DNN_EXPERIMENTAL_NS_BEGIN
cv::String getInferenceEngineBackendType()
{
- return dumpInferenceEngineBackendType(getInferenceEngineBackendTypeParam());
+ return CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
cv::String setInferenceEngineBackendType(const cv::String& newBackendType)
{
- Backend newBackend = parseInferenceEngineBackendType(newBackendType);
- Backend& param = getInferenceEngineBackendTypeParam();
- Backend old = param;
- param = newBackend;
- return dumpInferenceEngineBackendType(old);
+ if (newBackendType != CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ CV_Error(Error::StsNotImplemented, cv::format("DNN/IE: only NGRAPH backend is supported: %s", newBackendType.c_str()));
+ return newBackendType;
}
CV__DNN_EXPERIMENTAL_NS_END
}
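With parseInferenceEngineBackendType() and dumpInferenceEngineBackendType() removed, the backend-type API degenerates to a constant: the getter always reports NGRAPH and the setter rejects any other value. A sketch of the observable behavior, assuming an OpenCV build with Inference Engine support:

```cpp
#include <opencv2/dnn/utils/inference_engine.hpp>
#include <iostream>

int main()
{
    std::cout << cv::dnn::getInferenceEngineBackendType() << std::endl; // prints "NGRAPH"
    try {
        cv::dnn::setInferenceEngineBackendType("NN_BUILDER"); // legacy value
    } catch (const cv::Exception& e) {
        std::cout << "rejected: " << e.what() << std::endl; // StsNotImplemented
    }
    return 0;
}
```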
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-
-// For networks whose input layer has an empty name, IE generates a name like "id[some_number]".
-// OpenCV lets users use an empty input name, so to avoid such unexpected naming
-// we substitute a predefined name instead.
-static std::string kDefaultInpLayerName = "empty_inp_layer_name";
-static std::string kOpenCVLayersType = "OpenCVLayer";
-
-static std::string shapesToStr(const std::vector<Mat>& mats)
-{
- std::ostringstream shapes;
- shapes << mats.size() << " ";
- for (const Mat& m : mats)
- {
- shapes << m.dims << " ";
- for (int i = 0; i < m.dims; ++i)
- shapes << m.size[i] << " ";
- }
- return shapes.str();
-}
-
-static void strToShapes(const std::string& str, std::vector<std::vector<size_t> >& shapes)
-{
- std::istringstream ss(str);
- int num, dims;
- ss >> num;
- shapes.resize(num);
- for (int i = 0; i < num; ++i)
- {
- ss >> dims;
- shapes[i].resize(dims);
- for (int j = 0; j < dims; ++j)
- ss >> shapes[i][j];
- }
-}
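For reference while reviewing this removal: shapesToStr() and strToShapes() flatten a list of blob shapes into one whitespace-separated string: first the number of Mats, then for each Mat its rank followed by its extents. A standalone round-trip of that format (no IE types involved):

```cpp
#include <cassert>
#include <sstream>
#include <string>
#include <vector>

int main()
{
    // Two blobs, 2x3 and 4x1, encode as "2 2 2 3 2 4 1 ".
    std::istringstream ss("2 2 2 3 2 4 1 ");
    int num, dims;
    ss >> num;                               // number of shapes
    std::vector<std::vector<size_t> > shapes(num);
    for (int i = 0; i < num; ++i)
    {
        ss >> dims;                          // rank of shape i
        shapes[i].resize(dims);
        for (int j = 0; j < dims; ++j)
            ss >> shapes[i][j];              // extents of shape i
    }
    assert(shapes[0] == (std::vector<size_t>{2, 3}));
    assert(shapes[1] == (std::vector<size_t>{4, 1}));
    return 0;
}
```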
-
-class InfEngineCustomLayer : public InferenceEngine::ILayerExecImpl
-{
-public:
- explicit InfEngineCustomLayer(const InferenceEngine::CNNLayer& layer) : cnnLayer(layer)
- {
- std::istringstream iss(layer.GetParamAsString("impl"));
- size_t ptr;
- iss >> ptr;
- cvLayer = (Layer*)ptr;
-
- std::vector<std::vector<size_t> > shapes;
- strToShapes(layer.GetParamAsString("internals"), shapes);
- internals.resize(shapes.size());
- for (int i = 0; i < shapes.size(); ++i)
- internals[i].create(std::vector<int>(shapes[i].begin(), shapes[i].end()), CV_32F);
- }
-
- virtual InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs,
- std::vector<InferenceEngine::Blob::Ptr>& outputs,
- InferenceEngine::ResponseDesc *resp) noexcept
- {
- std::vector<Mat> inpMats, outMats;
- infEngineBlobsToMats(inputs, inpMats);
- infEngineBlobsToMats(outputs, outMats);
-
- try
- {
- cvLayer->forward(inpMats, outMats, internals);
- return InferenceEngine::StatusCode::OK;
- }
- catch (...)
- {
- return InferenceEngine::StatusCode::GENERAL_ERROR;
- }
- }
-
- virtual InferenceEngine::StatusCode
- getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
- InferenceEngine::ResponseDesc* resp) noexcept
- {
- std::vector<InferenceEngine::DataConfig> inDataConfig;
- std::vector<InferenceEngine::DataConfig> outDataConfig;
- for (auto& it : cnnLayer.insData)
- {
- InferenceEngine::DataConfig conf;
- conf.desc = it.lock()->getTensorDesc();
- inDataConfig.push_back(conf);
- }
-
- for (auto& it : cnnLayer.outData)
- {
- InferenceEngine::DataConfig conf;
- conf.desc = it->getTensorDesc();
- outDataConfig.push_back(conf);
- }
-
- InferenceEngine::LayerConfig layerConfig;
- layerConfig.inConfs = inDataConfig;
- layerConfig.outConfs = outDataConfig;
-
- conf.push_back(layerConfig);
- return InferenceEngine::StatusCode::OK;
- }
-
- InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config,
- InferenceEngine::ResponseDesc *resp) noexcept
- {
- return InferenceEngine::StatusCode::OK;
- }
-
-private:
- InferenceEngine::CNNLayer cnnLayer;
- dnn::Layer* cvLayer;
- std::vector<Mat> internals;
-};
-
-class InfEngineCustomLayerShapeInfer : public InferenceEngine::IShapeInferImpl
-{
-public:
- InferenceEngine::StatusCode
- inferShapes(const std::vector<InferenceEngine::Blob::CPtr>& inBlobs,
- const std::map<std::string, std::string>& params,
- const std::map<std::string, InferenceEngine::Blob::Ptr>& blobs,
- std::vector<InferenceEngine::SizeVector>& outShapes,
- InferenceEngine::ResponseDesc* desc) noexcept override
- {
- strToShapes(params.at("outputs"), outShapes);
- return InferenceEngine::StatusCode::OK;
- }
-};
-
-class InfEngineCustomLayerFactory : public InferenceEngine::ILayerImplFactory {
-public:
- explicit InfEngineCustomLayerFactory(const InferenceEngine::CNNLayer* layer) : cnnLayer(*layer) {}
-
- InferenceEngine::StatusCode
- getImplementations(std::vector<InferenceEngine::ILayerImpl::Ptr>& impls,
- InferenceEngine::ResponseDesc* resp) noexcept override {
- impls.push_back(std::make_shared<InfEngineCustomLayer>(cnnLayer));
- return InferenceEngine::StatusCode::OK;
- }
-
-private:
- InferenceEngine::CNNLayer cnnLayer;
-};
-
-InferenceEngine::StatusCode InfEngineExtension::getFactoryFor(
- InferenceEngine::ILayerImplFactory*& factory,
- const InferenceEngine::CNNLayer* cnnLayer,
- InferenceEngine::ResponseDesc* resp
-) noexcept
-{
- if (cnnLayer->type != kOpenCVLayersType)
- return InferenceEngine::StatusCode::NOT_IMPLEMENTED;
- factory = new InfEngineCustomLayerFactory(cnnLayer);
- return InferenceEngine::StatusCode::OK;
-}
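The custom-layer bridge deleted here smuggled a raw cv::dnn::Layer pointer into the IE plugin by printing its address into the "impl" layer parameter and parsing it back in the InfEngineCustomLayer constructor, which is only valid inside a single process. The round-trip, reduced to standard C++:

```cpp
#include <cassert>
#include <sstream>
#include <string>

struct Layer { int dummy; };  // stand-in for cv::dnn::Layer

int main()
{
    Layer layer;

    // Producer side (cf. InfEngineBackendNode): store the address as text.
    std::ostringstream oss;
    oss << (size_t)&layer;
    std::string param = oss.str();

    // Consumer side (cf. InfEngineCustomLayer): parse the address back.
    std::istringstream iss(param);
    size_t ptr;
    iss >> ptr;
    Layer* restored = (Layer*)ptr;

    assert(restored == &layer);  // valid only within the same process
    return 0;
}
```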
-
-InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::Builder::Layer& _layer)
- : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019), layer(_layer) {}
-
- InfEngineBackendNode::InfEngineBackendNode(Ptr<Layer>& cvLayer_, std::vector<Mat*>& inputs,
- std::vector<Mat>& outputs,
- std::vector<Mat>& internals)
- : BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019), layer(cvLayer_->name),
- cvLayer(cvLayer_)
-{
- CV_Assert(!cvLayer->name.empty());
- layer.setName(cvLayer->name);
- layer.setType(kOpenCVLayersType);
- layer.getParameters()["impl"] = (size_t)cvLayer.get();
- layer.getParameters()["outputs"] = shapesToStr(outputs);
- layer.getParameters()["internals"] = shapesToStr(internals);
- layer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
- layer.setOutputPorts(std::vector<InferenceEngine::Port>(outputs.size()));
-}
-
-static std::vector<Ptr<InfEngineBackendWrapper> >
-infEngineWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
-{
- std::vector<Ptr<InfEngineBackendWrapper> > wrappers(ptrs.size());
- for (int i = 0; i < ptrs.size(); ++i)
- {
- CV_Assert(!ptrs[i].empty());
- wrappers[i] = ptrs[i].dynamicCast<InfEngineBackendWrapper>();
- CV_Assert(!wrappers[i].empty());
- }
- return wrappers;
-}
-
-InfEngineBackendNet::InfEngineBackendNet() : netBuilder("")
-{
- hasNetOwner = false;
- device_name = "CPU";
-}
-
-InfEngineBackendNet::InfEngineBackendNet(InferenceEngine::CNNNetwork& net) : netBuilder(""), cnn(net)
-{
- hasNetOwner = true;
- device_name = "CPU";
-}
-
-void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& inputs,
- const std::vector<Ptr<BackendWrapper> >& outputs,
- const std::string& layerName)
-{
- std::vector<Ptr<InfEngineBackendWrapper> > inpWrappers = infEngineWrappers(inputs);
- std::map<std::string, int>::iterator it = layers.find(layerName);
- CV_Assert(it != layers.end());
-
- const int layerId = it->second;
- for (size_t i = 0; i < inpWrappers.size(); ++i)
- {
- const auto& inp = inpWrappers[i];
- const std::string& inpName = inp->dataPtr->getName();
-
- std::string inpLayerName = inpName;
- size_t inpPortId = inpName.rfind('.');
- if (inpPortId != std::string::npos)
- {
- std::string portIdStr = inpName.substr(inpPortId + 1);
- if (std::all_of(portIdStr.begin(), portIdStr.end(), ::isdigit))
- {
- inpLayerName = inpName.substr(0, inpPortId);
- inpPortId = atoi(portIdStr.c_str());
- }
- else
- inpPortId = 0;
- }
- else
- inpPortId = 0;
-
- int inpId;
- it = layers.find(inpLayerName);
- if (it == layers.end())
- {
- InferenceEngine::Builder::InputLayer inpLayer(!inpLayerName.empty() ? inpLayerName : kDefaultInpLayerName);
- std::vector<size_t> shape(inp->blob->getTensorDesc().getDims());
- inpLayer.setPort(InferenceEngine::Port(shape));
- inpId = netBuilder.addLayer(inpLayer);
-
- layers.insert({inpName, inpId});
- }
- else
- inpId = it->second;
-
- netBuilder.connect({(size_t)inpId, inpPortId}, {(size_t)layerId, i});
- unconnectedPorts.erase({inpId, inpPortId});
- }
- CV_Assert(!outputs.empty());
- for (int i = 0; i < outputs.size(); ++i)
- {
- InferenceEngine::DataPtr dataPtr = infEngineDataNode(outputs[i]);
- std::string outputName = outputs.size() > 1 ? (layerName + "." + std::to_string(i)) : layerName;
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
- dataPtr->name = outputName;
-#else
- dataPtr->setName(outputName);
-#endif
- }
-}
-
-void InfEngineBackendNet::init(Target targetId)
-{
- if (!hasNetOwner)
- {
- CV_Assert(!unconnectedPorts.empty());
- for (const auto& port : unconnectedPorts)
- {
- InferenceEngine::Builder::OutputLayer outLayer("myconv1");
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
- // Inference Engine determines network precision by ports.
- InferenceEngine::Precision p = (targetId == DNN_TARGET_MYRIAD ||
- targetId == DNN_TARGET_OPENCL_FP16) ?
- InferenceEngine::Precision::FP16 :
- InferenceEngine::Precision::FP32;
- outLayer.setPort(InferenceEngine::Port({}, p));
-#endif
- netBuilder.addLayer({InferenceEngine::PortInfo(port.first, port.second)}, outLayer);
- }
- netBuilder.getContext().addShapeInferImpl(kOpenCVLayersType,
- std::make_shared<InfEngineCustomLayerShapeInfer>());
- cnn = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(netBuilder.build()));
- }
-
- switch (targetId)
- {
- case DNN_TARGET_CPU:
- device_name = "CPU";
- break;
- case DNN_TARGET_OPENCL:
- case DNN_TARGET_OPENCL_FP16:
- device_name = "GPU";
- break;
- case DNN_TARGET_MYRIAD:
- device_name = "MYRIAD";
- break;
- case DNN_TARGET_FPGA:
- device_name = "FPGA";
- break;
- default:
- CV_Error(Error::StsNotImplemented, "Unknown target");
- };
-
- for (const auto& name : requestedOutputs)
- {
- cnn.addOutput(name);
- }
-
- for (const auto& it : cnn.getInputsInfo())
- {
- const std::string& name = it.first;
- auto blobIt = allBlobs.find(name);
- CV_Assert(blobIt != allBlobs.end());
- it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision());
- }
- for (const auto& it : cnn.getOutputsInfo())
- {
- const std::string& name = it.first;
- auto blobIt = allBlobs.find(name);
- CV_Assert(blobIt != allBlobs.end());
- it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision()); // Should always be FP32
- }
-
- initPlugin(cnn);
-}
-
-void InfEngineBackendNet::addLayer(InferenceEngine::Builder::Layer& layer)
-{
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
- // Add weights to network and connect them after input blobs.
- std::map<std::string, InferenceEngine::Parameter>& params = layer.getParameters();
- std::vector<int> blobsIds;
- std::vector<int> portIds;
- for (const std::string& name : {"weights", "biases"})
- {
- bool asInput = false;
- int portId = 0;
- for (int i = 0; i < layer.getInputPorts().size(); ++i)
- {
- const auto& port = layer.getInputPorts()[i];
- auto it = port.getParameters().find("type");
- if (it != port.getParameters().end() && it->second == name)
- {
- portId = i;
- asInput = true;
- break;
- }
- }
-
- if (!asInput)
- continue;
-
- auto it = params.find(name);
- if (it != params.end())
- {
- InferenceEngine::Blob::Ptr blob = it->second.as<InferenceEngine::Blob::Ptr>();
- params.erase(it);
- int blobId = netBuilder.addLayer(InferenceEngine::Builder::ConstLayer(name).setData(blob));
- blobsIds.push_back(blobId);
- portIds.push_back(portId);
- }
- }
-#endif
-
- int id = netBuilder.addLayer(layer);
- const std::string& layerName = layer.getName();
-
- CV_Assert(layers.insert({layerName, id}).second);
- for (int i = 0; i < layer.getOutputPorts().size(); ++i)
- unconnectedPorts.insert({id, i});
-
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
- // By default, all the weights are connected to last ports ids.
- for (int i = 0; i < blobsIds.size(); ++i)
- {
- netBuilder.connect((size_t)blobsIds[i], {(size_t)id, (size_t)portIds[i]});
- }
-#endif
-}
-
-void InfEngineBackendNet::addOutput(const std::string& name)
-{
- requestedOutputs.push_back(name);
-}
-
-static InferenceEngine::Layout estimateLayout(const Mat& m)
-{
- if (m.dims == 4)
- return InferenceEngine::Layout::NCHW;
- else if (m.dims == 2)
- return InferenceEngine::Layout::NC;
- else
- return InferenceEngine::Layout::ANY;
-}
-
-static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std::string& name = "")
-{
- std::vector<size_t> shape = getShape<size_t>(m);
- if (m.type() == CV_32F)
- return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
- {InferenceEngine::Precision::FP32, shape, estimateLayout(m)}));
- else if (m.type() == CV_8U)
- return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
- {InferenceEngine::Precision::U8, shape, estimateLayout(m)}));
- else
- CV_Error(Error::StsNotImplemented, format("Unsupported data type %d", m.type()));
-}
-
-InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape,
- InferenceEngine::Layout layout)
-{
- if (m.type() == CV_32F)
- return InferenceEngine::make_shared_blob<float>(
- {InferenceEngine::Precision::FP32, shape, layout}, (float*)m.data);
- else if (m.type() == CV_8U)
- return InferenceEngine::make_shared_blob<uint8_t>(
- {InferenceEngine::Precision::U8, shape, layout}, (uint8_t*)m.data);
- else
- CV_Error(Error::StsNotImplemented, format("Unsupported data type %d", m.type()));
-}
-
-InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout)
-{
- std::vector<size_t> shape = getShape<size_t>(m);
- return wrapToInfEngineBlob(m, shape, layout);
-}
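Worth noting while reading the removed wrappers: wrapToInfEngineBlob() aliases the Mat's buffer instead of copying it, so the Mat must outlive the blob; this is also why the async forward path further down clones blobs per infer request. A minimal sketch of the aliasing (shape and layout are illustrative):

```cpp
#include <inference_engine.hpp>
#include <opencv2/core.hpp>

int main()
{
    cv::Mat m(std::vector<int>{1, 3, 2, 2}, CV_32F, cv::Scalar(0));
    InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<float>(
        {InferenceEngine::Precision::FP32, {1, 3, 2, 2}, InferenceEngine::Layout::NCHW},
        (float*)m.data);  // no copy: writes through 'blob' are visible in 'm'
    return 0;
}
```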
-
-InferenceEngine::Blob::Ptr cloneBlob(const InferenceEngine::Blob::Ptr& blob)
-{
- InferenceEngine::Blob::Ptr copy;
- auto description = blob->getTensorDesc();
- InferenceEngine::Precision precision = description.getPrecision();
- if (precision == InferenceEngine::Precision::FP32)
- {
- copy = InferenceEngine::make_shared_blob<float>(description);
- }
- else if (precision == InferenceEngine::Precision::U8)
- {
- copy = InferenceEngine::make_shared_blob<uint8_t>(description);
- }
- else
- CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
- copy->allocate();
- return copy;
-}
-
-InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr)
-{
- CV_Assert(!ptr.empty());
- Ptr<InfEngineBackendWrapper> p = ptr.dynamicCast<InfEngineBackendWrapper>();
- CV_Assert(!p.empty());
- return p->dataPtr;
-}
-
-InfEngineBackendWrapper::InfEngineBackendWrapper(int targetId, const cv::Mat& m)
- : BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, targetId)
-{
- dataPtr = wrapToInfEngineDataNode(m);
- blob = wrapToInfEngineBlob(m, estimateLayout(m));
-}
-
-InfEngineBackendWrapper::InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper)
- : BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, wrapper->targetId)
-{
- Ptr<InfEngineBackendWrapper> ieWrapper = wrapper.dynamicCast<InfEngineBackendWrapper>();
- CV_Assert(!ieWrapper.empty());
- InferenceEngine::DataPtr srcData = ieWrapper->dataPtr;
-
- dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(srcData->getName(), srcData->getTensorDesc()));
- blob = ieWrapper->blob;
-}
-
-Ptr<BackendWrapper> InfEngineBackendWrapper::create(Ptr<BackendWrapper> wrapper)
-{
- return Ptr<BackendWrapper>(new InfEngineBackendWrapper(wrapper));
-}
-
-InfEngineBackendWrapper::~InfEngineBackendWrapper()
-{
-
-}
-
-void InfEngineBackendWrapper::copyToHost()
-{
-
-}
-
-void InfEngineBackendWrapper::setHostDirty()
-{
-
-}
-
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
-static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr>& getSharedPlugins()
-{
- static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
- return sharedPlugins;
-}
-#else
static bool init_IE_plugins()
{
// load and hold IE plugins
: create_IE_Core_instance(id);
return core;
}
-#endif
+
static bool detectArmPlugin_()
{
static bool detectMyriadX_()
{
AutoLock lock(getInitializationMutex());
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R3)
+
// Lightweight detection
InferenceEngine::Core& ie = getCore("MYRIAD");
const std::vector<std::string> devices = ie.GetAvailableDevices();
}
}
return false;
-#else
- InferenceEngine::Builder::Network builder("");
- InferenceEngine::idx_t inpId = builder.addLayer(
- InferenceEngine::Builder::InputLayer().setPort(InferenceEngine::Port({1})));
-
-#if INF_ENGINE_RELEASE <= 2018050000
- InferenceEngine::idx_t clampId;
- {
- InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ClampLayer();
- auto& blobs = l.getConstantData();
- auto blob = InferenceEngine::make_shared_blob<int16_t>(
- InferenceEngine::Precision::FP16,
- InferenceEngine::Layout::C, {1});
- blob->allocate();
- blobs[""] = blob;
- clampId = builder.addLayer({inpId}, l);
- }
- builder.addLayer({InferenceEngine::PortInfo(clampId)}, InferenceEngine::Builder::OutputLayer());
-#else
-
- InferenceEngine::idx_t clampId = builder.addLayer({inpId}, InferenceEngine::Builder::ClampLayer());
- builder.addLayer({InferenceEngine::PortInfo(clampId)},
- InferenceEngine::Builder::OutputLayer().setPort(InferenceEngine::Port({},
- InferenceEngine::Precision::FP16)));
-#endif
-
- InferenceEngine::CNNNetwork cnn = InferenceEngine::CNNNetwork(
- InferenceEngine::Builder::convertToICNNNetwork(builder.build()));
-
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
- InferenceEngine::InferenceEnginePluginPtr enginePtr;
- {
- auto& sharedPlugins = getSharedPlugins();
- auto pluginIt = sharedPlugins.find("MYRIAD");
- if (pluginIt != sharedPlugins.end()) {
- enginePtr = pluginIt->second;
- } else {
- auto dispatcher = InferenceEngine::PluginDispatcher({""});
- enginePtr = dispatcher.getPluginByDevice("MYRIAD");
- sharedPlugins["MYRIAD"] = enginePtr;
- }
- }
- auto plugin = InferenceEngine::InferencePlugin(enginePtr);
- try
- {
- auto netExec = plugin.LoadNetwork(cnn, {{"VPU_PLATFORM", "VPU_2480"}});
-#else
- try
- {
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
- auto netExec = getCore("MYRIAD").LoadNetwork(cnn, "MYRIAD", {{"VPU_PLATFORM", "VPU_2480"}});
-#else
- auto netExec = getCore("MYRIAD").LoadNetwork(cnn, "MYRIAD", {{"VPU_MYRIAD_PLATFORM", "VPU_MYRIAD_2480"}});
-#endif
-#endif
- auto infRequest = netExec.CreateInferRequest();
- } catch(...) {
- return false;
- }
- return true;
-#endif
}
#endif // !defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
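Only the lightweight probe survives this change: instead of building and loading a throwaway network, MyriadX detection now enumerates devices through the Core API (available since 2019R3). Roughly, with the per-device metric query that distinguishes Myriad 2 from MyriadX omitted:

```cpp
#include <inference_engine.hpp>
#include <iostream>

int main()
{
    InferenceEngine::Core ie;
    const std::vector<std::string> devices = ie.GetAvailableDevices();
    for (const std::string& device : devices)
    {
        if (device.find("MYRIAD") != std::string::npos)
            std::cout << "VPU device present: " << device << std::endl;
    }
    return 0;
}
```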
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-
-void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
-{
- CV_Assert(!isInitialized());
-
- try
- {
- AutoLock lock(getInitializationMutex());
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
- auto& sharedPlugins = getSharedPlugins();
- auto pluginIt = sharedPlugins.find(device_name);
- if (pluginIt != sharedPlugins.end())
- {
- enginePtr = pluginIt->second;
- }
- else
-#else
- InferenceEngine::Core& ie = getCore(device_name);
-#endif
- {
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
- auto dispatcher = InferenceEngine::PluginDispatcher({""});
- if (device_name == "FPGA")
- enginePtr = dispatcher.getPluginByDevice("HETERO:FPGA,CPU");
- else
- enginePtr = dispatcher.getPluginByDevice(device_name);
- sharedPlugins[device_name] = enginePtr;
-#else
- isInit = true;
-#endif
- std::vector<std::string> candidates;
- std::string param_pluginPath = utils::getConfigurationParameterString("OPENCV_DNN_IE_EXTRA_PLUGIN_PATH", "");
- if (!param_pluginPath.empty())
- {
- candidates.push_back(param_pluginPath);
- }
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
- if (device_name == "CPU" || device_name == "FPGA")
- {
- std::string suffixes[] = {"_avx2", "_sse4", ""};
- bool haveFeature[] = {
- checkHardwareSupport(CPU_AVX2),
- checkHardwareSupport(CPU_SSE4_2),
- true
- };
- for (int i = 0; i < 3; ++i)
- {
- if (!haveFeature[i])
- continue;
-#ifdef _WIN32
- candidates.push_back("cpu_extension" + suffixes[i] + ".dll");
-#elif defined(__APPLE__)
- candidates.push_back("libcpu_extension" + suffixes[i] + ".so"); // built as loadable module
- candidates.push_back("libcpu_extension" + suffixes[i] + ".dylib"); // built as shared library
-#else
- candidates.push_back("libcpu_extension" + suffixes[i] + ".so");
-#endif // _WIN32
- }
- }
-#endif
- bool found = false;
- for (size_t i = 0; i != candidates.size(); ++i)
- {
- const std::string& libName = candidates[i];
- try
- {
- InferenceEngine::IExtensionPtr extension =
- InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(libName);
-
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
- enginePtr->AddExtension(extension, 0);
-#else
- ie.AddExtension(extension, "CPU");
-#endif
- CV_LOG_INFO(NULL, "DNN-IE: Loaded extension plugin: " << libName);
- found = true;
- break;
- }
- catch(...) {}
- }
- if (!found && !candidates.empty())
- {
- CV_LOG_WARNING(NULL, "DNN-IE: Can't load extension plugin (extra layers for some networks). Specify path via OPENCV_DNN_IE_EXTRA_PLUGIN_PATH parameter");
- }
- // Some networks can work without the extra layers library.
-#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2019R1)
- // OpenCV fallbacks as extensions.
- try
- {
- ie.AddExtension(std::make_shared<InfEngineExtension>(), "CPU");
- }
- catch(const std::exception& e)
- {
- CV_LOG_INFO(NULL, "DNN-IE: Can't register OpenCV custom layers extension: " << e.what());
- }
-#endif
- // Limit the number of CPU threads.
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
-#ifndef _WIN32
- enginePtr->SetConfig({{
- InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
- }}, 0);
-#endif // _WIN32
-#else
- if (device_name == "CPU")
- ie.SetConfig({{
- InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
- }}, device_name);
-#endif
- }
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
- plugin = InferenceEngine::InferencePlugin(enginePtr);
- netExec = plugin.LoadNetwork(net, {});
-#else
- bool isHetero = false;
- if (device_name != "CPU")
- {
- isHetero = device_name == "FPGA";
- for (auto& layer : net)
- {
- if (layer->type == kOpenCVLayersType)
- {
- isHetero = true;
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2019R3)
- // Not sure about lower versions, but in 2019R3 we do not need this.
- layer->affinity = "CPU";
- }
- else
- {
- layer->affinity = device_name;
-#endif
- }
- }
- }
- if (isHetero)
- netExec = ie.LoadNetwork(net, "HETERO:" + device_name + ",CPU");
- else
- netExec = ie.LoadNetwork(net, device_name);
-#endif
- }
- catch (const std::exception& ex)
- {
- CV_Error(Error::StsError, format("Failed to initialize Inference Engine backend (device = %s): %s", device_name.c_str(), ex.what()));
- }
-}
-
-bool InfEngineBackendNet::isInitialized()
-{
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
- return (bool)enginePtr;
-#else
- return isInit;
-#endif
-}
-
-void InfEngineBackendNet::reset()
-{
- allBlobs.clear();
- infRequests.clear();
- isInit = false;
-}
-
-void InfEngineBackendNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs)
-{
- auto wrappers = infEngineWrappers(ptrs);
- for (const auto& wrapper : wrappers)
- {
- std::string name = wrapper->dataPtr->getName();
- name = name.empty() ? kDefaultInpLayerName : name;
- allBlobs.insert({name, wrapper->blob});
- }
-}
-
-void InfEngineBackendNet::InfEngineReqWrapper::makePromises(const std::vector<Ptr<BackendWrapper> >& outsWrappers)
-{
- auto outs = infEngineWrappers(outsWrappers);
- outProms.clear();
- outProms.resize(outs.size());
- outsNames.resize(outs.size());
- for (int i = 0; i < outs.size(); ++i)
- {
- outs[i]->futureMat = outProms[i].getArrayResult();
- outsNames[i] = outs[i]->dataPtr->getName();
- }
-}
-
-void InfEngineBackendNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
- bool isAsync)
-{
- CV_LOG_DEBUG(NULL, "InfEngineBackendNet::forward(" << (isAsync ? "async" : "sync") << ")");
- // Look for finished requests.
- Ptr<InfEngineReqWrapper> reqWrapper;
- for (auto& wrapper : infRequests)
- {
- if (wrapper->isReady)
- {
- reqWrapper = wrapper;
- break;
- }
- }
- if (reqWrapper.empty())
- {
- reqWrapper = Ptr<InfEngineReqWrapper>(new InfEngineReqWrapper());
- try
- {
- reqWrapper->req = netExec.CreateInferRequest();
- }
- catch (const std::exception& ex)
- {
- CV_Error(Error::StsAssert, format("Failed to initialize Inference Engine backend: %s", ex.what()));
- }
- infRequests.push_back(reqWrapper);
-
- InferenceEngine::BlobMap inpBlobs, outBlobs;
- for (const auto& it : cnn.getInputsInfo())
- {
- const std::string& name = it.first;
- auto blobIt = allBlobs.find(name);
- CV_Assert(blobIt != allBlobs.end());
- inpBlobs[name] = isAsync ? cloneBlob(blobIt->second) : blobIt->second;
- }
- for (const auto& it : cnn.getOutputsInfo())
- {
- const std::string& name = it.first;
- auto blobIt = allBlobs.find(name);
- CV_Assert(blobIt != allBlobs.end());
- outBlobs[name] = isAsync ? cloneBlob(blobIt->second) : blobIt->second;
- }
- reqWrapper->req.SetInput(inpBlobs);
- reqWrapper->req.SetOutput(outBlobs);
-
- InferenceEngine::IInferRequest::Ptr infRequestPtr = reqWrapper->req;
- infRequestPtr->SetUserData(reqWrapper.get(), 0);
-
- infRequestPtr->SetCompletionCallback(
- [](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status)
- {
- CV_LOG_DEBUG(NULL, "DNN(IE): completionCallback(" << (int)status << ")");
-
- InfEngineReqWrapper* wrapper;
- request->GetUserData((void**)&wrapper, 0);
- CV_Assert(wrapper && "Internal error");
-
- size_t processedOutputs = 0;
- try
- {
- for (; processedOutputs < wrapper->outProms.size(); ++processedOutputs)
- {
- const std::string& name = wrapper->outsNames[processedOutputs];
- Mat m = infEngineBlobToMat(wrapper->req.GetBlob(name));
-
- try
- {
- CV_Assert(status == InferenceEngine::StatusCode::OK);
- wrapper->outProms[processedOutputs].setValue(m.clone());
- }
- catch (...)
- {
- try {
- wrapper->outProms[processedOutputs].setException(std::current_exception());
- } catch(...) {
- CV_LOG_ERROR(NULL, "DNN: Exception occurred while propagating an async inference exception");
- }
- }
- }
- }
- catch (...)
- {
- std::exception_ptr e = std::current_exception();
- for (; processedOutputs < wrapper->outProms.size(); ++processedOutputs)
- {
- try {
- wrapper->outProms[processedOutputs].setException(e);
- } catch(...) {
- CV_LOG_ERROR(NULL, "DNN: Exception occurred while propagating an async inference exception");
- }
- }
- }
- wrapper->isReady = true;
- }
- );
- }
- if (isAsync)
- {
- // Copy actual data to infer request's input blobs.
- for (const auto& it : cnn.getInputsInfo())
- {
- const std::string& name = it.first;
- auto blobIt = allBlobs.find(name);
- Mat srcMat = infEngineBlobToMat(blobIt->second);
- Mat dstMat = infEngineBlobToMat(reqWrapper->req.GetBlob(name));
- srcMat.copyTo(dstMat);
- }
-
- // Set promises to output blobs wrappers.
- reqWrapper->makePromises(outBlobsWrappers);
-
- reqWrapper->isReady = false;
- reqWrapper->req.StartAsync();
- }
- else
- {
- reqWrapper->req.Infer();
- }
-}
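The removed forward() handled async mode by cloning input/output blobs per infer request and settling one promise per network output from the IE completion callback. The same pattern, reduced to standard C++ (std::promise standing in for cv::AsyncPromise):

```cpp
#include <future>
#include <iostream>
#include <thread>
#include <vector>

int main()
{
    // One promise per network output; futures are handed to the caller up front.
    std::vector<std::promise<float> > outProms(2);
    std::vector<std::future<float> > results;
    for (size_t i = 0; i < outProms.size(); ++i)
        results.push_back(outProms[i].get_future());

    // Stand-in for the IE completion callback: settle each promise when done.
    // (The removed code used setValue/setException on wrapper->outProms the same way.)
    std::thread request([&outProms]() {
        outProms[0].set_value(0.9f);
        outProms[1].set_value(0.1f);
    });

    for (size_t i = 0; i < results.size(); ++i)
        std::cout << "output " << i << ": " << results[i].get() << std::endl;
    request.join();
    return 0;
}
```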
-
-bool InfEngineBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
- const int requiredOutputs,
- std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
-{
- InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes();
- InferenceEngine::ICNNNetwork::InputShapes::iterator itr;
- bool equal_flag = true;
- size_t i = 0;
- for (itr = inShapes.begin(); itr != inShapes.end(); ++itr)
- {
- InferenceEngine::SizeVector currentInShape(inputs[i].begin(), inputs[i].end());
- if (itr->second != currentInShape)
- {
- itr->second = currentInShape;
- equal_flag = false;
- }
- i++;
- }
-
- if (!equal_flag)
- {
- InferenceEngine::CNNNetwork curr_t_net(t_net);
- curr_t_net.reshape(inShapes);
- }
- std::vector<size_t> dims = t_net.getOutputsInfo()[name]->getDims();
- outputs.push_back(MatShape(dims.begin(), dims.end()));
- return false;
-}
-
-bool InfEngineBackendLayer::supportBackend(int backendId)
-{
- CV_LOG_DEBUG(NULL, "InfEngineBackendLayer::supportBackend(" << backendId << ")");
- return backendId == DNN_BACKEND_DEFAULT ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
-}
-
-void InfEngineBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
- OutputArrayOfArrays internals)
-{
- CV_Error(Error::StsInternal, "Choose Inference Engine as a preferable backend.");
-}
-
-InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
-{
- auto halfs = InferenceEngine::make_shared_blob<int16_t>({
- InferenceEngine::Precision::FP16, blob->getTensorDesc().getDims(),
- blob->getTensorDesc().getLayout()
- });
- halfs->allocate();
- Mat floatsData(1, blob->size(), CV_32F, blob->buffer());
- Mat halfsData(1, blob->size(), CV_16SC1, halfs->buffer());
- convertFp16(floatsData, halfsData);
- return halfs;
-}
-
-void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data,
- InferenceEngine::Builder::Layer& l)
-{
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
- l.getParameters()[name] = data;
-#else
- l.addConstantData(name, data);
-#endif
-}
-
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
#endif // HAVE_INF_ENGINE
-bool haveInfEngine()
-{
-#ifdef HAVE_INF_ENGINE
- return true;
-#else
- return false;
-#endif // HAVE_INF_ENGINE
-}
-
-void forwardInfEngine(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
- Ptr<BackendNode>& node, bool isAsync)
-{
- CV_Assert(haveInfEngine());
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
- CV_Assert(!node.empty());
- Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
- CV_Assert(!ieNode.empty());
- ieNode->net->forward(outBlobsWrappers, isAsync);
-#else
- CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif // HAVE_INF_ENGINE
-}
CV__DNN_EXPERIMENTAL_NS_BEGIN
void resetMyriadDevice()
{
#ifdef HAVE_INF_ENGINE
+ CV_LOG_INFO(NULL, "DNN: Unregistering both 'MYRIAD' and 'HETERO:MYRIAD,CPU' plugins");
+
AutoLock lock(getInitializationMutex());
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
- getSharedPlugins().erase("MYRIAD");
-#else
- // Unregister both "MYRIAD" and "HETERO:MYRIAD,CPU" plugins
+
InferenceEngine::Core& ie = getCore("MYRIAD");
try
{
ie.UnregisterPlugin("HETERO");
}
catch (...) {}
-#endif
#endif // HAVE_INF_ENGINE
}
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
-#if defined(HAVE_DNN_IE_NN_BUILDER_2019) || INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2020_4)
-//#define INFERENCE_ENGINE_DEPRECATED // turn off deprecation warnings from IE
-//there is no way to suppress warnings from IE only at this moment, so we are forced to suppress warnings globally
-#if defined(__GNUC__)
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-#endif
-#ifdef _MSC_VER
-#pragma warning(disable: 4996) // was declared deprecated
-#endif
-#endif
-
-#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
-#pragma GCC visibility push(default)
-#endif
-
#include <inference_engine.hpp>
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-#include <ie_builders.hpp>
-#endif
-
-#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
-#pragma GCC visibility pop
-#endif
-
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif
#endif // HAVE_INF_ENGINE
+#define CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 do { CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support (legacy API is not supported anymore)"); } while (0)
+
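The new macro gives every surviving NN Builder code path a single, uniform failure; the do { } while (0) wrapper keeps it statement-safe after an unbraced if. An illustrative call site (the condition is a sketch, not a line from this patch):

```cpp
// Requests for the removed backend now fail uniformly:
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;  // throws cv::Exception (StsNotImplemented)
```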
namespace cv { namespace dnn {
#ifdef HAVE_INF_ENGINE
void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
std::vector<Mat>& mats);
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-
-class InfEngineBackendNet
-{
-public:
- InfEngineBackendNet();
-
- InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
-
- void addLayer(InferenceEngine::Builder::Layer& layer);
-
- void addOutput(const std::string& name);
-
- void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
- const std::vector<Ptr<BackendWrapper> >& outputs,
- const std::string& layerName);
-
- bool isInitialized();
-
- void init(Target targetId);
-
- void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
- bool isAsync);
-
- void initPlugin(InferenceEngine::CNNNetwork& net);
-
- void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);
-
- void reset();
-
-private:
- InferenceEngine::Builder::Network netBuilder;
-
- InferenceEngine::ExecutableNetwork netExec;
- InferenceEngine::BlobMap allBlobs;
- std::string device_name;
-#if INF_ENGINE_VER_MAJOR_LE(2019010000)
- InferenceEngine::InferenceEnginePluginPtr enginePtr;
- InferenceEngine::InferencePlugin plugin;
-#else
- bool isInit = false;
-#endif
-
- struct InfEngineReqWrapper
- {
- InfEngineReqWrapper() : isReady(true) {}
-
- void makePromises(const std::vector<Ptr<BackendWrapper> >& outs);
-
- InferenceEngine::InferRequest req;
- std::vector<cv::AsyncPromise> outProms;
- std::vector<std::string> outsNames;
- bool isReady;
- };
-
- std::vector<Ptr<InfEngineReqWrapper> > infRequests;
-
- InferenceEngine::CNNNetwork cnn;
- bool hasNetOwner;
-
- std::map<std::string, int> layers;
- std::vector<std::string> requestedOutputs;
-
- std::set<std::pair<int, int> > unconnectedPorts;
-};
-
-class InfEngineBackendNode : public BackendNode
-{
-public:
- InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);
-
- InfEngineBackendNode(Ptr<Layer>& layer, std::vector<Mat*>& inputs,
- std::vector<Mat>& outputs, std::vector<Mat>& internals);
-
- void connect(std::vector<Ptr<BackendWrapper> >& inputs,
- std::vector<Ptr<BackendWrapper> >& outputs);
-
- // Inference Engine network object that allows to obtain the outputs of this layer.
- InferenceEngine::Builder::Layer layer;
- Ptr<InfEngineBackendNet> net;
- // CPU fallback in case of unsupported Inference Engine layer.
- Ptr<dnn::Layer> cvLayer;
-};
-
-class InfEngineBackendWrapper : public BackendWrapper
-{
-public:
- InfEngineBackendWrapper(int targetId, const Mat& m);
-
- InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);
-
- ~InfEngineBackendWrapper();
-
- static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);
-
- virtual void copyToHost() CV_OVERRIDE;
-
- virtual void setHostDirty() CV_OVERRIDE;
-
- InferenceEngine::DataPtr dataPtr;
- InferenceEngine::Blob::Ptr blob;
- AsyncArray futureMat;
-};
-
-InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);
-
-InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);
-
-InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);
-
-// Convert Inference Engine blob with FP32 precision to FP16 precision.
-// Allocates memory for a new blob.
-InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
-
-void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
-
-// This is a fake class to run networks from Model Optimizer. Objects of that
-// class simulate responses of layers that are imported by OpenCV and supported by
-// Inference Engine. The main difference is that they do not perform forward pass.
-class InfEngineBackendLayer : public Layer
-{
-public:
- InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {};
-
- virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
- const int requiredOutputs,
- std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const CV_OVERRIDE;
-
- virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
- OutputArrayOfArrays internals) CV_OVERRIDE;
-
- virtual bool supportBackend(int backendId) CV_OVERRIDE;
-
-private:
- InferenceEngine::CNNNetwork t_net;
-};
-
-class InfEngineExtension : public InferenceEngine::IExtension
-{
-public:
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
- virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
-#endif
- virtual void Unload() noexcept {}
- virtual void Release() noexcept {}
- virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
-
- virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
- InferenceEngine::ResponseDesc*) noexcept
- {
- return InferenceEngine::StatusCode::OK;
- }
-
- InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
- const InferenceEngine::CNNLayer* cnnLayer,
- InferenceEngine::ResponseDesc* resp) noexcept;
-};
-
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
CV__DNN_EXPERIMENTAL_NS_BEGIN
return result;
}
-
#endif // HAVE_INF_ENGINE
-bool haveInfEngine();
-
-void forwardInfEngine(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
- Ptr<BackendNode>& node, bool isAsync);
-
}} // namespace dnn, namespace cv
#endif // __OPENCV_DNN_OP_INF_ENGINE_HPP__
std::vector< Target > available;
{
- available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
- for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
- {
- if (*i == DNN_TARGET_MYRIAD && !withVPU)
- continue;
- targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, *i));
- }
- }
-
- {
available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
{
            if (*i == DNN_TARGET_MYRIAD && !withVPU)
                continue;
targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, *i));
}
-
}
return testing::ValuesIn(targets);
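// Sketch of how the enumerated (backend, target) pairs feed a parameterized
// gtest suite (both names below are hypothetical placeholders):
//
//     INSTANTIATE_TEST_CASE_P(/**/, DNN_IE, dnnBackendsAndTargetsIE());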
|| modelName == "person-vehicle-bike-detection-2004" // 2021.4+: ncDeviceOpen:1013 Failed to find booted device after boot
)
)
- applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
if (targetId == DNN_TARGET_OPENCL && (false
|| modelName == "face-detection-0106" // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
)
)
- applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
if (targetId == DNN_TARGET_OPENCL_FP16 && (false
|| modelName == "face-detection-0106" // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
)
)
- applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if INF_ENGINE_VER_MAJOR_GE(2020020000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
- else
- FAIL() << "Unknown backendId";
+ ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
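// With the NN Builder backend removed, nGraph is the only IE backend these
// parameterized tests can receive, so the former dispatch collapses to a
// single assertion (the same simplification repeats at each call site below).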
bool isFP16 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD);
if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
throw SkipTestException("No support for async forward");
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
- else
- FAIL() << "Unknown backendId";
+ ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net netDefault = readNet(_tf("layer_convolution.caffemodel"), _tf("layer_convolution.prototxt"));
Net net = readNet(_tf("layer_convolution.xml"), _tf("layer_convolution.bin"));
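// Hedged note: readNet() selects the importer from the file extensions, so the
// pair above loads the same convolution once via Caffe and once via OpenVINO IR.
// A sketch of the comparison (assumed test logic; variable names as above):
//
//     Mat ref = netDefault.forward();  // Caffe-loaded reference
//     Mat out = net.forward();         // same weights through the IR path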
if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
throw SkipTestException("No support for async forward");
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
- else
- FAIL() << "Unknown backendId";
+ ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
int blobSize[] = {2, 6, 75, 113};
Mat inputs[] = {Mat(4, &blobSize[0], CV_8U), Mat()};
if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
throw SkipTestException("No support for async forward");
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
- else
- FAIL() << "Unknown backendId";
+ ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
std::string xmlPath = _tf("layer_convolution.xml");
std::string binPath = _tf("layer_convolution.bin");
const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
- else
- FAIL() << "Unknown backendId";
+ ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net net = readNet(model, proto);
net.setPreferableBackend(backendId);
const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
- else
- FAIL() << "Unknown backendId";
+ ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net netSync = readNet(model, proto);
netSync.setPreferableBackend(backendId);
const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
- else
- FAIL() << "Unknown backendId";
+ ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net netSync = readNet(model, proto);
netSync.setPreferableBackend(backendId);
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && dtype == CV_8U)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
- else
- FAIL() << "Unknown backendId";
+ ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net netSync;
Net netAsync;
const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
- else
- FAIL() << "Unknown backendId";
+ ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net net0 = readNet(model, proto);
net0.setPreferableTarget(targetId);
const std::string& weightsFile = findDataFile("dnn/layers/layer_convolution.bin");
const std::string& modelFile = findDataFile("dnn/layers/layer_convolution.xml");
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
- else
- FAIL() << "Unknown backendId";
+ ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net net1 = readNetFromModelOptimizer(modelFile, weightsFile);
net1.setPreferableBackend(backendId);
const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
- setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
- else
- FAIL() << "Unknown backendId";
+ ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
Net net0 = readNet(model, proto);
net0.setPreferableTarget(targetId);