set(include_dirs ${fw_inc})
set(sources_options "")
+
set(libs libprotobuf ${LAPACK_LIBRARIES})
+
if(OPENCV_DNN_OPENCL AND HAVE_OPENCL)
list(APPEND include_dirs ${OPENCL_INCLUDE_DIRS})
else()
//! DNN_BACKEND_DEFAULT equals DNN_BACKEND_INFERENCE_ENGINE if
//! OpenCV is built with Intel's Inference Engine library, or
//! DNN_BACKEND_OPENCV otherwise.
- DNN_BACKEND_DEFAULT,
+ DNN_BACKEND_DEFAULT = 0,
DNN_BACKEND_HALIDE,
- DNN_BACKEND_INFERENCE_ENGINE, //!< Intel's Inference Engine computational backend.
+ DNN_BACKEND_INFERENCE_ENGINE, //!< Intel's Inference Engine computational backend
+ //!< @sa setInferenceEngineBackendType
DNN_BACKEND_OPENCV,
- // OpenCV 4.x: DNN_BACKEND_VKCOM,
- // OpenCV 4.x: DNN_BACKEND_CUDA,
-
+ DNN_BACKEND_VKCOM,
- DNN_BACKEND_CUDA
++ DNN_BACKEND_CUDA,
+ #ifdef __OPENCV_BUILD
+ DNN_BACKEND_INFERENCE_ENGINE_NGRAPH = 1000000, // internal - use DNN_BACKEND_INFERENCE_ENGINE + setInferenceEngineBackendType()
+ DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, // internal - use DNN_BACKEND_INFERENCE_ENGINE + setInferenceEngineBackendType()
+ #endif
};
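// Note (not part of this patch): a minimal usage sketch for the public backend IDs above.
// An application picks a backend/target pair on a Net; the internal
// DNN_BACKEND_INFERENCE_ENGINE_* values are never passed directly, they are derived from
// DNN_BACKEND_INFERENCE_ENGINE plus setInferenceEngineBackendType(). Model and image
// file names below are hypothetical.
//
//     cv::Mat img = cv::imread("input.jpg");
//     cv::Mat blob = cv::dnn::blobFromImage(img, 1.0, cv::Size(224, 224));
//     cv::dnn::Net net = cv::dnn::readNet("model.onnx");
//     net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);   // or DNN_BACKEND_VKCOM, ...
//     net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);     // target must match the backend
//     net.setInput(blob);
//     cv::Mat out = net.forward();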
/**
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs);
+ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs, const std::vector<Ptr<BackendNode> >& nodes);
+
+ virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs);
+
+ /**
+ * @brief Returns a CUDA backend node
+ *
+ * @param context void pointer to CSLContext object
+ * @param inputs layer inputs
+ * @param outputs layer outputs
+ */
+ virtual Ptr<BackendNode> initCUDA(
+ void *context,
+ const std::vector<Ptr<BackendWrapper>>& inputs,
+ const std::vector<Ptr<BackendWrapper>>& outputs
+ );
+
/**
* @brief Automatic Halide scheduling based on layer hyper-parameters.
* @param[in] node Backend node with Halide functions.
#include "../dnn.hpp"
namespace cv { namespace dnn {
-CV__DNN_EXPERIMENTAL_NS_BEGIN
+CV__DNN_INLINE_NS_BEGIN
+ /* Values for 'OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE' parameter */
+ #define CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API "NN_BUILDER"
+ #define CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH "NGRAPH"
+
+ /** @brief Returns Inference Engine internal backend API.
+ *
+ * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
+ *
+ * Default value is controlled through `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable).
+ */
+ CV_EXPORTS_W cv::String getInferenceEngineBackendType();
+
+ /** @brief Specify Inference Engine internal backend API.
+ *
+ * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
+ *
+ * @returns previous value of internal backend API
+ */
+ CV_EXPORTS_W cv::String setInferenceEngineBackendType(const cv::String& newBackendType);
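// Note (not part of this patch): a minimal sketch of how the two calls above are expected
// to be used in an Inference Engine-enabled build. The internal API should be selected
// before the first network is initialized, either programmatically or via the
// OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE environment variable; IR file names below are
// hypothetical.
//
//     cv::dnn::setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
//     cv::dnn::Net net = cv::dnn::readNet("model.xml", "model.bin");
//     net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
//     net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
//     cv::String current = cv::dnn::getInferenceEngineBackendType();  // now "NGRAPH"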
+
+
/** @brief Release a Myriad device (bound by OpenCV).
*
* A single Myriad device cannot be shared across multiple processes that use
--- /dev/null
- #define OPENCV_DNN_API_VERSION 20191111
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#ifndef OPENCV_DNN_VERSION_HPP
+#define OPENCV_DNN_VERSION_HPP
+
+/// Use with major OpenCV version only.
++#define OPENCV_DNN_API_VERSION 20191202
+
+#if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_INLINE_NS
+#define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)
+#define CV__DNN_INLINE_NS_BEGIN namespace CV__DNN_INLINE_NS {
+#define CV__DNN_INLINE_NS_END }
+namespace cv { namespace dnn { namespace CV__DNN_INLINE_NS { } using namespace CV__DNN_INLINE_NS; }}
+#else
+#define CV__DNN_INLINE_NS_BEGIN
+#define CV__DNN_INLINE_NS_END
+#endif
+
+#endif // OPENCV_DNN_VERSION_HPP
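// Note (a sketch derived from the macros above, not part of this patch): with
// OPENCV_DNN_API_VERSION equal to 20191202, CV__DNN_INLINE_NS expands to dnn4_v20191202,
// so headers wrapped in CV__DNN_INLINE_NS_BEGIN/END effectively declare:
//
//     namespace cv { namespace dnn {
//         namespace dnn4_v20191202 { /* public dnn symbols */ }
//         using namespace dnn4_v20191202;   // keeps the cv::dnn::X spelling working
//     }}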
#include "precomp.hpp"
#include "op_halide.hpp"
#include "op_inf_engine.hpp"
+ #include "ie_ngraph.hpp"
+#include "op_vkcom.hpp"
+#include "op_cuda.hpp"
+
#include "halide_scheduler.hpp"
+
#include <set>
#include <algorithm>
#include <iostream>
return Ptr<BackendWrapper>(new HalideBackendWrapper(targetId, m));
#endif // HAVE_HALIDE
}
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+ else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
- CV_Assert(haveInfEngine());
#ifdef HAVE_INF_ENGINE
return Ptr<BackendWrapper>(new InfEngineBackendWrapper(targetId, m));
- #endif // HAVE_INF_ENGINE
+ #else
+ CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine API support");
+ #endif
+ }
+ else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ {
+ #ifdef HAVE_DNN_NGRAPH
+ return Ptr<BackendWrapper>(new NgraphBackendWrapper(targetId, m));
+ #else
+ CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
+ #endif
}
+ else if (backendId == DNN_BACKEND_VKCOM)
+ {
+ CV_Assert(haveVulkan());
+#ifdef HAVE_VULKAN
+ return Ptr<BackendWrapper>(new VkComBackendWrapper(m));
+#endif // HAVE_VULKAN
+ }
+ else if (backendId == DNN_BACKEND_CUDA)
+ {
+ CV_Assert(haveCUDA());
+
+#ifdef HAVE_CUDA
+ switch (targetId)
+ {
+ case DNN_TARGET_CUDA:
+ return CUDABackendWrapperFP32::create(m);
+ case DNN_TARGET_CUDA_FP16:
+ return CUDABackendWrapperFP16::create(m);
+ default:
+ CV_Assert(IS_DNN_CUDA_TARGET(targetId));
+ }
+#endif
+ }
else
CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
- return Ptr<BackendWrapper>();
+ return Ptr<BackendWrapper>(); // TODO Error?
}
struct Net::Impl
preferableTarget == DNN_TARGET_OPENCL ||
preferableTarget == DNN_TARGET_OPENCL_FP16 ||
preferableTarget == DNN_TARGET_MYRIAD ||
- preferableTarget == DNN_TARGET_FPGA);
+ preferableTarget == DNN_TARGET_FPGA
+ );
+ }
+ CV_Assert(preferableBackend != DNN_BACKEND_VKCOM ||
+ preferableTarget == DNN_TARGET_VULKAN);
+ CV_Assert(preferableBackend != DNN_BACKEND_CUDA ||
+ IS_DNN_CUDA_TARGET(preferableTarget));
-
if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
{
if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
CV_Assert(preferableTarget == DNN_TARGET_CPU || IS_DNN_OPENCL_TARGET(preferableTarget));
else if (preferableBackend == DNN_BACKEND_HALIDE)
initHalideBackend();
- else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
+ else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+ {
+ #ifdef HAVE_INF_ENGINE
initInfEngineBackend();
+ #else
+ CV_Assert(false && "This OpenCV version is built without Inference Engine API support");
+ #endif
+ }
+ else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ {
+ #ifdef HAVE_DNN_NGRAPH
+ initNgraphBackend();
+ #else
+ CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
+ #endif
+ }
+ else if (preferableBackend == DNN_BACKEND_VKCOM)
+ initVkComBackend();
+ else if (preferableBackend == DNN_BACKEND_CUDA)
+ initCUDABackend();
else
CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
}
ld.skip = false;
}
}
+ }
#endif // HAVE_INF_ENGINE
+
+
+ #ifdef HAVE_DNN_NGRAPH
+ void addNgraphOutputs(LayerData &ld)
+ {
+ CV_TRACE_FUNCTION();
+
+ Ptr<InfEngineNgraphNet> layerNet;
+ auto it = ld.backendNodes.find(preferableBackend);
+ if (it != ld.backendNodes.end())
+ {
+ Ptr<BackendNode> node = it->second;
+ if (!node.empty())
+ {
+ Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
+ CV_Assert(!ieNode.empty()); CV_Assert(!ieNode->net.empty());
+ layerNet = ieNode->net;
+ }
+ }
+
+ for (int i = 0; i < ld.inputBlobsId.size(); ++i)
+ {
+ LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
+ Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
+ if (!inpNode.empty())
+ {
+ Ptr<InfEngineNgraphNode> ieInpNode = inpNode.dynamicCast<InfEngineNgraphNode>();
+ CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
+ if (layerNet != ieInpNode->net)
+ {
+ ieInpNode->net->addOutput(ieInpNode->node->get_friendly_name());
+ ieInpNode->net->setUnconnectedNodes(ieInpNode);
+ }
+ }
+ }
+ }
+
+ void initNgraphBackend()
+ {
+ CV_TRACE_FUNCTION();
+ CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, haveInfEngine());
+
+ MapIdToLayerData::iterator it;
+ Ptr<InfEngineNgraphNet> net;
+
+ for (it = layers.begin(); it != layers.end(); ++it)
+ {
+ LayerData &ld = it->second;
+ if (ld.id == 0)
+ {
+ CV_Assert((netInputLayer->outNames.empty() && ld.outputBlobsWrappers.size() == 1) ||
+ (netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
+ for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
+ {
+ InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
+ dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
+ }
+ }
+ else
+ {
+ for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
+ {
+ InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
+ dataPtr->setName(ld.name);
+ }
+ }
+ }
+
+ if (skipInfEngineInit)
+ {
+ Ptr<BackendNode> node = layers[lastLayerId].backendNodes[preferableBackend];
+ CV_Assert(!node.empty());
+
+ Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
+ CV_Assert(!ieNode.empty());
+
+ for (it = layers.begin(); it != layers.end(); ++it)
+ {
+ LayerData &ld = it->second;
+ if (ld.id == 0)
+ {
+ for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
+ {
+ InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.inputBlobsWrappers[i]);
+ dataPtr->setName(netInputLayer->outNames[i]);
+ }
+ }
+ else
+ {
+ for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
+ {
+ InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
+ dataPtr->setName(ld.name);
+ }
+ }
+ ieNode->net->addBlobs(ld.inputBlobsWrappers);
+ ieNode->net->addBlobs(ld.outputBlobsWrappers);
+ ld.skip = true;
+ }
+ layers[lastLayerId].skip = false;
+ ieNode->net->init((Target)preferableTarget);
+ return;
+ }
+
+ // Build Inference Engine networks from sets of layers that support this
+ // backend. Split the whole model into several Inference Engine networks if
+ // some of the layers are not implemented.
+ for (it = layers.begin(); it != layers.end(); ++it)
+ {
+ LayerData &ld = it->second;
+
+ if (ld.id == 0 && ld.skip)
+ continue;
+
+ bool fused = ld.skip;
+ Ptr<Layer> layer = ld.layerInstance;
+ if (!fused && !layer->supportBackend(preferableBackend))
+ {
+ addNgraphOutputs(ld);
+ net = Ptr<InfEngineNgraphNet>();
+ layer->preferableTarget = DNN_TARGET_CPU;
+
+ for (int i = 0; i < ld.inputBlobsId.size(); ++i)
+ {
+ LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
+ Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
+ if (!inpNode.empty()) {
+ Ptr<InfEngineNgraphNode> ieNode = inpNode.dynamicCast<InfEngineNgraphNode>();
+ ieNode->net->setUnconnectedNodes(ieNode);
+ }
+ }
+ continue;
+ }
+ ld.skip = true; // Initially skip all Inference Engine supported layers.
+
+ // Create a new network if one of the inputs comes from a different Inference Engine graph.
+ std::vector<Ptr<BackendNode>> inputNodes;
+ for (int i = 0; i < ld.inputBlobsId.size(); ++i)
+ {
+ // Layer_Test_ROIPooling.Accuracy has 2 inputs inpLD = 0, 0 -> has 4 inputNodes (input, rois, input, rois)
+ if (inputNodes.size() == ld.inputBlobsId.size()) {
+ break;
+ }
+ LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
+ Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
+ if (!inpNode.empty())
+ {
+ Ptr<InfEngineNgraphNode> ieInpNode = inpNode.dynamicCast<InfEngineNgraphNode>();
+ CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
+ if (ieInpNode->net == net && !fused) {
+ inputNodes.push_back(inpNode);
+ continue;
+ }
+ }
+
+ if (net.empty()) {
+ net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet());
+ }
+
+ if (!fused) {
+ std::vector<std::string> inputNames;
+ std::vector<cv::Mat> inputs;
+
+ auto curr_pos = inpLd.consumers.begin();
+ auto compare = [&ld] (const LayerPin& lp) { return lp.lid == ld.id; };
+ auto cons = curr_pos;
+ while ((cons = std::find_if(curr_pos, inpLd.consumers.end(), compare)) !=
+ inpLd.consumers.end()) {
+ int cons_inp = cons->oid;
+ Ptr<NgraphBackendWrapper> inpWrapper = inpLd.outputBlobsWrappers[cons_inp].
+ dynamicCast<NgraphBackendWrapper>();
+ auto iter = std::find(inputNames.begin(), inputNames.end(),
+ inpWrapper->dataPtr->getName());
+ if (iter == inputNames.end()) {
+ inputNames.push_back(inpWrapper->dataPtr->getName());
+ inputs.push_back(inpLd.outputBlobs[cons_inp]);
+ }
+ curr_pos = cons + 1;
+ }
+
+ auto inps = net->setInputs(inputs, inputNames);
+ for (auto& inp : inps) {
+ inputNodes.emplace_back(Ptr<BackendNode>(new InfEngineNgraphNode(inp)));
+ }
+ }
+ }
+
+ Ptr<BackendNode> node;
+ if (!net.empty())
+ {
+ if (fused)
+ {
+ bool inPlace = ld.inputBlobsId.size() == 1 && ld.outputBlobs.size() == 1 &&
+ ld.inputBlobs[0]->data == ld.outputBlobs[0].data;
+ CV_Assert(inPlace);
+ node = layers[ld.inputBlobsId[0].lid].backendNodes[preferableBackend];
+ ld.inputBlobsWrappers = layers[ld.inputBlobsId[0].lid].inputBlobsWrappers;
+ }
+ }
+ else {
+ net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet());
+ }
+
+ if (!fused)
+ {
+ CV_Assert(!inputNodes.empty());
+ node = layer->initNgraph(ld.inputBlobsWrappers, inputNodes);
+ for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
+ {
+ InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
+ node.dynamicCast<InfEngineNgraphNode>()->setName(dataPtr->getName());
+ }
+ }
+ else if (node.empty())
+ continue;
+
+ ld.backendNodes[preferableBackend] = node;
+
+ Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
+ CV_Assert(!ieNode.empty());
+ ieNode->net = net;
+
+ if (ld.consumers.empty()) {
+ // TF EAST_text_detection
+ ieNode->net->setUnconnectedNodes(ieNode);
+ }
+ ieNode->net->setNodePtr(&ieNode->node);
+
+ net->addBlobs(ld.inputBlobsWrappers);
+ net->addBlobs(ld.outputBlobsWrappers);
+ addNgraphOutputs(ld);
+ }
+
+ // Initialize all networks.
+ for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
+ {
+ LayerData &ld = it->second;
+ auto iter = ld.backendNodes.find(preferableBackend);
+ if (iter == ld.backendNodes.end())
+ continue;
+
+ Ptr<BackendNode>& node = iter->second;
+ if (node.empty())
+ continue;
+
+ Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
+ if (ieNode.empty())
+ continue;
+
+ CV_Assert(!ieNode->net.empty());
+
+ if (!ieNode->net->isInitialized())
+ {
+ ieNode->net->setUnconnectedNodes(ieNode);
+ ieNode->net->createNet((Target)preferableTarget);
+ ld.skip = false;
+ }
+ }
+ }
+ #endif // HAVE_DNN_NGRAPH
+
++ void initVkComBackend()
++ {
++ CV_TRACE_FUNCTION();
++ CV_Assert(preferableBackend == DNN_BACKEND_VKCOM);
++#ifdef HAVE_VULKAN
++ if (!haveVulkan())
++ return;
++
++ MapIdToLayerData::iterator it = layers.begin();
++ for (; it != layers.end(); it++)
++ {
++ LayerData &ld = it->second;
++ Ptr<Layer> layer = ld.layerInstance;
++ if (!layer->supportBackend(preferableBackend))
++ {
++ continue;
++ }
++
++ ld.skip = false;
++
++ try
++ {
++ ld.backendNodes[DNN_BACKEND_VKCOM] =
++ layer->initVkCom(ld.inputBlobsWrappers);
++ }
++ catch (const cv::Exception& e)
++ {
++ CV_LOG_ERROR(NULL, "initVkCom failed, fallback to CPU implementation. " << e.what());
++ ld.backendNodes[DNN_BACKEND_VKCOM] = Ptr<BackendNode>();
++ }
++ }
++#endif
+ }
+
+ void initCUDABackend() {
+ CV_Assert(haveCUDA());
+
+#ifdef HAVE_CUDA
+ for (auto& layer : layers)
+ {
+ auto& ld = layer.second;
+ auto& layerInstance = ld.layerInstance;
+
+ if (!layerInstance->supportBackend(DNN_BACKEND_CUDA))
+ {
+ std::ostringstream os;
+ os << "CUDA backend will fallback to the CPU implementation for the layer \"" << ld.name
+ << "\" of type " << ld.type << '\n';
+ CV_LOG_INFO(NULL, os.str().c_str());
+ continue;
+ }
+
+ /* we make a copy so that `initCUDA` doesn't modify `cudaInfo->context` */
+ auto context = cudaInfo->context;
+ auto node = layerInstance->initCUDA(&context, ld.inputBlobsWrappers, ld.outputBlobsWrappers);
+ ld.backendNodes[DNN_BACKEND_CUDA] = node;
+
+ auto cudaNode = node.dynamicCast<CUDABackendNode>();
+ cudaInfo->workspace.require(cudaNode->get_workspace_memory_in_bytes());
+ }
+#endif
+ }
+
void allocateLayer(int lid, const LayersShapesMap& layersShapes)
{
CV_TRACE_FUNCTION();
void fuseLayers(const std::vector<LayerPin>& blobsToKeep_)
{
- if( !fusion || (preferableBackend != DNN_BACKEND_OPENCV &&
- preferableBackend != DNN_BACKEND_CUDA &&
- preferableBackend != DNN_BACKEND_INFERENCE_ENGINE))
- return;
-
CV_TRACE_FUNCTION();
+ if(!fusion || (preferableBackend != DNN_BACKEND_OPENCV &&
++ preferableBackend != DNN_BACKEND_CUDA &&
+ preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
+ preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH))
+ return;
+
// Scan through all the layers. If there is a convolution layer followed by an activation layer,
// we try to embed the activation into the convolution and disable separate execution of the activation.
std::set<LayerPin> pinsToKeep(blobsToKeep_.begin(),
{
forwardInfEngine(ld.outputBlobsWrappers, node, isAsync);
}
+ else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ {
+ forwardNgraph(ld.outputBlobsWrappers, node, isAsync);
+ }
+ else if (preferableBackend == DNN_BACKEND_VKCOM)
+ {
+ try
+ {
+ forwardVkCom(ld.outputBlobsWrappers, node);
+ }
+ catch (const cv::Exception& e)
+ {
+ CV_LOG_ERROR(NULL, "forwardVkCom failed, fallback to CPU implementation. " << e.what());
+ it->second = Ptr<BackendNode>();
+ forwardLayer(ld);
+ }
+ }
else
{
CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
reader.ReadWeights(bin);
InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
+ #else
+ InferenceEngine::Core& ie = getCore();
+ InferenceEngine::CNNNetwork ieNet = ie.ReadNetwork(xml, bin);
+ #endif
std::vector<String> inputsNames;
+ std::vector<MatShape> inp_shapes;
for (auto& it : ieNet.getInputsInfo())
{
inputsNames.push_back(it.first);
Net cvNet;
cvNet.setInputsNames(inputsNames);
- Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
- backendNode->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
- for (auto& it : ieNet.getOutputsInfo())
+ // set empty input to determine input shapes
+ for (int inp_id = 0; inp_id < inputsNames.size(); ++inp_id)
+ {
+ cvNet.setInput(Mat(inp_shapes[inp_id], CV_32F), inputsNames[inp_id]);
+ }
+
+ Ptr<BackendNode> backendNode;
+ #ifdef HAVE_DNN_NGRAPH
+ if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
+ {
+ auto fake_node = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{});
+ Ptr<InfEngineNgraphNode> backendNodeNGraph(new InfEngineNgraphNode(fake_node));
+ backendNodeNGraph->net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet(ieNet));
+ backendNode = backendNodeNGraph;
+ }
+ else
+ #endif
{
- Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
- InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(it.first.c_str());
- CV_Assert(ieLayer);
+ Ptr<InfEngineBackendNode> backendNodeNN(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
+ backendNodeNN->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
+ backendNode = backendNodeNN;
+ }
+
+ for (auto& it : ieNet.getOutputsInfo())
+ {
LayerParams lp;
int lid = cvNet.addLayer(it.first, "", lp);
switch (prefBackend) {
case DNN_BACKEND_DEFAULT: backend = "DEFAULT/"; break;
case DNN_BACKEND_HALIDE: backend = "HALIDE/"; break;
- case DNN_BACKEND_INFERENCE_ENGINE: backend = "DLIE/"; break;
+ case DNN_BACKEND_INFERENCE_ENGINE: // fallthru
+ case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: backend = "DLIE/"; break;
+ case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "NGRAPH/"; break;
case DNN_BACKEND_OPENCV: backend = "OCV/"; break;
+ case DNN_BACKEND_CUDA: backend = "CUDA/"; break;
}
out << "digraph G {" << '\n';
// Add nodes
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+
#include <opencv2/dnn/shape_utils.hpp>
#ifdef HAVE_OPENCL
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return (backendId == DNN_BACKEND_OPENCV) ||
+ backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_HALIDE && haveHalide()) ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && (preferableTarget == DNN_TARGET_CPU || dims == 4));
+ ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && (preferableTarget == DNN_TARGET_CPU || dims == 4));
}
#ifdef HAVE_OPENCL
//
//M*/
#include "../precomp.hpp"
+#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+#ifdef HAVE_CUDA
+#include "../cuda4dnn/primitives/reshape.hpp"
+using namespace cv::dnn::cuda4dnn;
+#endif
+
namespace cv
{
namespace dnn
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
+ backendId == DNN_BACKEND_CUDA ||
+ ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+#include "../op_vkcom.hpp"
#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding) || // By channels
- (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !padding) ||
- ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && !padding);
++ ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && !padding) ||
+ (backendId == DNN_BACKEND_VKCOM && haveVulkan() && !padding);
}
class ChannelConcatInvoker : public ParallelLoopBody
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
+ return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE ||
++ backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
+ backendId == DNN_BACKEND_CUDA;
}
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+#include "../op_vkcom.hpp"
+
#include "opencv2/core/hal/hal.hpp"
#include "opencv2/core/hal/intrin.hpp"
#include <iostream>
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
+ if (backendId == DNN_BACKEND_CUDA)
+ {
+ /* only 2d and 3d convolutions are supported */
+ if(kernel_size.size() == 2 || kernel_size.size() == 3)
+ return true;
+
+ return false;
+ }
+
#ifdef HAVE_INF_ENGINE
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
if (kernel_size.size() == 3)
return preferableTarget == DNN_TARGET_CPU;
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+#include "../op_vkcom.hpp"
+
#include <opencv2/dnn/shape_utils.hpp>
#include <iostream>
}
#endif // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+ virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+ {
+ auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
+ auto node = func.initNgraphAPI(ieInpNode);
+ return Ptr<BackendNode>(new InfEngineNgraphNode(node));
+ }
+ #endif // HAVE_DNN_NGRAPH
+
+ virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
+ {
+#ifdef HAVE_VULKAN
+ return Ptr<BackendNode>(new VkComBackendNode(inputs, func.initVkCom()));
+#endif // HAVE_VULKAN
+ return Ptr<BackendNode>();
+ }
+
virtual bool tryFuse(Ptr<dnn::Layer>& top) CV_OVERRIDE
{
return func.tryFuse(top);
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
#endif
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
+ backendId == DNN_BACKEND_HALIDE ||
+ backendId == DNN_BACKEND_VKCOM;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
}
#endif // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+ std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+ {
+ if (slope) {
+ auto param = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &slope);
+ return std::make_shared<ngraph::op::PRelu>(node, param);
+ }
+ return std::make_shared<ngraph::op::Relu>(node);
+ }
+ #endif // HAVE_DNN_NGRAPH
+
+#ifdef HAVE_VULKAN
+ std::shared_ptr<vkcom::OpBase> initVkCom()
+ {
+ std::shared_ptr<vkcom::OpBase> op(new vkcom::OpReLU(slope));
+ return op;
+ }
+#endif // HAVE_VULKAN
+
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
void getScaleShift(Mat&, Mat&) const {}
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
+ backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE;
+ backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
}
#endif // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+ std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+ {
+ return std::make_shared<ngraph::op::Clamp>(node, minValue, maxValue);
+ }
+ #endif // HAVE_DNN_NGRAPH
+
+#ifdef HAVE_VULKAN
+ std::shared_ptr<vkcom::OpBase> initVkCom()
+ {
+ // TODO: add vkcom implementation
+ return std::shared_ptr<vkcom::OpBase>();
+ }
+#endif // HAVE_VULKAN
+
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
void getScaleShift(Mat&, Mat&) const {}
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
+ backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE;
+ backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
}
#endif // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+ std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+ {
+ return std::make_shared<ngraph::op::Tanh>(node);
+ }
+ #endif // HAVE_DNN_NGRAPH
+
+#ifdef HAVE_VULKAN
+ std::shared_ptr<vkcom::OpBase> initVkCom()
+ {
+ // TODO: add vkcom implementation
+ return std::shared_ptr<vkcom::OpBase>();
+ }
+#endif // HAVE_VULKAN
+
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
void getScaleShift(Mat&, Mat&) const {}
}
#endif // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+ std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+ {
+ CV_Error(Error::StsNotImplemented, "");
+ }
+ #endif // HAVE_DNN_NGRAPH
+
+#ifdef HAVE_VULKAN
+ std::shared_ptr<vkcom::OpBase> initVkCom()
+ {
+ // TODO: add vkcom implementation
+ return std::shared_ptr<vkcom::OpBase>();
+ }
+#endif // HAVE_VULKAN
+
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
void getScaleShift(Mat&, Mat&) const {}
}
#endif // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+ std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+ {
+ CV_Error(Error::StsNotImplemented, "");
+ }
+ #endif // HAVE_DNN_NGRAPH
+
+#ifdef HAVE_VULKAN
+ std::shared_ptr<vkcom::OpBase> initVkCom()
+ {
+ // TODO: add vkcom implementation
+ return std::shared_ptr<vkcom::OpBase>();
+ }
+#endif // HAVE_VULKAN
+
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
void getScaleShift(Mat&, Mat&) const {}
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
+ backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE;
+ backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
}
#endif // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+ std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+ {
+ return std::make_shared<ngraph::op::Sigmoid>(node);
+ }
+ #endif // HAVE_DNN_NGRAPH
+
+#ifdef HAVE_VULKAN
+ std::shared_ptr<vkcom::OpBase> initVkCom()
+ {
+ // TODO: add vkcom implementation
+ return std::shared_ptr<vkcom::OpBase>();
+ }
+#endif // HAVE_VULKAN
+
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
void getScaleShift(Mat&, Mat&) const {}
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
+ backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE;
++ backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
}
#endif // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+ std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+ {
+ return std::make_shared<ngraph::op::Elu>(node, 1.0);
+ }
+ #endif // HAVE_DNN_NGRAPH
+
+#ifdef HAVE_VULKAN
+ std::shared_ptr<vkcom::OpBase> initVkCom()
+ {
+ // TODO: add vkcom implementation
+ return std::shared_ptr<vkcom::OpBase>();
+ }
+#endif // HAVE_VULKAN
+
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
void getScaleShift(Mat&, Mat&) const {}
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
#endif
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
+ backendId == DNN_BACKEND_HALIDE;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
}
#endif // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+ std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+ {
+ float coeff = -0.999999f;
+ // float coeff = preferableTarget == DNN_TARGET_MYRIAD ? -0.999f : -0.999999f;
+ auto slope = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &coeff);
+ return std::make_shared<ngraph::op::PRelu>(node, slope);
+ }
+ #endif // HAVE_DNN_NGRAPH
+
+#ifdef HAVE_VULKAN
+ std::shared_ptr<vkcom::OpBase> initVkCom()
+ {
+ // TODO: add vkcom implementation
+ return std::shared_ptr<vkcom::OpBase>();
+ }
+#endif // HAVE_VULKAN
+
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
void getScaleShift(Mat&, Mat&) const {}
}
#endif // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+ std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+ {
+ CV_Error(Error::StsNotImplemented, "");
+ }
+ #endif // HAVE_DNN_NGRAPH
+
+#ifdef HAVE_VULKAN
+ std::shared_ptr<vkcom::OpBase> initVkCom()
+ {
+ // TODO: add vkcom implementation
+ return std::shared_ptr<vkcom::OpBase>();
+ }
+#endif // HAVE_VULKAN
+
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
void getScaleShift(Mat&, Mat&) const {}
bool supportBackend(int backendId, int targetId)
{
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return (targetId != DNN_TARGET_OPENCL && targetId != DNN_TARGET_OPENCL_FP16) || power == 1.0 || power == 0.5;
++ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
++ return true;
else
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
+ backendId == DNN_BACKEND_HALIDE;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
}
#endif // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+ std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+ {
+ auto scale_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+ ngraph::Shape{1}, &scale);
+ auto shift_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+ ngraph::Shape{1}, &shift);
+ auto power_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+ ngraph::Shape{1}, &power);
+
+ auto mul = std::make_shared<ngraph::op::v1::Multiply>(scale_node, node, ngraph::op::AutoBroadcastType::NUMPY);
+ auto scale_shift = std::make_shared<ngraph::op::v1::Add>(mul, shift_node, ngraph::op::AutoBroadcastType::NUMPY);
+ return std::make_shared<ngraph::op::v1::Power>(scale_shift, power_node, ngraph::op::AutoBroadcastType::NUMPY);
+ }
+ #endif // HAVE_DNN_NGRAPH
+
+#ifdef HAVE_VULKAN
+ std::shared_ptr<vkcom::OpBase> initVkCom()
+ {
+ // TODO: add vkcom implementation
+ return std::shared_ptr<vkcom::OpBase>();
+ }
+#endif // HAVE_VULKAN
+
bool tryFuse(Ptr<dnn::Layer>& top)
{
if (power != 1.0f && shift != 0.0f)
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
+ backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE;
+ backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
}
#endif // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+ std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+ {
+ const size_t numChannels = scale.total();
+ auto slope = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{numChannels}, scale.data);
+ return std::make_shared<ngraph::op::PRelu>(node, slope);
+ }
+ #endif // HAVE_DNN_NGRAPH
+
+#ifdef HAVE_VULKAN
+ std::shared_ptr<vkcom::OpBase> initVkCom()
+ {
+ // TODO: add vkcom implementation
+ return std::shared_ptr<vkcom::OpBase>();
+ }
+#endif // HAVE_VULKAN
bool tryFuse(Ptr<dnn::Layer>&) { return false; }
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_HALIDE ||
+ (backendId == DNN_BACKEND_CUDA && op != DIV) || // TODO: not implemented, see PR #15811
+ (backendId == DNN_BACKEND_HALIDE && op != DIV) || // TODO: not implemented, see PR #15811
- (backendId == DNN_BACKEND_INFERENCE_ENGINE && !variableChannels &&
- (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty()));
+ ((((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty()))
+ || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && !variableChannels));
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+
#include <float.h>
#include <algorithm>
#include <opencv2/dnn/shape_utils.hpp>
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
+ backendId == DNN_BACKEND_CUDA ||
+ ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
++#include "../ie_ngraph.hpp"
+
#include <opencv2/dnn/shape_utils.hpp>
#ifdef HAVE_OPENCL
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1) ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && axis == 1);
+ ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && axis == 1);
}
virtual bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+#include "../op_vkcom.hpp"
+
#include "opencv2/imgproc.hpp"
#include "opencv2/dnn/shape_utils.hpp"
#include "opencv2/core/hal/hal.hpp"
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
return bias == (int)bias;
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
+ }
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
+ return type == CHANNEL_NRM && bias == (int)bias;
+ }
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
+ backendId == DNN_BACKEND_HALIDE ||
+ (backendId == DNN_BACKEND_VKCOM && haveVulkan() && (size % 2 == 1) && (type == CHANNEL_NRM));
}
#ifdef HAVE_OPENCL
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+#ifdef HAVE_CUDA
+#include "../cuda4dnn/primitives/normalize_bbox.hpp"
+using namespace cv::dnn::cuda4dnn;
+#endif
+
namespace cv { namespace dnn {
class NormalizeBBoxLayerImpl CV_FINAL : public NormalizeBBoxLayer
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+
#include <vector>
+#ifdef HAVE_CUDA
+#include "../cuda4dnn/primitives/padding.hpp"
+using namespace cv::dnn::cuda4dnn;
+#endif
+
namespace cv
{
namespace dnn
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+#include "../op_vkcom.hpp"
++
#include <float.h>
#include <algorithm>
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
+ backendId == DNN_BACKEND_CUDA ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()) ||
++ ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()) ||
+ (backendId == DNN_BACKEND_VKCOM && haveVulkan());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "opencv2/core/hal/intrin.hpp"
+#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
+
+ #ifdef HAVE_DNN_NGRAPH
+ #include "../ie_ngraph.hpp"
+ #include <ngraph/op/experimental/layers/roi_pooling.hpp>
+ #include <ngraph/op/experimental/layers/psroi_pooling.hpp>
+ #endif
+
+#include "../op_vkcom.hpp"
++
#include <float.h>
#include <algorithm>
#include <numeric>
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+ if (backendId == DNN_BACKEND_CUDA)
+ {
+ return type == MAX || type == AVE;
+ }
- else if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
++ else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (computeMaxIdx)
return false;
return false;
#endif
}
+ else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
+ return type != STOCHASTIC;
+ }
else
- return (kernel_size.size() == 3 && backendId == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU) ||
- ((kernel_size.empty() || kernel_size.size() == 2) && (backendId == DNN_BACKEND_OPENCV ||
- (backendId == DNN_BACKEND_HALIDE && haveHalide() &&
- (type == MAX || (type == AVE && !pad_t && !pad_l && !pad_b && !pad_r)))));
+ {
+ if (kernel_size.size() == 3)
+ return (backendId == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU);
+ if (kernel_size.empty() || kernel_size.size() == 2)
+ return backendId == DNN_BACKEND_OPENCV ||
+ (backendId == DNN_BACKEND_HALIDE && haveHalide() &&
+ (type == MAX || (type == AVE && !pad_t && !pad_l && !pad_b && !pad_r))) ||
+ (backendId == DNN_BACKEND_VKCOM && haveVulkan() &&
+ (type == MAX || type == AVE));
+ else
+ return false;
+ }
}
#ifdef HAVE_OPENCL
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
+
+ #ifdef HAVE_DNN_NGRAPH
+ #include "../ie_ngraph.hpp"
+ #include <ngraph/op/experimental/layers/prior_box.hpp>
+ #include <ngraph/op/experimental/layers/prior_box_clustered.hpp>
+ #endif
+
+#include "../op_vkcom.hpp"
++
#include <float.h>
#include <algorithm>
#include <cmath>
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
+ #ifdef HAVE_DNN_NGRAPH
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return _explicitSizes || _stepX == _stepY;
+ #endif
return backendId == DNN_BACKEND_OPENCV ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() &&
+ backendId == DNN_BACKEND_CUDA ||
- ( _explicitSizes || (_minSize.size() == 1 && _maxSize.size() <= 1)));
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() &&
+ ( _explicitSizes || (_minSize.size() == 1 && _maxSize.size() <= 1)))
+ || (backendId == DNN_BACKEND_VKCOM && haveVulkan());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
//M*/
#include "../precomp.hpp"
+#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
+
++#ifdef HAVE_DNN_NGRAPH
++#include "../ie_ngraph.hpp"
++#include <ngraph/op/experimental/layers/reorg_yolo.hpp>
++#endif
++
#include <opencv2/dnn/shape_utils.hpp>
#include <opencv2/dnn/all_layers.hpp>
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE;
++ backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
+ backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
#ifdef HAVE_OPENCL
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+
#include <opencv2/dnn/shape_utils.hpp>
+#ifdef HAVE_CUDA
+#include "../cuda4dnn/primitives/reshape.hpp"
+using namespace cv::dnn::cuda4dnn;
+#endif
+
namespace cv
{
namespace dnn
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
+ backendId == DNN_BACKEND_CUDA ||
+ ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
#include "../op_inf_engine.hpp"
#include <opencv2/imgproc.hpp>
+ #ifdef HAVE_DNN_NGRAPH
+ #include "../ie_ngraph.hpp"
+ #include <ngraph/op/experimental/layers/interpolate.hpp>
+ #endif
+
+#ifdef HAVE_CUDA
+#include "../cuda4dnn/primitives/resize.hpp"
+using namespace cv::dnn::cuda4dnn;
+#endif
+
namespace cv { namespace dnn {
class ResizeLayerImpl : public ResizeLayer
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
+ if (backendId == DNN_BACKEND_CUDA)
+ return interpolation == "nearest" || interpolation == "bilinear";
+
#ifdef HAVE_INF_ENGINE
- if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
+ backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
return (interpolation == "nearest" && scaleWidth == scaleHeight) ||
(interpolation == "bilinear");
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV;
+ #ifdef HAVE_INF_ENGINE
+ if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019
+ || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return true;
+ #endif
- backendId == DNN_BACKEND_INFERENCE_ENGINE ||
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA;
}
virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+
#include <opencv2/dnn/shape_utils.hpp>
+#ifdef HAVE_CUDA
+#include "../cuda4dnn/primitives/scale_shift.hpp"
+using namespace cv::dnn::cuda4dnn;
+#endif
+
namespace cv
{
namespace dnn
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
+ backendId == DNN_BACKEND_HALIDE ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE && axis == 1);
+ ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && axis == 1);
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
//M*/
#include "../precomp.hpp"
+#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+
#include "layers_common.hpp"
#include <opencv2/dnn/shape_utils.hpp>
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE &&
+ backendId == DNN_BACKEND_CUDA ||
+ ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
#ifdef HAVE_INF_ENGINE
INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
#endif
#include "../precomp.hpp"
#include "layers_common.hpp"
+#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
+#include "../op_vkcom.hpp"
+
#include <algorithm>
#include <stdlib.h>
using std::max;
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1) ||
- (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !logSoftMax) ||
- ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && !logSoftMax);
++ ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && !logSoftMax) ||
+ (backendId == DNN_BACKEND_VKCOM && haveVulkan());
}
#ifdef HAVE_OPENCL
#ifdef HAVE_INF_ENGINE
-CV__DNN_EXPERIMENTAL_NS_BEGIN
+ static Backend parseInferenceEngineBackendType(const cv::String& backend)
+ {
+ CV_Assert(!backend.empty());
+ if (backend == CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+ if (backend == CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API)
+ return DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
+ CV_Error(Error::StsBadArg, cv::format("Unknown IE backend: %s", backend.c_str()));
+ }
+ static const char* dumpInferenceEngineBackendType(Backend backend)
+ {
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ return CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+ return CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API;
+ CV_Error(Error::StsBadArg, cv::format("Invalid backend ID for IE: %d", backend));
+ }
+ Backend& getInferenceEngineBackendTypeParam()
+ {
+ static Backend param = parseInferenceEngineBackendType(
+ utils::getConfigurationParameterString("OPENCV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019_TYPE",
+ #ifdef HAVE_NGRAPH
+ CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API // future: CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH
+ #else
+ CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API
+ #endif
+ )
+ );
+ return param;
+ }
+
-CV__DNN_EXPERIMENTAL_NS_END
++CV__DNN_INLINE_NS_BEGIN
+
+ cv::String getInferenceEngineBackendType()
+ {
+ return dumpInferenceEngineBackendType(getInferenceEngineBackendTypeParam());
+ }
+ cv::String setInferenceEngineBackendType(const cv::String& newBackendType)
+ {
+ Backend newBackend = parseInferenceEngineBackendType(newBackendType);
+ Backend& param = getInferenceEngineBackendTypeParam();
+ Backend old = param;
+ param = newBackend;
+ return dumpInferenceEngineBackendType(old);
+ }
+
++CV__DNN_INLINE_NS_END
+
// For networks whose input layer has an empty name, IE generates a name like id[some_number].
// OpenCV lets users use an empty input name, so to prevent unexpected naming
// we use a predefined name.
InferenceEngine::CNNNetwork t_net;
};
-CV__DNN_EXPERIMENTAL_NS_BEGIN
++
+ class InfEngineExtension : public InferenceEngine::IExtension
+ {
+ public:
+ virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
+ virtual void Unload() noexcept {}
+ virtual void Release() noexcept {}
+ virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
+
+ virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
+ InferenceEngine::ResponseDesc*) noexcept
+ {
+ return InferenceEngine::StatusCode::OK;
+ }
+
+ InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
+ const InferenceEngine::CNNLayer* cnnLayer,
+ InferenceEngine::ResponseDesc* resp) noexcept;
+ };
+
+
+CV__DNN_INLINE_NS_BEGIN
bool isMyriadX();
-CV__DNN_EXPERIMENTAL_NS_END
+CV__DNN_INLINE_NS_END
+ InferenceEngine::Core& getCore();
+
+ template<typename T = size_t>
+ static inline std::vector<T> getShape(const Mat& mat)
+ {
+ std::vector<T> result(mat.dims);
+ for (int i = 0; i < mat.dims; i++)
+ result[i] = (T)mat.size[i];
+ return result;
+ }
+
+
#endif // HAVE_INF_ENGINE
bool haveInfEngine();
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X "dnn_skip_ie_myriadx"
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X
+#define CV_TEST_TAG_DNN_SKIP_VULKAN "dnn_skip_vulkan"
+
+#define CV_TEST_TAG_DNN_SKIP_CUDA "dnn_skip_cuda"
+#define CV_TEST_TAG_DNN_SKIP_CUDA_FP16 "dnn_skip_cuda_fp16"
+#define CV_TEST_TAG_DNN_SKIP_CUDA_FP32 "dnn_skip_cuda_fp32"
+
+
+ #ifdef HAVE_INF_ENGINE
+ #if INF_ENGINE_VER_MAJOR_EQ(2018050000)
+ # define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2018R5
+ #elif INF_ENGINE_VER_MAJOR_EQ(2019010000)
+ # if INF_ENGINE_RELEASE < 2019010100
+ # define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1
+ # else
+ # define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1
+ # endif
+ #elif INF_ENGINE_VER_MAJOR_EQ(2019020000)
+ # define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R2
+ #elif INF_ENGINE_VER_MAJOR_EQ(2019030000)
+ # define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R3
+ #endif
+ #endif // HAVE_INF_ENGINE
+
+ #ifndef CV_TEST_TAG_DNN_SKIP_IE_VERSION
+ # define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE
+ #endif
+
+
namespace cv { namespace dnn {
-CV__DNN_EXPERIMENTAL_NS_BEGIN
+CV__DNN_INLINE_NS_BEGIN
void PrintTo(const cv::dnn::Backend& v, std::ostream* os);
void PrintTo(const cv::dnn::Target& v, std::ostream* os);
bool withInferenceEngine = true,
bool withHalide = false,
bool withCpuOCV = true,
- bool withCUDA = true
+ bool withVkCom = true,
++ bool withCUDA = true,
+ bool withNgraph = true
);
+ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargetsIE();
+
class DNNTestLayer : public TestWithParam<tuple<Backend, Target> >
{
void expectNoFallbacksFromIE(Net& net)
{
- if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
expectNoFallbacks(net);
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ expectNoFallbacks(net, false);
}
+ void expectNoFallbacksFromCUDA(Net& net)
+ {
+ if (backend == DNN_BACKEND_CUDA)
+ expectNoFallbacks(net);
+ }
+
protected:
void checkBackend(Mat* inp = 0, Mat* ref = 0)
{
void PrintTo(const cv::dnn::Backend& v, std::ostream* os)
{
switch (v) {
- case DNN_BACKEND_DEFAULT: *os << "DEFAULT"; return;
- case DNN_BACKEND_HALIDE: *os << "HALIDE"; return;
- case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE*"; return;
- case DNN_BACKEND_OPENCV: *os << "OCV"; return;
- case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: *os << "DLIE"; return;
- case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: *os << "NGRAPH"; return;
- default: /* do nothing */;
+ case DNN_BACKEND_DEFAULT: *os << "DEFAULT"; return;
+ case DNN_BACKEND_HALIDE: *os << "HALIDE"; return;
- case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE"; return;
++ case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE*"; return;
+ case DNN_BACKEND_VKCOM: *os << "VKCOM"; return;
+ case DNN_BACKEND_OPENCV: *os << "OCV"; return;
+ case DNN_BACKEND_CUDA: *os << "CUDA"; return;
++ case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: *os << "DLIE"; return;
++ case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: *os << "NGRAPH"; return;
} // don't use "default:" to emit compiler warnings
*os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")";
}
bool withInferenceEngine /*= true*/,
bool withHalide /*= false*/,
bool withCpuOCV /*= true*/,
- bool withCUDA /*= true*/
+ bool withVkCom /*= true*/,
++ bool withCUDA /*= true*/,
+ bool withNgraph /*= true*/
)
{
#ifdef HAVE_INF_ENGINE
infRequest.Infer();
}
- void runCV(Target target, const std::string& xmlPath, const std::string& binPath,
-std::vector<String> getOutputsNames(const Net& net)
-{
- std::vector<String> names;
- if (names.empty())
- {
- std::vector<int> outLayers = net.getUnconnectedOutLayers();
- std::vector<String> layersNames = net.getLayerNames();
- names.resize(outLayers.size());
- for (size_t i = 0; i < outLayers.size(); ++i)
- names[i] = layersNames[outLayers[i] - 1];
- }
- return names;
-}
-
+ void runCV(Backend backendId, Target targetId, const std::string& xmlPath, const std::string& binPath,
const std::map<std::string, cv::Mat>& inputsMap,
std::map<std::string, cv::Mat>& outputsMap)
{
Net net = readNet(xmlPath, binPath);
for (auto& it : inputsMap)
net.setInput(it.second, it.first);
- net.setPreferableTarget(target);
+
+ net.setPreferableBackend(backendId);
+ net.setPreferableTarget(targetId);
- std::vector<String> outNames = getOutputsNames(net);
+ std::vector<String> outNames = net.getUnconnectedOutLayersNames();
std::vector<Mat> outs;
net.forward(outs, outNames);
{
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
- if (backend == DNN_BACKEND_INFERENCE_ENGINE)
- applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ if(backend == DNN_BACKEND_CUDA)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* Proposal layer is unsupported */
Net net = readNetFromCaffe(_tf("net_faster_rcnn_proposal.prototxt"));
TEST_P(Test_ONNX_layers, Convolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
- if(backend == DNN_BACKEND_INFERENCE_ENGINE)
- throw SkipTestException("Test is enabled starts from 2019R1");
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
- if (target != DNN_TARGET_CPU)
- throw SkipTestException("Only CPU is supported");
+ if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
+ throw SkipTestException("Only CPU and CUDA is supported");
testONNXModels("conv3d");
testONNXModels("conv3d_bias");
}
TEST_P(Test_ONNX_layers, Deconvolution3D)
{
- #if defined(INF_ENGINE_RELEASE)
- applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+ #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
- if ((backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU) && backend != DNN_BACKEND_CUDA)
- throw SkipTestException("Only DLIE backend on CPU, and CUDA is supported");
- if (backend == DNN_BACKEND_OPENCV || target != DNN_TARGET_CPU)
++ if (backend == DNN_BACKEND_CUDA)
++ {
++ // ok
++ }
++ else if (backend == DNN_BACKEND_OPENCV || target != DNN_TARGET_CPU)
+ throw SkipTestException("Only DLIE backend on CPU is supported");
testONNXModels("deconv3d");
testONNXModels("deconv3d_bias");
testONNXModels("deconv3d_pad");
TEST_P(Test_ONNX_layers, ReduceMean3D)
{
- if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
- throw SkipTestException("Only CPU and CUDA is supported");
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++ if (backend == DNN_BACKEND_CUDA)
++ {
++ // ok
++ }
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
- if (target != DNN_TARGET_CPU)
++ else if (target != DNN_TARGET_CPU)
+ throw SkipTestException("Only CPU is supported");
++
testONNXModels("reduce_mean3d");
}
TEST_P(Test_ONNX_layers, MaxPooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
- throw SkipTestException("Test is enabled starts from 2019R1");
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
- if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
- throw SkipTestException("Only CPU and CUDA is supported");
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++ if (backend == DNN_BACKEND_CUDA)
++ {
++ // ok
++ }
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
- if (target != DNN_TARGET_CPU)
++ else if (target != DNN_TARGET_CPU)
+ throw SkipTestException("Only CPU is supported");
testONNXModels("max_pool3d", npy, 0, 0, false, false);
}
TEST_P(Test_ONNX_layers, AvePooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
- throw SkipTestException("Test is enabled starts from 2019R1");
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
- if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
- throw SkipTestException("Only CPU and CUDA is supported");
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++ if (backend == DNN_BACKEND_CUDA)
++ {
++ // ok
++ }
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
- if (target != DNN_TARGET_CPU)
++ else if (target != DNN_TARGET_CPU)
+ throw SkipTestException("Only CPU is supported");
testONNXModels("ave_pool3d");
}
TEST_P(Test_ONNX_layers, PoolConv3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
- throw SkipTestException("Test is enabled starts from 2019R1");
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
- if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
- throw SkipTestException("Only CPU and CUDA is supported");
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++ if (backend == DNN_BACKEND_CUDA)
++ {
++ // ok
++ }
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
- if (target != DNN_TARGET_CPU)
++ else if (target != DNN_TARGET_CPU)
+ throw SkipTestException("Only CPU is supported");
testONNXModels("pool_conv_3d");
}
TEST_P(Test_ONNX_nets, Resnet34_kinetics)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
- throw SkipTestException("Test is enabled starts from 2019R1");
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
- if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
- throw SkipTestException("Only CPU and CUDA is supported");
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++ if (backend == DNN_BACKEND_CUDA)
++ {
++ // ok
++ }
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
- if (target != DNN_TARGET_CPU)
++ else if (target != DNN_TARGET_CPU)
+ throw SkipTestException("Only CPU is supported");
String onnxmodel = findDataFile("dnn/resnet-34_kinetics.onnx", false);
Mat image0 = imread(findDataFile("dnn/dog416.png"));
TEST_P(Test_TensorFlow_layers, Convolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
- throw SkipTestException("Test is enabled starts from 2019R1");
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
- if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
- throw SkipTestException("Only CPU and CUDA is supported");
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++ if (backend == DNN_BACKEND_CUDA)
++ {
++ // ok
++ }
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
- if (target != DNN_TARGET_CPU)
++ else if (target != DNN_TARGET_CPU)
+ throw SkipTestException("Only CPU is supported");
++
runTensorFlowNet("conv3d");
}
TEST_P(Test_TensorFlow_layers, MaxPooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
- throw SkipTestException("Test is enabled starts from 2019R1");
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
- if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
- throw SkipTestException("Only CPU and CUDA is supported");
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++ if (backend == DNN_BACKEND_CUDA)
++ {
++ // ok
++ }
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
- if (target != DNN_TARGET_CPU)
++ else if (target != DNN_TARGET_CPU)
+ throw SkipTestException("Only CPU is supported");
++
runTensorFlowNet("max_pool3d");
}
TEST_P(Test_TensorFlow_layers, AvePooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
- throw SkipTestException("Test is enabled starts from 2019R1");
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
- if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
- throw SkipTestException("Only CPU and CUDA is supported");
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++ if (backend == DNN_BACKEND_CUDA)
++ {
++ // ok
++ }
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
- if (target != DNN_TARGET_CPU)
++ else if (target != DNN_TARGET_CPU)
+ throw SkipTestException("Only CPU is supported");
++
runTensorFlowNet("ave_pool3d");
}
TEST_P(Test_TensorFlow_layers, lstm)
{
- if (backend == DNN_BACKEND_INFERENCE_ENGINE)
- applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+ if (backend == DNN_BACKEND_CUDA)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* not supported */
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
runTensorFlowNet("lstm", true);