Merge remote-tracking branch 'upstream/3.4' into merge-3.4
author Alexander Alekhin <alexander.alekhin@intel.com>
Mon, 2 Dec 2019 13:18:07 +0000 (16:18 +0300)
committer Alexander Alekhin <alexander.alekhin@intel.com>
Mon, 2 Dec 2019 13:26:52 +0000 (16:26 +0300)
44 files changed:
CMakeLists.txt
cmake/OpenCVDetectInferenceEngine.cmake
modules/calib3d/include/opencv2/calib3d.hpp
modules/dnn/CMakeLists.txt
modules/dnn/include/opencv2/dnn/dnn.hpp
modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
modules/dnn/include/opencv2/dnn/version.hpp
modules/dnn/src/dnn.cpp
modules/dnn/src/layers/batch_norm_layer.cpp
modules/dnn/src/layers/blank_layer.cpp
modules/dnn/src/layers/concat_layer.cpp
modules/dnn/src/layers/const_layer.cpp
modules/dnn/src/layers/convolution_layer.cpp
modules/dnn/src/layers/detection_output_layer.cpp
modules/dnn/src/layers/elementwise_layers.cpp
modules/dnn/src/layers/eltwise_layer.cpp
modules/dnn/src/layers/flatten_layer.cpp
modules/dnn/src/layers/fully_connected_layer.cpp
modules/dnn/src/layers/lrn_layer.cpp
modules/dnn/src/layers/normalize_bbox_layer.cpp
modules/dnn/src/layers/padding_layer.cpp
modules/dnn/src/layers/permute_layer.cpp
modules/dnn/src/layers/pooling_layer.cpp
modules/dnn/src/layers/prior_box_layer.cpp
modules/dnn/src/layers/reorg_layer.cpp
modules/dnn/src/layers/reshape_layer.cpp
modules/dnn/src/layers/resize_layer.cpp
modules/dnn/src/layers/scale_layer.cpp
modules/dnn/src/layers/slice_layer.cpp
modules/dnn/src/layers/softmax_layer.cpp
modules/dnn/src/op_inf_engine.cpp
modules/dnn/src/op_inf_engine.hpp
modules/dnn/src/tensorflow/tf_importer.cpp
modules/dnn/test/test_backends.cpp
modules/dnn/test/test_caffe_importer.cpp
modules/dnn/test/test_common.hpp
modules/dnn/test/test_common.impl.hpp
modules/dnn/test/test_ie_models.cpp
modules/dnn/test/test_layers.cpp
modules/dnn/test/test_misc.cpp
modules/dnn/test/test_onnx_importer.cpp
modules/dnn/test/test_tf_importer.cpp
modules/dnn/test/test_torch_importer.cpp
modules/ts/include/opencv2/ts.hpp

diff --cc CMakeLists.txt
Simple merge
@@@ -80,8 -74,8 +80,9 @@@ endif(
  
  set(include_dirs ${fw_inc})
  set(sources_options "")
  set(libs libprotobuf ${LAPACK_LIBRARIES})
 +
  if(OPENCV_DNN_OPENCL AND HAVE_OPENCL)
    list(APPEND include_dirs ${OPENCL_INCLUDE_DIRS})
  else()
@@@ -67,12 -74,18 +67,17 @@@ CV__DNN_INLINE_NS_BEGI
          //! DNN_BACKEND_DEFAULT is equal to DNN_BACKEND_INFERENCE_ENGINE if
          //! OpenCV is built with Intel's Inference Engine library, or to
          //! DNN_BACKEND_OPENCV otherwise.
-         DNN_BACKEND_DEFAULT,
+         DNN_BACKEND_DEFAULT = 0,
          DNN_BACKEND_HALIDE,
-         DNN_BACKEND_INFERENCE_ENGINE,  //!< Intel's Inference Engine computational backend.
+         DNN_BACKEND_INFERENCE_ENGINE,            //!< Intel's Inference Engine computational backend
+                                                  //!< @sa setInferenceEngineBackendType
          DNN_BACKEND_OPENCV,
 -        // OpenCV 4.x: DNN_BACKEND_VKCOM,
 -        // OpenCV 4.x: DNN_BACKEND_CUDA,
 -
 +        DNN_BACKEND_VKCOM,
-         DNN_BACKEND_CUDA
++        DNN_BACKEND_CUDA,
+ #ifdef __OPENCV_BUILD
+         DNN_BACKEND_INFERENCE_ENGINE_NGRAPH = 1000000,     // internal - use DNN_BACKEND_INFERENCE_ENGINE + setInferenceEngineBackendType()
+         DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019,      // internal - use DNN_BACKEND_INFERENCE_ENGINE + setInferenceEngineBackendType()
+ #endif
      };
  
      /**
  
          virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs);
  
+         virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs, const std::vector<Ptr<BackendNode> >& nodes);
 +        virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs);
 +
 +        /**
 +         * @brief Returns a CUDA backend node
 +         *
 +         * @param   context  void pointer to CSLContext object
 +         * @param   inputs   layer inputs
 +         * @param   outputs  layer outputs
 +         */
 +        virtual Ptr<BackendNode> initCUDA(
 +            void *context,
 +            const std::vector<Ptr<BackendWrapper>>& inputs,
 +            const std::vector<Ptr<BackendWrapper>>& outputs
 +        );
 +
         /**
          * @brief Automatic Halide scheduling based on layer hyper-parameters.
          * @param[in] node Backend node with Halide functions.
  #include "../dnn.hpp"
  
  namespace cv { namespace dnn {
 -CV__DNN_EXPERIMENTAL_NS_BEGIN
 +CV__DNN_INLINE_NS_BEGIN
  
  
+ /* Values for 'OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE' parameter */
+ #define CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API     "NN_BUILDER"
+ #define CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH             "NGRAPH"
+ /** @brief Returns Inference Engine internal backend API.
+  *
+  * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
+  *
+  * Default value is controlled through `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable).
+  */
+ CV_EXPORTS_W cv::String getInferenceEngineBackendType();
+ /** @brief Specify Inference Engine internal backend API.
+  *
+  * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
+  *
+  * @returns previous value of internal backend API
+  */
+ CV_EXPORTS_W cv::String setInferenceEngineBackendType(const cv::String& newBackendType);
  /** @brief Release a Myriad device (bound by OpenCV).
   *
   * A single Myriad device cannot be shared across multiple processes that use
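
A minimal selection sketch for the new API switch introduced above (assuming an OpenCV build with Inference Engine support; the IR file names are hypothetical):

    #include <opencv2/dnn.hpp>

    // Pick the nGraph-based API before any network is created; the call
    // returns the previously active backend type.
    cv::String prev = cv::dnn::setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
    cv::dnn::Net net = cv::dnn::readNet("model.xml", "model.bin");  // hypothetical IR files
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);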
index 9b1c6e1,0000000..6dd2c39
mode 100644,000000..100644
--- /dev/null
@@@ -1,21 -1,0 +1,21 @@@
- #define OPENCV_DNN_API_VERSION 20191111
 +// This file is part of OpenCV project.
 +// It is subject to the license terms in the LICENSE file found in the top-level directory
 +// of this distribution and at http://opencv.org/license.html.
 +
 +#ifndef OPENCV_DNN_VERSION_HPP
 +#define OPENCV_DNN_VERSION_HPP
 +
 +/// Use with major OpenCV version only.
++#define OPENCV_DNN_API_VERSION 20191202
 +
 +#if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_INLINE_NS
 +#define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)
 +#define CV__DNN_INLINE_NS_BEGIN namespace CV__DNN_INLINE_NS {
 +#define CV__DNN_INLINE_NS_END }
 +namespace cv { namespace dnn { namespace CV__DNN_INLINE_NS { } using namespace CV__DNN_INLINE_NS; }}
 +#else
 +#define CV__DNN_INLINE_NS_BEGIN
 +#define CV__DNN_INLINE_NS_END
 +#endif
 +
 +#endif  // OPENCV_DNN_VERSION_HPP
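
In a regular build the macros above pin every cv::dnn symbol to a versioned namespace. Roughly, the expansion looks like this (a sketch, not generated output):

    namespace cv { namespace dnn {
    namespace dnn4_v20191202 {           // CV__DNN_INLINE_NS with the bumped API version
        class CV_EXPORTS_W Net;          // real declarations land here
    }
    using namespace dnn4_v20191202;      // user code keeps writing cv::dnn::Net
    }}

Symbol names are mangled with dnn4_v20191202, so binaries built against different DNN API versions can coexist while source code stays unchanged.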
  #include "precomp.hpp"
  #include "op_halide.hpp"
  #include "op_inf_engine.hpp"
+ #include "ie_ngraph.hpp"
 +#include "op_vkcom.hpp"
 +#include "op_cuda.hpp"
  #include "halide_scheduler.hpp"
 +
  #include <set>
  #include <algorithm>
  #include <iostream>
@@@ -1010,39 -1016,25 +1034,48 @@@ static Ptr<BackendWrapper> wrapMat(int 
          return Ptr<BackendWrapper>(new HalideBackendWrapper(targetId, m));
  #endif  // HAVE_HALIDE
      }
-     else if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+     else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
      {
-         CV_Assert(haveInfEngine());
  #ifdef HAVE_INF_ENGINE
          return Ptr<BackendWrapper>(new InfEngineBackendWrapper(targetId, m));
- #endif  // HAVE_INF_ENGINE
+ #else
+         CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine API support");
+ #endif
+     }
+     else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+     {
+ #ifdef HAVE_DNN_NGRAPH
+         return Ptr<BackendWrapper>(new NgraphBackendWrapper(targetId, m));
+ #else
+         CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
+ #endif
      }
 +    else if (backendId == DNN_BACKEND_VKCOM)
 +    {
 +        CV_Assert(haveVulkan());
 +#ifdef HAVE_VULKAN
 +        return Ptr<BackendWrapper>(new VkComBackendWrapper(m));
 +#endif  // HAVE_VULKAN
 +    }
 +    else if (backendId == DNN_BACKEND_CUDA)
 +    {
 +        CV_Assert(haveCUDA());
 +
 +#ifdef HAVE_CUDA
 +        switch (targetId)
 +        {
 +        case DNN_TARGET_CUDA:
 +            return CUDABackendWrapperFP32::create(m);
 +        case DNN_TARGET_CUDA_FP16:
 +            return CUDABackendWrapperFP16::create(m);
 +        default:
 +            CV_Assert(IS_DNN_CUDA_TARGET(targetId));
 +        }
 +#endif
 +    }
      else
          CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
-     return Ptr<BackendWrapper>();
+     return Ptr<BackendWrapper>();  // TODO Error?
  }
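
The new DNN_BACKEND_CUDA branch in wrapMat() above is reached with a configuration like the following sketch (assumes a build with HAVE_CUDA; the model files are hypothetical):

    cv::dnn::Net net = cv::dnn::readNetFromCaffe("deploy.prototxt", "weights.caffemodel");
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);  // or DNN_TARGET_CUDA_FP16 for half precision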
  
  struct Net::Impl
                    preferableTarget == DNN_TARGET_OPENCL ||
                    preferableTarget == DNN_TARGET_OPENCL_FP16 ||
                    preferableTarget == DNN_TARGET_MYRIAD ||
-                   preferableTarget == DNN_TARGET_FPGA);
+                   preferableTarget == DNN_TARGET_FPGA
+             );
+         }
 +        CV_Assert(preferableBackend != DNN_BACKEND_VKCOM ||
 +                  preferableTarget == DNN_TARGET_VULKAN);
 +        CV_Assert(preferableBackend != DNN_BACKEND_CUDA ||
 +                  IS_DNN_CUDA_TARGET(preferableTarget));
          if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
          {
              if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
              CV_Assert(preferableTarget == DNN_TARGET_CPU || IS_DNN_OPENCL_TARGET(preferableTarget));
          else if (preferableBackend == DNN_BACKEND_HALIDE)
              initHalideBackend();
-         else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
+         else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+         {
+ #ifdef HAVE_INF_ENGINE
              initInfEngineBackend();
+ #else
+             CV_Assert(false && "This OpenCV version is built without Inference Engine API support");
+ #endif
+         }
+         else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+         {
+ #ifdef HAVE_DNN_NGRAPH
+             initNgraphBackend();
+ #else
+             CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
+ #endif
+         }
 +        else if (preferableBackend == DNN_BACKEND_VKCOM)
 +            initVkComBackend();
 +        else if (preferableBackend == DNN_BACKEND_CUDA)
 +            initCUDABackend();
          else
              CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
      }
                  ld.skip = false;
              }
          }
+     }
  #endif  // HAVE_INF_ENGINE
+ #ifdef HAVE_DNN_NGRAPH
+     void addNgraphOutputs(LayerData &ld)
+     {
+         CV_TRACE_FUNCTION();
+         Ptr<InfEngineNgraphNet> layerNet;
+         auto it = ld.backendNodes.find(preferableBackend);
+         if (it != ld.backendNodes.end())
+         {
+             Ptr<BackendNode> node = it->second;
+             if (!node.empty())
+             {
+                 Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
+                 CV_Assert(!ieNode.empty()); CV_Assert(!ieNode->net.empty());
+                 layerNet = ieNode->net;
+             }
+         }
+         for (int i = 0; i < ld.inputBlobsId.size(); ++i)
+         {
+             LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
+             Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
+             if (!inpNode.empty())
+             {
+                 Ptr<InfEngineNgraphNode> ieInpNode = inpNode.dynamicCast<InfEngineNgraphNode>();
+                 CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
+                 if (layerNet != ieInpNode->net)
+                 {
+                     ieInpNode->net->addOutput(ieInpNode->node->get_friendly_name());
+                     ieInpNode->net->setUnconnectedNodes(ieInpNode);
+                 }
+             }
+         }
+     }
+     void initNgraphBackend()
+     {
+         CV_TRACE_FUNCTION();
+         CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, haveInfEngine());
+         MapIdToLayerData::iterator it;
+         Ptr<InfEngineNgraphNet> net;
+         for (it = layers.begin(); it != layers.end(); ++it)
+         {
+             LayerData &ld = it->second;
+             if (ld.id == 0)
+             {
+                 CV_Assert((netInputLayer->outNames.empty() && ld.outputBlobsWrappers.size() == 1) ||
+                           (netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
+                 for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
+                 {
+                     InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
+                     dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
+                 }
+             }
+             else
+             {
+                 for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
+                 {
+                     InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
+                     dataPtr->setName(ld.name);
+                 }
+             }
+         }
+         if (skipInfEngineInit)
+         {
+             Ptr<BackendNode> node = layers[lastLayerId].backendNodes[preferableBackend];
+             CV_Assert(!node.empty());
+             Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
+             CV_Assert(!ieNode.empty());
+             for (it = layers.begin(); it != layers.end(); ++it)
+             {
+                 LayerData &ld = it->second;
+                 if (ld.id == 0)
+                 {
+                     for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
+                     {
+                         InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.inputBlobsWrappers[i]);
+                         dataPtr->setName(netInputLayer->outNames[i]);
+                     }
+                 }
+                 else
+                 {
+                     for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
+                     {
+                         InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
+                         dataPtr->setName(ld.name);
+                     }
+                 }
+                 ieNode->net->addBlobs(ld.inputBlobsWrappers);
+                 ieNode->net->addBlobs(ld.outputBlobsWrappers);
+                 ld.skip = true;
+             }
+             layers[lastLayerId].skip = false;
+             ieNode->net->init((Target)preferableTarget);
+             return;
+         }
+         // Build Inference Engine networks from sets of layers that support this
+         // backend. Split the whole model into several Inference Engine networks if
+         // some of the layers are not implemented.
+         for (it = layers.begin(); it != layers.end(); ++it)
+         {
+             LayerData &ld = it->second;
+             if (ld.id == 0 && ld.skip)
+                 continue;
+             bool fused = ld.skip;
+             Ptr<Layer> layer = ld.layerInstance;
+             if (!fused && !layer->supportBackend(preferableBackend))
+             {
+                 addNgraphOutputs(ld);
+                 net = Ptr<InfEngineNgraphNet>();
+                 layer->preferableTarget = DNN_TARGET_CPU;
+                 for (int i = 0; i < ld.inputBlobsId.size(); ++i)
+                 {
+                     LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
+                     Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
+                     if (!inpNode.empty()) {
+                         Ptr<InfEngineNgraphNode> ieNode = inpNode.dynamicCast<InfEngineNgraphNode>();
+                         ieNode->net->setUnconnectedNodes(ieNode);
+                     }
+                 }
+                 continue;
+             }
+             ld.skip = true;  // Initially skip all Inference Engine supported layers.
+             // Create a new network if one of the inputs comes from a different Inference Engine graph.
+             std::vector<Ptr<BackendNode>> inputNodes;
+             for (int i = 0; i < ld.inputBlobsId.size(); ++i)
+             {
+                 // Layer_Test_ROIPooling.Accuracy has 2 inputs with inpLd = 0, 0 -> would give 4 inputNodes (input, rois, input, rois)
+                 if (inputNodes.size() == ld.inputBlobsId.size()) {
+                     break;
+                 }
+                 LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
+                 Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
+                 if (!inpNode.empty())
+                 {
+                      Ptr<InfEngineNgraphNode> ieInpNode = inpNode.dynamicCast<InfEngineNgraphNode>();
+                      CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
+                      if (ieInpNode->net == net && !fused) {
+                         inputNodes.push_back(inpNode);
+                         continue;
+                      }
+                 }
+                 if (net.empty()) {
+                     net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet());
+                 }
+                 if (!fused) {
+                     std::vector<std::string> inputNames;
+                     std::vector<cv::Mat> inputs;
+                     auto curr_pos = inpLd.consumers.begin();
+                     auto compare = [&ld] (const LayerPin& lp) { return lp.lid == ld.id; };
+                     auto cons = curr_pos;
+                     while ((cons = std::find_if(curr_pos, inpLd.consumers.end(), compare)) !=
+                             inpLd.consumers.end()) {
+                         int cons_inp = cons->oid;
+                         Ptr<NgraphBackendWrapper> inpWrapper = inpLd.outputBlobsWrappers[cons_inp].
+                                                                      dynamicCast<NgraphBackendWrapper>();
+                         auto iter = std::find(inputNames.begin(), inputNames.end(),
+                                               inpWrapper->dataPtr->getName());
+                         if (iter == inputNames.end()) {
+                             inputNames.push_back(inpWrapper->dataPtr->getName());
+                             inputs.push_back(inpLd.outputBlobs[cons_inp]);
+                         }
+                         curr_pos = cons + 1;
+                     }
+                     auto inps = net->setInputs(inputs, inputNames);
+                     for (auto& inp : inps) {
+                         inputNodes.emplace_back(Ptr<BackendNode>(new InfEngineNgraphNode(inp)));
+                     }
+                 }
+             }
+             Ptr<BackendNode> node;
+             if (!net.empty())
+             {
+                 if (fused)
+                 {
+                     bool inPlace = ld.inputBlobsId.size() == 1 && ld.outputBlobs.size() == 1 &&
+                                    ld.inputBlobs[0]->data == ld.outputBlobs[0].data;
+                     CV_Assert(inPlace);
+                     node = layers[ld.inputBlobsId[0].lid].backendNodes[preferableBackend];
+                     ld.inputBlobsWrappers = layers[ld.inputBlobsId[0].lid].inputBlobsWrappers;
+                 }
+             }
+             else {
+                 net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet());
+             }
+             if (!fused)
+             {
+                 CV_Assert(!inputNodes.empty());
+                 node = layer->initNgraph(ld.inputBlobsWrappers, inputNodes);
+                 for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
+                 {
+                     InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
+                     node.dynamicCast<InfEngineNgraphNode>()->setName(dataPtr->getName());
+                 }
+             }
+             else if (node.empty())
+                 continue;
+             ld.backendNodes[preferableBackend] = node;
+             Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
+             CV_Assert(!ieNode.empty());
+             ieNode->net = net;
+             if (ld.consumers.empty()) {
+                 // TF EAST_text_detection
+                 ieNode->net->setUnconnectedNodes(ieNode);
+             }
+             ieNode->net->setNodePtr(&ieNode->node);
+             net->addBlobs(ld.inputBlobsWrappers);
+             net->addBlobs(ld.outputBlobsWrappers);
+             addNgraphOutputs(ld);
+         }
+         // Initialize all networks.
+         for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
+         {
+             LayerData &ld = it->second;
+             auto iter = ld.backendNodes.find(preferableBackend);
+             if (iter == ld.backendNodes.end())
+                 continue;
+             Ptr<BackendNode>& node = iter->second;
+             if (node.empty())
+                 continue;
+             Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
+             if (ieNode.empty())
+                 continue;
+             CV_Assert(!ieNode->net.empty());
+             if (!ieNode->net->isInitialized())
+             {
+                 ieNode->net->setUnconnectedNodes(ieNode);
+                 ieNode->net->createNet((Target)preferableTarget);
+                 ld.skip = false;
+             }
+         }
+     }
+ #endif  // HAVE_DNN_NGRAPH
++    void initVkComBackend()
++    {
++        CV_TRACE_FUNCTION();
++        CV_Assert(preferableBackend == DNN_BACKEND_VKCOM);
++#ifdef HAVE_VULKAN
++        if (!haveVulkan())
++            return;
++
++        MapIdToLayerData::iterator it = layers.begin();
++        for (; it != layers.end(); it++)
++        {
++            LayerData &ld = it->second;
++            Ptr<Layer> layer = ld.layerInstance;
++            if (!layer->supportBackend(preferableBackend))
++            {
++                continue;
++            }
++
++            ld.skip = false;
++
++            try
++            {
++                ld.backendNodes[DNN_BACKEND_VKCOM] =
++                    layer->initVkCom(ld.inputBlobsWrappers);
++            }
++            catch (const cv::Exception& e)
++            {
++                CV_LOG_ERROR(NULL, "initVkCom failed, falling back to the CPU implementation. " << e.what());
++                ld.backendNodes[DNN_BACKEND_VKCOM] = Ptr<BackendNode>();
++            }
++        }
++#endif
 +    }
 +
 +    void initCUDABackend() {
 +        CV_Assert(haveCUDA());
 +
 +#ifdef HAVE_CUDA
 +        for (auto& layer : layers)
 +        {
 +            auto& ld = layer.second;
 +            auto& layerInstance = ld.layerInstance;
 +
 +            if (!layerInstance->supportBackend(DNN_BACKEND_CUDA))
 +            {
 +                std::ostringstream os;
 +                os << "CUDA backend will fall back to the CPU implementation for the layer \"" << ld.name
 +                   << "\" of type " << ld.type << '\n';
 +                CV_LOG_INFO(NULL, os.str().c_str());
 +                continue;
 +            }
 +
 +            /* we make a copy so that `initCUDA` doesn't modify `cudaInfo->context` */
 +            auto context = cudaInfo->context;
 +            auto node = layerInstance->initCUDA(&context, ld.inputBlobsWrappers, ld.outputBlobsWrappers);
 +            ld.backendNodes[DNN_BACKEND_CUDA] = node;
 +
 +            auto cudaNode = node.dynamicCast<CUDABackendNode>();
 +            cudaInfo->workspace.require(cudaNode->get_workspace_memory_in_bytes());
 +        }
 +#endif
 +    }
 +
      void allocateLayer(int lid, const LayersShapesMap& layersShapes)
      {
          CV_TRACE_FUNCTION();
  
      void fuseLayers(const std::vector<LayerPin>& blobsToKeep_)
      {
-         if( !fusion || (preferableBackend != DNN_BACKEND_OPENCV &&
-                         preferableBackend != DNN_BACKEND_CUDA &&
-                         preferableBackend != DNN_BACKEND_INFERENCE_ENGINE))
-             return;
          CV_TRACE_FUNCTION();
  
+         if(!fusion || (preferableBackend != DNN_BACKEND_OPENCV &&
++                        preferableBackend != DNN_BACKEND_CUDA &&
+                         preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
+                         preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH))
+            return;
        // scan through all the layers. If there is a convolution layer followed by an activation layer,
        // we try to embed the activation into the convolution and disable the separate execution of the activation
          std::set<LayerPin> pinsToKeep(blobsToKeep_.begin(),
                  {
                      forwardInfEngine(ld.outputBlobsWrappers, node, isAsync);
                  }
+                 else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+                 {
+                     forwardNgraph(ld.outputBlobsWrappers, node, isAsync);
+                 }
 +                else if (preferableBackend == DNN_BACKEND_VKCOM)
 +                {
 +                    try
 +                    {
 +                        forwardVkCom(ld.outputBlobsWrappers, node);
 +                    }
 +                    catch (const cv::Exception& e)
 +                    {
 +                        CV_LOG_ERROR(NULL, "forwardVkCom failed, falling back to the CPU implementation. " << e.what());
 +                        it->second = Ptr<BackendNode>();
 +                        forwardLayer(ld);
 +                    }
 +                }
                  else
                  {
                      CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
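
The VkCom branch above catches kernel failures and re-runs the layer on the CPU path; selecting it from user code mirrors the other backends (a sketch, assuming a HAVE_VULKAN build):

    net.setPreferableBackend(cv::dnn::DNN_BACKEND_VKCOM);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_VULKAN);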
@@@ -2829,9 -2928,12 +3176,13 @@@ Net Net::readFromModelOptimizer(const S
      reader.ReadWeights(bin);
  
      InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
+ #else
+     InferenceEngine::Core& ie = getCore();
+     InferenceEngine::CNNNetwork ieNet = ie.ReadNetwork(xml, bin);
+ #endif
  
      std::vector<String> inputsNames;
 +    std::vector<MatShape> inp_shapes;
      for (auto& it : ieNet.getInputsInfo())
      {
          inputsNames.push_back(it.first);
      Net cvNet;
      cvNet.setInputsNames(inputsNames);
  
-     Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
-     backendNode->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
-     for (auto& it : ieNet.getOutputsInfo())
 +    // set dummy (empty) inputs to determine the input shapes
 +    for (int inp_id = 0; inp_id < inputsNames.size(); ++inp_id)
 +    {
 +        cvNet.setInput(Mat(inp_shapes[inp_id], CV_32F), inputsNames[inp_id]);
 +    }
 +
+     Ptr<BackendNode> backendNode;
+ #ifdef HAVE_DNN_NGRAPH
+     if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
+     {
+         auto fake_node = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{});
+         Ptr<InfEngineNgraphNode> backendNodeNGraph(new InfEngineNgraphNode(fake_node));
+         backendNodeNGraph->net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet(ieNet));
+         backendNode = backendNodeNGraph;
+     }
+     else
+ #endif
      {
-         Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
-         InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(it.first.c_str());
-         CV_Assert(ieLayer);
+         Ptr<InfEngineBackendNode> backendNodeNN(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
+         backendNodeNN->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
+         backendNode = backendNodeNN;
+     }
 +
+     for (auto& it : ieNet.getOutputsInfo())
+     {
          LayerParams lp;
          int lid = cvNet.addLayer(it.first, "", lp);
  
@@@ -3288,9 -3424,10 +3682,11 @@@ String Net::dump(
      switch (prefBackend) {
          case DNN_BACKEND_DEFAULT: backend = "DEFAULT/"; break;
          case DNN_BACKEND_HALIDE: backend = "HALIDE/"; break;
-         case DNN_BACKEND_INFERENCE_ENGINE: backend = "DLIE/"; break;
+         case DNN_BACKEND_INFERENCE_ENGINE: // fallthru
+         case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: backend = "DLIE/"; break;
+         case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "NGRAPH/"; break;
          case DNN_BACKEND_OPENCV: backend = "OCV/"; break;
 +        case DNN_BACKEND_CUDA: backend = "CUDA/"; break;
      }
      out << "digraph G {" << '\n';
      // Add nodes
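
The backend prefix chosen in the switch above appears in each node of the generated Graphviz graph; typical use is a sketch like:

    std::cout << net.dump() << std::endl;  // e.g. render with: dot -Tpng graph.dot -o graph.png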
@@@ -11,9 -11,10 +11,11 @@@ Implementation of Batch Normalization l
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_halide.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
  #include <opencv2/dnn/shape_utils.hpp>
  
  #ifdef HAVE_OPENCL
@@@ -161,9 -157,8 +163,9 @@@ public
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
          return (backendId == DNN_BACKEND_OPENCV) ||
 +               backendId == DNN_BACKEND_CUDA ||
                 (backendId == DNN_BACKEND_HALIDE && haveHalide()) ||
-                (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && (preferableTarget == DNN_TARGET_CPU || dims == 4));
+                ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && (preferableTarget == DNN_TARGET_CPU || dims == 4));
      }
  
  #ifdef HAVE_OPENCL
  //
  //M*/
  #include "../precomp.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
  
 +#ifdef HAVE_CUDA
 +#include "../cuda4dnn/primitives/reshape.hpp"
 +using namespace cv::dnn::cuda4dnn;
 +#endif
 +
  namespace cv
  {
  namespace dnn
@@@ -63,8 -58,7 +64,8 @@@ public
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
          return backendId == DNN_BACKEND_OPENCV ||
-                (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
 +               backendId == DNN_BACKEND_CUDA ||
+                ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
      }
  
      bool getMemoryShapes(const std::vector<MatShape> &inputs,
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_halide.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
 +#include "../op_vkcom.hpp"
  
  #ifdef HAVE_OPENCL
  #include "opencl_kernels_dnn.hpp"
@@@ -111,10 -105,8 +112,10 @@@ public
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
          return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
                 (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding) ||  // By channels
-                (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !padding) ||
 -               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && !padding);
++               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && !padding) ||
 +               (backendId == DNN_BACKEND_VKCOM && haveVulkan() && !padding);
      }
  
      class ChannelConcatInvoker : public ParallelLoopBody
@@@ -32,9 -26,7 +32,9 @@@ public
  
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
 -        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
 +        return backendId == DNN_BACKEND_OPENCV ||
-                backendId == DNN_BACKEND_INFERENCE_ENGINE ||
++               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
 +               backendId == DNN_BACKEND_CUDA;
      }
  
      virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_halide.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
 +#include "../op_vkcom.hpp"
  #include "opencv2/core/hal/hal.hpp"
  #include "opencv2/core/hal/intrin.hpp"
  #include <iostream>
@@@ -260,17 -254,8 +262,17 @@@ public
  
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
 +        if (backendId == DNN_BACKEND_CUDA)
 +        {
 +            /* only 2d and 3d convolutions are supported */
 +            if(kernel_size.size() == 2 || kernel_size.size() == 3)
 +                return true;
 +
 +            return false;
 +        }
 +
  #ifdef HAVE_INF_ENGINE
-         if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
          {
              if (kernel_size.size() == 3)
                  return preferableTarget == DNN_TARGET_CPU;
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_halide.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
 +#include "../op_vkcom.hpp"
  #include <opencv2/dnn/shape_utils.hpp>
  #include <iostream>
  
@@@ -165,14 -160,15 +167,23 @@@ public
      }
  #endif  // HAVE_INF_ENGINE
  
+ #ifdef HAVE_DNN_NGRAPH
+     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+     {
+         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
+         auto node = func.initNgraphAPI(ieInpNode);
+         return Ptr<BackendNode>(new InfEngineNgraphNode(node));
+     }
+ #endif  // HAVE_DNN_NGRAPH
 +    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
 +    {
 +#ifdef HAVE_VULKAN
 +        return Ptr<BackendNode>(new VkComBackendNode(inputs, func.initVkCom()));
 +#endif  // HAVE_VULKAN
 +        return Ptr<BackendNode>();
 +    }
 +
      virtual bool tryFuse(Ptr<dnn::Layer>& top) CV_OVERRIDE
      {
          return func.tryFuse(top);
@@@ -276,13 -260,12 +287,15 @@@ struct ReLUFuncto
      bool supportBackend(int backendId, int)
      {
  #ifdef HAVE_INF_ENGINE
-         if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
              return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
+         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+             return true;
  #endif
 -        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
 +        return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
 +               backendId == DNN_BACKEND_HALIDE ||
 +               backendId == DNN_BACKEND_VKCOM;
      }
  
      void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
      }
  #endif  // HAVE_INF_ENGINE
  
+ #ifdef HAVE_DNN_NGRAPH
+     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+     {
+         if (slope) {
+             auto param = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &slope);
+             return std::make_shared<ngraph::op::PRelu>(node, param);
+         }
+         return std::make_shared<ngraph::op::Relu>(node);
+     }
+ #endif  // HAVE_DNN_NGRAPH
 +#ifdef HAVE_VULKAN
 +    std::shared_ptr<vkcom::OpBase> initVkCom()
 +    {
 +        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpReLU(slope));
 +        return op;
 +    }
 +#endif  // HAVE_VULKAN
 +
      bool tryFuse(Ptr<dnn::Layer>&) { return false; }
  
      void getScaleShift(Mat&, Mat&) const {}
@@@ -417,10 -396,8 +441,10 @@@ struct ReLU6Functo
  
      bool supportBackend(int backendId, int)
      {
 -        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
 +        return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
 +               backendId == DNN_BACKEND_HALIDE ||
-                backendId == DNN_BACKEND_INFERENCE_ENGINE;
+                backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
      }
  
      void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
      }
  #endif  // HAVE_INF_ENGINE
  
+ #ifdef HAVE_DNN_NGRAPH
+     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+     {
+         return std::make_shared<ngraph::op::Clamp>(node, minValue, maxValue);
+     }
+ #endif  // HAVE_DNN_NGRAPH
 +#ifdef HAVE_VULKAN
 +    std::shared_ptr<vkcom::OpBase> initVkCom()
 +    {
 +        // TODO: add vkcom implementation
 +        return std::shared_ptr<vkcom::OpBase>();
 +    }
 +#endif  // HAVE_VULKAN
 +
      bool tryFuse(Ptr<dnn::Layer>&) { return false; }
  
      void getScaleShift(Mat&, Mat&) const {}
@@@ -530,10 -499,8 +561,10 @@@ struct TanHFuncto
  
      bool supportBackend(int backendId, int)
      {
 -        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
 +        return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
 +               backendId == DNN_BACKEND_HALIDE ||
-                backendId == DNN_BACKEND_INFERENCE_ENGINE;
+                backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
      }
  
      void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
      }
  #endif  // HAVE_INF_ENGINE
  
+ #ifdef HAVE_DNN_NGRAPH
+     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+     {
+         return std::make_shared<ngraph::op::Tanh>(node);
+     }
+ #endif  // HAVE_DNN_NGRAPH
 +#ifdef HAVE_VULKAN
 +    std::shared_ptr<vkcom::OpBase> initVkCom()
 +    {
 +        // TODO: add vkcom implementation
 +        return std::shared_ptr<vkcom::OpBase>();
 +    }
 +#endif  // HAVE_VULKAN
 +
      bool tryFuse(Ptr<dnn::Layer>&) { return false; }
  
      void getScaleShift(Mat&, Mat&) const {}
@@@ -686,14 -637,13 +724,21 @@@ struct SwishFuncto
      }
  #endif  // HAVE_INF_ENGINE
  
+ #ifdef HAVE_DNN_NGRAPH
+     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+     {
+         CV_Error(Error::StsNotImplemented, "");
+     }
+ #endif  // HAVE_DNN_NGRAPH
 +#ifdef HAVE_VULKAN
 +    std::shared_ptr<vkcom::OpBase> initVkCom()
 +    {
 +        // TODO: add vkcom implementation
 +        return std::shared_ptr<vkcom::OpBase>();
 +    }
 +#endif  // HAVE_VULKAN
 +
      bool tryFuse(Ptr<dnn::Layer>&) { return false; }
  
      void getScaleShift(Mat&, Mat&) const {}
@@@ -775,14 -717,13 +820,21 @@@ struct MishFuncto
      }
  #endif  // HAVE_INF_ENGINE
  
+ #ifdef HAVE_DNN_NGRAPH
+     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+     {
+         CV_Error(Error::StsNotImplemented, "");
+     }
+ #endif  // HAVE_DNN_NGRAPH
 +#ifdef HAVE_VULKAN
 +    std::shared_ptr<vkcom::OpBase> initVkCom()
 +    {
 +        // TODO: add vkcom implementation
 +        return std::shared_ptr<vkcom::OpBase>();
 +    }
 +#endif  // HAVE_VULKAN
 +
      bool tryFuse(Ptr<dnn::Layer>&) { return false; }
  
      void getScaleShift(Mat&, Mat&) const {}
@@@ -797,10 -738,8 +849,10 @@@ struct SigmoidFuncto
  
      bool supportBackend(int backendId, int)
      {
 -        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
 +        return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
 +               backendId == DNN_BACKEND_HALIDE ||
-                backendId == DNN_BACKEND_INFERENCE_ENGINE;
+                backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||  backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
      }
  
      void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
      }
  #endif  // HAVE_INF_ENGINE
  
+ #ifdef HAVE_DNN_NGRAPH
+     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+     {
+         return std::make_shared<ngraph::op::Sigmoid>(node);
+     }
+ #endif  // HAVE_DNN_NGRAPH
 +#ifdef HAVE_VULKAN
 +    std::shared_ptr<vkcom::OpBase> initVkCom()
 +    {
 +        // TODO: add vkcom implementation
 +        return std::shared_ptr<vkcom::OpBase>();
 +    }
 +#endif  // HAVE_VULKAN
 +
      bool tryFuse(Ptr<dnn::Layer>&) { return false; }
  
      void getScaleShift(Mat&, Mat&) const {}
@@@ -888,10 -819,8 +947,10 @@@ struct ELUFuncto
  
      bool supportBackend(int backendId, int)
      {
 -        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
 -               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
 +        return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
 +               backendId == DNN_BACKEND_HALIDE ||
-                backendId == DNN_BACKEND_INFERENCE_ENGINE;
++               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||  backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
      }
  
      void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
      }
  #endif  // HAVE_INF_ENGINE
  
+ #ifdef HAVE_DNN_NGRAPH
+     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+     {
+         return std::make_shared<ngraph::op::Elu>(node, 1.0);
+     }
+ #endif  // HAVE_DNN_NGRAPH
 +#ifdef HAVE_VULKAN
 +    std::shared_ptr<vkcom::OpBase> initVkCom()
 +    {
 +        // TODO: add vkcom implementation
 +        return std::shared_ptr<vkcom::OpBase>();
 +    }
 +#endif  // HAVE_VULKAN
 +
      bool tryFuse(Ptr<dnn::Layer>&) { return false; }
  
      void getScaleShift(Mat&, Mat&) const {}
@@@ -978,12 -899,10 +1044,12 @@@ struct AbsValFuncto
      bool supportBackend(int backendId, int)
      {
  #ifdef HAVE_INF_ENGINE
-         if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
              return !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
  #endif
 -        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
 +        return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
 +               backendId == DNN_BACKEND_HALIDE;
      }
  
      void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
      }
  #endif  // HAVE_INF_ENGINE
  
+ #ifdef HAVE_DNN_NGRAPH
+     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+     {
+         float coeff = -0.999999f;
+         // float coeff = preferableTarget == DNN_TARGET_MYRIAD ? -0.999f : -0.999999f;
+         auto slope = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &coeff);
+         return std::make_shared<ngraph::op::PRelu>(node, slope);
+     }
+ #endif  // HAVE_DNN_NGRAPH
 +#ifdef HAVE_VULKAN
 +    std::shared_ptr<vkcom::OpBase> initVkCom()
 +    {
 +        // TODO: add vkcom implementation
 +        return std::shared_ptr<vkcom::OpBase>();
 +    }
 +#endif  // HAVE_VULKAN
 +
      bool tryFuse(Ptr<dnn::Layer>&) { return false; }
  
      void getScaleShift(Mat&, Mat&) const {}
@@@ -1138,14 -1043,13 +1214,21 @@@ struct BNLLFuncto
      }
  #endif  // HAVE_INF_ENGINE
  
+ #ifdef HAVE_DNN_NGRAPH
+     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+     {
+         CV_Error(Error::StsNotImplemented, "");
+     }
+ #endif  // HAVE_DNN_NGRAPH
 +#ifdef HAVE_VULKAN
 +    std::shared_ptr<vkcom::OpBase> initVkCom()
 +    {
 +        // TODO: add vkcom implementation
 +        return std::shared_ptr<vkcom::OpBase>();
 +    }
 +#endif  // HAVE_VULKAN
 +
      bool tryFuse(Ptr<dnn::Layer>&) { return false; }
  
      void getScaleShift(Mat&, Mat&) const {}
@@@ -1166,12 -1070,10 +1249,14 @@@ struct PowerFuncto
  
      bool supportBackend(int backendId, int targetId)
      {
-         if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
              return (targetId != DNN_TARGET_OPENCL && targetId != DNN_TARGET_OPENCL_FP16) || power == 1.0 || power == 0.5;
++        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
++            return true;
          else
 -            return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
 +            return backendId == DNN_BACKEND_OPENCV ||
 +                   backendId == DNN_BACKEND_CUDA ||
 +                   backendId == DNN_BACKEND_HALIDE;
      }
  
      void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
      }
  #endif  // HAVE_INF_ENGINE
  
+ #ifdef HAVE_DNN_NGRAPH
+     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+     {
+         auto scale_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                                  ngraph::Shape{1}, &scale);
+         auto shift_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                                  ngraph::Shape{1}, &shift);
+         auto power_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                                  ngraph::Shape{1}, &power);
+         auto mul = std::make_shared<ngraph::op::v1::Multiply>(scale_node, node, ngraph::op::AutoBroadcastType::NUMPY);
+         auto scale_shift = std::make_shared<ngraph::op::v1::Add>(mul, shift_node, ngraph::op::AutoBroadcastType::NUMPY);
+         return std::make_shared<ngraph::op::v1::Power>(scale_shift, power_node, ngraph::op::AutoBroadcastType::NUMPY);
+     }
+ #endif  // HAVE_DNN_NGRAPH
 +#ifdef HAVE_VULKAN
 +    std::shared_ptr<vkcom::OpBase> initVkCom()
 +    {
 +        // TODO: add vkcom implementation
 +        return std::shared_ptr<vkcom::OpBase>();
 +    }
 +#endif  // HAVE_VULKAN
 +
      bool tryFuse(Ptr<dnn::Layer>& top)
      {
          if (power != 1.0f && shift != 0.0f)
@@@ -1317,10 -1220,8 +1418,10 @@@ struct ChannelsPReLUFuncto
  
      bool supportBackend(int backendId, int)
      {
 -        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
 +        return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
 +               backendId == DNN_BACKEND_HALIDE ||
-                backendId == DNN_BACKEND_INFERENCE_ENGINE;
+                backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
      }
  
      void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
      }
  #endif  // HAVE_INF_ENGINE
  
+ #ifdef HAVE_DNN_NGRAPH
+     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+     {
+         const size_t numChannels = scale.total();
+         auto slope = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{numChannels}, scale.data);
+         return std::make_shared<ngraph::op::PRelu>(node, slope);
+     }
+ #endif  // HAVE_DNN_NGRAPH
 +#ifdef HAVE_VULKAN
 +    std::shared_ptr<vkcom::OpBase> initVkCom()
 +    {
 +        // TODO: add vkcom implementation
 +        return std::shared_ptr<vkcom::OpBase>();
 +    }
 +#endif  // HAVE_VULKAN
  
      bool tryFuse(Ptr<dnn::Layer>&) { return false; }
  
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_halide.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
  
  #ifdef HAVE_OPENCL
  #include "opencl_kernels_dnn.hpp"
@@@ -107,10 -102,9 +108,10 @@@ public
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
          return backendId == DNN_BACKEND_OPENCV ||
 -               backendId == DNN_BACKEND_HALIDE ||
 +               (backendId == DNN_BACKEND_CUDA && op != DIV) ||  // TODO: not implemented, see PR #15811
 +               (backendId == DNN_BACKEND_HALIDE && op != DIV) ||  // TODO: not implemented, see PR #15811
-                (backendId == DNN_BACKEND_INFERENCE_ENGINE && !variableChannels &&
-                 (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty()));
+                ((((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty()))
+                 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && !variableChannels));
      }
  
      bool getMemoryShapes(const std::vector<MatShape> &inputs,
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
  #include <float.h>
  #include <algorithm>
  #include <opencv2/dnn/shape_utils.hpp>
@@@ -71,8 -67,7 +73,8 @@@ public
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
          return backendId == DNN_BACKEND_OPENCV ||
-                (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
 +               backendId == DNN_BACKEND_CUDA ||
+                ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
      }
  
      bool getMemoryShapes(const std::vector<MatShape> &inputs,
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_halide.hpp"
  #include "../op_inf_engine.hpp"
++#include "../ie_ngraph.hpp"
  #include <opencv2/dnn/shape_utils.hpp>
  
  #ifdef HAVE_OPENCL
@@@ -129,9 -128,8 +131,9 @@@ public
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
          return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
                 (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1) ||
-                (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && axis == 1);
+                ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && axis == 1);
      }
  
      virtual bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_halide.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
 +#include "../op_vkcom.hpp"
  #include "opencv2/imgproc.hpp"
  #include "opencv2/dnn/shape_utils.hpp"
  #include "opencv2/core/hal/hal.hpp"
@@@ -97,12 -92,13 +99,16 @@@ public
  
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
-         if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
              return bias == (int)bias;
 -        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
+         }
+         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
+             return type == CHANNEL_NRM && bias == (int)bias;
+         }
 +        return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
 +               backendId == DNN_BACKEND_HALIDE ||
 +               (backendId == DNN_BACKEND_VKCOM && haveVulkan() && (size % 2 == 1) && (type == CHANNEL_NRM));
      }
  
  #ifdef HAVE_OPENCL
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
  
 +#ifdef HAVE_CUDA
 +#include "../cuda4dnn/primitives/normalize_bbox.hpp"
 +using namespace cv::dnn::cuda4dnn;
 +#endif
 +
  namespace cv { namespace dnn {
  
  class NormalizeBBoxLayerImpl CV_FINAL : public NormalizeBBoxLayer
@@@ -11,16 -11,12 +11,18 @@@ Implementation of padding layer, which 
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_halide.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
  #include <vector>
  
 +#ifdef HAVE_CUDA
 +#include "../cuda4dnn/primitives/padding.hpp"
 +using namespace cv::dnn::cuda4dnn;
 +#endif
 +
  namespace cv
  {
  namespace dnn
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
 +#include "../op_vkcom.hpp"
++
  #include <float.h>
  #include <algorithm>
  
@@@ -112,9 -106,7 +114,9 @@@ public
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
          return backendId == DNN_BACKEND_OPENCV ||
 -               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
 +               backendId == DNN_BACKEND_CUDA ||
-                (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()) ||
++               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()) ||
 +               (backendId == DNN_BACKEND_VKCOM && haveVulkan());
      }
  
      bool getMemoryShapes(const std::vector<MatShape> &inputs,
  #include "../precomp.hpp"
  #include "layers_common.hpp"
  #include "opencv2/core/hal/intrin.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_halide.hpp"
  #include "../op_inf_engine.hpp"
+ #ifdef HAVE_DNN_NGRAPH
+ #include "../ie_ngraph.hpp"
+ #include <ngraph/op/experimental/layers/roi_pooling.hpp>
+ #include <ngraph/op/experimental/layers/psroi_pooling.hpp>
+ #endif
 +#include "../op_vkcom.hpp"
++
  #include <float.h>
  #include <algorithm>
  #include <numeric>
@@@ -168,11 -167,7 +176,11 @@@ public
  
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
 -        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
 +        if (backendId == DNN_BACKEND_CUDA)
 +        {
 +            return type == MAX || type == AVE;
 +        }
-         else if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
++        else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
          {
              if (computeMaxIdx)
                  return false;
              return false;
  #endif
          }
+         else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
+             return type != STOCHASTIC;
+         }
          else
 -            return (kernel_size.size() == 3 && backendId == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU) ||
 -                   ((kernel_size.empty() || kernel_size.size() == 2) && (backendId == DNN_BACKEND_OPENCV ||
 -                   (backendId == DNN_BACKEND_HALIDE && haveHalide() &&
 -                   (type == MAX || (type == AVE && !pad_t && !pad_l && !pad_b && !pad_r)))));
 +        {
 +            if (kernel_size.size() == 3)
 +                return (backendId == DNN_BACKEND_OPENCV && preferableTarget == DNN_TARGET_CPU);
 +            if (kernel_size.empty() || kernel_size.size() == 2)
 +                return backendId == DNN_BACKEND_OPENCV ||
 +                       (backendId == DNN_BACKEND_HALIDE && haveHalide() &&
 +                           (type == MAX || (type == AVE && !pad_t && !pad_l && !pad_b && !pad_r))) ||
 +                       (backendId == DNN_BACKEND_VKCOM && haveVulkan() &&
 +                           (type == MAX || type == AVE));
 +            else
 +                return false;
 +        }
      }
  
  #ifdef HAVE_OPENCL
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_inf_engine.hpp"
+ #ifdef HAVE_DNN_NGRAPH
+ #include "../ie_ngraph.hpp"
+ #include <ngraph/op/experimental/layers/prior_box.hpp>
+ #include <ngraph/op/experimental/layers/prior_box_clustered.hpp>
+ #endif
 +#include "../op_vkcom.hpp"
++
  #include <float.h>
  #include <algorithm>
  #include <cmath>
@@@ -279,11 -279,13 +287,15 @@@ public
  
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
+ #ifdef HAVE_DNN_NGRAPH
+         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+             return _explicitSizes || _stepX == _stepY;
+ #endif
          return backendId == DNN_BACKEND_OPENCV ||
-                (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() &&
 +               backendId == DNN_BACKEND_CUDA ||
 -               ( _explicitSizes || (_minSize.size() == 1 && _maxSize.size() <= 1)));
+                (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() &&
 +                   ( _explicitSizes || (_minSize.size() == 1 && _maxSize.size() <= 1)))
 +               || (backendId == DNN_BACKEND_VKCOM && haveVulkan());
      }
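
For reference, the nGraph branch above admits a PriorBox only when sizes are explicit or the step is square. A hedged sketch of a configuration it accepts; the parameter names follow the Caffe SSD convention this layer parses, but treat them as assumptions here:

    // PriorBox with a single square step, so _stepX == _stepY holds.
    cv::dnn::LayerParams lp;
    lp.type = "PriorBox";
    lp.set("min_size", 30.0f);
    lp.set("step", 16.0f);  // one step for both axes
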
  
      bool getMemoryShapes(const std::vector<MatShape> &inputs,
  //M*/
  
  #include "../precomp.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_inf_engine.hpp"
++#ifdef HAVE_DNN_NGRAPH
++#include "../ie_ngraph.hpp"
++#include <ngraph/op/experimental/layers/reorg_yolo.hpp>
++#endif
++
  #include <opencv2/dnn/shape_utils.hpp>
  #include <opencv2/dnn/all_layers.hpp>
  
@@@ -141,9 -141,8 +147,10 @@@ public
  
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
 -        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
 +        return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
-                backendId == DNN_BACKEND_INFERENCE_ENGINE;
++               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
+                backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
      }
  
  #ifdef HAVE_OPENCL
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
  #include <opencv2/dnn/shape_utils.hpp>
  
 +#ifdef HAVE_CUDA
 +#include "../cuda4dnn/primitives/reshape.hpp"
 +using namespace cv::dnn::cuda4dnn;
 +#endif
 +
  namespace cv
  {
  namespace dnn
@@@ -185,8 -181,7 +187,8 @@@ public
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
          return backendId == DNN_BACKEND_OPENCV ||
-                (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
 +               backendId == DNN_BACKEND_CUDA ||
+                ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
      }
  
      bool getMemoryShapes(const std::vector<MatShape> &inputs,
  #include "../op_inf_engine.hpp"
  #include <opencv2/imgproc.hpp>
  
+ #ifdef HAVE_DNN_NGRAPH
+ #include "../ie_ngraph.hpp"
+ #include <ngraph/op/experimental/layers/interpolate.hpp>
+ #endif
 +#ifdef HAVE_CUDA
 +#include "../cuda4dnn/primitives/resize.hpp"
 +using namespace cv::dnn::cuda4dnn;
 +#endif
 +
  namespace cv { namespace dnn {
  
  class ResizeLayerImpl : public ResizeLayer
@@@ -57,11 -56,9 +62,12 @@@ public
  
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
 +        if (backendId == DNN_BACKEND_CUDA)
 +            return interpolation == "nearest" || interpolation == "bilinear";
 +
  #ifdef HAVE_INF_ENGINE
-         if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
+             backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
          {
              return (interpolation == "nearest" && scaleWidth == scaleHeight) ||
                     (interpolation == "bilinear");
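
So on both IE paths "nearest" is taken only when the two scale factors agree, while "bilinear" is unconditional. A hedged sketch of a conforming configuration (the parameter names this layer's importers use are assumptions here):

    cv::dnn::LayerParams lp;
    lp.type = "Resize";
    lp.set("interpolation", "nearest");
    lp.set("zoom_factor", 2);  // same scale on width and height
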
@@@ -254,9 -259,12 +289,13 @@@ public
  
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
 -        return backendId == DNN_BACKEND_OPENCV;
+ #ifdef HAVE_INF_ENGINE
+         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019
+             || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+             return true;
+ #endif
-                backendId == DNN_BACKEND_INFERENCE_ENGINE ||
 +        return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA;
      }
  
      virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
@@@ -11,16 -11,12 +11,18 @@@ Implementation of Scale layer
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_halide.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
  #include <opencv2/dnn/shape_utils.hpp>
  
 +#ifdef HAVE_CUDA
 +#include "../cuda4dnn/primitives/scale_shift.hpp"
 +using namespace cv::dnn::cuda4dnn;
 +#endif
 +
  namespace cv
  {
  namespace dnn
@@@ -56,10 -52,8 +58,10 @@@ public
  
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
 -        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
 +        return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
 +               backendId == DNN_BACKEND_HALIDE ||
-                (backendId == DNN_BACKEND_INFERENCE_ENGINE && axis == 1);
+                ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && axis == 1);
      }
  
      void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
  //M*/
  
  #include "../precomp.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
  #include "layers_common.hpp"
  #include <opencv2/dnn/shape_utils.hpp>
  
@@@ -118,8 -114,7 +120,8 @@@ public
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
          return backendId == DNN_BACKEND_OPENCV ||
-                (backendId == DNN_BACKEND_INFERENCE_ENGINE &&
 +               backendId == DNN_BACKEND_CUDA ||
+                ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
  #ifdef HAVE_INF_ENGINE
                  INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
  #endif
  
  #include "../precomp.hpp"
  #include "layers_common.hpp"
 +#include "../op_cuda.hpp"
  #include "../op_halide.hpp"
  #include "../op_inf_engine.hpp"
+ #include "../ie_ngraph.hpp"
 +#include "../op_vkcom.hpp"
  #include <algorithm>
  #include <stdlib.h>
  using std::max;
@@@ -96,10 -91,8 +98,10 @@@ public
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
          return backendId == DNN_BACKEND_OPENCV ||
 +               backendId == DNN_BACKEND_CUDA ||
                 (backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1) ||
-                (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !logSoftMax) ||
 -               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && !logSoftMax);
++               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && !logSoftMax) ||
 +               (backendId == DNN_BACKEND_VKCOM && haveVulkan());
      }
  
  #ifdef HAVE_OPENCL
@@@ -21,6 -21,54 +21,54 @@@ namespace cv { namespace dnn 
  
  #ifdef HAVE_INF_ENGINE
  
 -CV__DNN_EXPERIMENTAL_NS_BEGIN
+ static Backend parseInferenceEngineBackendType(const cv::String& backend)
+ {
+     CV_Assert(!backend.empty());
+     if (backend == CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+         return DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+     if (backend == CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API)
+         return DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
+     CV_Error(Error::StsBadArg, cv::format("Unknown IE backend: %s", backend.c_str()));
+ }
+ static const char* dumpInferenceEngineBackendType(Backend backend)
+ {
+     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+         return CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+         return CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API;
+     CV_Error(Error::StsBadArg, cv::format("Invalid backend ID for IE: %d", backend));
+ }
+ Backend& getInferenceEngineBackendTypeParam()
+ {
+     static Backend param = parseInferenceEngineBackendType(
+         utils::getConfigurationParameterString("OPENCV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019_TYPE",
+ #ifdef HAVE_NGRAPH
+             CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API  // future: CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH
+ #else
+             CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API
+ #endif
+         )
+     );
+     return param;
+ }
 -CV__DNN_EXPERIMENTAL_NS_END
++CV__DNN_INLINE_NS_BEGIN
+ cv::String getInferenceEngineBackendType()
+ {
+     return dumpInferenceEngineBackendType(getInferenceEngineBackendTypeParam());
+ }
+ cv::String setInferenceEngineBackendType(const cv::String& newBackendType)
+ {
+     Backend newBackend = parseInferenceEngineBackendType(newBackendType);
+     Backend& param = getInferenceEngineBackendTypeParam();
+     Backend old = param;
+     param = newBackend;
+     return dumpInferenceEngineBackendType(old);
+ }
++CV__DNN_INLINE_NS_END
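
These two accessors are the public switch between the NN Builder and nGraph integrations behind DNN_BACKEND_INFERENCE_ENGINE; the default comes from the OPENCV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019_TYPE environment variable read above. A usage sketch:

    // Select the nGraph integration before building any network; the
    // previous type string is returned, matching the implementation above.
    #include <opencv2/dnn/utils/inference_engine.hpp>

    cv::String prev = cv::dnn::setInferenceEngineBackendType(
        CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
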
  // For networks whose input layer has an empty name, IE generates a name like id[some_number].
  // OpenCV lets users keep the input name empty, so to prevent unexpected
  // naming we substitute a predefined name.
@@@ -210,12 -212,44 +212,45 @@@ private
      InferenceEngine::CNNNetwork t_net;
  };
  
 -CV__DNN_EXPERIMENTAL_NS_BEGIN
++
+ class InfEngineExtension : public InferenceEngine::IExtension
+ {
+ public:
+     virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
+     virtual void Unload() noexcept {}
+     virtual void Release() noexcept {}
+     virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
+     virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
+                                                           InferenceEngine::ResponseDesc*) noexcept
+     {
+         return InferenceEngine::StatusCode::OK;
+     }
+     InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
+                                               const InferenceEngine::CNNLayer* cnnLayer,
+                                               InferenceEngine::ResponseDesc* resp) noexcept;
+ };
 +CV__DNN_INLINE_NS_BEGIN
  
  bool isMyriadX();
  
 -CV__DNN_EXPERIMENTAL_NS_END
 +CV__DNN_INLINE_NS_END
  
+ InferenceEngine::Core& getCore();
+ template<typename T = size_t>
+ static inline std::vector<T> getShape(const Mat& mat)
+ {
+     std::vector<T> result(mat.dims);
+     for (int i = 0; i < mat.dims; i++)
+         result[i] = (T)mat.size[i];
+     return result;
+ }
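
A small sketch of the helper above, assuming a 4D NCHW blob:

    // getShape() copies Mat::size into the vector form that Inference
    // Engine descriptors expect; T defaults to size_t.
    cv::Mat blob(std::vector<int>{1, 3, 224, 224}, CV_32F);
    std::vector<size_t> dims = getShape(blob);  // {1, 3, 224, 224}
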
  #endif  // HAVE_INF_ENGINE
  
  bool haveInfEngine();
Simple merge
  #define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X         "dnn_skip_ie_myriadx"
  #define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD           CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X
  
 +#define CV_TEST_TAG_DNN_SKIP_VULKAN              "dnn_skip_vulkan"
 +
 +#define CV_TEST_TAG_DNN_SKIP_CUDA                "dnn_skip_cuda"
 +#define CV_TEST_TAG_DNN_SKIP_CUDA_FP16           "dnn_skip_cuda_fp16"
 +#define CV_TEST_TAG_DNN_SKIP_CUDA_FP32           "dnn_skip_cuda_fp32"
 +
+ #ifdef HAVE_INF_ENGINE
+ #if INF_ENGINE_VER_MAJOR_EQ(2018050000)
+ #  define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2018R5
+ #elif INF_ENGINE_VER_MAJOR_EQ(2019010000)
+ #  if INF_ENGINE_RELEASE < 2019010100
+ #    define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1
+ #  else
+ #    define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1
+ #  endif
+ #elif INF_ENGINE_VER_MAJOR_EQ(2019020000)
+ #  define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R2
+ #elif INF_ENGINE_VER_MAJOR_EQ(2019030000)
+ #  define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R3
+ #endif
+ #endif // HAVE_INF_ENGINE
+ #ifndef CV_TEST_TAG_DNN_SKIP_IE_VERSION
+ #    define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE
+ #endif
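
The cascade above maps the build-time IE release onto a concrete tag set, falling back to the generic CV_TEST_TAG_DNN_SKIP_IE. Tests apply it with the same idiom used throughout this merge:

    #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);  // skip on pre-2019R1 builds
    #endif
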
  namespace cv { namespace dnn {
 -CV__DNN_EXPERIMENTAL_NS_BEGIN
 +CV__DNN_INLINE_NS_BEGIN
  
  void PrintTo(const cv::dnn::Backend& v, std::ostream* os);
  void PrintTo(const cv::dnn::Target& v, std::ostream* os);
@@@ -91,10 -117,11 +123,13 @@@ testing::internal::ParamGenerator< tupl
          bool withInferenceEngine = true,
          bool withHalide = false,
          bool withCpuOCV = true,
-         bool withCUDA = true
 +        bool withVkCom = true,
++        bool withCUDA = true,
+         bool withNgraph = true
  );
  
+ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargetsIE();
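
These generators enumerate the enabled (backend, target) pairs for googletest's value parameterization; a typical instantiation, as the DNN test suites write it:

    // Run every DNNTestLayer case across all enabled backend/target pairs.
    INSTANTIATE_TEST_CASE_P(/**/, DNNTestLayer, dnnBackendsAndTargets());
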
  
  class DNNTestLayer : public TestWithParam<tuple<Backend, Target> >
  {
@@@ -158,16 -191,12 +199,18 @@@ public
  
      void expectNoFallbacksFromIE(Net& net)
      {
-         if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+         if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
              expectNoFallbacks(net);
+         if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+             expectNoFallbacks(net, false);
      }
  
 +    void expectNoFallbacksFromCUDA(Net& net)
 +    {
 +        if (backend == DNN_BACKEND_CUDA)
 +            expectNoFallbacks(net);
 +    }
 +
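
The expectNoFallbacks* helpers verify that the whole graph actually ran on the requested backend rather than silently falling back layer by layer. A sketch of a test body using them, assuming the fixture's backend/target members and a prepared input blob:

    // Inside a DNNTestLayer-derived test (sketch; 'model' and 'blob' are
    // placeholders prepared by the test).
    Net net = readNet(model);
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);
    net.setInput(blob);
    net.forward();
    expectNoFallbacksFromIE(net);  // no-op unless backend is an IE variant
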
  protected:
      void checkBackend(Mat* inp = 0, Mat* ref = 0)
      {
@@@ -21,12 -21,13 +21,14 @@@ CV__DNN_INLINE_NS_BEGI
  void PrintTo(const cv::dnn::Backend& v, std::ostream* os)
  {
      switch (v) {
 -        case DNN_BACKEND_DEFAULT: *os << "DEFAULT"; return;
 -        case DNN_BACKEND_HALIDE: *os << "HALIDE"; return;
 -        case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE*"; return;
 -        case DNN_BACKEND_OPENCV: *os << "OCV"; return;
 -        case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: *os << "DLIE"; return;
 -        case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: *os << "NGRAPH"; return;
 -        default: /* do nothing */;
 +    case DNN_BACKEND_DEFAULT: *os << "DEFAULT"; return;
 +    case DNN_BACKEND_HALIDE: *os << "HALIDE"; return;
-     case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE"; return;
++    case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE*"; return;
 +    case DNN_BACKEND_VKCOM: *os << "VKCOM"; return;
 +    case DNN_BACKEND_OPENCV: *os << "OCV"; return;
 +    case DNN_BACKEND_CUDA: *os << "CUDA"; return;
++    case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: *os << "DLIE"; return;
++    case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: *os << "NGRAPH"; return;
      } // don't use "default:" to emit compiler warnings
      *os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")";
  }
@@@ -185,8 -183,7 +187,9 @@@ testing::internal::ParamGenerator< tupl
          bool withInferenceEngine /*= true*/,
          bool withHalide /*= false*/,
          bool withCpuOCV /*= true*/,
-         bool withCUDA /*= true*/
 +        bool withVkCom /*= true*/,
++        bool withCUDA /*= true*/,
+         bool withNgraph /*= true*/
  )
  {
  #ifdef HAVE_INF_ENGINE
@@@ -254,16 -254,32 +254,18 @@@ void runIE(Target target, const std::st
      infRequest.Infer();
  }
  
- void runCV(Target target, const std::string& xmlPath, const std::string& binPath,
 -std::vector<String> getOutputsNames(const Net& net)
 -{
 -    std::vector<String> names;
 -    if (names.empty())
 -    {
 -        std::vector<int> outLayers = net.getUnconnectedOutLayers();
 -        std::vector<String> layersNames = net.getLayerNames();
 -        names.resize(outLayers.size());
 -        for (size_t i = 0; i < outLayers.size(); ++i)
 -            names[i] = layersNames[outLayers[i] - 1];
 -    }
 -    return names;
 -}
 -
+ void runCV(Backend backendId, Target targetId, const std::string& xmlPath, const std::string& binPath,
             const std::map<std::string, cv::Mat>& inputsMap,
             std::map<std::string, cv::Mat>& outputsMap)
  {
      Net net = readNet(xmlPath, binPath);
      for (auto& it : inputsMap)
          net.setInput(it.second, it.first);
-     net.setPreferableTarget(target);
+     net.setPreferableBackend(backendId);
+     net.setPreferableTarget(targetId);
  
 -    std::vector<String> outNames = getOutputsNames(net);
 +    std::vector<String> outNames = net.getUnconnectedOutLayersNames();
      std::vector<Mat> outs;
      net.forward(outs, outNames);
  
@@@ -577,10 -596,10 +599,12 @@@ TEST_P(Test_Caffe_layers, FasterRCNN_Pr
  {
      if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
          applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
-     if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
 +    if (backend == DNN_BACKEND_CUDA)

 +        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* Proposal layer is unsupported */
  
      Net net = readNetFromCaffe(_tf("net_faster_rcnn_proposal.prototxt"));
  
Simple merge
@@@ -101,11 -98,10 +101,10 @@@ TEST_P(Test_ONNX_layers, Convolution
  TEST_P(Test_ONNX_layers, Convolution3D)
  {
  #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-     if(backend == DNN_BACKEND_INFERENCE_ENGINE)
-         throw SkipTestException("Test is enabled starts from 2019R1");
+     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  #endif
 -    if (target != DNN_TARGET_CPU)
 -        throw SkipTestException("Only CPU is supported");
 +    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
 +        throw SkipTestException("Only CPU and CUDA are supported");
      testONNXModels("conv3d");
      testONNXModels("conv3d_bias");
  }
@@@ -133,11 -129,11 +132,15 @@@ TEST_P(Test_ONNX_layers, Deconvolution
  
  TEST_P(Test_ONNX_layers, Deconvolution3D)
  {
- #if defined(INF_ENGINE_RELEASE)
-     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+ #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
+     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  #endif
-     if ((backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU) && backend != DNN_BACKEND_CUDA)
-         throw SkipTestException("Only DLIE backend on CPU, and CUDA is supported");
 -    if (backend == DNN_BACKEND_OPENCV || target != DNN_TARGET_CPU)
++    if (backend == DNN_BACKEND_CUDA)
++    {
++        // ok
++    }
++    else if (backend == DNN_BACKEND_OPENCV || target != DNN_TARGET_CPU)
+         throw SkipTestException("Only DLIE backend on CPU is supported");
      testONNXModels("deconv3d");
      testONNXModels("deconv3d_bias");
      testONNXModels("deconv3d_pad");
@@@ -173,8 -169,12 +176,17 @@@ TEST_P(Test_ONNX_layers, ReduceMean
  
  TEST_P(Test_ONNX_layers, ReduceMean3D)
  {
-     if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-         throw SkipTestException("Only CPU and CUDA is supported");
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++    if (backend == DNN_BACKEND_CUDA)
++    {
++        // ok
++    }
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
 -    if (target != DNN_TARGET_CPU)
++    else if (target != DNN_TARGET_CPU)
+         throw SkipTestException("Only CPU is supported");
++
      testONNXModels("reduce_mean3d");
  }
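
Every 3D test below repeats the same gate: CUDA is allowed outright, the two IE integrations are allowed only on CPU (tagged skips otherwise), and any other backend must run on CPU. A condensed sketch of that predicate, with a hypothetical helper name that does not exist in the sources:

    // Hypothetical summary of the gate each 3D test inlines below.
    static bool cpuOnlyUnlessCuda(Backend backend, Target target)
    {
        if (backend == DNN_BACKEND_CUDA)
            return false;                 // CUDA runs the 3D cases
        return target != DNN_TARGET_CPU;  // everything else: CPU only
    }
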
  
@@@ -212,30 -214,42 +226,54 @@@ TEST_P(Test_ONNX_layers, AveragePooling
  TEST_P(Test_ONNX_layers, MaxPooling3D)
  {
  #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-     throw SkipTestException("Test is enabled starts from 2019R1");
+     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  #endif
-     if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-         throw SkipTestException("Only CPU and CUDA is supported");
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++    if (backend == DNN_BACKEND_CUDA)
++    {
++        // ok
++    }
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
 -    if (target != DNN_TARGET_CPU)
++    else if (target != DNN_TARGET_CPU)
+         throw SkipTestException("Only CPU is supported");
      testONNXModels("max_pool3d", npy, 0, 0, false, false);
  }
  
  TEST_P(Test_ONNX_layers, AvePooling3D)
  {
  #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-     throw SkipTestException("Test is enabled starts from 2019R1");
+     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  #endif
-     if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-         throw SkipTestException("Only CPU and CUDA is supported");
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++    if (backend == DNN_BACKEND_CUDA)
++    {
++        // ok
++    }
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
 -    if (target != DNN_TARGET_CPU)
++    else if (target != DNN_TARGET_CPU)
+         throw SkipTestException("Only CPU is supported");
      testONNXModels("ave_pool3d");
  }
  
  TEST_P(Test_ONNX_layers, PoolConv3D)
  {
  #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-     throw SkipTestException("Test is enabled starts from 2019R1");
+     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  #endif
-     if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-         throw SkipTestException("Only CPU and CUDA is supported");
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++    if (backend == DNN_BACKEND_CUDA)
++    {
++        // ok
++    }
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
 -    if (target != DNN_TARGET_CPU)
++    else if (target != DNN_TARGET_CPU)
+         throw SkipTestException("Only CPU is supported");
      testONNXModels("pool_conv_3d");
  }
  
@@@ -656,10 -672,14 +696,18 @@@ TEST_P(Test_ONNX_nets, Shufflenet
  TEST_P(Test_ONNX_nets, Resnet34_kinetics)
  {
  #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-     throw SkipTestException("Test is enabled starts from 2019R1");
+     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  #endif
-     if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-         throw SkipTestException("Only CPU and CUDA is supported");
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++    if (backend == DNN_BACKEND_CUDA)
++    {
++        // ok
++    }
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
 -    if (target != DNN_TARGET_CPU)
++    else if (target != DNN_TARGET_CPU)
+         throw SkipTestException("Only CPU is supported");
  
      String onnxmodel = findDataFile("dnn/resnet-34_kinetics.onnx", false);
      Mat image0 = imread(findDataFile("dnn/dog416.png"));
@@@ -134,10 -134,14 +134,19 @@@ TEST_P(Test_TensorFlow_layers, conv
  TEST_P(Test_TensorFlow_layers, Convolution3D)
  {
  #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-     throw SkipTestException("Test is enabled starts from 2019R1");
+     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  #endif
-     if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-         throw SkipTestException("Only CPU and CUDA is supported");
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++    if (backend == DNN_BACKEND_CUDA)
++    {
++        // ok
++    }
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
 -    if (target != DNN_TARGET_CPU)
++    else if (target != DNN_TARGET_CPU)
+         throw SkipTestException("Only CPU is supported");
++
      runTensorFlowNet("conv3d");
  }
  
@@@ -250,20 -258,28 +263,38 @@@ TEST_P(Test_TensorFlow_layers, ave_pool
  TEST_P(Test_TensorFlow_layers, MaxPooling3D)
  {
  #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-     throw SkipTestException("Test is enabled starts from 2019R1");
+     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  #endif
-     if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-         throw SkipTestException("Only CPU and CUDA is supported");
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++    if (backend == DNN_BACKEND_CUDA)
++    {
++        // ok
++    }
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
 -    if (target != DNN_TARGET_CPU)
++    else if (target != DNN_TARGET_CPU)
+         throw SkipTestException("Only CPU is supported");
++
      runTensorFlowNet("max_pool3d");
  }
  
  TEST_P(Test_TensorFlow_layers, AvePooling3D)
  {
  #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-     throw SkipTestException("Test is enabled starts from 2019R1");
+     applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
  #endif
-     if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-         throw SkipTestException("Only CPU and CUDA is supported");
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
++    if (backend == DNN_BACKEND_CUDA)
++    {
++        // ok
++    }
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
 -    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
++    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
 -    if (target != DNN_TARGET_CPU)
++    else if (target != DNN_TARGET_CPU)
+         throw SkipTestException("Only CPU is supported");
++
      runTensorFlowNet("ave_pool3d");
  }
  
@@@ -682,10 -705,10 +723,12 @@@ TEST_P(Test_TensorFlow_layers, quantize
  
  TEST_P(Test_TensorFlow_layers, lstm)
  {
-     if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
 +    if (backend == DNN_BACKEND_CUDA)
 +        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* LSTM is not supported by the CUDA backend */
+     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
      if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
          applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
      runTensorFlowNet("lstm", true);
Simple merge