Merge remote-tracking branch 'upstream/3.4' into merge-3.4
author Alexander Alekhin <alexander.a.alekhin@gmail.com>
Fri, 29 Mar 2019 19:21:47 +0000 (19:21 +0000)
committer Alexander Alekhin <alexander.a.alekhin@gmail.com>
Fri, 29 Mar 2019 19:29:36 +0000 (19:29 +0000)
15 files changed:
CMakeLists.txt
doc/tutorials/dnn/dnn_googlenet/dnn_googlenet.markdown
modules/core/include/opencv2/core/matx.hpp
modules/core/src/lda.cpp
modules/dnn/include/opencv2/dnn/dnn.hpp
modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
modules/dnn/src/dnn.cpp
modules/dnn/src/layers/pooling_layer.cpp
modules/dnn/src/op_inf_engine.cpp
modules/dnn/src/op_inf_engine.hpp
modules/dnn/src/tensorflow/tf_graph_simplifier.cpp
modules/dnn/src/tensorflow/tf_importer.cpp
modules/dnn/test/test_backends.cpp
modules/dnn/test/test_common.cpp
modules/dnn/test/test_common.hpp

diff --cc CMakeLists.txt
Simple merge
diff --cc doc/tutorials/dnn/dnn_googlenet/dnn_googlenet.markdown
@@@ -25,7 -25,7 +25,7 @@@ Explanation
     [bvlc_googlenet.caffemodel](http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel)
  
     Also you need file with names of [ILSVRC2012](http://image-net.org/challenges/LSVRC/2012/browse-synsets) classes:
-    [classification_classes_ILSVRC2012.txt](https://github.com/opencv/opencv/tree/master/samples/dnn/classification_classes_ILSVRC2012.txt).
 -   [classification_classes_ILSVRC2012.txt](https://github.com/opencv/opencv/blob/3.4/samples/data/dnn/classification_classes_ILSVRC2012.txt).
++   [classification_classes_ILSVRC2012.txt](https://github.com/opencv/opencv/blob/master/samples/data/dnn/classification_classes_ILSVRC2012.txt).
  
     Put these files into working dir of this program example.
  
diff --cc modules/core/include/opencv2/core/matx.hpp
Simple merge
diff --cc modules/core/src/lda.cpp
Simple merge
diff --cc modules/dnn/include/opencv2/dnn/dnn.hpp
@@@ -955,15 -959,8 +955,8 @@@ CV__DNN_INLINE_NS_BEGIN
                               CV_OUT std::vector<int>& indices,
                               const float eta = 1.f, const int top_k = 0);
  
-     /** @brief Release a Myriad device is binded by OpenCV.
-      *
-      * Single Myriad device cannot be shared across multiple processes which uses
-      * Inference Engine's Myriad plugin.
-      */
-     CV_EXPORTS_W void resetMyriadDevice();
  //! @}
 -CV__DNN_EXPERIMENTAL_NS_END
 +CV__DNN_INLINE_NS_END
  }
  }
  
diff --cc modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp
index 0000000,0211096..564e526
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,43 +1,43 @@@
+ // This file is part of OpenCV project.
+ // It is subject to the license terms in the LICENSE file found in the top-level directory
+ // of this distribution and at http://opencv.org/license.html.
+ //
+ // Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
+ // Third party copyrights are property of their respective owners.
+ #ifndef OPENCV_DNN_UTILS_INF_ENGINE_HPP
+ #define OPENCV_DNN_UTILS_INF_ENGINE_HPP
+ #include "../dnn.hpp"
+ namespace cv { namespace dnn {
 -CV__DNN_EXPERIMENTAL_NS_BEGIN
++CV__DNN_INLINE_NS_BEGIN
+ /** @brief Release a Myriad device (bound by OpenCV).
+  *
+  * A single Myriad device cannot be shared across multiple processes that use
+  * Inference Engine's Myriad plugin.
+  */
+ CV_EXPORTS_W void resetMyriadDevice();
+ /* Values for 'OPENCV_DNN_IE_VPU_TYPE' parameter */
+ #define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_UNSPECIFIED ""
+ /// Intel(R) Movidius(TM) Neural Compute Stick, NCS (USB 03e7:2150), Myriad2 (https://software.intel.com/en-us/movidius-ncs)
+ #define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2 "Myriad2"
+ /// Intel(R) Neural Compute Stick 2, NCS2 (USB 03e7:2485), MyriadX (https://software.intel.com/ru-ru/neural-compute-stick)
+ #define CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X "MyriadX"
+ /** @brief Returns the Inference Engine VPU type.
+  *
+  * See the `CV_DNN_INFERENCE_ENGINE_VPU_TYPE_*` macros for possible values.
+  */
+ CV_EXPORTS_W cv::String getInferenceEngineVPUType();
 -CV__DNN_EXPERIMENTAL_NS_END
++CV__DNN_INLINE_NS_END
+ }} // namespace
+ #endif // OPENCV_DNN_UTILS_INF_ENGINE_HPP
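[Editor's note] The header above is new in this merge; resetMyriadDevice() exists because a Myriad stick can be bound by only one process at a time. A minimal usage sketch (network setup elided; the function name runOnMyriad is illustrative):

    #include <opencv2/dnn.hpp>
    #include <opencv2/dnn/utils/inference_engine.hpp>

    void runOnMyriad(cv::dnn::Net& net, const cv::Mat& blob)
    {
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_MYRIAD);
        net.setInput(blob);
        cv::Mat out = net.forward();
        // Release the stick so another process can bind it afterwards.
        cv::dnn::resetMyriadDevice();
    }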
diff --cc modules/dnn/src/dnn.cpp
Simple merge
diff --cc modules/dnn/src/layers/pooling_layer.cpp
Simple merge
diff --cc modules/dnn/src/op_inf_engine.cpp
@@@ -875,5 -943,59 +943,59 @@@ void resetMyriadDevice()
  #endif  // HAVE_INF_ENGINE
  }
  
 -CV__DNN_EXPERIMENTAL_NS_END
+ #ifdef HAVE_INF_ENGINE
+ bool isMyriadX()
+ {
+     static bool myriadX = getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
+     return myriadX;
+ }
+ static std::string getInferenceEngineVPUType_()
+ {
+     static std::string param_vpu_type = utils::getConfigurationParameterString("OPENCV_DNN_IE_VPU_TYPE", "");
+     if (param_vpu_type == "")
+     {
+ #if defined(OPENCV_DNN_IE_VPU_TYPE_DEFAULT)
+         param_vpu_type = OPENCV_DNN_IE_VPU_TYPE_DEFAULT;
+ #elif INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+         CV_LOG_INFO(NULL, "OpenCV-DNN: running Inference Engine VPU autodetection: Myriad2/X. In case of other accelerator types specify 'OPENCV_DNN_IE_VPU_TYPE' parameter");
+         try {
+             bool isMyriadX_ = detectMyriadX_();
+             if (isMyriadX_)
+             {
+                 param_vpu_type = CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X;
+             }
+             else
+             {
+                 param_vpu_type = CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2;
+             }
+         }
+         catch (...)
+         {
+             CV_LOG_WARNING(NULL, "OpenCV-DNN: Failed Inference Engine VPU autodetection. Specify 'OPENCV_DNN_IE_VPU_TYPE' parameter.");
+             param_vpu_type.clear();
+         }
+ #else
+         CV_LOG_WARNING(NULL, "OpenCV-DNN: VPU auto-detection is not implemented. Consider specifying VPU type via 'OPENCV_DNN_IE_VPU_TYPE' parameter");
+         param_vpu_type = CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2;
+ #endif
+     }
+     CV_LOG_INFO(NULL, "OpenCV-DNN: Inference Engine VPU type='" << param_vpu_type << "'");
+     return param_vpu_type;
+ }
+ cv::String getInferenceEngineVPUType()
+ {
+     static cv::String vpu_type = getInferenceEngineVPUType_();
+     return vpu_type;
+ }
+ #else  // HAVE_INF_ENGINE
+ cv::String getInferenceEngineVPUType()
+ {
+     CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");
+ }
+ #endif  // HAVE_INF_ENGINE
 +CV__DNN_INLINE_NS_END
  }}  // namespace dnn, namespace cv
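[Editor's note] As the implementation above shows, the VPU type is resolved once (explicit 'OPENCV_DNN_IE_VPU_TYPE' parameter, then a compile-time default, then the runtime Myriad2/X probe) and cached in a function-local static. A hedged sketch of pinning the type per process, assuming a POSIX environment and an Inference-Engine-enabled build (otherwise getInferenceEngineVPUType() throws, per the #else branch above):

    #include <cstdlib>
    #include <iostream>
    #include <opencv2/dnn/utils/inference_engine.hpp>

    int main()
    {
        // Must happen before the first query: the result is cached in a static.
        setenv("OPENCV_DNN_IE_VPU_TYPE", "Myriad2", /*overwrite=*/1);
        std::cout << cv::dnn::getInferenceEngineVPUType() << std::endl;  // prints "Myriad2"
        return 0;
    }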
diff --cc modules/dnn/src/op_inf_engine.hpp
@@@ -100,24 -102,22 +102,22 @@@ public:
  
      virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                         InferenceEngine::CNNLayerPtr &out,
 -                                                       InferenceEngine::ResponseDesc *resp) const noexcept;
 +                                                       InferenceEngine::ResponseDesc *resp) const CV_NOEXCEPT;
  
 -    virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;
 +    virtual void setTargetDevice(InferenceEngine::TargetDevice device) CV_NOEXCEPT CV_OVERRIDE;
  
 -    virtual InferenceEngine::TargetDevice getTargetDevice() noexcept;
 +    virtual InferenceEngine::TargetDevice getTargetDevice() CV_NOEXCEPT;
  
 -    virtual InferenceEngine::TargetDevice getTargetDevice() const noexcept;
 +    virtual InferenceEngine::TargetDevice getTargetDevice() const CV_NOEXCEPT;
  
 -    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;
 +    virtual InferenceEngine::StatusCode setBatchSize(const size_t size) CV_NOEXCEPT CV_OVERRIDE;
  
 -    virtual InferenceEngine::StatusCode setBatchSize(size_t size, InferenceEngine::ResponseDesc* responseDesc) noexcept;
 +    virtual InferenceEngine::StatusCode setBatchSize(size_t size, InferenceEngine::ResponseDesc* responseDesc) CV_NOEXCEPT;
  
 -    virtual size_t getBatchSize() const noexcept CV_OVERRIDE;
 +    virtual size_t getBatchSize() const CV_NOEXCEPT CV_OVERRIDE;
  
- #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R2)
-     virtual InferenceEngine::StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension, InferenceEngine::ResponseDesc* resp) CV_NOEXCEPT;
-     virtual InferenceEngine::StatusCode reshape(const InputShapes& inputShapes, InferenceEngine::ResponseDesc* resp) CV_NOEXCEPT;
- #endif
 -    virtual InferenceEngine::StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension, InferenceEngine::ResponseDesc* resp) noexcept CV_OVERRIDE;
 -    virtual InferenceEngine::StatusCode reshape(const InputShapes& inputShapes, InferenceEngine::ResponseDesc* resp) noexcept CV_OVERRIDE;
++    virtual InferenceEngine::StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension, InferenceEngine::ResponseDesc* resp) CV_NOEXCEPT CV_OVERRIDE;
++    virtual InferenceEngine::StatusCode reshape(const InputShapes& inputShapes, InferenceEngine::ResponseDesc* resp) CV_NOEXCEPT CV_OVERRIDE;
  
      void init(int targetId);
  
@@@ -279,6 -279,12 +279,12 @@@ private
      InferenceEngine::CNNNetwork t_net;
  };
  
 -CV__DNN_EXPERIMENTAL_NS_BEGIN
++CV__DNN_INLINE_NS_BEGIN
+ bool isMyriadX();
 -CV__DNN_EXPERIMENTAL_NS_END
++CV__DNN_INLINE_NS_END
  #endif  // HAVE_INF_ENGINE
  
  bool haveInfEngine();
diff --cc modules/dnn/src/tensorflow/tf_graph_simplifier.cpp
Simple merge
diff --cc modules/dnn/src/tensorflow/tf_importer.cpp
Simple merge
diff --cc modules/dnn/test/test_backends.cpp
Simple merge
diff --cc modules/dnn/test/test_common.cpp
index 0000000,88974e3..023faf5
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,284 +1,293 @@@
+ // This file is part of OpenCV project.
+ // It is subject to the license terms in the LICENSE file found in the top-level directory
+ // of this distribution and at http://opencv.org/license.html.
+ // Used in perf tests too, disabled: #include "test_precomp.hpp"
+ #include "opencv2/ts.hpp"
+ #include "opencv2/ts/ts_perf.hpp"
+ #include "opencv2/core/utility.hpp"
+ #include "opencv2/core/ocl.hpp"
+ #include "opencv2/dnn.hpp"
+ #include "test_common.hpp"
+ #include <opencv2/core/utils/configuration.private.hpp>
+ #include <opencv2/core/utils/logger.hpp>
+ namespace cv { namespace dnn {
 -CV__DNN_EXPERIMENTAL_NS_BEGIN
++CV__DNN_INLINE_NS_BEGIN
+ void PrintTo(const cv::dnn::Backend& v, std::ostream* os)
+ {
+     switch (v) {
+     case DNN_BACKEND_DEFAULT: *os << "DEFAULT"; return;
+     case DNN_BACKEND_HALIDE: *os << "HALIDE"; return;
+     case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE"; return;
+     case DNN_BACKEND_OPENCV: *os << "OCV"; return;
++    case DNN_BACKEND_VKCOM: *os << "VKCOM"; return;
+     } // intentionally no "default:" so the compiler warns on unhandled enum values
+     *os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")";
+ }
+ void PrintTo(const cv::dnn::Target& v, std::ostream* os)
+ {
+     switch (v) {
+     case DNN_TARGET_CPU: *os << "CPU"; return;
+     case DNN_TARGET_OPENCL: *os << "OCL"; return;
+     case DNN_TARGET_OPENCL_FP16: *os << "OCL_FP16"; return;
+     case DNN_TARGET_MYRIAD: *os << "MYRIAD"; return;
++    case DNN_TARGET_VULKAN: *os << "VULKAN"; return;
+     case DNN_TARGET_FPGA: *os << "FPGA"; return;
+     } // intentionally no "default:" so the compiler warns on unhandled enum values
+     *os << "DNN_TARGET_UNKNOWN(" << (int)v << ")";
+ }
+ void PrintTo(const tuple<cv::dnn::Backend, cv::dnn::Target> v, std::ostream* os)
+ {
+     PrintTo(get<0>(v), os);
+     *os << "/";
+     PrintTo(get<1>(v), os);
+ }
 -CV__DNN_EXPERIMENTAL_NS_END
++CV__DNN_INLINE_NS_END
+ }} // namespace
+ namespace opencv_test {
+ void normAssert(
+         cv::InputArray ref, cv::InputArray test, const char *comment /*= ""*/,
+         double l1 /*= 0.00001*/, double lInf /*= 0.0001*/)
+ {
+     double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
+     EXPECT_LE(normL1, l1) << comment;
+     double normInf = cvtest::norm(ref, test, cv::NORM_INF);
+     EXPECT_LE(normInf, lInf) << comment;
+ }
+ std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m)
+ {
+     EXPECT_EQ(m.type(), CV_32FC1);
+     EXPECT_EQ(m.dims, 2);
+     EXPECT_EQ(m.cols, 4);
+     std::vector<cv::Rect2d> boxes(m.rows);
+     for (int i = 0; i < m.rows; ++i)
+     {
+         CV_Assert(m.row(i).isContinuous());
+         const float* data = m.ptr<float>(i);
+         double l = data[0], t = data[1], r = data[2], b = data[3];
+         boxes[i] = cv::Rect2d(l, t, r - l, b - t);
+     }
+     return boxes;
+ }
+ void normAssertDetections(
+         const std::vector<int>& refClassIds,
+         const std::vector<float>& refScores,
+         const std::vector<cv::Rect2d>& refBoxes,
+         const std::vector<int>& testClassIds,
+         const std::vector<float>& testScores,
+         const std::vector<cv::Rect2d>& testBoxes,
+         const char *comment /*= ""*/, double confThreshold /*= 0.0*/,
+         double scores_diff /*= 1e-5*/, double boxes_iou_diff /*= 1e-4*/)
+ {
+     std::vector<bool> matchedRefBoxes(refBoxes.size(), false);
+     for (int i = 0; i < testBoxes.size(); ++i)
+     {
+         double testScore = testScores[i];
+         if (testScore < confThreshold)
+             continue;
+         int testClassId = testClassIds[i];
+         const cv::Rect2d& testBox = testBoxes[i];
+         bool matched = false;
+         for (int j = 0; j < refBoxes.size() && !matched; ++j)
+         {
+             if (!matchedRefBoxes[j] && testClassId == refClassIds[j] &&
+                 std::abs(testScore - refScores[j]) < scores_diff)
+             {
+                 double interArea = (testBox & refBoxes[j]).area();
+                 double iou = interArea / (testBox.area() + refBoxes[j].area() - interArea);
+                 if (std::abs(iou - 1.0) < boxes_iou_diff)
+                 {
+                     matched = true;
+                     matchedRefBoxes[j] = true;
+                 }
+             }
+         }
+         if (!matched)
+             std::cout << cv::format("Unmatched prediction: class %d score %f box ",
+                                     testClassId, testScore) << testBox << std::endl;
+         EXPECT_TRUE(matched) << comment;
+     }
+     // Check unmatched reference detections.
+     for (int i = 0; i < refBoxes.size(); ++i)
+     {
+         if (!matchedRefBoxes[i] && refScores[i] > confThreshold)
+         {
+             std::cout << cv::format("Unmatched reference: class %d score %f box ",
+                                     refClassIds[i], refScores[i]) << refBoxes[i] << std::endl;
+             EXPECT_LE(refScores[i], confThreshold) << comment;
+         }
+     }
+ }
+ // For SSD-based object detection networks, which produce output of shape 1x1xNx7
+ // where N is the number of detections and every detection is represented by
+ // the vector [batchId, classId, confidence, left, top, right, bottom].
+ void normAssertDetections(
+         cv::Mat ref, cv::Mat out, const char *comment /*= ""*/,
+         double confThreshold /*= 0.0*/, double scores_diff /*= 1e-5*/,
+         double boxes_iou_diff /*= 1e-4*/)
+ {
+     CV_Assert(ref.total() % 7 == 0);
+     CV_Assert(out.total() % 7 == 0);
+     ref = ref.reshape(1, ref.total() / 7);
+     out = out.reshape(1, out.total() / 7);
+     cv::Mat refClassIds, testClassIds;
+     ref.col(1).convertTo(refClassIds, CV_32SC1);
+     out.col(1).convertTo(testClassIds, CV_32SC1);
+     std::vector<float> refScores(ref.col(2)), testScores(out.col(2));
+     std::vector<cv::Rect2d> refBoxes = matToBoxes(ref.colRange(3, 7));
+     std::vector<cv::Rect2d> testBoxes = matToBoxes(out.colRange(3, 7));
+     normAssertDetections(refClassIds, refScores, refBoxes, testClassIds, testScores,
+                          testBoxes, comment, confThreshold, scores_diff, boxes_iou_diff);
+ }
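[Editor's note] To make the 1x1xNx7 layout described above concrete, here is a hedged toy call of the overload just defined (values invented; runs inside this test context):

    // One fake detection; ref and out are identical, so the assertion passes.
    // Row layout: [batchId, classId, confidence, left, top, right, bottom]
    float row[7] = {0.f, 15.f, 0.9f, 0.1f, 0.2f, 0.5f, 0.6f};
    cv::Mat ref(1, 7, CV_32FC1, row);   // N = 1 detection
    cv::Mat out = ref.clone();
    normAssertDetections(ref, out, "toy example");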
+ bool readFileInMemory(const std::string& filename, std::string& content)
+ {
+     std::ios::openmode mode = std::ios::in | std::ios::binary;
+     std::ifstream ifs(filename.c_str(), mode);
+     if (!ifs.is_open())
+         return false;
+     content.clear();
+     ifs.seekg(0, std::ios::end);
+     content.reserve(ifs.tellg());
+     ifs.seekg(0, std::ios::beg);
+     content.assign((std::istreambuf_iterator<char>(ifs)),
+                    std::istreambuf_iterator<char>());
+     return true;
+ }
+ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets(
+         bool withInferenceEngine /*= true*/,
+         bool withHalide /*= false*/,
 -        bool withCpuOCV /*= true*/
++        bool withCpuOCV /*= true*/,
++        bool withVkCom /*= true*/
+ )
+ {
+ #ifdef HAVE_INF_ENGINE
+     bool withVPU = validateVPUType();
+ #endif
+     std::vector< tuple<Backend, Target> > targets;
+     std::vector< Target > available;
+     if (withHalide)
+     {
+         available = getAvailableTargets(DNN_BACKEND_HALIDE);
+         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+             targets.push_back(make_tuple(DNN_BACKEND_HALIDE, *i));
+     }
+ #ifdef HAVE_INF_ENGINE
+     if (withInferenceEngine)
+     {
+         available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
+         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+         {
+             if (*i == DNN_TARGET_MYRIAD && !withVPU)
+                 continue;
+             targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, *i));
+         }
+     }
+ #else
+     CV_UNUSED(withInferenceEngine);
+ #endif
++    if (withVkCom)
++    {
++        available = getAvailableTargets(DNN_BACKEND_VKCOM);
++        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
++            targets.push_back(make_tuple(DNN_BACKEND_VKCOM, *i));
++    }
+     {
+         available = getAvailableTargets(DNN_BACKEND_OPENCV);
+         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+         {
+             if (!withCpuOCV && *i == DNN_TARGET_CPU)
+                 continue;
+             targets.push_back(make_tuple(DNN_BACKEND_OPENCV, *i));
+         }
+     }
+     if (targets.empty())  // validate at least CPU mode
+         targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
+     return testing::ValuesIn(targets);
+ }
+ #ifdef HAVE_INF_ENGINE
+ static std::string getTestInferenceEngineVPUType()
+ {
+     static std::string param_vpu_type = utils::getConfigurationParameterString("OPENCV_TEST_DNN_IE_VPU_TYPE", "");
+     return param_vpu_type;
+ }
+ static bool validateVPUType_()
+ {
+     std::string test_vpu_type = getTestInferenceEngineVPUType();
+     if (test_vpu_type == "DISABLED" || test_vpu_type == "disabled")
+     {
+         return false;
+     }
+     std::vector<Target> available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
+     bool have_vpu_target = false;
+     for (std::vector<Target>::const_iterator i = available.begin(); i != available.end(); ++i)
+     {
+         if (*i == DNN_TARGET_MYRIAD)
+         {
+             have_vpu_target = true;
+             break;
+         }
+     }
+     if (test_vpu_type.empty())
+     {
+         if (have_vpu_target)
+         {
+             CV_LOG_INFO(NULL, "OpenCV-DNN-Test: VPU type for testing is not specified via 'OPENCV_TEST_DNN_IE_VPU_TYPE' parameter.")
+         }
+     }
+     else
+     {
+         if (!have_vpu_target)
+         {
+             CV_LOG_FATAL(NULL, "OpenCV-DNN-Test: 'OPENCV_TEST_DNN_IE_VPU_TYPE' parameter requires VPU of type = '" << test_vpu_type << "', but VPU is not detected. STOP.");
+             exit(1);
+         }
+         std::string dnn_vpu_type = getInferenceEngineVPUType();
+         if (dnn_vpu_type != test_vpu_type)
+         {
+             CV_LOG_FATAL(NULL, "OpenCV-DNN-Test: 'testing' and 'detected' VPU types mismatch: '" << test_vpu_type << "' vs '" << dnn_vpu_type << "'. STOP.");
+             exit(1);
+         }
+     }
+     return true;
+ }
+ bool validateVPUType()
+ {
+     static bool result = validateVPUType_();
+     return result;
+ }
+ #endif // HAVE_INF_ENGINE
+ } // namespace
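[Editor's note] For context, dnnBackendsAndTargets() above is normally consumed through GTest's parameterized tests; a hedged sketch with an illustrative fixture name (DNNExampleTest is not a real OpenCV test):

    using namespace cv::dnn;

    typedef testing::TestWithParam< tuple<Backend, Target> > DNNExampleTest;

    TEST_P(DNNExampleTest, SmokeRun)
    {
        Backend backend = get<0>(GetParam());
        Target target = get<1>(GetParam());
        // ... build a Net, set the backend/target, run forward(), normAssert() ...
        CV_UNUSED(backend); CV_UNUSED(target);
    }

    INSTANTIATE_TEST_CASE_P(/**/, DNNExampleTest, dnnBackendsAndTargets());

Myriad coverage in the generated list is additionally gated by validateVPUType(), which honors the 'OPENCV_TEST_DNN_IE_VPU_TYPE' parameter handled above.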
  #include "opencv2/core/ocl.hpp"
  #endif
  
  namespace cv { namespace dnn {
 -CV__DNN_EXPERIMENTAL_NS_BEGIN
 +CV__DNN_INLINE_NS_BEGIN
- static inline void PrintTo(const cv::dnn::Backend& v, std::ostream* os)
- {
-     switch (v) {
-     case DNN_BACKEND_DEFAULT: *os << "DEFAULT"; return;
-     case DNN_BACKEND_HALIDE: *os << "HALIDE"; return;
-     case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE"; return;
-     case DNN_BACKEND_OPENCV: *os << "OCV"; return;
-     case DNN_BACKEND_VKCOM: *os << "VKCOM"; return;
-     } // don't use "default:" to emit compiler warnings
-     *os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")";
- }
- static inline void PrintTo(const cv::dnn::Target& v, std::ostream* os)
- {
-     switch (v) {
-     case DNN_TARGET_CPU: *os << "CPU"; return;
-     case DNN_TARGET_OPENCL: *os << "OCL"; return;
-     case DNN_TARGET_OPENCL_FP16: *os << "OCL_FP16"; return;
-     case DNN_TARGET_MYRIAD: *os << "MYRIAD"; return;
-     case DNN_TARGET_VULKAN: *os << "VULKAN"; return;
-     case DNN_TARGET_FPGA: *os << "FPGA"; return;
-     } // don't use "default:" to emit compiler warnings
-     *os << "DNN_TARGET_UNKNOWN(" << (int)v << ")";
- }
  
+ void PrintTo(const cv::dnn::Backend& v, std::ostream* os);
+ void PrintTo(const cv::dnn::Target& v, std::ostream* os);
  using opencv_test::tuple;
  using opencv_test::get;
- static inline void PrintTo(const tuple<cv::dnn::Backend, cv::dnn::Target> v, std::ostream* os)
- {
-     PrintTo(get<0>(v), os);
-     *os << "/";
-     PrintTo(get<1>(v), os);
- }
+ void PrintTo(const tuple<cv::dnn::Backend, cv::dnn::Target> v, std::ostream* os);
  
 -CV__DNN_EXPERIMENTAL_NS_END
 +CV__DNN_INLINE_NS_END
- }} // namespace
+ }} // namespace cv::dnn
  
  
- static inline const std::string &getOpenCVExtraDir()
- {
-     return cvtest::TS::ptr()->get_data_path();
- }
  
- static inline void normAssert(cv::InputArray ref, cv::InputArray test, const char *comment = "",
-                        double l1 = 0.00001, double lInf = 0.0001)
- {
-     double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
-     EXPECT_LE(normL1, l1) << comment;
+ namespace opencv_test {
  
-     double normInf = cvtest::norm(ref, test, cv::NORM_INF);
-     EXPECT_LE(normInf, lInf) << comment;
- }
+ using namespace cv::dnn;
  
- static std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m)
+ static inline const std::string &getOpenCVExtraDir()
  {
-     EXPECT_EQ(m.type(), CV_32FC1);
-     EXPECT_EQ(m.dims, 2);
-     EXPECT_EQ(m.cols, 4);
-     std::vector<cv::Rect2d> boxes(m.rows);
-     for (int i = 0; i < m.rows; ++i)
-     {
-         CV_Assert(m.row(i).isContinuous());
-         const float* data = m.ptr<float>(i);
-         double l = data[0], t = data[1], r = data[2], b = data[3];
-         boxes[i] = cv::Rect2d(l, t, r - l, b - t);
-     }
-     return boxes;
+     return cvtest::TS::ptr()->get_data_path();
  }
  
- static inline void normAssertDetections(const std::vector<int>& refClassIds,
-                                  const std::vector<float>& refScores,
-                                  const std::vector<cv::Rect2d>& refBoxes,
-                                  const std::vector<int>& testClassIds,
-                                  const std::vector<float>& testScores,
-                                  const std::vector<cv::Rect2d>& testBoxes,
-                                  const char *comment = "", double confThreshold = 0.0,
-                                  double scores_diff = 1e-5, double boxes_iou_diff = 1e-4)
- {
-     std::vector<bool> matchedRefBoxes(refBoxes.size(), false);
-     for (int i = 0; i < testBoxes.size(); ++i)
-     {
-         double testScore = testScores[i];
-         if (testScore < confThreshold)
-             continue;
+ void normAssert(
+         cv::InputArray ref, cv::InputArray test, const char *comment = "",
+         double l1 = 0.00001, double lInf = 0.0001);
  
-         int testClassId = testClassIds[i];
-         const cv::Rect2d& testBox = testBoxes[i];
-         bool matched = false;
-         for (int j = 0; j < refBoxes.size() && !matched; ++j)
-         {
-             if (!matchedRefBoxes[j] && testClassId == refClassIds[j] &&
-                 std::abs(testScore - refScores[j]) < scores_diff)
-             {
-                 double interArea = (testBox & refBoxes[j]).area();
-                 double iou = interArea / (testBox.area() + refBoxes[j].area() - interArea);
-                 if (std::abs(iou - 1.0) < boxes_iou_diff)
-                 {
-                     matched = true;
-                     matchedRefBoxes[j] = true;
-                 }
-             }
-         }
-         if (!matched)
-             std::cout << cv::format("Unmatched prediction: class %d score %f box ",
-                                     testClassId, testScore) << testBox << std::endl;
-         EXPECT_TRUE(matched) << comment;
-     }
+ std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m);
  
-     // Check unmatched reference detections.
-     for (int i = 0; i < refBoxes.size(); ++i)
-     {
-         if (!matchedRefBoxes[i] && refScores[i] > confThreshold)
-         {
-             std::cout << cv::format("Unmatched reference: class %d score %f box ",
-                                     refClassIds[i], refScores[i]) << refBoxes[i] << std::endl;
-             EXPECT_LE(refScores[i], confThreshold) << comment;
-         }
-     }
- }
+ void normAssertDetections(
+         const std::vector<int>& refClassIds,
+         const std::vector<float>& refScores,
+         const std::vector<cv::Rect2d>& refBoxes,
+         const std::vector<int>& testClassIds,
+         const std::vector<float>& testScores,
+         const std::vector<cv::Rect2d>& testBoxes,
+         const char *comment = "", double confThreshold = 0.0,
+         double scores_diff = 1e-5, double boxes_iou_diff = 1e-4);
  
  // For SSD-based object detection networks, which produce output of shape 1x1xNx7
  // where N is the number of detections and every detection is represented by
@@@ -217,50 -68,9 +68,10 @@@ bool validateVPUType()
  testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets(
          bool withInferenceEngine = true,
          bool withHalide = false,
 -        bool withCpuOCV = true
 +        bool withCpuOCV = true,
 +        bool withVkCom = true
- )
- {
-     std::vector< tuple<Backend, Target> > targets;
-     std::vector< Target > available;
-     if (withHalide)
-     {
-         available = getAvailableTargets(DNN_BACKEND_HALIDE);
-         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
-             targets.push_back(make_tuple(DNN_BACKEND_HALIDE, *i));
-     }
-     if (withInferenceEngine)
-     {
-         available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
-         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
-             targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, *i));
-     }
-     if (withVkCom)
-     {
-         available = getAvailableTargets(DNN_BACKEND_VKCOM);
-         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
-             targets.push_back(make_tuple(DNN_BACKEND_VKCOM, *i));
-     }
-     {
-         available = getAvailableTargets(DNN_BACKEND_OPENCV);
-         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
-         {
-             if (!withCpuOCV && *i == DNN_TARGET_CPU)
-                 continue;
-             targets.push_back(make_tuple(DNN_BACKEND_OPENCV, *i));
-         }
-     }
-     if (targets.empty())  // validate at least CPU mode
-         targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
-     return testing::ValuesIn(targets);
- }
+ );
  
- } // namespace
- namespace opencv_test {
- using namespace cv::dnn;
  
  class DNNTestLayer : public TestWithParam<tuple<Backend, Target> >
  {