cmake: fix build of dnn tests with shared common code
author Alexander Alekhin <alexander.a.alekhin@gmail.com>
Sun, 31 Mar 2019 08:51:09 +0000 (08:51 +0000)
committer Alexander Alekhin <alexander.a.alekhin@gmail.com>
Sun, 31 Mar 2019 08:52:25 +0000 (08:52 +0000)
- don't share .cpp files (PCH support is broken)
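
A quick sketch of the layout this change sets up (only a restatement of the diff below, nothing beyond what the commit itself adds): the shared implementation moves into test_common.impl.hpp, and each test module wraps it in its own small .cpp translation unit, so every test target keeps using its own precompiled header.

    // modules/dnn/test/test_common.cpp -- accuracy tests
    #include "test_precomp.hpp"              // accuracy tests' own PCH
    #include "test_common.impl.hpp"          // shared implementation

    // modules/dnn/perf/perf_common.cpp -- perf tests
    #include "perf_precomp.hpp"              // perf tests' own PCH
    #include "../test/test_common.impl.hpp"  // same shared implementation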

modules/dnn/CMakeLists.txt
modules/dnn/perf/perf_common.cpp [new file with mode: 0644]
modules/dnn/test/test_common.cpp
modules/dnn/test/test_common.impl.hpp [new file with mode: 0644]

diff --git a/modules/dnn/CMakeLists.txt b/modules/dnn/CMakeLists.txt
index 2cc0851..7682257 100644
--- a/modules/dnn/CMakeLists.txt
+++ b/modules/dnn/CMakeLists.txt
@@ -94,14 +94,10 @@ set(perf_path "${CMAKE_CURRENT_LIST_DIR}/perf")
 file(GLOB_RECURSE perf_srcs "${perf_path}/*.cpp")
 file(GLOB_RECURSE perf_hdrs "${perf_path}/*.hpp" "${perf_path}/*.h")
 ocv_add_perf_tests(${INF_ENGINE_TARGET}
-    FILES test_common "${CMAKE_CURRENT_LIST_DIR}/test/test_common.cpp"
+    FILES test_common "${CMAKE_CURRENT_LIST_DIR}/test/test_common.hpp" "${CMAKE_CURRENT_LIST_DIR}/test/test_common.impl.hpp"
     FILES Src ${perf_srcs}
     FILES Include ${perf_hdrs}
 )
-set_property(
-  SOURCE "${CMAKE_CURRENT_LIST_DIR}/test/test_common.cpp"
-  PROPERTY COMPILE_DEFINITIONS "__OPENCV_TESTS=1"
-)
 
 ocv_option(${the_module}_PERF_CAFFE "Add performance tests of Caffe framework" OFF)
 ocv_option(${the_module}_PERF_CLCAFFE "Add performance tests of clCaffe framework" OFF)
diff --git a/modules/dnn/perf/perf_common.cpp b/modules/dnn/perf/perf_common.cpp
new file mode 100644
index 0000000..13db0fe
--- /dev/null
+++ b/modules/dnn/perf/perf_common.cpp
@@ -0,0 +1,6 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+#include "perf_precomp.hpp"
+#include "../test/test_common.impl.hpp"  // shared with accuracy tests
diff --git a/modules/dnn/test/test_common.cpp b/modules/dnn/test/test_common.cpp
index 88974e3..a2ea6ea 100644
--- a/modules/dnn/test/test_common.cpp
+++ b/modules/dnn/test/test_common.cpp
@@ -2,283 +2,5 @@
 // It is subject to the license terms in the LICENSE file found in the top-level directory
 // of this distribution and at http://opencv.org/license.html.
 
-// Used in perf tests too, disabled: #include "test_precomp.hpp"
-#include "opencv2/ts.hpp"
-#include "opencv2/ts/ts_perf.hpp"
-#include "opencv2/core/utility.hpp"
-#include "opencv2/core/ocl.hpp"
-
-#include "opencv2/dnn.hpp"
-#include "test_common.hpp"
-
-#include <opencv2/core/utils/configuration.private.hpp>
-#include <opencv2/core/utils/logger.hpp>
-
-namespace cv { namespace dnn {
-CV__DNN_EXPERIMENTAL_NS_BEGIN
-
-void PrintTo(const cv::dnn::Backend& v, std::ostream* os)
-{
-    switch (v) {
-    case DNN_BACKEND_DEFAULT: *os << "DEFAULT"; return;
-    case DNN_BACKEND_HALIDE: *os << "HALIDE"; return;
-    case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE"; return;
-    case DNN_BACKEND_OPENCV: *os << "OCV"; return;
-    } // don't use "default:" to emit compiler warnings
-    *os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")";
-}
-
-void PrintTo(const cv::dnn::Target& v, std::ostream* os)
-{
-    switch (v) {
-    case DNN_TARGET_CPU: *os << "CPU"; return;
-    case DNN_TARGET_OPENCL: *os << "OCL"; return;
-    case DNN_TARGET_OPENCL_FP16: *os << "OCL_FP16"; return;
-    case DNN_TARGET_MYRIAD: *os << "MYRIAD"; return;
-    case DNN_TARGET_FPGA: *os << "FPGA"; return;
-    } // don't use "default:" to emit compiler warnings
-    *os << "DNN_TARGET_UNKNOWN(" << (int)v << ")";
-}
-
-void PrintTo(const tuple<cv::dnn::Backend, cv::dnn::Target> v, std::ostream* os)
-{
-    PrintTo(get<0>(v), os);
-    *os << "/";
-    PrintTo(get<1>(v), os);
-}
-
-CV__DNN_EXPERIMENTAL_NS_END
-}} // namespace
-
-
-
-namespace opencv_test {
-
-void normAssert(
-        cv::InputArray ref, cv::InputArray test, const char *comment /*= ""*/,
-        double l1 /*= 0.00001*/, double lInf /*= 0.0001*/)
-{
-    double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
-    EXPECT_LE(normL1, l1) << comment;
-
-    double normInf = cvtest::norm(ref, test, cv::NORM_INF);
-    EXPECT_LE(normInf, lInf) << comment;
-}
-
-std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m)
-{
-    EXPECT_EQ(m.type(), CV_32FC1);
-    EXPECT_EQ(m.dims, 2);
-    EXPECT_EQ(m.cols, 4);
-
-    std::vector<cv::Rect2d> boxes(m.rows);
-    for (int i = 0; i < m.rows; ++i)
-    {
-        CV_Assert(m.row(i).isContinuous());
-        const float* data = m.ptr<float>(i);
-        double l = data[0], t = data[1], r = data[2], b = data[3];
-        boxes[i] = cv::Rect2d(l, t, r - l, b - t);
-    }
-    return boxes;
-}
-
-void normAssertDetections(
-        const std::vector<int>& refClassIds,
-        const std::vector<float>& refScores,
-        const std::vector<cv::Rect2d>& refBoxes,
-        const std::vector<int>& testClassIds,
-        const std::vector<float>& testScores,
-        const std::vector<cv::Rect2d>& testBoxes,
-        const char *comment /*= ""*/, double confThreshold /*= 0.0*/,
-        double scores_diff /*= 1e-5*/, double boxes_iou_diff /*= 1e-4*/)
-{
-    std::vector<bool> matchedRefBoxes(refBoxes.size(), false);
-    for (int i = 0; i < testBoxes.size(); ++i)
-    {
-        double testScore = testScores[i];
-        if (testScore < confThreshold)
-            continue;
-
-        int testClassId = testClassIds[i];
-        const cv::Rect2d& testBox = testBoxes[i];
-        bool matched = false;
-        for (int j = 0; j < refBoxes.size() && !matched; ++j)
-        {
-            if (!matchedRefBoxes[j] && testClassId == refClassIds[j] &&
-                std::abs(testScore - refScores[j]) < scores_diff)
-            {
-                double interArea = (testBox & refBoxes[j]).area();
-                double iou = interArea / (testBox.area() + refBoxes[j].area() - interArea);
-                if (std::abs(iou - 1.0) < boxes_iou_diff)
-                {
-                    matched = true;
-                    matchedRefBoxes[j] = true;
-                }
-            }
-        }
-        if (!matched)
-            std::cout << cv::format("Unmatched prediction: class %d score %f box ",
-                                    testClassId, testScore) << testBox << std::endl;
-        EXPECT_TRUE(matched) << comment;
-    }
-
-    // Check unmatched reference detections.
-    for (int i = 0; i < refBoxes.size(); ++i)
-    {
-        if (!matchedRefBoxes[i] && refScores[i] > confThreshold)
-        {
-            std::cout << cv::format("Unmatched reference: class %d score %f box ",
-                                    refClassIds[i], refScores[i]) << refBoxes[i] << std::endl;
-            EXPECT_LE(refScores[i], confThreshold) << comment;
-        }
-    }
-}
-
-// For SSD-based object detection networks which produce output of shape 1x1xNx7
-// where N is a number of detections and an every detection is represented by
-// a vector [batchId, classId, confidence, left, top, right, bottom].
-void normAssertDetections(
-        cv::Mat ref, cv::Mat out, const char *comment /*= ""*/,
-        double confThreshold /*= 0.0*/, double scores_diff /*= 1e-5*/,
-        double boxes_iou_diff /*= 1e-4*/)
-{
-    CV_Assert(ref.total() % 7 == 0);
-    CV_Assert(out.total() % 7 == 0);
-    ref = ref.reshape(1, ref.total() / 7);
-    out = out.reshape(1, out.total() / 7);
-
-    cv::Mat refClassIds, testClassIds;
-    ref.col(1).convertTo(refClassIds, CV_32SC1);
-    out.col(1).convertTo(testClassIds, CV_32SC1);
-    std::vector<float> refScores(ref.col(2)), testScores(out.col(2));
-    std::vector<cv::Rect2d> refBoxes = matToBoxes(ref.colRange(3, 7));
-    std::vector<cv::Rect2d> testBoxes = matToBoxes(out.colRange(3, 7));
-    normAssertDetections(refClassIds, refScores, refBoxes, testClassIds, testScores,
-                         testBoxes, comment, confThreshold, scores_diff, boxes_iou_diff);
-}
-
-bool readFileInMemory(const std::string& filename, std::string& content)
-{
-    std::ios::openmode mode = std::ios::in | std::ios::binary;
-    std::ifstream ifs(filename.c_str(), mode);
-    if (!ifs.is_open())
-        return false;
-
-    content.clear();
-
-    ifs.seekg(0, std::ios::end);
-    content.reserve(ifs.tellg());
-    ifs.seekg(0, std::ios::beg);
-
-    content.assign((std::istreambuf_iterator<char>(ifs)),
-                   std::istreambuf_iterator<char>());
-
-    return true;
-}
-
-
-testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets(
-        bool withInferenceEngine /*= true*/,
-        bool withHalide /*= false*/,
-        bool withCpuOCV /*= true*/
-)
-{
-#ifdef HAVE_INF_ENGINE
-    bool withVPU = validateVPUType();
-#endif
-
-    std::vector< tuple<Backend, Target> > targets;
-    std::vector< Target > available;
-    if (withHalide)
-    {
-        available = getAvailableTargets(DNN_BACKEND_HALIDE);
-        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
-            targets.push_back(make_tuple(DNN_BACKEND_HALIDE, *i));
-    }
-#ifdef HAVE_INF_ENGINE
-    if (withInferenceEngine)
-    {
-        available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
-        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
-        {
-            if (*i == DNN_TARGET_MYRIAD && !withVPU)
-                continue;
-            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, *i));
-        }
-    }
-#else
-    CV_UNUSED(withInferenceEngine);
-#endif
-    {
-        available = getAvailableTargets(DNN_BACKEND_OPENCV);
-        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
-        {
-            if (!withCpuOCV && *i == DNN_TARGET_CPU)
-                continue;
-            targets.push_back(make_tuple(DNN_BACKEND_OPENCV, *i));
-        }
-    }
-    if (targets.empty())  // validate at least CPU mode
-        targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
-    return testing::ValuesIn(targets);
-}
-
-
-#ifdef HAVE_INF_ENGINE
-static std::string getTestInferenceEngineVPUType()
-{
-    static std::string param_vpu_type = utils::getConfigurationParameterString("OPENCV_TEST_DNN_IE_VPU_TYPE", "");
-    return param_vpu_type;
-}
-
-static bool validateVPUType_()
-{
-    std::string test_vpu_type = getTestInferenceEngineVPUType();
-    if (test_vpu_type == "DISABLED" || test_vpu_type == "disabled")
-    {
-        return false;
-    }
-
-    std::vector<Target> available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
-    bool have_vpu_target = false;
-    for (std::vector<Target>::const_iterator i = available.begin(); i != available.end(); ++i)
-    {
-        if (*i == DNN_TARGET_MYRIAD)
-        {
-            have_vpu_target = true;
-            break;
-        }
-    }
-
-    if (test_vpu_type.empty())
-    {
-        if (have_vpu_target)
-        {
-            CV_LOG_INFO(NULL, "OpenCV-DNN-Test: VPU type for testing is not specified via 'OPENCV_TEST_DNN_IE_VPU_TYPE' parameter.")
-        }
-    }
-    else
-    {
-        if (!have_vpu_target)
-        {
-            CV_LOG_FATAL(NULL, "OpenCV-DNN-Test: 'OPENCV_TEST_DNN_IE_VPU_TYPE' parameter requires VPU of type = '" << test_vpu_type << "', but VPU is not detected. STOP.");
-            exit(1);
-        }
-        std::string dnn_vpu_type = getInferenceEngineVPUType();
-        if (dnn_vpu_type != test_vpu_type)
-        {
-            CV_LOG_FATAL(NULL, "OpenCV-DNN-Test: 'testing' and 'detected' VPU types mismatch: '" << test_vpu_type << "' vs '" << dnn_vpu_type << "'. STOP.");
-            exit(1);
-        }
-    }
-    return true;
-}
-
-bool validateVPUType()
-{
-    static bool result = validateVPUType_();
-    return result;
-}
-#endif // HAVE_INF_ENGINE
-
-} // namespace
+#include "test_precomp.hpp"
+#include "test_common.impl.hpp"  // shared with perf tests
diff --git a/modules/dnn/test/test_common.impl.hpp b/modules/dnn/test/test_common.impl.hpp
new file mode 100644
index 0000000..51c1c5e
--- /dev/null
+++ b/modules/dnn/test/test_common.impl.hpp
@@ -0,0 +1,285 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+
+// Used in accuracy and perf tests as a content of .cpp file
+// Note: don't use "precomp.hpp" here
+#include "opencv2/ts.hpp"
+#include "opencv2/ts/ts_perf.hpp"
+#include "opencv2/core/utility.hpp"
+#include "opencv2/core/ocl.hpp"
+
+#include "opencv2/dnn.hpp"
+#include "test_common.hpp"
+
+#include <opencv2/core/utils/configuration.private.hpp>
+#include <opencv2/core/utils/logger.hpp>
+
+namespace cv { namespace dnn {
+CV__DNN_EXPERIMENTAL_NS_BEGIN
+
+void PrintTo(const cv::dnn::Backend& v, std::ostream* os)
+{
+    switch (v) {
+    case DNN_BACKEND_DEFAULT: *os << "DEFAULT"; return;
+    case DNN_BACKEND_HALIDE: *os << "HALIDE"; return;
+    case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE"; return;
+    case DNN_BACKEND_OPENCV: *os << "OCV"; return;
+    } // don't use "default:" to emit compiler warnings
+    *os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")";
+}
+
+void PrintTo(const cv::dnn::Target& v, std::ostream* os)
+{
+    switch (v) {
+    case DNN_TARGET_CPU: *os << "CPU"; return;
+    case DNN_TARGET_OPENCL: *os << "OCL"; return;
+    case DNN_TARGET_OPENCL_FP16: *os << "OCL_FP16"; return;
+    case DNN_TARGET_MYRIAD: *os << "MYRIAD"; return;
+    case DNN_TARGET_FPGA: *os << "FPGA"; return;
+    } // don't use "default:" to emit compiler warnings
+    *os << "DNN_TARGET_UNKNOWN(" << (int)v << ")";
+}
+
+void PrintTo(const tuple<cv::dnn::Backend, cv::dnn::Target> v, std::ostream* os)
+{
+    PrintTo(get<0>(v), os);
+    *os << "/";
+    PrintTo(get<1>(v), os);
+}
+
+CV__DNN_EXPERIMENTAL_NS_END
+}} // namespace
+
+
+
+namespace opencv_test {
+
+void normAssert(
+        cv::InputArray ref, cv::InputArray test, const char *comment /*= ""*/,
+        double l1 /*= 0.00001*/, double lInf /*= 0.0001*/)
+{
+    double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
+    EXPECT_LE(normL1, l1) << comment;
+
+    double normInf = cvtest::norm(ref, test, cv::NORM_INF);
+    EXPECT_LE(normInf, lInf) << comment;
+}
+
+std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m)
+{
+    EXPECT_EQ(m.type(), CV_32FC1);
+    EXPECT_EQ(m.dims, 2);
+    EXPECT_EQ(m.cols, 4);
+
+    std::vector<cv::Rect2d> boxes(m.rows);
+    for (int i = 0; i < m.rows; ++i)
+    {
+        CV_Assert(m.row(i).isContinuous());
+        const float* data = m.ptr<float>(i);
+        double l = data[0], t = data[1], r = data[2], b = data[3];
+        boxes[i] = cv::Rect2d(l, t, r - l, b - t);
+    }
+    return boxes;
+}
+
+void normAssertDetections(
+        const std::vector<int>& refClassIds,
+        const std::vector<float>& refScores,
+        const std::vector<cv::Rect2d>& refBoxes,
+        const std::vector<int>& testClassIds,
+        const std::vector<float>& testScores,
+        const std::vector<cv::Rect2d>& testBoxes,
+        const char *comment /*= ""*/, double confThreshold /*= 0.0*/,
+        double scores_diff /*= 1e-5*/, double boxes_iou_diff /*= 1e-4*/)
+{
+    std::vector<bool> matchedRefBoxes(refBoxes.size(), false);
+    for (int i = 0; i < testBoxes.size(); ++i)
+    {
+        double testScore = testScores[i];
+        if (testScore < confThreshold)
+            continue;
+
+        int testClassId = testClassIds[i];
+        const cv::Rect2d& testBox = testBoxes[i];
+        bool matched = false;
+        for (int j = 0; j < refBoxes.size() && !matched; ++j)
+        {
+            if (!matchedRefBoxes[j] && testClassId == refClassIds[j] &&
+                std::abs(testScore - refScores[j]) < scores_diff)
+            {
+                double interArea = (testBox & refBoxes[j]).area();
+                double iou = interArea / (testBox.area() + refBoxes[j].area() - interArea);
+                if (std::abs(iou - 1.0) < boxes_iou_diff)
+                {
+                    matched = true;
+                    matchedRefBoxes[j] = true;
+                }
+            }
+        }
+        if (!matched)
+            std::cout << cv::format("Unmatched prediction: class %d score %f box ",
+                                    testClassId, testScore) << testBox << std::endl;
+        EXPECT_TRUE(matched) << comment;
+    }
+
+    // Check unmatched reference detections.
+    for (int i = 0; i < refBoxes.size(); ++i)
+    {
+        if (!matchedRefBoxes[i] && refScores[i] > confThreshold)
+        {
+            std::cout << cv::format("Unmatched reference: class %d score %f box ",
+                                    refClassIds[i], refScores[i]) << refBoxes[i] << std::endl;
+            EXPECT_LE(refScores[i], confThreshold) << comment;
+        }
+    }
+}
+
+// For SSD-based object detection networks which produce output of shape 1x1xNx7
+// where N is a number of detections and an every detection is represented by
+// a vector [batchId, classId, confidence, left, top, right, bottom].
+void normAssertDetections(
+        cv::Mat ref, cv::Mat out, const char *comment /*= ""*/,
+        double confThreshold /*= 0.0*/, double scores_diff /*= 1e-5*/,
+        double boxes_iou_diff /*= 1e-4*/)
+{
+    CV_Assert(ref.total() % 7 == 0);
+    CV_Assert(out.total() % 7 == 0);
+    ref = ref.reshape(1, ref.total() / 7);
+    out = out.reshape(1, out.total() / 7);
+
+    cv::Mat refClassIds, testClassIds;
+    ref.col(1).convertTo(refClassIds, CV_32SC1);
+    out.col(1).convertTo(testClassIds, CV_32SC1);
+    std::vector<float> refScores(ref.col(2)), testScores(out.col(2));
+    std::vector<cv::Rect2d> refBoxes = matToBoxes(ref.colRange(3, 7));
+    std::vector<cv::Rect2d> testBoxes = matToBoxes(out.colRange(3, 7));
+    normAssertDetections(refClassIds, refScores, refBoxes, testClassIds, testScores,
+                         testBoxes, comment, confThreshold, scores_diff, boxes_iou_diff);
+}
+
+bool readFileInMemory(const std::string& filename, std::string& content)
+{
+    std::ios::openmode mode = std::ios::in | std::ios::binary;
+    std::ifstream ifs(filename.c_str(), mode);
+    if (!ifs.is_open())
+        return false;
+
+    content.clear();
+
+    ifs.seekg(0, std::ios::end);
+    content.reserve(ifs.tellg());
+    ifs.seekg(0, std::ios::beg);
+
+    content.assign((std::istreambuf_iterator<char>(ifs)),
+                   std::istreambuf_iterator<char>());
+
+    return true;
+}
+
+
+testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets(
+        bool withInferenceEngine /*= true*/,
+        bool withHalide /*= false*/,
+        bool withCpuOCV /*= true*/
+)
+{
+#ifdef HAVE_INF_ENGINE
+    bool withVPU = validateVPUType();
+#endif
+
+    std::vector< tuple<Backend, Target> > targets;
+    std::vector< Target > available;
+    if (withHalide)
+    {
+        available = getAvailableTargets(DNN_BACKEND_HALIDE);
+        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+            targets.push_back(make_tuple(DNN_BACKEND_HALIDE, *i));
+    }
+#ifdef HAVE_INF_ENGINE
+    if (withInferenceEngine)
+    {
+        available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
+        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+        {
+            if (*i == DNN_TARGET_MYRIAD && !withVPU)
+                continue;
+            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, *i));
+        }
+    }
+#else
+    CV_UNUSED(withInferenceEngine);
+#endif
+    {
+        available = getAvailableTargets(DNN_BACKEND_OPENCV);
+        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+        {
+            if (!withCpuOCV && *i == DNN_TARGET_CPU)
+                continue;
+            targets.push_back(make_tuple(DNN_BACKEND_OPENCV, *i));
+        }
+    }
+    if (targets.empty())  // validate at least CPU mode
+        targets.push_back(make_tuple(DNN_BACKEND_OPENCV, DNN_TARGET_CPU));
+    return testing::ValuesIn(targets);
+}
+
+
+#ifdef HAVE_INF_ENGINE
+static std::string getTestInferenceEngineVPUType()
+{
+    static std::string param_vpu_type = utils::getConfigurationParameterString("OPENCV_TEST_DNN_IE_VPU_TYPE", "");
+    return param_vpu_type;
+}
+
+static bool validateVPUType_()
+{
+    std::string test_vpu_type = getTestInferenceEngineVPUType();
+    if (test_vpu_type == "DISABLED" || test_vpu_type == "disabled")
+    {
+        return false;
+    }
+
+    std::vector<Target> available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
+    bool have_vpu_target = false;
+    for (std::vector<Target>::const_iterator i = available.begin(); i != available.end(); ++i)
+    {
+        if (*i == DNN_TARGET_MYRIAD)
+        {
+            have_vpu_target = true;
+            break;
+        }
+    }
+
+    if (test_vpu_type.empty())
+    {
+        if (have_vpu_target)
+        {
+            CV_LOG_INFO(NULL, "OpenCV-DNN-Test: VPU type for testing is not specified via 'OPENCV_TEST_DNN_IE_VPU_TYPE' parameter.")
+        }
+    }
+    else
+    {
+        if (!have_vpu_target)
+        {
+            CV_LOG_FATAL(NULL, "OpenCV-DNN-Test: 'OPENCV_TEST_DNN_IE_VPU_TYPE' parameter requires VPU of type = '" << test_vpu_type << "', but VPU is not detected. STOP.");
+            exit(1);
+        }
+        std::string dnn_vpu_type = getInferenceEngineVPUType();
+        if (dnn_vpu_type != test_vpu_type)
+        {
+            CV_LOG_FATAL(NULL, "OpenCV-DNN-Test: 'testing' and 'detected' VPU types mismatch: '" << test_vpu_type << "' vs '" << dnn_vpu_type << "'. STOP.");
+            exit(1);
+        }
+    }
+    return true;
+}
+
+bool validateVPUType()
+{
+    static bool result = validateVPUType_();
+    return result;
+}
+#endif // HAVE_INF_ENGINE
+
+} // namespace