ocv_cmake_hook(PRE_CMAKE_BOOTSTRAP)
- # Bootstap CMake system: setup CMAKE_SYSTEM_NAME and other vars
+ # Bootstrap CMake system: setup CMAKE_SYSTEM_NAME and other vars
+if(OPENCV_WORKAROUND_CMAKE_20989)
+ set(CMAKE_SYSTEM_PROCESSOR_BACKUP ${CMAKE_SYSTEM_PROCESSOR})
+endif()
enable_language(CXX C)
+if(OPENCV_WORKAROUND_CMAKE_20989)
+ set(CMAKE_SYSTEM_PROCESSOR ${CMAKE_SYSTEM_PROCESSOR_BACKUP})
+endif()
ocv_cmake_hook(POST_CMAKE_BOOTSTRAP)
void writeLogMessage(LogLevel logLevel, const char* message)
{
const int threadID = cv::utils::getThreadID();
- case 1+2: message_id = cv::format("%d@%llu", threadID, getTimestampNS()); break;
+
+ std::string message_id;
+ switch (getShowTimestampMode())
+ {
+ case 1: message_id = cv::format("%d@%0.3f", threadID, getTimestampNS() * 1e-9); break;
++ case 1+2: message_id = cv::format("%d@%llu", threadID, (long long unsigned int)getTimestampNS()); break;
+ default: message_id = cv::format("%d", threadID); break;
+ }
+
std::ostringstream ss;
switch (logLevel)
{
cv::Mutex& getInitializationMutex();
-// TODO Memory barriers?
+ /// @brief Returns timestamp in nanoseconds since program launch
+ int64 getTimestampNS();
+
+
#define CV_SINGLETON_LAZY_INIT_(TYPE, INITIALIZER, RET_VALUE) \
- static TYPE* volatile instance = NULL; \
- if (instance == NULL) \
- { \
- cv::AutoLock lock(cv::getInitializationMutex()); \
- if (instance == NULL) \
- instance = INITIALIZER; \
- } \
+ static TYPE* const instance = INITIALIZER; \
return RET_VALUE;
#define CV_SINGLETON_LAZY_INIT(TYPE, INITIALIZER) CV_SINGLETON_LAZY_INIT_(TYPE, INITIALIZER, instance)
#if defined(INF_ENGINE_RELEASE)
if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
- applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+ #endif
+ #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+ // IE exception: Ngraph operation Transpose with name conv15_2_mbox_conf_perm has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+ applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+ CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+ );
#endif
+
Mat sample = imread(findDataFile("dnn/street.png"));
Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 560), Scalar(127.5, 127.5, 127.5), false);
- float diffScores = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.029 : 0.0;
- float diffSquares = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.09 : 0.0;
+ float scoreDiff = 0.0, iouDiff = 0.0;
+ if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
+ {
+ scoreDiff = 0.029;
+ iouDiff = 0.09;
+ }
+ else if (target == DNN_TARGET_CUDA_FP16)
+ {
+ scoreDiff = 0.03;
+ iouDiff = 0.08;
+ }
processNet("dnn/MobileNetSSD_deploy.caffemodel", "dnn/MobileNetSSD_deploy.prototxt",
- inp, "detection_out", "", diffScores, diffSquares);
+ inp, "detection_out", "", scoreDiff, iouDiff);
expectNoFallbacksFromIE(net);
}
Mat out = net.forward();
// Reference output values are in range [-29.1, 69.5]
- double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.25 : 4e-4;
- double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 5.3 : 3e-3;
- if (target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
+ double l1 = 4e-4, lInf = 3e-3;
+ if (target == DNN_TARGET_OPENCL_FP16)
+ {
+ l1 = 0.25;
+ lInf = 5.3;
+ }
+ else if (target == DNN_TARGET_MYRIAD)
+ {
+ l1 = (getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X) ? 0.5 : 0.25;
+ lInf = (getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X) ? 11 : 5.3;
+ }
+ else if(target == DNN_TARGET_CUDA_FP16)
{
- l1 = 0.5; lInf = 11;
+ l1 = 0.21;
+ lInf = 4.5;
}
+ #if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
{
- l1 = 0.26; lInf = 6.5;
+ l1 = 0.3; lInf = 10;
}
+ #endif
normAssert(out, ref, "", l1, lInf);
expectNoFallbacksFromIE(net);
scoreDiff = 1e-2;
iouDiff = 0.018;
}
+ else if (target == DNN_TARGET_CUDA_FP16)
+ {
+ scoreDiff = 0.03;
+ iouDiff = 0.018;
+ }
+ #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+ // accuracy: IoU threshold set to NaN for this configuration — TODO(review): confirm this intentionally relaxes/disables the IoU check in normAssertDetections
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+ {
+ iouDiff = std::numeric_limits<double>::quiet_NaN();
+ }
+ #endif
std::string config_file = "yolo-voc.cfg";
std::string weights_file = "yolo-voc.weights";
// Output values are in range [0, 637.5].
double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.06 : 1e-6;
double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.3 : 1e-5;
+ if (targetId == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 0.06;
+ lInf = 0.3;
+ }
normAssert(out, ref, "", l1, lInf);
+ if (cvtest::debugLevel > 0 || HasFailure())
+ {
+ std::cout << "input1 scale=" << kScale << " input2 scale=" << kScaleInv << std::endl;
+ std::cout << "input1: " << firstInp.size << " " << firstInp.reshape(1, 1) << std::endl;
+ std::cout << "input2: " << secondInp.size << " " << secondInp.reshape(1, 1) << std::endl;
+ std::cout << "ref: " << ref.reshape(1, 1) << std::endl;
+ std::cout << "out: " << out.reshape(1, 1) << std::endl;
+ }
}
INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_two_inputs, Combine(
double l1 = default_l1, lInf = default_lInf;
if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
{
- l1 = 0.017;
- lInf = 0.14;
+ l1 = 0.02;
+ lInf = 0.2;
}
+ else if (target == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 0.018;
+ lInf = 0.16;
+ }
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
{
double scoreDiff = default_l1, iouDiff = default_lInf;
if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
{
- scoreDiff = 0.0043;
- iouDiff = 0.037;
+ scoreDiff = 0.01;
+ iouDiff = 0.1;
}
+ else if (target == DNN_TARGET_CUDA_FP16)
+ {
+ iouDiff = 0.04;
+ }
+
normAssertDetections(ref, out, "", 0.2, scoreDiff, iouDiff);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2019010000
expectNoFallbacksFromIE(net);
// Assertion `prior_height > 0' failed.
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ #endif
- if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
- applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
++ if (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16)
++ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
++
+ checkBackend();
+
+ double scoresDiff = 1e-5;
+ double iouDiff = 1e-4;
+
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ {
+ scoresDiff = 0.02;
+ iouDiff = 0.1;
+ }
+
+ std::string name = "faster_rcnn_inception_v2_coco_2018_01_28";
+ {
+ std::string proto = findDataFile("dnn/" + name + ".pbtxt");
+ std::string model = findDataFile("dnn/" + name + ".pb", false);
+
+ Net net = readNetFromTensorflow(model, proto);
+ net.setPreferableBackend(backend);
+ net.setPreferableTarget(target);
+ Mat img = imread(findDataFile("dnn/dog416.png"));
+ Mat blob = blobFromImage(img, 1.0f, Size(800, 600), Scalar(), true, false);
+
+ net.setInput(blob);
+ Mat out = net.forward();
+
+ Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + name + ".detection_out.npy"));
+
+ // accuracy: skip OpenCL_FP16 target due to accuracy divergence (affects both OpenCV & IE backends)
+ if (target == DNN_TARGET_OPENCL_FP16)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
+
+ normAssertDetections(ref, out, name.c_str(), 0.3, scoresDiff, iouDiff);
+ }
+ }
+
+ TEST_P(Test_TensorFlow_nets, Faster_RCNN_resnet50_coco_2018_01_28)
+ {
+ applyTestTag(
+ (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
+ CV_TEST_TAG_LONG,
+ CV_TEST_TAG_DEBUG_VERYLONG
+ );
+
+ #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+ // IE exception: Ngraph operation Transpose with name FirstStageBoxPredictor/ClassPredictor/reshape_1/nhwc has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+ applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+ CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+ );
+ #endif
+
+ #ifdef INF_ENGINE_RELEASE
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
+ (INF_ENGINE_VER_MAJOR_LT(2019020000) || target != DNN_TARGET_CPU))
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+
+ if (INF_ENGINE_VER_MAJOR_GT(2019030000) &&
+ backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ #endif
+ #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
+ // segfault: inference-engine/thirdparty/clDNN/src/gpu/detection_output_cpu.cpp:111:
+ // Assertion `prior_height > 0' failed.
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ #endif
+ if (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
+
checkBackend();
double scoresDiff = backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ? 2.9e-5 : 1e-5;
double iouDiff = 1e-4;
- for (int i = 0; i < 2; ++i)
+ if (target == DNN_TARGET_CUDA)
+ {
+ // for faster_rcnn_resnet50_coco_2018_01_28
+ scoresDiff = 0.06;
+ iouDiff = 0.08;
+ }
+
+ std::string name = "faster_rcnn_resnet50_coco_2018_01_28";
{
- std::string proto = findDataFile("dnn/" + names[i] + ".pbtxt");
- std::string model = findDataFile("dnn/" + names[i] + ".pb", false);
+ std::string proto = findDataFile("dnn/" + name + ".pbtxt");
+ std::string model = findDataFile("dnn/" + name + ".pb", false);
Net net = readNetFromTensorflow(model, proto);
net.setPreferableBackend(backend);