Introduce relaxed accuracy thresholds for the OpenCL target in some dnn tests.
author     Aaron Greig <aaron.greig@codeplay.com>
           Wed, 24 Mar 2021 16:14:23 +0000 (16:14 +0000)
committer  Aaron Greig <aaron.greig@codeplay.com>
           Thu, 25 Mar 2021 10:58:23 +0000 (10:58 +0000)
Partially addresses #9821
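
Note on the thresholds: the l1 and lInf values threaded through these tests bound, respectively, the mean and the maximum absolute difference between the reference (default backend) output and the backend under test; the actual check is done by the existing normAssert helper. The snippet below is only a simplified sketch of that kind of comparison, not the helper used by the test suite:

    #include <opencv2/core.hpp>

    // Simplified sketch only: the real tests call normAssert(ref, out, "", l1, lInf).
    // l1 bounds the mean absolute difference, lInf the largest per-element error.
    static void checkWithinThresholds(const cv::Mat& ref, const cv::Mat& out,
                                      double l1, double lInf)
    {
        double meanAbsDiff = cv::norm(ref, out, cv::NORM_L1) / ref.total();
        double maxAbsDiff  = cv::norm(ref, out, cv::NORM_INF);
        CV_Assert(meanAbsDiff <= l1);
        CV_Assert(maxAbsDiff <= lInf);
    }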

modules/dnn/test/test_halide_layers.cpp
modules/dnn/test/test_layers.cpp
modules/dnn/test/test_torch_importer.cpp

diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp
index a1b13b7f6338be9986dcff77d5c419cf1e78ff93..c9c0bc1cedacb518f76cdf5f7daf151293168dd2 100644
@@ -16,7 +16,7 @@ using namespace cv;
 using namespace cv::dnn;
 using namespace testing;
 
-static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool skipCheck = false, bool randInput = true)
+static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool skipCheck = false, bool randInput = true, double l1 = 0.0, double lInf = 0.0)
 {
     DNNTestLayer::checkBackend(backendId, targetId);
     if (randInput)
@@ -33,8 +33,12 @@ static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool
     if (skipCheck)
         return;
 
-    double l1, lInf;
-    DNNTestLayer::getDefaultThresholds(backendId, targetId, &l1, &lInf);
+    double default_l1, default_lInf;
+    DNNTestLayer::getDefaultThresholds(backendId, targetId, &default_l1, &default_lInf);
+    if (l1 == 0.0)
+        l1 = default_l1;
+    if (lInf == 0.0)
+        lInf = default_lInf;
 #if 0
     std::cout << "l1=" << l1 << "  lInf=" << lInf << std::endl;
     std::cout << outputDefault.reshape(1, outputDefault.total()).t() << std::endl;
@@ -43,11 +47,11 @@ static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool
     normAssert(outputDefault, outputHalide, "", l1, lInf);
 }
 
-static void test(LayerParams& params, Mat& input, Backend backendId, Target targetId, bool skipCheck = false)
+static void test(LayerParams& params, Mat& input, Backend backendId, Target targetId, bool skipCheck = false, double l1 = 0.0, double lInf = 0.0)
 {
     Net net;
     net.addLayerToPrev(params.name, params.type, params);
-    test(input, net, backendId, targetId, skipCheck);
+    test(input, net, backendId, targetId, skipCheck, true, l1, lInf);
 }
 
 static inline testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargetsWithHalide()
@@ -251,7 +255,17 @@ TEST_P(LRN, Accuracy)
 
     int sz[] = {1, inChannels, inSize.height, inSize.width};
     Mat input(4, &sz[0], CV_32F);
-    test(lp, input, backendId, targetId);
+
+    double l1 = 0.0, lInf = 0.0;
+    // The OpenCL kernels use the native_ math functions, which have
+    // implementation-defined accuracy, so we use relaxed thresholds. See
+    // https://github.com/opencv/opencv/issues/9821 for more details.
+    if (targetId == DNN_TARGET_OPENCL)
+    {
+        l1 = 0.01;
+        lInf = 0.01;
+    }
+    test(lp, input, backendId, targetId, false, l1, lInf);
 }
 
 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, LRN, Combine(
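
Background for the relaxed DNN_TARGET_OPENCL values above: the OpenCL kernels rely on the native_* built-ins (e.g. native_powr, native_exp), whose precision is implementation-defined by the OpenCL specification, while the reference path uses ordinary libm calls; which specific built-ins the LRN kernel uses is not shown in this patch. For orientation only, an across-channel LRN looks roughly like the reference sketch below (schematic, not OpenCV's actual code; parameter names follow the usual Caffe-style k/alpha/beta/local_size, and whether alpha is normalised by the window size depends on the layer's configuration):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Schematic across-channel LRN over data laid out as src[c * spatialSize + i].
    // The std::pow at the end is the step an OpenCL kernel may implement with a
    // native_* built-in of lower, device-dependent precision.
    static void lrnAcrossChannelsRef(const std::vector<float>& src, int channels, int spatialSize,
                                     float k, float alpha, float beta, int localSize,
                                     std::vector<float>& dst)
    {
        dst.resize(src.size());
        for (int c = 0; c < channels; ++c)
        {
            const int c0 = std::max(0, c - localSize / 2);
            const int c1 = std::min(channels - 1, c + localSize / 2);
            for (int i = 0; i < spatialSize; ++i)
            {
                float sumSq = 0.f;
                for (int cc = c0; cc <= c1; ++cc)
                {
                    const float v = src[cc * spatialSize + i];
                    sumSq += v * v;
                }
                const float scale = k + alpha * sumSq / localSize; // alpha normalised by window size
                dst[c * spatialSize + i] = src[c * spatialSize + i] / std::pow(scale, beta);
            }
        }
    }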
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 73491fcf25779fc810104ae11173707cd50da68c..8a2037995882090fa6e14227d35495b7b5975d5c 100644
@@ -169,8 +169,17 @@ TEST_P(Test_Caffe_layers, Softmax)
 
 TEST_P(Test_Caffe_layers, LRN)
 {
-    testLayerUsingCaffeModels("layer_lrn_spatial");
-    testLayerUsingCaffeModels("layer_lrn_channels");
+    double l1 = 0.0, lInf = 0.0;
+    // The OpenCL kernels use the native_ math functions, which have
+    // implementation-defined accuracy, so we use relaxed thresholds. See
+    // https://github.com/opencv/opencv/issues/9821 for more details.
+    if (target == DNN_TARGET_OPENCL)
+    {
+        l1 = 0.01;
+        lInf = 0.01;
+    }
+    testLayerUsingCaffeModels("layer_lrn_spatial", false, true, l1, lInf);
+    testLayerUsingCaffeModels("layer_lrn_channels", false, true, l1, lInf);
 }
 
 TEST_P(Test_Caffe_layers, Convolution)
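
The testLayerUsingCaffeModels call above presumably follows the same convention as the test() helper in test_halide_layers.cpp (first hunk): passing 0.0 for l1/lInf keeps the per-backend defaults from getDefaultThresholds, and any non-zero value overrides them. A condensed, illustrative restatement of that fallback (the helper name here is hypothetical; the real logic is inlined in test()):

    // Illustrative restatement of the fallback added to test() in
    // test_halide_layers.cpp; resolveThresholds is not a real helper in the tests.
    static void resolveThresholds(Backend backendId, Target targetId,
                                  double& l1, double& lInf)
    {
        double defaultL1, defaultLInf;
        DNNTestLayer::getDefaultThresholds(backendId, targetId, &defaultL1, &defaultLInf);
        if (l1 == 0.0)   l1 = defaultL1;     // caller did not ask for a relaxed value
        if (lInf == 0.0) lInf = defaultLInf;
    }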
diff --git a/modules/dnn/test/test_torch_importer.cpp b/modules/dnn/test/test_torch_importer.cpp
index 9eb89f3d78342b8bfe83c1ab4ce391bdf726644e..b8f48b89654292c367eb539bb9652c1cc6abf044 100644
@@ -218,9 +218,21 @@ TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
-    runTorchNet("net_conv_gemm_lrn", "", false, true, true,
-                target == DNN_TARGET_OPENCL_FP16 ? 0.046 : 0.0,
-                target == DNN_TARGET_OPENCL_FP16 ? 0.023 : 0.0);
+    double l1 = 0.0, lInf = 0.0;
+    if (target == DNN_TARGET_OPENCL_FP16)
+    {
+        l1 = 0.046;
+        lInf = 0.023;
+    }
+    // The OpenCL kernels use the native_ math functions, which have
+    // implementation-defined accuracy, so we use relaxed thresholds. See
+    // https://github.com/opencv/opencv/issues/9821 for more details.
+    else if (target == DNN_TARGET_OPENCL)
+    {
+        l1 = 0.02;
+        lInf = 0.02;
+    }
+    runTorchNet("net_conv_gemm_lrn", "", false, true, true, l1, lInf);
 }
 
 TEST_P(Test_Torch_layers, net_inception_block)