allow multiple inputs to resize, fix tests
author: YashasSamaga <yashas_2010@yahoo.com>
Thu, 11 Jun 2020 14:01:48 +0000 (19:31 +0530)
committer: YashasSamaga <yashas_2010@yahoo.com>
Thu, 11 Jun 2020 14:01:48 +0000 (19:31 +0530)
modules/dnn/src/cuda4dnn/primitives/resize.hpp
modules/dnn/test/test_darknet_importer.cpp
modules/dnn/test/test_onnx_importer.cpp
modules/dnn/test/test_tf_importer.cpp

index 3caf58d..0ac7b94 100644 (file)
@@ -35,7 +35,8 @@ namespace cv { namespace dnn { namespace cuda4dnn {
             const std::vector<cv::Ptr<BackendWrapper>>& outputs,
             csl::Workspace& workspace) override
         {
-            CV_Assert(inputs.size() == 1 && outputs.size() == 1);
+            // sometimes the target shape is taken from the second input; we don't use it however
+            CV_Assert((inputs.size() == 1 || inputs.size() == 2) && outputs.size() == 1);
 
             auto input_wrapper = inputs[0].dynamicCast<wrapper_type>();
             auto input = input_wrapper->getView();
index 4896c64..61dd541 100644 (file)
@@ -574,6 +574,11 @@ TEST_P(Test_Darknet_nets, YOLOv4)
 
     double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.006 : 8e-5;
     double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.042 : 3e-4;
+    if (target == DNN_TARGET_CUDA_FP16)
+    {
+        scoreDiff = 0.008;
+        iouDiff = 0.03;
+    }
 
     std::string config_file = "yolov4.cfg";
     std::string weights_file = "yolov4.weights";
index 4d36852..fb51fa9 100644 (file)
@@ -355,6 +355,8 @@ TEST_P(Test_ONNX_layers, MatMul)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_CUDA)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
 
     testONNXModels("matmul_2d");
     testONNXModels("matmul_3d");
index 43df240..46cba7a 100644 (file)
@@ -1067,6 +1067,8 @@ TEST_P(Test_TensorFlow_layers, tf2_prelu)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+    if (backend == DNN_BACKEND_CUDA)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported; only across channels is supported
     runTensorFlowNet("tf2_prelu");
 }
 
@@ -1239,6 +1241,11 @@ TEST_P(Test_TensorFlow_nets, EfficientDet)
                                     0, 7, 0.8039304, 0.6118435263633728, 0.13175517320632935, 0.9065558314323425, 0.2943994700908661);
     double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 4e-3 : 1e-5;
     double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 2e-3 : 1e-4;
+    if (target == DNN_TARGET_CUDA_FP16)
+    {
+        scoreDiff = 0.002;
+        iouDiff = 0.003;
+    }
     normAssertDetections(ref, out, "", 0.5, scoreDiff, iouDiff);
     expectNoFallbacksFromIE(net);
 }