// InstanceNorm is lowered to MVN, which the CUDA backend does not implement.
TEST_P(Test_ONNX_layers, InstanceNorm)
{
    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* MVN is not supported */

    if (target == DNN_TARGET_MYRIAD)
        testONNXModels("instancenorm", npy, 0, 0, false, false);
    else
        testONNXModels("instancenorm", npy);
}

// NOTE(review): the original span fused diff hunks from several unrelated
// tests into the InstanceNorm body. They are reconstructed below so that no
// coverage is lost — verify names and bodies against the upstream file.
TEST_P(Test_ONNX_layers, Conv_variable_weight)
{
    // NOTE(review): the IE condition line was truncated in the source hunk;
    // restored from the skip tags it applies — confirm upstream.
    if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
         backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
    if (backend == DNN_BACKEND_VKCOM)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN); // not supported

    String basename = "conv_variable_w";
    Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
    ASSERT_FALSE(net.empty());
    // NOTE(review): the inference/assertion tail of this test lies outside
    // the visible hunk.
}

TEST_P(Test_ONNX_layers, Conv_variable_weight_bias)
{
    // NOTE(review): condition line truncated in the source hunk; restored as above.
    if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
         backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
    if (backend == DNN_BACKEND_VKCOM)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN); // not supported

    String basename = "conv_variable_wb";
    Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
    ASSERT_FALSE(net.empty());
    // NOTE(review): the inference/assertion tail of this test lies outside
    // the visible hunk.
}

// NOTE(review): header restored from the model names — presumably the
// Deconvolution test; verify upstream.
TEST_P(Test_ONNX_layers, Deconvolution)
{
    testONNXModels("two_deconvolution", npy, 0, 0, false, false);
    testONNXModels("deconvolution_group", npy, 0, 0, false, false);
    testONNXModels("deconvolution_output_shape", npy, 0, 0, false, false);
    if (target != DNN_TARGET_CUDA_FP16) // bug
        testONNXModels("deconv_adjpad_2d", npy, 0, 0, false, false);
}
// 3D deconvolution: runs on CUDA and on the DLIE backend with a CPU target;
// everything else is skipped. Braces restored (lost during hunk extraction).
TEST_P(Test_ONNX_layers, Deconvolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    if (backend == DNN_BACKEND_CUDA)
    {
        // ok — CUDA supports 3D deconvolution
    }
    else if (backend == DNN_BACKEND_OPENCV || target != DNN_TARGET_CPU)
        throw SkipTestException("Only DLIE backend on CPU is supported");
    testONNXModels("deconv3d");
    testONNXModels("deconv3d_bias");
}
// ReduceMean over 3D input: CUDA is allowed; otherwise only CPU targets run
// (IE backends apply skip tags, everything else throws SkipTestException).
TEST_P(Test_ONNX_layers, ReduceMean3D)
{
    if (backend == DNN_BACKEND_CUDA)
    {
        // ok — supported on CUDA
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
    else if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");

    testONNXModels("reduce_mean3d");
}
// NOTE(review): the enclosing TEST_P header was missing from this hunk;
// restored from the model name ("max_pool3d") — verify against upstream.
TEST_P(Test_ONNX_layers, MaxPooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    if (backend == DNN_BACKEND_CUDA)
    {
        // ok — supported on CUDA
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
    else if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");
    testONNXModels("max_pool3d", npy, 0, 0, false, false);
}
// NOTE(review): the enclosing TEST_P header was missing from this hunk;
// restored from the model name ("ave_pool3d") — verify against upstream.
TEST_P(Test_ONNX_layers, AvePooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    if (backend == DNN_BACKEND_CUDA)
    {
        // ok — supported on CUDA
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
    else if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");
    testONNXModels("ave_pool3d");
}
// NOTE(review): the enclosing TEST_P header was missing from this hunk;
// restored from the model name ("pool_conv_3d") — verify against upstream.
TEST_P(Test_ONNX_layers, PoolConv3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    if (backend == DNN_BACKEND_CUDA)
    {
        // ok — supported on CUDA
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
    else if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");
    testONNXModels("pool_conv_3d");
}
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+ if (backend == DNN_BACKEND_CUDA)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
testONNXModels("matmul_2d");
testONNXModels("matmul_3d");
normAssert(ref, out, "", default_l1, default_lInf);
expectNoFallbacksFromIE(net);
+ expectNoFallbacksFromCUDA(net);
}
TEST_P(Test_ONNX_layers, DynamicReshape)
{
    // NOTE(review): the body content ("lin_with_constant") looks like it was
    // fused in from a LinearWithConstant hunk rather than DynamicReshape —
    // verify the test name/body pairing against upstream.
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2020040000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
#endif
    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA);
    testONNXModels("lin_with_constant");
}
// NOTE(review): the enclosing TEST_P header was missing from this hunk;
// restored from the model name ("matmul_with_two_inputs") — verify upstream.
TEST_P(Test_ONNX_layers, MatmulWithTwoInputs)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2020040000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
#endif
    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA);
    testONNXModels("matmul_with_two_inputs");
}
// Conv1d with runtime-provided weights: unsupported on CUDA and Vulkan.
TEST_P(Test_ONNX_layers, Conv1d_variable_weight)
{
    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
    if (backend == DNN_BACKEND_VKCOM)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN); // not supported
    String basename = "conv1d_variable_w";
    Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
    ASSERT_FALSE(net.empty());
    // NOTE(review): the rest of this test (input setup and inference checks)
    // lies outside the visible hunk; closing brace restored for validity.
}
// Conv1d with runtime-provided weights and bias: unsupported on CUDA/Vulkan;
// MYRIAD is skipped on the nGraph backend.
TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)
{
    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
    if (backend == DNN_BACKEND_VKCOM)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN); // not supported
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    }
    // NOTE(review): an orphan #endif and the l1/lInf tolerance block below
    // were fused in from a different test's hunk (its "#if" and
    // testONNXModels call are not visible here) — verify against upstream.
    // output range: [-11; 8]
    double l1 = default_l1, lInf = default_lInf;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
    {
        l1 = 0.017;
        lInf = 0.14;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        l1 = 0.018;
        lInf = 0.16;
    }
}
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
{
if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
}
- double l1 = default_l1;
- double lInf = default_lInf;
+ double l1 = default_l1, lInf = default_lInf;
// output range: [-3; 3]
- if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) {
+ if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
+ {
l1 = 0.009;
lInf = 0.035;
}
- else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_CPU) {
+ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_CPU)
+ {
l1 = 4.6e-5;
lInf = 1.9e-4;
}
+ else if (target == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 0.008;
+ lInf = 0.04;
+ }
testONNXModels("LResNet100E_IR", pb, l1, lInf);
}
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+ if (backend == DNN_BACKEND_CUDA)
+ {
+ // ok
+ }
+ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported
- if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+ else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported
- if (target != DNN_TARGET_CPU)
+ else if (target != DNN_TARGET_CPU)
throw SkipTestException("Only CPU is supported");
String onnxmodel = findDataFile("dnn/resnet-34_kinetics.onnx", false);
net.setPreferableTarget(target);
// output range [-5, 11]
- float l1 = 0.0013;
- float lInf = 0.009;
+ float l1 = 0.0013, lInf = 0.009;
+ if (target == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 0.01;
+ lInf = 0.06;
+ }
checkBackend(&input0, &ref0);
net.setInput(input0);