TEST_P(Test_Torch_layers, run_convolution)
{
// Output reference values are in range [23.4018, 72.0181]
- double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.08 : default_l1;
- double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.42 : default_lInf;
+ double l1 = default_l1, lInf = default_lInf;
+ if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
+ {
+ l1 = 0.08;
+ lInf = 0.42;
+ }
+ else if (target == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 0.08;
+ lInf = 0.5;
+ }
runTorchNet("net_conv", "", false, true, true, l1, lInf);
}
{
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
- runTorchNet("net_pool_max", "", true);
+ if (target == DNN_TARGET_CUDA_FP16)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
+ double l1 = 0.0, lInf = 0.0;
+ runTorchNet("net_pool_max", "", true, false, true, l1, lInf);
}
TEST_P(Test_Torch_layers, run_pool_ave)
TEST_P(Test_Torch_layers, run_reshape_single_sample)
{
// Reference output values in range [14.4586, 18.4492].
- runTorchNet("net_reshape_single_sample", "", false, false, true,
- (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.033 : default_l1,
- (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.05 : default_lInf);
+ double l1 = default_l1, lInf = default_lInf;
+ if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
+ {
+ l1 = 0.033;
+ lInf = 0.05;
+ }
+ else if (target == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 0.01;
+ }
+ runTorchNet("net_reshape_single_sample", "", false, false, true, l1, lInf);
}
TEST_P(Test_Torch_layers, run_linear)
TEST_P(Test_Torch_layers, run_depth_concat)
{
- runTorchNet("net_depth_concat", "", false, true, true, 0.0,
- target == DNN_TARGET_OPENCL_FP16 ? 0.021 : 0.0);
+ double lInf = 0.0;
+ if (target == DNN_TARGET_OPENCL_FP16)
+ {
+ lInf = 0.021;
+ }
+ else if (target == DNN_TARGET_CUDA_FP16)
+ {
+ lInf = 0.03;
+ }
+ runTorchNet("net_depth_concat", "", false, true, true, 0.0, lInf);
}
TEST_P(Test_Torch_layers, run_deconv)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
- runTorchNet("net_conv_gemm_lrn", "", false, true, true,
- target == DNN_TARGET_OPENCL_FP16 ? 0.046 : 0.0,
- target == DNN_TARGET_OPENCL_FP16 ? 0.023 : 0.0);
+ double l1 = 0.0, lInf = 0.0;
+ if (target == DNN_TARGET_OPENCL_FP16)
+ {
+ l1 = 0.046;
+ lInf = 0.023;
+ }
+ else if (target == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 0.0042;
+ lInf = 0.021;
+ }
+ runTorchNet("net_conv_gemm_lrn", "", false, true, true, l1, lInf);
}
TEST_P(Test_Torch_layers, net_inception_block)
// Reference output values are in range [-0.17212, 0.263492]
// on Myriad problem layer: l4_Pooling - does not use pads_begin
- float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 2e-3 : 1e-5;
- float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 5e-3 : 1e-3;
+ float l1 = 1e-5, lInf = 1e-3;
+ if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
+ {
+ l1 = 2e-3;
+ lInf = 5e-3;
+ }
+ else if (target == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 0.0004;
+ lInf = 0.0012;
+ }
Mat outRef = readTorchBlob(_tf("net_openface_output.dat"), true);
normAssert(out, outRef, "", l1, lInf);
}
checkBackend();
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("");
+ if (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
{
if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
else
EXPECT_LE(normL1, 0.6f);
}
+ else if (target == DNN_TARGET_CUDA_FP16)
+ {
+ normAssert(out, refBlob, "", 0.6, 25);
+ }
else
normAssert(out, refBlob, "", 0.5, 1.1);
}