bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
+ return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
+ return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
+ return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
+ return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
+ return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
bool supportBackend(int backendId, int)
{
- return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
+ return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE;
}
inline float calculate(float x) const
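For context on the hunks above: every element-wise functor in the layer implementation these hunks come from exposes the same supportBackend()/calculate() pair, which is why the identical one-line change repeats once per functor. A minimal sketch of that pattern follows; the functor name and the body of calculate() are illustrative, not taken from this PR.

#include <cmath>
#include <opencv2/dnn.hpp>
using namespace cv::dnn;

struct ExampleAbsFunctor   // hypothetical functor; the real ones are ReLU, TanH, Power, ...
{
    bool supportBackend(int backendId, int /*targetId*/)
    {
        // The repeated one-line change adds DNN_BACKEND_CUDA to this whitelist.
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE;
    }

    inline float calculate(float x) const
    {
        return std::abs(x);   // illustrative per-element operation
    }
};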
TEST_P(Test_ONNX_layers, Convolution3D)
{
+ if (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16)
+ {
+ // CUDA_FP16: cuDNN did not return a suitable algorithm for convolution.
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
+ }
testONNXModels("conv3d");
}
TEST_P(Test_ONNX_layers, Convolution3D_bias)
{
+ if (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16)
+ {
+ // CUDA_FP16: cuDNN did not return a suitable algorithm for convolution.
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
+ }
testONNXModels("conv3d_bias");
}
if (backend == DNN_BACKEND_VKCOM)
applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN);
+ if (backend == DNN_BACKEND_CUDA && target == DNN_TARGET_CUDA_FP16)
+ {
+ // CUDA_FP16: cuDNN did not return a suitable algorithm for convolution.
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
+ }
+
testONNXModels("pool_conv_3d");
}
Mat out = net.forward();
normAssert(ref, out, "", default_l1, default_lInf);
- expectNoFallbacksFromIE(net);
- expectNoFallbacksFromCUDA(net);
- testONNXModels("div_test_1x1",npy, 0, 0, false, true, 2);
+ // NaryEltwise layer supports only CPU for now
+ testONNXModels("div_test_1x1", npy, 0, 0, false, false, 2);
}
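A note on the hunk above: assuming the usual declaration of the testONNXModels helper in this test file, the boolean flipped from true to false is the check-no-fallbacks flag, so the CPU-only NaryEltwise layer no longer fails the test when other backends fall back.

// Assumed helper signature (not part of this diff); the sixth parameter is
// the one switched to false in the div_test_1x1 call above.
void testONNXModels(const String& basename, const Extension ext = npy,
                    const double l1 = 0, const float lInf = 0,
                    const bool useSoftmax = false,
                    bool checkNoFallbacks = true,   // false => allow backend fallbacks
                    int numInps = 1);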
TEST_P(Test_ONNX_layers, DynamicReshape)
testONNXModels("split_2");
testONNXModels("split_3");
testONNXModels("split_4");
- testONNXModels("split_sizes");
testONNXModels("split_neg_axis");
}
+// Mul inside with a 0-d tensor; the output should be A x 1 but comes out as 1 x A. PR #22652
+TEST_P(Test_ONNX_layers, DISABLED_Split_sizes_0d)
+{
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ testONNXModels("split_sizes");
+}
+
TEST_P(Test_ONNX_layers, Slice)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
}
else if (target == DNN_TARGET_CUDA_FP16)
{
- l1 = 0.008;
+ l1 = 0.009;
lInf = 0.04;
}
testONNXModels("LResNet100E_IR", pb, l1, lInf);