Merge remote-tracking branch 'upstream/3.4' into merge-3.4
[platform/upstream/opencv.git] / modules / dnn / test / test_onnx_importer.cpp
1 // This file is part of OpenCV project.
2 // It is subject to the license terms in the LICENSE file found in the top-level directory
3 // of this distribution and at http://opencv.org/license.html.
4
5 // Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
6 // Third party copyrights are property of their respective owners.
7
8
9 #include "test_precomp.hpp"
10 #include "npy_blob.hpp"
11 #include <opencv2/dnn/shape_utils.hpp>
12 namespace opencv_test { namespace {
13
// Resolve a test-resource name to its path below the dnn/onnx data folder.
// `required` is forwarded to findDataFile (strict vs. best-effort lookup).
template<typename TString>
static std::string _tf(TString filename, bool required = true)
{
    const std::string subdir = "dnn/onnx/";
    return findDataFile(subdir + filename, required);
}
19
20 class Test_ONNX_layers : public DNNTestLayer
21 {
22 public:
23     bool required;
24
25     Test_ONNX_layers() : required(true) { }
26
27     enum Extension
28     {
29         npy,
30         pb
31     };
32
33     void testONNXModels(const String& basename, const Extension ext = npy,
34                         const double l1 = 0, const float lInf = 0, const bool useSoftmax = false,
35                         bool checkNoFallbacks = true, int numInps = 1)
36     {
37         String onnxmodel = _tf("models/" + basename + ".onnx", required);
38         std::vector<Mat> inps(numInps);
39         Mat ref;
40         if (ext == npy) {
41             for (int i = 0; i < numInps; ++i)
42                 inps[i] = blobFromNPY(_tf("data/input_" + basename + (numInps > 1 ? format("_%d", i) : "") + ".npy"));
43             ref = blobFromNPY(_tf("data/output_" + basename + ".npy"));
44         }
45         else if (ext == pb) {
46             for (int i = 0; i < numInps; ++i)
47                 inps[i] = readTensorFromONNX(_tf("data/input_" + basename + (numInps > 1 ? format("_%d", i) : "") + ".pb"));
48             ref = readTensorFromONNX(_tf("data/output_" + basename + ".pb"));
49         }
50         else
51             CV_Error(Error::StsUnsupportedFormat, "Unsupported extension");
52
53         checkBackend(&inps[0], &ref);
54         Net net = readNetFromONNX(onnxmodel);
55         ASSERT_FALSE(net.empty());
56
57         net.setPreferableBackend(backend);
58         net.setPreferableTarget(target);
59
60         std::vector<String> inputNames;
61         for (int i = 0; i < numInps; ++i)
62             inputNames.push_back(format("%d", i));
63         net.setInputsNames(inputNames);
64
65         for (int i = 0; i < numInps; ++i)
66             net.setInput(inps[i], inputNames[i]);
67         Mat out = net.forward("");
68
69         if (useSoftmax)
70         {
71             LayerParams lp;
72             Net netSoftmax;
73             netSoftmax.addLayerToPrev("softmaxLayer", "Softmax", lp);
74             netSoftmax.setPreferableBackend(DNN_BACKEND_OPENCV);
75
76             netSoftmax.setInput(out);
77             out = netSoftmax.forward();
78
79             netSoftmax.setInput(ref);
80             ref = netSoftmax.forward();
81         }
82         normAssert(ref, out, "", l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
83         if (checkNoFallbacks)
84             expectNoFallbacksFromIE(net);
85     }
86 };
87
// InstanceNormalization — implemented via MVN, which CUDA does not support.
TEST_P(Test_ONNX_layers, InstanceNorm)
{
    if(backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* MVN is not supported */

    // On MYRIAD the IE no-fallback check is disabled (checkNoFallbacks=false).
    if (target == DNN_TARGET_MYRIAD)
        testONNXModels("instancenorm", npy, 0, 0, false, false);
    else
        testONNXModels("instancenorm", npy);
}

// Single MaxPool layer; fallback check disabled for all configurations.
TEST_P(Test_ONNX_layers, MaxPooling)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    testONNXModels("maxpooling", npy, 0, 0, false, false);
}
// Two stacked MaxPool layers.
TEST_P(Test_ONNX_layers, MaxPooling_2)
{
    testONNXModels("two_maxpooling", npy, 0, 0, false, false);
}

// Plain 2D convolution with constant weights.
TEST_P(Test_ONNX_layers, Convolution)
{
    testONNXModels("convolution");
}
116
117 TEST_P(Test_ONNX_layers, Convolution_variable_weight)
118 {
119     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
120          backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) && target == DNN_TARGET_MYRIAD)
121         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
122
123     if (backend == DNN_BACKEND_CUDA)
124         applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
125     if (backend == DNN_BACKEND_VKCOM)
126         applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN); // not supported
127     String basename = "conv_variable_w";
128     Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
129     ASSERT_FALSE(net.empty());
130
131     net.setPreferableBackend(backend);
132     net.setPreferableTarget(target);
133
134     for (int i = 0; i < 2; i++)
135     {
136         Mat input = blobFromNPY(_tf("data/input_" + basename + format("_%d", i) + "_0.npy"));
137         Mat weights = blobFromNPY(_tf("data/input_" + basename + format("_%d", i) + "_1.npy"));
138         Mat ref = blobFromNPY(_tf("data/output_" + basename + format("_%d", i) + ".npy"));
139
140         net.setInput(input, "0");
141         net.setInput(weights, "1");
142
143         Mat out = net.forward();
144         normAssert(ref, out, "", default_l1, default_lInf);
145     }
146 }
147
148 TEST_P(Test_ONNX_layers, Convolution_variable_weight_bias)
149 {
150     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
151          backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) && target == DNN_TARGET_MYRIAD)
152         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
153
154     if (backend == DNN_BACKEND_CUDA)
155         applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
156     if (backend == DNN_BACKEND_VKCOM)
157         applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN); // not supported
158
159     String basename = "conv_variable_wb";
160     Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
161     ASSERT_FALSE(net.empty());
162
163     net.setPreferableBackend(backend);
164     net.setPreferableTarget(target);
165
166     for (int i = 0; i < 2; i++)
167     {
168         Mat input = blobFromNPY(_tf("data/input_" + basename + format("_%d", i) + "_0.npy"));
169         Mat weights = blobFromNPY(_tf("data/input_" + basename + format("_%d", i) + "_1.npy"));
170         Mat bias = blobFromNPY(_tf("data/input_" + basename + format("_%d", i) + "_2.npy"));
171         Mat ref = blobFromNPY(_tf("data/output_" + basename + format("_%d", i) + ".npy"));
172
173         net.setInput(input, "0");
174         net.setInput(weights, "1");
175         net.setInput(bias, "bias");
176
177         Mat out = net.forward();
178         normAssert(ref, out, "", default_l1, default_lInf);
179     }
180 }
181
TEST_P(Test_ONNX_layers, Gather)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    testONNXModels("gather");
    // GPU plugin unsupported slice for constant
    // NOTE: this tag is intentionally applied AFTER the first model, so
    // "gather" still runs on IE/OpenCL before "gather_scalar" is skipped.
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    testONNXModels("gather_scalar", npy, 0, 0, false, false);
}

// 3D convolution; requires InferenceEngine >= 2019R1 when IE is enabled.
TEST_P(Test_ONNX_layers, Convolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    testONNXModels("conv3d");
}

// 3D convolution with bias term.
TEST_P(Test_ONNX_layers, Convolution3D_bias)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    testONNXModels("conv3d_bias");
}

// Two sequential convolutions.
TEST_P(Test_ONNX_layers, Two_convolution)
{
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
#endif
    // Reference output values are in range [-0.855, 0.611]
    testONNXModels("two_convolution");
}

// Several deconvolution (transposed convolution) variants; all run with
// the IE no-fallback check disabled.
TEST_P(Test_ONNX_layers, Deconvolution)
{
    testONNXModels("deconvolution", npy, 0, 0, false, false);
    testONNXModels("two_deconvolution", npy, 0, 0, false, false);
    testONNXModels("deconvolution_group", npy, 0, 0, false, false);
    testONNXModels("deconvolution_output_shape", npy, 0, 0, false, false);
    if (target != DNN_TARGET_CUDA_FP16) // bug
        testONNXModels("deconv_adjpad_2d", npy, 0, 0, false, false);
}
230
// 3D deconvolution variants; only CUDA or DLIE-on-CPU configurations run.
TEST_P(Test_ONNX_layers, Deconvolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    if (backend == DNN_BACKEND_CUDA)
    {
        // ok
    }
    else if (backend == DNN_BACKEND_OPENCV || target != DNN_TARGET_CPU)
        throw SkipTestException("Only DLIE backend on CPU is supported");
    testONNXModels("deconv3d");
    testONNXModels("deconv3d_bias");
    testONNXModels("deconv3d_pad");
    testONNXModels("deconv3d_adjpad");
}
247
// Dropout layer (identity at inference time per the exported model).
TEST_P(Test_ONNX_layers, Dropout)
{
    testONNXModels("dropout");
}

// Fully-connected (Gemm) layer.
TEST_P(Test_ONNX_layers, Linear)
{
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    testONNXModels("linear");
}

// ReLU activation.
TEST_P(Test_ONNX_layers, ReLU)
{
    testONNXModels("ReLU");
}

// Clip (bounded ReLU) operator.
TEST_P(Test_ONNX_layers, Clip)
{
    testONNXModels("clip", npy);
}

// Shape operator applied to a constant tensor.
TEST_P(Test_ONNX_layers, Shape)
{
    testONNXModels("shape_of_constant");
}

// ReduceMean over all axes and over specific axes.
TEST_P(Test_ONNX_layers, ReduceMean)
{
    testONNXModels("reduce_mean");
    testONNXModels("reduce_mean_axis1");
    testONNXModels("reduce_mean_axis2");
}

// ReduceSum operator.
TEST_P(Test_ONNX_layers, ReduceSum)
{
    testONNXModels("reduce_sum");
}

// ReduceMax over all axes and over axes 0/1.
TEST_P(Test_ONNX_layers, ReduceMax)
{
    testONNXModels("reduce_max");
    testONNXModels("reduce_max_axis_0");
    testONNXModels("reduce_max_axis_1");
}

// Scale (per-channel multiply) layer.
TEST_P(Test_ONNX_layers, Scale)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    testONNXModels("scale");
}
300
// ReduceMean on a 5D (3D spatial) tensor; CPU-only except on CUDA.
TEST_P(Test_ONNX_layers, ReduceMean3D)
{
    if (backend == DNN_BACKEND_CUDA)
    {
        // ok
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
    else if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");

    testONNXModels("reduce_mean3d");
}

// MaxPool followed by a Sigmoid activation.
TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
{
    testONNXModels("maxpooling_sigmoid");
}

// Cast operator.
TEST_P(Test_ONNX_layers, Cast)
{
    testONNXModels("cast");
}

// Pow operator (square); fallback check disabled.
TEST_P(Test_ONNX_layers, Power)
{
    testONNXModels("pow2", npy, 0, 0, false, false);
}

// Concat layer; all IE NN-Builder targets are skipped.
TEST_P(Test_ONNX_layers, Concatenation)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    }
    testONNXModels("concatenation");
}

// Element-wise op on 5D tensors; CPU-only on the IE backends.
TEST_P(Test_ONNX_layers, Eltwise3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
    testONNXModels("eltwise3d");
}

// AveragePool layer.
TEST_P(Test_ONNX_layers, AveragePooling)
{
    testONNXModels("average_pooling");
}
359
// 3D MaxPool; CPU-only except on CUDA, and fallback check disabled.
TEST_P(Test_ONNX_layers, MaxPooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    if (backend == DNN_BACKEND_CUDA)
    {
        // ok
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
    else if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");
    testONNXModels("max_pool3d", npy, 0, 0, false, false);
}

// 3D AveragePool; same backend restrictions as MaxPooling3D.
TEST_P(Test_ONNX_layers, AvePooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    if (backend == DNN_BACKEND_CUDA)
    {
        // ok
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
    else if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");
    testONNXModels("ave_pool3d");
}

// 3D pooling followed by convolution; same backend restrictions.
TEST_P(Test_ONNX_layers, PoolConv3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    if (backend == DNN_BACKEND_CUDA)
    {
        // ok
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
    else if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");
    testONNXModels("pool_conv_3d");
}
413
// Fused BatchNormalization layer.
TEST_P(Test_ONNX_layers, BatchNormalization)
{
    testONNXModels("batch_norm");
}

// BatchNormalization on a 5D tensor; IE NN-Builder targets skipped.
TEST_P(Test_ONNX_layers, BatchNormalization3D)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    }
    testONNXModels("batch_norm_3d");
}

// BatchNorm exported unfused (PyTorch FrozenBatchNorm2d).
TEST_P(Test_ONNX_layers, BatchNormalizationUnfused)
{
    testONNXModels("frozenBatchNorm2d");
}

// BatchNorm expressed as a subgraph of primitive ops.
TEST_P(Test_ONNX_layers, BatchNormalizationSubgraph)
{
    testONNXModels("batch_norm_subgraph");
}

// Transpose (permute) layer; IE NN-Builder targets skipped.
TEST_P(Test_ONNX_layers, Transpose)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    }
    testONNXModels("transpose");
}
450
// Element-wise Mul operator.
TEST_P(Test_ONNX_layers, Multiplication)
{
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    testONNXModels("mul");
}

// MatMul on 2D/3D/4D tensors; unsupported on NN-Builder and CUDA.
TEST_P(Test_ONNX_layers, MatMul)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported

    testONNXModels("matmul_2d");
    testONNXModels("matmul_3d");
    testONNXModels("matmul_4d");
}

// MatMul followed by Add (Gemm-like pattern).
TEST_P(Test_ONNX_layers, MatMulAdd)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    testONNXModels("matmul_add");
}

// Expand (broadcast) along batch/channel axes, incl. negative batch.
TEST_P(Test_ONNX_layers, Expand)
{
    testONNXModels("expand_batch");
    testONNXModels("expand_channels");
    testONNXModels("expand_neg_batch");
}

// Expand along spatial (H/W) axes.
TEST_P(Test_ONNX_layers, ExpandHW)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    testONNXModels("expand_hw");
}

// Constant node as a network output source.
TEST_P(Test_ONNX_layers, Constant)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
       applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    testONNXModels("constant");
}
504
// Pad operator; older IE releases run without the no-fallback check.
TEST_P(Test_ONNX_layers, Padding)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    testONNXModels("padding", npy, 0, 0, false, false);
#else
    testONNXModels("padding");
#endif
}

// Resize with nearest and bilinear interpolation. Note the skip tag is
// applied between the two models so "resize_nearest" always runs first.
TEST_P(Test_ONNX_layers, Resize)
{
    testONNXModels("resize_nearest");
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    testONNXModels("resize_bilinear");
}

// Upsample/Resize exported as unfused subgraphs by various torch versions.
TEST_P(Test_ONNX_layers, ResizeUnfused)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    testONNXModels("upsample_unfused_torch1.2");
    testONNXModels("upsample_unfused_opset9_torch1.4");
    testONNXModels("resize_nearest_unfused_opset11_torch1.4");
    testONNXModels("resize_nearest_unfused_opset11_torch1.3");
    testONNXModels("resize_bilinear_unfused_opset11_torch1.4");
}

// Unfused upsample with two network inputs (numInps = 2).
TEST_P(Test_ONNX_layers, ResizeUnfusedTwoInputs)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    testONNXModels("upsample_unfused_two_inputs_opset9_torch1.4", npy, 0, 0, false, true, 2);
    testONNXModels("upsample_unfused_two_inputs_opset11_torch1.4", npy, 0, 0, false, true, 2);
}

// Network with two inputs.
TEST_P(Test_ONNX_layers, MultyInputs)
{
    testONNXModels("multy_inputs", npy, 0, 0, false, true, 2);
}

// Channel-wise broadcasting between two inputs.
TEST_P(Test_ONNX_layers, Broadcast)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    testONNXModels("channel_broadcast", npy, 0, 0, false, true, 2);
}

// Resize where the target size/scale comes from a second input, across
// opsets 9-11 (numInps = 2 for every model).
TEST_P(Test_ONNX_layers, DynamicResize)
{
    testONNXModels("dynamic_resize_9", npy, 0, 0, false, true, 2);
    testONNXModels("dynamic_resize_10", npy, 0, 0, false, true, 2);
    testONNXModels("dynamic_resize_11", npy, 0, 0, false, true, 2);
    testONNXModels("dynamic_resize_scale_9", npy, 0, 0, false, true, 2);
    testONNXModels("dynamic_resize_scale_10", npy, 0, 0, false, true, 2);
    testONNXModels("dynamic_resize_scale_11", npy, 0, 0, false, true, 2);
}
564
565 TEST_P(Test_ONNX_layers, Div)
566 {
567     const String model =  _tf("models/div.onnx");
568     Net net = readNetFromONNX(model);
569     ASSERT_FALSE(net.empty());
570
571     net.setPreferableBackend(backend);
572     net.setPreferableTarget(target);
573
574     // Reference output values range is -68.80928, 2.991873. So to avoid computational
575     // difference for FP16 we'll perform reversed division (just swap inputs).
576     Mat inp1 = blobFromNPY(_tf("data/input_div_1.npy"));
577     Mat inp2 = blobFromNPY(_tf("data/input_div_0.npy"));
578     Mat ref  = blobFromNPY(_tf("data/output_div.npy"));
579     cv::divide(1.0, ref, ref);
580     checkBackend(&inp1, &ref);
581
582     net.setInput(inp1, "0");
583     net.setInput(inp2, "1");
584     Mat out = net.forward();
585
586     normAssert(ref, out, "", default_l1,  default_lInf);
587     expectNoFallbacksFromIE(net);
588     expectNoFallbacksFromCUDA(net);
589 }
590
// Reshape/Flatten where the target shape is computed at runtime.
TEST_P(Test_ONNX_layers, DynamicReshape)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);

    testONNXModels("dynamic_reshape");
    testONNXModels("dynamic_reshape_opset_11");
    testONNXModels("flatten_by_prod");
    testONNXModels("flatten_const");
}

// Unsqueeze operator.
TEST_P(Test_ONNX_layers, Reshape)
{
    testONNXModels("unsqueeze");
}

// Squeeze operator.
TEST_P(Test_ONNX_layers, Squeeze)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    testONNXModels("squeeze");
}

// ReduceL2, both as a single op and as exported subgraphs.
TEST_P(Test_ONNX_layers, ReduceL2)
{
    testONNXModels("reduceL2");
    testONNXModels("reduceL2_subgraph");
    testONNXModels("reduceL2_subgraph_2");
}

// Split operator variants; skipped on both IE backends.
TEST_P(Test_ONNX_layers, Split)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    testONNXModels("split_1");
    testONNXModels("split_2");
    testONNXModels("split_3");
    testONNXModels("split_4");
}

// Slice operator; opset 11 form only on newer IE (or without IE).
TEST_P(Test_ONNX_layers, Slice)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    testONNXModels("slice", npy, 0, 0, false, false);
#else
    testONNXModels("slice");
    testONNXModels("slice_opset_11");
#endif
}

// Softmax / LogSoftmax, fused and unfused exports.
TEST_P(Test_ONNX_layers, Softmax)
{
    testONNXModels("softmax");
    testONNXModels("log_softmax", npy, 0, 0, false, false);
    testONNXModels("softmax_unfused");
}

// Split whose outputs feed an element-wise Max.
TEST_P(Test_ONNX_layers, Split_EltwiseMax)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    testONNXModels("split_max");
}
658
// Unidirectional LSTM; fallback check disabled.
TEST_P(Test_ONNX_layers, LSTM)
{
    testONNXModels("lstm", npy, 0, 0, false, false);
}

// Bidirectional LSTM; fallback check disabled.
TEST_P(Test_ONNX_layers, LSTM_bidirectional)
{
    testONNXModels("lstm_bidirectional", npy, 0, 0, false, false);
}

// Reflection and zero padding exported as unfused subgraphs.
TEST_P(Test_ONNX_layers, Pad2d_Unfused)
{
    testONNXModels("ReflectionPad2d");
    testONNXModels("ZeroPad2d");
}

// Linear layer whose weight comes from a Constant node.
TEST_P(Test_ONNX_layers, LinearWithConstant)
{
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2020040000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
#endif
    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA);
    testONNXModels("lin_with_constant");
}

// MatMul taking both operands as network inputs.
TEST_P(Test_ONNX_layers, MatmulWithTwoInputs)
{
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2020040000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
#endif
    if (backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA);
    testONNXModels("matmul_with_two_inputs");
}

// Resize as exported by torch 1.6 with opset 11.
TEST_P(Test_ONNX_layers, ResizeOpset11_Torch1_6)
{
    testONNXModels("resize_opset11_torch1.6");
}

// Mish activation.
TEST_P(Test_ONNX_layers, Mish)
{
    testONNXModels("mish");
}

// Padding amounts computed inside the graph.
TEST_P(Test_ONNX_layers, CalculatePads)
{
    testONNXModels("calc_pads");
}

// 1D convolution.
TEST_P(Test_ONNX_layers, Conv1d)
{
    testONNXModels("conv1d");
}

// 1D convolution with bias.
TEST_P(Test_ONNX_layers, Conv1d_bias)
{
    testONNXModels("conv1d_bias");
}
723
724 TEST_P(Test_ONNX_layers, Conv1d_variable_weight)
725 {
726     if (backend == DNN_BACKEND_CUDA)
727         applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
728     if (backend == DNN_BACKEND_VKCOM)
729         applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN); // not supported
730     String basename = "conv1d_variable_w";
731     Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
732     ASSERT_FALSE(net.empty());
733
734     net.setPreferableBackend(backend);
735     net.setPreferableTarget(target);
736
737     Mat input = blobFromNPY(_tf("data/input_" + basename + "_0.npy"));
738     Mat weights = blobFromNPY(_tf("data/input_" + basename + "_1.npy"));
739     Mat ref = blobFromNPY(_tf("data/output_" + basename + ".npy"));
740
741     net.setInput(input, "0");
742     net.setInput(weights, "1");
743
744     Mat out = net.forward();
745     normAssert(ref, out, "", default_l1, default_lInf);
746 }
747
748 TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)
749 {
750     if (backend == DNN_BACKEND_CUDA)
751         applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); // not supported
752     if (backend == DNN_BACKEND_VKCOM)
753         applyTestTag(CV_TEST_TAG_DNN_SKIP_VULKAN); // not supported
754     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
755     {
756         if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
757     }
758     String basename = "conv1d_variable_wb";
759     Net net = readNetFromONNX(_tf("models/" + basename + ".onnx"));
760     ASSERT_FALSE(net.empty());
761
762     net.setPreferableBackend(backend);
763     net.setPreferableTarget(target);
764
765     Mat input = blobFromNPY(_tf("data/input_" + basename + "_0.npy"));
766     Mat weights = blobFromNPY(_tf("data/input_" + basename + "_1.npy"));
767     Mat bias = blobFromNPY(_tf("data/input_" + basename + "_2.npy"));
768     Mat ref = blobFromNPY(_tf("data/output_" + basename + ".npy"));
769
770     net.setInput(input, "0");
771     net.setInput(weights, "1");
772     net.setInput(bias, "bias");
773
774     Mat out = net.forward();
775     normAssert(ref, out, "", default_l1, default_lInf);
776 }
777
// Gather node whose output feeds multiple consumers.
TEST_P(Test_ONNX_layers, GatherMultiOutput)
{
#if defined(INF_ENGINE_RELEASE)
    if (target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE)
#endif

    testONNXModels("gather_multi_output");
}

// Models exported with dynamic (symbolic) axes; MYRIAD skipped on both IE backends.
TEST_P(Test_ONNX_layers, DynamicAxes)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    }
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    }
    testONNXModels("squeeze_and_conv_dynamic_axes");
    testONNXModels("unsqueeze_and_conv_dynamic_axes");
    testONNXModels("gather_dynamic_axes");
    testONNXModels("gather_scalar_dynamic_axes");
    testONNXModels("slice_dynamic_axes");
    testONNXModels("slice_opset_11_dynamic_axes");
    testONNXModels("resize_opset11_torch1.6_dynamic_axes");
    testONNXModels("average_pooling_dynamic_axes");
    testONNXModels("maxpooling_sigmoid_dynamic_axes");
}
808
// 1D MaxPool; MYRIAD skipped on both IE backends.
TEST_P(Test_ONNX_layers, MaxPool1d)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    }
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    }
    testONNXModels("maxpooling_1d");
}

// 1D MaxPool followed by Sigmoid; same MYRIAD skips.
TEST_P(Test_ONNX_layers, MaxPoolSigmoid1d)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    }
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    }
    testONNXModels("maxpooling_sigmoid_1d");
}

// Two chained 1D MaxPool layers; same MYRIAD skips.
TEST_P(Test_ONNX_layers, MaxPool1d_Twise)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    }
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    }
    testONNXModels("two_maxpooling_1d");
}

// 1D AveragePool; same MYRIAD skips.
TEST_P(Test_ONNX_layers, AvePool1d)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    }
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    }
    testONNXModels("average_pooling_1d");
}

// 1D pooling followed by convolution; same MYRIAD skips.
TEST_P(Test_ONNX_layers, PoolConv1d)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    }
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    }
    testONNXModels("pool_conv_1d");
}

// Conv + Resize + Pool pipeline on 1D data; same MYRIAD skips.
TEST_P(Test_ONNX_layers, ConvResizePool1d)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    }
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
    {
        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    }
    testONNXModels("conv_resize_pool_1d");
}
886
887 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());
888
// Fixture for whole-network ONNX tests. Differs from Test_ONNX_layers only in
// setting required = false, so _tf()/findDataFile does not demand that the
// (large, separately downloaded) model files be present.
class Test_ONNX_nets : public Test_ONNX_layers
{
public:
    Test_ONNX_nets() { required = false; }
};
894
TEST_P(Test_ONNX_nets, Alexnet)
{
    // 32-bit builds with OpenCL or on Windows need a larger memory budget.
#if defined(OPENCV_32BIT_CONFIGURATION) && (defined(HAVE_OPENCL) || defined(_WIN32))
    applyTestTag(CV_TEST_TAG_MEMORY_2GB);
#else
    applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
#endif

    const String model =  _tf("models/alexnet.onnx", false);

    Net net = readNetFromONNX(model);
    ASSERT_FALSE(net.empty());

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    // Reference probabilities come from the Caffe AlexNet run on the same image.
    Mat inp = imread(_tf("../grace_hopper_227.png"));
    Mat ref = blobFromNPY(_tf("../caffe_alexnet_prob.npy"));
    checkBackend(&inp, &ref);

    // 227x227 is AlexNet's expected input size; swapRB=false keeps BGR order.
    net.setInput(blobFromImage(inp, 1.0f, Size(227, 227), Scalar(), false));
    ASSERT_FALSE(net.empty());
    Mat out = net.forward();

    normAssert(out, ref, "", default_l1,  default_lInf);
    expectNoFallbacksFromIE(net);
}
922
// SqueezeNet classification model; reference input/output stored as ONNX .pb tensors.
TEST_P(Test_ONNX_nets, Squeezenet)
{
    testONNXModels("squeezenet", pb);
}
927
928 TEST_P(Test_ONNX_nets, Googlenet)
929 {
930     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
931         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
932
933     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
934         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
935
936     const String model = _tf("models/googlenet.onnx", false);
937
938     Net net = readNetFromONNX(model);
939     ASSERT_FALSE(net.empty());
940
941     net.setPreferableBackend(backend);
942     net.setPreferableTarget(target);
943
944     std::vector<Mat> images;
945     images.push_back( imread(_tf("../googlenet_0.png")) );
946     images.push_back( imread(_tf("../googlenet_1.png")) );
947     Mat inp = blobFromImages(images, 1.0f, Size(), Scalar(), false);
948     Mat ref = blobFromNPY(_tf("../googlenet_prob.npy"));
949     checkBackend(&inp, &ref);
950
951     net.setInput(inp);
952     ASSERT_FALSE(net.empty());
953     Mat out = net.forward();
954
955     normAssert(ref, out, "", default_l1,  default_lInf);
956     expectNoFallbacksFromIE(net);
957 }
958
TEST_P(Test_ONNX_nets, CaffeNet)
{
    // 32-bit builds with OpenCL or on Windows need a larger memory budget.
#if defined(OPENCV_32BIT_CONFIGURATION) && (defined(HAVE_OPENCL) || defined(_WIN32))
    applyTestTag(CV_TEST_TAG_MEMORY_2GB);
#else
    applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
#endif

    // Skip only for the IE 2019R3 NN Builder backend running on Myriad X hardware.
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019030000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    testONNXModels("caffenet", pb);
}
974
TEST_P(Test_ONNX_nets, RCNN_ILSVRC13)
{
    // 32-bit builds with OpenCL or on Windows need a larger memory budget.
#if defined(OPENCV_32BIT_CONFIGURATION) && (defined(HAVE_OPENCL) || defined(_WIN32))
    applyTestTag(CV_TEST_TAG_MEMORY_2GB);
#else
    applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
#endif

    // Skip only for the IE 2019R3 NN Builder backend running on Myriad X hardware.
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019030000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    // Reference output values are in range [-4.992, -1.161]; l1 tolerance widened accordingly.
    testONNXModels("rcnn_ilsvrc13", pb, 0.0046);
}
991
992 TEST_P(Test_ONNX_nets, VGG16_bn)
993 {
994     applyTestTag(CV_TEST_TAG_MEMORY_6GB);  // > 2.3Gb
995
996     // output range: [-16; 27], after Softmax [0; 0.67]
997     const double lInf = (target == DNN_TARGET_MYRIAD) ? 0.038 : default_lInf;
998     testONNXModels("vgg16-bn", pb, default_l1, lInf, true);
999 }
1000
// ZFNet-512 classification model; memory-heavy, hence the 2GB tag.
TEST_P(Test_ONNX_nets, ZFNet)
{
    applyTestTag(CV_TEST_TAG_MEMORY_2GB);
    testONNXModels("zfnet512", pb);
}
1006
TEST_P(Test_ONNX_nets, ResNet18v1)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    // output range: [-16; 22], after Softmax [0, 0.51]
    // Fallback checking is disabled on MYRIAD (last argument is false there).
    testONNXModels("resnet18v1", pb, default_l1, default_lInf, true, target != DNN_TARGET_MYRIAD);
}
1014
TEST_P(Test_ONNX_nets, ResNet50v1)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    // output range: [-67; 75], after Softmax [0, 0.98]
    // Fallback checking is disabled on MYRIAD (last argument is false there).
    testONNXModels("resnet50v1", pb, default_l1, default_lInf, true, target != DNN_TARGET_MYRIAD);
}
1022
TEST_P(Test_ONNX_nets, ResNet101_DUC_HDC)
{
    applyTestTag(CV_TEST_TAG_VERYLONG);

    // Skip on the NN Builder backend for IE >= 2019R1.
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    // Skip on any IE release when NN Builder targets MYRIAD.
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
#endif
    // OpenCL targets are disabled unconditionally; the tag is only registered
    // when the backend is OpenCV itself.
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL)
    {
        if (backend == DNN_BACKEND_OPENCV)
            applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_OPENCL : CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
        throw SkipTestException("Test is disabled for OpenCL targets");
    }
    testONNXModels("resnet101_duc_hdc", pb);
}
1043
TEST_P(Test_ONNX_nets, TinyYolov2)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    if (cvtest::skipUnstableTests)
        throw SkipTestException("Skip unstable test");
    // Skip on IE: NN Builder + OpenCL targets, and any IE backend on Myriad X.
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019
            && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
    )
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);

    if (target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X,
                     backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ?
                     CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER :
                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif

    // output range: [-11; 8]; FP16 targets use relaxed tolerances.
    double l1 =  default_l1, lInf = default_lInf;
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
    {
        l1 = 0.017;
        lInf = 0.14;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        l1 = 0.018;
        lInf = 0.16;
    }
    // IE 2020.4 nGraph + OpenCL FP16 needs its own tolerances.
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
    {
        l1 = 0.018f; lInf = 0.16f;
    }
#endif

    testONNXModels("tiny_yolo2", pb, l1, lInf);
}
1085
// Small MNIST CNN; compared after Softmax because raw logits are very large.
TEST_P(Test_ONNX_nets, CNN_MNIST)
{
    // output range: [-1952; 6574], after Softmax [0; 1]
    testONNXModels("cnn_mnist", pb, default_l1, default_lInf, true);
}
1091
// MobileNetV2 classifier; compared after Softmax to tame the raw output range.
TEST_P(Test_ONNX_nets, MobileNet_v2)
{
    // output range: [-166; 317], after Softmax [0; 1]
    testONNXModels("mobilenetv2", pb, default_l1, default_lInf, true);
}
1097
TEST_P(Test_ONNX_nets, LResNet100E_IR)
{
    // Memory tag depends on build configuration; the test is also long in debug.
    applyTestTag(
#if defined(OPENCV_32BIT_CONFIGURATION) && defined(HAVE_OPENCL)
        CV_TEST_TAG_MEMORY_2GB,
#else
        (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
#endif
        CV_TEST_TAG_DEBUG_LONG
    );
    // Skip all non-CPU targets on both Inference Engine backends.
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
    }
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
    }

    // Per-backend/target tolerances; output range: [-3; 3]
    double l1 = default_l1, lInf = default_lInf;
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
    {
        l1 = 0.009;
        lInf = 0.035;
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_CPU)
    {
        l1 = 4.6e-5;
        lInf = 1.9e-4;
    }
    else if (target == DNN_TARGET_CUDA_FP16)
    {
        l1 = 0.008;
        lInf = 0.04;
    }
    testONNXModels("LResNet100E_IR", pb, l1, lInf);
}
1140
TEST_P(Test_ONNX_nets, Emotion_ferplus)
{
    // Skip any IE backend on Myriad X hardware.
#if defined(INF_ENGINE_RELEASE)
    if (target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X,
                     backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ?
                     CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER :
                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
#endif

    // Per-backend/target tolerances below.
    double l1 = default_l1;
    double lInf = default_lInf;

    // Output values are in range [-2.011, 2.111]
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        l1 = 0.007;
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_OPENCL_FP16)
    {
        l1 = 0.021;
        lInf = 0.034;
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (target == DNN_TARGET_CPU || target == DNN_TARGET_OPENCL)) {
        l1 = 2.4e-4;
        lInf = 6e-4;
    }
    // IE 2020.4 nGraph + OpenCL FP16 overrides the tolerances picked above.
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
    {
        l1 = 0.012f; lInf = 0.035f;
    }
#endif

    testONNXModels("emotion_ferplus", pb, l1, lInf);
}
1175
// Inception v2 classifier; compared after Softmax (useSoftmax == true).
TEST_P(Test_ONNX_nets, Inception_v2)
{
    testONNXModels("inception_v2", pb, default_l1, default_lInf, true);
}
1180
TEST_P(Test_ONNX_nets, DenseNet121)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    // output range: [-87; 138], after Softmax [0; 1]
    // Fallback checking is disabled on MYRIAD (last argument is false there).
    testONNXModels("densenet121", pb, default_l1, default_lInf, true, target != DNN_TARGET_MYRIAD);
}
1188
TEST_P(Test_ONNX_nets, Inception_v1)
{
    // Skip on MYRIAD for both Inference Engine backends.
#if defined(INF_ENGINE_RELEASE)
    if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
         backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
    testONNXModels("inception_v1", pb);
}
1198
1199 TEST_P(Test_ONNX_nets, Shufflenet)
1200 {
1201     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
1202     {
1203         if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
1204         if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
1205         if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
1206     }
1207     testONNXModels("shufflenet", pb);
1208 }
1209
TEST_P(Test_ONNX_nets, Resnet34_kinetics)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
    // Only CUDA and CPU targets are exercised; everything else is skipped.
    if (backend == DNN_BACKEND_CUDA)
    {
        // CUDA is supported on any target; no skip needed.
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
    else if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");

    String onnxmodel = findDataFile("dnn/resnet-34_kinetics.onnx", false);
    Mat image0 = imread(findDataFile("dnn/dog416.png"));
    Mat image1 = imread(findDataFile("dnn/street.png"));

    Mat ref0 = blobFromNPY(_tf("data/output_kinetics0.npy"));
    Mat ref1 = blobFromNPY(_tf("data/output_kinetics1.npy"));

    // Build a 16-frame "clip" by repeating each still image.
    std::vector<Mat> images_0(16, image0);
    std::vector<Mat> images_1(16, image1);
    Mat blob0 = blobFromImages(images_0, 1.0, Size(112, 112), Scalar(114.7748, 107.7354, 99.4750), true, true);
    Mat blob1 = blobFromImages(images_1, 1.0, Size(112, 112), Scalar(114.7748, 107.7354, 99.4750), true, true);

    // Helper net with a single Permute layer (order {1, 0, 2, 3}) that swaps
    // the first two blob axes before the 5-D reshape below.
    Net permute;
    LayerParams lp;
    int order[] = {1, 0, 2, 3};
    lp.set("order", DictValue::arrayInt<int*>(&order[0], 4));
    permute.addLayerToPrev("perm", "Permute", lp);

    permute.setPreferableBackend(backend);
    permute.setPreferableTarget(target);

    // clone() detaches each result from the net's internal output buffer.
    permute.setInput(blob0);
    Mat input0 = permute.forward().clone();

    permute.setInput(blob1);
    Mat input1 = permute.forward().clone();

    // Reshape the permuted blobs to the 5-D layout {1, 3, 16, 112, 112}.
    int dims[] = {1, 3, 16, 112, 112};
    input0 = input0.reshape(0, 5, &dims[0]);
    input1 = input1.reshape(0, 5, &dims[0]);

    Net net = readNetFromONNX(onnxmodel);
    ASSERT_FALSE(net.empty());
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    // output range [-5, 11]; CUDA FP16 uses relaxed tolerances.
    float l1 = 0.0013, lInf = 0.009;
    if (target == DNN_TARGET_CUDA_FP16)
    {
        l1 = 0.01;
        lInf = 0.06;
    }

    // Run both clips through the same net and compare against the references.
    checkBackend(&input0, &ref0);
    net.setInput(input0);
    Mat out = net.forward().clone();
    normAssert(ref0, out, "", l1, lInf);

    checkBackend(&input1, &ref1);
    net.setInput(input1);
    out = net.forward().clone();
    normAssert(ref1, out, "", l1, lInf);

    expectNoFallbacksFromIE(net);
}
1282
1283 INSTANTIATE_TEST_CASE_P(/**/, Test_ONNX_nets, dnnBackendsAndTargets());
1284
1285 }} // namespace