// Merged via pull request #14827 (YashasSamaga:cuda4dnn-csl-low)
// modules/dnn/test/test_onnx_importer.cpp
1 // This file is part of OpenCV project.
2 // It is subject to the license terms in the LICENSE file found in the top-level directory
3 // of this distribution and at http://opencv.org/license.html.
4
5 // Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
6 // Third party copyrights are property of their respective owners.
7
8
9 #include "test_precomp.hpp"
10 #include "npy_blob.hpp"
11 #include <opencv2/dnn/shape_utils.hpp>
12 namespace opencv_test { namespace {
13
// Resolves a filename relative to the dnn/onnx test-data directory,
// delegating existence handling to findDataFile (throws/skips if
// `required` and the file is absent).
template<typename TString>
static std::string _tf(TString filename, bool required = true)
{
    const std::string prefix = "dnn/onnx/";
    return findDataFile(prefix + filename, required);
}
19
// Parameterized fixture for per-layer ONNX import tests.  The test
// parameter (inherited from DNNTestLayer) supplies the backend/target
// pair under test.
class Test_ONNX_layers : public DNNTestLayer
{
public:
    bool required;  // whether the .onnx model file must be present (see Test_ONNX_nets)

    Test_ONNX_layers() : required(true) { }

    // Storage format of the recorded input/reference blobs on disk.
    enum Extension
    {
        npy,  // NumPy .npy blobs
        pb    // ONNX TensorProto .pb blobs
    };

    // Loads models/<basename>.onnx together with its recorded input and
    // reference output, runs the net on the configured backend/target and
    // compares the result against the reference.
    //   l1, lInf         - comparison tolerances; 0 means "use the fixture's
    //                      per-target defaults" (default_l1 / default_lInf).
    //   useSoftmax       - pass both output and reference through a Softmax
    //                      layer before comparing (for nets emitting raw scores).
    //   checkNoFallbacks - additionally assert that no layer fell back from
    //                      the Inference Engine backend to the OpenCV one.
    void testONNXModels(const String& basename, const Extension ext = npy,
                        const double l1 = 0, const float lInf = 0, const bool useSoftmax = false,
                        bool checkNoFallbacks = true)
    {
        String onnxmodel = _tf("models/" + basename + ".onnx", required);
        Mat inp, ref;
        if (ext == npy) {
            inp = blobFromNPY(_tf("data/input_" + basename + ".npy"));
            ref = blobFromNPY(_tf("data/output_" + basename + ".npy"));
        }
        else if (ext == pb) {
            inp = readTensorFromONNX(_tf("data/input_" + basename + ".pb"));
            ref = readTensorFromONNX(_tf("data/output_" + basename + ".pb"));
        }
        else
            CV_Error(Error::StsUnsupportedFormat, "Unsupported extension");

        checkBackend(&inp, &ref);
        Net net = readNetFromONNX(onnxmodel);
        ASSERT_FALSE(net.empty());

        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);

        net.setInput(inp);
        Mat out = net.forward("");

        if (useSoftmax)
        {
            // Normalize both tensors with a CPU-side softmax so nets that
            // produce unnormalized scores can be compared with loose tolerances.
            LayerParams lp;
            Net netSoftmax;
            netSoftmax.addLayerToPrev("softmaxLayer", "SoftMax", lp);
            netSoftmax.setPreferableBackend(DNN_BACKEND_OPENCV);

            netSoftmax.setInput(out);
            out = netSoftmax.forward();

            netSoftmax.setInput(ref);
            ref = netSoftmax.forward();
        }
        normAssert(ref, out, "", l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
        if (checkNoFallbacks)
            expectNoFallbacksFromIE(net);
    }
};
78
TEST_P(Test_ONNX_layers, InstanceNorm)
{
    if(backend == DNN_BACKEND_CUDA)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* MVN is not supported */

    // On MYRIAD, skip the IE-fallback check (trailing `false` argument).
    if (target == DNN_TARGET_MYRIAD)
        testONNXModels("instancenorm", npy, 0, 0, false, false);
    else
        testONNXModels("instancenorm", npy);
}

// Fallback check disabled for both pooling variants.
TEST_P(Test_ONNX_layers, MaxPooling)
{
    testONNXModels("maxpooling", npy, 0, 0, false, false);
    testONNXModels("two_maxpooling", npy, 0, 0, false, false);
}

TEST_P(Test_ONNX_layers, Convolution)
{
    testONNXModels("convolution");
}

TEST_P(Test_ONNX_layers, Convolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    if(backend == DNN_BACKEND_INFERENCE_ENGINE)
        throw SkipTestException("Test is enabled starts from 2019R1");
#endif
    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
        throw SkipTestException("Only CPU and CUDA is supported");
    testONNXModels("conv3d");
    testONNXModels("conv3d_bias");
}

TEST_P(Test_ONNX_layers, Two_convolution)
{
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
    // Reference output values are in range [-0.855, 0.611]
    testONNXModels("two_convolution");
}

// Several deconvolution layouts; IE-fallback check disabled for all of them.
TEST_P(Test_ONNX_layers, Deconvolution)
{
    testONNXModels("deconvolution", npy, 0, 0, false, false);
    testONNXModels("two_deconvolution", npy, 0, 0, false, false);
    testONNXModels("deconvolution_group", npy, 0, 0, false, false);
    testONNXModels("deconvolution_output_shape", npy, 0, 0, false, false);
    testONNXModels("deconv_adjpad_2d", npy, 0, 0, false, false);
}
133
TEST_P(Test_ONNX_layers, Deconvolution3D)
{
#if defined(INF_ENGINE_RELEASE)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
    if ((backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU) && backend != DNN_BACKEND_CUDA)
        throw SkipTestException("Only DLIE backend on CPU, and CUDA is supported");
    testONNXModels("deconv3d");
    testONNXModels("deconv3d_bias");
    testONNXModels("deconv3d_pad");
    testONNXModels("deconv3d_adjpad");
}

TEST_P(Test_ONNX_layers, Dropout)
{
    testONNXModels("dropout");
}

TEST_P(Test_ONNX_layers, Linear)
{
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    testONNXModels("linear");
}

TEST_P(Test_ONNX_layers, ReLU)
{
    testONNXModels("ReLU");
}

TEST_P(Test_ONNX_layers, Clip)
{
    testONNXModels("clip", npy);
}

TEST_P(Test_ONNX_layers, ReduceMean)
{
    testONNXModels("reduce_mean");
}

TEST_P(Test_ONNX_layers, ReduceMean3D)
{
    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
        throw SkipTestException("Only CPU and CUDA is supported");
    testONNXModels("reduce_mean3d");
}
180
TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
{
    testONNXModels("maxpooling_sigmoid");
}

TEST_P(Test_ONNX_layers, Concatenation)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
    }
    testONNXModels("concatenation");
}

TEST_P(Test_ONNX_layers, Eltwise3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starts from 2019R1");
#endif
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU on DLIE backend is supported");
    testONNXModels("eltwise3d");
}

TEST_P(Test_ONNX_layers, AveragePooling)
{
    testONNXModels("average_pooling");
}

// 3D tests below run only on CPU or CUDA (and, for IE, from 2019R1 on).
TEST_P(Test_ONNX_layers, MaxPooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starts from 2019R1");
#endif
    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
        throw SkipTestException("Only CPU and CUDA is supported");
    testONNXModels("max_pool3d", npy, 0, 0, false, false);
}

TEST_P(Test_ONNX_layers, AvePooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starts from 2019R1");
#endif
    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
        throw SkipTestException("Only CPU and CUDA is supported");
    testONNXModels("ave_pool3d");
}

TEST_P(Test_ONNX_layers, PoolConv3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starts from 2019R1");
#endif
    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
        throw SkipTestException("Only CPU and CUDA is supported");
    testONNXModels("pool_conv_3d");
}
241
TEST_P(Test_ONNX_layers, BatchNormalization)
{
    testONNXModels("batch_norm");
}

TEST_P(Test_ONNX_layers, BatchNormalization3D)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
    }
    testONNXModels("batch_norm_3d");
}

TEST_P(Test_ONNX_layers, Transpose)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
    }
    testONNXModels("transpose");
}

TEST_P(Test_ONNX_layers, Multiplication)
{
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
    testONNXModels("mul");
}

TEST_P(Test_ONNX_layers, Constant)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
       applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
    testONNXModels("constant");
}

TEST_P(Test_ONNX_layers, Padding)
{
// Before IE 2019R1, run without the IE-fallback check.
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    testONNXModels("padding", npy, 0, 0, false, false);
#else
    testONNXModels("padding");
#endif
}

TEST_P(Test_ONNX_layers, Resize)
{
    testONNXModels("resize_nearest");
}
301
302 TEST_P(Test_ONNX_layers, MultyInputs)
303 {
304     const String model =  _tf("models/multy_inputs.onnx");
305
306     Net net = readNetFromONNX(model);
307     ASSERT_FALSE(net.empty());
308
309     net.setPreferableBackend(backend);
310     net.setPreferableTarget(target);
311
312     Mat inp1 = blobFromNPY(_tf("data/input_multy_inputs_0.npy"));
313     Mat inp2 = blobFromNPY(_tf("data/input_multy_inputs_1.npy"));
314     Mat ref  = blobFromNPY(_tf("data/output_multy_inputs.npy"));
315     checkBackend(&inp1, &ref);
316
317     net.setInput(inp1, "0");
318     net.setInput(inp2, "1");
319     Mat out = net.forward();
320
321     normAssert(ref, out, "", default_l1,  default_lInf);
322     expectNoFallbacksFromIE(net);
323 }
324
TEST_P(Test_ONNX_layers, DynamicReshape)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
    }
    testONNXModels("dynamic_reshape");
}

TEST_P(Test_ONNX_layers, Reshape)
{
    testONNXModels("unsqueeze");
}

TEST_P(Test_ONNX_layers, Slice)
{
// Before IE 2019R1, run without the IE-fallback check.
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    testONNXModels("slice", npy, 0, 0, false, false);
#else
    testONNXModels("slice");
#endif
}

TEST_P(Test_ONNX_layers, Softmax)
{
    testONNXModels("softmax");
    testONNXModels("log_softmax", npy, 0, 0, false, false);
}

TEST_P(Test_ONNX_layers, Split_EltwiseMax)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
    testONNXModels("split_max");
}

// Instantiate every layer test above for all available backend/target pairs.
INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());
363
// Fixture for whole-network ONNX tests.  Model files are optional test data
// (required = false), so a missing download produces a skip rather than a
// hard failure in testONNXModels.
class Test_ONNX_nets : public Test_ONNX_layers
{
public:
    Test_ONNX_nets() { required = false; }
};
369
370 TEST_P(Test_ONNX_nets, Alexnet)
371 {
372     applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
373     const String model =  _tf("models/alexnet.onnx", false);
374
375     Net net = readNetFromONNX(model);
376     ASSERT_FALSE(net.empty());
377
378     net.setPreferableBackend(backend);
379     net.setPreferableTarget(target);
380
381     Mat inp = imread(_tf("../grace_hopper_227.png"));
382     Mat ref = blobFromNPY(_tf("../caffe_alexnet_prob.npy"));
383     checkBackend(&inp, &ref);
384
385     net.setInput(blobFromImage(inp, 1.0f, Size(227, 227), Scalar(), false));
386     ASSERT_FALSE(net.empty());
387     Mat out = net.forward();
388
389     normAssert(out, ref, "", default_l1,  default_lInf);
390     expectNoFallbacksFromIE(net);
391 }
392
TEST_P(Test_ONNX_nets, Squeezenet)
{
    testONNXModels("squeezenet", pb);
}
397
398 TEST_P(Test_ONNX_nets, Googlenet)
399 {
400     if (backend == DNN_BACKEND_INFERENCE_ENGINE)
401         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
402
403     const String model = _tf("models/googlenet.onnx", false);
404
405     Net net = readNetFromONNX(model);
406     ASSERT_FALSE(net.empty());
407
408     net.setPreferableBackend(backend);
409     net.setPreferableTarget(target);
410
411     std::vector<Mat> images;
412     images.push_back( imread(_tf("../googlenet_0.png")) );
413     images.push_back( imread(_tf("../googlenet_1.png")) );
414     Mat inp = blobFromImages(images, 1.0f, Size(), Scalar(), false);
415     Mat ref = blobFromNPY(_tf("../googlenet_prob.npy"));
416     checkBackend(&inp, &ref);
417
418     net.setInput(inp);
419     ASSERT_FALSE(net.empty());
420     Mat out = net.forward();
421
422     normAssert(ref, out, "", default_l1,  default_lInf);
423     expectNoFallbacksFromIE(net);
424 }
425
TEST_P(Test_ONNX_nets, CaffeNet)
{
    applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019030000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2019R3);
#endif
    testONNXModels("caffenet", pb);
}

TEST_P(Test_ONNX_nets, RCNN_ILSVRC13)
{
    applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019030000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2019R3);
#endif
    // Reference output values are in range [-4.992, -1.161]
    testONNXModels("rcnn_ilsvrc13", pb, 0.0045);
}
448
TEST_P(Test_ONNX_nets, VGG16_bn)
{
    applyTestTag(CV_TEST_TAG_MEMORY_6GB);  // > 2.3Gb

    // output range: [-16; 27], after Softmax [0; 0.67]
    const double lInf = (target == DNN_TARGET_MYRIAD) ? 0.038 : default_lInf;
    testONNXModels("vgg16-bn", pb, default_l1, lInf, true);
}

TEST_P(Test_ONNX_nets, ZFNet)
{
    applyTestTag(CV_TEST_TAG_MEMORY_2GB);
    testONNXModels("zfnet512", pb);
}

TEST_P(Test_ONNX_nets, ResNet18v1)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    // output range: [-16; 22], after Softmax [0, 0.51]
    // Last argument: skip the IE-fallback check on MYRIAD.
    testONNXModels("resnet18v1", pb, default_l1, default_lInf, true, target != DNN_TARGET_MYRIAD);
}

TEST_P(Test_ONNX_nets, ResNet50v1)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    // output range: [-67; 75], after Softmax [0, 0.98]
    testONNXModels("resnet50v1", pb, default_l1, default_lInf, true, target != DNN_TARGET_MYRIAD);
}
479
TEST_P(Test_ONNX_nets, ResNet101_DUC_HDC)
{
    applyTestTag(CV_TEST_TAG_VERYLONG);

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);
#endif
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
    // Disabled on all OpenCL targets (only the OpenCV backend gets a tag).
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL)
    {
        if (backend == DNN_BACKEND_OPENCV)
            applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_OPENCL : CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
        throw SkipTestException("Test is disabled for OpenCL targets");
    }
    testONNXModels("resnet101_duc_hdc", pb);
}
500
TEST_P(Test_ONNX_nets, TinyYolov2)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    if (cvtest::skipUnstableTests)
        throw SkipTestException("Skip unstable test");
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE
            && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
    )
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);

    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif

    // output range: [-11; 8]
    // Loosened tolerances on half-precision targets (FP16 / MYRIAD).
    double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.017 : default_l1;
    double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.14 : default_lInf;
    testONNXModels("tiny_yolo2", pb, l1, lInf);
}
524
TEST_P(Test_ONNX_nets, CNN_MNIST)
{
    // output range: [-1952; 6574], after Softmax [0; 1]
    testONNXModels("cnn_mnist", pb, default_l1, default_lInf, true);
}

TEST_P(Test_ONNX_nets, MobileNet_v2)
{
    // output range: [-166; 317], after Softmax [0; 1]
    testONNXModels("mobilenetv2", pb, default_l1, default_lInf, true);
}
536
537 TEST_P(Test_ONNX_nets, LResNet100E_IR)
538 {
539     applyTestTag(
540         (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
541         CV_TEST_TAG_DEBUG_LONG
542     );
543     if (backend == DNN_BACKEND_INFERENCE_ENGINE)
544     {
545         if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
546         if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
547         if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
548     }
549
550     double l1 = default_l1;
551     double lInf = default_lInf;
552     // output range: [-3; 3]
553     if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) {
554         l1 = 0.009;
555         lInf = 0.035;
556     }
557     else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU) {
558         l1 = 4.6e-5;
559         lInf = 1.9e-4;
560     }
561     testONNXModels("LResNet100E_IR", pb, l1, lInf);
562 }
563
TEST_P(Test_ONNX_nets, Emotion_ferplus)
{
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif

    // Per-configuration tolerances; defaults apply to everything else.
    double l1 = default_l1;
    double lInf = default_lInf;

    // Output values are in range [-2.011, 2.111]
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        l1 = 0.007;
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
    {
        l1 = 0.021;
        lInf = 0.034;
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_CPU || target == DNN_TARGET_OPENCL)) {
        l1 = 2.4e-4;
        lInf = 6e-4;
    }
    testONNXModels("emotion_ferplus", pb, l1, lInf);
}
590
TEST_P(Test_ONNX_nets, Inception_v2)
{
    testONNXModels("inception_v2", pb, default_l1, default_lInf, true);
}

TEST_P(Test_ONNX_nets, DenseNet121)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    // output range: [-87; 138], after Softmax [0; 1]
    // Last argument: skip the IE-fallback check on MYRIAD.
    testONNXModels("densenet121", pb, default_l1, default_lInf, true, target != DNN_TARGET_MYRIAD);
}

TEST_P(Test_ONNX_nets, Inception_v1)
{
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
    testONNXModels("inception_v1", pb);
}

TEST_P(Test_ONNX_nets, Shufflenet)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
    }
    testONNXModels("shufflenet", pb);
}
623
624 TEST_P(Test_ONNX_nets, Resnet34_kinetics)
625 {
626 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
627     throw SkipTestException("Test is enabled starts from 2019R1");
628 #endif
629     if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
630         throw SkipTestException("Only CPU and CUDA is supported");
631
632     String onnxmodel = findDataFile("dnn/resnet-34_kinetics.onnx", false);
633     Mat image0 = imread(findDataFile("dnn/dog416.png"));
634     Mat image1 = imread(findDataFile("dnn/street.png"));
635
636     Mat ref0 = blobFromNPY(_tf("data/output_kinetics0.npy"));
637     Mat ref1 = blobFromNPY(_tf("data/output_kinetics1.npy"));
638
639     std::vector<Mat> images_0(16, image0);
640     std::vector<Mat> images_1(16, image1);
641     Mat blob0 = blobFromImages(images_0, 1.0, Size(112, 112), Scalar(114.7748, 107.7354, 99.4750), true, true);
642     Mat blob1 = blobFromImages(images_1, 1.0, Size(112, 112), Scalar(114.7748, 107.7354, 99.4750), true, true);
643
644     Net permute;
645     LayerParams lp;
646     int order[] = {1, 0, 2, 3};
647     lp.set("order", DictValue::arrayInt<int*>(&order[0], 4));
648     permute.addLayerToPrev("perm", "Permute", lp);
649
650     permute.setInput(blob0);
651     Mat input0 = permute.forward().clone();
652
653     permute.setInput(blob1);
654     Mat input1 = permute.forward().clone();
655
656     int dims[] = {1, 3, 16, 112, 112};
657     input0 = input0.reshape(0, 5, &dims[0]);
658     input1 = input1.reshape(0, 5, &dims[0]);
659
660     Net net = readNetFromONNX(onnxmodel);
661     ASSERT_FALSE(net.empty());
662     net.setPreferableBackend(backend);
663     net.setPreferableTarget(target);
664
665     // output range [-5, 11]
666     float l1 = 0.0013;
667     float lInf = 0.009;
668
669     checkBackend(&input0, &ref0);
670     net.setInput(input0);
671     Mat out = net.forward().clone();
672     normAssert(ref0, out, "", l1, lInf);
673
674     checkBackend(&input1, &ref1);
675     net.setInput(input1);
676     out = net.forward().clone();
677     normAssert(ref1, out, "", l1, lInf);
678
679     expectNoFallbacksFromIE(net);
680 }
681
// Instantiate every network test above for all available backend/target pairs.
INSTANTIATE_TEST_CASE_P(/**/, Test_ONNX_nets, dnnBackendsAndTargets());
683
684 }} // namespace