// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.


#include "test_precomp.hpp"
#include "npy_blob.hpp"
#include <opencv2/dnn/shape_utils.hpp>
namespace opencv_test { namespace {

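// Resolve a test file path relative to the dnn/onnx/ directory of the OpenCV test data.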
template<typename TString>
static std::string _tf(TString filename, bool required = true)
{
    return findDataFile(std::string("dnn/onnx/") + filename, required);
}

class Test_ONNX_layers : public DNNTestLayer
{
public:
    bool required;

    Test_ONNX_layers() : required(true) { }

    enum Extension
    {
        npy,
        pb
    };

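    // Run a single-model regression test:
    //  - load models/<basename>.onnx and the reference pair data/input_<basename>.* /
    //    data/output_<basename>.* (NumPy arrays for `npy`, ONNX TensorProto files for `pb`),
    //  - forward the input through the network on the current backend/target,
    //  - optionally push both the output and the reference through a softmax (useSoftmax),
    //  - compare them with normAssert; l1/lInf of 0 fall back to default_l1/default_lInf,
    //  - and, when checkNoFallbacks is set, verify no layer fell back from the IE backend.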
    void testONNXModels(const String& basename, const Extension ext = npy,
                        const double l1 = 0, const float lInf = 0, const bool useSoftmax = false,
                        bool checkNoFallbacks = true)
    {
        String onnxmodel = _tf("models/" + basename + ".onnx", required);
        Mat inp, ref;
        if (ext == npy) {
            inp = blobFromNPY(_tf("data/input_" + basename + ".npy"));
            ref = blobFromNPY(_tf("data/output_" + basename + ".npy"));
        }
        else if (ext == pb) {
            inp = readTensorFromONNX(_tf("data/input_" + basename + ".pb"));
            ref = readTensorFromONNX(_tf("data/output_" + basename + ".pb"));
        }
        else
            CV_Error(Error::StsUnsupportedFormat, "Unsupported extension");

        checkBackend(&inp, &ref);
        Net net = readNetFromONNX(onnxmodel);
        ASSERT_FALSE(net.empty());

        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);

        net.setInput(inp);
        Mat out = net.forward("");

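        // Compare class probabilities rather than raw scores: run both the network
        // output and the reference blob through a softmax before the assertion.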
        if (useSoftmax)
        {
            LayerParams lp;
            Net netSoftmax;
            netSoftmax.addLayerToPrev("softmaxLayer", "SoftMax", lp);
            netSoftmax.setPreferableBackend(DNN_BACKEND_OPENCV);

            netSoftmax.setInput(out);
            out = netSoftmax.forward();

            netSoftmax.setInput(ref);
            ref = netSoftmax.forward();
        }
        normAssert(ref, out, "", l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
        if (checkNoFallbacks)
            expectNoFallbacksFromIE(net);
    }
};
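
// Every TEST_P below runs once per (backend, target) pair provided by
// dnnBackendsAndTargets(); see the INSTANTIATE_TEST_CASE_P calls further down.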

TEST_P(Test_ONNX_layers, InstanceNorm)
{
    if (target == DNN_TARGET_MYRIAD)
        testONNXModels("instancenorm", npy, 0, 0, false, false);
    else
        testONNXModels("instancenorm", npy);
}

TEST_P(Test_ONNX_layers, MaxPooling)
{
    testONNXModels("maxpooling", npy, 0, 0, false, false);
    testONNXModels("two_maxpooling", npy, 0, 0, false, false);
}

TEST_P(Test_ONNX_layers, Convolution)
{
    testONNXModels("convolution");
}

TEST_P(Test_ONNX_layers, Convolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starting from 2019R1");
#endif
    if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");
    testONNXModels("conv3d");
    testONNXModels("conv3d_bias");
}

TEST_P(Test_ONNX_layers, Two_convolution)
{
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
    // Reference output values are in range [-0.855, 0.611]
    testONNXModels("two_convolution");
}

TEST_P(Test_ONNX_layers, Deconvolution)
{
    testONNXModels("deconvolution", npy, 0, 0, false, false);
    testONNXModels("two_deconvolution", npy, 0, 0, false, false);
    testONNXModels("deconvolution_group", npy, 0, 0, false, false);
    testONNXModels("deconvolution_output_shape", npy, 0, 0, false, false);
    testONNXModels("deconv_adjpad_2d", npy, 0, 0, false, false);
}

TEST_P(Test_ONNX_layers, Deconvolution3D)
{
#if defined(INF_ENGINE_RELEASE)
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
    if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
        throw SkipTestException("Only DLIE backend on CPU is supported");
    testONNXModels("deconv3d");
    testONNXModels("deconv3d_bias");
    testONNXModels("deconv3d_pad");
    testONNXModels("deconv3d_adjpad");
}

TEST_P(Test_ONNX_layers, Dropout)
{
    testONNXModels("dropout");
}

TEST_P(Test_ONNX_layers, Linear)
{
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    testONNXModels("linear");
}

TEST_P(Test_ONNX_layers, ReLU)
{
    testONNXModels("ReLU");
}

TEST_P(Test_ONNX_layers, Clip)
{
    testONNXModels("clip", npy);
}

TEST_P(Test_ONNX_layers, ReduceMean)
{
    testONNXModels("reduce_mean");
}

TEST_P(Test_ONNX_layers, ReduceMean3D)
{
    if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");
    testONNXModels("reduce_mean3d");
}

TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
{
    testONNXModels("maxpooling_sigmoid");
}

TEST_P(Test_ONNX_layers, Concatenation)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
    }
    testONNXModels("concatenation");
}

TEST_P(Test_ONNX_layers, Eltwise3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starting from 2019R1");
#endif
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU on DLIE backend is supported");
    testONNXModels("eltwise3d");
}

TEST_P(Test_ONNX_layers, AveragePooling)
{
    testONNXModels("average_pooling");
}

TEST_P(Test_ONNX_layers, MaxPooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starting from 2019R1");
#endif
    if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");
    testONNXModels("max_pool3d", npy, 0, 0, false, false);
}

TEST_P(Test_ONNX_layers, AvePooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starting from 2019R1");
#endif
    if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");
    testONNXModels("ave_pool3d");
}

TEST_P(Test_ONNX_layers, PoolConv3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starting from 2019R1");
#endif
    if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");
    testONNXModels("pool_conv_3d");
}

TEST_P(Test_ONNX_layers, BatchNormalization)
{
    testONNXModels("batch_norm");
}

TEST_P(Test_ONNX_layers, BatchNormalization3D)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
    }
    testONNXModels("batch_norm_3d");
}

TEST_P(Test_ONNX_layers, Transpose)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
    }
    testONNXModels("transpose");
}

TEST_P(Test_ONNX_layers, Multiplication)
{
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
    testONNXModels("mul");
}

TEST_P(Test_ONNX_layers, Constant)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
       applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
    testONNXModels("constant");
}

TEST_P(Test_ONNX_layers, Padding)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    testONNXModels("padding", npy, 0, 0, false, false);
#else
    testONNXModels("padding");
#endif
}

TEST_P(Test_ONNX_layers, Resize)
{
    testONNXModels("resize_nearest");
}

TEST_P(Test_ONNX_layers, MultyInputs)
{
    const String model = _tf("models/multy_inputs.onnx");

    Net net = readNetFromONNX(model);
    ASSERT_FALSE(net.empty());

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    Mat inp1 = blobFromNPY(_tf("data/input_multy_inputs_0.npy"));
    Mat inp2 = blobFromNPY(_tf("data/input_multy_inputs_1.npy"));
    Mat ref  = blobFromNPY(_tf("data/output_multy_inputs.npy"));
    checkBackend(&inp1, &ref);

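    // The ONNX graph declares two inputs named "0" and "1"; bind each blob by input name.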
    net.setInput(inp1, "0");
    net.setInput(inp2, "1");
    Mat out = net.forward();

    normAssert(ref, out, "", default_l1, default_lInf);
    expectNoFallbacksFromIE(net);
}

TEST_P(Test_ONNX_layers, DynamicReshape)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
    }
    testONNXModels("dynamic_reshape");
}

TEST_P(Test_ONNX_layers, Reshape)
{
    testONNXModels("unsqueeze");
}

TEST_P(Test_ONNX_layers, Slice)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    testONNXModels("slice", npy, 0, 0, false, false);
#else
    testONNXModels("slice");
#endif
}

TEST_P(Test_ONNX_layers, Softmax)
{
    testONNXModels("softmax");
    testONNXModels("log_softmax", npy, 0, 0, false, false);
}

TEST_P(Test_ONNX_layers, Split_EltwiseMax)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
    testONNXModels("split_max");
}

INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());

class Test_ONNX_nets : public Test_ONNX_layers
{
public:
    Test_ONNX_nets() { required = false; }
};

TEST_P(Test_ONNX_nets, Alexnet)
{
    applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
    const String model = _tf("models/alexnet.onnx", false);

    Net net = readNetFromONNX(model);
    ASSERT_FALSE(net.empty());

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    Mat inp = imread(_tf("../grace_hopper_227.png"));
    Mat ref = blobFromNPY(_tf("../caffe_alexnet_prob.npy"));
    checkBackend(&inp, &ref);

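    // Resize to the 227x227 network input; swapRB=false keeps the BGR channel order
    // produced by imread, and no mean subtraction or scaling is applied.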
    net.setInput(blobFromImage(inp, 1.0f, Size(227, 227), Scalar(), false));
    ASSERT_FALSE(net.empty());
    Mat out = net.forward();

    normAssert(out, ref, "", default_l1, default_lInf);
    expectNoFallbacksFromIE(net);
}

TEST_P(Test_ONNX_nets, Squeezenet)
{
    testONNXModels("squeezenet", pb);
}

TEST_P(Test_ONNX_nets, Googlenet)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);

    const String model = _tf("models/googlenet.onnx", false);

    Net net = readNetFromONNX(model);
    ASSERT_FALSE(net.empty());

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    std::vector<Mat> images;
    images.push_back( imread(_tf("../googlenet_0.png")) );
    images.push_back( imread(_tf("../googlenet_1.png")) );
    Mat inp = blobFromImages(images, 1.0f, Size(), Scalar(), false);
    Mat ref = blobFromNPY(_tf("../googlenet_prob.npy"));
    checkBackend(&inp, &ref);

    net.setInput(inp);
    ASSERT_FALSE(net.empty());
    Mat out = net.forward();

    normAssert(ref, out, "", default_l1, default_lInf);
    expectNoFallbacksFromIE(net);
}

TEST_P(Test_ONNX_nets, CaffeNet)
{
    applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019030000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2019R3);
#endif
    testONNXModels("caffenet", pb);
}

TEST_P(Test_ONNX_nets, RCNN_ILSVRC13)
{
    applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019030000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2019R3);
#endif
    // Reference output values are in range [-4.992, -1.161]
    testONNXModels("rcnn_ilsvrc13", pb, 0.0045);
}

TEST_P(Test_ONNX_nets, VGG16_bn)
{
    applyTestTag(CV_TEST_TAG_MEMORY_6GB);  // > 2.3Gb

    // output range: [-16; 27], after Softmax [0; 0.67]
    const double lInf = (target == DNN_TARGET_MYRIAD) ? 0.038 : default_lInf;
    testONNXModels("vgg16-bn", pb, default_l1, lInf, true);
}

TEST_P(Test_ONNX_nets, ZFNet)
{
    applyTestTag(CV_TEST_TAG_MEMORY_2GB);
    testONNXModels("zfnet512", pb);
}

TEST_P(Test_ONNX_nets, ResNet18v1)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    // output range: [-16; 22], after Softmax [0, 0.51]
    testONNXModels("resnet18v1", pb, default_l1, default_lInf, true, target != DNN_TARGET_MYRIAD);
}

TEST_P(Test_ONNX_nets, ResNet50v1)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    // output range: [-67; 75], after Softmax [0, 0.98]
    testONNXModels("resnet50v1", pb, default_l1, default_lInf, true, target != DNN_TARGET_MYRIAD);
}

TEST_P(Test_ONNX_nets, ResNet101_DUC_HDC)
{
    applyTestTag(CV_TEST_TAG_VERYLONG);

#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);
#endif
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL)
    {
        if (backend == DNN_BACKEND_OPENCV)
            applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_OPENCL : CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
        throw SkipTestException("Test is disabled for OpenCL targets");
    }
    testONNXModels("resnet101_duc_hdc", pb);
}

TEST_P(Test_ONNX_nets, TinyYolov2)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    if (cvtest::skipUnstableTests)
        throw SkipTestException("Skip unstable test");
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE
            && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
    )
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);

    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif

    // output range: [-11; 8]
    double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.017 : default_l1;
    double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.14 : default_lInf;
    testONNXModels("tiny_yolo2", pb, l1, lInf);
}

TEST_P(Test_ONNX_nets, CNN_MNIST)
{
    // output range: [-1952; 6574], after Softmax [0; 1]
    testONNXModels("cnn_mnist", pb, default_l1, default_lInf, true);
}

TEST_P(Test_ONNX_nets, MobileNet_v2)
{
    // output range: [-166; 317], after Softmax [0; 1]
    testONNXModels("mobilenetv2", pb, default_l1, default_lInf, true);
}

TEST_P(Test_ONNX_nets, LResNet100E_IR)
{
    applyTestTag(
        (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
        CV_TEST_TAG_DEBUG_LONG
    );
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
    }

    double l1 = default_l1;
    double lInf = default_lInf;
    // output range: [-3; 3]
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) {
        l1 = 0.009;
        lInf = 0.035;
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU) {
        l1 = 4.6e-5;
        lInf = 1.9e-4;
    }
    testONNXModels("LResNet100E_IR", pb, l1, lInf);
}

TEST_P(Test_ONNX_nets, Emotion_ferplus)
{
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif

    double l1 = default_l1;
    double lInf = default_lInf;

    // Output values are in range [-2.011, 2.111]
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        l1 = 0.007;
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
    {
        l1 = 0.021;
        lInf = 0.034;
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_CPU || target == DNN_TARGET_OPENCL)) {
        l1 = 2.4e-4;
        lInf = 6e-4;
    }
    testONNXModels("emotion_ferplus", pb, l1, lInf);
}

TEST_P(Test_ONNX_nets, Inception_v2)
{
    testONNXModels("inception_v2", pb, default_l1, default_lInf, true);
}

TEST_P(Test_ONNX_nets, DenseNet121)
{
    applyTestTag(CV_TEST_TAG_MEMORY_512MB);

    // output range: [-87; 138], after Softmax [0; 1]
    testONNXModels("densenet121", pb, default_l1, default_lInf, true, target != DNN_TARGET_MYRIAD);
}

TEST_P(Test_ONNX_nets, Inception_v1)
{
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
    testONNXModels("inception_v1", pb);
}

TEST_P(Test_ONNX_nets, Shufflenet)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
    }
    testONNXModels("shufflenet", pb);
}

TEST_P(Test_ONNX_nets, Resnet34_kinetics)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starting from 2019R1");
#endif
    if (target != DNN_TARGET_CPU)
        throw SkipTestException("Only CPU is supported");

    String onnxmodel = findDataFile("dnn/resnet-34_kinetics.onnx", false);
    Mat image0 = imread(findDataFile("dnn/dog416.png"));
    Mat image1 = imread(findDataFile("dnn/street.png"));

    Mat ref0 = blobFromNPY(_tf("data/output_kinetics0.npy"));
    Mat ref1 = blobFromNPY(_tf("data/output_kinetics1.npy"));

    std::vector<Mat> images_0(16, image0);
    std::vector<Mat> images_1(16, image1);
    Mat blob0 = blobFromImages(images_0, 1.0, Size(112, 112), Scalar(114.7748, 107.7354, 99.4750), true, true);
    Mat blob1 = blobFromImages(images_1, 1.0, Size(112, 112), Scalar(114.7748, 107.7354, 99.4750), true, true);

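    // blobFromImages packs the 16 frames as a 16x3x112x112 blob; permute it to
    // 3x16x112x112 and reshape to the 1x3x16x112x112 clip layout the 3D network expects.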
    Net permute;
    LayerParams lp;
    int order[] = {1, 0, 2, 3};
    lp.set("order", DictValue::arrayInt<int*>(&order[0], 4));
    permute.addLayerToPrev("perm", "Permute", lp);

    permute.setInput(blob0);
    Mat input0 = permute.forward().clone();

    permute.setInput(blob1);
    Mat input1 = permute.forward().clone();

    int dims[] = {1, 3, 16, 112, 112};
    input0 = input0.reshape(0, 5, &dims[0]);
    input1 = input1.reshape(0, 5, &dims[0]);

    Net net = readNetFromONNX(onnxmodel);
    ASSERT_FALSE(net.empty());
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    // output range [-5, 11]
    float l1 = 0.0013;
    float lInf = 0.009;

    checkBackend(&input0, &ref0);
    net.setInput(input0);
    Mat out = net.forward().clone();
    normAssert(ref0, out, "", l1, lInf);

    checkBackend(&input1, &ref1);
    net.setInput(input1);
    out = net.forward().clone();
    normAssert(ref1, out, "", l1, lInf);

    expectNoFallbacksFromIE(net);
}

INSTANTIATE_TEST_CASE_P(/**/, Test_ONNX_nets, dnnBackendsAndTargets());

}} // namespace