1 // This file is part of OpenCV project.
2 // It is subject to the license terms in the LICENSE file found in the top-level directory
3 // of this distribution and at http://opencv.org/license.html.
5 // Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
6 // Third party copyrights are property of their respective owners.
9 #include "test_precomp.hpp"
10 #include "npy_blob.hpp"
11 #include <opencv2/dnn/shape_utils.hpp>
12 namespace opencv_test { namespace {
// Resolves `filename` inside the "dnn/onnx/" test-data directory.
// When `required` is false, findDataFile tolerates a missing file
// (used for large optional model downloads).
14 template<typename TString>
15 static std::string _tf(TString filename, bool required = true)
17 return findDataFile(std::string("dnn/onnx/") + filename, required);
// Fixture for per-layer ONNX import tests, parameterized over (backend, target)
// via DNNTestLayer. `required` controls whether the .onnx model file must exist.
20 class Test_ONNX_layers : public DNNTestLayer
25 Test_ONNX_layers() : required(true) { }
// Loads models/<basename>.onnx plus its reference input/output pair
// (either .npy or .pb, selected by `ext`), runs the net on the requested
// backend/target and compares against the reference.
//   l1/lInf       - tolerance overrides; 0 means "use the fixture default"
//   useSoftmax    - pass both output and reference through a Softmax layer
//                   first (for nets whose raw logits differ too much)
//   checkNoFallbacks - also assert no layer fell back from IE to OpenCV
// NOTE(review): this view is elided (missing braces/else and the
// `inp`/`ref`/`netSoftmax`/`lp` declarations) — confirm against upstream.
33 void testONNXModels(const String& basename, const Extension ext = npy,
34 const double l1 = 0, const float lInf = 0, const bool useSoftmax = false,
35 bool checkNoFallbacks = true)
37 String onnxmodel = _tf("models/" + basename + ".onnx", required);
// npy branch: reference tensors stored as NumPy blobs.
40 inp = blobFromNPY(_tf("data/input_" + basename + ".npy"));
41 ref = blobFromNPY(_tf("data/output_" + basename + ".npy"));
// pb branch: reference tensors stored as ONNX TensorProto files.
44 inp = readTensorFromONNX(_tf("data/input_" + basename + ".pb"));
45 ref = readTensorFromONNX(_tf("data/output_" + basename + ".pb"));
48 CV_Error(Error::StsUnsupportedFormat, "Unsupported extension");
50 checkBackend(&inp, &ref);
51 Net net = readNetFromONNX(onnxmodel);
52 ASSERT_FALSE(net.empty());
54 net.setPreferableBackend(backend);
55 net.setPreferableTarget(target);
58 Mat out = net.forward("");
// Optional softmax post-processing of both `out` and `ref` so the
// comparison happens on probabilities rather than raw scores.
64 netSoftmax.addLayerToPrev("softmaxLayer", "SoftMax", lp);
65 netSoftmax.setPreferableBackend(DNN_BACKEND_OPENCV);
67 netSoftmax.setInput(out);
68 out = netSoftmax.forward();
70 netSoftmax.setInput(ref);
71 ref = netSoftmax.forward();
// Zero tolerances fall back to the fixture defaults for this target.
73 normAssert(ref, out, "", l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
75 expectNoFallbacksFromIE(net);
// ONNX InstanceNormalization import. Skipped on CUDA (MVN unsupported);
// on MYRIAD the IE-no-fallback check is disabled (last arg false).
79 TEST_P(Test_ONNX_layers, InstanceNorm)
81 if(backend == DNN_BACKEND_CUDA)
82 applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* MVN is not supported */
84 if (target == DNN_TARGET_MYRIAD)
85 testONNXModels("instancenorm", npy, 0, 0, false, false);
87 testONNXModels("instancenorm", npy);
// MaxPool import, single and chained; no-fallback check disabled for both.
90 TEST_P(Test_ONNX_layers, MaxPooling)
92 testONNXModels("maxpooling", npy, 0, 0, false, false);
93 testONNXModels("two_maxpooling", npy, 0, 0, false, false);
// Plain 2D Conv import with default tolerances.
96 TEST_P(Test_ONNX_layers, Convolution)
98 testONNXModels("convolution");
// 3D convolution (with and without bias). Needs IE >= 2019R1;
// otherwise restricted to CPU target or the CUDA backend.
101 TEST_P(Test_ONNX_layers, Convolution3D)
103 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
104 if(backend == DNN_BACKEND_INFERENCE_ENGINE)
105 throw SkipTestException("Test is enabled starts from 2019R1");
107 if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
108 throw SkipTestException("Only CPU and CUDA is supported");
109 testONNXModels("conv3d");
110 testONNXModels("conv3d_bias");
// Two stacked convolutions; skipped on IE + MYRIAD-X hardware.
113 TEST_P(Test_ONNX_layers, Two_convolution)
115 #if defined(INF_ENGINE_RELEASE)
116 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
117 && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
119 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
121 // Reference output values are in range [-0.855, 0.611]
122 testONNXModels("two_convolution");
// ConvTranspose variants (plain, chained, grouped, explicit output_shape,
// output adjustment/padding); no-fallback check disabled for all.
125 TEST_P(Test_ONNX_layers, Deconvolution)
127 testONNXModels("deconvolution", npy, 0, 0, false, false);
128 testONNXModels("two_deconvolution", npy, 0, 0, false, false);
129 testONNXModels("deconvolution_group", npy, 0, 0, false, false);
130 testONNXModels("deconvolution_output_shape", npy, 0, 0, false, false);
131 testONNXModels("deconv_adjpad_2d", npy, 0, 0, false, false);
// 3D ConvTranspose variants. Only DLIE-on-CPU or CUDA are exercised;
// NOTE(review): the tag is applied whenever INF_ENGINE_RELEASE is defined —
// surrounding conditions may be elided from this view, confirm upstream.
134 TEST_P(Test_ONNX_layers, Deconvolution3D)
136 #if defined(INF_ENGINE_RELEASE)
137 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_2018R5);
139 if ((backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU) && backend != DNN_BACKEND_CUDA)
140 throw SkipTestException("Only DLIE backend on CPU, and CUDA is supported");
141 testONNXModels("deconv3d");
142 testONNXModels("deconv3d_bias");
143 testONNXModels("deconv3d_pad");
144 testONNXModels("deconv3d_adjpad");
// Dropout node import (inference mode, i.e. pass-through).
147 TEST_P(Test_ONNX_layers, Dropout)
149 testONNXModels("dropout");
// Fully-connected (Gemm) import; skipped for OpenCV-backend OpenCL FP16.
152 TEST_P(Test_ONNX_layers, Linear)
154 if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
155 applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
156 testONNXModels("linear");
// ReLU activation import.
159 TEST_P(Test_ONNX_layers, ReLU)
161 testONNXModels("ReLU");
// Clip (bounded ReLU) import.
164 TEST_P(Test_ONNX_layers, Clip)
166 testONNXModels("clip", npy);
// ReduceMean import (2D case).
169 TEST_P(Test_ONNX_layers, ReduceMean)
171 testONNXModels("reduce_mean");
// ReduceMean on 3D (5-d blob) input; CPU/CUDA only.
174 TEST_P(Test_ONNX_layers, ReduceMean3D)
176 if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
177 throw SkipTestException("Only CPU and CUDA is supported")
178 testONNXModels("reduce_mean3d");
// MaxPool followed by Sigmoid.
181 TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
183 testONNXModels("maxpooling_sigmoid");
// Concat import; all non-CPU IE targets are tagged as skipped.
186 TEST_P(Test_ONNX_layers, Concatenation)
188 if (backend == DNN_BACKEND_INFERENCE_ENGINE)
190 if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
191 if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
192 if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
194 testONNXModels("concatenation");
// Element-wise op on 5-d blobs. Needs IE >= 2019R1; DLIE only on CPU.
197 TEST_P(Test_ONNX_layers, Eltwise3D)
199 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
200 throw SkipTestException("Test is enabled starts from 2019R1");
202 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
203 throw SkipTestException("Only CPU on DLIE backend is supported");
204 testONNXModels("eltwise3d");
// AveragePool import.
207 TEST_P(Test_ONNX_layers, AveragePooling)
209 testONNXModels("average_pooling");
// 3D MaxPool; needs IE >= 2019R1, restricted to CPU/CUDA,
// no-fallback check disabled.
212 TEST_P(Test_ONNX_layers, MaxPooling3D)
214 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
215 throw SkipTestException("Test is enabled starts from 2019R1");
217 if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
218 throw SkipTestException("Only CPU and CUDA is supported");
219 testONNXModels("max_pool3d", npy, 0, 0, false, false);
// 3D AveragePool; needs IE >= 2019R1, restricted to CPU/CUDA.
222 TEST_P(Test_ONNX_layers, AvePooling3D)
224 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
225 throw SkipTestException("Test is enabled starts from 2019R1");
227 if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
228 throw SkipTestException("Only CPU and CUDA is supported");
229 testONNXModels("ave_pool3d");
// Pool followed by Conv on 5-d blobs; same restrictions as the other 3D tests.
232 TEST_P(Test_ONNX_layers, PoolConv3D)
234 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
235 throw SkipTestException("Test is enabled starts from 2019R1");
237 if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
238 throw SkipTestException("Only CPU and CUDA is supported");
239 testONNXModels("pool_conv_3d");
// BatchNormalization import (2D).
242 TEST_P(Test_ONNX_layers, BatchNormalization)
244 testONNXModels("batch_norm");
// BatchNormalization on 5-d blobs; non-CPU IE targets tagged as skipped.
247 TEST_P(Test_ONNX_layers, BatchNormalization3D)
249 if (backend == DNN_BACKEND_INFERENCE_ENGINE)
251 if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
252 if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
253 if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
255 testONNXModels("batch_norm_3d");
// Transpose import; non-CPU IE targets tagged as skipped.
258 TEST_P(Test_ONNX_layers, Transpose)
260 if (backend == DNN_BACKEND_INFERENCE_ENGINE)
262 if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
263 if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
264 if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
266 testONNXModels("transpose");
// Element-wise Mul import; skipped on OpenCV OpenCL FP16 and IE MYRIAD.
269 TEST_P(Test_ONNX_layers, Multiplication)
271 if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
272 applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
273 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
274 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
275 testONNXModels("mul");
// Constant node import; skipped for IE 2018R5 on MYRIAD-X hardware.
278 TEST_P(Test_ONNX_layers, Constant)
280 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
281 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
282 && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
283 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
285 testONNXModels("constant");
// Pad import. On IE < 2019R1 the no-fallback check is disabled;
// NOTE(review): the #else/#endif between the two calls is elided here.
288 TEST_P(Test_ONNX_layers, Padding)
290 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
291 testONNXModels("padding", npy, 0, 0, false, false);
293 testONNXModels("padding");
// Nearest-neighbor Resize/Upsample import.
297 TEST_P(Test_ONNX_layers, Resize)
299 testONNXModels("resize_nearest");
// A net with two named inputs ("0" and "1"); inputs/reference come from
// .npy blobs rather than going through testONNXModels.
302 TEST_P(Test_ONNX_layers, MultyInputs)
304 const String model = _tf("models/multy_inputs.onnx");
306 Net net = readNetFromONNX(model);
307 ASSERT_FALSE(net.empty());
309 net.setPreferableBackend(backend);
310 net.setPreferableTarget(target);
312 Mat inp1 = blobFromNPY(_tf("data/input_multy_inputs_0.npy"));
313 Mat inp2 = blobFromNPY(_tf("data/input_multy_inputs_1.npy"));
314 Mat ref = blobFromNPY(_tf("data/output_multy_inputs.npy"));
315 checkBackend(&inp1, &ref);
// Bind each blob to its ONNX graph input by name.
317 net.setInput(inp1, "0");
318 net.setInput(inp2, "1");
319 Mat out = net.forward();
321 normAssert(ref, out, "", default_l1, default_lInf);
322 expectNoFallbacksFromIE(net);
// Reshape with a runtime-computed shape; IE OpenCL targets tagged as skipped.
325 TEST_P(Test_ONNX_layers, DynamicReshape)
327 if (backend == DNN_BACKEND_INFERENCE_ENGINE)
329 if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
330 if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
332 testONNXModels("dynamic_reshape");
// Unsqueeze (reshape family) import.
335 TEST_P(Test_ONNX_layers, Reshape)
337 testONNXModels("unsqueeze");
// Slice import. On IE < 2019R1 the no-fallback check is disabled;
// NOTE(review): the #else/#endif between the two calls is elided here.
340 TEST_P(Test_ONNX_layers, Slice)
342 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
343 testONNXModels("slice", npy, 0, 0, false, false);
345 testONNXModels("slice");
// Softmax and LogSoftmax import (latter with no-fallback check disabled).
349 TEST_P(Test_ONNX_layers, Softmax)
351 testONNXModels("softmax");
352 testONNXModels("log_softmax", npy, 0, 0, false, false);
// Split followed by element-wise Max; skipped entirely on the IE backend.
355 TEST_P(Test_ONNX_layers, Split_EltwiseMax)
357 if (backend == DNN_BACKEND_INFERENCE_ENGINE)
358 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
359 testONNXModels("split_max");
362 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets());
// Fixture for whole-network ONNX tests. Models are large downloads, so
// `required` is false: missing files skip rather than fail.
364 class Test_ONNX_nets : public Test_ONNX_layers
367 Test_ONNX_nets() { required = false; }
// AlexNet end-to-end: feeds a 227x227 image blob and compares against a
// Caffe-produced reference probability vector.
370 TEST_P(Test_ONNX_nets, Alexnet)
372 applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
373 const String model = _tf("models/alexnet.onnx", false);
375 Net net = readNetFromONNX(model);
376 ASSERT_FALSE(net.empty());
378 net.setPreferableBackend(backend);
379 net.setPreferableTarget(target);
381 Mat inp = imread(_tf("../grace_hopper_227.png"));
382 Mat ref = blobFromNPY(_tf("../caffe_alexnet_prob.npy"));
383 checkBackend(&inp, &ref);
// swapRB=false: the net expects BGR channel order.
385 net.setInput(blobFromImage(inp, 1.0f, Size(227, 227), Scalar(), false));
386 ASSERT_FALSE(net.empty());
387 Mat out = net.forward();
389 normAssert(out, ref, "", default_l1, default_lInf);
390 expectNoFallbacksFromIE(net);
// SqueezeNet with protobuf-stored reference tensors.
393 TEST_P(Test_ONNX_nets, Squeezenet)
395 testONNXModels("squeezenet", pb);
// GoogLeNet on a batch of two images; skipped entirely on the IE backend.
// NOTE(review): the net.setInput(inp) call appears elided from this view.
398 TEST_P(Test_ONNX_nets, Googlenet)
400 if (backend == DNN_BACKEND_INFERENCE_ENGINE)
401 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
403 const String model = _tf("models/googlenet.onnx", false);
405 Net net = readNetFromONNX(model);
406 ASSERT_FALSE(net.empty());
408 net.setPreferableBackend(backend);
409 net.setPreferableTarget(target);
411 std::vector<Mat> images;
412 images.push_back( imread(_tf("../googlenet_0.png")) );
413 images.push_back( imread(_tf("../googlenet_1.png")) );
414 Mat inp = blobFromImages(images, 1.0f, Size(), Scalar(), false);
415 Mat ref = blobFromNPY(_tf("../googlenet_prob.npy"));
416 checkBackend(&inp, &ref);
419 ASSERT_FALSE(net.empty());
420 Mat out = net.forward();
422 normAssert(ref, out, "", default_l1, default_lInf);
423 expectNoFallbacksFromIE(net);
// CaffeNet; skipped for IE 2019R3 on MYRIAD-X hardware.
426 TEST_P(Test_ONNX_nets, CaffeNet)
428 applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
429 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019030000)
430 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
431 && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
432 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2019R3);
434 testONNXModels("caffenet", pb);
// R-CNN ILSVRC13 with a relaxed l1 tolerance (0.0045);
// skipped for IE 2019R3 on MYRIAD-X hardware.
437 TEST_P(Test_ONNX_nets, RCNN_ILSVRC13)
439 applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
440 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019030000)
441 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
442 && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
443 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2019R3);
445 // Reference output values are in range [-4.992, -1.161]
446 testONNXModels("rcnn_ilsvrc13", pb, 0.0045);
// VGG16 with batch norm; compared after softmax, looser lInf on MYRIAD.
449 TEST_P(Test_ONNX_nets, VGG16_bn)
451 applyTestTag(CV_TEST_TAG_MEMORY_6GB); // > 2.3Gb
453 // output range: [-16; 27], after Softmax [0; 0.67]
454 const double lInf = (target == DNN_TARGET_MYRIAD) ? 0.038 : default_lInf;
455 testONNXModels("vgg16-bn", pb, default_l1, lInf, true);
// ZFNet-512.
458 TEST_P(Test_ONNX_nets, ZFNet)
460 applyTestTag(CV_TEST_TAG_MEMORY_2GB);
461 testONNXModels("zfnet512", pb);
// ResNet-18 v1; compared after softmax, no-fallback check skipped on MYRIAD.
464 TEST_P(Test_ONNX_nets, ResNet18v1)
466 applyTestTag(CV_TEST_TAG_MEMORY_512MB);
468 // output range: [-16; 22], after Softmax [0, 0.51]
469 testONNXModels("resnet18v1", pb, default_l1, default_lInf, true, target != DNN_TARGET_MYRIAD);
// ResNet-50 v1; compared after softmax, no-fallback check skipped on MYRIAD.
472 TEST_P(Test_ONNX_nets, ResNet50v1)
474 applyTestTag(CV_TEST_TAG_MEMORY_512MB);
476 // output range: [-67; 75], after Softmax [0, 0.98]
477 testONNXModels("resnet50v1", pb, default_l1, default_lInf, true, target != DNN_TARGET_MYRIAD);
// ResNet-101 DUC/HDC segmentation net (very long test).
// Disabled on IE >= 2019R1, on IE MYRIAD, and on all OpenCL targets.
480 TEST_P(Test_ONNX_nets, ResNet101_DUC_HDC)
482 applyTestTag(CV_TEST_TAG_VERYLONG);
484 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
485 if (backend == DNN_BACKEND_INFERENCE_ENGINE)
486 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);
488 #if defined(INF_ENGINE_RELEASE)
489 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
490 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
492 if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL)
494 if (backend == DNN_BACKEND_OPENCV)
495 applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_OPENCL : CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
496 throw SkipTestException("Test is disabled for OpenCL targets");
498 testONNXModels("resnet101_duc_hdc", pb);
// Tiny YOLO v2; marked unstable, skipped on IE OpenCL targets and on
// MYRIAD-X hardware; relaxed tolerances for FP16/MYRIAD.
501 TEST_P(Test_ONNX_nets, TinyYolov2)
503 applyTestTag(CV_TEST_TAG_MEMORY_512MB);
505 if (cvtest::skipUnstableTests)
506 throw SkipTestException("Skip unstable test");
507 #if defined(INF_ENGINE_RELEASE)
508 if (backend == DNN_BACKEND_INFERENCE_ENGINE
509 && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
511 applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
513 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
514 && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
516 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
519 // output range: [-11; 8]
520 double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.017 : default_l1;
521 double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.14 : default_lInf;
522 testONNXModels("tiny_yolo2", pb, l1, lInf);
// MNIST CNN; compared after softmax (raw logits span a huge range).
525 TEST_P(Test_ONNX_nets, CNN_MNIST)
527 // output range: [-1952; 6574], after Softmax [0; 1]
528 testONNXModels("cnn_mnist", pb, default_l1, default_lInf, true);
// MobileNet v2; compared after softmax.
531 TEST_P(Test_ONNX_nets, MobileNet_v2)
533 // output range: [-166; 317], after Softmax [0; 1]
534 testONNXModels("mobilenetv2", pb, default_l1, default_lInf, true);
// LResNet100E-IR face net; non-CPU IE targets tagged as skipped.
// NOTE(review): the per-target l1/lInf override assignments inside the two
// branches are elided from this view — confirm values against upstream.
537 TEST_P(Test_ONNX_nets, LResNet100E_IR)
540 (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
541 CV_TEST_TAG_DEBUG_LONG
543 if (backend == DNN_BACKEND_INFERENCE_ENGINE)
545 if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
546 if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
547 if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
550 double l1 = default_l1;
551 double lInf = default_lInf;
552 // output range: [-3; 3]
553 if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) {
557 else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU) {
561 testONNXModels("LResNet100E_IR", pb, l1, lInf);
// FER+ emotion net; skipped on MYRIAD-X hardware.
// NOTE(review): the tolerance override bodies of the three branches are
// elided from this view — confirm values against upstream.
564 TEST_P(Test_ONNX_nets, Emotion_ferplus)
566 #if defined(INF_ENGINE_RELEASE)
567 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
568 && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
570 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
573 double l1 = default_l1;
574 double lInf = default_lInf;
576 // Output values are in range [-2.011, 2.111]
577 if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
579 else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
584 else if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_CPU || target == DNN_TARGET_OPENCL)) {
588 testONNXModels("emotion_ferplus", pb, l1, lInf);
// Inception v2; compared after softmax.
591 TEST_P(Test_ONNX_nets, Inception_v2)
593 testONNXModels("inception_v2", pb, default_l1, default_lInf, true);
// DenseNet-121; compared after softmax, no-fallback check skipped on MYRIAD.
596 TEST_P(Test_ONNX_nets, DenseNet121)
598 applyTestTag(CV_TEST_TAG_MEMORY_512MB);
600 // output range: [-87; 138], after Softmax [0; 1]
601 testONNXModels("densenet121", pb, default_l1, default_lInf, true, target != DNN_TARGET_MYRIAD);
// Inception v1; skipped on IE MYRIAD.
604 TEST_P(Test_ONNX_nets, Inception_v1)
606 #if defined(INF_ENGINE_RELEASE)
607 if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
608 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
610 testONNXModels("inception_v1", pb);
// ShuffleNet; non-CPU IE targets tagged as skipped.
613 TEST_P(Test_ONNX_nets, Shufflenet)
615 if (backend == DNN_BACKEND_INFERENCE_ENGINE)
617 if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
618 if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
619 if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
621 testONNXModels("shufflenet", pb);
// 3D ResNet-34 (Kinetics action recognition). Builds two 16-frame clips,
// permutes NCHW batches into the clip layout expected by the net, reshapes
// to a 5-d blob {1,3,16,112,112}, then checks both clips against .npy refs.
// Restricted to CPU target / CUDA backend, needs IE >= 2019R1.
// NOTE(review): the `Net permute; LayerParams lp;` declarations and the
// l1/lInf tolerance definitions are elided from this view — confirm upstream.
624 TEST_P(Test_ONNX_nets, Resnet34_kinetics)
626 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
627 throw SkipTestException("Test is enabled starts from 2019R1");
629 if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
630 throw SkipTestException("Only CPU and CUDA is supported");
632 String onnxmodel = findDataFile("dnn/resnet-34_kinetics.onnx", false);
633 Mat image0 = imread(findDataFile("dnn/dog416.png"));
634 Mat image1 = imread(findDataFile("dnn/street.png"));
636 Mat ref0 = blobFromNPY(_tf("data/output_kinetics0.npy"));
637 Mat ref1 = blobFromNPY(_tf("data/output_kinetics1.npy"));
// 16 identical frames per clip; mean subtraction + RGB order (swapRB=true).
639 std::vector<Mat> images_0(16, image0);
640 std::vector<Mat> images_1(16, image1);
641 Mat blob0 = blobFromImages(images_0, 1.0, Size(112, 112), Scalar(114.7748, 107.7354, 99.4750), true, true);
642 Mat blob1 = blobFromImages(images_1, 1.0, Size(112, 112), Scalar(114.7748, 107.7354, 99.4750), true, true);
// Helper net with a single Permute layer: swap axes 0 and 1 of the blob.
646 int order[] = {1, 0, 2, 3};
647 lp.set("order", DictValue::arrayInt<int*>(&order[0], 4));
648 permute.addLayerToPrev("perm", "Permute", lp);
650 permute.setInput(blob0);
651 Mat input0 = permute.forward().clone();
653 permute.setInput(blob1);
654 Mat input1 = permute.forward().clone();
// Add the batch dimension: {1, channels, frames, H, W}.
656 int dims[] = {1, 3, 16, 112, 112};
657 input0 = input0.reshape(0, 5, &dims[0]);
658 input1 = input1.reshape(0, 5, &dims[0]);
660 Net net = readNetFromONNX(onnxmodel);
661 ASSERT_FALSE(net.empty());
662 net.setPreferableBackend(backend);
663 net.setPreferableTarget(target);
665 // output range [-5, 11]
669 checkBackend(&input0, &ref0);
670 net.setInput(input0);
671 Mat out = net.forward().clone();
672 normAssert(ref0, out, "", l1, lInf);
674 checkBackend(&input1, &ref1);
675 net.setInput(input1);
676 out = net.forward().clone();
677 normAssert(ref1, out, "", l1, lInf);
679 expectNoFallbacksFromIE(net);
682 INSTANTIATE_TEST_CASE_P(/**/, Test_ONNX_nets, dnnBackendsAndTargets());