// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2017-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

/*
Tests for loading TensorFlow models
*/

#include "test_precomp.hpp"
#include "npy_blob.hpp"

#include <opencv2/dnn/layer.details.hpp>  // CV_DNN_REGISTER_LAYER_CLASS

namespace opencv_test
{

using namespace cv;
using namespace cv::dnn;

template<typename TString>
static std::string _tf(TString filename)
{
    return (getOpenCVExtraDir() + "/dnn/") + filename;
}

TEST(Test_TensorFlow, read_inception)
{
    Net net;
    {
        const string model = findDataFile("dnn/tensorflow_inception_graph.pb", false);
        net = readNetFromTensorflow(model);
        ASSERT_FALSE(net.empty());
    }
    net.setPreferableBackend(DNN_BACKEND_OPENCV);

    Mat sample = imread(_tf("grace_hopper_227.png"));
    ASSERT_TRUE(!sample.empty());
    Mat input;
    resize(sample, input, Size(224, 224));
    input -= Scalar::all(117); // mean sub

    Mat inputBlob = blobFromImage(input);

    net.setInput(inputBlob, "input");
    Mat out = net.forward("softmax2");

    std::cout << out.dims << std::endl;
}

TEST(Test_TensorFlow, inception_accuracy)
{
    Net net;
    {
        const string model = findDataFile("dnn/tensorflow_inception_graph.pb", false);
        net = readNetFromTensorflow(model);
        ASSERT_FALSE(net.empty());
    }
    net.setPreferableBackend(DNN_BACKEND_OPENCV);

    Mat sample = imread(_tf("grace_hopper_227.png"));
    ASSERT_TRUE(!sample.empty());
    Mat inputBlob = blobFromImage(sample, 1.0, Size(224, 224), Scalar(), /*swapRB*/true);

    net.setInput(inputBlob, "input");
    Mat out = net.forward("softmax2");

    Mat ref = blobFromNPY(_tf("tf_inception_prob.npy"));

    normAssert(ref, out);
}

static std::string path(const std::string& file)
{
    return findDataFile("dnn/tensorflow/" + file);
}

class Test_TensorFlow_layers : public DNNTestLayer
{
public:
    void runTensorFlowNet(const std::string& prefix, bool hasText = false,
                          double l1 = 0.0, double lInf = 0.0, bool memoryLoad = false)
    {
        std::string netPath = path(prefix + "_net.pb");
        std::string netConfig = (hasText ? path(prefix + "_net.pbtxt") : "");
        std::string inpPath = path(prefix + "_in.npy");
        std::string outPath = path(prefix + "_out.npy");

        cv::Mat input = blobFromNPY(inpPath);
        cv::Mat ref = blobFromNPY(outPath);
        checkBackend(&input, &ref);

        Net net;
        if (memoryLoad)
        {
            // Load files into memory buffers
            std::vector<char> dataModel;
            readFileContent(netPath, dataModel);

            std::vector<char> dataConfig;
            if (hasText)
            {
                readFileContent(netConfig, dataConfig);
            }

            net = readNetFromTensorflow(dataModel.data(), dataModel.size(),
                                        dataConfig.data(), dataConfig.size());
        }
        else
            net = readNetFromTensorflow(netPath, netConfig);

        ASSERT_FALSE(net.empty());

        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);
        net.setInput(input);
        cv::Mat output = net.forward();
        normAssert(ref, output, "", l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
    }
};
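
// Note on the harness above: for a prefix "foo", runTensorFlowNet() loads
// dnn/tensorflow/foo_net.pb (plus foo_net.pbtxt when hasText is true), feeds
// dnn/tensorflow/foo_in.npy and compares the result with dnn/tensorflow/foo_out.npy.
// A minimal sketch of the equivalent user-side calls, assuming hypothetical
// "my_model.*" files (readFileContent and blobFromNPY are test utilities):
//
//     std::vector<char> model, config;
//     readFileContent("my_model.pb", model);
//     readFileContent("my_model.pbtxt", config);
//     Net net = readNetFromTensorflow(model.data(), model.size(),
//                                     config.data(), config.size());
//     net.setInput(blobFromNPY("my_input.npy"));
//     Mat out = net.forward();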

TEST_P(Test_TensorFlow_layers, conv)
{
    runTensorFlowNet("single_conv");
    runTensorFlowNet("atrous_conv2d_valid");
    runTensorFlowNet("atrous_conv2d_same");
    runTensorFlowNet("depthwise_conv2d");
    runTensorFlowNet("keras_atrous_conv2d_same");
    runTensorFlowNet("conv_pool_nchw");
}

TEST_P(Test_TensorFlow_layers, Convolution3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starting from 2019R1");
#endif
    if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
        throw SkipTestException("Only DLIE backend on CPU is supported");
    runTensorFlowNet("conv3d");
}

TEST_P(Test_TensorFlow_layers, padding)
{
    runTensorFlowNet("padding_valid");
    runTensorFlowNet("spatial_padding");
    runTensorFlowNet("keras_pad_concat");
    runTensorFlowNet("mirror_pad");
}

TEST_P(Test_TensorFlow_layers, padding_same)
{
    // Reference output values are in range [0.0006, 2.798]
    runTensorFlowNet("padding_same");
}

TEST_P(Test_TensorFlow_layers, eltwise)
{
    runTensorFlowNet("eltwise_add_mul");
    runTensorFlowNet("eltwise_sub");
}

TEST_P(Test_TensorFlow_layers, pad_and_concat)
{
    runTensorFlowNet("pad_and_concat");
}

TEST_P(Test_TensorFlow_layers, concat_axis_1)
{
    runTensorFlowNet("concat_axis_1");
}

TEST_P(Test_TensorFlow_layers, batch_norm)
{
    runTensorFlowNet("batch_norm");
    runTensorFlowNet("batch_norm", false, 0.0, 0.0, true);
    runTensorFlowNet("fused_batch_norm");
    runTensorFlowNet("fused_batch_norm", false, 0.0, 0.0, true);
    runTensorFlowNet("batch_norm_text", true);
    runTensorFlowNet("batch_norm_text", true, 0.0, 0.0, true);
    runTensorFlowNet("unfused_batch_norm");
    runTensorFlowNet("fused_batch_norm_no_gamma");
    runTensorFlowNet("unfused_batch_norm_no_gamma");
    runTensorFlowNet("mvn_batch_norm");
    runTensorFlowNet("mvn_batch_norm_1x1");
    runTensorFlowNet("switch_identity");
    runTensorFlowNet("keras_batch_norm_training");
}

TEST_P(Test_TensorFlow_layers, batch_norm3D)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
    {
        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
        if (target == DNN_TARGET_OPENCL)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
        if (target == DNN_TARGET_MYRIAD)      applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
        throw SkipTestException("");
    }
    runTensorFlowNet("batch_norm3d");
}

TEST_P(Test_TensorFlow_layers, slim_batch_norm)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
    // Output values range: [-40.0597, 207.827]
    double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.041 : default_l1;
    double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.33 : default_lInf;
    runTensorFlowNet("slim_batch_norm", false, l1, lInf);
}

TEST_P(Test_TensorFlow_layers, pooling)
{
    runTensorFlowNet("max_pool_even");
    runTensorFlowNet("max_pool_odd_valid");
    runTensorFlowNet("max_pool_odd_same");
    runTensorFlowNet("reduce_mean");  // an average pooling over all spatial dimensions.
}

// TODO: fix the test and fold it into the pooling test above
TEST_P(Test_TensorFlow_layers, ave_pool_same)
{
    // Reference output values are in range [-0.519531, 0.112976]
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
    runTensorFlowNet("ave_pool_same");
}

TEST_P(Test_TensorFlow_layers, MaxPooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starting from 2019R1");
#endif
    if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
        throw SkipTestException("Only DLIE backend on CPU is supported");
    runTensorFlowNet("max_pool3d");
}

TEST_P(Test_TensorFlow_layers, AvePooling3D)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
    throw SkipTestException("Test is enabled starting from 2019R1");
#endif
    if (backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU)
        throw SkipTestException("Only DLIE backend on CPU is supported");
    runTensorFlowNet("ave_pool3d");
}

TEST_P(Test_TensorFlow_layers, deconvolution)
{
    runTensorFlowNet("deconvolution");
    runTensorFlowNet("deconvolution_same");
    runTensorFlowNet("deconvolution_stride_2_same");
    runTensorFlowNet("deconvolution_adj_pad_valid");
    runTensorFlowNet("deconvolution_adj_pad_same");
    runTensorFlowNet("keras_deconv_valid");
    runTensorFlowNet("keras_deconv_same");
    runTensorFlowNet("keras_deconv_same_v2");
}

TEST_P(Test_TensorFlow_layers, matmul)
{
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    runTensorFlowNet("matmul");
    runTensorFlowNet("nhwc_transpose_reshape_matmul");
    // Reference output values are in range [-5.688, 4.484]
    double l1 = target == DNN_TARGET_MYRIAD ? 6.1e-3 : default_l1;
    runTensorFlowNet("nhwc_reshape_matmul", false, l1);
}

TEST_P(Test_TensorFlow_layers, reshape)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
    runTensorFlowNet("shift_reshape_no_reorder");
    runTensorFlowNet("reshape_no_reorder");
    runTensorFlowNet("reshape_reduce");
    runTensorFlowNet("reshape_as_shape");
}

TEST_P(Test_TensorFlow_layers, flatten)
{
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
#endif

    runTensorFlowNet("flatten", true);
}

TEST_P(Test_TensorFlow_layers, unfused_flatten)
{
    runTensorFlowNet("unfused_flatten");
    runTensorFlowNet("unfused_flatten_unknown_batch");
}

TEST_P(Test_TensorFlow_layers, leaky_relu)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
#endif
    runTensorFlowNet("leaky_relu_order1");
    runTensorFlowNet("leaky_relu_order2");
    runTensorFlowNet("leaky_relu_order3");
}

TEST_P(Test_TensorFlow_layers, l2_normalize)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif

    runTensorFlowNet("l2_normalize");
}

// TODO: fix it and add to l2_normalize
TEST_P(Test_TensorFlow_layers, l2_normalize_3d)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE
            && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
    )
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
#endif
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif

    runTensorFlowNet("l2_normalize_3d");
}

class Test_TensorFlow_nets : public DNNTestLayer {};

TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
{
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
    {
#if INF_ENGINE_VER_MAJOR_GE(2019010000)
        if (getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#else
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif
    }
#endif

    checkBackend();
    std::string imgPath = findDataFile("dnn/street.png");
    std::string netConfig = findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt");
    std::string netPath = findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false);

    Mat inp;
    resize(imread(imgPath), inp, Size(300, 300));
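    // The scale 1/127.5 and mean 127.5 below map 8-bit pixel values from [0, 255] to [-1, 1].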
    inp = blobFromImage(inp, 1.0f / 127.5, Size(), Scalar(127.5, 127.5, 127.5), true);

    Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco.detection_out.npy"));

    Net net = readNetFromTensorflow(netPath, netConfig);
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    net.setInput(inp);
    Mat out = net.forward();

    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0043 : default_l1;
    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.037 : default_lInf;
    normAssertDetections(ref, out, "", 0.2, scoreDiff, iouDiff);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2019010000
    expectNoFallbacksFromIE(net);
#endif
}

TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
{
    applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);

#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif

    checkBackend();
    Mat img = imread(findDataFile("dnn/street.png"));
    std::string proto = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pbtxt");
    std::string model = findDataFile("dnn/ssd_inception_v2_coco_2017_11_17.pb", false);

    Net net = readNetFromTensorflow(model, proto);
    Mat blob = blobFromImage(img, 1.0f, Size(300, 300), Scalar(), true, false);

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    net.setInput(blob);
    // Output has shape 1x1xNx7, where N is the number of detections.
    // Each detection is a vector [id, classId, confidence, left, top, right, bottom]
    // (see the parsing sketch after this test).
    Mat out = net.forward();
    Mat ref = (Mat_<float>(5, 7) << 0, 1, 0.90176028, 0.19872092, 0.36311883, 0.26461923, 0.63498729,
                                    0, 3, 0.93569964, 0.64865261, 0.45906419, 0.80675775, 0.65708131,
                                    0, 3, 0.75838411, 0.44668293, 0.45907149, 0.49459291, 0.52197015,
                                    0, 10, 0.95932811, 0.38349164, 0.32528657, 0.40387636, 0.39165527,
                                    0, 10, 0.93973452, 0.66561931, 0.37841269, 0.68074018, 0.42907384);

    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0097 : default_l1;
    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.09 : default_lInf;
    normAssertDetections(ref, out, "", 0.5, scoreDiff, iouDiff);
    expectNoFallbacksFromIE(net);
}
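
// For reference, a minimal sketch (not used by the tests) of how the 1x1xNx7
// detection output of SSD-like networks is typically parsed in user code;
// the 0.5 confidence threshold is an arbitrary example value:
//
//     Mat detections = out.reshape(1, (int)(out.total() / 7));  // N rows, 7 columns
//     for (int i = 0; i < detections.rows; ++i)
//     {
//         float confidence = detections.at<float>(i, 2);
//         if (confidence < 0.5f)
//             continue;
//         int classId = (int)detections.at<float>(i, 1);
//         // Box coordinates are relative to the input image size:
//         float left   = detections.at<float>(i, 3);
//         float top    = detections.at<float>(i, 4);
//         float right  = detections.at<float>(i, 5);
//         float bottom = detections.at<float>(i, 6);
//     }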

TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
{
    checkBackend();
    std::string proto = findDataFile("dnn/ssd_mobilenet_v1_coco_2017_11_17.pbtxt");
    std::string model = findDataFile("dnn/ssd_mobilenet_v1_coco_2017_11_17.pb", false);

    Net net = readNetFromTensorflow(model, proto);
    Mat img = imread(findDataFile("dnn/dog416.png"));
    Mat blob = blobFromImage(img, 1.0f, Size(300, 300), Scalar(), true, false);

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    net.setInput(blob);
    Mat out = net.forward();

    Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco_2017_11_17.detection_out.npy"));
    float scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 7e-3 : 1.5e-5;
    float iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.012 : 1e-3;
    float detectionConfThresh = (target == DNN_TARGET_MYRIAD) ? 0.35 : 0.3;

#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
    )
    {
        // Without braces only the first assignment was conditional; all three
        // overrides are meant to apply to the MYRIAD X configuration.
        scoreDiff = 0.061;
        iouDiff = 0.12;
        detectionConfThresh = 0.36;
    }
#endif
    normAssertDetections(ref, out, "", detectionConfThresh, scoreDiff, iouDiff);
    expectNoFallbacksFromIE(net);
}

TEST_P(Test_TensorFlow_nets, Faster_RCNN)
{
    // FIXIT split test
    applyTestTag(
        (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
        CV_TEST_TAG_LONG,
        CV_TEST_TAG_DEBUG_VERYLONG
    );
    static std::string names[] = {"faster_rcnn_inception_v2_coco_2018_01_28",
                                  "faster_rcnn_resnet50_coco_2018_01_28"};

    checkBackend();
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);

    double scoresDiff = backend == DNN_BACKEND_INFERENCE_ENGINE ? 2.9e-5 : 1e-5;
    for (int i = 0; i < 2; ++i)
    {
        std::string proto = findDataFile("dnn/" + names[i] + ".pbtxt");
        std::string model = findDataFile("dnn/" + names[i] + ".pb", false);

        Net net = readNetFromTensorflow(model, proto);
        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);
        Mat img = imread(findDataFile("dnn/dog416.png"));
        Mat blob = blobFromImage(img, 1.0f, Size(800, 600), Scalar(), true, false);

        net.setInput(blob);
        Mat out = net.forward();

        Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + names[i] + ".detection_out.npy"));
        normAssertDetections(ref, out, names[i].c_str(), 0.3, scoresDiff);
    }
}

TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN)
{
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
#endif

    checkBackend();
    std::string proto = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pbtxt");
    std::string model = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pb", false);

    Net net = readNetFromTensorflow(model, proto);
    Mat img = imread(findDataFile("dnn/dog416.png"));
    Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_ppn_coco.detection_out.npy"));
    Mat blob = blobFromImage(img, 1.0f, Size(300, 300), Scalar(), true, false);

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    net.setInput(blob);
    Mat out = net.forward();

    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.048 : 1.1e-5;
    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.058 : default_lInf;
    normAssertDetections(ref, out, "", 0.45, scoreDiff, iouDiff);
    expectNoFallbacksFromIE(net);
}

TEST_P(Test_TensorFlow_nets, opencv_face_detector_uint8)
{
    checkBackend();
    std::string proto = findDataFile("dnn/opencv_face_detector.pbtxt");
    std::string model = findDataFile("dnn/opencv_face_detector_uint8.pb", false);

    Net net = readNetFromTensorflow(model, proto);
    Mat img = imread(findDataFile("gpu/lbpcascade/er.png"));
    Mat blob = blobFromImage(img, 1.0, Size(), Scalar(104.0, 177.0, 123.0), false, false);

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);
    net.setInput(blob);
    // Output has shape 1x1xNx7, where N is the number of detections.
    // Each detection is a vector [id, classId, confidence, left, top, right, bottom].
    Mat out = net.forward();

    // References are from test for Caffe model.
    Mat ref = (Mat_<float>(6, 7) << 0, 1, 0.99520785, 0.80997437, 0.16379407, 0.87996572, 0.26685631,
                                    0, 1, 0.9934696, 0.2831718, 0.50738752, 0.345781, 0.5985168,
                                    0, 1, 0.99096733, 0.13629119, 0.24892329, 0.19756334, 0.3310290,
                                    0, 1, 0.98977017, 0.23901358, 0.09084064, 0.29902688, 0.1769477,
                                    0, 1, 0.97203469, 0.67965847, 0.06876482, 0.73999709, 0.1513494,
                                    0, 1, 0.95097077, 0.51901293, 0.45863652, 0.5777427, 0.5347801);
    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 4e-3 : 3.4e-3;
    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.024 : 1e-2;
    normAssertDetections(ref, out, "", 0.9, scoreDiff, iouDiff);
    expectNoFallbacksFromIE(net);
}

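// Reference outputs for the EAST test below were generated in TensorFlow with
// the following Python snippet: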
// inp = cv.imread('opencv_extra/testdata/cv/ximgproc/sources/08.png')
// inp = inp[:,:,[2, 1, 0]].astype(np.float32).reshape(1, 512, 512, 3)
// outs = sess.run([sess.graph.get_tensor_by_name('feature_fusion/Conv_7/Sigmoid:0'),
//                  sess.graph.get_tensor_by_name('feature_fusion/concat_3:0')],
//                 feed_dict={'input_images:0': inp})
// scores = np.ascontiguousarray(outs[0].transpose(0, 3, 1, 2))
// geometry = np.ascontiguousarray(outs[1].transpose(0, 3, 1, 2))
// np.save('east_text_detection.scores.npy', scores)
// np.save('east_text_detection.geometry.npy', geometry)
TEST_P(Test_TensorFlow_nets, EAST_text_detection)
{
    applyTestTag(
        (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
        CV_TEST_TAG_DEBUG_LONG
    );

#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif

    checkBackend();

    std::string netPath = findDataFile("dnn/frozen_east_text_detection.pb", false);
    std::string imgPath = findDataFile("cv/ximgproc/sources/08.png");
    std::string refScoresPath = findDataFile("dnn/east_text_detection.scores.npy");
    std::string refGeometryPath = findDataFile("dnn/east_text_detection.geometry.npy");

    Net net = readNet(netPath);

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    Mat img = imread(imgPath);
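    // The subtracted mean (123.68, 116.78, 103.94) corresponds to the standard ImageNet
    // RGB channel means; swapRB=true converts the BGR image to RGB to match that order.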
    Mat inp = blobFromImage(img, 1.0, Size(), Scalar(123.68, 116.78, 103.94), true, false);
    net.setInput(inp);

    std::vector<Mat> outs;
    std::vector<String> outNames(2);
    outNames[0] = "feature_fusion/Conv_7/Sigmoid";
    outNames[1] = "feature_fusion/concat_3";
    net.forward(outs, outNames);

    Mat scores = outs[0];
    Mat geometry = outs[1];

    // Scores are in range [0, 1]. Geometry values are in range [-0.23, 290]
    double l1_scores = default_l1, lInf_scores = default_lInf;
    double l1_geometry = default_l1, lInf_geometry = default_lInf;
    if (target == DNN_TARGET_OPENCL_FP16)
    {
        lInf_scores = backend == DNN_BACKEND_INFERENCE_ENGINE ? 0.16 : 0.11;
        l1_geometry = 0.28; lInf_geometry = 5.94;
    }
    else if (target == DNN_TARGET_MYRIAD)
    {
        lInf_scores = 0.41;
        l1_geometry = 0.28; lInf_geometry = 5.94;
    }
    else
    {
        l1_geometry = 1e-4, lInf_geometry = 3e-3;
    }
    normAssert(scores, blobFromNPY(refScoresPath), "scores", l1_scores, lInf_scores);
    normAssert(geometry, blobFromNPY(refGeometryPath), "geometry", l1_geometry, lInf_geometry);
    expectNoFallbacksFromIE(net);
}

INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_nets, dnnBackendsAndTargets());

TEST_P(Test_TensorFlow_layers, fp16_weights)
{
    float l1 = 0.00078;
    float lInf = 0.012;
    runTensorFlowNet("fp16_single_conv", false, l1, lInf);
    runTensorFlowNet("fp16_max_pool_odd_same", false, l1, lInf);
    runTensorFlowNet("fp16_eltwise_add_mul", false, l1, lInf);
    runTensorFlowNet("fp16_pad_and_concat", false, l1, lInf);
    runTensorFlowNet("fp16_padding_valid", false, l1, lInf);
    // Reference output values are in range [0.0889, 1.651]
    runTensorFlowNet("fp16_max_pool_even", false, (target == DNN_TARGET_MYRIAD) ? 0.003 : l1, lInf);
    if (target == DNN_TARGET_MYRIAD) {
        l1 = 0.0041;
        lInf = 0.024;
    }
    // Reference output values are in range [0, 10.75]
    runTensorFlowNet("fp16_deconvolution", false, l1, lInf);
    // Reference output values are in range [0.418, 2.297]
    runTensorFlowNet("fp16_max_pool_odd_valid", false, l1, lInf);
}

TEST_P(Test_TensorFlow_layers, fp16_padding_same)
{
    // Reference output values are in range [-3.504, -0.002]
    runTensorFlowNet("fp16_padding_same", false, 7e-4, 4e-3);
}

TEST_P(Test_TensorFlow_layers, defun)
{
    runTensorFlowNet("defun_dropout");
}

TEST_P(Test_TensorFlow_layers, quantized)
{
    runTensorFlowNet("uint8_single_conv");
}

TEST_P(Test_TensorFlow_layers, lstm)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
    runTensorFlowNet("lstm", true);
    runTensorFlowNet("lstm", true, 0.0, 0.0, true);
}

TEST_P(Test_TensorFlow_layers, split)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
    runTensorFlowNet("split_equals");
}

TEST_P(Test_TensorFlow_layers, resize_nearest_neighbor)
{
    runTensorFlowNet("resize_nearest_neighbor");
    runTensorFlowNet("keras_upsampling2d");
}

TEST_P(Test_TensorFlow_layers, slice)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
    runTensorFlowNet("slice_4d");
    runTensorFlowNet("strided_slice");
}

TEST_P(Test_TensorFlow_layers, softmax)
{
    runTensorFlowNet("keras_softmax");
    runTensorFlowNet("slim_softmax");
}

TEST_P(Test_TensorFlow_layers, slim_softmax_v2)
{
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
        getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
#endif
    runTensorFlowNet("slim_softmax_v2");
}

TEST_P(Test_TensorFlow_layers, relu6)
{
    runTensorFlowNet("keras_relu6");
    runTensorFlowNet("keras_relu6", /*hasText*/ true);
}

TEST_P(Test_TensorFlow_layers, subpixel)
{
    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
    runTensorFlowNet("subpixel");
}

TEST_P(Test_TensorFlow_layers, keras_mobilenet_head)
{
    runTensorFlowNet("keras_mobilenet_head");
}

TEST_P(Test_TensorFlow_layers, resize_bilinear)
{
    runTensorFlowNet("resize_bilinear");
    runTensorFlowNet("resize_bilinear_factor");
}

TEST_P(Test_TensorFlow_layers, squeeze)
{
#if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2
    )
        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
#endif
    int inpShapes[][4] = {{1, 3, 4, 2}, {1, 3, 1, 2}, {1, 3, 4, 1}, {1, 3, 4, 1}};  // TensorFlow's shape (NHWC)
    int outShapes[][3] = {{3, 4, 2}, {1, 3, 2}, {1, 3, 4}, {1, 3, 4}};
    int squeeze_dims[] = {0, 2, 3, -1};
    for (int i = 0; i < 4; ++i)
    {
        SCOPED_TRACE(format("i=%d", i));
        std::string pbtxt =
            "node { name: \"input\" op: \"Placeholder\""
            "attr { key: \"data_format\" value { s: \"NHWC\" } } }"
            "node { name: \"squeeze\" op: \"Squeeze\" input: \"input\""
            "attr { key: \"squeeze_dims\" value { list { i:" + format("%d", squeeze_dims[i]) + "}}}}";
        Net net = readNetFromTensorflow(0, 0, pbtxt.c_str(), pbtxt.size());
        net.setPreferableBackend(backend);
        net.setPreferableTarget(target);
        Mat tfInp(4, &inpShapes[i][0], CV_32F);
        randu(tfInp, -1, 1);

        // NHWC to NCHW
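        // Worked example for NHWC shape {1, 3, 4, 2} (N=1, H=3, W=4, C=2): the two swaps
        // below turn inpShapes[i] into the NCHW shape {1, 2, 3, 4}; the NHWC data is then
        // viewed as a (H*W) x C = 12x2 matrix, transposed to 2x12 so that each row is one
        // contiguous channel plane, and finally reshaped to 1x2x3x4.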
        CV_Assert(inpShapes[i][0] == 1);
        std::swap(inpShapes[i][2], inpShapes[i][3]);
        std::swap(inpShapes[i][1], inpShapes[i][2]);
        Mat cvInp = tfInp.reshape(1, tfInp.total() / inpShapes[i][1]).t();
        cvInp = cvInp.reshape(1, 4, &inpShapes[i][0]);

        net.setInput(cvInp);
        Mat out = net.forward();
        normAssert(tfInp.reshape(1, 3, &outShapes[i][0]), out, "", default_l1, default_lInf);
    }
}

INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_layers, dnnBackendsAndTargets());

TEST(Test_TensorFlow, two_inputs)
{
    Net net = readNet(path("two_inputs_net.pbtxt"));
    net.setPreferableBackend(DNN_BACKEND_OPENCV);

    Mat firstInput(2, 3, CV_32FC1), secondInput(2, 3, CV_32FC1);
    randu(firstInput, -1, 1);
    randu(secondInput, -1, 1);

    net.setInput(firstInput, "first_input");
    net.setInput(secondInput, "second_input");
    Mat out = net.forward();

    normAssert(out, firstInput + secondInput);
}

TEST(Test_TensorFlow, Mask_RCNN)
{
    applyTestTag(CV_TEST_TAG_MEMORY_1GB, CV_TEST_TAG_DEBUG_VERYLONG);
    Mat img = imread(findDataFile("dnn/street.png"));
    std::string proto = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt");
    std::string model = findDataFile("dnn/mask_rcnn_inception_v2_coco_2018_01_28.pb", false);

    Net net = readNetFromTensorflow(model, proto);
    Mat refDetections = blobFromNPY(path("mask_rcnn_inception_v2_coco_2018_01_28.detection_out.npy"));
    Mat refMasks = blobFromNPY(path("mask_rcnn_inception_v2_coco_2018_01_28.detection_masks.npy"));
    Mat blob = blobFromImage(img, 1.0f, Size(800, 800), Scalar(), true, false);

    net.setPreferableBackend(DNN_BACKEND_OPENCV);

    net.setInput(blob);

    // Mask-RCNN predicts bounding boxes and segmentation masks.
    std::vector<String> outNames(2);
    outNames[0] = "detection_out_final";
    outNames[1] = "detection_masks";

    std::vector<Mat> outs;
    net.forward(outs, outNames);

    Mat outDetections = outs[0];
    Mat outMasks = outs[1];
    normAssertDetections(refDetections, outDetections, "", /*threshold for zero confidence*/1e-5);

    // Output size of masks is NxCxHxW where
    // N - number of detected boxes
    // C - number of classes (excluding background)
    // HxW - segmentation shape
    const int numDetections = outDetections.size[2];

    int masksSize[] = {1, numDetections, outMasks.size[2], outMasks.size[3]};
    Mat masks(4, &masksSize[0], CV_32F);

    std::vector<cv::Range> srcRanges(4, cv::Range::all());
    std::vector<cv::Range> dstRanges(4, cv::Range::all());

    outDetections = outDetections.reshape(1, outDetections.total() / 7);
    for (int i = 0; i < numDetections; ++i)
    {
        // Get a class id for this bounding box and copy mask only for that class.
        int classId = static_cast<int>(outDetections.at<float>(i, 1));
        srcRanges[0] = dstRanges[1] = cv::Range(i, i + 1);
        srcRanges[1] = cv::Range(classId, classId + 1);
        outMasks(srcRanges).copyTo(masks(dstRanges));
    }
    cv::Range topRefMasks[] = {Range::all(), Range(0, numDetections), Range::all(), Range::all()};
    normAssert(masks, refMasks(&topRefMasks[0]));
}
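
// For reference, a minimal sketch (not part of the test) of typical Mask R-CNN
// post-processing in user code: take the HxW mask of detection i for its class,
// resize it to the detected box and binarize it. The 0.5 threshold and the
// boxSize variable are illustrative assumptions, not something this test checks.
//
//     Mat mask(outMasks.size[2], outMasks.size[3], CV_32F,
//              outMasks.ptr<float>(i, classId));
//     resize(mask, mask, boxSize);    // boxSize: pixel size of the detected box
//     Mat binMask = mask > 0.5f;      // CV_8U mask of the object inside the box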

}