// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

/*
Tests for TensorFlow models loading
*/

#include "test_precomp.hpp"
#include "npy_blob.hpp"

namespace cvtest
{

using namespace cv;
using namespace cv::dnn;

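// Returns the path to a test file located in the "dnn" folder of opencv_extra.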
template<typename TString>
static std::string _tf(TString filename)
{
    return (getOpenCVExtraDir() + "/dnn/") + filename;
}

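// Smoke test: load the Inception GraphDef and run a forward pass up to the "softmax2"
// node (accuracy against reference values is checked by inception_accuracy below).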
TEST(Test_TensorFlow, read_inception)
{
    Net net;
    {
        const string model = findDataFile("dnn/tensorflow_inception_graph.pb", false);
        net = readNetFromTensorflow(model);
        ASSERT_FALSE(net.empty());
    }

    Mat sample = imread(_tf("grace_hopper_227.png"));
    ASSERT_TRUE(!sample.empty());
    Mat input;
    resize(sample, input, Size(224, 224));
    input -= 128; // subtract the mean value

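    // blobFromImage packs the image into the 4D NCHW float blob expected by the network.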
    Mat inputBlob = blobFromImage(input);

    net.setInput(inputBlob, "input");
    Mat out = net.forward("softmax2");

    std::cout << out.dims << std::endl;
}

TEST(Test_TensorFlow, inception_accuracy)
{
    Net net;
    {
        const string model = findDataFile("dnn/tensorflow_inception_graph.pb", false);
        net = readNetFromTensorflow(model);
        ASSERT_FALSE(net.empty());
    }

    Mat sample = imread(_tf("grace_hopper_227.png"));
    ASSERT_TRUE(!sample.empty());
    resize(sample, sample, Size(224, 224));
    Mat inputBlob = blobFromImage(sample);

    net.setInput(inputBlob, "input");
    Mat out = net.forward("softmax2");

    Mat ref = blobFromNPY(_tf("tf_inception_prob.npy"));

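    // Compare with the reference probabilities from opencv_extra using default normAssert tolerances.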
    normAssert(ref, out);
}

static std::string path(const std::string& file)
{
    return findDataFile("dnn/tensorflow/" + file, false);
}

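// Regression helper: loads the serialized graph <prefix>_net.pb, feeds the input blob
// from <prefix>_in.npy and compares the result with <prefix>_out.npy using normAssert
// with the given l1 and lInf tolerances.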
static void runTensorFlowNet(const std::string& prefix,
                             double l1 = 1e-5, double lInf = 1e-4)
{
    std::string netPath = path(prefix + "_net.pb");
    std::string inpPath = path(prefix + "_in.npy");
    std::string outPath = path(prefix + "_out.npy");

    Net net = readNetFromTensorflow(netPath);

    cv::Mat input = blobFromNPY(inpPath);
    cv::Mat target = blobFromNPY(outPath);

    net.setInput(input);
    cv::Mat output = net.forward();
    normAssert(target, output, "", l1, lInf);
}
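// A minimal sketch of how a new regression case could be added with this helper
// (hypothetical "my_layer" prefix; the corresponding my_layer_net.pb, my_layer_in.npy
// and my_layer_out.npy files would have to exist in opencv_extra/dnn/tensorflow/):
//
//   TEST(Test_TensorFlow, my_layer)
//   {
//       runTensorFlowNet("my_layer");
//   }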

TEST(Test_TensorFlow, conv)
{
    runTensorFlowNet("single_conv");
    runTensorFlowNet("atrous_conv2d_valid");
    runTensorFlowNet("atrous_conv2d_same");
    runTensorFlowNet("depthwise_conv2d");
}

TEST(Test_TensorFlow, padding)
{
    runTensorFlowNet("padding_same");
    runTensorFlowNet("padding_valid");
}

TEST(Test_TensorFlow, eltwise_add_mul)
{
    runTensorFlowNet("eltwise_add_mul");
}

TEST(Test_TensorFlow, pad_and_concat)
{
    runTensorFlowNet("pad_and_concat");
}

TEST(Test_TensorFlow, batch_norm)
{
    runTensorFlowNet("batch_norm");
    runTensorFlowNet("fused_batch_norm");
}

TEST(Test_TensorFlow, pooling)
{
    runTensorFlowNet("max_pool_even");
    runTensorFlowNet("max_pool_odd_valid");
    runTensorFlowNet("max_pool_odd_same");
}

TEST(Test_TensorFlow, deconvolution)
{
    runTensorFlowNet("deconvolution");
}

TEST(Test_TensorFlow, matmul)
{
    runTensorFlowNet("matmul");
}

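// FP16 graphs store weights in half precision, so looser tolerances are used here.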
TEST(Test_TensorFlow, fp16)
{
    const float l1 = 1e-3;
    const float lInf = 1e-2;
    runTensorFlowNet("fp16_single_conv", l1, lInf);
    runTensorFlowNet("fp16_deconvolution", l1, lInf);
    runTensorFlowNet("fp16_max_pool_odd_same", l1, lInf);
    runTensorFlowNet("fp16_padding_valid", l1, lInf);
    runTensorFlowNet("fp16_eltwise_add_mul", l1, lInf);
    runTensorFlowNet("fp16_max_pool_odd_valid", l1, lInf);
    runTensorFlowNet("fp16_pad_and_concat", l1, lInf);
    runTensorFlowNet("fp16_max_pool_even", l1, lInf);
    runTensorFlowNet("fp16_padding_same", l1, lInf);
}

}