// Merge pull request #15139 from alalek:openvino_2019R2
// modules/dnn/test/test_common.hpp (platform/upstream/opencv.git)
1 // This file is part of OpenCV project.
2 // It is subject to the license terms in the LICENSE file found in the top-level directory
3 // of this distribution and at http://opencv.org/license.html.
4
5 #ifndef __OPENCV_TEST_COMMON_HPP__
6 #define __OPENCV_TEST_COMMON_HPP__
7
8 #include "opencv2/dnn/utils/inference_engine.hpp"
9
10 #ifdef HAVE_OPENCL
11 #include "opencv2/core/ocl.hpp"
12 #endif
13
// Test tags used to skip DNN tests for specific backends / targets / plugin
// releases (applied via applyTestTag(); see DNNTestLayer::checkBackend below).
#define CV_TEST_TAG_DNN_SKIP_HALIDE              "dnn_skip_halide"
#define CV_TEST_TAG_DNN_SKIP_OPENCL              "dnn_skip_ocl"
#define CV_TEST_TAG_DNN_SKIP_OPENCL_FP16         "dnn_skip_ocl_fp16"
#define CV_TEST_TAG_DNN_SKIP_IE                  "dnn_skip_ie"
#define CV_TEST_TAG_DNN_SKIP_IE_2018R5           "dnn_skip_ie_2018r5"
#define CV_TEST_TAG_DNN_SKIP_IE_2019R1           "dnn_skip_ie_2019r1"
#define CV_TEST_TAG_DNN_SKIP_IE_2019R1_1         "dnn_skip_ie_2019r1_1"
#define CV_TEST_TAG_DNN_SKIP_IE_2019R2           "dnn_skip_ie_2019r2"
#define CV_TEST_TAG_DNN_SKIP_IE_OPENCL           "dnn_skip_ie_ocl"
#define CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16      "dnn_skip_ie_ocl_fp16"
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2         "dnn_skip_ie_myriad2"
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X         "dnn_skip_ie_myriadx"
// NOTE: expands to TWO comma-separated tags, covering both Myriad-2 and
// Myriad-X — only usable where a variadic tag list is accepted.
#define CV_TEST_TAG_DNN_SKIP_IE_MYRIAD           CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X
27
28
29
// GoogleTest pretty-printers for DNN enumerations. They are declared in the
// cv::dnn namespace so GoogleTest's value printer can locate them through
// argument-dependent lookup when printing test parameters.
namespace cv { namespace dnn {
CV__DNN_EXPERIMENTAL_NS_BEGIN

// Prints a human-readable name for a compute backend (definitions elsewhere).
void PrintTo(const cv::dnn::Backend& v, std::ostream* os);
// Prints a human-readable name for a compute target.
void PrintTo(const cv::dnn::Target& v, std::ostream* os);
using opencv_test::tuple;
using opencv_test::get;
// Prints a combined (backend, target) test parameter.
void PrintTo(const tuple<cv::dnn::Backend, cv::dnn::Target> v, std::ostream* os);

CV__DNN_EXPERIMENTAL_NS_END
}} // namespace cv::dnn
41
42
43
namespace opencv_test {

// One-time initialization of the DNN test suite (defined in the test module).
void initDNNTests();

using namespace cv::dnn;

// Returns the root directory of the test data (cvtest::TS data path;
// presumably the opencv_extra checkout selected by the test-data environment
// configuration — confirm against cvtest::TS documentation).
static inline const std::string &getOpenCVExtraDir()
{
    return cvtest::TS::ptr()->get_data_path();
}
54
// Asserts that 'test' matches 'ref' within the given L1 (mean absolute) and
// Linf (max absolute) tolerances; 'comment' is appended to failure messages.
void normAssert(
        cv::InputArray ref, cv::InputArray test, const char *comment = "",
        double l1 = 0.00001, double lInf = 0.0001);

// Converts a matrix of box coordinates into Rect2d objects
// (row layout is defined by the implementation — see the .cpp/.impl file).
std::vector<cv::Rect2d> matToBoxes(const cv::Mat& m);

// Compares two detection sets element-wise: class ids, scores (within
// 'scores_diff') and boxes (by IoU within 'boxes_iou_diff'). Detections with
// confidence below 'confThreshold' are presumably ignored — confirm in impl.
void normAssertDetections(
        const std::vector<int>& refClassIds,
        const std::vector<float>& refScores,
        const std::vector<cv::Rect2d>& refBoxes,
        const std::vector<int>& testClassIds,
        const std::vector<float>& testScores,
        const std::vector<cv::Rect2d>& testBoxes,
        const char *comment = "", double confThreshold = 0.0,
        double scores_diff = 1e-5, double boxes_iou_diff = 1e-4);

// For SSD-based object detection networks which produce output of shape 1x1xNx7
// where N is a number of detections and an every detection is represented by
// a vector [batchId, classId, confidence, left, top, right, bottom].
void normAssertDetections(
        cv::Mat ref, cv::Mat out, const char *comment = "",
        double confThreshold = 0.0, double scores_diff = 1e-5,
        double boxes_iou_diff = 1e-4);

// Reads the whole file into 'content' (raw bytes).
void readFileContent(const std::string& filename, CV_OUT std::vector<char>& content);

#ifdef HAVE_INF_ENGINE
// Checks that the available Myriad VPU matches the requested/expected type.
bool validateVPUType();
#endif

// Generates the list of (backend, target) parameter pairs for value-
// parameterized tests; the flags toggle inclusion of the Inference Engine,
// Halide, and plain-CPU OpenCV configurations.
testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargets(
        bool withInferenceEngine = true,
        bool withHalide = false,
        bool withCpuOCV = true
);
90
91
92 class DNNTestLayer : public TestWithParam<tuple<Backend, Target> >
93 {
94 public:
95     dnn::Backend backend;
96     dnn::Target target;
97     double default_l1, default_lInf;
98
99     DNNTestLayer()
100     {
101         backend = (dnn::Backend)(int)get<0>(GetParam());
102         target = (dnn::Target)(int)get<1>(GetParam());
103         getDefaultThresholds(backend, target, &default_l1, &default_lInf);
104     }
105
106     static void getDefaultThresholds(int backend, int target, double* l1, double* lInf)
107     {
108         if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
109         {
110             *l1 = 4e-3;
111             *lInf = 2e-2;
112         }
113         else
114         {
115             *l1 = 1e-5;
116             *lInf = 1e-4;
117         }
118     }
119
120     static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0)
121     {
122         if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
123         {
124             if (inp && ref && inp->dims == 4 && ref->dims == 4 &&
125                 inp->size[0] != 1 && inp->size[0] != ref->size[0])
126             {
127                 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
128                 throw SkipTestException("Inconsistent batch size of input and output blobs for Myriad plugin");
129             }
130         }
131     }
132
133     void expectNoFallbacks(Net& net)
134     {
135         // Check if all the layers are supported with current backend and target.
136         // Some layers might be fused so their timings equal to zero.
137         std::vector<double> timings;
138         net.getPerfProfile(timings);
139         std::vector<String> names = net.getLayerNames();
140         CV_Assert(names.size() == timings.size());
141
142         for (int i = 0; i < names.size(); ++i)
143         {
144             Ptr<dnn::Layer> l = net.getLayer(net.getLayerId(names[i]));
145             bool fused = !timings[i];
146             if ((!l->supportBackend(backend) || l->preferableTarget != target) && !fused)
147                 CV_Error(Error::StsNotImplemented, "Layer [" + l->name + "] of type [" +
148                          l->type + "] is expected to has backend implementation");
149         }
150     }
151
152     void expectNoFallbacksFromIE(Net& net)
153     {
154         if (backend == DNN_BACKEND_INFERENCE_ENGINE)
155             expectNoFallbacks(net);
156     }
157
158 protected:
159     void checkBackend(Mat* inp = 0, Mat* ref = 0)
160     {
161         checkBackend(backend, target, inp, ref);
162     }
163 };
164
165 } // namespace
166
167
// src/op_inf_engine.hpp
// Version-comparison helpers for the Inference Engine release identifier.
// Only the "major" portion of INF_ENGINE_RELEASE is compared — both sides are
// divided by 10000 first, so the low four digits (build/revision) are ignored.
#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LE(ver) (((INF_ENGINE_RELEASE) / 10000) <= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
174
175 #endif