1 // This file is part of OpenCV project.
2 // It is subject to the license terms in the LICENSE file found in the top-level directory
3 // of this distribution and at http://opencv.org/license.html.
5 // Copyright (C) 2017-2019, Intel Corporation, all rights reserved.
6 // Third party copyrights are property of their respective owners.
8 // These tests don't require any external data. They just compare outputs of
9 // layers using different computation backends. Input and parameters are random.
11 #include "test_precomp.hpp"
13 namespace opencv_test { namespace {
16 using namespace cv::dnn;
17 using namespace testing;
// Core comparison helper: runs `net` once on the reference OpenCV/CPU backend
// and once on (backendId, targetId), then asserts both outputs match within
// the default per-backend thresholds from DNNTestLayer.
// NOTE(review): sampled file — braces, `l1`/`lInf` declarations and the guards
// around randInput/skipCheck are elided from this view.
19 static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool skipCheck = false, bool randInput = true)
21 DNNTestLayer::checkBackend(backendId, targetId);
// Fill input with uniform random values in [-1, 1]
// (presumably guarded by `randInput` in the full file — confirm).
23 randu(input, -1.0f, 1.0f);
26 net.setPreferableBackend(DNN_BACKEND_OPENCV);
// Reference result from the default OpenCV backend.
27 Mat outputDefault = net.forward().clone();
// Re-run the same net on the backend/target under test.
29 net.setPreferableBackend(backendId);
30 net.setPreferableTarget(targetId);
31 Mat outputHalide = net.forward().clone();
37 DNNTestLayer::getDefaultThresholds(backendId, targetId, &l1, &lInf);
// Diagnostic dump of thresholds and both outputs
// (presumably only emitted when skipCheck is set — confirm against full file).
39 std::cout << "l1=" << l1 << " lInf=" << lInf << std::endl;
40 std::cout << outputDefault.reshape(1, outputDefault.total()).t() << std::endl;
41 std::cout << outputHalide.reshape(1, outputDefault.total()).t() << std::endl;
43 normAssert(outputDefault, outputHalide, "", l1, lInf);
// Convenience overload: wraps the single layer described by `params` into a
// fresh Net and delegates to test(Mat&, Net&, ...) above.
// NOTE(review): the `Net net;` declaration is elided in this sampled view.
46 static void test(LayerParams& params, Mat& input, Backend backendId, Target targetId, bool skipCheck = false)
49 net.addLayerToPrev(params.name, params.type, params);
50 test(input, net, backendId, targetId, skipCheck);
// Parameter generator of (Backend, Target) pairs for the tests below,
// including the Halide backend; OpenCV/CPU is excluded because it serves
// as the reference implementation in test().
53 static inline testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargetsWithHalide()
55 return dnnBackendsAndTargets(true, true, false); // OpenCV/CPU is used as reference
// Shared fixture; `backend`/`target` members used by TEST_P bodies below are
// presumably inherited from DNNTestLayer — confirm against test_precomp.hpp.
58 class Test_Halide_layers : public DNNTestLayer {};
60 ////////////////////////////////////////////////////////////////////////////////
62 ////////////////////////////////////////////////////////////////////////////////
// Padding layer: several runs with random padding values and random 4-D shapes.
// NOTE(review): loop braces, the `LayerParams lp;` declaration and the
// paddings-randomization statement are elided in this sampled view.
63 TEST_P(Test_Halide_layers, Padding)
65 static const int kNumRuns = 10;
// 8 values — presumably a (begin, end) pad per each of the 4 dims; confirm.
66 std::vector<int> paddings(8);
67 cv::RNG& rng = cv::theRNG();
68 for (int t = 0; t < kNumRuns; ++t)
70 for (int i = 0; i < paddings.size(); ++i)
74 lp.set("paddings", DictValue::arrayInt<int*>(&paddings[0], paddings.size()));
76 lp.name = "testLayer";
// Random NCHW input; every dimension in [1, 10].
78 int sz[] = {1 + (int)rng(10), 1 + (int)rng(10), 1 + (int)rng(10), 1 + (int)rng(10)};
79 Mat input(4, &sz[0], CV_32F);
80 test(lp, input, backend, target);
84 ////////////////////////////////////////////////////////////////////////////////
86 ////////////////////////////////////////////////////////////////////////////////
// Convolution layer accuracy, parameterized over
// (in/out channels + group, input size, kernel, stride, pad, dilation, bias, backend/target).
87 typedef TestWithParam<tuple<Vec3i, Size, Size, Size, Size, Size, bool, tuple<Backend, Target> > > Convolution;
88 TEST_P(Convolution, Accuracy)
// Unpack the parameter tuple.
90 int inChannels = get<0>(GetParam())[0];
91 int outChannels = get<0>(GetParam())[1];
92 int group = get<0>(GetParam())[2];
93 Size inSize = get<1>(GetParam());
94 Size kernel = get<2>(GetParam());
95 Size stride = get<3>(GetParam());
96 Size pad = get<4>(GetParam());
97 Size dilation = get<5>(GetParam());
98 bool hasBias = get<6>(GetParam());
99 Backend backendId = get<0>(get<7>(GetParam()));
100 Target targetId = get<1>(get<7>(GetParam()));
// skipCheck may be set to true for unstable configurations in the full file — confirm.
102 bool skipCheck = false;
// Random convolution weights, Caffe layout: [out, in/group, kh, kw].
104 int sz[] = {outChannels, inChannels / group, kernel.height, kernel.width};
105 Mat weights(4, &sz[0], CV_32F);
106 randu(weights, -1.0f, 1.0f);
// NOTE(review): `LayerParams lp;` declaration elided in this sampled view.
109 lp.set("kernel_w", kernel.width);
110 lp.set("kernel_h", kernel.height);
111 lp.set("pad_w", pad.width);
112 lp.set("pad_h", pad.height);
113 lp.set("stride_w", stride.width);
114 lp.set("stride_h", stride.height);
115 lp.set("dilation_w", dilation.width);
116 lp.set("dilation_h", dilation.height);
117 lp.set("num_output", outChannels);
118 lp.set("group", group);
119 lp.set("bias_term", hasBias);
120 lp.type = "Convolution";
121 lp.name = "testLayer";
122 lp.blobs.push_back(weights);
// Optional bias blob (presumably guarded by `if (hasBias)` — elided here).
125 Mat bias(1, outChannels, CV_32F);
126 randu(bias, -1.0f, 1.0f);
127 lp.blobs.push_back(bias);
// Batch of 1, NCHW input.
129 int inpSz[] = {1, inChannels, inSize.height, inSize.width};
130 Mat input(4, &inpSz[0], CV_32F);
131 test(lp, input, backendId, targetId, skipCheck);
// Skip marker (presumably guarded by `if (skipCheck)` — elided here).
133 throw SkipTestException("Skip checks in unstable test");
// Cartesian product of convolution configurations exercised by the test above.
136 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Convolution, Combine(
137 /*in channels, out channels, group*/
138 Values(Vec3i(6, 4, 1), Vec3i(6, 9, 1),
139 Vec3i(6, 4, 2), Vec3i(6, 9, 3)),
140 /*in size*/ Values(Size(5, 6)),
141 /*kernel*/ Values(Size(3, 1), Size(1, 3)),
142 /*stride*/ Values(Size(1, 1), Size(2, 2)),
143 /*pad*/ Values(Size(1, 0), Size(0, 1)),
144 /*dilation*/ Values(Size(1, 1), Size(2, 2)),
146 dnnBackendsAndTargetsWithHalide()
149 ////////////////////////////////////////////////////////////////////////////////
151 ////////////////////////////////////////////////////////////////////////////////
// Deconvolution (transposed convolution) accuracy; the Vec4i packs
// (stride_w, stride_h, adj_w, adj_h).
152 typedef TestWithParam<tuple<Vec3i, Size, Size, Size, Size, Vec4i, bool, tuple<Backend, Target> > > Deconvolution;
153 TEST_P(Deconvolution, Accuracy)
155 int inChannels = get<0>(GetParam())[0];
156 int outChannels = get<0>(GetParam())[1];
157 int group = get<0>(GetParam())[2];
158 Size inSize = get<1>(GetParam());
159 Size kernel = get<2>(GetParam());
160 Size pad = get<3>(GetParam());
161 Size dilation = get<4>(GetParam());
162 Size stride = Size(get<5>(GetParam())[0], get<5>(GetParam())[1]);
163 Size adjPad = Size(get<5>(GetParam())[2], get<5>(GetParam())[3]);
164 bool hasBias = get<6>(GetParam());
165 Backend backendId = get<0>(get<7>(GetParam()));
166 Target targetId = get<1>(get<7>(GetParam()));
// Known-bad configuration on Myriad X with IE >= 2019R1: skip.
168 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
169 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
170 && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
171 && inChannels == 6 && outChannels == 4 && group == 1
172 && kernel == Size(1, 3) && pad == Size(1, 0)
173 && stride == Size(1, 1) && dilation == Size(1, 1))
174 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
// Deconvolution weights layout: [in, out/group, kh, kw] (note: transposed vs Convolution).
177 int sz[] = {inChannels, outChannels / group, kernel.height, kernel.width};
178 Mat weights(4, &sz[0], CV_32F);
179 randu(weights, -1.0f, 1.0f);
// NOTE(review): `LayerParams lp;` declaration elided in this sampled view.
182 lp.set("kernel_w", kernel.width);
183 lp.set("kernel_h", kernel.height);
184 lp.set("pad_w", pad.width);
185 lp.set("pad_h", pad.height);
186 lp.set("stride_w", stride.width);
187 lp.set("stride_h", stride.height);
188 lp.set("dilation_w", dilation.width);
189 lp.set("dilation_h", dilation.height);
// Output-adjustment padding specific to transposed convolution.
190 lp.set("adj_w", adjPad.width);
191 lp.set("adj_h", adjPad.height);
192 lp.set("num_output", outChannels);
193 lp.set("group", group);
194 lp.set("bias_term", hasBias);
195 lp.type = "Deconvolution";
196 lp.name = "testLayer";
197 lp.blobs.push_back(weights);
// Optional bias blob (presumably guarded by `if (hasBias)` — elided here).
200 Mat bias(1, outChannels, CV_32F);
201 randu(bias, -1.0f, 1.0f);
202 lp.blobs.push_back(bias);
204 int inpSz[] = {1, inChannels, inSize.height, inSize.width};
205 Mat input(4, &inpSz[0], CV_32F);
206 test(lp, input, backendId, targetId);
// Deconvolution configuration grid.
209 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Deconvolution, Combine(
210 /*in channels, out channels, group*/
211 Values(Vec3i(6, 4, 1), Vec3i(6, 9, 3)),
212 /*in size*/ Values(Size(5, 6)),
213 /*kernel*/ Values(Size(3, 1), Size(1, 3)),
214 /*pad*/ Values(Size(1, 0), Size(0, 1)),
215 /*dilation*/ Values(Size(1, 1)),
216 /*stride, adj. pad*/ Values(Vec4i(1,1, 0,0), Vec4i(2,2, 1,0), Vec4i(1,2, 0,1)),
218 dnnBackendsAndTargetsWithHalide()
221 ////////////////////////////////////////////////////////////////////////////////
223 ////////////////////////////////////////////////////////////////////////////////
// Local Response Normalization accuracy; Vec3i packs (channels, width, height),
// Vec3f packs (alpha, beta, bias).
224 typedef TestWithParam<tuple<Vec3i, int, Vec3f, bool, std::string, tuple<Backend, Target> > > LRN;
225 TEST_P(LRN, Accuracy)
227 int inChannels = get<0>(GetParam())[0];
228 Size inSize = Size(get<0>(GetParam())[1], get<0>(GetParam())[2]);
229 int localSize = get<1>(GetParam());
230 float alpha = get<2>(GetParam())[0];
231 float beta = get<2>(GetParam())[1];
232 float bias = get<2>(GetParam())[2];
233 bool normBySize = get<3>(GetParam());
234 std::string nrmType = get<4>(GetParam());
235 Backend backendId = get<0>(get<5>(GetParam()));
236 Target targetId = get<1>(get<5>(GetParam()));
// Known failure on Myriad for across-channel LRN with 5-wide/-tall inputs: skip.
238 if ((inSize.width == 5 || inSize.height == 5) && targetId == DNN_TARGET_MYRIAD &&
239 nrmType == "ACROSS_CHANNELS")
240 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
// NOTE(review): `LayerParams lp;` and lp.type assignment elided in this sampled view.
243 lp.set("norm_region", nrmType);
244 lp.set("local_size", localSize);
245 lp.set("alpha", alpha);
246 lp.set("beta", beta);
247 lp.set("bias", bias);
248 lp.set("norm_by_size", normBySize);
250 lp.name = "testLayer";
252 int sz[] = {1, inChannels, inSize.height, inSize.width};
253 Mat input(4, &sz[0], CV_32F);
254 test(lp, input, backendId, targetId);
// LRN configuration grid.
257 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, LRN, Combine(
258 /*input ch,w,h*/ Values(Vec3i(6, 5, 8), Vec3i(7, 11, 6)),
259 /*local size*/ Values(3, 5),
260 Values(Vec3f(0.9f, 1.0f, 1.1f), Vec3f(0.9f, 1.1f, 1.0f),
261 /*alpha, beta, bias*/ Vec3f(1.0f, 0.9f, 1.1f), Vec3f(1.0f, 1.1f, 0.9f),
262 Vec3f(1.1f, 0.9f, 1.0f), Vec3f(1.1f, 1.0f, 0.9f)),
263 /*norm_by_size*/ Bool(),
264 /*norm_type*/ Values("ACROSS_CHANNELS", "WITHIN_CHANNEL"),
265 dnnBackendsAndTargetsWithHalide()
268 ////////////////////////////////////////////////////////////////////////////////
270 ////////////////////////////////////////////////////////////////////////////////
// Average pooling accuracy; the input size is derived from the desired output
// size so every configuration divides evenly.
271 typedef TestWithParam<tuple<int, Size, Size, Size, tuple<Backend, Target> > > AvePooling;
272 TEST_P(AvePooling, Accuracy)
274 int inChannels = get<0>(GetParam());
// NOTE(review): stray double semicolon `;;` on the next line — harmless but worth cleaning up.
275 Size outSize = get<1>(GetParam());; // Input size will be computed from parameters.
276 Size kernel = get<2>(GetParam());
277 Size stride = get<3>(GetParam());
278 Backend backendId = get<0>(get<4>(GetParam()));
279 Target targetId = get<1>(get<4>(GetParam()));
// Known failure for 1x1 kernels on Myriad X: skip.
281 #if defined(INF_ENGINE_RELEASE)
282 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
283 && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
284 && kernel == Size(1, 1) && (stride == Size(1, 1) || stride == Size(2, 2)))
285 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
// Back-compute an input size that yields exactly `outSize` after pooling.
288 const int inWidth = (outSize.width - 1) * stride.width + kernel.width;
289 const int inHeight = (outSize.height - 1) * stride.height + kernel.height;
// NOTE(review): `LayerParams lp;` and lp.type assignment elided in this sampled view.
292 lp.set("pool", "ave");
293 lp.set("kernel_w", kernel.width);
294 lp.set("kernel_h", kernel.height);
295 lp.set("stride_w", stride.width);
296 lp.set("stride_h", stride.height);
298 lp.name = "testLayer";
300 int sz[] = {1, inChannels, inHeight, inWidth};
301 Mat input(4, &sz[0], CV_32F);
302 test(lp, input, backendId, targetId);
// Average-pooling configuration grid.
305 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, AvePooling, Combine(
306 /*in channels*/ Values(3, 4),
307 /*out size*/ Values(Size(1, 1), Size(2, 2), Size(3, 2), Size(4, 7)),
308 /*kernel*/ Values(Size(1, 1), Size(2, 2), Size(3, 3), Size(3, 2)),
309 /*stride*/ Values(Size(1, 1), Size(2, 2), Size(3, 2)),
310 dnnBackendsAndTargetsWithHalide()
313 ////////////////////////////////////////////////////////////////////////////////
315 ////////////////////////////////////////////////////////////////////////////////
// Max pooling accuracy; several version-specific Inference Engine skips below.
316 typedef TestWithParam<tuple<int, Size, Size, Size, Size, tuple<Backend, Target> > > MaxPooling;
317 TEST_P(MaxPooling, Accuracy)
319 int inChannels = get<0>(GetParam());
320 Size inSize = get<1>(GetParam());
321 Size kernel = get<2>(GetParam());
322 Size stride = get<3>(GetParam());
323 Size pad = get<4>(GetParam());
324 Backend backendId = get<0>(get<5>(GetParam()));
325 Target targetId = get<1>(get<5>(GetParam()));
// IE <= 2018R5, Myriad: known-bad configuration, skip.
327 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
328 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
329 && inSize == Size(7, 6) && kernel == Size(3, 2)
330 && (stride == Size(1, 1) || stride == Size(2, 2))
331 && (pad == Size(0, 1) || pad == Size(1, 1))
333 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
// IE == 2018R5, Myriad: another known-bad configuration, skip.
336 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
337 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
338 && (kernel == Size(2, 2) || kernel == Size(3, 2))
339 && stride == Size(1, 1) && (pad == Size(0, 0) || pad == Size(0, 1))
341 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
// IE >= 2019R1, Myriad X: skip padded/strided cases.
344 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
345 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
346 && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
347 && (stride == Size(1, 1) || stride == Size(2, 2))
348 && (pad == Size(0, 1) || pad == Size(1, 1))
350 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
// nGraph backend: strided+padded max pooling not supported, skip.
353 #if defined(INF_ENGINE_RELEASE)
354 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && stride != Size(1, 1) && pad != Size(0, 0))
355 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
// NOTE(review): `LayerParams lp;` and lp.type assignment elided in this sampled view.
359 lp.set("pool", "max");
360 lp.set("kernel_w", kernel.width);
361 lp.set("kernel_h", kernel.height);
362 lp.set("stride_w", stride.width);
363 lp.set("stride_h", stride.height);
364 lp.set("pad_w", pad.width);
365 lp.set("pad_h", pad.height);
367 lp.name = "testLayer";
369 int sz[] = {1, inChannels, inSize.height, inSize.width};
370 Mat input(4, &sz[0], CV_32F);
371 test(lp, input, backendId, targetId);
// Max-pooling configuration grid.
374 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, MaxPooling, Combine(
375 /*in channels*/ Values(3, 4),
376 /*in size*/ Values(Size(5, 5), Size(7, 6)),
377 /*kernel*/ Values(Size(2, 2), Size(3, 3), Size(3, 2)),
378 /*stride*/ Values(Size(1, 1), Size(2, 2), Size(3, 2)),
379 /*pad*/ Values(Size(0, 0), Size(1, 1), Size(0, 1)),
380 dnnBackendsAndTargetsWithHalide()
383 ////////////////////////////////////////////////////////////////////////////////
385 ////////////////////////////////////////////////////////////////////////////////
// Fully-connected (InnerProduct) layer accuracy.
386 typedef TestWithParam<tuple<int, Size, int, bool, tuple<Backend, Target> > > FullyConnected;
387 TEST_P(FullyConnected, Accuracy)
389 int inChannels = get<0>(GetParam());
390 Size inSize = get<1>(GetParam());
391 int outChannels = get<2>(GetParam());
392 bool hasBias = get<3>(GetParam());
393 Backend backendId = get<0>(get<4>(GetParam()));
394 Target targetId = get<1>(get<4>(GetParam()));
// IE NN Builder: skip on OpenCL FP16 and Myriad X targets.
395 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (targetId == DNN_TARGET_OPENCL_FP16 ||
396 (targetId == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X))) {
397 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
398 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
// Weight matrix: one row per output neuron over the flattened input.
401 Mat weights(outChannels, inChannels * inSize.height * inSize.width, CV_32F);
402 randu(weights, -1.0f, 1.0f);
404 Mat bias(1, outChannels, CV_32F);
405 randu(bias, -1.0f, 1.0f);
// NOTE(review): `LayerParams lp;` declaration elided in this sampled view.
408 lp.set("num_output", outChannels);
409 lp.set("bias_term", hasBias);
410 lp.blobs.push_back(weights);
411 lp.blobs.push_back(bias);
412 lp.type = "InnerProduct";
413 lp.name = "testLayer";
415 int sz[] = {1, inChannels, inSize.height, inSize.width};
416 Mat input(4, &sz[0], CV_32F);
417 test(lp, input, backendId, targetId);
// Fully-connected configuration grid.
420 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, FullyConnected, Combine(
421 /*in channels*/ Values(3, 4),
422 /*in size*/ Values(Size(5, 4), Size(4, 5), Size(1, 1)),
423 /*out channels*/ Values(3, 4),
425 dnnBackendsAndTargetsWithHalide()
428 ////////////////////////////////////////////////////////////////////////////////
430 ////////////////////////////////////////////////////////////////////////////////
// Softmax accuracy over a 1x C x 1 x 1 input (softmax over channels).
431 typedef TestWithParam<tuple<int, tuple<Backend, Target> > > SoftMax;
432 TEST_P(SoftMax, Accuracy)
434 int inChannels = get<0>(GetParam());
435 Backend backendId = get<0>(get<1>(GetParam()));
436 Target targetId = get<1>(get<1>(GetParam()));
// NOTE(review): `LayerParams lp;` and lp.type assignment elided in this sampled view.
439 lp.name = "testLayer";
441 int sz[] = {1, inChannels, 1, 1};
442 Mat input(4, &sz[0], CV_32F);
443 test(lp, input, backendId, targetId);
// Channel counts for the softmax test, including a large (1024) case.
446 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, SoftMax, Combine(
447 Values(3, 4, 5, 1024),
448 dnnBackendsAndTargetsWithHalide()
451 //////////////////////////////////////////////////////////////////////////////
452 // Max pooling - unpooling
453 //////////////////////////////////////////////////////////////////////////////
// MaxPooling followed by MaxUnpool; the second output of the pooling layer
// (indices) feeds the unpool layer's second input.
454 TEST_P(Test_Halide_layers, MaxPoolUnpool)
// MaxUnpool is not supported by the IE backends: skip.
456 if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
457 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
458 if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
459 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
// NOTE(review): `LayerParams pool;` / `unpool;` / `Net net;` declarations
// are elided in this sampled view.
462 pool.set("pool", "max");
463 pool.set("kernel_w", 2);
464 pool.set("kernel_h", 2);
465 pool.set("stride_w", 2);
466 pool.set("stride_h", 2);
467 pool.set("pad_w", 0);
468 pool.set("pad_h", 0);
469 pool.type = "Pooling";
470 pool.name = "testPool";
// Unpool parameters mirror the pooling layer's geometry.
473 unpool.set("pool_k_w", 2);
474 unpool.set("pool_k_h", 2);
475 unpool.set("pool_stride_w", 2);
476 unpool.set("pool_stride_h", 2);
477 unpool.set("pool_pad_w", 0);
478 unpool.set("pool_pad_h", 0);
479 unpool.type = "MaxUnpool";
480 unpool.name = "testUnpool";
483 int poolId = net.addLayer(pool.name, pool.type, pool);
484 net.connect(0, 0, poolId, 0);
486 int unpoolId = net.addLayer(unpool.name, unpool.type, unpool);
// Data on input 0, max-indices on input 1.
487 net.connect(poolId, 0, unpoolId, 0);
488 net.connect(poolId, 1, unpoolId, 1);
490 int sz[] = {1, 1, 4, 4};
491 Mat input(4, &sz[0], CV_32F);
492 test(input, net, backend, target);
495 ////////////////////////////////////////////////////////////////////////////////
496 // AvePooling + in-place layers
497 ////////////////////////////////////////////////////////////////////////////////
// Channel count shared by all in-place activation tests below.
498 static const int kNumChannels = 3;
// Builds [input -> ave_pool -> activation(lp)] and compares backends.
// The pooling layer in front ensures the activation runs in-place on an
// intermediate blob rather than directly on the network input.
500 void testInPlaceActivation(LayerParams& lp, Backend backendId, Target targetId)
// Caller must name the layer before passing it in.
502 EXPECT_FALSE(lp.name.empty());
// NOTE(review): `LayerParams pool;` / `Net net;` declarations elided in this sampled view.
505 pool.set("pool", "ave");
506 pool.set("kernel_w", 2);
507 pool.set("kernel_h", 2);
508 pool.set("stride_w", 2);
509 pool.set("stride_h", 2);
510 pool.type = "Pooling";
511 pool.name = "ave_pool";
514 int poolId = net.addLayer(pool.name, pool.type, pool);
515 net.connect(0, 0, poolId, 0);
516 net.addLayerToPrev(lp.name, lp.type, lp);
518 int sz[] = {1, kNumChannels, 10, 10};
519 Mat input(4, &sz[0], CV_32F);
520 test(input, net, backendId, targetId);
// BatchNorm accuracy as an in-place activation after average pooling.
523 typedef TestWithParam<tuple<bool, bool, float, tuple<Backend, Target> > > BatchNorm;
524 TEST_P(BatchNorm, Accuracy)
526 bool hasWeights = get<0>(GetParam());
527 bool hasBias = get<1>(GetParam());
528 float epsilon = get<2>(GetParam());
529 Backend backendId = get<0>(get<3>(GetParam()));
530 Target targetId = get<1>(get<3>(GetParam()));
// NOTE(review): `LayerParams lp;` declaration elided in this sampled view.
533 lp.set("has_weight", hasWeights);
534 lp.set("has_bias", hasBias);
535 lp.set("eps", epsilon);
536 lp.type = "BatchNorm";
537 lp.name = "testLayer";
// Mean, variance and one of weight/bias blobs — presumably the 4th blob
// covers the weight-or-bias case and a 5th (for both) is elided; confirm
// against the full file's blob layout.
540 for (int i = 0; i < 3; ++i)
541 lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
542 if (hasBias || hasWeights)
543 lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
// Positive random values keep the variance blob valid.
545 for (int i = 0; i < lp.blobs.size(); ++i)
546 randu(lp.blobs[i], 0.0f, 1.0f);
548 testInPlaceActivation(lp, backendId, targetId);
// BatchNorm configuration grid (has-bias Bool() presumably elided from view).
551 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, BatchNorm, Combine(
552 /*has weights*/ Bool(),
554 /*epsilon*/ Values(1e-3f, 1e-5f),
555 dnnBackendsAndTargetsWithHalide()
// ReLU / leaky-ReLU accuracy, parameterized by the negative slope.
558 typedef TestWithParam<tuple<float, tuple<Backend, Target> > > ReLU;
559 TEST_P(ReLU, Accuracy)
561 float negativeSlope = get<0>(GetParam());
562 Backend backendId = get<0>(get<1>(GetParam()));
563 Target targetId = get<1>(get<1>(GetParam()));
// IE >= 2019R2, Myriad: negative slopes unsupported, skip.
565 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019020000)
566 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD && negativeSlope < 0)
567 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
// NOTE(review): `LayerParams lp;` and lp.type assignment elided in this sampled view.
571 lp.set("negative_slope", negativeSlope);
573 lp.name = "testLayer";
574 testInPlaceActivation(lp, backendId, targetId);
// Slopes cover >1, fractional, negative (leaky) and zero (plain ReLU).
577 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, ReLU, Combine(
578 /*negative slope*/ Values(2.0f, 0.3f, -0.1f, 0.0f),
579 dnnBackendsAndTargetsWithHalide()
// Parameter-free activations, selected by layer type string.
582 typedef TestWithParam<tuple<std::string, tuple<Backend, Target> > > NoParamActivation;
583 TEST_P(NoParamActivation, Accuracy)
585 Backend backendId = get<0>(get<1>(GetParam()));
586 Target targetId = get<1>(get<1>(GetParam()));
// NOTE(review): `LayerParams lp;` declaration elided in this sampled view.
589 lp.type = get<0>(GetParam());
590 lp.name = "testLayer";
591 testInPlaceActivation(lp, backendId, targetId);
// Activation layer types exercised without extra parameters.
593 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, NoParamActivation, Combine(
594 /*type*/ Values("TanH", "Sigmoid", "AbsVal", "BNLL", "Swish", "Mish"),
595 dnnBackendsAndTargetsWithHalide()
// Power layer accuracy: out = (shift + scale * in) ^ power.
598 typedef TestWithParam<tuple<Vec3f, tuple<Backend, Target> > > Power;
599 TEST_P(Power, Accuracy)
601 float power = get<0>(GetParam())[0];
602 float scale = get<0>(GetParam())[1];
603 float shift = get<0>(GetParam())[2];
604 Backend backendId = get<0>(get<1>(GetParam()));
605 Target targetId = get<1>(get<1>(GetParam()));
// NOTE(review): `LayerParams lp;` and lp.type assignment elided in this sampled view.
608 lp.set("power", power);
609 lp.set("scale", scale);
610 lp.set("shift", shift);
612 lp.name = "testLayer";
613 testInPlaceActivation(lp, backendId, targetId);
// Power-layer parameter triples.
616 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Power, Combine(
617 /*power, scale, shift*/ Values(Vec3f(0.9f, 1.0f, 1.1f), Vec3f(0.9f, 1.1f, 1.0f),
618 Vec3f(1.0f, 0.9f, 1.1f), Vec3f(1.0f, 1.1f, 0.9f),
619 Vec3f(1.1f, 0.9f, 1.0f), Vec3f(1.1f, 1.0f, 0.9f)),
620 dnnBackendsAndTargetsWithHalide()
// Per-channel PReLU with a random slope blob of kNumChannels entries.
623 TEST_P(Test_Halide_layers, ChannelsPReLU)
// NOTE(review): `LayerParams lp;` declaration elided in this sampled view.
626 lp.type = "ChannelsPReLU";
627 lp.name = "testLayer";
628 lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
629 randu(lp.blobs[0], -1.0f, 1.0f);
631 testInPlaceActivation(lp, backend, target);
// Scale layer accuracy, with an optional bias blob.
634 typedef TestWithParam<tuple<bool, tuple<Backend, Target> > > Scale;
635 TEST_P(Scale, Accuracy)
637 bool hasBias = get<0>(GetParam());
638 Backend backendId = get<0>(get<1>(GetParam()));
639 Target targetId = get<1>(get<1>(GetParam()));
// NOTE(review): `LayerParams lp;` and lp.type assignment elided in this sampled view.
642 lp.set("bias_term", hasBias);
644 lp.name = "testLayer";
// Scale weights blob.
645 lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
646 randu(lp.blobs[0], -1.0f, 1.0f);
// Bias blob (presumably guarded by `if (hasBias)` — elided here).
649 lp.blobs.push_back(Mat(1, kNumChannels, CV_32F));
650 randu(lp.blobs[1], -1.0f, 1.0f);
652 testInPlaceActivation(lp, backendId, targetId);
// Scale configuration grid (bias Bool() presumably elided from view).
655 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Scale, Combine(
657 dnnBackendsAndTargetsWithHalide()
660 ////////////////////////////////////////////////////////////////////////////////
662 ////////////////////////////////////////////////////////////////////////////////
664 // input --- conv --- concat --- output
665 // `--- conv ----^ ^ ^
666 // `---- ... ------' '
667 // `-----------------'
// Concat of the raw input plus up to three 1x1-convolution branches
// (a zero channel count disables a branch).
668 typedef TestWithParam<tuple<Vec3i, Vec3i, tuple<Backend, Target> > > Concat;
669 TEST_P(Concat, Accuracy)
671 Vec3i inSize = get<0>(GetParam());
672 Vec3i numChannels = get<1>(GetParam());
673 Backend backendId = get<0>(get<2>(GetParam()));
674 Target targetId = get<1>(get<2>(GetParam()));
// IE <= 2018R5, Myriad: this configuration crashes the plugin, skip.
676 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
677 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
678 && inSize == Vec3i(1, 4, 5) && numChannels == Vec3i(1, 6, 2)
680 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION); // crash
// IE >= 2019R1, CPU: known failure, skip.
683 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
684 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_CPU
685 && inSize == Vec3i(1, 4, 5) && numChannels == Vec3i(1, 6, 2)
687 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION); // TODO: IE_CPU
// NOTE(review): `Net net;` declaration and the `if (numChannels[i] == 0) continue;`
// branch guard are elided in this sampled view.
692 std::vector<int> convLayerIds;
693 convLayerIds.reserve(numChannels.channels);
694 for (int i = 0, n = numChannels.channels; i < n; ++i)
// 1x1 convolution branch from the network input.
699 int sz[] = {numChannels[i], inSize[0], 1, 1};
700 Mat weights(4, &sz[0], CV_32F);
701 randu(weights, -1.0f, 1.0f);
703 LayerParams convParam;
704 convParam.set("kernel_w", 1);
705 convParam.set("kernel_h", 1);
706 convParam.set("num_output", numChannels[i]);
707 convParam.set("bias_term", false);
708 convParam.type = "Convolution";
709 std::ostringstream ss;
710 ss << "convLayer" << i;
711 convParam.name = ss.str();
712 convParam.blobs.push_back(weights);
714 int layerId = net.addLayer(convParam.name, convParam.type, convParam);
715 convLayerIds.push_back(layerId);
716 net.connect(0, 0, layerId, 0);
// Concat input 0 is the raw network input; conv branches follow at 1..N.
719 LayerParams concatParam;
720 concatParam.type = "Concat";
721 concatParam.name = "testLayer";
722 int concatId = net.addLayer(concatParam.name, concatParam.type, concatParam);
723 net.connect(0, 0, concatId, 0);
724 for (int i = 0; i < convLayerIds.size(); ++i)
726 net.connect(convLayerIds[i], 0, concatId, i + 1);
729 int sz[] = {1, inSize[0], inSize[1], inSize[2]};
730 Mat input(4, &sz[0], CV_32F);
731 test(input, net, backendId, targetId);
// Concat configuration grid; a 0 channel entry means "branch disabled".
734 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Concat, Combine(
735 /*input size*/ Values(Vec3i(1, 4, 5), Vec3i(2, 8, 6)),
736 /*channels*/ Values(Vec3i(2, 0, 0), Vec3i(3, 4, 0), Vec3i(1, 6, 2)),
737 dnnBackendsAndTargetsWithHalide()
740 ////////////////////////////////////////////////////////////////////////////////
741 // Element-wise layers
742 ////////////////////////////////////////////////////////////////////////////////
744 // input --- conv --- eltwise --- output
745 // `--- conv ----^ ^ ^
746 // `---- ... ------' '
747 // `-----------------'
// Eltwise (prod/sum/div/max) over the raw input plus numConv 1x1-conv branches;
// optional per-input coefficients for weighted sum.
748 typedef TestWithParam<tuple<Vec3i, std::string, int, bool, tuple<Backend, Target> > > Eltwise;
749 TEST_P(Eltwise, Accuracy)
751 Vec3i inSize = get<0>(GetParam());
752 std::string op = get<1>(GetParam());
753 int numConv = get<2>(GetParam());
754 bool weighted = get<3>(GetParam());
755 Backend backendId = get<0>(get<4>(GetParam()));
756 Target targetId = get<1>(get<4>(GetParam()));
// IE <= 2018R5, Myriad: known failures for this input size, skip.
758 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
759 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD &&
760 inSize == Vec3i(1, 4, 5))
761 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
// IE >= 2019R1: multi-conv eltwise unsupported on NN Builder, skip.
764 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
765 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && numConv > 1)
766 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
// OpenCL NN Builder: unweighted single-conv sum fails, skip.
769 #if defined(INF_ENGINE_RELEASE)
770 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_OPENCL &&
771 op == "sum" && numConv == 1 && !weighted)
772 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
// nGraph: multi-conv eltwise unsupported, skip.
775 #if defined(INF_ENGINE_RELEASE)
776 if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && numConv > 1)
777 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
// When set (==1), the raw input occupies eltwise input slot 0 and the conv
// branches follow; for "div" the order is flipped so the convolution comes first.
// NOTE(review): declared `bool` but compared against 0/1 ints below — `int`
// would match usage; also the `if (op == "div")` guard block is elided here.
780 bool convInputShift = 1;
781 int numEltwiseInputs = numConv;
785 convInputShift = 0; // first input is convolution
// Build numConv parallel 1x1 convolution branches off the network input.
790 std::vector<int> convLayerIds(numConv);
791 for (int i = 0; i < numConv; ++i)
793 int sz[] = {inSize[0], inSize[0], 1, 1};
794 Mat weights(4, &sz[0], CV_32F);
795 randu(weights, -1.0f, 1.0f);
797 LayerParams convParam;
798 convParam.set("kernel_w", 1);
799 convParam.set("kernel_h", 1);
800 convParam.set("num_output", inSize[0]);
801 convParam.set("bias_term", false);
802 convParam.type = "Convolution";
803 std::ostringstream ss;
804 ss << "convLayer" << i;
805 convParam.name = ss.str();
806 convParam.blobs.push_back(weights);
808 convLayerIds[i] = net.addLayer(convParam.name, convParam.type, convParam);
809 net.connect(0, 0, convLayerIds[i], 0);
812 LayerParams eltwiseParam;
813 eltwiseParam.set("operation", op);
// Weighted sum: random coefficient per eltwise input.
814 if (op == "sum" && weighted)
816 RNG& rng = cv::theRNG();
817 std::vector<float> coeff(1 + numConv);
818 for (int i = 0; i < coeff.size(); ++i)
820 coeff[i] = rng.uniform(-2.0f, 2.0f);
822 eltwiseParam.set("coeff", DictValue::arrayReal<float*>(&coeff[0], coeff.size()));
824 eltwiseParam.type = "Eltwise";
825 eltwiseParam.name = "testLayer";
826 int eltwiseId = net.addLayer(eltwiseParam.name, eltwiseParam.type, eltwiseParam);
// Wire eltwise inputs in the order dictated by convInputShift (see above).
827 if (convInputShift == 1)
828 net.connect(0, 0, eltwiseId, 0);
829 for (int i = 0; i < numConv; ++i)
831 net.connect(convLayerIds[i], 0, eltwiseId, i + convInputShift);
833 if (convInputShift == 0)
834 net.connect(0, 0, eltwiseId, numConv)
835 for (int i = numConv; i < numEltwiseInputs; ++i)
837 net.connect(0, 0, eltwiseId, i + 1);
840 int sz[] = {1, inSize[0], inSize[1], inSize[2]};
841 Mat input(4, &sz[0], CV_32F);
// Presumably guarded by `if (op == "div")` — elided here.
// NOTE(review): equal randu bounds fill the input with a constant 1.0, which
// only trivially avoids small divisors — confirm the intended range (e.g. [1, 2]).
843 randu(input, 1.0f, 1.0f); // ensure no divisor value has absolute value of less than 0.5
// For "div" keep the fixed input (randInput=false) so no random value re-randomizes it.
844 test(input, net, backendId, targetId, /*skipCheck*/false, (op == "div") ? false : true);
// Eltwise configuration grid.
847 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Eltwise, Combine(
848 /*input size*/ Values(Vec3i(1, 4, 5), Vec3i(2, 8, 6)),
849 /*operation*/ Values("prod", "sum", "div", "max"),
850 /*num convs*/ Values(1, 2, 3),
851 /*weighted(for sum only)*/ Bool(),
852 dnnBackendsAndTargetsWithHalide()
855 ////////////////////////////////////////////////////////////////////////////
857 ////////////////////////////////////////////////////////////////////////////
// Verifies that a net mixing Halide-supported layers (LRN) with an
// unsupported one (MVN) still matches the OpenCV reference, on both CPU
// and OpenCL Halide targets.
// NOTE(review): LayerParams declarations, parameter settings and `Net net;`
// are elided in this sampled view; presumably wrapped in #ifdef HAVE_HALIDE
// (the matching #endif follows this test).
859 TEST(MixedBackends_Halide_Default_Halide, Accuracy)
861 // A layer that supports the Halide backend.
864 lrn.name = "testLRN";
866 // Some layers that don't support the Halide backend yet.
869 mvn.name = "testMVN";
871 // Halide layer again.
874 lrn2.name = "testLRN2";
877 int lrnId = net.addLayer(lrn.name, lrn.type, lrn);
878 net.connect(0, 0, lrnId, 0);
879 net.addLayerToPrev(mvn.name, mvn.type, mvn);
880 net.addLayerToPrev(lrn2.name, lrn2.type, lrn2);
882 int sz[] = {4, 3, 5, 6};
883 Mat input(4, &sz[0], CV_32F);
884 randu(input, -1.0f, 1.0f);
// Reference pass on OpenCV/CPU.
886 net.setPreferableBackend(DNN_BACKEND_OPENCV);
887 Mat outputDefault = net.forward().clone();
// Halide on the default (CPU) target.
889 net.setPreferableBackend(DNN_BACKEND_HALIDE);
891 Mat outputHalide = net.forward().clone();
892 normAssert(outputDefault, outputHalide);
// Halide on OpenCL.
894 net.setPreferableTarget(DNN_TARGET_OPENCL);
896 outputHalide = net.forward().clone();
897 normAssert(outputDefault, outputHalide);
899 #endif // HAVE_HALIDE
// Instantiate the shared-fixture tests across all Halide-capable backend/target pairs.
901 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_Halide_layers, dnnBackendsAndTargetsWithHalide());