1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "test_precomp.hpp"
47 using namespace cvtest;
49 /////////////////////////////////////////////////////////////////////////////////////////////////
// Named wrapper types for the FAST test's value-parameterized inputs
// (gives readable test names instead of bare int/bool).
54 IMPLEMENT_PARAM_CLASS(FAST_Threshold, int)
55 IMPLEMENT_PARAM_CLASS(FAST_NonmaxSuppression, bool)
// Fixture for the CUDA FAST detector tests, parameterized over
// (device, detection threshold, non-max-suppression flag).
// NOTE(review): this is an excerpt — braces, the `threshold` member
// declaration, and the SetUp() header from the original file are not visible.
58 PARAM_TEST_CASE(FAST, cv::cuda::DeviceInfo, FAST_Threshold, FAST_NonmaxSuppression)
60 cv::cuda::DeviceInfo devInfo;
62 bool nonmaxSuppression;
// Unpack the GTest parameter tuple into fixture members.
66 devInfo = GET_PARAM(0);
67 threshold = GET_PARAM(1);
68 nonmaxSuppression = GET_PARAM(2);
// Make the selected device current for all subsequent CUDA calls in the test.
70 cv::cuda::setDevice(devInfo.deviceID());
// Accuracy test: the CUDA FAST detector must produce exactly the same
// keypoints as the CPU reference cv::FAST on the same grayscale image.
// NOTE(review): excerpt — the try/else braces of the original are not visible.
74 CUDA_TEST_P(FAST, Accuracy)
76 cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
77 ASSERT_FALSE(image.empty());
79 cv::cuda::FAST_CUDA fast(threshold);
80 fast.nonmaxSuppression = nonmaxSuppression;
// Devices without global atomics cannot run FAST: the call is expected
// to throw cv::Exception with code StsNotImplemented.
82 if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
86 std::vector<cv::KeyPoint> keypoints;
87 fast(loadMat(image), cv::cuda::GpuMat(), keypoints);
89 catch (const cv::Exception& e)
91 ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
// Capable device: run the GPU detector (empty GpuMat = no mask) ...
96 std::vector<cv::KeyPoint> keypoints;
97 fast(loadMat(image), cv::cuda::GpuMat(), keypoints);
// ... and compare against the CPU gold result with the same parameters.
99 std::vector<cv::KeyPoint> keypoints_gold;
100 cv::FAST(image, keypoints_gold, threshold, nonmaxSuppression);
102 ASSERT_KEYPOINTS_EQ(keypoints_gold, keypoints);
// Instantiate FAST tests over two thresholds x both non-max-suppression modes.
// NOTE(review): excerpt — the device-values line of the original Combine()
// (between these lines) is not visible here.
106 INSTANTIATE_TEST_CASE_P(CUDA_Features2D, FAST, testing::Combine(
108 testing::Values(FAST_Threshold(25), FAST_Threshold(50)),
109 testing::Values(FAST_NonmaxSuppression(false), FAST_NonmaxSuppression(true))));
111 /////////////////////////////////////////////////////////////////////////////////////////////////
// Named wrapper types for the ORB test's value-parameterized inputs.
116 IMPLEMENT_PARAM_CLASS(ORB_FeaturesCount, int)
117 IMPLEMENT_PARAM_CLASS(ORB_ScaleFactor, float)
118 IMPLEMENT_PARAM_CLASS(ORB_LevelsCount, int)
119 IMPLEMENT_PARAM_CLASS(ORB_EdgeThreshold, int)
120 IMPLEMENT_PARAM_CLASS(ORB_firstLevel, int)
121 IMPLEMENT_PARAM_CLASS(ORB_WTA_K, int)
122 IMPLEMENT_PARAM_CLASS(ORB_PatchSize, int)
123 IMPLEMENT_PARAM_CLASS(ORB_BlurForDescriptor, bool)
// Score type is an enum parameter: Harris corner score vs. FAST score.
126 CV_ENUM(ORB_ScoreType, ORB::HARRIS_SCORE, ORB::FAST_SCORE)
// Fixture for the CUDA ORB tests, parameterized over the full set of ORB
// construction parameters plus a CUDA-specific blurForDescriptor flag.
// NOTE(review): excerpt — braces and most member declarations (nFeatures,
// scaleFactor, nLevels, edgeThreshold, firstLevel, WTA_K, scoreType,
// patchSize) from the original file are not visible here.
128 PARAM_TEST_CASE(ORB, cv::cuda::DeviceInfo, ORB_FeaturesCount, ORB_ScaleFactor, ORB_LevelsCount, ORB_EdgeThreshold, ORB_firstLevel, ORB_WTA_K, ORB_ScoreType, ORB_PatchSize, ORB_BlurForDescriptor)
130 cv::cuda::DeviceInfo devInfo;
139 bool blurForDescriptor;
// Unpack the GTest parameter tuple into fixture members.
143 devInfo = GET_PARAM(0);
144 nFeatures = GET_PARAM(1);
145 scaleFactor = GET_PARAM(2);
146 nLevels = GET_PARAM(3);
147 edgeThreshold = GET_PARAM(4);
148 firstLevel = GET_PARAM(5);
149 WTA_K = GET_PARAM(6);
150 scoreType = GET_PARAM(7);
151 patchSize = GET_PARAM(8);
152 blurForDescriptor = GET_PARAM(9);
// Make the selected device current for all subsequent CUDA calls in the test.
154 cv::cuda::setDevice(devInfo.deviceID());
// Accuracy test: run CUDA ORB and CPU ORB with identical parameters and a
// shared mask, cross-match the descriptors, and require that more than 35%
// of GPU keypoints find a geometrically-consistent CPU counterpart.
// NOTE(review): excerpt — the try/else braces of the original are not visible.
158 CUDA_TEST_P(ORB, Accuracy)
160 cv::Mat image = readImage("features2d/aloe.png", cv::IMREAD_GRAYSCALE);
161 ASSERT_FALSE(image.empty());
// Mask out the top-left quadrant so both detectors skip the same region.
163 cv::Mat mask(image.size(), CV_8UC1, cv::Scalar::all(1));
164 mask(cv::Range(0, image.rows / 2), cv::Range(0, image.cols / 2)).setTo(cv::Scalar::all(0));
166 cv::cuda::ORB_CUDA orb(nFeatures, scaleFactor, nLevels, edgeThreshold, firstLevel, WTA_K, scoreType, patchSize);
167 orb.blurForDescriptor = blurForDescriptor;
// Devices without global atomics cannot run ORB: expect StsNotImplemented.
169 if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
173 std::vector<cv::KeyPoint> keypoints;
174 cv::cuda::GpuMat descriptors;
175 orb(loadMat(image), loadMat(mask), keypoints, descriptors);
177 catch (const cv::Exception& e)
179 ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
// Capable device: detect + compute on the GPU ...
184 std::vector<cv::KeyPoint> keypoints;
185 cv::cuda::GpuMat descriptors;
186 orb(loadMat(image), loadMat(mask), keypoints, descriptors);
// ... and on the CPU with the same parameters as the gold reference.
188 cv::Ptr<cv::ORB> orb_gold = cv::ORB::create(nFeatures, scaleFactor, nLevels, edgeThreshold, firstLevel, WTA_K, scoreType, patchSize);
190 std::vector<cv::KeyPoint> keypoints_gold;
191 cv::Mat descriptors_gold;
192 orb_gold->detectAndCompute(image, mask, keypoints_gold, descriptors_gold);
// Hamming distance is the correct metric for ORB's binary descriptors.
194 cv::BFMatcher matcher(cv::NORM_HAMMING);
195 std::vector<cv::DMatch> matches;
196 matcher.match(descriptors_gold, cv::Mat(descriptors), matches);
// matchedRatio is a loose similarity bound, not exact equality, because GPU
// and CPU ORB are not bit-identical.
198 int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints, matches);
199 double matchedRatio = static_cast<double>(matchedCount) / keypoints.size();
201 EXPECT_GT(matchedRatio, 0.35);
// Instantiate ORB tests over a cross-product of representative parameter
// values (WTA_K 2/3/4 exercises the different descriptor comparison modes).
// NOTE(review): excerpt — the device-values line of the original Combine()
// (between these lines) is not visible here.
205 INSTANTIATE_TEST_CASE_P(CUDA_Features2D, ORB, testing::Combine(
207 testing::Values(ORB_FeaturesCount(1000)),
208 testing::Values(ORB_ScaleFactor(1.2f)),
209 testing::Values(ORB_LevelsCount(4), ORB_LevelsCount(8)),
210 testing::Values(ORB_EdgeThreshold(31)),
211 testing::Values(ORB_firstLevel(0), ORB_firstLevel(2)),
212 testing::Values(ORB_WTA_K(2), ORB_WTA_K(3), ORB_WTA_K(4)),
213 testing::Values(ORB_ScoreType(cv::ORB::HARRIS_SCORE)),
214 testing::Values(ORB_PatchSize(31), ORB_PatchSize(29)),
215 testing::Values(ORB_BlurForDescriptor(false), ORB_BlurForDescriptor(true))));
217 /////////////////////////////////////////////////////////////////////////////////////////////////
// Named wrapper types for the BruteForceMatcher test's parameterized inputs.
222 IMPLEMENT_PARAM_CLASS(DescriptorSize, int)
223 IMPLEMENT_PARAM_CLASS(UseMask, bool)
// Fixture for the CUDA brute-force matcher tests. Builds a synthetic
// query/train descriptor pair with a known structure: each query descriptor
// is copied countFactor times into the train set, each copy perturbed by a
// strictly increasing amount, so the expected ranking of matches is exact.
// NOTE(review): excerpt — braces and some member declarations (normCode,
// queryDescCount, countFactor, useMask, and the `dim`/`elem` values used
// below) from the original file are not visible here.
226 PARAM_TEST_CASE(BruteForceMatcher, cv::cuda::DeviceInfo, NormCode, DescriptorSize, UseMask)
228 cv::cuda::DeviceInfo devInfo;
236 cv::Mat query, train;
// Unpack the GTest parameter tuple into fixture members.
240 devInfo = GET_PARAM(0);
241 normCode = GET_PARAM(1);
243 useMask = GET_PARAM(3);
245 cv::cuda::setDevice(devInfo.deviceID());
247 queryDescCount = 300; // must be even number because we split train data in some cases in two
248 countFactor = 4; // do not change it
// Shared per-test RNG keeps the generated data reproducible within a run.
250 cv::RNG& rng = cvtest::TS::ptr()->get_rng();
252 cv::Mat queryBuf, trainBuf;
254 // Generate query descriptors randomly.
255 // Descriptor vector elements are integer values.
256 queryBuf.create(queryDescCount, dim, CV_32SC1);
257 rng.fill(queryBuf, cv::RNG::UNIFORM, cv::Scalar::all(0), cv::Scalar::all(3));
258 queryBuf.convertTo(queryBuf, CV_32FC1);
260 // Generate train descriptors as follows:
261 // copy each query descriptor to train set countFactor times
262 // and perturb one element of the copied descriptors
263 // in ascending order. General boundaries of the perturbation
265 trainBuf.create(queryDescCount * countFactor, dim, CV_32FC1);
266 float step = 1.f / countFactor;
267 for (int qIdx = 0; qIdx < queryDescCount; qIdx++)
269 cv::Mat queryDescriptor = queryBuf.row(qIdx);
270 for (int c = 0; c < countFactor; c++)
272 int tIdx = qIdx * countFactor + c;
273 cv::Mat trainDescriptor = trainBuf.row(tIdx);
274 queryDescriptor.copyTo(trainDescriptor);
// The c-th copy is perturbed by a value in (step*c, step*(c+1)), so copy 0
// is always the nearest neighbor, copy 1 the second-nearest, and so on.
276 float diff = rng.uniform(step * c, step * (c + 1));
277 trainDescriptor.at<float>(0, elem) += diff;
// Final float matrices used by all the matcher tests below.
281 queryBuf.convertTo(query, CV_32F);
282 trainBuf.convertTo(train, CV_32F);
// match() against a single train set: by construction, query i must match
// train descriptor i*countFactor (its unperturbed nearest copy) in image 0.
// NOTE(review): excerpt — braces and the badCount declaration/increment of
// the original are not visible here.
286 CUDA_TEST_P(BruteForceMatcher, Match_Single)
288 cv::cuda::BFMatcher_CUDA matcher(normCode);
// An all-ones mask (when useMask is set) must behave exactly like no mask.
290 cv::cuda::GpuMat mask;
293 mask.create(query.rows, train.rows, CV_8UC1);
294 mask.setTo(cv::Scalar::all(1));
297 std::vector<cv::DMatch> matches;
298 matcher.match(loadMat(query), loadMat(train), matches, mask);
// Exactly one match per query descriptor.
300 ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
303 for (size_t i = 0; i < matches.size(); i++)
305 cv::DMatch match = matches[i];
306 if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor) || (match.imgIdx != 0))
310 ASSERT_EQ(0, badCount);
// match() against a collection: the train set is added in two halves, so
// matches for the second half of queries must report imgIdx == 1 with a
// locally-rebased trainIdx. With masks, the nearest copy is disallowed and
// the second-nearest (shift = 1) must be returned instead.
// NOTE(review): excerpt — braces, the if (useMask)/else split around the two
// match() calls, and badCount bookkeeping are not visible here.
313 CUDA_TEST_P(BruteForceMatcher, Match_Collection)
315 cv::cuda::BFMatcher_CUDA matcher(normCode);
317 cv::cuda::GpuMat d_train(train);
319 // make add() twice to test such case
320 matcher.add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(0, train.rows / 2)));
321 matcher.add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(train.rows / 2, train.rows)));
323 // prepare masks (make first nearest match illegal)
324 std::vector<cv::cuda::GpuMat> masks(2);
325 for (int mi = 0; mi < 2; mi++)
327 masks[mi] = cv::cuda::GpuMat(query.rows, train.rows/2, CV_8UC1, cv::Scalar::all(1));
328 for (int di = 0; di < queryDescCount/2; di++)
329 masks[mi].col(di * countFactor).setTo(cv::Scalar::all(0));
332 std::vector<cv::DMatch> matches;
// Masked path: the nearest copy is blocked by the masks built above.
334 matcher.match(cv::cuda::GpuMat(query), matches, masks);
// Unmasked path: the nearest copy is returned directly.
336 matcher.match(cv::cuda::GpuMat(query), matches);
338 ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
// shift accounts for the masked-out nearest copy.
341 int shift = useMask ? 1 : 0;
342 for (size_t i = 0; i < matches.size(); i++)
344 cv::DMatch match = matches[i];
// First half of the queries should land in the first added train image.
346 if ((int)i < queryDescCount / 2)
348 bool validQueryIdx = (match.queryIdx == (int)i);
349 bool validTrainIdx = (match.trainIdx == (int)i * countFactor + shift);
350 bool validImgIdx = (match.imgIdx == 0);
351 if (!validQueryIdx || !validTrainIdx || !validImgIdx)
// Second half: image 1, with trainIdx rebased to that image's rows.
356 bool validQueryIdx = (match.queryIdx == (int)i);
357 bool validTrainIdx = (match.trainIdx == ((int)i - queryDescCount / 2) * countFactor + shift);
358 bool validImgIdx = (match.imgIdx == 1);
359 if (!validQueryIdx || !validTrainIdx || !validImgIdx)
364 ASSERT_EQ(0, badCount);
// knnMatch() with k = 2 against a single train set: query i's k-th neighbor
// must be train descriptor i*countFactor + k (copies are ordered by
// increasing perturbation).
// NOTE(review): excerpt — braces, the knn constant declaration, and the
// badCount/localBadCount bookkeeping are not fully visible here.
367 CUDA_TEST_P(BruteForceMatcher, KnnMatch_2_Single)
369 cv::cuda::BFMatcher_CUDA matcher(normCode);
// An all-ones mask (when useMask is set) must behave exactly like no mask.
373 cv::cuda::GpuMat mask;
376 mask.create(query.rows, train.rows, CV_8UC1);
377 mask.setTo(cv::Scalar::all(1));
380 std::vector< std::vector<cv::DMatch> > matches;
381 matcher.knnMatch(loadMat(query), loadMat(train), matches, knn, mask);
383 ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
386 for (size_t i = 0; i < matches.size(); i++)
// Each query must get exactly knn neighbors back.
388 if ((int)matches[i].size() != knn)
392 int localBadCount = 0;
393 for (int k = 0; k < knn; k++)
395 cv::DMatch match = matches[i][k];
396 if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k) || (match.imgIdx != 0))
// A query counts as bad at most once even if several neighbors are wrong.
399 badCount += localBadCount > 0 ? 1 : 0;
403 ASSERT_EQ(0, badCount);
// knnMatch() with k = 3 against a single train set — same expectations as
// the k = 2 variant: neighbor k of query i is train row i*countFactor + k.
// NOTE(review): excerpt — braces, the knn constant declaration, and the
// badCount/localBadCount bookkeeping are not fully visible here.
406 CUDA_TEST_P(BruteForceMatcher, KnnMatch_3_Single)
408 cv::cuda::BFMatcher_CUDA matcher(normCode);
// An all-ones mask (when useMask is set) must behave exactly like no mask.
412 cv::cuda::GpuMat mask;
415 mask.create(query.rows, train.rows, CV_8UC1);
416 mask.setTo(cv::Scalar::all(1));
419 std::vector< std::vector<cv::DMatch> > matches;
420 matcher.knnMatch(loadMat(query), loadMat(train), matches, knn, mask);
422 ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
425 for (size_t i = 0; i < matches.size(); i++)
// Each query must get exactly knn neighbors back.
427 if ((int)matches[i].size() != knn)
431 int localBadCount = 0;
432 for (int k = 0; k < knn; k++)
434 cv::DMatch match = matches[i][k];
435 if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k) || (match.imgIdx != 0))
// A query counts as bad at most once even if several neighbors are wrong.
438 badCount += localBadCount > 0 ? 1 : 0;
442 ASSERT_EQ(0, badCount);
// knnMatch() with k = 2 against a two-image collection: neighbors follow the
// copy ordering, offset by shift = 1 when masks hide the nearest copy, and
// imgIdx/trainIdx are rebased per added image.
// NOTE(review): excerpt — braces, the knn constant, the if (useMask)/else
// split around the two knnMatch() calls, and badCount bookkeeping are not
// fully visible here.
445 CUDA_TEST_P(BruteForceMatcher, KnnMatch_2_Collection)
447 cv::cuda::BFMatcher_CUDA matcher(normCode);
451 cv::cuda::GpuMat d_train(train);
453 // make add() twice to test such case
454 matcher.add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(0, train.rows / 2)));
455 matcher.add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(train.rows / 2, train.rows)));
457 // prepare masks (make first nearest match illegal)
458 std::vector<cv::cuda::GpuMat> masks(2);
459 for (int mi = 0; mi < 2; mi++ )
461 masks[mi] = cv::cuda::GpuMat(query.rows, train.rows / 2, CV_8UC1, cv::Scalar::all(1));
462 for (int di = 0; di < queryDescCount / 2; di++)
463 masks[mi].col(di * countFactor).setTo(cv::Scalar::all(0));
466 std::vector< std::vector<cv::DMatch> > matches;
// Masked path: nearest copy blocked, expect neighbors shifted by one.
469 matcher.knnMatch(cv::cuda::GpuMat(query), matches, knn, masks);
// Unmasked path: neighbors start at the unperturbed copy.
471 matcher.knnMatch(cv::cuda::GpuMat(query), matches, knn);
473 ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
476 int shift = useMask ? 1 : 0;
477 for (size_t i = 0; i < matches.size(); i++)
479 if ((int)matches[i].size() != knn)
483 int localBadCount = 0;
484 for (int k = 0; k < knn; k++)
486 cv::DMatch match = matches[i][k];
// First half of the queries should land in image 0, second half in image 1.
488 if ((int)i < queryDescCount / 2)
490 if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k + shift) || (match.imgIdx != 0) )
495 if ((match.queryIdx != (int)i) || (match.trainIdx != ((int)i - queryDescCount / 2) * countFactor + k + shift) || (match.imgIdx != 1) )
// A query counts as bad at most once even if several neighbors are wrong.
500 badCount += localBadCount > 0 ? 1 : 0;
504 ASSERT_EQ(0, badCount);
// knnMatch() with k = 3 against a two-image collection — same expectations
// as the k = 2 collection variant.
// NOTE(review): excerpt — braces, the knn constant, the if (useMask)/else
// split around the two knnMatch() calls, and badCount bookkeeping are not
// fully visible here.
507 CUDA_TEST_P(BruteForceMatcher, KnnMatch_3_Collection)
509 cv::cuda::BFMatcher_CUDA matcher(normCode);
513 cv::cuda::GpuMat d_train(train);
515 // make add() twice to test such case
516 matcher.add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(0, train.rows / 2)));
517 matcher.add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(train.rows / 2, train.rows)));
519 // prepare masks (make first nearest match illegal)
520 std::vector<cv::cuda::GpuMat> masks(2);
521 for (int mi = 0; mi < 2; mi++ )
523 masks[mi] = cv::cuda::GpuMat(query.rows, train.rows / 2, CV_8UC1, cv::Scalar::all(1));
524 for (int di = 0; di < queryDescCount / 2; di++)
525 masks[mi].col(di * countFactor).setTo(cv::Scalar::all(0));
528 std::vector< std::vector<cv::DMatch> > matches;
// Masked path: nearest copy blocked, expect neighbors shifted by one.
531 matcher.knnMatch(cv::cuda::GpuMat(query), matches, knn, masks);
// Unmasked path: neighbors start at the unperturbed copy.
533 matcher.knnMatch(cv::cuda::GpuMat(query), matches, knn);
535 ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
538 int shift = useMask ? 1 : 0;
539 for (size_t i = 0; i < matches.size(); i++)
541 if ((int)matches[i].size() != knn)
545 int localBadCount = 0;
546 for (int k = 0; k < knn; k++)
548 cv::DMatch match = matches[i][k];
// First half of the queries should land in image 0, second half in image 1.
550 if ((int)i < queryDescCount / 2)
552 if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k + shift) || (match.imgIdx != 0) )
557 if ((match.queryIdx != (int)i) || (match.trainIdx != ((int)i - queryDescCount / 2) * countFactor + k + shift) || (match.imgIdx != 1) )
// A query counts as bad at most once even if several neighbors are wrong.
562 badCount += localBadCount > 0 ? 1 : 0;
566 ASSERT_EQ(0, badCount);
// radiusMatch() against a single train set: with radius = step = 1/countFactor
// only the unperturbed copy (distance < step) falls inside, so each query must
// return exactly one match — train row i*countFactor.
// NOTE(review): excerpt — braces of the try/else and the badCount bookkeeping
// are not fully visible here.
569 CUDA_TEST_P(BruteForceMatcher, RadiusMatch_Single)
571 cv::cuda::BFMatcher_CUDA matcher(normCode);
573 const float radius = 1.f / countFactor;
// radiusMatch requires global atomics; otherwise expect StsNotImplemented.
575 if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
579 std::vector< std::vector<cv::DMatch> > matches;
580 matcher.radiusMatch(loadMat(query), loadMat(train), matches, radius);
582 catch (const cv::Exception& e)
584 ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
// Capable device: an all-ones mask (when useMask is set) must behave
// exactly like no mask.
589 cv::cuda::GpuMat mask;
592 mask.create(query.rows, train.rows, CV_8UC1);
593 mask.setTo(cv::Scalar::all(1));
596 std::vector< std::vector<cv::DMatch> > matches;
597 matcher.radiusMatch(loadMat(query), loadMat(train), matches, radius, mask);
599 ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
602 for (size_t i = 0; i < matches.size(); i++)
// Exactly one neighbor may fall inside the radius.
604 if ((int)matches[i].size() != 1)
608 cv::DMatch match = matches[i][0];
609 if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor) || (match.imgIdx != 0))
614 ASSERT_EQ(0, badCount);
// radiusMatch() against a two-image collection: radius = n/countFactor admits
// the n nearest copies; with masks the nearest copy is excluded, so only
// n-1 matches are expected, each shifted by one.
// NOTE(review): excerpt — braces, the `n` constant declaration, the two
// try/else and if (useMask)/else splits, and badCount bookkeeping are not
// fully visible here.
618 CUDA_TEST_P(BruteForceMatcher, RadiusMatch_Collection)
620 cv::cuda::BFMatcher_CUDA matcher(normCode);
623 const float radius = 1.f / countFactor * n;
625 cv::cuda::GpuMat d_train(train);
627 // make add() twice to test such case
628 matcher.add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(0, train.rows / 2)));
629 matcher.add(std::vector<cv::cuda::GpuMat>(1, d_train.rowRange(train.rows / 2, train.rows)));
631 // prepare masks (make first nearest match illegal)
632 std::vector<cv::cuda::GpuMat> masks(2);
633 for (int mi = 0; mi < 2; mi++)
635 masks[mi] = cv::cuda::GpuMat(query.rows, train.rows / 2, CV_8UC1, cv::Scalar::all(1));
636 for (int di = 0; di < queryDescCount / 2; di++)
637 masks[mi].col(di * countFactor).setTo(cv::Scalar::all(0));
// radiusMatch requires global atomics; otherwise expect StsNotImplemented.
640 if (!supportFeature(devInfo, cv::cuda::GLOBAL_ATOMICS))
644 std::vector< std::vector<cv::DMatch> > matches;
645 matcher.radiusMatch(cv::cuda::GpuMat(query), matches, radius, masks);
647 catch (const cv::Exception& e)
649 ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
654 std::vector< std::vector<cv::DMatch> > matches;
// Masked path: nearest copy blocked by the masks built above.
657 matcher.radiusMatch(cv::cuda::GpuMat(query), matches, radius, masks);
// Unmasked path: all n copies inside the radius are returned.
659 matcher.radiusMatch(cv::cuda::GpuMat(query), matches, radius);
661 ASSERT_EQ(static_cast<size_t>(queryDescCount), matches.size());
664 int shift = useMask ? 1 : 0;
665 int needMatchCount = useMask ? n-1 : n;
666 for (size_t i = 0; i < matches.size(); i++)
668 if ((int)matches[i].size() != needMatchCount)
672 int localBadCount = 0;
673 for (int k = 0; k < needMatchCount; k++)
675 cv::DMatch match = matches[i][k];
// First half of the queries should land in image 0, second half in image 1.
677 if ((int)i < queryDescCount / 2)
679 if ((match.queryIdx != (int)i) || (match.trainIdx != (int)i * countFactor + k + shift) || (match.imgIdx != 0) )
684 if ((match.queryIdx != (int)i) || (match.trainIdx != ((int)i - queryDescCount / 2) * countFactor + k + shift) || (match.imgIdx != 1) )
// A query counts as bad at most once even if several neighbors are wrong.
689 badCount += localBadCount > 0 ? 1 : 0;
693 ASSERT_EQ(0, badCount);
// Instantiate matcher tests over two norms, several descriptor widths
// (chosen to hit different CUDA kernel specializations), and mask on/off.
// NOTE(review): excerpt — the device-values line of the original Combine()
// (between these lines) is not visible here.
697 INSTANTIATE_TEST_CASE_P(CUDA_Features2D, BruteForceMatcher, testing::Combine(
699 testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2)),
700 testing::Values(DescriptorSize(57), DescriptorSize(64), DescriptorSize(83), DescriptorSize(128), DescriptorSize(179), DescriptorSize(256), DescriptorSize(304)),
701 testing::Values(UseMask(false), UseMask(true))));