// Source file: modules/stitching/src/matchers.cpp (from profile/ivi/opencv.git)
// Snapshot taken at commit: "Doc: update video processing tutorial code for OpenCV v2.4.9 and v3a"
1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 //  By downloading, copying, installing or using the software you agree to this license.
6 //  If you do not agree to this license, do not download, install,
7 //  copy or use the software.
8 //
9 //
10 //                          License Agreement
11 //                For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
16 //
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
19 //
20 //   * Redistribution's of source code must retain the above copyright notice,
21 //     this list of conditions and the following disclaimer.
22 //
23 //   * Redistribution's in binary form must reproduce the above copyright notice,
24 //     this list of conditions and the following disclaimer in the documentation
25 //     and/or other materials provided with the distribution.
26 //
27 //   * The name of the copyright holders may not be used to endorse or promote products
28 //     derived from this software without specific prior written permission.
29 //
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
40 //
41 //M*/
42
43 #include "precomp.hpp"
44
45 using namespace cv;
46 using namespace cv::detail;
47 using namespace cv::cuda;
48
#ifdef HAVE_OPENCV_NONFREE
#include "opencv2/nonfree.hpp"

// Force the nonfree (SURF) module to register its algorithms with the
// Algorithm factory at load time, so Algorithm::create("Feature2D.SURF")
// below does not return an empty pointer.
static bool makeUseOfNonfree = initModule_nonfree();
#endif
54
55 namespace {
56
// Associates a distance value with the index it was measured for; ordered by
// distance so containers of pairs can be sorted/selected by proximity.
struct DistIdxPair
{
    double dist; // distance used as the sort key
    int idx;     // index of the associated element

    bool operator<(const DistIdxPair &rhs) const
    {
        return dist < rhs.dist;
    }
};
63
64
65 struct MatchPairsBody : ParallelLoopBody
66 {
67     MatchPairsBody(FeaturesMatcher &_matcher, const std::vector<ImageFeatures> &_features,
68                    std::vector<MatchesInfo> &_pairwise_matches, std::vector<std::pair<int,int> > &_near_pairs)
69             : matcher(_matcher), features(_features),
70               pairwise_matches(_pairwise_matches), near_pairs(_near_pairs) {}
71
72     void operator ()(const Range &r) const
73     {
74         const int num_images = static_cast<int>(features.size());
75         for (int i = r.start; i < r.end; ++i)
76         {
77             int from = near_pairs[i].first;
78             int to = near_pairs[i].second;
79             int pair_idx = from*num_images + to;
80
81             matcher(features[from], features[to], pairwise_matches[pair_idx]);
82             pairwise_matches[pair_idx].src_img_idx = from;
83             pairwise_matches[pair_idx].dst_img_idx = to;
84
85             size_t dual_pair_idx = to*num_images + from;
86
87             pairwise_matches[dual_pair_idx] = pairwise_matches[pair_idx];
88             pairwise_matches[dual_pair_idx].src_img_idx = to;
89             pairwise_matches[dual_pair_idx].dst_img_idx = from;
90
91             if (!pairwise_matches[pair_idx].H.empty())
92                 pairwise_matches[dual_pair_idx].H = pairwise_matches[pair_idx].H.inv();
93
94             for (size_t j = 0; j < pairwise_matches[dual_pair_idx].matches.size(); ++j)
95                 std::swap(pairwise_matches[dual_pair_idx].matches[j].queryIdx,
96                           pairwise_matches[dual_pair_idx].matches[j].trainIdx);
97             LOG(".");
98         }
99     }
100
101     FeaturesMatcher &matcher;
102     const std::vector<ImageFeatures> &features;
103     std::vector<MatchesInfo> &pairwise_matches;
104     std::vector<std::pair<int,int> > &near_pairs;
105
106 private:
107     void operator =(const MatchPairsBody&);
108 };
109
110
111 //////////////////////////////////////////////////////////////////////////////
112
113 typedef std::set<std::pair<int,int> > MatchesSet;
114
115 // These two classes are aimed to find features matches only, not to
116 // estimate homography
117
// CPU feature matcher: finds 2-NN matches in both directions and keeps the
// ratio-test-filtered correspondences; homography estimation is done by the
// caller (see BestOf2NearestMatcher::match). The `true` passed to the base
// marks it thread-safe, enabling parallel pairwise matching.
class CpuMatcher : public FeaturesMatcher
{
public:
    // match_conf: ratio-test confidence; a candidate is kept only when
    // best_dist < (1 - match_conf) * second_best_dist.
    CpuMatcher(float match_conf) : FeaturesMatcher(true), match_conf_(match_conf) {}
    void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info);

private:
    float match_conf_;
};
127
#ifdef HAVE_OPENCV_CUDAFEATURES2D
// CUDA feature matcher mirroring CpuMatcher's two-way ratio-test matching.
// Keeps device buffers alive between calls for reuse; collectGarbage()
// releases them. NOTE(review): unlike CpuMatcher it does not pass `true` to
// the FeaturesMatcher base, so it is treated as not thread-safe.
class GpuMatcher : public FeaturesMatcher
{
public:
    // match_conf: ratio-test confidence, same meaning as in CpuMatcher.
    GpuMatcher(float match_conf) : match_conf_(match_conf) {}
    void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info);

    // Releases device buffers and host-side scratch storage.
    void collectGarbage();

private:
    float match_conf_;
    GpuMat descriptors1_, descriptors2_;              // device copies of descriptors
    GpuMat train_idx_, distance_, all_dist_;          // k-NN intermediate results
    std::vector< std::vector<DMatch> > pair_matches;  // downloaded match candidates
};
#endif
144
145
146 void CpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
147 {
148     CV_Assert(features1.descriptors.type() == features2.descriptors.type());
149     CV_Assert(features2.descriptors.depth() == CV_8U || features2.descriptors.depth() == CV_32F);
150
151 #ifdef HAVE_TEGRA_OPTIMIZATION
152     if (tegra::match2nearest(features1, features2, matches_info, match_conf_))
153         return;
154 #endif
155
156     matches_info.matches.clear();
157
158     Ptr<DescriptorMatcher> matcher;
159 #if 0 // TODO check this
160     if (ocl::useOpenCL())
161     {
162         matcher = makePtr<BFMatcher>((int)NORM_L2);
163     }
164     else
165 #endif
166     {
167         Ptr<flann::IndexParams> indexParams = makePtr<flann::KDTreeIndexParams>();
168         Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>();
169
170         if (features2.descriptors.depth() == CV_8U)
171         {
172             indexParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
173             searchParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
174         }
175
176         matcher = makePtr<FlannBasedMatcher>(indexParams, searchParams);
177     }
178     std::vector< std::vector<DMatch> > pair_matches;
179     MatchesSet matches;
180
181     // Find 1->2 matches
182     matcher->knnMatch(features1.descriptors, features2.descriptors, pair_matches, 2);
183     for (size_t i = 0; i < pair_matches.size(); ++i)
184     {
185         if (pair_matches[i].size() < 2)
186             continue;
187         const DMatch& m0 = pair_matches[i][0];
188         const DMatch& m1 = pair_matches[i][1];
189         if (m0.distance < (1.f - match_conf_) * m1.distance)
190         {
191             matches_info.matches.push_back(m0);
192             matches.insert(std::make_pair(m0.queryIdx, m0.trainIdx));
193         }
194     }
195     LOG("\n1->2 matches: " << matches_info.matches.size() << endl);
196
197     // Find 2->1 matches
198     pair_matches.clear();
199     matcher->knnMatch(features2.descriptors, features1.descriptors, pair_matches, 2);
200     for (size_t i = 0; i < pair_matches.size(); ++i)
201     {
202         if (pair_matches[i].size() < 2)
203             continue;
204         const DMatch& m0 = pair_matches[i][0];
205         const DMatch& m1 = pair_matches[i][1];
206         if (m0.distance < (1.f - match_conf_) * m1.distance)
207             if (matches.find(std::make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())
208                 matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
209     }
210     LOG("1->2 & 2->1 matches: " << matches_info.matches.size() << endl);
211 }
212
213 #ifdef HAVE_OPENCV_CUDAFEATURES2D
// GPU variant of the two-way 2-NN matching: uploads both descriptor sets,
// runs brute-force k-NN (k=2, L2 norm) in both directions on the device,
// downloads the candidates and applies the same ratio/mutuality filtering
// as CpuMatcher::match. NOTE(review): NORM_L2 is used unconditionally here,
// even for 8-bit descriptors — confirm binary descriptors are never routed
// to this matcher.
void GpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
{
    matches_info.matches.clear();

    // Reuse previously allocated device buffers when they are large enough.
    ensureSizeIsEnough(features1.descriptors.size(), features1.descriptors.type(), descriptors1_);
    ensureSizeIsEnough(features2.descriptors.size(), features2.descriptors.type(), descriptors2_);

    descriptors1_.upload(features1.descriptors);
    descriptors2_.upload(features2.descriptors);

    BFMatcher_CUDA matcher(NORM_L2);
    MatchesSet matches;

    // Find 1->2 matches
    pair_matches.clear();
    matcher.knnMatchSingle(descriptors1_, descriptors2_, train_idx_, distance_, all_dist_, 2);
    matcher.knnMatchDownload(train_idx_, distance_, pair_matches);
    for (size_t i = 0; i < pair_matches.size(); ++i)
    {
        if (pair_matches[i].size() < 2)
            continue;
        const DMatch& m0 = pair_matches[i][0];
        const DMatch& m1 = pair_matches[i][1];
        // Ratio test: keep the best match only if clearly better than 2nd best.
        if (m0.distance < (1.f - match_conf_) * m1.distance)
        {
            matches_info.matches.push_back(m0);
            matches.insert(std::make_pair(m0.queryIdx, m0.trainIdx));
        }
    }

    // Find 2->1 matches, skipping pairs already recorded by the 1->2 pass
    pair_matches.clear();
    matcher.knnMatchSingle(descriptors2_, descriptors1_, train_idx_, distance_, all_dist_, 2);
    matcher.knnMatchDownload(train_idx_, distance_, pair_matches);
    for (size_t i = 0; i < pair_matches.size(); ++i)
    {
        if (pair_matches[i].size() < 2)
            continue;
        const DMatch& m0 = pair_matches[i][0];
        const DMatch& m1 = pair_matches[i][1];
        if (m0.distance < (1.f - match_conf_) * m1.distance)
            if (matches.find(std::make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())
                matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
    }
}
259
// Frees all device buffers and forces deallocation of the host-side match
// scratch vector (swap with an empty vector guarantees capacity release,
// unlike clear()).
void GpuMatcher::collectGarbage()
{
    descriptors1_.release();
    descriptors2_.release();
    train_idx_.release();
    distance_.release();
    all_dist_.release();
    std::vector< std::vector<DMatch> >().swap(pair_matches);
}
269 #endif
270
271 } // namespace
272
273
274 namespace cv {
275 namespace detail {
276
// Finds features in the whole image, then records the source image size in
// the result (used downstream, e.g. for coordinate centering in
// BestOf2NearestMatcher::match).
void FeaturesFinder::operator ()(InputArray  image, ImageFeatures &features)
{
    find(image, features);
    features.img_size = image.size();
}
282
283
284 void FeaturesFinder::operator ()(InputArray image, ImageFeatures &features, const std::vector<Rect> &rois)
285 {
286     std::vector<ImageFeatures> roi_features(rois.size());
287     size_t total_kps_count = 0;
288     int total_descriptors_height = 0;
289
290     for (size_t i = 0; i < rois.size(); ++i)
291     {
292         find(image.getUMat()(rois[i]), roi_features[i]);
293         total_kps_count += roi_features[i].keypoints.size();
294         total_descriptors_height += roi_features[i].descriptors.rows;
295     }
296
297     features.img_size = image.size();
298     features.keypoints.resize(total_kps_count);
299     features.descriptors.create(total_descriptors_height,
300                                 roi_features[0].descriptors.cols,
301                                 roi_features[0].descriptors.type());
302
303     int kp_idx = 0;
304     int descr_offset = 0;
305     for (size_t i = 0; i < rois.size(); ++i)
306     {
307         for (size_t j = 0; j < roi_features[i].keypoints.size(); ++j, ++kp_idx)
308         {
309             features.keypoints[kp_idx] = roi_features[i].keypoints[j];
310             features.keypoints[kp_idx].pt.x += (float)rois[i].x;
311             features.keypoints[kp_idx].pt.y += (float)rois[i].y;
312         }
313         UMat subdescr = features.descriptors.rowRange(
314                 descr_offset, descr_offset + roi_features[i].descriptors.rows);
315         roi_features[i].descriptors.copyTo(subdescr);
316         descr_offset += roi_features[i].descriptors.rows;
317     }
318 }
319
320
// Creates SURF-based detection/extraction through the Algorithm factory
// ("Feature2D.SURF", registered by the nonfree module). If the descriptor
// octave settings equal the detector ones, a single combined SURF object is
// used; otherwise separate detector and extractor instances are configured
// with their respective octave parameters.
SurfFeaturesFinder::SurfFeaturesFinder(double hess_thresh, int num_octaves, int num_layers,
                                       int num_octaves_descr, int num_layers_descr)
{
    if (num_octaves_descr == num_octaves && num_layers_descr == num_layers)
    {
        surf = Algorithm::create<Feature2D>("Feature2D.SURF");
        if( !surf )
            CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
        surf->set("hessianThreshold", hess_thresh);
        surf->set("nOctaves", num_octaves);
        surf->set("nOctaveLayers", num_layers);
    }
    else
    {
        detector_ = Algorithm::create<FeatureDetector>("Feature2D.SURF");
        extractor_ = Algorithm::create<DescriptorExtractor>("Feature2D.SURF");

        if( !detector_ || !extractor_ )
            CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );

        // Detection uses the detector octave settings...
        detector_->set("hessianThreshold", hess_thresh);
        detector_->set("nOctaves", num_octaves);
        detector_->set("nOctaveLayers", num_layers);

        // ...while description uses its own, possibly different, settings.
        extractor_->set("nOctaves", num_octaves_descr);
        extractor_->set("nOctaveLayers", num_layers_descr);
    }
}
349
// Detects SURF keypoints and computes descriptors on a grayscale version of
// the input. BGR input is converted; 8-bit single-channel input is used as-is.
void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
{
    UMat gray_image;
    CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC1));
    if(image.type() == CV_8UC3)
    {
        cvtColor(image, gray_image, COLOR_BGR2GRAY);
    }
    else
    {
        gray_image = image.getUMat();
    }
    if (!surf)
    {
        // Split path: separate detector/extractor with different octave settings.
        detector_->detect(gray_image, features.keypoints);
        extractor_->compute(gray_image, features.keypoints, features.descriptors);
    }
    else
    {
        // Combined path: detect and describe in one call, then reshape the
        // descriptor buffer to one row per keypoint.
        UMat descriptors;
        (*surf)(gray_image, Mat(), features.keypoints, descriptors);
        features.descriptors = descriptors.reshape(1, (int)features.keypoints.size());
    }
}
374
375 OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, int n_features, float scaleFactor, int nlevels)
376 {
377     grid_size = _grid_size;
378     orb = makePtr<ORB>(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels);
379 }
380
// Finds ORB features. With a 1x1 grid the whole image is processed in one
// call; otherwise the image is split into grid cells and ORB runs per cell,
// shifting keypoint coordinates back into whole-image space — this spreads
// features across the image instead of clustering in highly textured areas.
void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
{
    UMat gray_image;

    CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC4) || (image.type() == CV_8UC1));

    // Convert any supported input to 8-bit single-channel grayscale.
    if (image.type() == CV_8UC3) {
        cvtColor(image, gray_image, COLOR_BGR2GRAY);
    } else if (image.type() == CV_8UC4) {
        cvtColor(image, gray_image, COLOR_BGRA2GRAY);
    } else if (image.type() == CV_8UC1) {
        gray_image = image.getUMat();
    } else {
        CV_Error(Error::StsUnsupportedFormat, "");
    }

    if (grid_size.area() == 1)
        (*orb)(gray_image, Mat(), features.keypoints, features.descriptors);
    else
    {
        features.keypoints.clear();
        features.descriptors.release();

        std::vector<KeyPoint> points;
        Mat _descriptors;
        UMat descriptors;

        for (int r = 0; r < grid_size.height; ++r)
            for (int c = 0; c < grid_size.width; ++c)
            {
                // Cell bounds by integer division: the cells tile the image
                // exactly, with rounding remainders absorbed between cells.
                int xl = c * gray_image.cols / grid_size.width;
                int yl = r * gray_image.rows / grid_size.height;
                int xr = (c+1) * gray_image.cols / grid_size.width;
                int yr = (r+1) * gray_image.rows / grid_size.height;

                // LOGLN("OrbFeaturesFinder::find: gray_image.empty=" << (gray_image.empty()?"true":"false") << ", "
                //     << " gray_image.size()=(" << gray_image.size().width << "x" << gray_image.size().height << "), "
                //     << " yl=" << yl << ", yr=" << yr << ", "
                //     << " xl=" << xl << ", xr=" << xr << ", gray_image.data=" << ((size_t)gray_image.data) << ", "
                //     << "gray_image.dims=" << gray_image.dims << "\n");

                UMat gray_image_part=gray_image(Range(yl, yr), Range(xl, xr));
                // LOGLN("OrbFeaturesFinder::find: gray_image_part.empty=" << (gray_image_part.empty()?"true":"false") << ", "
                //     << " gray_image_part.size()=(" << gray_image_part.size().width << "x" << gray_image_part.size().height << "), "
                //     << " gray_image_part.dims=" << gray_image_part.dims << ", "
                //     << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n");

                (*orb)(gray_image_part, UMat(), points, descriptors);

                // Shift per-cell keypoints into whole-image coordinates.
                features.keypoints.reserve(features.keypoints.size() + points.size());
                for (std::vector<KeyPoint>::iterator kp = points.begin(); kp != points.end(); ++kp)
                {
                    kp->pt.x += xl;
                    kp->pt.y += yl;
                    features.keypoints.push_back(*kp);
                }
                // Accumulate descriptors on the host, one block per cell.
                _descriptors.push_back(descriptors.getMat(ACCESS_READ));
            }

        // TODO optimize copyTo()
        //features.descriptors = _descriptors.getUMat(ACCESS_READ);
        _descriptors.copyTo(features.descriptors);
    }
}
445
446 #ifdef HAVE_OPENCV_NONFREE
// GPU SURF finder: configures the CUDA SURF object and stores the octave
// settings, which find() applies separately for the detection pass and the
// description pass.
SurfFeaturesFinderGpu::SurfFeaturesFinderGpu(double hess_thresh, int num_octaves, int num_layers,
                                             int num_octaves_descr, int num_layers_descr)
{
    surf_.keypointsRatio = 0.1f;          // cap on keypoints relative to image size
    surf_.hessianThreshold = hess_thresh;
    surf_.extended = false;               // non-extended (shorter) descriptors
    num_octaves_ = num_octaves;
    num_layers_ = num_layers;
    num_octaves_descr_ = num_octaves_descr;
    num_layers_descr_ = num_layers_descr;
}
458
459
// Uploads the image, converts it to grayscale on the device, then runs SURF
// twice: first to detect keypoints (upright = false), then — with the
// descriptor octave settings and upright = true — to compute descriptors for
// those keypoints. Results are downloaded to host memory.
// NOTE(review): only the depth is asserted, yet cvtColor(..., COLOR_BGR2GRAY)
// requires a multi-channel input — confirm callers never pass CV_8UC1 here.
void SurfFeaturesFinderGpu::find(InputArray image, ImageFeatures &features)
{
    CV_Assert(image.depth() == CV_8U);

    ensureSizeIsEnough(image.size(), image.type(), image_);
    image_.upload(image);

    ensureSizeIsEnough(image.size(), CV_8UC1, gray_image_);
    cvtColor(image_, gray_image_, COLOR_BGR2GRAY);

    // Detection pass with the detector octave settings.
    surf_.nOctaves = num_octaves_;
    surf_.nOctaveLayers = num_layers_;
    surf_.upright = false;
    surf_(gray_image_, GpuMat(), keypoints_);

    // Description pass with its own octave settings; the trailing `true`
    // presumably tells SURF to reuse the provided keypoints — confirm against
    // the cuda::SURF_CUDA API documentation.
    surf_.nOctaves = num_octaves_descr_;
    surf_.nOctaveLayers = num_layers_descr_;
    surf_.upright = true;
    surf_(gray_image_, GpuMat(), keypoints_, descriptors_, true);
    surf_.downloadKeypoints(keypoints_, features.keypoints);

    descriptors_.download(features.descriptors);
}
483
// Releases all device-side buffers held between find() calls.
void SurfFeaturesFinderGpu::collectGarbage()
{
    surf_.releaseMemory();
    image_.release();
    gray_image_.release();
    keypoints_.release();
    descriptors_.release();
}
492 #endif
493
494
495 //////////////////////////////////////////////////////////////////////////////
496
// Default state: no associated image pair (-1 indices), no inliers, zero confidence.
MatchesInfo::MatchesInfo() : src_img_idx(-1), dst_img_idx(-1), num_inliers(0), confidence(0) {}
498
// Copy construction delegates to operator= so H is deep-copied (clone()d) there.
MatchesInfo::MatchesInfo(const MatchesInfo &other) { *this = other; }
500
// Field-wise assignment; H is clone()d so the two MatchesInfo objects never
// share homography storage (Mat assignment alone would share data).
const MatchesInfo& MatchesInfo::operator =(const MatchesInfo &other)
{
    src_img_idx = other.src_img_idx;
    dst_img_idx = other.dst_img_idx;
    matches = other.matches;
    inliers_mask = other.inliers_mask;
    num_inliers = other.num_inliers;
    H = other.H.clone();
    confidence = other.confidence;
    return *this;
}
512
513
514 //////////////////////////////////////////////////////////////////////////////
515
516 void FeaturesMatcher::operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
517                                   const UMat &mask)
518 {
519     const int num_images = static_cast<int>(features.size());
520
521     CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.cols == num_images && mask.rows));
522     Mat_<uchar> mask_(mask.getMat(ACCESS_READ));
523     if (mask_.empty())
524         mask_ = Mat::ones(num_images, num_images, CV_8U);
525
526     std::vector<std::pair<int,int> > near_pairs;
527     for (int i = 0; i < num_images - 1; ++i)
528         for (int j = i + 1; j < num_images; ++j)
529             if (features[i].keypoints.size() > 0 && features[j].keypoints.size() > 0 && mask_(i, j))
530                 near_pairs.push_back(std::make_pair(i, j));
531
532     pairwise_matches.resize(num_images * num_images);
533     MatchPairsBody body(*this, features, pairwise_matches, near_pairs);
534
535     if (is_thread_safe_)
536         parallel_for_(Range(0, static_cast<int>(near_pairs.size())), body);
537     else
538         body(Range(0, static_cast<int>(near_pairs.size())));
539     LOGLN_CHAT("");
540 }
541
542
543 //////////////////////////////////////////////////////////////////////////////
544
// Picks the matching backend: a GpuMatcher when GPU use is requested, CUDA
// features2d support is compiled in and a CUDA device is available;
// otherwise the CPU matcher. The two thresholds control when homography
// estimation and when its inlier-only refinement are attempted in match().
BestOf2NearestMatcher::BestOf2NearestMatcher(bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2)
{
    (void)try_use_gpu;  // silences unused-parameter warning in CPU-only builds

#ifdef HAVE_OPENCV_CUDAFEATURES2D
    if (try_use_gpu && getCudaEnabledDeviceCount() > 0)
    {
        impl_ = makePtr<GpuMatcher>(match_conf);
    }
    else
#endif
    {
        impl_ = makePtr<CpuMatcher>(match_conf);
    }

    // Inherit thread-safety from the chosen backend (CPU: yes, GPU: no).
    is_thread_safe_ = impl_->isThreadSafe();
    num_matches_thresh1_ = num_matches_thresh1;
    num_matches_thresh2_ = num_matches_thresh2;
}
564
565
// Runs the underlying 2-NN matcher, then estimates a homography between the
// two images with RANSAC, computes an inlier-based confidence score, and
// finally re-estimates the homography from the inliers only. Results (matches,
// H, inlier mask, inlier count, confidence) are stored in matches_info.
void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
                                  MatchesInfo &matches_info)
{
    (*impl_)(features1, features2, matches_info);

    // Check if it makes sense to find homography
    if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
        return;

    // Construct point-point correspondences for homography estimation.
    // Coordinates are shifted so the image center is the origin — presumably
    // for better numerical conditioning of the estimate; downstream consumers
    // of H must use the same convention (confirm).
    Mat src_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
    Mat dst_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
    for (size_t i = 0; i < matches_info.matches.size(); ++i)
    {
        const DMatch& m = matches_info.matches[i];

        Point2f p = features1.keypoints[m.queryIdx].pt;
        p.x -= features1.img_size.width * 0.5f;
        p.y -= features1.img_size.height * 0.5f;
        src_points.at<Point2f>(0, static_cast<int>(i)) = p;

        p = features2.keypoints[m.trainIdx].pt;
        p.x -= features2.img_size.width * 0.5f;
        p.y -= features2.img_size.height * 0.5f;
        dst_points.at<Point2f>(0, static_cast<int>(i)) = p;
    }

    // Find pair-wise motion; a near-zero determinant means a degenerate H.
    matches_info.H = findHomography(src_points, dst_points, matches_info.inliers_mask, RANSAC);
    if (matches_info.H.empty() || std::abs(determinant(matches_info.H)) < std::numeric_limits<double>::epsilon())
        return;

    // Find number of inliers
    matches_info.num_inliers = 0;
    for (size_t i = 0; i < matches_info.inliers_mask.size(); ++i)
        if (matches_info.inliers_mask[i])
            matches_info.num_inliers++;

    // These coeffs are from paper M. Brown and D. Lowe. "Automatic Panoramic Image Stitching
    // using Invariant Features"
    matches_info.confidence = matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());

    // Set zero confidence to remove matches between too close images, as they don't provide
    // additional information anyway. The threshold was set experimentally.
    matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;

    // Check if we should try to refine motion
    if (matches_info.num_inliers < num_matches_thresh2_)
        return;

    // Construct point-point correspondences for inliers only
    src_points.create(1, matches_info.num_inliers, CV_32FC2);
    dst_points.create(1, matches_info.num_inliers, CV_32FC2);
    int inlier_idx = 0;
    for (size_t i = 0; i < matches_info.matches.size(); ++i)
    {
        if (!matches_info.inliers_mask[i])
            continue;

        const DMatch& m = matches_info.matches[i];

        Point2f p = features1.keypoints[m.queryIdx].pt;
        p.x -= features1.img_size.width * 0.5f;
        p.y -= features1.img_size.height * 0.5f;
        src_points.at<Point2f>(0, inlier_idx) = p;

        p = features2.keypoints[m.trainIdx].pt;
        p.x -= features2.img_size.width * 0.5f;
        p.y -= features2.img_size.height * 0.5f;
        dst_points.at<Point2f>(0, inlier_idx) = p;

        inlier_idx++;
    }

    // Rerun motion estimation on inliers only
    matches_info.H = findHomography(src_points, dst_points, RANSAC);
}
643
// Forwards to the backend matcher so its scratch/device buffers are freed.
void BestOf2NearestMatcher::collectGarbage()
{
    impl_->collectGarbage();
}
648
649
650 BestOf2NearestRangeMatcher::BestOf2NearestRangeMatcher(int range_width, bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2): BestOf2NearestMatcher(try_use_gpu, match_conf, num_matches_thresh1, num_matches_thresh2)
651 {
652     range_width_ = range_width;
653 }
654
655
656 void BestOf2NearestRangeMatcher::operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
657                                   const UMat &mask)
658 {
659     const int num_images = static_cast<int>(features.size());
660
661     CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.cols == num_images && mask.rows));
662     Mat_<uchar> mask_(mask.getMat(ACCESS_READ));
663     if (mask_.empty())
664         mask_ = Mat::ones(num_images, num_images, CV_8U);
665
666     std::vector<std::pair<int,int> > near_pairs;
667     for (int i = 0; i < num_images - 1; ++i)
668         for (int j = i + 1; j < std::min(num_images, i + range_width_); ++j)
669             if (features[i].keypoints.size() > 0 && features[j].keypoints.size() > 0 && mask_(i, j))
670                 near_pairs.push_back(std::make_pair(i, j));
671
672     pairwise_matches.resize(num_images * num_images);
673     MatchPairsBody body(*this, features, pairwise_matches, near_pairs);
674
675     if (is_thread_safe_)
676         parallel_for_(Range(0, static_cast<int>(near_pairs.size())), body);
677     else
678         body(Range(0, static_cast<int>(near_pairs.size())));
679     LOGLN_CHAT("");
680 }
681
682
683 } // namespace detail
684 } // namespace cv