// modules/stitching/src/matchers.cpp  (platform/upstream/opencv.git)
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

#include "opencv2/core/opencl/ocl_defs.hpp"

using namespace cv;
using namespace cv::detail;
using namespace cv::cuda;

#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d.hpp"
using xfeatures2d::SURF;
#endif

#ifdef HAVE_OPENCV_CUDAIMGPROC
#  include "opencv2/cudaimgproc.hpp"
#endif

namespace {

struct DistIdxPair
{
    bool operator<(const DistIdxPair &other) const { return dist < other.dist; }
    double dist;
    int idx;
};


struct MatchPairsBody : ParallelLoopBody
{
    MatchPairsBody(FeaturesMatcher &_matcher, const std::vector<ImageFeatures> &_features,
                   std::vector<MatchesInfo> &_pairwise_matches, std::vector<std::pair<int,int> > &_near_pairs)
            : matcher(_matcher), features(_features),
              pairwise_matches(_pairwise_matches), near_pairs(_near_pairs) {}

    void operator ()(const Range &r) const CV_OVERRIDE
    {
        cv::RNG rng = cv::theRNG(); // save entry rng state
        const int num_images = static_cast<int>(features.size());
        for (int i = r.start; i < r.end; ++i)
        {
            cv::theRNG() = cv::RNG(rng.state + i); // force "stable" RNG seed for each processed pair

            int from = near_pairs[i].first;
            int to = near_pairs[i].second;
            int pair_idx = from*num_images + to;

            matcher(features[from], features[to], pairwise_matches[pair_idx]);
            pairwise_matches[pair_idx].src_img_idx = from;
            pairwise_matches[pair_idx].dst_img_idx = to;

            size_t dual_pair_idx = to*num_images + from;

            pairwise_matches[dual_pair_idx] = pairwise_matches[pair_idx];
            pairwise_matches[dual_pair_idx].src_img_idx = to;
            pairwise_matches[dual_pair_idx].dst_img_idx = from;

            if (!pairwise_matches[pair_idx].H.empty())
                pairwise_matches[dual_pair_idx].H = pairwise_matches[pair_idx].H.inv();

            for (size_t j = 0; j < pairwise_matches[dual_pair_idx].matches.size(); ++j)
                std::swap(pairwise_matches[dual_pair_idx].matches[j].queryIdx,
                          pairwise_matches[dual_pair_idx].matches[j].trainIdx);
            LOG(".");
        }
    }

    FeaturesMatcher &matcher;
    const std::vector<ImageFeatures> &features;
    std::vector<MatchesInfo> &pairwise_matches;
    std::vector<std::pair<int,int> > &near_pairs;

private:
    void operator =(const MatchPairsBody&);
};


struct FindFeaturesBody : ParallelLoopBody
{
    FindFeaturesBody(FeaturesFinder &finder, InputArrayOfArrays images,
                     std::vector<ImageFeatures> &features, const std::vector<std::vector<cv::Rect> > *rois)
            : finder_(finder), images_(images), features_(features), rois_(rois) {}

    void operator ()(const Range &r) const CV_OVERRIDE
    {
        for (int i = r.start; i < r.end; ++i)
        {
            Mat image = images_.getMat(i);
            if (rois_)
                finder_(image, features_[i], (*rois_)[i]);
            else
                finder_(image, features_[i]);
        }
    }

private:
    FeaturesFinder &finder_;
    InputArrayOfArrays images_;
    std::vector<ImageFeatures> &features_;
    const std::vector<std::vector<cv::Rect> > *rois_;
    // to silence a Visual Studio warning
    void operator =(const FindFeaturesBody&);
};


//////////////////////////////////////////////////////////////////////////////

typedef std::set<std::pair<int,int> > MatchesSet;

// These two classes find feature matches only; they do not estimate a homography.

class CpuMatcher CV_FINAL : public FeaturesMatcher
{
public:
    CpuMatcher(float match_conf) : FeaturesMatcher(true), match_conf_(match_conf) {}
    void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info) CV_OVERRIDE;

private:
    float match_conf_;
};

#ifdef HAVE_OPENCV_CUDAFEATURES2D
class GpuMatcher CV_FINAL : public FeaturesMatcher
{
public:
    GpuMatcher(float match_conf) : match_conf_(match_conf) {}
    void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info) CV_OVERRIDE;

    void collectGarbage();

private:
    float match_conf_;
    GpuMat descriptors1_, descriptors2_;
    GpuMat train_idx_, distance_, all_dist_;
    std::vector< std::vector<DMatch> > pair_matches;
};
#endif


void CpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
{
    CV_INSTRUMENT_REGION()

    CV_Assert(features1.descriptors.type() == features2.descriptors.type());
    CV_Assert(features2.descriptors.depth() == CV_8U || features2.descriptors.depth() == CV_32F);

    matches_info.matches.clear();

    Ptr<cv::DescriptorMatcher> matcher;
#if 0 // TODO check this
    if (ocl::isOpenCLActivated())
    {
        matcher = makePtr<BFMatcher>((int)NORM_L2);
    }
    else
#endif
    {
        Ptr<flann::IndexParams> indexParams = makePtr<flann::KDTreeIndexParams>();
        Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>();

        if (features2.descriptors.depth() == CV_8U)
        {
            indexParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
            searchParams->setAlgorithm(cvflann::FLANN_INDEX_LSH);
        }

        matcher = makePtr<FlannBasedMatcher>(indexParams, searchParams);
    }
    std::vector< std::vector<DMatch> > pair_matches;
    MatchesSet matches;

    // Find 1->2 matches
    matcher->knnMatch(features1.descriptors, features2.descriptors, pair_matches, 2);
    for (size_t i = 0; i < pair_matches.size(); ++i)
    {
        if (pair_matches[i].size() < 2)
            continue;
        const DMatch& m0 = pair_matches[i][0];
        const DMatch& m1 = pair_matches[i][1];
        if (m0.distance < (1.f - match_conf_) * m1.distance)
        {
            matches_info.matches.push_back(m0);
            matches.insert(std::make_pair(m0.queryIdx, m0.trainIdx));
        }
    }
    LOG("\n1->2 matches: " << matches_info.matches.size() << endl);

    // Find 2->1 matches
    pair_matches.clear();
    matcher->knnMatch(features2.descriptors, features1.descriptors, pair_matches, 2);
    for (size_t i = 0; i < pair_matches.size(); ++i)
    {
        if (pair_matches[i].size() < 2)
            continue;
        const DMatch& m0 = pair_matches[i][0];
        const DMatch& m1 = pair_matches[i][1];
        if (m0.distance < (1.f - match_conf_) * m1.distance)
            if (matches.find(std::make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())
                matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
    }
    LOG("1->2 & 2->1 matches: " << matches_info.matches.size() << endl);
}
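
// A worked example of the ratio test above (illustrative numbers, not from the
// source): with match_conf_ = 0.3f, the best match is kept only when its
// distance is below (1 - 0.3) = 70% of the second-best distance. So distances
// (best = 0.5, second = 0.8) pass, since 0.5 < 0.56, while
// (best = 0.6, second = 0.8) are rejected, since 0.6 >= 0.56.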

#ifdef HAVE_OPENCV_CUDAFEATURES2D
void GpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
{
    CV_INSTRUMENT_REGION()

    matches_info.matches.clear();

    ensureSizeIsEnough(features1.descriptors.size(), features1.descriptors.type(), descriptors1_);
    ensureSizeIsEnough(features2.descriptors.size(), features2.descriptors.type(), descriptors2_);

    descriptors1_.upload(features1.descriptors);
    descriptors2_.upload(features2.descriptors);

    // TODO: NORM_L1 avoids matcher crashes for ORB features, but it is not strictly correct for them.
    //       The best choice for ORB features is NORM_HAMMING, which in turn is incorrect for SURF features.
    //       A more accurate fix would make the norm type either a parameter of this method
    //       or a field of the class.
    Ptr<cuda::DescriptorMatcher> matcher = cuda::DescriptorMatcher::createBFMatcher(NORM_L1);

    MatchesSet matches;

    // Find 1->2 matches
    pair_matches.clear();
    matcher->knnMatch(descriptors1_, descriptors2_, pair_matches, 2);
    for (size_t i = 0; i < pair_matches.size(); ++i)
    {
        if (pair_matches[i].size() < 2)
            continue;
        const DMatch& m0 = pair_matches[i][0];
        const DMatch& m1 = pair_matches[i][1];
        if (m0.distance < (1.f - match_conf_) * m1.distance)
        {
            matches_info.matches.push_back(m0);
            matches.insert(std::make_pair(m0.queryIdx, m0.trainIdx));
        }
    }

    // Find 2->1 matches
    pair_matches.clear();
    matcher->knnMatch(descriptors2_, descriptors1_, pair_matches, 2);
    for (size_t i = 0; i < pair_matches.size(); ++i)
    {
        if (pair_matches[i].size() < 2)
            continue;
        const DMatch& m0 = pair_matches[i][0];
        const DMatch& m1 = pair_matches[i][1];
        if (m0.distance < (1.f - match_conf_) * m1.distance)
            if (matches.find(std::make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())
                matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
    }
}

void GpuMatcher::collectGarbage()
{
    descriptors1_.release();
    descriptors2_.release();
    train_idx_.release();
    distance_.release();
    all_dist_.release();
    std::vector< std::vector<DMatch> >().swap(pair_matches);
}
#endif

} // namespace


namespace cv {
namespace detail {

void FeaturesFinder::operator ()(InputArray image, ImageFeatures &features)
{
    find(image, features);
    features.img_size = image.size();
}


void FeaturesFinder::operator ()(InputArray image, ImageFeatures &features, const std::vector<Rect> &rois)
{
    std::vector<ImageFeatures> roi_features(rois.size());
    size_t total_kps_count = 0;
    int total_descriptors_height = 0;

    for (size_t i = 0; i < rois.size(); ++i)
    {
        find(image.getUMat()(rois[i]), roi_features[i]);
        total_kps_count += roi_features[i].keypoints.size();
        total_descriptors_height += roi_features[i].descriptors.rows;
    }

    features.img_size = image.size();
    features.keypoints.resize(total_kps_count);
    features.descriptors.create(total_descriptors_height,
                                roi_features[0].descriptors.cols,
                                roi_features[0].descriptors.type());

    int kp_idx = 0;
    int descr_offset = 0;
    for (size_t i = 0; i < rois.size(); ++i)
    {
        for (size_t j = 0; j < roi_features[i].keypoints.size(); ++j, ++kp_idx)
        {
            features.keypoints[kp_idx] = roi_features[i].keypoints[j];
            features.keypoints[kp_idx].pt.x += (float)rois[i].x;
            features.keypoints[kp_idx].pt.y += (float)rois[i].y;
        }
        UMat subdescr = features.descriptors.rowRange(
                descr_offset, descr_offset + roi_features[i].descriptors.rows);
        roi_features[i].descriptors.copyTo(subdescr);
        descr_offset += roi_features[i].descriptors.rows;
    }
}
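
// A minimal usage sketch for the ROI overload above (hypothetical image and
// tile sizes; any concrete FeaturesFinder subclass would do):
//
//   OrbFeaturesFinder finder;
//   ImageFeatures f;
//   std::vector<Rect> rois = { Rect(0, 0, 640, 480), Rect(640, 0, 640, 480) };
//   finder(image, f, rois);  // keypoints come back in full-image coordinates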


void FeaturesFinder::operator ()(InputArrayOfArrays images, std::vector<ImageFeatures> &features)
{
    size_t count = images.total();
    features.resize(count);

    FindFeaturesBody body(*this, images, features, NULL);
    if (isThreadSafe())
        parallel_for_(Range(0, static_cast<int>(count)), body);
    else
        body(Range(0, static_cast<int>(count)));
}


void FeaturesFinder::operator ()(InputArrayOfArrays images, std::vector<ImageFeatures> &features,
                                  const std::vector<std::vector<cv::Rect> > &rois)
{
    CV_Assert(rois.size() == images.total());
    size_t count = images.total();
    features.resize(count);

    FindFeaturesBody body(*this, images, features, &rois);
    if (isThreadSafe())
        parallel_for_(Range(0, static_cast<int>(count)), body);
    else
        body(Range(0, static_cast<int>(count)));
}


bool FeaturesFinder::isThreadSafe() const
{
#ifdef HAVE_OPENCL
    if (ocl::isOpenCLActivated())
    {
        return false;
    }
#endif
    if (dynamic_cast<const SurfFeaturesFinder*>(this))
    {
        return true;
    }
    else if (dynamic_cast<const OrbFeaturesFinder*>(this))
    {
        return true;
    }
    else
    {
        return false;
    }
}


SurfFeaturesFinder::SurfFeaturesFinder(double hess_thresh, int num_octaves, int num_layers,
                                       int num_octaves_descr, int num_layers_descr)
{
#ifdef HAVE_OPENCV_XFEATURES2D
    if (num_octaves_descr == num_octaves && num_layers_descr == num_layers)
    {
        Ptr<SURF> surf_ = SURF::create();
        if( !surf_ )
            CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
        surf_->setHessianThreshold(hess_thresh);
        surf_->setNOctaves(num_octaves);
        surf_->setNOctaveLayers(num_layers);
        surf = surf_;
    }
    else
    {
        Ptr<SURF> sdetector_ = SURF::create();
        Ptr<SURF> sextractor_ = SURF::create();

        if( !sdetector_ || !sextractor_ )
            CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );

        sdetector_->setHessianThreshold(hess_thresh);
        sdetector_->setNOctaves(num_octaves);
        sdetector_->setNOctaveLayers(num_layers);

        sextractor_->setNOctaves(num_octaves_descr);
        sextractor_->setNOctaveLayers(num_layers_descr);

        detector_ = sdetector_;
        extractor_ = sextractor_;
    }
#else
    (void)hess_thresh;
    (void)num_octaves;
    (void)num_layers;
    (void)num_octaves_descr;
    (void)num_layers_descr;
    CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
#endif
}

void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
{
    UMat gray_image;
    CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC1));
    if (image.type() == CV_8UC3)
    {
        cvtColor(image, gray_image, COLOR_BGR2GRAY);
    }
    else
    {
        gray_image = image.getUMat();
    }
    if (!surf)
    {
        detector_->detect(gray_image, features.keypoints);
        extractor_->compute(gray_image, features.keypoints, features.descriptors);
    }
    else
    {
        UMat descriptors;
        surf->detectAndCompute(gray_image, Mat(), features.keypoints, descriptors);
        features.descriptors = descriptors.reshape(1, (int)features.keypoints.size());
    }
}

OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, int n_features, float scaleFactor, int nlevels)
{
    grid_size = _grid_size;
    orb = ORB::create(n_features * (99 + grid_size.area()) / 100 / grid_size.area(), scaleFactor, nlevels);
}
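
// Worked example for the per-cell feature budget above (illustrative numbers):
// with n_features = 1500 and a 3x1 grid (area = 3), each cell requests
// 1500 * (99 + 3) / 100 / 3 = 510 keypoints -- roughly n_features / area with
// a few percent of headroom, so the per-cell detections still add up to about
// n_features when some cells fall short.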

void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
{
    UMat gray_image;

    CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC4) || (image.type() == CV_8UC1));

    if (image.type() == CV_8UC3) {
        cvtColor(image, gray_image, COLOR_BGR2GRAY);
    } else if (image.type() == CV_8UC4) {
        cvtColor(image, gray_image, COLOR_BGRA2GRAY);
    } else if (image.type() == CV_8UC1) {
        gray_image = image.getUMat();
    } else {
        CV_Error(Error::StsUnsupportedFormat, "");
    }

    if (grid_size.area() == 1)
        orb->detectAndCompute(gray_image, Mat(), features.keypoints, features.descriptors);
    else
    {
        features.keypoints.clear();
        features.descriptors.release();

        std::vector<KeyPoint> points;
        Mat _descriptors;
        UMat descriptors;

        for (int r = 0; r < grid_size.height; ++r)
            for (int c = 0; c < grid_size.width; ++c)
            {
                int xl = c * gray_image.cols / grid_size.width;
                int yl = r * gray_image.rows / grid_size.height;
                int xr = (c+1) * gray_image.cols / grid_size.width;
                int yr = (r+1) * gray_image.rows / grid_size.height;

                // LOGLN("OrbFeaturesFinder::find: gray_image.empty=" << (gray_image.empty()?"true":"false") << ", "
                //     << " gray_image.size()=(" << gray_image.size().width << "x" << gray_image.size().height << "), "
                //     << " yl=" << yl << ", yr=" << yr << ", "
                //     << " xl=" << xl << ", xr=" << xr << ", gray_image.data=" << ((size_t)gray_image.data) << ", "
                //     << "gray_image.dims=" << gray_image.dims << "\n");

                UMat gray_image_part = gray_image(Range(yl, yr), Range(xl, xr));
                // LOGLN("OrbFeaturesFinder::find: gray_image_part.empty=" << (gray_image_part.empty()?"true":"false") << ", "
                //     << " gray_image_part.size()=(" << gray_image_part.size().width << "x" << gray_image_part.size().height << "), "
                //     << " gray_image_part.dims=" << gray_image_part.dims << ", "
                //     << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n");

                orb->detectAndCompute(gray_image_part, UMat(), points, descriptors);

                features.keypoints.reserve(features.keypoints.size() + points.size());
                for (std::vector<KeyPoint>::iterator kp = points.begin(); kp != points.end(); ++kp)
                {
                    kp->pt.x += xl;
                    kp->pt.y += yl;
                    features.keypoints.push_back(*kp);
                }
                _descriptors.push_back(descriptors.getMat(ACCESS_READ));
            }

        // TODO optimize copyTo()
        //features.descriptors = _descriptors.getUMat(ACCESS_READ);
        _descriptors.copyTo(features.descriptors);
    }
}

AKAZEFeaturesFinder::AKAZEFeaturesFinder(int descriptor_type,
                                         int descriptor_size,
                                         int descriptor_channels,
                                         float threshold,
                                         int nOctaves,
                                         int nOctaveLayers,
                                         int diffusivity)
{
    akaze = AKAZE::create(descriptor_type, descriptor_size, descriptor_channels,
                          threshold, nOctaves, nOctaveLayers, diffusivity);
}

void AKAZEFeaturesFinder::find(InputArray image, detail::ImageFeatures &features)
{
    CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC1));
    akaze->detectAndCompute(image, noArray(), features.keypoints, features.descriptors);
}

#ifdef HAVE_OPENCV_XFEATURES2D
SurfFeaturesFinderGpu::SurfFeaturesFinderGpu(double hess_thresh, int num_octaves, int num_layers,
                                             int num_octaves_descr, int num_layers_descr)
{
    surf_.keypointsRatio = 0.1f;
    surf_.hessianThreshold = hess_thresh;
    surf_.extended = false;
    num_octaves_ = num_octaves;
    num_layers_ = num_layers;
    num_octaves_descr_ = num_octaves_descr;
    num_layers_descr_ = num_layers_descr;
}


void SurfFeaturesFinderGpu::find(InputArray image, ImageFeatures &features)
{
    CV_Assert(image.depth() == CV_8U);

    ensureSizeIsEnough(image.size(), image.type(), image_);
    image_.upload(image);

    ensureSizeIsEnough(image.size(), CV_8UC1, gray_image_);

#ifdef HAVE_OPENCV_CUDAIMGPROC
    cv::cuda::cvtColor(image_, gray_image_, COLOR_BGR2GRAY);
#else
    cvtColor(image_, gray_image_, COLOR_BGR2GRAY);
#endif

    surf_.nOctaves = num_octaves_;
    surf_.nOctaveLayers = num_layers_;
    surf_.upright = false;
    surf_(gray_image_, GpuMat(), keypoints_);

    surf_.nOctaves = num_octaves_descr_;
    surf_.nOctaveLayers = num_layers_descr_;
    surf_.upright = true;
    surf_(gray_image_, GpuMat(), keypoints_, descriptors_, true);
    surf_.downloadKeypoints(keypoints_, features.keypoints);

    descriptors_.download(features.descriptors);
}

void SurfFeaturesFinderGpu::collectGarbage()
{
    surf_.releaseMemory();
    image_.release();
    gray_image_.release();
    keypoints_.release();
    descriptors_.release();
}
#endif


//////////////////////////////////////////////////////////////////////////////

MatchesInfo::MatchesInfo() : src_img_idx(-1), dst_img_idx(-1), num_inliers(0), confidence(0) {}

MatchesInfo::MatchesInfo(const MatchesInfo &other) { *this = other; }

MatchesInfo& MatchesInfo::operator =(const MatchesInfo &other)
{
    src_img_idx = other.src_img_idx;
    dst_img_idx = other.dst_img_idx;
    matches = other.matches;
    inliers_mask = other.inliers_mask;
    num_inliers = other.num_inliers;
    H = other.H.clone();
    confidence = other.confidence;
    return *this;
}


//////////////////////////////////////////////////////////////////////////////

void FeaturesMatcher::operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
                                  const UMat &mask)
{
    const int num_images = static_cast<int>(features.size());

    CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.cols == num_images && mask.rows == num_images));
    Mat_<uchar> mask_(mask.getMat(ACCESS_READ));
    if (mask_.empty())
        mask_ = Mat::ones(num_images, num_images, CV_8U);

    std::vector<std::pair<int,int> > near_pairs;
    for (int i = 0; i < num_images - 1; ++i)
        for (int j = i + 1; j < num_images; ++j)
            if (features[i].keypoints.size() > 0 && features[j].keypoints.size() > 0 && mask_(i, j))
                near_pairs.push_back(std::make_pair(i, j));

    pairwise_matches.resize(num_images * num_images);
    MatchPairsBody body(*this, features, pairwise_matches, near_pairs);

    if (is_thread_safe_)
        parallel_for_(Range(0, static_cast<int>(near_pairs.size())), body);
    else
        body(Range(0, static_cast<int>(near_pairs.size())));
    LOGLN_CHAT("");
}
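
// A minimal end-to-end sketch of the matching API above (assumed typical
// stitching pipeline, given some std::vector<Mat> imgs; not part of this file):
//
//   std::vector<ImageFeatures> features(imgs.size());
//   OrbFeaturesFinder finder;
//   for (size_t i = 0; i < imgs.size(); ++i)
//       finder(imgs[i], features[i]);
//
//   std::vector<MatchesInfo> pairwise_matches;
//   BestOf2NearestMatcher matcher(false /*try_use_gpu*/, 0.3f /*match_conf*/);
//   matcher(features, pairwise_matches);  // fills the full num_images^2 grid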


//////////////////////////////////////////////////////////////////////////////

BestOf2NearestMatcher::BestOf2NearestMatcher(bool try_use_gpu, float match_conf, int num_matches_thresh1, int num_matches_thresh2)
{
    (void)try_use_gpu;

#ifdef HAVE_OPENCV_CUDAFEATURES2D
    if (try_use_gpu && getCudaEnabledDeviceCount() > 0)
    {
        impl_ = makePtr<GpuMatcher>(match_conf);
    }
    else
#endif
    {
        impl_ = makePtr<CpuMatcher>(match_conf);
    }

    is_thread_safe_ = impl_->isThreadSafe();
    num_matches_thresh1_ = num_matches_thresh1;
    num_matches_thresh2_ = num_matches_thresh2;
}


void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
                                  MatchesInfo &matches_info)
{
    CV_INSTRUMENT_REGION()

    (*impl_)(features1, features2, matches_info);

    // Check if it makes sense to find homography
    if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
        return;

    // Construct point-point correspondences for homography estimation
    Mat src_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
    Mat dst_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
    for (size_t i = 0; i < matches_info.matches.size(); ++i)
    {
        const DMatch& m = matches_info.matches[i];

        Point2f p = features1.keypoints[m.queryIdx].pt;
        p.x -= features1.img_size.width * 0.5f;
        p.y -= features1.img_size.height * 0.5f;
        src_points.at<Point2f>(0, static_cast<int>(i)) = p;

        p = features2.keypoints[m.trainIdx].pt;
        p.x -= features2.img_size.width * 0.5f;
        p.y -= features2.img_size.height * 0.5f;
        dst_points.at<Point2f>(0, static_cast<int>(i)) = p;
    }

    // Find pair-wise motion
    matches_info.H = findHomography(src_points, dst_points, matches_info.inliers_mask, RANSAC);
    if (matches_info.H.empty() || std::abs(determinant(matches_info.H)) < std::numeric_limits<double>::epsilon())
        return;

    // Find number of inliers
    matches_info.num_inliers = 0;
    for (size_t i = 0; i < matches_info.inliers_mask.size(); ++i)
        if (matches_info.inliers_mask[i])
            matches_info.num_inliers++;

    // These coefficients are from the paper by M. Brown and D. Lowe, "Automatic Panoramic
    // Image Stitching using Invariant Features"
    matches_info.confidence = matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());

    // Set zero confidence to remove matches between too close images, as they don't provide
    // additional information anyway. The threshold was set experimentally.
    matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;
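
    // Worked example (illustrative numbers): 100 matches with 60 inliers give
    // confidence = 60 / (8 + 0.3 * 100) = 60 / 38 ~ 1.58, which is kept, while
    // 300 matches that are all inliers give 300 / (8 + 90) ~ 3.06 > 3 and are
    // zeroed out as a near-duplicate image pair.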

    // Check if we should try to refine motion
    if (matches_info.num_inliers < num_matches_thresh2_)
        return;

    // Construct point-point correspondences for inliers only
    src_points.create(1, matches_info.num_inliers, CV_32FC2);
    dst_points.create(1, matches_info.num_inliers, CV_32FC2);
    int inlier_idx = 0;
    for (size_t i = 0; i < matches_info.matches.size(); ++i)
    {
        if (!matches_info.inliers_mask[i])
            continue;

        const DMatch& m = matches_info.matches[i];

        Point2f p = features1.keypoints[m.queryIdx].pt;
        p.x -= features1.img_size.width * 0.5f;
        p.y -= features1.img_size.height * 0.5f;
        src_points.at<Point2f>(0, inlier_idx) = p;

        p = features2.keypoints[m.trainIdx].pt;
        p.x -= features2.img_size.width * 0.5f;
        p.y -= features2.img_size.height * 0.5f;
        dst_points.at<Point2f>(0, inlier_idx) = p;

        inlier_idx++;
    }

    // Rerun motion estimation on inliers only
    matches_info.H = findHomography(src_points, dst_points, RANSAC);
}
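
// A sketch of consuming a pairwise result (hypothetical indices and consumer;
// note that the correspondences above use coordinates centered on each image):
//
//   const MatchesInfo &mi = pairwise_matches[from * num_images + to];
//   if (!mi.H.empty() && mi.confidence > 1.0)  // 1.0 is a commonly used threshold
//       useHomography(mi.H);                   // hypothetical: maps `from` onto `to`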

void BestOf2NearestMatcher::collectGarbage()
{
    impl_->collectGarbage();
}


BestOf2NearestRangeMatcher::BestOf2NearestRangeMatcher(int range_width, bool try_use_gpu, float match_conf,
                                                       int num_matches_thresh1, int num_matches_thresh2)
        : BestOf2NearestMatcher(try_use_gpu, match_conf, num_matches_thresh1, num_matches_thresh2)
{
    range_width_ = range_width;
}


void BestOf2NearestRangeMatcher::operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
                                             const UMat &mask)
{
    const int num_images = static_cast<int>(features.size());

    CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.cols == num_images && mask.rows == num_images));
    Mat_<uchar> mask_(mask.getMat(ACCESS_READ));
    if (mask_.empty())
        mask_ = Mat::ones(num_images, num_images, CV_8U);

    std::vector<std::pair<int,int> > near_pairs;
    for (int i = 0; i < num_images - 1; ++i)
        for (int j = i + 1; j < std::min(num_images, i + range_width_); ++j)
            if (features[i].keypoints.size() > 0 && features[j].keypoints.size() > 0 && mask_(i, j))
                near_pairs.push_back(std::make_pair(i, j));

    pairwise_matches.resize(num_images * num_images);
    MatchPairsBody body(*this, features, pairwise_matches, near_pairs);

    if (is_thread_safe_)
        parallel_for_(Range(0, static_cast<int>(near_pairs.size())), body);
    else
        body(Range(0, static_cast<int>(near_pairs.size())));
    LOGLN_CHAT("");
}


void AffineBestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
                                        MatchesInfo &matches_info)
{
    (*impl_)(features1, features2, matches_info);

    // Check if it makes sense to find transform
    if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
        return;

    // Construct point-point correspondences for transform estimation
    Mat src_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
    Mat dst_points(1, static_cast<int>(matches_info.matches.size()), CV_32FC2);
    for (size_t i = 0; i < matches_info.matches.size(); ++i)
    {
        const cv::DMatch &m = matches_info.matches[i];
        src_points.at<Point2f>(0, static_cast<int>(i)) = features1.keypoints[m.queryIdx].pt;
        dst_points.at<Point2f>(0, static_cast<int>(i)) = features2.keypoints[m.trainIdx].pt;
    }

    // Find pair-wise motion
    if (full_affine_)
        matches_info.H = estimateAffine2D(src_points, dst_points, matches_info.inliers_mask);
    else
        matches_info.H = estimateAffinePartial2D(src_points, dst_points, matches_info.inliers_mask);

    if (matches_info.H.empty()) {
        // could not find transformation
        matches_info.confidence = 0;
        matches_info.num_inliers = 0;
        return;
    }

    // Find number of inliers
    matches_info.num_inliers = 0;
    for (size_t i = 0; i < matches_info.inliers_mask.size(); ++i)
        if (matches_info.inliers_mask[i])
            matches_info.num_inliers++;

    // These coefficients are from the paper by M. Brown and D. Lowe, "Automatic Panoramic
    // Image Stitching using Invariant Features"
    matches_info.confidence =
        matches_info.num_inliers / (8 + 0.3 * matches_info.matches.size());

    /* should we remove matches between too close images? */
    // matches_info.confidence = matches_info.confidence > 3. ? 0. : matches_info.confidence;

    // extend H to represent linear transformation in homogeneous coordinates
    matches_info.H.push_back(Mat::zeros(1, 3, CV_64F));
    matches_info.H.at<double>(2, 2) = 1;
}
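
// The affine estimators above return a 2x3 matrix [a b tx; c d ty]; appending
// the row [0 0 1] turns it into the 3x3 homogeneous form
//
//   [ a  b  tx ]
//   [ c  d  ty ]
//   [ 0  0  1  ]
//
// so downstream code can treat affine and projective H matrices uniformly.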


} // namespace detail
} // namespace cv