From 2de0e1fc6609be5e347ee0db385511526e4f23aa Mon Sep 17 00:00:00 2001
From: Alexey Spizhevoy
Date: Fri, 20 May 2011 08:08:55 +0000
Subject: [PATCH] refactored opencv_stitching

---
 modules/stitching/autocalib.cpp         |   7 +-
 modules/stitching/autocalib.hpp         |   3 +-
 modules/stitching/main.cpp              |  90 +++++++++----------
 modules/stitching/matchers.cpp          | 150 +++++++++++++-------------------
 modules/stitching/matchers.hpp          |  20 ++---
 modules/stitching/motion_estimators.cpp |  34 ++++----
 modules/stitching/motion_estimators.hpp |  23 +++--
 7 files changed, 144 insertions(+), 183 deletions(-)

diff --git a/modules/stitching/autocalib.cpp b/modules/stitching/autocalib.cpp
index e5a789e..bdda753 100644
--- a/modules/stitching/autocalib.cpp
+++ b/modules/stitching/autocalib.cpp
@@ -35,10 +35,9 @@ void focalsFromHomography(const Mat& H, double &f0, double &f1, bool &f0_ok, bool &f1_ok)
 }
 
-double estimateFocal(const vector<Mat> &images, const vector<ImageFeatures> &/*features*/,
-                     const vector<MatchesInfo> &pairwise_matches)
+double estimateFocal(const vector<ImageFeatures> &features, const vector<MatchesInfo> &pairwise_matches)
 {
-    const int num_images = static_cast<int>(images.size());
+    const int num_images = static_cast<int>(features.size());
     vector<double> focals;
 
     for (int src_idx = 0; src_idx < num_images; ++src_idx)
@@ -65,6 +64,6 @@
     LOGLN("Can't estimate focal length, will use naive approach");
     double focals_sum = 0;
     for (int i = 0; i < num_images; ++i)
-        focals_sum += images[i].rows + images[i].cols;
+        focals_sum += features[i].img_size.width + features[i].img_size.height;
     return focals_sum / num_images;
 }
diff --git a/modules/stitching/autocalib.hpp b/modules/stitching/autocalib.hpp
index 81652b4..6c98570 100644
--- a/modules/stitching/autocalib.hpp
+++ b/modules/stitching/autocalib.hpp
@@ -9,7 +9,6 @@
 // by Heung-Yeung Shum and Richard Szeliski.
 
 void focalsFromHomography(const cv::Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok);
 
-double estimateFocal(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
-                     const std::vector<MatchesInfo> &pairwise_matches);
+double estimateFocal(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches);
 
 #endif // __OPENCV_AUTOCALIB_HPP__
diff --git a/modules/stitching/main.cpp b/modules/stitching/main.cpp
index 996ce65..933adc1 100644
--- a/modules/stitching/main.cpp
+++ b/modules/stitching/main.cpp
@@ -37,7 +37,6 @@ int main(int argc, char* argv[])
     cv::setBreakOnError(true);
 
     vector<string> img_names;
-    vector<Mat> images;
 
     // Default parameters
     bool trygpu = false;
@@ -188,32 +187,11 @@ int main(int argc, char* argv[])
             i++;
         }
         else
-        {
             img_names.push_back(argv[i]);
-            Mat full_img = imread(argv[i]);
-            if (full_img.empty())
-            {
-                cout << "Can't open image " << argv[i] << endl;
-                return -1;
-            }
-            if (work_megapix < 0)
-                images.push_back(full_img);
-            else
-            {
-                if (!is_work_scale_set)
-                {
-                    work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
-                    is_work_scale_set = true;
-                }
-                Mat img;
-                resize(full_img, img, Size(), work_scale, work_scale);
-                images.push_back(img);
-            }
-        }
     }
     LOGLN("Parsing params and reading images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
 
-    int num_images = static_cast<int>(images.size());
+    int num_images = static_cast<int>(img_names.size());
     if (num_images < 2)
     {
         cout << "Need more images\n";
@@ -222,9 +200,30 @@ int main(int argc, char* argv[])
     t = getTickCount();
     LOGLN("Finding features...");
 
-    vector<ImageFeatures> features;
+    vector<ImageFeatures> features(num_images);
     SurfFeaturesFinder finder(trygpu);
-    finder(images, features);
+    Mat full_img, img;
+    for (int i = 0; i < num_images; ++i)
+    {
+        full_img = imread(img_names[i]);
+        if (full_img.empty())
+        {
+            cout << "Can't open image " << img_names[i] << endl;
+            return -1;
+        }
+        if (work_megapix < 0)
+            img = full_img;
+        else
+        {
+            if (!is_work_scale_set)
+            {
+                work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
+                is_work_scale_set = true;
+            }
+            resize(full_img, img, Size(), work_scale, work_scale);
+        }
+        finder(img, features[i]);
+    }
     LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
 
     t = getTickCount();
@@ -233,16 +232,16 @@
     BestOf2NearestMatcher matcher(trygpu);
     if (user_match_conf)
         matcher = BestOf2NearestMatcher(trygpu, match_conf);
-    matcher(images, features, pairwise_matches);
+    matcher(features, pairwise_matches);
     LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
 
-    vector<int> indices = leaveBiggestComponent(images, features, pairwise_matches, conf_thresh);
+    vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
     vector<string> img_names_subset;
     for (size_t i = 0; i < indices.size(); ++i)
         img_names_subset.push_back(img_names[indices[i]]);
     img_names = img_names_subset;
 
-    num_images = static_cast<int>(images.size());
+    num_images = static_cast<int>(img_names.size());
     if (num_images < 2)
     {
         cout << "Need more images\n";
@@ -253,7 +252,7 @@
     LOGLN("Estimating rotations...");
     HomographyBasedEstimator estimator;
     vector<CameraParams> cameras;
-    estimator(images, features, pairwise_matches, cameras);
+    estimator(features, pairwise_matches, cameras);
     LOGLN("Estimating rotations, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
 
     for (size_t i = 0; i < cameras.size(); ++i)
@@ -267,7 +266,7 @@
     t = getTickCount();
     LOGLN("Bundle adjustment... ");
adjustment... "); BundleAdjuster adjuster(ba_space, conf_thresh); - adjuster(images, features, pairwise_matches, cameras); + adjuster(features, pairwise_matches, cameras); LOGLN("Bundle adjustment, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec"); if (wave_correct) @@ -293,27 +292,24 @@ int main(int argc, char* argv[]) nth_element(focals.begin(), focals.end(), focals.begin() + focals.size() / 2); float camera_focal = static_cast(focals[focals.size() / 2]); - if ((work_megapix > 0 || compose_megapix > 0) - && abs(work_megapix - compose_megapix) > 1e-3) + t = getTickCount(); + vector images(num_images); + LOGLN("Compose scaling..."); + for (int i = 0; i < num_images; ++i) { - t = getTickCount(); - LOGLN("Compose scaling..."); - for (int i = 0; i < num_images; ++i) + Mat full_img = imread(img_names[i]); + if (!is_compose_scale_set) { - Mat full_img = imread(img_names[i]); - if (!is_compose_scale_set) - { - compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area())); - is_compose_scale_set = true; - } - Mat img; - resize(full_img, img, Size(), compose_scale, compose_scale); - images[i] = img; - cameras[i].focal *= compose_scale / work_scale; + compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area())); + is_compose_scale_set = true; } - camera_focal *= static_cast(compose_scale / work_scale); - LOGLN("Compose scaling, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec"); + Mat img; + resize(full_img, img, Size(), compose_scale, compose_scale); + images[i] = img; + cameras[i].focal *= compose_scale / work_scale; } + camera_focal *= static_cast(compose_scale / work_scale); + LOGLN("Compose scaling, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec"); vector masks(num_images); for (int i = 0; i < num_images; ++i) diff --git a/modules/stitching/matchers.cpp b/modules/stitching/matchers.cpp index 8f42bd8..5d7e350 100644 --- a/modules/stitching/matchers.cpp +++ b/modules/stitching/matchers.cpp @@ -13,26 +13,23 @@ using namespace cv::gpu; ////////////////////////////////////////////////////////////////////////////// -void FeaturesFinder::operator ()(const vector &images, vector &features) +void FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features) { - features.resize(images.size()); - - // Calculate histograms - for (size_t i = 0; i < images.size(); ++i) - { - Mat hsv; - cvtColor(images[i], hsv, CV_BGR2HSV); - int hbins = 30, sbins = 32, vbins = 30; - int hist_size[] = { hbins, sbins, vbins }; - float hranges[] = { 0, 180 }; - float sranges[] = { 0, 256 }; - float vranges[] = { 0, 256 }; - const float* ranges[] = { hranges, sranges, vranges }; - int channels[] = { 0, 1, 2 }; - calcHist(&hsv, 1, channels, Mat(), features[i].hist, 3, hist_size, ranges); - } - - find(images, features); + features.img_size = image.size(); + + // Calculate histogram + Mat hsv; + cvtColor(image, hsv, CV_BGR2HSV); + int hbins = 30, sbins = 32, vbins = 30; + int hist_size[] = { hbins, sbins, vbins }; + float hranges[] = { 0, 180 }; + float sranges[] = { 0, 256 }; + float vranges[] = { 0, 256 }; + const float* ranges[] = { hranges, sranges, vranges }; + int channels[] = { 0, 1, 2 }; + calcHist(&hsv, 1, channels, Mat(), features.hist, 3, hist_size, ranges); + + find(image, features); } ////////////////////////////////////////////////////////////////////////////// @@ -50,31 +47,20 @@ namespace } protected: - void find(const vector &images, vector &features); + void find(const Mat &image, ImageFeatures &features); private: Ptr 
        Ptr<FeatureDetector> detector_;
        Ptr<DescriptorExtractor> extractor_;
    };
 
-    void CpuSurfFeaturesFinder::find(const vector<Mat> &images, vector<ImageFeatures> &features)
+    void CpuSurfFeaturesFinder::find(const Mat &image, ImageFeatures &features)
     {
-        // Make images gray
-        vector<Mat> gray_images(images.size());
-        for (size_t i = 0; i < images.size(); ++i)
-        {
-            CV_Assert(images[i].depth() == CV_8U);
-            cvtColor(images[i], gray_images[i], CV_BGR2GRAY);
-        }
-
-        features.resize(images.size());
-
-        // Find keypoints in all images
-        for (size_t i = 0; i < images.size(); ++i)
-        {
-            detector_->detect(gray_images[i], features[i].keypoints);
-            extractor_->compute(gray_images[i], features[i].keypoints, features[i].descriptors);
-        }
+        Mat gray_image;
+        CV_Assert(image.depth() == CV_8U);
+        cvtColor(image, gray_image, CV_BGR2GRAY);
+        detector_->detect(gray_image, features.keypoints);
+        extractor_->compute(gray_image, features.keypoints, features.descriptors);
     }
 
     class GpuSurfFeaturesFinder : public FeaturesFinder
     {
@@ -92,7 +78,7 @@
         }
 
     protected:
-        void find(const vector<Mat> &images, vector<ImageFeatures> &features);
+        void find(const Mat &image, ImageFeatures &features);
 
     private:
        SURF_GPU surf_;
@@ -100,34 +86,24 @@
        int num_octaves_descr_, num_layers_descr_;
    };
 
-    void GpuSurfFeaturesFinder::find(const vector<Mat> &images, vector<ImageFeatures> &features)
+    void GpuSurfFeaturesFinder::find(const Mat &image, ImageFeatures &features)
     {
-        // Make images gray
-        vector<GpuMat> gray_images(images.size());
-        for (size_t i = 0; i < images.size(); ++i)
-        {
-            CV_Assert(images[i].depth() == CV_8U);
-            cvtColor(GpuMat(images[i]), gray_images[i], CV_BGR2GRAY);
-        }
+        GpuMat gray_image;
+        CV_Assert(image.depth() == CV_8U);
+        cvtColor(GpuMat(image), gray_image, CV_BGR2GRAY);
 
-        features.resize(images.size());
-
-        // Find keypoints in all images
         GpuMat d_keypoints;
         GpuMat d_descriptors;
-        for (size_t i = 0; i < images.size(); ++i)
-        {
-            surf_.nOctaves = num_octaves_;
-            surf_.nOctaveLayers = num_layers_;
-            surf_(gray_images[i], GpuMat(), d_keypoints);
+        surf_.nOctaves = num_octaves_;
+        surf_.nOctaveLayers = num_layers_;
+        surf_(gray_image, GpuMat(), d_keypoints);
 
-            surf_.nOctaves = num_octaves_descr_;
-            surf_.nOctaveLayers = num_layers_descr_;
-            surf_(gray_images[i], GpuMat(), d_keypoints, d_descriptors, true);
+        surf_.nOctaves = num_octaves_descr_;
+        surf_.nOctaveLayers = num_layers_descr_;
+        surf_(gray_image, GpuMat(), d_keypoints, d_descriptors, true);
+        surf_.downloadKeypoints(d_keypoints, features.keypoints);
 
-            surf_.downloadKeypoints(d_keypoints, features[i].keypoints);
-            d_descriptors.download(features[i].descriptors);
-        }
+        d_descriptors.download(features.descriptors);
     }
 }
 
@@ -141,9 +117,9 @@ SurfFeaturesFinder::SurfFeaturesFinder(bool try_use_gpu, double hess_thresh, int
 }
 
-void SurfFeaturesFinder::find(const vector<Mat> &images, vector<ImageFeatures> &features)
+void SurfFeaturesFinder::find(const Mat &image, ImageFeatures &features)
 {
-    (*impl_)(images, features);
+    (*impl_)(image, features);
 }
 
@@ -168,31 +144,29 @@ const MatchesInfo& MatchesInfo::operator =(const MatchesInfo &other)
 
 //////////////////////////////////////////////////////////////////////////////
 
-void FeaturesMatcher::operator ()(const vector<Mat> &images, const vector<ImageFeatures> &features,
-                                  vector<MatchesInfo> &pairwise_matches)
+void FeaturesMatcher::operator ()(const vector<ImageFeatures> &features, vector<MatchesInfo> &pairwise_matches)
 {
-    pairwise_matches.resize(images.size() * images.size());
-    for (size_t i = 0; i < images.size(); ++i)
+    const int num_images = static_cast<int>(features.size());
+
+    pairwise_matches.resize(num_images * num_images);
+    for (int i = 0; i < num_images; ++i)
     {
i << "... "); - for (size_t j = i + 1; j < images.size(); ++j) + for (int j = i + 1; j < num_images; ++j) { // Save time by ignoring poor pairs if (compareHist(features[i].hist, features[j].hist, CV_COMP_INTERSECT) - < min(images[i].size().area(), images[j].size().area()) * 0.4) - { - //LOGLN("Ignoring (" << i << ", " << j << ") pair..."); + < min(features[i].img_size.area(), features[j].img_size.area()) * 0.4) continue; - } - size_t pair_idx = i * images.size() + j; + int pair_idx = i * num_images + j; - (*this)(images[i], features[i], images[j], features[j], pairwise_matches[pair_idx]); + (*this)(features[i], features[j], pairwise_matches[pair_idx]); pairwise_matches[pair_idx].src_img_idx = i; pairwise_matches[pair_idx].dst_img_idx = j; // Set up dual pair matches info - size_t dual_pair_idx = j * images.size() + i; + size_t dual_pair_idx = j * num_images + i; pairwise_matches[dual_pair_idx] = pairwise_matches[pair_idx]; pairwise_matches[dual_pair_idx].src_img_idx = j; pairwise_matches[dual_pair_idx].dst_img_idx = i; @@ -215,13 +189,13 @@ namespace public: inline CpuMatcher(float match_conf) : match_conf_(match_conf) {} - void match(const cv::Mat&, const ImageFeatures &features1, const cv::Mat&, const ImageFeatures &features2, MatchesInfo& matches_info); + void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info); private: float match_conf_; }; - void CpuMatcher::match(const cv::Mat&, const ImageFeatures &features1, const cv::Mat&, const ImageFeatures &features2, MatchesInfo& matches_info) + void CpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info) { matches_info.matches.clear(); @@ -259,7 +233,7 @@ namespace public: inline GpuMatcher(float match_conf) : match_conf_(match_conf) {} - void match(const cv::Mat&, const ImageFeatures &features1, const cv::Mat&, const ImageFeatures &features2, MatchesInfo& matches_info); + void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info); private: float match_conf_; @@ -270,7 +244,7 @@ namespace GpuMat trainIdx_, distance_, allDist_; }; - void GpuMatcher::match(const cv::Mat&, const ImageFeatures &features1, const cv::Mat&, const ImageFeatures &features2, MatchesInfo& matches_info) + void GpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info) { matches_info.matches.clear(); @@ -330,10 +304,10 @@ BestOf2NearestMatcher::BestOf2NearestMatcher(bool try_use_gpu, float match_conf, } -void BestOf2NearestMatcher::match(const Mat &img1, const ImageFeatures &features1, const Mat &img2, const ImageFeatures &features2, +void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info) { - (*impl_)(img1, features1, img2, features2, matches_info); + (*impl_)(features1, features2, matches_info); // Check if it makes sense to find homography if (matches_info.matches.size() < static_cast(num_matches_thresh1_)) @@ -347,13 +321,13 @@ void BestOf2NearestMatcher::match(const Mat &img1, const ImageFeatures &features const DMatch& m = matches_info.matches[i]; Point2f p = features1.keypoints[m.queryIdx].pt; - p.x -= img1.cols * 0.5f; - p.y -= img1.rows * 0.5f; + p.x -= features1.img_size.width * 0.5f; + p.y -= features1.img_size.height * 0.5f; src_points.at(0, i) = p; p = features2.keypoints[m.trainIdx].pt; - p.x -= img2.cols * 0.5f; - p.y -= img2.rows * 0.5f; + p.x -= features2.img_size.width * 0.5f; + p.y -= 
+        p.y -= features2.img_size.height * 0.5f;
         dst_points.at<Point2f>(0, i) = p;
     }
 
@@ -384,13 +358,13 @@ void BestOf2NearestMatcher::match(const Mat &img1, const ImageFeatures &features
         const DMatch& m = matches_info.matches[i];
 
         Point2f p = features1.keypoints[m.queryIdx].pt;
-        p.x -= img1.cols * 0.5f;
-        p.y -= img2.rows * 0.5f;
+        p.x -= features1.img_size.width * 0.5f;
+        p.y -= features1.img_size.height * 0.5f;
         src_points.at<Point2f>(0, inlier_idx) = p;
 
         p = features2.keypoints[m.trainIdx].pt;
-        p.x -= img2.cols * 0.5f;
-        p.y -= img2.rows * 0.5f;
+        p.x -= features2.img_size.width * 0.5f;
+        p.y -= features2.img_size.height * 0.5f;
         dst_points.at<Point2f>(0, inlier_idx) = p;
 
         inlier_idx++;
diff --git a/modules/stitching/matchers.hpp b/modules/stitching/matchers.hpp
index f7e6537..317e882 100644
--- a/modules/stitching/matchers.hpp
+++ b/modules/stitching/matchers.hpp
@@ -7,6 +7,7 @@
 
 struct ImageFeatures
 {
+    cv::Size img_size;
     cv::Mat hist;
     std::vector<cv::KeyPoint> keypoints;
     cv::Mat descriptors;
@@ -17,10 +18,10 @@
 class FeaturesFinder
 {
 public:
     virtual ~FeaturesFinder() {}
-    void operator ()(const std::vector<cv::Mat> &images, std::vector<ImageFeatures> &features);
+    void operator ()(const cv::Mat &image, ImageFeatures &features);
 
 protected:
-    virtual void find(const std::vector<cv::Mat> &images, std::vector<ImageFeatures> &features) = 0;
+    virtual void find(const cv::Mat &image, ImageFeatures &features) = 0;
 };
 
@@ -32,7 +33,7 @@ public:
                        int num_octaves_descr = 4, int num_layers_descr = 2);
 
 protected:
-    void find(const std::vector<cv::Mat> &images, std::vector<ImageFeatures> &features);
+    void find(const cv::Mat &image, ImageFeatures &features);
 
     cv::Ptr<FeaturesFinder> impl_;
 };
 
@@ -57,14 +58,12 @@
 class FeaturesMatcher
 {
 public:
     virtual ~FeaturesMatcher() {}
-    void operator ()(const cv::Mat &img1, const ImageFeatures &features1, const cv::Mat &img2, const ImageFeatures &features2,
-                     MatchesInfo& matches_info) { match(img1, features1, img2, features2, matches_info); }
-    void operator ()(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
-                     std::vector<MatchesInfo> &pairwise_matches);
+    void operator ()(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
+    { match(features1, features2, matches_info); }
+    void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches);
 
 protected:
-    virtual void match(const cv::Mat &img1, const ImageFeatures &features1, const cv::Mat &img2, const ImageFeatures &features2,
-                       MatchesInfo& matches_info) = 0;
+    virtual void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info) = 0;
 };
 
@@ -74,8 +73,7 @@ public:
     BestOf2NearestMatcher(bool try_use_gpu = true, float match_conf = 0.55f, int num_matches_thresh1 = 6, int num_matches_thresh2 = 6);
 
 protected:
-    void match(const cv::Mat &img1, const ImageFeatures &features1, const cv::Mat &img2, const ImageFeatures &features2,
-               MatchesInfo &matches_info);
+    void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info);
 
     int num_matches_thresh1_;
     int num_matches_thresh2_;
diff --git a/modules/stitching/motion_estimators.cpp b/modules/stitching/motion_estimators.cpp
index 341cf1f..9be37f9 100644
--- a/modules/stitching/motion_estimators.cpp
+++ b/modules/stitching/motion_estimators.cpp
@@ -64,13 +64,13 @@ struct CalcRotation
 };
 
-void HomographyBasedEstimator::estimate(const vector<Mat> &images, const vector<ImageFeatures> &features,
-                                        const vector<MatchesInfo> &pairwise_matches, vector<CameraParams> &cameras)
+void HomographyBasedEstimator::estimate(const vector<ImageFeatures> &features, const vector<MatchesInfo> &pairwise_matches,
+                                        vector<CameraParams> &cameras)
 {
-    const int num_images = static_cast<int>(images.size());
+    const int num_images = static_cast<int>(features.size());
 
     // Estimate focal length and set it for all cameras
-    double focal = estimateFocal(images, features, pairwise_matches);
+    double focal = estimateFocal(features, pairwise_matches);
     cameras.resize(num_images);
     for (int i = 0; i < num_images; ++i)
         cameras[i].focal = focal;
 
@@ -85,11 +85,10 @@ void HomographyBasedEstimator::estimate
 
 //////////////////////////////////////////////////////////////////////////////
 
-void BundleAdjuster::estimate(const vector<Mat> &images, const vector<ImageFeatures> &features,
-                              const vector<MatchesInfo> &pairwise_matches, vector<CameraParams> &cameras)
+void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vector<MatchesInfo> &pairwise_matches,
+                              vector<CameraParams> &cameras)
 {
-    num_images_ = static_cast<int>(images.size());
-    images_ = &images[0];
+    num_images_ = static_cast<int>(features.size());
     features_ = &features[0];
     pairwise_matches_ = &pairwise_matches[0];
 
@@ -227,11 +226,11 @@ void BundleAdjuster::calcError(Mat &err)
             const DMatch& m = matches_info.matches[k];
 
             Point2d kp1 = features1.keypoints[m.queryIdx].pt;
-            kp1.x -= 0.5 * images_[i].cols;
-            kp1.y -= 0.5 * images_[i].rows;
+            kp1.x -= 0.5 * features1.img_size.width;
+            kp1.y -= 0.5 * features1.img_size.height;
             Point2d kp2 = features2.keypoints[m.trainIdx].pt;
-            kp2.x -= 0.5 * images_[j].cols;
-            kp2.y -= 0.5 * images_[j].rows;
+            kp2.x -= 0.5 * features2.img_size.width;
+            kp2.y -= 0.5 * features2.img_size.height;
             double len1 = sqrt(kp1.x * kp1.x + kp1.y * kp1.y + f1 * f1);
             double len2 = sqrt(kp2.x * kp2.x + kp2.y * kp2.y + f2 * f2);
             Point3d p1(kp1.x / len1, kp1.y / len1, f1 / len1);
@@ -346,10 +345,10 @@ void waveCorrect(vector<Mat> &rmats)
 
 //////////////////////////////////////////////////////////////////////////////
 
-vector<int> leaveBiggestComponent(vector<Mat> &images, vector<ImageFeatures> &features,
-                                  vector<MatchesInfo> &pairwise_matches, float conf_threshold)
+vector<int> leaveBiggestComponent(vector<ImageFeatures> &features, vector<MatchesInfo> &pairwise_matches,
+                                  float conf_threshold)
 {
-    const int num_images = static_cast<int>(images.size());
+    const int num_images = static_cast<int>(features.size());
 
     DjSets comps(num_images);
     for (int i = 0; i < num_images; ++i)
@@ -375,12 +374,10 @@ vector<int> leaveBiggestComponent
     else
         indices_removed.push_back(i);
 
-    vector<Mat> images_subset;
     vector<ImageFeatures> features_subset;
     vector<MatchesInfo> pairwise_matches_subset;
     for (size_t i = 0; i < indices.size(); ++i)
     {
-        images_subset.push_back(images[indices[i]]);
         features_subset.push_back(features[indices[i]]);
         for (size_t j = 0; j < indices.size(); ++j)
         {
@@ -390,7 +387,7 @@ vector<int> leaveBiggestComponent
         }
     }
 
-    if (static_cast<int>(images_subset.size()) == num_images)
+    if (static_cast<int>(features_subset.size()) == num_images)
         return indices;
 
     LOG("Removed some images, because can't match them: (");
@@ -398,7 +395,6 @@ vector<int> leaveBiggestComponent
     for (size_t i = 1; i < indices_removed.size(); ++i)
         LOG(", " << indices_removed[i]);
     LOGLN(")");
-    images = images_subset;
     features = features_subset;
     pairwise_matches = pairwise_matches_subset;
 
diff --git a/modules/stitching/motion_estimators.hpp b/modules/stitching/motion_estimators.hpp
index 3bc3f74..88a860f 100644
--- a/modules/stitching/motion_estimators.hpp
+++ b/modules/stitching/motion_estimators.hpp
@@ -21,15 +21,15 @@ struct CameraParams
 class Estimator
 {
 public:
-    void operator ()(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
-                     const std::vector<MatchesInfo> &pairwise_matches, std::vector<CameraParams> &cameras)
+    void operator ()(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
+                     std::vector<CameraParams> &cameras)
     {
-        estimate(images, features, pairwise_matches, cameras);
+        estimate(features, pairwise_matches, cameras);
     }
 
protected:
-    virtual void estimate(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
-                          const std::vector<MatchesInfo> &pairwise_matches, std::vector<CameraParams> &cameras) = 0;
+    virtual void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
+                          std::vector<CameraParams> &cameras) = 0;
 };
 
@@ -40,8 +40,8 @@ public:
     bool isFocalsEstimated() const { return is_focals_estimated_; }
 
private:
-    void estimate(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
-                  const std::vector<MatchesInfo> &pairwise_matches, std::vector<CameraParams> &cameras);
+    void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
+                  std::vector<CameraParams> &cameras);
 
     bool is_focals_estimated_;
 };
 
@@ -56,15 +56,14 @@ public:
         : cost_space_(cost_space), conf_thresh_(conf_thresh) {}
 
private:
-    void estimate(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
-                  const std::vector<MatchesInfo> &pairwise_matches, std::vector<CameraParams> &cameras);
+    void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
+                  std::vector<CameraParams> &cameras);
 
     void calcError(cv::Mat &err);
     void calcJacobian();
 
     int num_images_;
     int total_num_matches_;
-    const cv::Mat *images_;
     const ImageFeatures *features_;
     const MatchesInfo *pairwise_matches_;
     cv::Mat cameras_;
@@ -83,8 +82,8 @@ void waveCorrect(std::vector<cv::Mat> &rmats);
 //////////////////////////////////////////////////////////////////////////////
 // Auxiliary functions
 
-std::vector<int> leaveBiggestComponent(std::vector<cv::Mat> &images, std::vector<ImageFeatures> &features,
-                                       std::vector<MatchesInfo> &pairwise_matches, float conf_threshold);
+std::vector<int> leaveBiggestComponent(std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
+                                       float conf_threshold);
 
 void findMaxSpanningTree(int num_images, const std::vector<MatchesInfo> &pairwise_matches,
                          Graph &span_tree, std::vector<int> &centers);
-- 
2.7.4
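
Editor's note: the snippet below is an illustrative usage sketch, not part of the patch. It shows how the refactored per-image interfaces above (finder taking a single Mat, matcher and estimators taking only ImageFeatures) fit together, following the new main.cpp; the function name and parameter choices are placeholders.

    #include <string>
    #include <vector>
    #include <opencv2/opencv.hpp>
    #include "matchers.hpp"
    #include "motion_estimators.hpp"

    // Features are computed one image at a time; ImageFeatures now carries
    // img_size, so later stages no longer need the images themselves.
    void stitch_pipeline_sketch(const std::vector<std::string> &img_names)
    {
        int num_images = static_cast<int>(img_names.size());

        std::vector<ImageFeatures> features(num_images);
        SurfFeaturesFinder finder(true /* try_use_gpu */);
        for (int i = 0; i < num_images; ++i)
        {
            cv::Mat img = cv::imread(img_names[i]);
            finder(img, features[i]);   // fills img_size, hist, keypoints, descriptors
        }

        // Pairwise matching and motion estimation consume only the feature vector.
        std::vector<MatchesInfo> pairwise_matches;
        BestOf2NearestMatcher matcher(true);
        matcher(features, pairwise_matches);

        std::vector<CameraParams> cameras;
        HomographyBasedEstimator estimator;
        estimator(features, pairwise_matches, cameras);
    }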