}
-double estimateFocal(const vector<Mat> &images, const vector<ImageFeatures> &/*features*/,
- const vector<MatchesInfo> &pairwise_matches)
+double estimateFocal(const vector<ImageFeatures> &features, const vector<MatchesInfo> &pairwise_matches)
{
- const int num_images = static_cast<int>(images.size());
+ const int num_images = static_cast<int>(features.size());
vector<double> focals;
for (int src_idx = 0; src_idx < num_images; ++src_idx)
LOGLN("Can't estimate focal length, will use naive approach");
double focals_sum = 0;
for (int i = 0; i < num_images; ++i)
- focals_sum += images[i].rows + images[i].cols;
+ focals_sum += features[i].img_size.width + features[i].img_size.height;
return focals_sum / num_images;
}
// by Heung-Yeung Shum and Richard Szeliski.
void focalsFromHomography(const cv::Mat &H, double &f0, double &f1, bool &f0_ok, bool &f1_ok);
-double estimateFocal(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
- const std::vector<MatchesInfo> &pairwise_matches);
+double estimateFocal(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches);
#endif // __OPENCV_AUTOCALIB_HPP__
cv::setBreakOnError(true);\r
\r
vector<string> img_names;\r
- vector<Mat> images;\r
\r
// Default parameters\r
bool trygpu = false;\r
i++;\r
}\r
else\r
- {\r
img_names.push_back(argv[i]);\r
- Mat full_img = imread(argv[i]);\r
- if (full_img.empty())\r
- {\r
- cout << "Can't open image " << argv[i] << endl;\r
- return -1;\r
- }\r
- if (work_megapix < 0)\r
- images.push_back(full_img);\r
- else\r
- {\r
- if (!is_work_scale_set)\r
- {\r
- work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area())); \r
- is_work_scale_set = true;\r
- }\r
- Mat img;\r
- resize(full_img, img, Size(), work_scale, work_scale);\r
- images.push_back(img);\r
- }\r
- }\r
}\r
LOGLN("Parsing params and reading images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");\r
\r
- int num_images = static_cast<int>(images.size());\r
+ int num_images = static_cast<int>(img_names.size());\r
if (num_images < 2)\r
{\r
cout << "Need more images\n";\r
\r
t = getTickCount();\r
LOGLN("Finding features...");\r
- vector<ImageFeatures> features;\r
+ vector<ImageFeatures> features(num_images);\r
SurfFeaturesFinder finder(trygpu);\r
- finder(images, features);\r
+ Mat full_img, img;\r
+ for (int i = 0; i < num_images; ++i)\r
+ {\r
+ full_img = imread(img_names[i]);\r
+ if (full_img.empty())\r
+ {\r
+ cout << "Can't open image " << img_names[i] << endl;\r
+ return -1;\r
+ }\r
+ if (work_megapix < 0)\r
+ img = full_img;\r
+ else\r
+ {\r
+ if (!is_work_scale_set)\r
+ {\r
+ work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area())); \r
+ is_work_scale_set = true;\r
+ }\r
+ resize(full_img, img, Size(), work_scale, work_scale);\r
+ }\r
+ finder(img, features[i]);\r
+ }\r
LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");\r
\r
t = getTickCount();\r
BestOf2NearestMatcher matcher(trygpu);\r
if (user_match_conf)\r
matcher = BestOf2NearestMatcher(trygpu, match_conf);\r
- matcher(images, features, pairwise_matches);\r
+ matcher(features, pairwise_matches);\r
LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");\r
\r
- vector<int> indices = leaveBiggestComponent(images, features, pairwise_matches, conf_thresh);\r
+ vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);\r
vector<string> img_names_subset;\r
for (size_t i = 0; i < indices.size(); ++i)\r
img_names_subset.push_back(img_names[indices[i]]);\r
img_names = img_names_subset;\r
\r
- num_images = static_cast<int>(images.size());\r
+ num_images = static_cast<int>(img_names.size());\r
if (num_images < 2)\r
{\r
cout << "Need more images\n";\r
LOGLN("Estimating rotations...");\r
HomographyBasedEstimator estimator;\r
vector<CameraParams> cameras;\r
- estimator(images, features, pairwise_matches, cameras);\r
+ estimator(features, pairwise_matches, cameras);\r
LOGLN("Estimating rotations, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");\r
\r
for (size_t i = 0; i < cameras.size(); ++i)\r
t = getTickCount();\r
LOGLN("Bundle adjustment... ");\r
BundleAdjuster adjuster(ba_space, conf_thresh);\r
- adjuster(images, features, pairwise_matches, cameras);\r
+ adjuster(features, pairwise_matches, cameras);\r
LOGLN("Bundle adjustment, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");\r
\r
if (wave_correct)\r
nth_element(focals.begin(), focals.end(), focals.begin() + focals.size() / 2);\r
float camera_focal = static_cast<float>(focals[focals.size() / 2]);\r
\r
- if ((work_megapix > 0 || compose_megapix > 0) \r
- && abs(work_megapix - compose_megapix) > 1e-3)\r
+ t = getTickCount();\r
+ vector<Mat> images(num_images);\r
+ LOGLN("Compose scaling...");\r
+ for (int i = 0; i < num_images; ++i)\r
{\r
- t = getTickCount();\r
- LOGLN("Compose scaling...");\r
- for (int i = 0; i < num_images; ++i)\r
+ Mat full_img = imread(img_names[i]);\r
+ if (!is_compose_scale_set)\r
{\r
- Mat full_img = imread(img_names[i]);\r
- if (!is_compose_scale_set)\r
- {\r
- compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area())); \r
- is_compose_scale_set = true;\r
- }\r
- Mat img;\r
- resize(full_img, img, Size(), compose_scale, compose_scale);\r
- images[i] = img;\r
- cameras[i].focal *= compose_scale / work_scale;\r
+ compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area())); \r
+ is_compose_scale_set = true;\r
}\r
- camera_focal *= static_cast<float>(compose_scale / work_scale);\r
- LOGLN("Compose scaling, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");\r
+ Mat img;\r
+ resize(full_img, img, Size(), compose_scale, compose_scale);\r
+ images[i] = img;\r
+ cameras[i].focal *= compose_scale / work_scale;\r
}\r
+ camera_focal *= static_cast<float>(compose_scale / work_scale);\r
+ LOGLN("Compose scaling, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");\r
\r
vector<Mat> masks(num_images);\r
for (int i = 0; i < num_images; ++i)\r
//////////////////////////////////////////////////////////////////////////////
-void FeaturesFinder::operator ()(const vector<Mat> &images, vector<ImageFeatures> &features)
+void FeaturesFinder::operator ()(const Mat &image, ImageFeatures &features)
{
- features.resize(images.size());
-
- // Calculate histograms
- for (size_t i = 0; i < images.size(); ++i)
- {
- Mat hsv;
- cvtColor(images[i], hsv, CV_BGR2HSV);
- int hbins = 30, sbins = 32, vbins = 30;\r
- int hist_size[] = { hbins, sbins, vbins };\r
- float hranges[] = { 0, 180 };\r
- float sranges[] = { 0, 256 };\r
- float vranges[] = { 0, 256 };\r
- const float* ranges[] = { hranges, sranges, vranges };
- int channels[] = { 0, 1, 2 };
- calcHist(&hsv, 1, channels, Mat(), features[i].hist, 3, hist_size, ranges);
- }
-
- find(images, features);
+ features.img_size = image.size();
+
+ // Calculate histogram
+ Mat hsv;
+ cvtColor(image, hsv, CV_BGR2HSV);
+ int hbins = 30, sbins = 32, vbins = 30;\r
+ int hist_size[] = { hbins, sbins, vbins };\r
+ float hranges[] = { 0, 180 };\r
+ float sranges[] = { 0, 256 };\r
+ float vranges[] = { 0, 256 };\r
+ const float* ranges[] = { hranges, sranges, vranges };
+ int channels[] = { 0, 1, 2 };
+ calcHist(&hsv, 1, channels, Mat(), features.hist, 3, hist_size, ranges);
+
+ find(image, features);
}
//////////////////////////////////////////////////////////////////////////////
}
protected:
- void find(const vector<Mat> &images, vector<ImageFeatures> &features);
+ void find(const Mat &image, ImageFeatures &features);
private:
Ptr<FeatureDetector> detector_;
Ptr<DescriptorExtractor> extractor_;
};
- void CpuSurfFeaturesFinder::find(const vector<Mat> &images, vector<ImageFeatures> &features)
+ void CpuSurfFeaturesFinder::find(const Mat &image, ImageFeatures &features)
{
- // Make images gray
- vector<Mat> gray_images(images.size());
- for (size_t i = 0; i < images.size(); ++i)
- {
- CV_Assert(images[i].depth() == CV_8U);
- cvtColor(images[i], gray_images[i], CV_BGR2GRAY);
- }
-
- features.resize(images.size());
-
- // Find keypoints in all images
- for (size_t i = 0; i < images.size(); ++i)
- {
- detector_->detect(gray_images[i], features[i].keypoints);
- extractor_->compute(gray_images[i], features[i].keypoints, features[i].descriptors);
- }
+ Mat gray_image;
+ CV_Assert(image.depth() == CV_8U);
+ cvtColor(image, gray_image, CV_BGR2GRAY);
+ detector_->detect(gray_image, features.keypoints);
+ extractor_->compute(gray_image, features.keypoints, features.descriptors);
}
class GpuSurfFeaturesFinder : public FeaturesFinder
}
protected:
- void find(const vector<Mat> &images, vector<ImageFeatures> &features);
+ void find(const Mat &image, ImageFeatures &features);
private:
SURF_GPU surf_;
int num_octaves_descr_, num_layers_descr_;
};
- void GpuSurfFeaturesFinder::find(const vector<Mat> &images, vector<ImageFeatures> &features)
+ void GpuSurfFeaturesFinder::find(const Mat &image, ImageFeatures &features)
{
- // Make images gray
- vector<GpuMat> gray_images(images.size());
- for (size_t i = 0; i < images.size(); ++i)
- {
- CV_Assert(images[i].depth() == CV_8U);
- cvtColor(GpuMat(images[i]), gray_images[i], CV_BGR2GRAY);
- }
+ GpuMat gray_image;
+ CV_Assert(image.depth() == CV_8U);
+ cvtColor(GpuMat(image), gray_image, CV_BGR2GRAY);
- features.resize(images.size());
-
- // Find keypoints in all images
GpuMat d_keypoints;
GpuMat d_descriptors;
- for (size_t i = 0; i < images.size(); ++i)
- {
- surf_.nOctaves = num_octaves_;
- surf_.nOctaveLayers = num_layers_;
- surf_(gray_images[i], GpuMat(), d_keypoints);
+ surf_.nOctaves = num_octaves_;
+ surf_.nOctaveLayers = num_layers_;
+ surf_(gray_image, GpuMat(), d_keypoints);
- surf_.nOctaves = num_octaves_descr_;
- surf_.nOctaveLayers = num_layers_descr_;
- surf_(gray_images[i], GpuMat(), d_keypoints, d_descriptors, true);
+ surf_.nOctaves = num_octaves_descr_;
+ surf_.nOctaveLayers = num_layers_descr_;
+ surf_(gray_image, GpuMat(), d_keypoints, d_descriptors, true);
+ surf_.downloadKeypoints(d_keypoints, features.keypoints);
- surf_.downloadKeypoints(d_keypoints, features[i].keypoints);
- d_descriptors.download(features[i].descriptors);
- }
+ d_descriptors.download(features.descriptors);
}
}
}
-void SurfFeaturesFinder::find(const vector<Mat> &images, vector<ImageFeatures> &features)
+void SurfFeaturesFinder::find(const Mat &image, ImageFeatures &features)
{
- (*impl_)(images, features);
+ (*impl_)(image, features);
}
//////////////////////////////////////////////////////////////////////////////
-void FeaturesMatcher::operator ()(const vector<Mat> &images, const vector<ImageFeatures> &features,
- vector<MatchesInfo> &pairwise_matches)
+void FeaturesMatcher::operator ()(const vector<ImageFeatures> &features, vector<MatchesInfo> &pairwise_matches)
{
- pairwise_matches.resize(images.size() * images.size());
- for (size_t i = 0; i < images.size(); ++i)
+ const int num_images = static_cast<int>(features.size());
+
+ pairwise_matches.resize(num_images * num_images);
+ for (int i = 0; i < num_images; ++i)
{
LOGLN("Processing image " << i << "... ");
- for (size_t j = i + 1; j < images.size(); ++j)
+ for (int j = i + 1; j < num_images; ++j)
{
// Save time by ignoring poor pairs
if (compareHist(features[i].hist, features[j].hist, CV_COMP_INTERSECT)
- < min(images[i].size().area(), images[j].size().area()) * 0.4)
- {
- //LOGLN("Ignoring (" << i << ", " << j << ") pair...");
+ < min(features[i].img_size.area(), features[j].img_size.area()) * 0.4)
continue;
- }
- size_t pair_idx = i * images.size() + j;
+ int pair_idx = i * num_images + j;
- (*this)(images[i], features[i], images[j], features[j], pairwise_matches[pair_idx]);
+ (*this)(features[i], features[j], pairwise_matches[pair_idx]);
pairwise_matches[pair_idx].src_img_idx = i;
pairwise_matches[pair_idx].dst_img_idx = j;
// Set up dual pair matches info
- size_t dual_pair_idx = j * images.size() + i;
+ int dual_pair_idx = j * num_images + i;\r
pairwise_matches[dual_pair_idx] = pairwise_matches[pair_idx];
pairwise_matches[dual_pair_idx].src_img_idx = j;
pairwise_matches[dual_pair_idx].dst_img_idx = i;
public:
inline CpuMatcher(float match_conf) : match_conf_(match_conf) {}
- void match(const cv::Mat&, const ImageFeatures &features1, const cv::Mat&, const ImageFeatures &features2, MatchesInfo& matches_info);
+ void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info);
private:
float match_conf_;
};
- void CpuMatcher::match(const cv::Mat&, const ImageFeatures &features1, const cv::Mat&, const ImageFeatures &features2, MatchesInfo& matches_info)
+ void CpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
{
matches_info.matches.clear();
public:
inline GpuMatcher(float match_conf) : match_conf_(match_conf) {}
- void match(const cv::Mat&, const ImageFeatures &features1, const cv::Mat&, const ImageFeatures &features2, MatchesInfo& matches_info);
+ void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info);
private:
float match_conf_;
GpuMat trainIdx_, distance_, allDist_;
};
- void GpuMatcher::match(const cv::Mat&, const ImageFeatures &features1, const cv::Mat&, const ImageFeatures &features2, MatchesInfo& matches_info)
+ void GpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
{
matches_info.matches.clear();
}
-void BestOf2NearestMatcher::match(const Mat &img1, const ImageFeatures &features1, const Mat &img2, const ImageFeatures &features2,
+void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFeatures &features2,
MatchesInfo &matches_info)
{
- (*impl_)(img1, features1, img2, features2, matches_info);
+ (*impl_)(features1, features2, matches_info);
// Check if it makes sense to find homography
if (matches_info.matches.size() < static_cast<size_t>(num_matches_thresh1_))
const DMatch& m = matches_info.matches[i];
Point2f p = features1.keypoints[m.queryIdx].pt;
- p.x -= img1.cols * 0.5f;
- p.y -= img1.rows * 0.5f;
+ p.x -= features1.img_size.width * 0.5f;
+ p.y -= features1.img_size.height * 0.5f;
src_points.at<Point2f>(0, i) = p;
p = features2.keypoints[m.trainIdx].pt;
- p.x -= img2.cols * 0.5f;
- p.y -= img2.rows * 0.5f;
+ p.x -= features2.img_size.width * 0.5f;
+ p.y -= features2.img_size.height * 0.5f;
dst_points.at<Point2f>(0, i) = p;
}
const DMatch& m = matches_info.matches[i];
Point2f p = features1.keypoints[m.queryIdx].pt;
- p.x -= img1.cols * 0.5f;
- p.y -= img2.rows * 0.5f;
+ p.x -= features1.img_size.width * 0.5f;
+ p.y -= features1.img_size.height * 0.5f;
src_points.at<Point2f>(0, inlier_idx) = p;
p = features2.keypoints[m.trainIdx].pt;
- p.x -= img2.cols * 0.5f;
- p.y -= img2.rows * 0.5f;
+ p.x -= features2.img_size.width * 0.5f;
+ p.y -= features2.img_size.height * 0.5f;
dst_points.at<Point2f>(0, inlier_idx) = p;
inlier_idx++;
\r
struct ImageFeatures
{
+ cv::Size img_size;
cv::Mat hist;
std::vector<cv::KeyPoint> keypoints;
cv::Mat descriptors;
{
public:
virtual ~FeaturesFinder() {}
- void operator ()(const std::vector<cv::Mat> &images, std::vector<ImageFeatures> &features);
+ void operator ()(const cv::Mat &image, ImageFeatures &features);
protected:
- virtual void find(const std::vector<cv::Mat> &images, std::vector<ImageFeatures> &features) = 0;
+ virtual void find(const cv::Mat &image, ImageFeatures &features) = 0;
};
int num_octaves_descr = 4, int num_layers_descr = 2);
protected:
- void find(const std::vector<cv::Mat> &images, std::vector<ImageFeatures> &features);
+ void find(const cv::Mat &image, ImageFeatures &features);
cv::Ptr<FeaturesFinder> impl_;
};
{
public:
virtual ~FeaturesMatcher() {}
- void operator ()(const cv::Mat &img1, const ImageFeatures &features1, const cv::Mat &img2, const ImageFeatures &features2,
- MatchesInfo& matches_info) { match(img1, features1, img2, features2, matches_info); }
- void operator ()(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
- std::vector<MatchesInfo> &pairwise_matches);
+ void operator ()(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info)
+ { match(features1, features2, matches_info); }
+ void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches);
protected:
- virtual void match(const cv::Mat &img1, const ImageFeatures &features1, const cv::Mat &img2, const ImageFeatures &features2,
- MatchesInfo& matches_info) = 0;
+ virtual void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info) = 0;
};
BestOf2NearestMatcher(bool try_use_gpu = true, float match_conf = 0.55f, int num_matches_thresh1 = 6, int num_matches_thresh2 = 6);
protected:
- void match(const cv::Mat &img1, const ImageFeatures &features1, const cv::Mat &img2, const ImageFeatures &features2,
- MatchesInfo &matches_info);
+ void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info);
int num_matches_thresh1_;
int num_matches_thresh2_;
};
-void HomographyBasedEstimator::estimate(const vector<Mat> &images, const vector<ImageFeatures> &features,
- const vector<MatchesInfo> &pairwise_matches, vector<CameraParams> &cameras)
+void HomographyBasedEstimator::estimate(const vector<ImageFeatures> &features, const vector<MatchesInfo> &pairwise_matches,
+ vector<CameraParams> &cameras)
{
- const int num_images = static_cast<int>(images.size());
+ const int num_images = static_cast<int>(features.size());
// Estimate focal length and set it for all cameras
- double focal = estimateFocal(images, features, pairwise_matches);
+ double focal = estimateFocal(features, pairwise_matches);
cameras.resize(num_images);
for (int i = 0; i < num_images; ++i)
cameras[i].focal = focal;
//////////////////////////////////////////////////////////////////////////////
-void BundleAdjuster::estimate(const vector<Mat> &images, const vector<ImageFeatures> &features,
- const vector<MatchesInfo> &pairwise_matches, vector<CameraParams> &cameras)
+void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vector<MatchesInfo> &pairwise_matches,
+ vector<CameraParams> &cameras)
{
- num_images_ = static_cast<int>(images.size());
- images_ = &images[0];
+ num_images_ = static_cast<int>(features.size());
features_ = &features[0];
pairwise_matches_ = &pairwise_matches[0];
const DMatch& m = matches_info.matches[k];
Point2d kp1 = features1.keypoints[m.queryIdx].pt;
- kp1.x -= 0.5 * images_[i].cols;
- kp1.y -= 0.5 * images_[i].rows;
+ kp1.x -= 0.5 * features1.img_size.width;
+ kp1.y -= 0.5 * features1.img_size.height;
Point2d kp2 = features2.keypoints[m.trainIdx].pt;
- kp2.x -= 0.5 * images_[j].cols;
- kp2.y -= 0.5 * images_[j].rows;
+ kp2.x -= 0.5 * features2.img_size.width;
+ kp2.y -= 0.5 * features2.img_size.height;
double len1 = sqrt(kp1.x * kp1.x + kp1.y * kp1.y + f1 * f1);
double len2 = sqrt(kp2.x * kp2.x + kp2.y * kp2.y + f2 * f2);
Point3d p1(kp1.x / len1, kp1.y / len1, f1 / len1);
//////////////////////////////////////////////////////////////////////////////
-vector<int> leaveBiggestComponent(vector<Mat> &images, vector<ImageFeatures> &features,
- vector<MatchesInfo> &pairwise_matches, float conf_threshold)
+vector<int> leaveBiggestComponent(vector<ImageFeatures> &features, vector<MatchesInfo> &pairwise_matches,
+ float conf_threshold)
{
- const int num_images = static_cast<int>(images.size());
+ const int num_images = static_cast<int>(features.size());
DjSets comps(num_images);
for (int i = 0; i < num_images; ++i)
else
indices_removed.push_back(i);
- vector<Mat> images_subset;
vector<ImageFeatures> features_subset;
vector<MatchesInfo> pairwise_matches_subset;
for (size_t i = 0; i < indices.size(); ++i)
{
- images_subset.push_back(images[indices[i]]);
features_subset.push_back(features[indices[i]]);
for (size_t j = 0; j < indices.size(); ++j)
{
}
}
- if (static_cast<int>(images_subset.size()) == num_images)
+ if (static_cast<int>(features_subset.size()) == num_images)
return indices;
LOG("Removed some images, because can't match them: (");
for (size_t i = 1; i < indices_removed.size(); ++i) LOG(", " << indices_removed[i]);
LOGLN(")");
- images = images_subset;
features = features_subset;
pairwise_matches = pairwise_matches_subset;
class Estimator
{
public:
- void operator ()(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
- const std::vector<MatchesInfo> &pairwise_matches, std::vector<CameraParams> &cameras)
+ void operator ()(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
+ std::vector<CameraParams> &cameras)
{
- estimate(images, features, pairwise_matches, cameras);
+ estimate(features, pairwise_matches, cameras);
}
protected:
- virtual void estimate(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
- const std::vector<MatchesInfo> &pairwise_matches, std::vector<CameraParams> &cameras) = 0;
+ virtual void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
+ std::vector<CameraParams> &cameras) = 0;
};
bool isFocalsEstimated() const { return is_focals_estimated_; }
private:
- void estimate(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
- const std::vector<MatchesInfo> &pairwise_matches, std::vector<CameraParams> &cameras);
+ void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
+ std::vector<CameraParams> &cameras);
bool is_focals_estimated_;
};
: cost_space_(cost_space), conf_thresh_(conf_thresh) {}
private:
- void estimate(const std::vector<cv::Mat> &images, const std::vector<ImageFeatures> &features,
- const std::vector<MatchesInfo> &pairwise_matches, std::vector<CameraParams> &cameras);
+ void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches,
+ std::vector<CameraParams> &cameras);
void calcError(cv::Mat &err);
void calcJacobian();
int num_images_;
int total_num_matches_;
- const cv::Mat *images_;
const ImageFeatures *features_;
const MatchesInfo *pairwise_matches_;
cv::Mat cameras_;
//////////////////////////////////////////////////////////////////////////////
// Auxiliary functions
-std::vector<int> leaveBiggestComponent(std::vector<cv::Mat> &images, std::vector<ImageFeatures> &features,
- std::vector<MatchesInfo> &pairwise_matches, float conf_threshold);
+std::vector<int> leaveBiggestComponent(std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
+ float conf_threshold);
void findMaxSpanningTree(int num_images, const std::vector<MatchesInfo> &pairwise_matches,
Graph &span_tree, std::vector<int> ¢ers);