#include "opencv2/gpu/gpu.hpp"
#endif
-// TODO remove code duplications (in cpp too)
-
namespace cv
{
namespace videostab
const RansacParams &params = RansacParams::default2dMotion(MM_AFFINE),
float *rmse = 0, int *ninliers = 0);
-class CV_EXPORTS GlobalMotionEstimatorBase
+class CV_EXPORTS MotionEstimatorBase
{
public:
- virtual ~GlobalMotionEstimatorBase() {}
+ virtual ~MotionEstimatorBase() {}
+
+ virtual void setMotionModel(MotionModel val) { motionModel_ = val; }
+ virtual MotionModel motionModel() const { return motionModel_; }
+
+ virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) = 0;
+
+protected:
+ MotionEstimatorBase(MotionModel model) { setMotionModel(model); }
+
+private:
+ MotionModel motionModel_;
+};
+
+class CV_EXPORTS MotionEstimatorRansacL2 : public MotionEstimatorBase
+{
+public:
+ MotionEstimatorRansacL2(MotionModel model = MM_AFFINE);
+
+ void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
+ RansacParams ransacParams() const { return ransacParams_; }
+
+ void setMinInlierRatio(float val) { minInlierRatio_ = val; }
+ float minInlierRatio() const { return minInlierRatio_; }
+
+ virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0);
+
+private:
+ RansacParams ransacParams_;
+ float minInlierRatio_;
+};
+
+class CV_EXPORTS MotionEstimatorL1 : public MotionEstimatorBase
+{
+public:
+ MotionEstimatorL1(MotionModel model = MM_AFFINE);
+
+ virtual Mat estimate(InputArray points0, InputArray points1, bool *ok);
+
+private:
+ std::vector<double> obj_, collb_, colub_;
+ std::vector<double> elems_, rowlb_, rowub_;
+ std::vector<int> rows_, cols_;
+
+ void set(int row, int col, double coef)
+ {
+ rows_.push_back(row);
+ cols_.push_back(col);
+ elems_.push_back(coef);
+ }
+};
+
+class CV_EXPORTS ImageMotionEstimatorBase
+{
+public:
+ virtual ~ImageMotionEstimatorBase() {}
virtual void setMotionModel(MotionModel val) { motionModel_ = val; }
virtual MotionModel motionModel() const { return motionModel_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) = 0;
protected:
- GlobalMotionEstimatorBase(MotionModel model) { setMotionModel(model); }
+ ImageMotionEstimatorBase(MotionModel model) { setMotionModel(model); }
+private:
MotionModel motionModel_;
};
-class CV_EXPORTS FromFileMotionReader : public GlobalMotionEstimatorBase
+class CV_EXPORTS FromFileMotionReader : public ImageMotionEstimatorBase
{
public:
FromFileMotionReader(const std::string &path);
std::ifstream file_;
};
-class CV_EXPORTS ToFileMotionWriter : public GlobalMotionEstimatorBase
+class CV_EXPORTS ToFileMotionWriter : public ImageMotionEstimatorBase
{
public:
- ToFileMotionWriter(const std::string &path, Ptr<GlobalMotionEstimatorBase> estimator);
+ ToFileMotionWriter(const std::string &path, Ptr<ImageMotionEstimatorBase> estimator);
+
+ virtual void setMotionModel(MotionModel val) { motionEstimator_->setMotionModel(val); }
+ virtual MotionModel motionModel() const { return motionEstimator_->motionModel(); }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);
private:
std::ofstream file_;
- Ptr<GlobalMotionEstimatorBase> estimator_;
+ Ptr<ImageMotionEstimatorBase> motionEstimator_;
};
-class CV_EXPORTS RansacMotionEstimator : public GlobalMotionEstimatorBase
+class CV_EXPORTS KeypointBasedMotionEstimator : public ImageMotionEstimatorBase
{
public:
- RansacMotionEstimator(MotionModel model = MM_AFFINE);
+ KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator);
+
+ virtual void setMotionModel(MotionModel val) { motionEstimator_->setMotionModel(val); }
+ virtual MotionModel motionModel() const { return motionEstimator_->motionModel(); }
void setDetector(Ptr<FeatureDetector> val) { detector_ = val; }
Ptr<FeatureDetector> detector() const { return detector_; }
- void setOptFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
- Ptr<ISparseOptFlowEstimator> optFlowEstimator() const { return optFlowEstimator_; }
-
- void setGridSize(Size val) { gridSize_ = val; }
- Size gridSize() const { return gridSize_; }
-
- void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
- RansacParams ransacParams() const { return ransacParams_; }
+ void setOpticalFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
+ Ptr<ISparseOptFlowEstimator> opticalFlowEstimator() const { return optFlowEstimator_; }
void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }
- void setMinInlierRatio(float val) { minInlierRatio_ = val; }
- float minInlierRatio() const { return minInlierRatio_; }
-
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);
private:
+ Ptr<MotionEstimatorBase> motionEstimator_;
Ptr<FeatureDetector> detector_;
Ptr<ISparseOptFlowEstimator> optFlowEstimator_;
- Size gridSize_;
- RansacParams ransacParams_;
Ptr<IOutlierRejector> outlierRejector_;
- float minInlierRatio_;
std::vector<uchar> status_;
std::vector<KeyPoint> keypointsPrev_;
};
#if HAVE_OPENCV_GPU
-class CV_EXPORTS RansacMotionEstimatorGpu : public GlobalMotionEstimatorBase
+class CV_EXPORTS KeypointBasedMotionEstimatorGpu : public ImageMotionEstimatorBase
{
public:
- RansacMotionEstimatorGpu(MotionModel model = MM_AFFINE);
+ KeypointBasedMotionEstimatorGpu(Ptr<MotionEstimatorBase> estimator);
- void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
- RansacParams ransacParams() const { return ransacParams_; }
+ virtual void setMotionModel(MotionModel val) { motionEstimator_->setMotionModel(val); }
+ virtual MotionModel motionModel() const { return motionEstimator_->motionModel(); }
void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }
- void setMinInlierRatio(float val) { minInlierRatio_ = val; }
- float minInlierRatio() const { return minInlierRatio_; }
-
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0);
Mat estimate(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, bool *ok = 0);
private:
+ Ptr<MotionEstimatorBase> motionEstimator_;
gpu::GoodFeaturesToTrackDetector_GPU detector_;
SparsePyrLkOptFlowEstimatorGpu optFlowEstimator_;
- RansacParams ransacParams_;
Ptr<IOutlierRejector> outlierRejector_;
- float minInlierRatio_;
gpu::GpuMat frame0_, grayFrame0_, frame1_;
gpu::GpuMat pointsPrev_, points_;
};
#endif
-class CV_EXPORTS LpBasedMotionEstimator : public GlobalMotionEstimatorBase
-{
-public:
- LpBasedMotionEstimator(MotionModel model = MM_AFFINE);
-
- void setDetector(Ptr<FeatureDetector> val) { detector_ = val; }
- Ptr<FeatureDetector> detector() const { return detector_; }
-
- void setOptFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
- Ptr<ISparseOptFlowEstimator> optFlowEstimator() const { return optFlowEstimator_; }
-
- void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
- Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }
-
- virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok);
-
-private:
- Ptr<FeatureDetector> detector_;
- Ptr<ISparseOptFlowEstimator> optFlowEstimator_;
- Ptr<IOutlierRejector> outlierRejector_;
-
- std::vector<uchar> status_;
- std::vector<KeyPoint> keypointsPrev_;
- std::vector<Point2f> pointsPrev_, points_;
- std::vector<Point2f> pointsPrevGood_, pointsGood_;
-
- std::vector<double> obj_, collb_, colub_;
- std::vector<int> rows_, cols_;
- std::vector<double> elems_, rowlb_, rowub_;
-
- void set(int row, int col, double coef)
- {
- rows_.push_back(row);
- cols_.push_back(col);
- elems_.push_back(coef);
- }
-};
-
CV_EXPORTS Mat getMotion(int from, int to, const std::vector<Mat> &motions);
} // namespace videostab
}
+MotionEstimatorRansacL2::MotionEstimatorRansacL2(MotionModel model)
+ : MotionEstimatorBase(model)
+{
+ setRansacParams(RansacParams::default2dMotion(model));
+ setMinInlierRatio(0.1f);
+}
+
+
+Mat MotionEstimatorRansacL2::estimate(InputArray points0, InputArray points1, bool *ok)
+{
+ CV_Assert(points0.type() == points1.type());
+ const int npoints = points0.getMat().checkVector(2);
+ CV_Assert(points1.getMat().checkVector(2) == npoints);
+
+ // find motion
+
+ int ninliers = 0;
+ Mat_<float> M;
+
+ if (motionModel() != MM_HOMOGRAPHY)
+ M = estimateGlobalMotionRobust(
+ points0, points1, motionModel(), ransacParams_, 0, &ninliers);
+ else
+ {
+ vector<uchar> mask;
+ M = findHomography(points0, points1, mask, CV_RANSAC, ransacParams_.thresh);
+ for (int i = 0; i < npoints; ++i)
+ if (mask[i]) ninliers++;
+ }
+
+ // check if we're confident enough in estimated motion
+
+ if (ok) *ok = true;
+ if (static_cast<float>(ninliers) / npoints < minInlierRatio_)
+ {
+ M = Mat::eye(3, 3, CV_32F);
+ if (ok) *ok = false;
+ }
+
+ return M;
+}
+
+
+MotionEstimatorL1::MotionEstimatorL1(MotionModel model)
+ : MotionEstimatorBase(model)
+{
+}
+
+
+// TODO will estimation of all motions as one LP problem be faster?
+Mat MotionEstimatorL1::estimate(InputArray points0, InputArray points1, bool *ok)
+{
+ CV_Assert(points0.type() == points1.type());
+ const int npoints = points0.getMat().checkVector(2);
+ CV_Assert(points1.getMat().checkVector(2) == npoints);
+
+ const Point2f *points0_ = points0.getMat().ptr<Point2f>();
+ const Point2f *points1_ = points1.getMat().ptr<Point2f>();
+
+#ifndef HAVE_CLP
+
+ CV_Error(CV_StsError, "The library is built without Clp support");
+ if (ok) *ok = false;
+ return Mat::eye(3, 3, CV_32F);
+
+#else
+
+ CV_Assert(motionModel() <= MM_AFFINE && motionModel() != MM_RIGID);
+
+ // prepare LP problem
+
+ int ncols = 6 + 2*npoints;
+ int nrows = 4*npoints;
+
+ if (motionModel() == MM_SIMILARITY)
+ nrows += 2;
+ else if (motionModel() == MM_TRANSLATION_AND_SCALE)
+ nrows += 3;
+ else if (motionModel() == MM_TRANSLATION)
+ nrows += 4;
+
+ rows_.clear();
+ cols_.clear();
+ elems_.clear();
+ obj_.assign(ncols, 0);
+ collb_.assign(ncols, -INF);
+ colub_.assign(ncols, INF);
+
+ int c = 6;
+
+ for (int i = 0; i < npoints; ++i, c += 2)
+ {
+ obj_[c] = 1;
+ collb_[c] = 0;
+
+ obj_[c+1] = 1;
+ collb_[c+1] = 0;
+ }
+
+ elems_.clear();
+ rowlb_.assign(nrows, -INF);
+ rowub_.assign(nrows, INF);
+
+ int r = 0;
+ Point2f p0, p1;
+
+ for (int i = 0; i < npoints; ++i, r += 4)
+ {
+ p0 = points0_[i];
+ p1 = points1_[i];
+
+ set(r, 0, p0.x); set(r, 1, p0.y); set(r, 2, 1); set(r, 6+2*i, -1);
+ rowub_[r] = p1.x;
+
+ set(r+1, 3, p0.x); set(r+1, 4, p0.y); set(r+1, 5, 1); set(r+1, 6+2*i+1, -1);
+ rowub_[r+1] = p1.y;
+
+ set(r+2, 0, p0.x); set(r+2, 1, p0.y); set(r+2, 2, 1); set(r+2, 6+2*i, 1);
+ rowlb_[r+2] = p1.x;
+
+ set(r+3, 3, p0.x); set(r+3, 4, p0.y); set(r+3, 5, 1); set(r+3, 6+2*i+1, 1);
+ rowlb_[r+3] = p1.y;
+ }
+
+ if (motionModel() == MM_SIMILARITY)
+ {
+ set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
+ set(r+1, 1, 1); set(r+1, 3, 1); rowlb_[r+1] = rowub_[r+1] = 0;
+ }
+ else if (motionModel() == MM_TRANSLATION_AND_SCALE)
+ {
+ set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
+ set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
+ set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
+ }
+ else if (motionModel() == MM_TRANSLATION)
+ {
+ set(r, 0, 1); rowlb_[r] = rowub_[r] = 1;
+ set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
+ set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
+ set(r+3, 4, 1); rowlb_[r+3] = rowub_[r+3] = 1;
+ }
+
+ // solve
+
+ CoinPackedMatrix A(true, &rows_[0], &cols_[0], &elems_[0], elems_.size());
+ A.setDimensions(nrows, ncols);
+
+ ClpSimplex model(false);
+ model.loadProblem(A, &collb_[0], &colub_[0], &obj_[0], &rowlb_[0], &rowub_[0]);
+
+ ClpDualRowSteepest dualSteep(1);
+ model.setDualRowPivotAlgorithm(dualSteep);
+ model.scaling(1);
+
+ model.dual();
+
+ // extract motion
+
+ const double *sol = model.getColSolution();
+
+ Mat_<float> M = Mat::eye(3, 3, CV_32F);
+ M(0,0) = sol[0];
+ M(0,1) = sol[1];
+ M(0,2) = sol[2];
+ M(1,0) = sol[3];
+ M(1,1) = sol[4];
+ M(1,2) = sol[5];
+
+ if (ok) *ok = true;
+ return M;
+#endif
+}
+
+
FromFileMotionReader::FromFileMotionReader(const string &path)
- : GlobalMotionEstimatorBase(MM_UNKNOWN)
+ : ImageMotionEstimatorBase(MM_UNKNOWN)
{
file_.open(path.c_str());
CV_Assert(file_.is_open());
}
-ToFileMotionWriter::ToFileMotionWriter(const string &path, Ptr<GlobalMotionEstimatorBase> estimator)
- : GlobalMotionEstimatorBase(estimator->motionModel())
+ToFileMotionWriter::ToFileMotionWriter(const string &path, Ptr<ImageMotionEstimatorBase> estimator)
+ : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
file_.open(path.c_str());
CV_Assert(file_.is_open());
- estimator_ = estimator;
}
Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
bool ok_;
- Mat_<float> M = estimator_->estimate(frame0, frame1, &ok_);
+ Mat_<float> M = motionEstimator_->estimate(frame0, frame1, &ok_);
file_ << M(0,0) << " " << M(0,1) << " " << M(0,2) << " "
<< M(1,0) << " " << M(1,1) << " " << M(1,2) << " "
<< M(2,0) << " " << M(2,1) << " " << M(2,2) << " " << ok_ << endl;
}
-RansacMotionEstimator::RansacMotionEstimator(MotionModel model)
- : GlobalMotionEstimatorBase(model)
-{
+KeypointBasedMotionEstimator::KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator)
+ : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
+{
setDetector(new GoodFeaturesToTrackDetector());
- setOptFlowEstimator(new SparsePyrLkOptFlowEstimator());
- setGridSize(Size(0,0));
- setRansacParams(RansacParams::default2dMotion(model));
+ setOpticalFlowEstimator(new SparsePyrLkOptFlowEstimator());
setOutlierRejector(new NullOutlierRejector());
- setMinInlierRatio(0.1f);
}
-Mat RansacMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
+Mat KeypointBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
// find keypoints
-
detector_->detect(frame0, keypointsPrev_);
- // add extra keypoints
-
- if (gridSize_.width > 0 && gridSize_.height > 0)
- {
- float dx = static_cast<float>(frame0.cols) / (gridSize_.width + 1);
- float dy = static_cast<float>(frame0.rows) / (gridSize_.height + 1);
- for (int x = 0; x < gridSize_.width; ++x)
- for (int y = 0; y < gridSize_.height; ++y)
- keypointsPrev_.push_back(KeyPoint((x+1)*dx, (y+1)*dy, 0.f));
- }
-
// extract points from keypoints
-
pointsPrev_.resize(keypointsPrev_.size());
for (size_t i = 0; i < keypointsPrev_.size(); ++i)
pointsPrev_[i] = keypointsPrev_[i].pt;
// find correspondences
-
optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray());
// leave good correspondences only
}
}
- // perfrom outlier rejection
+ // perform outlier rejection
IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_);
if (!dynamic_cast<NullOutlierRejector*>(outlierRejector))
outlierRejector_->process(frame0.size(), pointsPrev_, points_, status_);
- pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
- pointsGood_.clear(); pointsGood_.reserve(points_.size());
+ pointsPrevGood_.clear();
+ pointsPrevGood_.reserve(points_.size());
+
+ pointsGood_.clear();
+ pointsGood_.reserve(points_.size());
for (size_t i = 0; i < points_.size(); ++i)
{
}
}
- size_t npoints = pointsGood_.size();
-
- // find motion
-
- int ninliers = 0;
- Mat_<float> M;
-
- if (motionModel_ != MM_HOMOGRAPHY)
- M = estimateGlobalMotionRobust(
- pointsPrevGood_, pointsGood_, motionModel_, ransacParams_, 0, &ninliers);
- else
- {
- vector<uchar> mask;
- M = findHomography(pointsPrevGood_, pointsGood_, mask, CV_RANSAC, ransacParams_.thresh);
- for (size_t i = 0; i < npoints; ++i)
- if (mask[i]) ninliers++;
- }
-
- // check if we're confident enough in estimated motion
-
- if (ok) *ok = true;
- if (static_cast<float>(ninliers) / npoints < minInlierRatio_)
- {
- M = Mat::eye(3, 3, CV_32F);
- if (ok) *ok = false;
- }
-
- return M;
+ // estimate motion
+ return motionEstimator_->estimate(pointsPrevGood_, pointsGood_, ok);
}
#if HAVE_OPENCV_GPU
-RansacMotionEstimatorGpu::RansacMotionEstimatorGpu(MotionModel model)
- : GlobalMotionEstimatorBase(model)
+KeypointBasedMotionEstimatorGpu::KeypointBasedMotionEstimatorGpu(Ptr<MotionEstimatorBase> estimator)
+ : ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
- CV_Assert(gpu::getCudaEnabledDeviceCount() > 0);
- setRansacParams(RansacParams::default2dMotion(model));
+ CV_Assert(gpu::getCudaEnabledDeviceCount() > 0);
setOutlierRejector(new NullOutlierRejector());
- setMinInlierRatio(0.1f);
}
-Mat RansacMotionEstimatorGpu::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
+Mat KeypointBasedMotionEstimatorGpu::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
frame0_.upload(frame0);
frame1_.upload(frame1);
}
-Mat RansacMotionEstimatorGpu::estimate(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, bool *ok)
+Mat KeypointBasedMotionEstimatorGpu::estimate(const gpu::GpuMat &frame0, const gpu::GpuMat &frame1, bool *ok)
{
// convert frame to gray if it's color
}
// find keypoints
-
detector_(grayFrame0, pointsPrev_);
// find correspondences
-
optFlowEstimator_.run(frame0, frame1, pointsPrev_, points_, status_);
// leave good correspondences only
-
gpu::compactPoints(pointsPrev_, points_, status_);
pointsPrev_.download(hostPointsPrev_);
points_.download(hostPoints_);
- // perfrom outlier rejection
+ // perform outlier rejection
IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_);
if (!dynamic_cast<NullOutlierRejector*>(outlierRejector))
{
outlierRejector_->process(frame0.size(), hostPointsPrev_, hostPoints_, rejectionStatus_);
- hostPointsPrevTmp_.clear(); hostPointsPrevTmp_.reserve(hostPoints_.cols);
- hostPointsTmp_.clear(); hostPointsTmp_.reserve(hostPoints_.cols);
+ hostPointsPrevTmp_.clear();
+ hostPointsPrevTmp_.reserve(hostPoints_.cols);
+
+ hostPointsTmp_.clear();
+ hostPointsTmp_.reserve(hostPoints_.cols);
for (int i = 0; i < hostPoints_.cols; ++i)
{
hostPoints_ = Mat(1, hostPointsTmp_.size(), CV_32FC2, &hostPointsTmp_[0]);
}
- // find motion
-
- int npoints = hostPoints_.cols;
- int ninliers = 0;
- Mat_<float> M;
-
- if (motionModel_ != MM_HOMOGRAPHY)
- M = estimateGlobalMotionRobust(
- hostPointsPrev_, hostPoints_, motionModel_, ransacParams_, 0, &ninliers);
- else
- {
- vector<uchar> mask;
- M = findHomography(hostPointsPrev_, hostPoints_, mask, CV_RANSAC, ransacParams_.thresh);
- for (int i = 0; i < npoints; ++i)
- if (mask[i]) ninliers++;
- }
-
- // check if we're confident enough in estimated motion
-
- if (ok) *ok = true;
- if (static_cast<float>(ninliers) / npoints < minInlierRatio_)
- {
- M = Mat::eye(3, 3, CV_32F);
- if (ok) *ok = false;
- }
-
- return M;
-}
-#endif // #if HAVE_OPENCV_GPU
-
-
-LpBasedMotionEstimator::LpBasedMotionEstimator(MotionModel model)
- : GlobalMotionEstimatorBase(model)
-{
- setDetector(new GoodFeaturesToTrackDetector());
- setOptFlowEstimator(new SparsePyrLkOptFlowEstimator());
- setOutlierRejector(new NullOutlierRejector());
-}
-
-// TODO will estimation of all motions as one LP problem be faster?
-
-Mat LpBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
-{
- // find keypoints
-
- detector_->detect(frame0, keypointsPrev_);
-
- // extract points from keypoints
-
- pointsPrev_.resize(keypointsPrev_.size());
- for (size_t i = 0; i < keypointsPrev_.size(); ++i)
- pointsPrev_[i] = keypointsPrev_[i].pt;
-
- // find correspondences
-
- optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray());
-
- // leave good correspondences only
-
- pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
- pointsGood_.clear(); pointsGood_.reserve(points_.size());
-
- for (size_t i = 0; i < points_.size(); ++i)
- {
- if (status_[i])
- {
- pointsPrevGood_.push_back(pointsPrev_[i]);
- pointsGood_.push_back(points_[i]);
- }
- }
-
- // perfrom outlier rejection
-
- IOutlierRejector *outlierRejector = static_cast<IOutlierRejector*>(outlierRejector_);
- if (!dynamic_cast<NullOutlierRejector*>(outlierRejector))
- {
- pointsPrev_.swap(pointsPrevGood_);
- points_.swap(pointsGood_);
-
- outlierRejector_->process(frame0.size(), pointsPrev_, points_, status_);
-
- pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
- pointsGood_.clear(); pointsGood_.reserve(points_.size());
-
- for (size_t i = 0; i < points_.size(); ++i)
- {
- if (status_[i])
- {
- pointsPrevGood_.push_back(pointsPrev_[i]);
- pointsGood_.push_back(points_[i]);
- }
- }
- }
-
- // prepare LP problem
-
-#ifndef HAVE_CLP
-
- CV_Error(CV_StsError, "The library is built without Clp support");
- if (ok) *ok = false;
- return Mat::eye(3, 3, CV_32F);
-
-#else
-
- CV_Assert(motionModel_ <= MM_AFFINE && motionModel_ != MM_RIGID);
-
- int npoints = static_cast<int>(pointsGood_.size());
- int ncols = 6 + 2*npoints;
- int nrows = 4*npoints;
-
- if (motionModel_ == MM_SIMILARITY)
- nrows += 2;
- else if (motionModel_ == MM_TRANSLATION_AND_SCALE)
- nrows += 3;
- else if (motionModel_ == MM_TRANSLATION)
- nrows += 4;
-
- rows_.clear();
- cols_.clear();
- elems_.clear();
-
- obj_.assign(ncols, 0);
- collb_.assign(ncols, -INF);
- colub_.assign(ncols, INF);
-
- int c = 6;
-
- for (int i = 0; i < npoints; ++i, c += 2)
- {
- obj_[c] = 1;
- collb_[c] = 0;
-
- obj_[c+1] = 1;
- collb_[c+1] = 0;
- }
-
- elems_.clear();
- rowlb_.assign(nrows, -INF);
- rowub_.assign(nrows, INF);
-
- int r = 0;
- Point2f p0, p1;
-
- for (int i = 0; i < npoints; ++i, r += 4)
- {
- p0 = pointsPrevGood_[i];
- p1 = pointsGood_[i];
-
- set(r, 0, p0.x); set(r, 1, p0.y); set(r, 2, 1); set(r, 6+2*i, -1);
- rowub_[r] = p1.x;
-
- set(r+1, 3, p0.x); set(r+1, 4, p0.y); set(r+1, 5, 1); set(r+1, 6+2*i+1, -1);
- rowub_[r+1] = p1.y;
-
- set(r+2, 0, p0.x); set(r+2, 1, p0.y); set(r+2, 2, 1); set(r+2, 6+2*i, 1);
- rowlb_[r+2] = p1.x;
-
- set(r+3, 3, p0.x); set(r+3, 4, p0.y); set(r+3, 5, 1); set(r+3, 6+2*i+1, 1);
- rowlb_[r+3] = p1.y;
- }
-
- if (motionModel_ == MM_SIMILARITY)
- {
- set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
- set(r+1, 1, 1); set(r+1, 3, 1); rowlb_[r+1] = rowub_[r+1] = 0;
- }
- else if (motionModel_ == MM_TRANSLATION_AND_SCALE)
- {
- set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
- set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
- set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
- }
- else if (motionModel_ == MM_TRANSLATION)
- {
- set(r, 0, 1); rowlb_[r] = rowub_[r] = 1;
- set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
- set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
- set(r+3, 4, 1); rowlb_[r+3] = rowub_[r+3] = 1;
- }
-
- // solve
-
- CoinPackedMatrix A(true, &rows_[0], &cols_[0], &elems_[0], elems_.size());
- A.setDimensions(nrows, ncols);
-
- ClpSimplex model(false);
- model.loadProblem(A, &collb_[0], &colub_[0], &obj_[0], &rowlb_[0], &rowub_[0]);
-
- ClpDualRowSteepest dualSteep(1);
- model.setDualRowPivotAlgorithm(dualSteep);
- model.scaling(1);
-
- model.dual();
-
- // extract motion
-
- const double *sol = model.getColSolution();
-
- Mat_<float> M = Mat::eye(3, 3, CV_32F);
- M(0,0) = sol[0];
- M(0,1) = sol[1];
- M(0,2) = sol[2];
- M(1,0) = sol[3];
- M(1,1) = sol[4];
- M(1,2) = sol[5];
-
- if (ok) *ok = true;
- return M;
-#endif
+ // estimate motion
+ return motionEstimator_->estimate(hostPointsPrev_, hostPoints_, ok);
}
+#endif // HAVE_OPENCV_GPU
Mat getMotion(int from, int to, const vector<Mat> &motions)
" Minimum inlier ratio to decide if estimated motion is OK. The default is 0.1.\n"
" --nkps=<int_number>\n"
" Number of keypoints to find in each frame. The default is 1000.\n"
- " --extra-kps=<int_number>\n"
- " Extra keypoint grid size for motion estimation. The default is 0.\n"
" --local-outlier-rejection=(yes|no)\n"
" Perform local outlier rejection. The default is no.\n\n"
" -sm, --save-motions=(<file_path>|no)\n"
" Minimum inlier ratio to decide if estimated motion is OK. The default is 0.1.\n"
" --ws-nkps=<int_number>\n"
" Number of keypoints to find in each frame. The default is 1000.\n"
- " --ws-extra-kps=<int_number>\n"
- " Extra keypoint grid size for motion estimation. The default is 0.\n"
" --ws-local-outlier-rejection=(yes|no)\n"
" Perform local outlier rejection. The default is no.\n\n"
" -sm2, --save-motions2=(<file_path>|no)\n"
{
public:
virtual ~IMotionEstimatorBuilder() {}
- virtual Ptr<GlobalMotionEstimatorBase> build() = 0;
+ virtual Ptr<ImageMotionEstimatorBase> build() = 0;
protected:
IMotionEstimatorBuilder(CommandLineParser &cmd) : cmd(cmd) {}
CommandLineParser cmd;
};
-class RansacMotionEstimatorBuilder : public IMotionEstimatorBuilder
+class MotionEstimatorRansacL2Builder : public IMotionEstimatorBuilder
{
public:
- RansacMotionEstimatorBuilder(CommandLineParser &cmd, const string &prefix = "")
- : IMotionEstimatorBuilder(cmd), prefix(prefix) {}
+ MotionEstimatorRansacL2Builder(CommandLineParser &cmd, bool gpu, const string &prefix = "")
+ : IMotionEstimatorBuilder(cmd), gpu(gpu), prefix(prefix) {}
- virtual Ptr<GlobalMotionEstimatorBase> build()
+ virtual Ptr<ImageMotionEstimatorBase> build()
{
- RansacMotionEstimator *est = new RansacMotionEstimator(motionModel(arg(prefix + "model")));
-
- est->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
+ MotionEstimatorRansacL2 *est = new MotionEstimatorRansacL2(motionModel(arg(prefix + "model")));
RansacParams ransac = est->ransacParams();
if (arg(prefix + "subset") != "auto")
ransac.eps = argf(prefix + "outlier-ratio");
est->setRansacParams(ransac);
- est->setGridSize(Size(argi(prefix + "extra-kps"), argi(prefix + "extra-kps")));
+ est->setMinInlierRatio(argf(prefix + "min-inlier-ratio"));
Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
if (arg(prefix + "local-outlier-rejection") == "yes")
{
- TranslationBasedLocalOutlierRejector *tor = new TranslationBasedLocalOutlierRejector();
- RansacParams ransacParams = tor->ransacParams();
+ TranslationBasedLocalOutlierRejector *tblor = new TranslationBasedLocalOutlierRejector();
+ RansacParams ransacParams = tblor->ransacParams();
if (arg(prefix + "thresh") != "auto")
ransacParams.thresh = argf(prefix + "thresh");
- tor->setRansacParams(ransacParams);
- outlierRejector = tor;
+ tblor->setRansacParams(ransacParams);
+ outlierRejector = tblor;
}
- est->setOutlierRejector(outlierRejector);
-
- est->setMinInlierRatio(argf(prefix + "min-inlier-ratio"));
-
- return est;
- }
-private:
- string prefix;
-};
-
#if HAVE_OPENCV_GPU
-class RansacMotionEstimatorBuilderGpu : public IMotionEstimatorBuilder
-{
-public:
- RansacMotionEstimatorBuilderGpu(CommandLineParser &cmd, const string &prefix = "")
- : IMotionEstimatorBuilder(cmd), prefix(prefix) {}
-
- virtual Ptr<GlobalMotionEstimatorBase> build()
- {
- RansacMotionEstimatorGpu *est = new RansacMotionEstimatorGpu(motionModel(arg(prefix + "model")));
-
- RansacParams ransac = est->ransacParams();
- if (arg(prefix + "subset") != "auto")
- ransac.size = argi(prefix + "subset");
- if (arg(prefix + "thresh") != "auto")
- ransac.thresh = argi(prefix + "thresh");
- ransac.eps = argf(prefix + "outlier-ratio");
- est->setRansacParams(ransac);
-
- Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
- if (arg(prefix + "local-outlier-rejection") == "yes")
+ if (gpu)
{
- TranslationBasedLocalOutlierRejector *tor = new TranslationBasedLocalOutlierRejector();
- RansacParams ransacParams = tor->ransacParams();
- if (arg(prefix + "thresh") != "auto")
- ransacParams.thresh = argf(prefix + "thresh");
- tor->setRansacParams(ransacParams);
- outlierRejector = tor;
+ KeypointBasedMotionEstimatorGpu *kbest = new KeypointBasedMotionEstimatorGpu(est);
+ kbest->setOutlierRejector(outlierRejector);
+ return kbest;
}
- est->setOutlierRejector(outlierRejector);
-
- est->setMinInlierRatio(argf(prefix + "min-inlier-ratio"));
+#endif
- return est;
+ KeypointBasedMotionEstimator *kbest = new KeypointBasedMotionEstimator(est);
+ kbest->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
+ kbest->setOutlierRejector(outlierRejector);
+ return kbest;
}
private:
+ bool gpu;
string prefix;
};
-#endif
-class LpBasedMotionEstimatorBuilder : public IMotionEstimatorBuilder
+class MotionEstimatorL1Builder : public IMotionEstimatorBuilder
{
public:
- LpBasedMotionEstimatorBuilder(CommandLineParser &cmd, const string &prefix = "")
- : IMotionEstimatorBuilder(cmd), prefix(prefix) {}
+ MotionEstimatorL1Builder(CommandLineParser &cmd, bool gpu, const string &prefix = "")
+ : IMotionEstimatorBuilder(cmd), gpu(gpu), prefix(prefix) {}
- virtual Ptr<GlobalMotionEstimatorBase> build()
+ virtual Ptr<ImageMotionEstimatorBase> build()
{
- LpBasedMotionEstimator *est = new LpBasedMotionEstimator(motionModel(arg(prefix + "model")));
-
- est->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
+ MotionEstimatorL1 *est = new MotionEstimatorL1(motionModel(arg(prefix + "model")));
Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
if (arg(prefix + "local-outlier-rejection") == "yes")
{
- TranslationBasedLocalOutlierRejector *tor = new TranslationBasedLocalOutlierRejector();
- RansacParams ransacParams = tor->ransacParams();
+ TranslationBasedLocalOutlierRejector *tblor = new TranslationBasedLocalOutlierRejector();
+ RansacParams ransacParams = tblor->ransacParams();
if (arg(prefix + "thresh") != "auto")
ransacParams.thresh = argf(prefix + "thresh");
- tor->setRansacParams(ransacParams);
- outlierRejector = tor;
+ tblor->setRansacParams(ransacParams);
+ outlierRejector = tblor;
+ }
+
+#if HAVE_OPENCV_GPU
+ if (gpu)
+ {
+ KeypointBasedMotionEstimatorGpu *kbest = new KeypointBasedMotionEstimatorGpu(est);
+ kbest->setOutlierRejector(outlierRejector);
+ return kbest;
}
- est->setOutlierRejector(outlierRejector);
+#endif
- return est;
+ KeypointBasedMotionEstimator *kbest = new KeypointBasedMotionEstimator(est);
+ kbest->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
+ kbest->setOutlierRejector(outlierRejector);
+ return kbest;
}
private:
+ bool gpu;
string prefix;
};
// prepare motion estimation builders
Ptr<IMotionEstimatorBuilder> motionEstBuilder;
-#if HAVE_OPENCV_GPU
- if (arg("gpu") == "yes")
- {
- if (arg("lin-prog-motion-est") == "yes")
- motionEstBuilder = new LpBasedMotionEstimatorBuilder(cmd);
- else
- motionEstBuilder = new RansacMotionEstimatorBuilderGpu(cmd);
- }
+ if (arg("lin-prog-motion-est") == "yes")
+ motionEstBuilder = new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes");
else
-#endif
- {
- if (arg("lin-prog-motion-est") == "yes")
- motionEstBuilder = new LpBasedMotionEstimatorBuilder(cmd);
- else
- motionEstBuilder = new RansacMotionEstimatorBuilder(cmd);
- }
+ motionEstBuilder = new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes");
Ptr<IMotionEstimatorBuilder> wsMotionEstBuilder;
-#if HAVE_OPENCV_GPU
- if (arg("gpu") == "yes")
- {
- if (arg("ws-lp") == "yes")
- wsMotionEstBuilder = new LpBasedMotionEstimatorBuilder(cmd, "ws-");
- else
- wsMotionEstBuilder = new RansacMotionEstimatorBuilderGpu(cmd, "ws-");
- }
+ if (arg("ws-lp") == "yes")
+ wsMotionEstBuilder = new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes", "ws-");
else
-#endif
- {
- if (arg("ws-lp") == "yes")
- wsMotionEstBuilder = new LpBasedMotionEstimatorBuilder(cmd, "ws-");
- else
- wsMotionEstBuilder = new RansacMotionEstimatorBuilder(cmd, "ws-");
- }
+ wsMotionEstBuilder = new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes", "ws-");
// determine whether we must use one pass or two pass stabilizer
bool isTwoPass =