From a60dc947b176feb18227eb47b3354f53f0a440d7 Mon Sep 17 00:00:00 2001 From: Alexey Spizhevoy Date: Mon, 19 Mar 2012 13:39:23 +0000 Subject: [PATCH] Added first version of videostab module --- modules/videostab/CMakeLists.txt | 3 + .../include/opencv2/videostab/deblurring.hpp | 66 ++++ .../include/opencv2/videostab/fast_marching.hpp | 61 +++ .../opencv2/videostab/fast_marching_inl.hpp | 122 ++++++ .../include/opencv2/videostab/frame_source.hpp | 48 +++ .../include/opencv2/videostab/global_motion.hpp | 102 +++++ .../videostab/include/opencv2/videostab/log.hpp | 31 ++ .../include/opencv2/videostab/motion_filtering.hpp | 35 ++ .../include/opencv2/videostab/optical_flow.hpp | 72 ++++ .../include/opencv2/videostab/stabilizer.hpp | 95 +++++ .../include/opencv2/videostab/videostab.hpp | 6 + modules/videostab/src/deblurring.cpp | 94 +++++ modules/videostab/src/fast_marching.cpp | 99 +++++ modules/videostab/src/global_motion.cpp | 438 +++++++++++++++++++++ modules/videostab/src/inpainting.cpp | 390 ++++++++++++++++++ modules/videostab/src/log.cpp | 23 ++ modules/videostab/src/motion_filtering.cpp | 33 ++ modules/videostab/src/optical_flow.cpp | 50 +++ modules/videostab/src/precomp.cpp | 1 + modules/videostab/src/precomp.hpp | 34 ++ modules/videostab/src/stabilizer.cpp | 248 ++++++++++++ samples/cpp/CMakeLists.txt | 2 +- samples/cpp/videostab.cpp | 225 +++++++++++ 23 files changed, 2277 insertions(+), 1 deletion(-) create mode 100644 modules/videostab/CMakeLists.txt create mode 100644 modules/videostab/include/opencv2/videostab/deblurring.hpp create mode 100644 modules/videostab/include/opencv2/videostab/fast_marching.hpp create mode 100644 modules/videostab/include/opencv2/videostab/fast_marching_inl.hpp create mode 100644 modules/videostab/include/opencv2/videostab/frame_source.hpp create mode 100644 modules/videostab/include/opencv2/videostab/global_motion.hpp create mode 100644 modules/videostab/include/opencv2/videostab/log.hpp create mode 100644 modules/videostab/include/opencv2/videostab/motion_filtering.hpp create mode 100644 modules/videostab/include/opencv2/videostab/optical_flow.hpp create mode 100644 modules/videostab/include/opencv2/videostab/stabilizer.hpp create mode 100644 modules/videostab/include/opencv2/videostab/videostab.hpp create mode 100644 modules/videostab/src/deblurring.cpp create mode 100644 modules/videostab/src/fast_marching.cpp create mode 100644 modules/videostab/src/global_motion.cpp create mode 100644 modules/videostab/src/inpainting.cpp create mode 100644 modules/videostab/src/log.cpp create mode 100644 modules/videostab/src/motion_filtering.cpp create mode 100644 modules/videostab/src/optical_flow.cpp create mode 100644 modules/videostab/src/precomp.cpp create mode 100644 modules/videostab/src/precomp.hpp create mode 100644 modules/videostab/src/stabilizer.cpp create mode 100644 samples/cpp/videostab.cpp diff --git a/modules/videostab/CMakeLists.txt b/modules/videostab/CMakeLists.txt new file mode 100644 index 0000000..912c00d --- /dev/null +++ b/modules/videostab/CMakeLists.txt @@ -0,0 +1,3 @@ +set(the_description "Video stabilization") +ocv_define_module(videostab opencv_imgproc opencv_features2d opencv_video OPTIONAL opencv_gpu) + diff --git a/modules/videostab/include/opencv2/videostab/deblurring.hpp b/modules/videostab/include/opencv2/videostab/deblurring.hpp new file mode 100644 index 0000000..1509331 --- /dev/null +++ b/modules/videostab/include/opencv2/videostab/deblurring.hpp @@ -0,0 +1,66 @@ +#ifndef __OPENCV_VIDEOSTAB_DEBLURRING_HPP__ +#define 
__OPENCV_VIDEOSTAB_DEBLURRING_HPP__ + +#include +#include "opencv2/core/core.hpp" + +namespace cv +{ +namespace videostab +{ + +float calcBlurriness(const Mat &frame); + +class IDeblurer +{ +public: + IDeblurer() : radius_(0), frames_(0), motions_(0) {} + + virtual ~IDeblurer() {} + + virtual void setRadius(int val) { radius_ = val; } + int radius() const { return radius_; } + + virtual void setFrames(const std::vector &val) { frames_ = &val; } + const std::vector& frames() const { return *frames_; } + + virtual void setMotions(const std::vector &val) { motions_ = &val; } + const std::vector& motions() const { return *motions_; } + + virtual void setBlurrinessRates(const std::vector &val) { blurrinessRates_ = &val; } + const std::vector& blurrinessRates() const { return *blurrinessRates_; } + + virtual void deblur(int idx, Mat &frame) = 0; + +protected: + int radius_; + const std::vector *frames_; + const std::vector *motions_; + const std::vector *blurrinessRates_; +}; + +class NullDeblurer : public IDeblurer +{ +public: + virtual void deblur(int idx, Mat &frame) {} +}; + +class WeightingDeblurer : public IDeblurer +{ +public: + WeightingDeblurer(); + + void setSensitivity(float val) { sensitivity_ = val; } + float sensitivity() const { return sensitivity_; } + + virtual void deblur(int idx, Mat &frame); + +private: + float sensitivity_; + Mat_ bSum_, gSum_, rSum_, wSum_; +}; + +} // namespace videostab +} // namespace cv + +#endif diff --git a/modules/videostab/include/opencv2/videostab/fast_marching.hpp b/modules/videostab/include/opencv2/videostab/fast_marching.hpp new file mode 100644 index 0000000..da1de83 --- /dev/null +++ b/modules/videostab/include/opencv2/videostab/fast_marching.hpp @@ -0,0 +1,61 @@ +#ifndef __OPENCV_VIDEOSTAB_FAST_MARCHING_HPP__ +#define __OPENCV_VIDEOSTAB_FAST_MARCHING_HPP__ + +#include +#include +#include +#include "opencv2/core/core.hpp" + +namespace cv +{ +namespace videostab +{ + +// See http://iwi.eldoc.ub.rug.nl/FILES/root/2004/JGraphToolsTelea/2004JGraphToolsTelea.pdf +class FastMarchingMethod +{ +public: + FastMarchingMethod() : inf_(1e6f) {} + + template + void run(const Mat &mask, Inpaint inpaint); + + Mat distanceMap() const { return dist_; } + +private: + enum { INSIDE = 0, BAND = 1, KNOWN = 255 }; + + struct DXY + { + float dist; + int x, y; + + DXY() {} + DXY(float dist, int x, int y) : dist(dist), x(x), y(y) {} + bool operator <(const DXY &dxy) const { return dist < dxy.dist; } + }; + + float solve(int x1, int y1, int x2, int y2) const; + int& indexOf(const DXY &dxy) { return index_(dxy.y, dxy.x); } + + void heapUp(int idx); + void heapDown(int idx); + void heapAdd(const DXY &dxy); + void heapRemoveMin(); + + float inf_; + + cv::Mat_ flag_; // flag map + cv::Mat_ dist_; // distance map + + cv::Mat_ index_; // index of point in the narrow band + std::vector narrowBand_; // narrow band heap + int size_; // narrow band size +}; + +} // namespace videostab +} // namespace cv + +#include "fast_marching_inl.hpp" + +#endif diff --git a/modules/videostab/include/opencv2/videostab/fast_marching_inl.hpp b/modules/videostab/include/opencv2/videostab/fast_marching_inl.hpp new file mode 100644 index 0000000..3595bfa --- /dev/null +++ b/modules/videostab/include/opencv2/videostab/fast_marching_inl.hpp @@ -0,0 +1,122 @@ +#ifndef __OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP__ +#define __OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP__ + +#include "opencv2/videostab/fast_marching.hpp" + +namespace cv +{ +namespace videostab +{ + +template +void FastMarchingMethod::run(const 
cv::Mat &mask, Inpaint inpaint) +{ + using namespace std; + using namespace cv; + + CV_Assert(mask.type() == CV_8U); + + static const int lut[4][2] = {{-1,0}, {0,-1}, {1,0}, {0,1}}; + + mask.copyTo(flag_); + flag_.create(mask.size()); + dist_.create(mask.size()); + index_.create(mask.size()); + narrowBand_.clear(); + size_ = 0; + + // init + for (int y = 0; y < flag_.rows; ++y) + { + for (int x = 0; x < flag_.cols; ++x) + { + if (flag_(y,x) == KNOWN) + dist_(y,x) = 0.f; + else + { + int n = 0; + int nunknown = 0; + + for (int i = 0; i < 4; ++i) + { + int xn = x + lut[i][0]; + int yn = y + lut[i][1]; + + if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows) + { + n++; + if (flag_(yn,xn) != KNOWN) + nunknown++; + } + } + + if (n>0 && nunknown == n) + { + dist_(y,x) = inf_; + flag_(y,x) = INSIDE; + } + else + { + dist_(y,x) = 0.f; + flag_(y,x) = BAND; + inpaint(x, y); + + narrowBand_.push_back(DXY(0.f,x,y)); + index_(y,x) = size_++; + } + } + } + } + + // make heap + for (int i = size_/2-1; i >= 0; --i) + heapDown(i); + + // main cycle + while (size_ > 0) + { + int x = narrowBand_[0].x; + int y = narrowBand_[0].y; + heapRemoveMin(); + + flag_(y,x) = KNOWN; + for (int n = 0; n < 4; ++n) + { + int xn = x + lut[n][0]; + int yn = y + lut[n][1]; + + if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows && flag_(yn,xn) != KNOWN) + { + dist_(yn,xn) = min(min(solve(xn-1, yn, xn, yn-1), solve(xn+1, yn, xn, yn-1)), + min(solve(xn-1, yn, xn, yn+1), solve(xn+1, yn, xn, yn+1))); + + if (flag_(yn,xn) == INSIDE) + { + flag_(yn,xn) = BAND; + inpaint(xn, yn); + heapAdd(DXY(dist_(yn,xn),xn,yn)); + } + else + { + int i = index_(yn,xn); + if (dist_(yn,xn) < narrowBand_[i].dist) + { + narrowBand_[i].dist = dist_(yn,xn); + heapUp(i); + } + // works better if it's commented out + /*else if (dist(yn,xn) > narrowBand[i].dist) + { + narrowBand[i].dist = dist(yn,xn); + heapDown(i); + }*/ + } + } + } + } +} + +} // namespace videostab +} // namespace cv + +#endif diff --git a/modules/videostab/include/opencv2/videostab/frame_source.hpp b/modules/videostab/include/opencv2/videostab/frame_source.hpp new file mode 100644 index 0000000..7d2f509 --- /dev/null +++ b/modules/videostab/include/opencv2/videostab/frame_source.hpp @@ -0,0 +1,48 @@ +#ifndef __OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP__ +#define __OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP__ + +#include +#include +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" + +namespace cv +{ +namespace videostab +{ + +class IFrameSource +{ +public: + virtual ~IFrameSource() {} + virtual void reset() = 0; + virtual Mat nextFrame() = 0; +}; + +class NullFrameSource : public IFrameSource +{ +public: + virtual void reset() {} + virtual Mat nextFrame() { return Mat(); } +}; + +class VideoFileSource : public IFrameSource +{ +public: + VideoFileSource(const std::string &path, bool volatileFrame = false); + virtual void reset(); + virtual Mat nextFrame(); + + int frameCount() { return reader_.get(CV_CAP_PROP_FRAME_COUNT); } + double fps() { return reader_.get(CV_CAP_PROP_FPS); } + +private: + std::string path_; + bool volatileFrame_; + VideoCapture reader_; +}; + +} // namespace videostab +} // namespace cv + +#endif diff --git a/modules/videostab/include/opencv2/videostab/global_motion.hpp b/modules/videostab/include/opencv2/videostab/global_motion.hpp new file mode 100644 index 0000000..88c65f3 --- /dev/null +++ b/modules/videostab/include/opencv2/videostab/global_motion.hpp @@ -0,0 +1,102 @@ +#ifndef __OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP__ +#define 
__OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP__ + +#include +#include "opencv2/core/core.hpp" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/videostab/optical_flow.hpp" + +namespace cv +{ +namespace videostab +{ + +enum MotionModel +{ + TRANSLATION = 0, + TRANSLATION_AND_SCALE = 1, + AFFINE = 2 +}; + +Mat estimateGlobalMotionLeastSquares( + const std::vector &points0, const std::vector &points1, + int model = AFFINE, float *rmse = 0); + +struct RansacParams +{ + int size; // subset size + float thresh; // max error to classify as inlier + float eps; // max outliers ratio + float prob; // probability of success + + RansacParams(int size, float thresh, float eps, float prob) + : size(size), thresh(thresh), eps(eps), prob(prob) {} + + static RansacParams affine2dMotionStd() { return RansacParams(6, 0.5f, 0.5f, 0.99f); } + static RansacParams translationAndScale2dMotionStd() { return RansacParams(3, 0.5f, 0.5f, 0.99f); } + static RansacParams translationMotionStd() { return RansacParams(2, 0.5f, 0.5f, 0.99f); } +}; + +Mat estimateGlobalMotionRobust( + const std::vector &points0, const std::vector &points1, + int model = AFFINE, const RansacParams ¶ms = RansacParams::affine2dMotionStd(), + float *rmse = 0, int *ninliers = 0); + +class IGlobalMotionEstimator +{ +public: + virtual ~IGlobalMotionEstimator() {} + virtual Mat estimate(const Mat &frame0, const Mat &frame1) = 0; +}; + +class PyrLkRobustMotionEstimator : public IGlobalMotionEstimator +{ +public: + PyrLkRobustMotionEstimator(); + + void setDetector(Ptr val) { detector_ = val; } + Ptr detector() const { return detector_; } + + void setOptFlowEstimator(Ptr val) { optFlowEstimator_ = val; } + Ptr optFlowEstimator() const { return optFlowEstimator_; } + + void setMotionModel(MotionModel val) { motionModel_ = val; } + MotionModel motionModel() const { return motionModel_; } + + void setRansacParams(const RansacParams &val) { ransacParams_ = val; } + RansacParams ransacParams() const { return ransacParams_; } + + void setMaxRmse(float val) { maxRmse_ = val; } + float maxRmse() const { return maxRmse_; } + + void setMinInlierRatio(float val) { minInlierRatio_ = val; } + float minInlierRatio() const { return minInlierRatio_; } + + virtual Mat estimate(const Mat &frame0, const Mat &frame1); + +private: + Ptr detector_; + Ptr optFlowEstimator_; + MotionModel motionModel_; + RansacParams ransacParams_; + std::vector status_; + std::vector keypointsPrev_; + std::vector pointsPrev_, points_; + std::vector pointsPrevGood_, pointsGood_; + float maxRmse_; + float minInlierRatio_; +}; + +Mat getMotion(int from, int to, const std::vector &motions); + +Mat ensureInclusionConstraint(const Mat &M, Size size, float trimRatio); + +float estimateOptimalTrimRatio(const Mat &M, Size size); + +// frame1 is non-transformed frame +float alignementError(const Mat &M, const Mat &frame0, const Mat &mask0, const Mat &frame1); + +} // namespace videostab +} // namespace cv + +#endif diff --git a/modules/videostab/include/opencv2/videostab/log.hpp b/modules/videostab/include/opencv2/videostab/log.hpp new file mode 100644 index 0000000..5d5749f --- /dev/null +++ b/modules/videostab/include/opencv2/videostab/log.hpp @@ -0,0 +1,31 @@ +#ifndef __OPENCV_VIDEOSTAB_LOG_HPP__ +#define __OPENCV_VIDEOSTAB_LOG_HPP__ + +namespace cv +{ +namespace videostab +{ + +class ILog +{ +public: + virtual ~ILog() {} + virtual void print(const char *format, ...) = 0; +}; + +class NullLog : public ILog +{ +public: + virtual void print(const char *format, ...) 
{} +}; + +class LogToStdout : public ILog +{ +public: + virtual void print(const char *format, ...); +}; + +} // namespace videostab +} // namespace cv + +#endif diff --git a/modules/videostab/include/opencv2/videostab/motion_filtering.hpp b/modules/videostab/include/opencv2/videostab/motion_filtering.hpp new file mode 100644 index 0000000..e29666d --- /dev/null +++ b/modules/videostab/include/opencv2/videostab/motion_filtering.hpp @@ -0,0 +1,35 @@ +#ifndef __OPENCV_VIDEOSTAB_MOTION_FILTERING_HPP__ +#define __OPENCV_VIDEOSTAB_MOTION_FILTERING_HPP__ + +#include +#include "opencv2/core/core.hpp" + +namespace cv +{ +namespace videostab +{ + +class IMotionFilter +{ +public: + virtual ~IMotionFilter() {} + virtual int radius() const = 0; + virtual Mat apply(int index, std::vector &Ms) const = 0; +}; + +class GaussianMotionFilter : public IMotionFilter +{ +public: + GaussianMotionFilter(int radius, float stdev); + virtual int radius() const { return radius_; } + virtual Mat apply(int idx, std::vector &motions) const; + +private: + int radius_; + std::vector weight_; +}; + +} // namespace videostab +} // namespace + +#endif diff --git a/modules/videostab/include/opencv2/videostab/optical_flow.hpp b/modules/videostab/include/opencv2/videostab/optical_flow.hpp new file mode 100644 index 0000000..2c6228b --- /dev/null +++ b/modules/videostab/include/opencv2/videostab/optical_flow.hpp @@ -0,0 +1,72 @@ +#ifndef __OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP__ +#define __OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/gpu/gpu.hpp" + +namespace cv +{ +namespace videostab +{ + +class ISparseOptFlowEstimator +{ +public: + virtual ~ISparseOptFlowEstimator() {} + virtual void run( + InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1, + OutputArray status, OutputArray errors) = 0; +}; + +class IDenseOptFlowEstimator +{ +public: + virtual ~IDenseOptFlowEstimator() {} + virtual void run( + InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY, + OutputArray errors) = 0; +}; + +class PyrLkOptFlowEstimatorBase +{ +public: + PyrLkOptFlowEstimatorBase() { setWinSize(Size(21, 21)); setMaxLevel(3); } + + void setWinSize(Size val) { winSize_ = val; } + Size winSize() const { return winSize_; } + + void setMaxLevel(int val) { maxLevel_ = val; } + int maxLevel() const { return maxLevel_; } + +protected: + Size winSize_; + int maxLevel_; +}; + +class SparsePyrLkOptFlowEstimator + : public PyrLkOptFlowEstimatorBase, public ISparseOptFlowEstimator +{ +public: + virtual void run( + InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1, + OutputArray status, OutputArray errors); +}; + +class DensePyrLkOptFlowEstimatorGpu + : public PyrLkOptFlowEstimatorBase, public IDenseOptFlowEstimator +{ +public: + DensePyrLkOptFlowEstimatorGpu(); + + virtual void run( + InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY, + OutputArray errors); +private: + gpu::PyrLKOpticalFlow optFlowEstimator_; + gpu::GpuMat frame0_, frame1_, flowX_, flowY_, errors_; +}; + +} // namespace videostab +} // namespace cv + +#endif diff --git a/modules/videostab/include/opencv2/videostab/stabilizer.hpp b/modules/videostab/include/opencv2/videostab/stabilizer.hpp new file mode 100644 index 0000000..d063189 --- /dev/null +++ b/modules/videostab/include/opencv2/videostab/stabilizer.hpp @@ -0,0 +1,95 @@ +#ifndef __OPENCV_VIDEOSTAB_STABILIZER_HPP__ +#define __OPENCV_VIDEOSTAB_STABILIZER_HPP__ + +#include 
+#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/videostab/global_motion.hpp" +#include "opencv2/videostab/motion_filtering.hpp" +#include "opencv2/videostab/frame_source.hpp" +#include "opencv2/videostab/log.hpp" +#include "opencv2/videostab/inpainting.hpp" +#include "opencv2/videostab/deblurring.hpp" + +namespace cv +{ +namespace videostab +{ + +class Stabilizer : public IFrameSource +{ +public: + Stabilizer(); + + void setLog(Ptr log) { log_ = log; } + Ptr log() const { return log_; } + + void setFrameSource(Ptr val) { frameSource_ = val; reset(); } + Ptr frameSource() const { return frameSource_; } + + void setMotionEstimator(Ptr val) { motionEstimator_ = val; } + Ptr motionEstimator() const { return motionEstimator_; } + + void setMotionFilter(Ptr val) { motionFilter_ = val; reset(); } + Ptr motionFilter() const { return motionFilter_; } + + void setDeblurer(Ptr val) { deblurer_ = val; reset(); } + Ptr deblurrer() const { return deblurer_; } + + void setEstimateTrimRatio(bool val) { mustEstimateTrimRatio_ = val; reset(); } + bool mustEstimateTrimRatio() const { return mustEstimateTrimRatio_; } + + void setTrimRatio(float val) { trimRatio_ = val; reset(); } + int trimRatio() const { return trimRatio_; } + + void setInclusionConstraint(bool val) { inclusionConstraint_ = val; } + bool inclusionConstraint() const { return inclusionConstraint_; } + + void setBorderMode(int val) { borderMode_ = val; } + int borderMode() const { return borderMode_; } + + void setInpainter(Ptr val) { inpainter_ = val; reset(); } + Ptr inpainter() const { return inpainter_; } + + virtual void reset(); + virtual Mat nextFrame(); + +private: + void estimateMotionsAndTrimRatio(); + void processFirstFrame(Mat &frame); + bool processNextFrame(); + void stabilizeFrame(int idx); + + Ptr frameSource_; + Ptr motionEstimator_; + Ptr motionFilter_; + Ptr deblurer_; + Ptr inpainter_; + bool mustEstimateTrimRatio_; + float trimRatio_; + bool inclusionConstraint_; + int borderMode_; + Ptr log_; + + Size frameSize_; + Mat frameMask_; + int radius_; + int curPos_; + int curStabilizedPos_; + bool auxPassWasDone_; + bool doDeblurring_; + Mat preProcessedFrame_; + bool doInpainting_; + Mat inpaintingMask_; + std::vector frames_; + std::vector motions_; // motions_[i] is the motion from i to i+1 frame + std::vector blurrinessRates_; + std::vector stabilizedFrames_; + std::vector stabilizedMasks_; + std::vector stabilizationMotions_; +}; + +} // namespace videostab +} // namespace cv + +#endif diff --git a/modules/videostab/include/opencv2/videostab/videostab.hpp b/modules/videostab/include/opencv2/videostab/videostab.hpp new file mode 100644 index 0000000..e856910 --- /dev/null +++ b/modules/videostab/include/opencv2/videostab/videostab.hpp @@ -0,0 +1,6 @@ +#ifndef __OPENCV_VIDEOSTAB_HPP__ +#define __OPENCV_VIDEOSTAB_HPP__ + +#include "opencv2/videostab/stabilizer.hpp" + +#endif diff --git a/modules/videostab/src/deblurring.cpp b/modules/videostab/src/deblurring.cpp new file mode 100644 index 0000000..bebab9a --- /dev/null +++ b/modules/videostab/src/deblurring.cpp @@ -0,0 +1,94 @@ +#include "precomp.hpp" +#include "opencv2/videostab/deblurring.hpp" +#include "opencv2/videostab/global_motion.hpp" + +using namespace std; + +namespace cv +{ +namespace videostab +{ + +float calcBlurriness(const Mat &frame) +{ + Mat Gx, Gy; + Sobel(frame, Gx, CV_32F, 1, 0); + Sobel(frame, Gy, CV_32F, 0, 1); + double normGx = norm(Gx); + double normGy = norm(Gx); + double sumSq = normGx*normGx + 
normGy*normGy; + return 1.f / (sumSq / frame.size().area() + 1e-6); +} + + +WeightingDeblurer::WeightingDeblurer() +{ + setSensitivity(0.1); +} + + +void WeightingDeblurer::deblur(int idx, Mat &frame) +{ + CV_Assert(frame.type() == CV_8UC3); + + bSum_.create(frame.size()); + gSum_.create(frame.size()); + rSum_.create(frame.size()); + wSum_.create(frame.size()); + + for (int y = 0; y < frame.rows; ++y) + { + for (int x = 0; x < frame.cols; ++x) + { + Point3_ p = frame.at >(y,x); + bSum_(y,x) = p.x; + gSum_(y,x) = p.y; + rSum_(y,x) = p.z; + wSum_(y,x) = 1.f; + } + } + + for (int k = idx - radius_; k <= idx + radius_; ++k) + { + const Mat &neighbor = at(k, *frames_); + float bRatio = at(idx, *blurrinessRates_) / at(k, *blurrinessRates_); + Mat_ M = getMotion(idx, k, *motions_); + + if (bRatio > 1.f) + { + for (int y = 0; y < frame.rows; ++y) + { + for (int x = 0; x < frame.cols; ++x) + { + int x1 = M(0,0)*x + M(0,1)*y + M(0,2); + int y1 = M(1,0)*x + M(1,1)*y + M(1,2); + + if (x1 >= 0 && x1 < neighbor.cols && y1 >= 0 && y1 < neighbor.rows) + { + const Point3_ &p = frame.at >(y,x); + const Point3_ &p1 = neighbor.at >(y1,x1); + float w = bRatio * sensitivity_ / + (sensitivity_ + std::abs(intensity(p1) - intensity(p))); + bSum_(y,x) += w * p1.x; + gSum_(y,x) += w * p1.y; + rSum_(y,x) += w * p1.z; + wSum_(y,x) += w; + } + } + } + } + } + + for (int y = 0; y < frame.rows; ++y) + { + for (int x = 0; x < frame.cols; ++x) + { + float wSumInv = 1.f / wSum_(y,x); + frame.at >(y,x) = Point3_( + bSum_(y,x) * wSumInv, gSum_(y,x) * wSumInv, rSum_(y,x) * wSumInv); + } + } +} + +} // namespace videostab +} // namespace cv diff --git a/modules/videostab/src/fast_marching.cpp b/modules/videostab/src/fast_marching.cpp new file mode 100644 index 0000000..8ba5376 --- /dev/null +++ b/modules/videostab/src/fast_marching.cpp @@ -0,0 +1,99 @@ +#include "precomp.hpp" +#include "opencv2/videostab/fast_marching.hpp" + +using namespace std; + +namespace cv +{ +namespace videostab +{ + +float FastMarchingMethod::solve(int x1, int y1, int x2, int y2) const +{ + float sol = inf_; + if (y1 >=0 && y1 < flag_.rows && x1 >= 0 && x1 < flag_.cols && flag_(y1,x1) == KNOWN) + { + float t1 = dist_(y1,x1); + if (y2 >=0 && y2 < flag_.rows && x2 >= 0 && x2 < flag_.cols && flag_(y2,x2) == KNOWN) + { + float t2 = dist_(y2,x2); + float r = sqrt(2 - sqr(t1 - t2)); + float s = (t1 + t2 - r) / 2; + + if (s >= t1 && s >= t2) + sol = s; + else + { + s += r; + if (s >= t1 && s >= t2) + sol = s; + } + } + else + sol = 1 + t1; + } + else if (y2 >=0 && y2 < flag_.rows && x2 >= 0 && x2 < flag_.cols && flag_(y2,x2) == KNOWN) + sol = 1 + dist_(y2,x1); + return sol; +} + + +void FastMarchingMethod::heapUp(int idx) +{ + int p = (idx-1)/2; + while (idx > 0 && narrowBand_[idx] < narrowBand_[p]) + { + std::swap(indexOf(narrowBand_[p]), indexOf(narrowBand_[idx])); + std::swap(narrowBand_[p], narrowBand_[idx]); + idx = p; + p = (idx-1)/2; + } +} + + +void FastMarchingMethod::heapDown(int idx) +{ + int l, r, smallest; + while (true) + { + l = 2*idx+1; + r = 2*idx+2; + smallest = idx; + + if (l < size_ && narrowBand_[l] < narrowBand_[smallest]) smallest = l; + if (r < size_ && narrowBand_[r] < narrowBand_[smallest]) smallest = r; + + if (smallest == idx) break; + else + { + std::swap(indexOf(narrowBand_[idx]), indexOf(narrowBand_[smallest])); + std::swap(narrowBand_[idx], narrowBand_[smallest]); + idx = smallest; + } + } +} + + +void FastMarchingMethod::heapAdd(const DXY &dxy) +{ + if (static_cast(narrowBand_.size()) < size_ + 1) + narrowBand_.resize(size_*2 + 
1); + narrowBand_[size_] = dxy; + indexOf(dxy) = size_++; + heapUp(size_-1); +} + + +void FastMarchingMethod::heapRemoveMin() +{ + if (size_ > 0) + { + size_--; + std::swap(indexOf(narrowBand_[0]), indexOf(narrowBand_[size_])); + std::swap(narrowBand_[0], narrowBand_[size_]); + heapDown(0); + } +} + +} // namespace videostab +} // namespace cv diff --git a/modules/videostab/src/global_motion.cpp b/modules/videostab/src/global_motion.cpp new file mode 100644 index 0000000..d5da6ae --- /dev/null +++ b/modules/videostab/src/global_motion.cpp @@ -0,0 +1,438 @@ +#include "precomp.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "opencv2/videostab/global_motion.hpp" + +using namespace std; + +namespace cv +{ +namespace videostab +{ + +static Mat estimateGlobMotionLeastSquaresTranslation( + int npoints, const Point2f *points0, const Point2f *points1, float *rmse) +{ + Mat_ M = Mat::eye(3, 3, CV_32F); + for (int i = 0; i < npoints; ++i) + { + M(0,2) += points1[i].x - points0[i].x; + M(1,2) += points1[i].y - points0[i].y; + } + M(0,2) /= npoints; + M(1,2) /= npoints; + if (rmse) + { + *rmse = 0; + for (int i = 0; i < npoints; ++i) + *rmse += sqr(points1[i].x - points0[i].x - M(0,2)) + + sqr(points1[i].y - points0[i].y - M(1,2)); + *rmse = sqrt(*rmse / npoints); + } + return M; +} + + +static Mat estimateGlobMotionLeastSquaresTranslationAndScale( + int npoints, const Point2f *points0, const Point2f *points1, float *rmse) +{ + Mat_ A(2*npoints, 3), b(2*npoints, 1); + float *a0, *a1; + Point2f p0, p1; + + for (int i = 0; i < npoints; ++i) + { + a0 = A[2*i]; + a1 = A[2*i+1]; + p0 = points0[i]; + p1 = points1[i]; + a0[0] = p0.x; a0[1] = 1; a0[2] = 0; + a1[0] = p0.y; a1[1] = 0; a1[2] = 1; + b(2*i,0) = p1.x; + b(2*i+1,0) = p1.y; + } + + Mat_ sol; + solve(A, b, sol, DECOMP_SVD); + + if (rmse) + *rmse = norm(A*sol, b, NORM_L2) / sqrt(npoints); + + Mat_ M = Mat::eye(3, 3, CV_32F); + M(0,0) = M(1,1) = sol(0,0); + M(0,2) = sol(1,0); + M(1,2) = sol(2,0); + return M; +} + + +static Mat estimateGlobMotionLeastSquaresAffine( + int npoints, const Point2f *points0, const Point2f *points1, float *rmse) +{ + Mat_ A(2*npoints, 6), b(2*npoints, 1); + float *a0, *a1; + Point2f p0, p1; + + for (int i = 0; i < npoints; ++i) + { + a0 = A[2*i]; + a1 = A[2*i+1]; + p0 = points0[i]; + p1 = points1[i]; + a0[0] = p0.x; a0[1] = p0.y; a0[2] = 1; a0[3] = a0[4] = a0[5] = 0; + a1[0] = a1[1] = a1[2] = 0; a1[3] = p0.x; a1[4] = p0.y; a1[5] = 1; + b(2*i,0) = p1.x; + b(2*i+1,0) = p1.y; + } + + Mat_ sol; + solve(A, b, sol, DECOMP_SVD); + + if (rmse) + *rmse = norm(A*sol, b, NORM_L2) / sqrt(npoints); + + Mat_ M = Mat::eye(3, 3, CV_32F); + for (int i = 0, k = 0; i < 2; ++i) + for (int j = 0; j < 3; ++j, ++k) + M(i,j) = sol(k,0); + + return M; +} + + +Mat estimateGlobalMotionLeastSquares( + const vector &points0, const vector &points1, int model, float *rmse) +{ + CV_Assert(points0.size() == points1.size()); + + typedef Mat (*Impl)(int, const Point2f*, const Point2f*, float*); + static Impl impls[] = { estimateGlobMotionLeastSquaresTranslation, + estimateGlobMotionLeastSquaresTranslationAndScale, + estimateGlobMotionLeastSquaresAffine }; + + int npoints = static_cast(points0.size()); + return impls[model](npoints, &points0[0], &points1[0], rmse); +} + + +Mat estimateGlobalMotionRobust( + const vector &points0, const vector &points1, int model, const RansacParams ¶ms, + float *rmse, int *ninliers) +{ + CV_Assert(points0.size() == points1.size()); + + typedef Mat (*Impl)(int, const Point2f*, const Point2f*, float*); + static Impl impls[] = 
{ estimateGlobMotionLeastSquaresTranslation, + estimateGlobMotionLeastSquaresTranslationAndScale, + estimateGlobMotionLeastSquaresAffine }; + + const int npoints = static_cast(points0.size()); + const int niters = static_cast(ceil(log(1 - params.prob) / log(1 - pow(1 - params.eps, params.size)))); + + RNG rng(0); + vector indices(params.size); + vector subset0(params.size), subset1(params.size); + vector subset0best(params.size), subset1best(params.size); + Mat_ bestM; + int ninliersMax = -1; + Point2f p0, p1; + float x, y; + + for (int iter = 0; iter < niters; ++iter) + { + for (int i = 0; i < params.size; ++i) + { + bool ok = false; + while (!ok) + { + ok = true; + indices[i] = static_cast(rng) % npoints; + for (int j = 0; j < i; ++j) + if (indices[i] == indices[j]) + { ok = false; break; } + } + } + for (int i = 0; i < params.size; ++i) + { + subset0[i] = points0[indices[i]]; + subset1[i] = points1[indices[i]]; + } + + Mat_ M = impls[model](params.size, &subset0[0], &subset1[0], 0); + + int ninliers = 0; + for (int i = 0; i < npoints; ++i) + { + p0 = points0[i]; p1 = points1[i]; + x = M(0,0)*p0.x + M(0,1)*p0.y + M(0,2); + y = M(1,0)*p0.x + M(1,1)*p0.y + M(1,2); + if (sqr(x - p1.x) + sqr(y - p1.y) < params.thresh * params.thresh) + ninliers++; + } + if (ninliers >= ninliersMax) + { + bestM = M; + ninliersMax = ninliers; + subset0best.swap(subset0); + subset1best.swap(subset1); + } + } + + if (ninliersMax < params.size) + // compute rmse + bestM = impls[model](params.size, &subset0best[0], &subset1best[0], rmse); + else + { + subset0.resize(ninliersMax); + subset1.resize(ninliersMax); + for (int i = 0, j = 0; i < npoints; ++i) + { + p0 = points0[i]; p1 = points1[i]; + x = bestM(0,0)*p0.x + bestM(0,1)*p0.y + bestM(0,2); + y = bestM(1,0)*p0.x + bestM(1,1)*p0.y + bestM(1,2); + if (sqr(x - p1.x) + sqr(y - p1.y) < params.thresh * params.thresh) + { + subset0[j] = p0; + subset1[j] = p1; + j++; + } + } + bestM = impls[model](ninliersMax, &subset0[0], &subset1[0], rmse); + } + + if (ninliers) + *ninliers = ninliersMax; + + return bestM; +} + + +PyrLkRobustMotionEstimator::PyrLkRobustMotionEstimator() : ransacParams_(RansacParams::affine2dMotionStd()) +{ + setDetector(new GoodFeaturesToTrackDetector()); + setOptFlowEstimator(new SparsePyrLkOptFlowEstimator()); + setMotionModel(AFFINE); + setMaxRmse(0.5f); + setMinInlierRatio(0.1f); +} + + +Mat PyrLkRobustMotionEstimator::estimate(const Mat &frame0, const Mat &frame1) +{ + detector_->detect(frame0, keypointsPrev_); + + pointsPrev_.resize(keypointsPrev_.size()); + for (size_t i = 0; i < keypointsPrev_.size(); ++i) + pointsPrev_[i] = keypointsPrev_[i].pt; + + optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray()); + + size_t npoints = points_.size(); + pointsPrevGood_.clear(); + pointsPrevGood_.reserve(npoints); + pointsGood_.clear(); + pointsGood_.reserve(npoints); + for (size_t i = 0; i < npoints; ++i) + { + if (status_[i]) + { + pointsPrevGood_.push_back(pointsPrev_[i]); + pointsGood_.push_back(points_[i]); + } + } + + float rmse; + int ninliers; + Mat M = estimateGlobalMotionRobust( + pointsPrevGood_, pointsGood_, motionModel_, ransacParams_, &rmse, &ninliers); + + if (rmse > maxRmse_ || static_cast(ninliers) / pointsGood_.size() < minInlierRatio_) + M = Mat::eye(3, 3, CV_32F); + + return M; +} + + +Mat getMotion(int from, int to, const vector &motions) +{ + Mat M = Mat::eye(3, 3, CV_32F); + if (to > from) + { + for (int i = from; i < to; ++i) + M = at(i, motions) * M; + } + else if (from > to) + { + for (int i = to; i < 
from; ++i) + M = at(i, motions) * M; + M = M.inv(); + } + return M; +} + + +static inline int areaSign(Point2f a, Point2f b, Point2f c) +{ + float area = (b-a).cross(c-a); + if (area < -1e-5f) return -1; + if (area > 1e-5f) return 1; + return 0; +} + + +static inline bool segmentsIntersect(Point2f a, Point2f b, Point2f c, Point2f d) +{ + return areaSign(a,b,c) * areaSign(a,b,d) < 0 && + areaSign(c,d,a) * areaSign(c,d,b) < 0; +} + + +// Checks if rect a (with sides parallel to axis) is inside rect b (arbitrary). +// Rects must be passed in the [(0,0), (w,0), (w,h), (0,h)] order. +static inline bool isRectInside(const Point2f a[4], const Point2f b[4]) +{ + for (int i = 0; i < 4; ++i) + if (b[i].x > a[0].x && b[i].x < a[2].x && b[i].y > a[0].y && b[i].y < a[2].y) + return false; + for (int i = 0; i < 4; ++i) + for (int j = 0; j < 4; ++j) + if (segmentsIntersect(a[i], a[(i+1)%4], b[j], b[(j+1)%4])) + return false; + return true; +} + + +static inline bool isGoodMotion(const float M[], float w, float h, float dx, float dy) +{ + Point2f pt[4] = {Point2f(0,0), Point2f(w,0), Point2f(w,h), Point2f(0,h)}; + Point2f Mpt[4]; + + for (int i = 0; i < 4; ++i) + { + Mpt[i].x = M[0]*pt[i].x + M[1]*pt[i].y + M[2]; + Mpt[i].x = M[3]*pt[i].x + M[4]*pt[i].y + M[5]; + } + + pt[0] = Point2f(dx, dy); + pt[1] = Point2f(w - dx, dy); + pt[2] = Point2f(w - dx, h - dy); + pt[3] = Point2f(dx, h - dy); + + return isRectInside(pt, Mpt); +} + + +static inline void relaxMotion(const float M[], float t, float res[]) +{ + res[0] = M[0]*(1.f-t) + t; + res[1] = M[1]*(1.f-t); + res[2] = M[2]*(1.f-t); + res[3] = M[3]*(1.f-t); + res[4] = M[4]*(1.f-t) + t; + res[5] = M[5]*(1.f-t); +} + + +Mat ensureInclusionConstraint(const Mat &M, Size size, float trimRatio) +{ + CV_Assert(M.size() == Size(3,3) && M.type() == CV_32F); + + const float w = size.width; + const float h = size.height; + const float dx = floor(w * trimRatio); + const float dy = floor(h * trimRatio); + const float srcM[6] = + {M.at(0,0), M.at(0,1), M.at(0,2), + M.at(1,0), M.at(1,1), M.at(1,2)}; + + float curM[6]; + float t = 0; + relaxMotion(srcM, t, curM); + if (isGoodMotion(curM, w, h, dx, dy)) + return M; + + float l = 0, r = 1; + while (r - l > 1e-3f) + { + t = (l + r) * 0.5f; + relaxMotion(srcM, t, curM); + if (isGoodMotion(curM, w, h, dx, dy)) + r = t; + else + l = t; + t = r; + relaxMotion(srcM, r, curM); + } + + return (1 - r) * M + r * Mat::eye(3, 3, CV_32F); +} + + +// TODO can be estimated for O(1) time +float estimateOptimalTrimRatio(const Mat &M, Size size) +{ + CV_Assert(M.size() == Size(3,3) && M.type() == CV_32F); + + const float w = size.width; + const float h = size.height; + Mat_ M_(M); + + Point2f pt[4] = {Point2f(0,0), Point2f(w,0), Point2f(w,h), Point2f(0,h)}; + Point2f Mpt[4]; + + for (int i = 0; i < 4; ++i) + { + Mpt[i].x = M_(0,0)*pt[i].x + M_(0,1)*pt[i].y + M_(0,2); + Mpt[i].y = M_(1,0)*pt[i].x + M_(1,1)*pt[i].y + M_(1,2); + } + + float l = 0, r = 0.5f; + while (r - l > 1e-3f) + { + float t = (l + r) * 0.5f; + float dx = floor(w * t); + float dy = floor(h * t); + pt[0] = Point2f(dx, dy); + pt[1] = Point2f(w - dx, dy); + pt[2] = Point2f(w - dx, h - dy); + pt[3] = Point2f(dx, h - dy); + if (isRectInside(pt, Mpt)) + r = t; + else + l = t; + } + + return r; +} + + +float alignementError(const Mat &M, const Mat &frame0, const Mat &mask0, const Mat &frame1) +{ + CV_Assert(frame0.type() == CV_8UC3 && frame1.type() == CV_8UC3); + CV_Assert(mask0.type() == CV_8U && mask0.size() == frame0.size()); + CV_Assert(frame0.size() == frame1.size()); + 
CV_Assert(M.size() == Size(3,3) && M.type() == CV_32F); + + Mat_ mask0_(mask0); + Mat_ M_(M); + float err = 0; + + for (int y0 = 0; y0 < frame0.rows; ++y0) + { + for (int x0 = 0; x0 < frame0.cols; ++x0) + { + if (mask0_(y0,x0)) + { + int x1 = cvRound(M_(0,0)*x0 + M_(0,1)*y0 + M_(0,2)); + int y1 = cvRound(M_(1,0)*x0 + M_(1,1)*y0 + M_(1,2)); + if (y1 >= 0 && y1 < frame1.rows && x1 >= 0 && x1 < frame1.cols) + err += std::abs(intensity(frame1.at >(y1,x1)) - + intensity(frame0.at >(y0,x0))); + } + } + } + + return err; +} + +} // namespace videostab +} // namespace cv diff --git a/modules/videostab/src/inpainting.cpp b/modules/videostab/src/inpainting.cpp new file mode 100644 index 0000000..dec2d11 --- /dev/null +++ b/modules/videostab/src/inpainting.cpp @@ -0,0 +1,390 @@ +#include "precomp.hpp" +#include +#include "opencv2/videostab/inpainting.hpp" +#include "opencv2/videostab/global_motion.hpp" +#include "opencv2/videostab/fast_marching.hpp" + +using namespace std; + +namespace cv +{ +namespace videostab +{ + +void InpaintingPipeline::setRadius(int val) +{ + for (size_t i = 0; i < inpainters_.size(); ++i) + inpainters_[i]->setRadius(val); + IInpainter::setRadius(val); +} + + +void InpaintingPipeline::setFrames(const vector &val) +{ + for (size_t i = 0; i < inpainters_.size(); ++i) + inpainters_[i]->setFrames(val); + IInpainter::setFrames(val); +} + + +void InpaintingPipeline::setMotions(const vector &val) +{ + for (size_t i = 0; i < inpainters_.size(); ++i) + inpainters_[i]->setMotions(val); + IInpainter::setMotions(val); +} + + +void InpaintingPipeline::setStabilizedFrames(const vector &val) +{ + for (size_t i = 0; i < inpainters_.size(); ++i) + inpainters_[i]->setStabilizedFrames(val); + IInpainter::setStabilizedFrames(val); +} + + +void InpaintingPipeline::setStabilizationMotions(const vector &val) +{ + for (size_t i = 0; i < inpainters_.size(); ++i) + inpainters_[i]->setStabilizationMotions(val); + IInpainter::setStabilizationMotions(val); +} + + +void InpaintingPipeline::inpaint(int idx, Mat &frame, Mat &mask) +{ + for (size_t i = 0; i < inpainters_.size(); ++i) + inpainters_[i]->inpaint(idx, frame, mask); +} + + +struct Pixel3 +{ + float intens; + Point3_ color; + bool operator <(const Pixel3 &other) const { return intens < other.intens; } +}; + + +ConsistentMosaicInpainter::ConsistentMosaicInpainter() +{ + setStdevThresh(10); +} + + +void ConsistentMosaicInpainter::inpaint(int idx, Mat &frame, Mat &mask) +{ + CV_Assert(frame.type() == CV_8UC3); + CV_Assert(mask.size() == frame.size() && mask.type() == CV_8U); + + Mat invS = at(idx, *stabilizationMotions_).inv(); + vector > motions(2*radius_ + 1); + for (int i = -radius_; i <= radius_; ++i) + motions[radius_ + i] = getMotion(idx, idx + i, *motions_) * invS; + + int n; + float mean, var; + vector pixels(2*radius_ + 1); + + Mat_ > frame_(frame); + Mat_ mask_(mask); + + for (int y = 0; y < mask.rows; ++y) + { + for (int x = 0; x < mask.cols; ++x) + { + if (!mask_(y, x)) + { + n = 0; + mean = 0; + var = 0; + + for (int i = -radius_; i <= radius_; ++i) + { + const Mat_ > &framei = at(idx + i, *frames_); + const Mat_ &Mi = motions[radius_ + i]; + int xi = cvRound(Mi(0,0)*x + Mi(0,1)*y + Mi(0,2)); + int yi = cvRound(Mi(1,0)*x + Mi(1,1)*y + Mi(1,2)); + if (xi >= 0 && xi < framei.cols && yi >= 0 && yi < framei.rows) + { + pixels[n].color = framei(yi, xi); + mean += pixels[n].intens = intensity(pixels[n].color); + n++; + } + } + + if (n > 0) + { + mean /= n; + for (int i = 0; i < n; ++i) + var += sqr(pixels[i].intens - mean); + var /= std::max(n 
- 1, 1); + + if (var < stdevThresh_ * stdevThresh_) + { + sort(pixels.begin(), pixels.begin() + n); + int nh = (n-1)/2; + int c1 = pixels[nh].color.x; + int c2 = pixels[nh].color.y; + int c3 = pixels[nh].color.z; + if (n-2*nh) + { + c1 = (c1 + pixels[nh].color.x) / 2; + c2 = (c2 + pixels[nh].color.y) / 2; + c3 = (c3 + pixels[nh].color.z) / 2; + } + frame_(y, x) = Point3_(c1, c2, c3); + mask_(y, x) = 255; + } + } + } + } + } +} + + +class MotionInpaintBody +{ +public: + void operator ()(int x, int y) + { + float uEst = 0.f, vEst = 0.f; + float wSum = 0.f; + + for (int dy = -rad; dy <= rad; ++dy) + { + for (int dx = -rad; dx <= rad; ++dx) + { + int qx0 = x + dx; + int qy0 = y + dy; + + if (qy0 > 0 && qy0+1 < mask0.rows && qx0 > 0 && qx0+1 < mask0.cols && mask0(qy0,qx0) && + mask0(qy0-1,qx0) && mask0(qy0+1,qx0) && mask0(qy0,qx0-1) && mask0(qy0,qx0+1)) + { + float dudx = 0.5f * (flowX(qy0,qx0+1) - flowX(qy0,qx0-1)); + float dvdx = 0.5f * (flowY(qy0,qx0+1) - flowY(qy0,qx0-1)); + float dudy = 0.5f * (flowX(qy0+1,qx0) - flowX(qy0-1,qx0)); + float dvdy = 0.5f * (flowY(qy0+1,qx0) - flowY(qy0-1,qx0)); + + int qx1 = cvRound(qx0 + flowX(qy0,qx0)); + int qy1 = cvRound(qy0 + flowY(qy0,qx0)); + int px1 = qx1 - dx; + int py1 = qy1 - dy; + + if (qx1 >= 0 && qx1 < mask1.cols && qy1 >= 0 && qy1 < mask1.rows && mask1(qy1,qx1) && + px1 >= 0 && px1 < mask1.cols && py1 >= 0 && py1 < mask1.rows && mask1(py1,px1)) + { + Point3_ cp = frame1(py1,px1), cq = frame1(qy1,qx1); + float distColor = sqr(cp.x-cq.x) + sqr(cp.y-cq.y) + sqr(cp.z-cq.z); + float w = 1.f / (sqrt(distColor * (dx*dx + dy*dy)) + eps); + uEst += w * (flowX(qy0,qx0) - dudx*dx - dudy*dy); + vEst += w * (flowY(qy0,qx0) - dvdx*dx - dvdy*dy); + wSum += w; + } + //else return; + } + } + } + + if (wSum > 0.f) + { + flowX(y,x) = uEst / wSum; + flowY(y,x) = vEst / wSum; + mask0(y,x) = 255; + } + } + + Mat_ > frame1; + Mat_ mask0, mask1; + Mat_ flowX, flowY; + float eps; + int rad; +}; + + +MotionInpainter::MotionInpainter() +{ + setOptFlowEstimator(new DensePyrLkOptFlowEstimatorGpu()); + setFlowErrorThreshold(1e-4f); + setBorderMode(BORDER_REPLICATE); +} + + +void MotionInpainter::inpaint(int idx, Mat &frame, Mat &mask) +{ + priority_queue > neighbors; + vector motions(2*radius_ + 1); + + for (int i = -radius_; i <= radius_; ++i) + { + Mat motion0to1 = getMotion(idx, idx + i, *motions_) * at(idx, *stabilizationMotions_).inv(); + motions[radius_ + i] = motion0to1; + + if (i != 0) + { + float err = alignementError(motion0to1, frame, mask, at(idx + i, *frames_)); + neighbors.push(make_pair(-err, idx + i)); + } + } + + if (mask1_.size() != mask.size()) + { + mask1_.create(mask.size()); + mask1_.setTo(255); + } + + cvtColor(frame, grayFrame_, CV_BGR2GRAY); + + MotionInpaintBody body; + body.rad = 2; + body.eps = 1e-4f; + + while (!neighbors.empty()) + { + int neighbor = neighbors.top().second; + neighbors.pop(); + + Mat motion1to0 = motions[radius_ + neighbor - idx].inv(); + + frame1_ = at(neighbor, *frames_); + warpAffine( + frame1_, transformedFrame1_, motion1to0(Rect(0,0,3,2)), frame1_.size(), + INTER_LINEAR, borderMode_); + cvtColor(transformedFrame1_, transformedGrayFrame1_, CV_BGR2GRAY); + + warpAffine( + mask1_, transformedMask1_, motion1to0(Rect(0,0,3,2)), mask1_.size(), + INTER_NEAREST); + erode(transformedMask1_, transformedMask1_, Mat()); + + optFlowEstimator_->run(grayFrame_, transformedGrayFrame1_, flowX_, flowY_, flowErrors_); + + calcFlowMask( + flowX_, flowY_, flowErrors_, flowErrorThreshold_, mask, transformedMask1_, + flowMask_); + + body.flowX = 
flowX_; + body.flowY = flowY_; + body.mask0 = flowMask_; + body.mask1 = transformedMask1_; + body.frame1 = transformedFrame1_; + fmm_.run(flowMask_, body); + + completeFrameAccordingToFlow( + flowMask_, flowX_, flowY_, transformedFrame1_, transformedMask1_, frame, mask); + } +} + + +class ColorAverageInpaintBody +{ +public: + void operator ()(int x, int y) + { + float c1 = 0, c2 = 0, c3 = 0; + float wSum = 0; + + static const int lut[8][2] = {{-1,-1}, {-1,0}, {-1,1}, {0,-1}, {0,1}, {1,-1}, {1,0}, {1,1}}; + + for (int i = 0; i < 8; ++i) + { + int qx = x + lut[i][0]; + int qy = y + lut[i][1]; + if (qy >= 0 && qy < mask.rows && qx >= 0 && qx < mask.cols && mask(qy,qx)) + { + c1 += frame.at(qy,3*qx); + c2 += frame.at(qy,3*qx+1); + c3 += frame.at(qy,3*qx+2); + wSum += 1; + } + } + + float wSumInv = 1.f / wSum; + frame(y,x) = Point3_(c1*wSumInv, c2*wSumInv, c3*wSumInv); + mask(y,x) = 255; + } + + cv::Mat_ mask; + cv::Mat_ > frame; +}; + + +void ColorAverageInpainter::inpaint(int /*idx*/, Mat &frame, Mat &mask) +{ + ColorAverageInpaintBody body; + body.mask = mask; + body.frame = frame; + fmm_.run(mask, body); +} + + +void calcFlowMask( + const Mat &flowX, const Mat &flowY, const Mat &errors, float maxError, + const Mat &mask0, const Mat &mask1, Mat &flowMask) +{ + CV_Assert(flowX.type() == CV_32F && flowX.size() == mask0.size()); + CV_Assert(flowY.type() == CV_32F && flowY.size() == mask0.size()); + CV_Assert(errors.type() == CV_32F && errors.size() == mask0.size()); + CV_Assert(mask0.type() == CV_8U); + CV_Assert(mask1.type() == CV_8U && mask1.size() == mask0.size()); + + Mat_ flowX_(flowX), flowY_(flowY), errors_(errors); + Mat_ mask0_(mask0), mask1_(mask1); + + flowMask.create(mask0.size(), CV_8U); + flowMask.setTo(0); + Mat_ flowMask_(flowMask); + + for (int y0 = 0; y0 < flowMask_.rows; ++y0) + { + for (int x0 = 0; x0 < flowMask_.cols; ++x0) + { + if (mask0_(y0,x0) && errors_(y0,x0) < maxError) + { + int x1 = cvRound(x0 + flowX_(y0,x0)); + int y1 = cvRound(y0 + flowY_(y0,x0)); + + if (x1 >= 0 && x1 < mask1_.cols && y1 >= 0 && y1 < mask1_.rows && mask1_(y1,x1)) + flowMask_(y0,x0) = 255; + } + } + } +} + + +void completeFrameAccordingToFlow( + const Mat &flowMask, const Mat &flowX, const Mat &flowY, const Mat &frame1, const Mat &mask1, + Mat &frame0, Mat &mask0) +{ + CV_Assert(flowMask.type() == CV_8U); + CV_Assert(flowX.type() == CV_32F && flowX.size() == flowMask.size()); + CV_Assert(flowY.type() == CV_32F && flowY.size() == flowMask.size()); + CV_Assert(frame1.type() == CV_8UC3 && frame1.size() == flowMask.size()); + CV_Assert(mask1.type() == CV_8U && mask1.size() == flowMask.size()); + CV_Assert(frame0.type() == CV_8UC3 && frame0.size() == flowMask.size()); + CV_Assert(mask0.type() == CV_8U && mask0.size() == flowMask.size()); + + Mat_ flowMask_(flowMask), mask1_(mask1), mask0_(mask0); + Mat_ flowX_(flowX), flowY_(flowY); + + for (int y0 = 0; y0 < frame0.rows; ++y0) + { + for (int x0 = 0; x0 < frame0.cols; ++x0) + { + if (!mask0_(y0,x0) && flowMask_(y0,x0)) + { + int x1 = cvRound(x0 + flowX_(y0,x0)); + int y1 = cvRound(y0 + flowY_(y0,x0)); + + if (x1 >= 0 && x1 < frame1.cols && y1 >= 0 && y1 < frame1.rows && mask1_(y1,x1)) + { + frame0.at >(y0,x0) = frame1.at >(y1,x1); + mask0_(y0,x0) = 255; + } + } + } + } +} + +} // namespace videostab +} // namespace cv diff --git a/modules/videostab/src/log.cpp b/modules/videostab/src/log.cpp new file mode 100644 index 0000000..b122dda --- /dev/null +++ b/modules/videostab/src/log.cpp @@ -0,0 +1,23 @@ +#include "precomp.hpp" +#include +#include 
+#include "opencv2/videostab/log.hpp" + +using namespace std; + +namespace cv +{ +namespace videostab +{ + +void LogToStdout::print(const char *format, ...) +{ + va_list args; + va_start(args, format); + vprintf(format, args); + fflush(stdout); + va_end(args); +} + +} // namespace videostab +} // namespace cv diff --git a/modules/videostab/src/motion_filtering.cpp b/modules/videostab/src/motion_filtering.cpp new file mode 100644 index 0000000..83e9c73 --- /dev/null +++ b/modules/videostab/src/motion_filtering.cpp @@ -0,0 +1,33 @@ +#include "precomp.hpp" +#include "opencv2/videostab/motion_filtering.hpp" +#include "opencv2/videostab/global_motion.hpp" + +using namespace std; + +namespace cv +{ +namespace videostab +{ + +GaussianMotionFilter::GaussianMotionFilter(int radius, float stdev) : radius_(radius) +{ + float sum = 0; + weight_.resize(2*radius_ + 1); + for (int i = -radius_; i <= radius_; ++i) + sum += weight_[radius_ + i] = std::exp(-i*i/(stdev*stdev)); + for (int i = -radius_; i <= radius_; ++i) + weight_[radius_ + i] /= sum; +} + + +Mat GaussianMotionFilter::apply(int idx, vector &motions) const +{ + const Mat &cur = at(idx, motions); + Mat res = Mat::zeros(cur.size(), cur.type()); + for (int i = -radius_; i <= radius_; ++i) + res += weight_[radius_ + i] * getMotion(idx, idx + i, motions); + return res; +} + +} // namespace videostab +} // namespace cv diff --git a/modules/videostab/src/optical_flow.cpp b/modules/videostab/src/optical_flow.cpp new file mode 100644 index 0000000..d56eacd --- /dev/null +++ b/modules/videostab/src/optical_flow.cpp @@ -0,0 +1,50 @@ +#include "precomp.hpp" +#include "opencv2/gpu/gpu.hpp" +#include "opencv2/video/video.hpp" +#include "opencv2/videostab/optical_flow.hpp" + +using namespace std; + +namespace cv +{ +namespace videostab +{ + +void SparsePyrLkOptFlowEstimator::run( + InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1, + OutputArray status, OutputArray errors) +{ + calcOpticalFlowPyrLK(frame0, frame1, points0, points1, status, errors, winSize_, maxLevel_); +} + + +DensePyrLkOptFlowEstimatorGpu::DensePyrLkOptFlowEstimatorGpu() +{ + CV_Assert(gpu::getCudaEnabledDeviceCount() > 0); +} + + +void DensePyrLkOptFlowEstimatorGpu::run( + InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY, + OutputArray errors) +{ + frame0_.upload(frame0.getMat()); + frame1_.upload(frame1.getMat()); + + optFlowEstimator_.winSize = winSize_; + optFlowEstimator_.maxLevel = maxLevel_; + if (errors.needed()) + { + optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_, &errors_); + errors_.download(errors.getMatRef()); + } + else + optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_); + + flowX_.download(flowX.getMatRef()); + flowY_.download(flowY.getMatRef()); +} + + +} // namespace videostab +} // namespace cv diff --git a/modules/videostab/src/precomp.cpp b/modules/videostab/src/precomp.cpp new file mode 100644 index 0000000..c149df1 --- /dev/null +++ b/modules/videostab/src/precomp.cpp @@ -0,0 +1 @@ +#include "precomp.hpp" diff --git a/modules/videostab/src/precomp.hpp b/modules/videostab/src/precomp.hpp new file mode 100644 index 0000000..ab3c4da --- /dev/null +++ b/modules/videostab/src/precomp.hpp @@ -0,0 +1,34 @@ +#ifndef __OPENCV_PRECOMP_HPP__ +#define __OPENCV_PRECOMP_HPP__ + +#ifdef HAVE_CVCONFIG_H +#include "cvconfig.h" +#endif + +#include +#include +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include 
"opencv2/video/video.hpp" +#include "opencv2/features2d/features2d.hpp" + +inline float sqr(float x) { return x * x; } + +inline float intensity(const cv::Point3_ &bgr) +{ + return 0.3f*bgr.x + 0.59f*bgr.y + 0.11f*bgr.z; +} + +template inline T& at(int index, std::vector &items) +{ + return items[cv::borderInterpolate(index, items.size(), cv::BORDER_WRAP)]; +} + +template inline const T& at(int index, const std::vector &items) +{ + return items[cv::borderInterpolate(index, items.size(), cv::BORDER_WRAP)]; +} + +#endif + diff --git a/modules/videostab/src/stabilizer.cpp b/modules/videostab/src/stabilizer.cpp new file mode 100644 index 0000000..a78cdc0 --- /dev/null +++ b/modules/videostab/src/stabilizer.cpp @@ -0,0 +1,248 @@ +#include "precomp.hpp" +#include "opencv2/videostab/stabilizer.hpp" + +using namespace std; + +namespace cv +{ +namespace videostab +{ + +Stabilizer::Stabilizer() +{ + setFrameSource(new NullFrameSource()); + setMotionEstimator(new PyrLkRobustMotionEstimator()); + setMotionFilter(new GaussianMotionFilter(15, sqrt(15))); + setDeblurer(new NullDeblurer()); + setInpainter(new NullInpainter()); + setEstimateTrimRatio(true); + setTrimRatio(0); + setInclusionConstraint(false); + setBorderMode(BORDER_REPLICATE); + setLog(new NullLog()); +} + + +void Stabilizer::reset() +{ + radius_ = 0; + curPos_ = -1; + curStabilizedPos_ = -1; + auxPassWasDone_ = false; + frames_.clear(); + motions_.clear(); + stabilizedFrames_.clear(); + stabilizationMotions_.clear(); + doDeblurring_ = false; + doInpainting_ = false; +} + + +Mat Stabilizer::nextFrame() +{ + if (mustEstimateTrimRatio_ && !auxPassWasDone_) + { + estimateMotionsAndTrimRatio(); + auxPassWasDone_ = true; + frameSource_->reset(); + } + + if (curStabilizedPos_ == curPos_ && curStabilizedPos_ != -1) + return Mat(); // we've processed all frames already + + bool processed; + do { + processed = processNextFrame(); + } while (processed && curStabilizedPos_ == -1); + + if (curStabilizedPos_ == -1) + return Mat(); // frame source is empty + + const Mat &stabilizedFrame = at(curStabilizedPos_, stabilizedFrames_); + int dx = floor(trimRatio_ * stabilizedFrame.cols); + int dy = floor(trimRatio_ * stabilizedFrame.rows); + return stabilizedFrame(Rect(dx, dy, stabilizedFrame.cols - 2*dx, stabilizedFrame.rows - 2*dy)); +} + + +void Stabilizer::estimateMotionsAndTrimRatio() +{ + log_->print("estimating motions and trim ratio"); + + Size size; + Mat prevFrame, frame; + int frameCount = 0; + + while (!(frame = frameSource_->nextFrame()).empty()) + { + if (frameCount > 0) + motions_.push_back(motionEstimator_->estimate(prevFrame, frame)); + else + size = frame.size(); + prevFrame = frame; + frameCount++; + + log_->print("."); + } + + radius_ = motionFilter_->radius(); + for (int i = 0; i < radius_; ++i) + motions_.push_back(Mat::eye(3, 3, CV_32F)); + log_->print("\n"); + + trimRatio_ = 0; + for (int i = 0; i < frameCount; ++i) + { + Mat S = motionFilter_->apply(i, motions_); + trimRatio_ = std::max(trimRatio_, estimateOptimalTrimRatio(S, size)); + stabilizationMotions_.push_back(S); + } + + log_->print("estimated trim ratio: %f\n", static_cast(trimRatio_)); +} + + +void Stabilizer::processFirstFrame(Mat &frame) +{ + log_->print("processing frames"); + + frameSize_ = frame.size(); + frameMask_.create(frameSize_, CV_8U); + frameMask_.setTo(255); + + radius_ = motionFilter_->radius(); + int cacheSize = 2*radius_ + 1; + + frames_.resize(cacheSize); + stabilizedFrames_.resize(cacheSize); + stabilizedMasks_.resize(cacheSize); + + if (!auxPassWasDone_) + 
{ + motions_.resize(cacheSize); + stabilizationMotions_.resize(cacheSize); + } + + for (int i = -radius_; i < 0; ++i) + { + at(i, motions_) = Mat::eye(3, 3, CV_32F); + at(i, frames_) = frame; + } + + at(0, frames_) = frame; + + IInpainter *inpainter = static_cast(inpainter_); + doInpainting_ = dynamic_cast(inpainter) == 0; + if (doInpainting_) + { + inpainter_->setRadius(radius_); + inpainter_->setFrames(frames_); + inpainter_->setMotions(motions_); + inpainter_->setStabilizedFrames(stabilizedFrames_); + inpainter_->setStabilizationMotions(stabilizationMotions_); + } + + IDeblurer *deblurer = static_cast(deblurer_); + doDeblurring_ = dynamic_cast(deblurer) == 0; + if (doDeblurring_) + { + blurrinessRates_.resize(cacheSize); + float blurriness = calcBlurriness(frame); + for (int i = -radius_; i <= 0; ++i) + at(i, blurrinessRates_) = blurriness; + deblurer_->setRadius(radius_); + deblurer_->setFrames(frames_); + deblurer_->setMotions(motions_); + deblurer_->setBlurrinessRates(blurrinessRates_); + } +} + + +bool Stabilizer::processNextFrame() +{ + Mat frame = frameSource_->nextFrame(); + if (!frame.empty()) + { + curPos_++; + + if (curPos_ > 0) + { + at(curPos_, frames_) = frame; + + if (doDeblurring_) + at(curPos_, blurrinessRates_) = calcBlurriness(frame); + + if (!auxPassWasDone_) + { + Mat motionPrevToCur = motionEstimator_->estimate( + at(curPos_ - 1, frames_), at(curPos_, frames_)); + at(curPos_ - 1, motions_) = motionPrevToCur; + } + + if (curPos_ >= radius_) + { + curStabilizedPos_ = curPos_ - radius_; + stabilizeFrame(curStabilizedPos_); + } + } + else + processFirstFrame(frame); + + log_->print("."); + return true; + } + + if (curStabilizedPos_ < curPos_) + { + curStabilizedPos_++; + at(curStabilizedPos_ + radius_, frames_) = at(curPos_, frames_); + at(curStabilizedPos_ + radius_ - 1, motions_) = at(curPos_ - 1, motions_); + stabilizeFrame(curStabilizedPos_); + + log_->print("."); + return true; + } + + return false; +} + + +void Stabilizer::stabilizeFrame(int idx) +{ + Mat stabMotion; + if (!auxPassWasDone_) + stabMotion = motionFilter_->apply(idx, motions_); + else + stabMotion = at(idx, stabilizationMotions_); + + if (inclusionConstraint_ && !mustEstimateTrimRatio_) + stabMotion = ensureInclusionConstraint(stabMotion, frameSize_, trimRatio_); + + at(idx, stabilizationMotions_) = stabMotion; + + if (doDeblurring_) + { + at(idx, frames_).copyTo(preProcessedFrame_); + deblurer_->deblur(idx, preProcessedFrame_); + } + else + preProcessedFrame_ = at(idx, frames_); + + // apply stabilization transformation + warpAffine( + preProcessedFrame_, at(idx, stabilizedFrames_), stabMotion(Rect(0,0,3,2)), + frameSize_, INTER_LINEAR, borderMode_); + + if (doInpainting_) + { + warpAffine( + frameMask_, at(idx, stabilizedMasks_), stabMotion(Rect(0,0,3,2)), frameSize_, + INTER_NEAREST); + erode(at(idx, stabilizedMasks_), at(idx, stabilizedMasks_), Mat()); + at(idx, stabilizedMasks_).copyTo(inpaintingMask_); + inpainter_->inpaint(idx, at(idx, stabilizedFrames_), inpaintingMask_); + } +} + +} // namespace videostab +} // namespace cv diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index 69ac690..029f759 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -5,7 +5,7 @@ SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_highgui opencv_ml opencv_video opencv_objdetect opencv_photo opencv_nonfree - opencv_features2d opencv_calib3d opencv_legacy opencv_contrib opencv_stitching) + opencv_features2d opencv_calib3d opencv_legacy 
opencv_contrib opencv_stitching opencv_videostab) ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS}) diff --git a/samples/cpp/videostab.cpp b/samples/cpp/videostab.cpp new file mode 100644 index 0000000..49d8d81 --- /dev/null +++ b/samples/cpp/videostab.cpp @@ -0,0 +1,225 @@ +#include +#include +#include +#include +#include "opencv2/core/core.hpp" +#include "opencv2/video/video.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "opencv2/videostab/videostab.hpp" + +using namespace std; +using namespace cv; +using namespace cv::videostab; + +Ptr stabilizer; +double outputFps; +string outputPath; + +void run(); +void printHelp(); + +void run() +{ + VideoWriter writer; + Mat stabilizedFrame; + + while (!(stabilizedFrame = stabilizer->nextFrame()).empty()) + { + if (!outputPath.empty()) + { + if (!writer.isOpened()) + writer.open(outputPath, CV_FOURCC('X','V','I','D'), outputFps, stabilizedFrame.size()); + writer << stabilizedFrame; + } + imshow("stabilizedFrame", stabilizedFrame); + char key = static_cast(waitKey(3)); + if (key == 27) + break; + } + + cout << "\nfinished\n"; +} + + +void printHelp() +{ + cout << "OpenCV video stabilizer.\n" + "Usage: videostab [arguments]\n\n" + "Arguments:\n" + " -m, --model=(transl|transl_and_scale|affine)\n" + " Set motion model. The default is affine.\n" + " --outlier-ratio=\n" + " Outliers ratio in motion estimation. The default is 0.5.\n" + " --min-inlier-ratio=\n" + " Minimum inlier ratio to decide if estiamted motion is OK. The default is 0.1,\n" + " but may want to increase it.\n" + " -r, --radius=\n" + " Set smoothing radius. The default is 15.\n" + " --stdev=\n" + " Set smoothing weights standard deviation. The default is sqrt(radius).\n" + " --deblur=(yes|no)\n" + " Do deblurring.\n" + " --deblur-sens=\n" + " Set deblurring sensitivity (from 0 to +inf). The default is 0.1.\n" + " -t, --trim-ratio=\n" + " Set trimming ratio (from 0 to 0.5). The default is 0.\n" + " --est-trim=(yes|no)\n" + " Estimate trim ratio automatically. The default is yes.\n" + " --incl-constr=(yes|no)\n" + " Ensure the inclusion constraint is always satisfied. The default is no.\n" + " --border-mode=(replicate|const)\n" + " Set border extrapolation mode. The default is replicate.\n" + " --mosaic=(yes|no)\n" + " Do consistent mosaicing. The default is no.\n" + " --mosaic-stdev=\n" + " Consistent mosaicing stdev threshold. The default is 10.\n" + " --motion-inpaint=(yes|no)\n" + " Do motion inpainting (requires GPU support). The default is no.\n" + " --color-inpaint=(yes|no)\n" + " Do color inpainting. The defailt is no.\n" + " -o, --output=\n" + " Set output file path explicitely. The default is stabilized.avi.\n" + " --fps=\n" + " Set output video FPS explicitely. 
By default the source FPS is used.\n" + " -h, --help\n" + " Print help.\n" + "\n"; +} + + +int main(int argc, const char **argv) +{ + try + { + const char *keys = + "{ 1 | | | | }" + "{ m | model | | }" + "{ | min-inlier-ratio | | }" + "{ | outlier-ratio | | }" + "{ r | radius | | }" + "{ | stdev | | }" + "{ | deblur | | }" + "{ | deblur-sens | | }" + "{ | est-trim | | }" + "{ t | trim-ratio | | }" + "{ | incl-constr | | }" + "{ | border-mode | | }" + "{ | mosaic | | }" + "{ | mosaic-stdev | | }" + "{ | motion-inpaint | | }" + "{ | color-inpaint | | }" + "{ o | output | stabilized.avi | }" + "{ | fps | | }" + "{ h | help | false | }"; + CommandLineParser cmd(argc, argv, keys); + + // parse command arguments + + if (cmd.get("help")) + { + printHelp(); + return 0; + } + + stabilizer = new Stabilizer(); + + string inputPath = cmd.get("1"); + if (inputPath.empty()) + throw runtime_error("specify video file path"); + + VideoFileSource *frameSource = new VideoFileSource(inputPath); + outputFps = frameSource->fps(); + stabilizer->setFrameSource(frameSource); + cout << "frame count: " << frameSource->frameCount() << endl; + + PyrLkRobustMotionEstimator *motionEstimator = new PyrLkRobustMotionEstimator(); + if (cmd.get("model") == "transl") + motionEstimator->setMotionModel(TRANSLATION); + else if (cmd.get("model") == "transl_and_scale") + motionEstimator->setMotionModel(TRANSLATION_AND_SCALE); + else if (cmd.get("model") == "affine") + motionEstimator->setMotionModel(AFFINE); + else if (!cmd.get("model").empty()) + throw runtime_error("unknow motion mode: " + cmd.get("model")); + + if (!cmd.get("outlier-ratio").empty()) + { + RansacParams ransacParams = motionEstimator->ransacParams(); + ransacParams.eps = atof(cmd.get("outlier-ratio").c_str()); + motionEstimator->setRansacParams(ransacParams); + } + + if (!cmd.get("min-inlier-ratio").empty()) + motionEstimator->setMinInlierRatio(atof(cmd.get("min-inlier-ratio").c_str())); + + stabilizer->setMotionEstimator(motionEstimator); + + int smoothRadius = -1; + float smoothStdev = -1; + if (!cmd.get("radius").empty()) + smoothRadius = atoi(cmd.get("radius").c_str()); + if (!cmd.get("stdev").empty()) + smoothStdev = atof(cmd.get("stdev").c_str()); + if (smoothRadius > 0 && smoothStdev > 0) + stabilizer->setMotionFilter(new GaussianMotionFilter(smoothRadius, smoothStdev)); + else if (smoothRadius > 0 && smoothStdev < 0) + stabilizer->setMotionFilter(new GaussianMotionFilter(smoothRadius, sqrt(smoothRadius))); + + if (cmd.get("deblur") == "yes") + { + WeightingDeblurer *deblurer = new WeightingDeblurer(); + if (!cmd.get("deblur-sens").empty()) + deblurer->setSensitivity(atof(cmd.get("deblur-sens").c_str())); + stabilizer->setDeblurer(deblurer); + } + + if (!cmd.get("est-trim").empty()) + stabilizer->setEstimateTrimRatio(cmd.get("est-trim") == "yes"); + + if (!cmd.get("trim-ratio").empty()) + stabilizer->setTrimRatio(atof(cmd.get("trim-ratio").c_str())); + + stabilizer->setInclusionConstraint(cmd.get("incl-constr") == "yes"); + + if (cmd.get("border-mode") == "replicate") + stabilizer->setBorderMode(BORDER_REPLICATE); + else if (cmd.get("border-mode") == "const") + stabilizer->setBorderMode(BORDER_CONSTANT); + else if (!cmd.get("border-mode").empty()) + throw runtime_error("unknown border extrapolation mode: " + cmd.get("border-mode")); + + InpaintingPipeline *inpainters = new InpaintingPipeline(); + if (cmd.get("mosaic") == "yes") + { + ConsistentMosaicInpainter *inpainter = new ConsistentMosaicInpainter(); + if (!cmd.get("mosaic-stdev").empty()) + 
inpainter->setStdevThresh(atof(cmd.get("mosaic-stdev").c_str())); + inpainters->pushBack(inpainter); + } + if (cmd.get("motion-inpaint") == "yes") + inpainters->pushBack(new MotionInpainter()); + if (cmd.get("color-inpaint") == "yes") + inpainters->pushBack(new ColorAverageInpainter()); + if (!inpainters->empty()) + stabilizer->setInpainter(inpainters); + + stabilizer->setLog(new LogToStdout()); + + outputPath = cmd.get("output"); + + if (!cmd.get("fps").empty()) + outputFps = atoi(cmd.get("fps").c_str()); + + // run video processing + run(); + } + catch (const exception &e) + { + cout << e.what() << endl; + stabilizer.release(); + return -1; + } + stabilizer.release(); + return 0; +} -- 2.7.4
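
A minimal usage sketch of the module this patch introduces, condensed from samples/cpp/videostab.cpp above. Note that this copy of the patch has lost the contents of angle brackets (template arguments of Ptr/std::vector/Mat_ and the names in bare #include lines), so the template arguments below are reconstructed assumptions based on the surrounding declarations, not text taken verbatim from the patch.

```cpp
// Minimal sketch: stabilize a video with the classes added by this patch.
// Assumes OpenCV 2.x-era APIs as declared in opencv2/videostab/*.hpp;
// template arguments (Ptr<...>, etc.) are reconstructed, not copied from the patch.
#include <cmath>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/videostab/videostab.hpp"

using namespace cv;
using namespace cv::videostab;

int main(int argc, char **argv)
{
    if (argc < 2)
    {
        std::cout << "usage: videostab_min <video>\n";
        return 0;
    }

    // The stabilizer pulls frames from a frame source (here, a video file).
    Ptr<Stabilizer> stabilizer = new Stabilizer();
    stabilizer->setFrameSource(new VideoFileSource(argv[1]));

    // Global motion estimation (sparse PyrLK flow + robust model fit) and
    // Gaussian smoothing of the motion trajectory, mirroring the sample's defaults.
    stabilizer->setMotionEstimator(new PyrLkRobustMotionEstimator());
    stabilizer->setMotionFilter(new GaussianMotionFilter(15, std::sqrt(15.f)));

    // Pull stabilized frames until the source is exhausted.
    Mat frame;
    while (!(frame = stabilizer->nextFrame()).empty())
    {
        imshow("stabilized", frame);
        if (waitKey(3) == 27) // Esc stops playback
            break;
    }
    return 0;
}
```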