added first version of the public stitching API, added a simple sample which uses it...
author     Alexey Spizhevoy <no@email>
           Wed, 7 Sep 2011 11:14:27 +0000 (11:14 +0000)
committer  Alexey Spizhevoy <no@email>
           Wed, 7 Sep 2011 11:14:27 +0000 (11:14 +0000)
19 files changed:
modules/stitching/include/opencv2/stitching/detail/autocalib.hpp
modules/stitching/include/opencv2/stitching/detail/blenders.hpp
modules/stitching/include/opencv2/stitching/detail/camera.hpp
modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp
modules/stitching/include/opencv2/stitching/detail/matchers.hpp
modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp
modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp
modules/stitching/include/opencv2/stitching/detail/util.hpp
modules/stitching/include/opencv2/stitching/detail/util_inl.hpp
modules/stitching/include/opencv2/stitching/detail/warpers.hpp
modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp
modules/stitching/include/opencv2/stitching/stitcher.hpp [new file with mode: 0644]
modules/stitching/include/opencv2/stitching/warpers.hpp [new file with mode: 0644]
modules/stitching/src/matchers.cpp
modules/stitching/src/precomp.hpp
modules/stitching/src/stitcher.cpp [new file with mode: 0644]
modules/stitching/src/util.cpp
samples/cpp/stitching.cpp
samples/cpp/stitching_detailed.cpp [new file with mode: 0644]

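For orientation before the per-file diffs: the new public API introduced by this commit reduces stitching to two calls, createDefault() and stitch(). Below is a minimal sketch mirroring the rewritten samples/cpp/stitching.cpp further down; the image file names are placeholders.

    #include <vector>
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/stitching/stitcher.hpp"

    int main()
    {
        std::vector<cv::Mat> imgs;
        imgs.push_back(cv::imread("img1.jpg"));   // placeholder input images
        imgs.push_back(cv::imread("img2.jpg"));

        cv::Mat pano;
        cv::Stitcher stitcher = cv::Stitcher::createDefault(/*try_use_gpu=*/false);
        cv::Stitcher::Status status = stitcher.stitch(imgs, pano);
        if (status != cv::Stitcher::OK)
            return -1;                            // e.g. ERR_NEED_MORE_IMGS

        cv::imwrite("result.jpg", pano);
        return 0;
    }
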
diff --git a/modules/stitching/include/opencv2/stitching/detail/autocalib.hpp b/modules/stitching/include/opencv2/stitching/detail/autocalib.hpp
index 9d1b075..0b06294 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
 //M*/\r
+\r
 #ifndef __OPENCV_STITCHING_AUTOCALIB_HPP__\r
 #define __OPENCV_STITCHING_AUTOCALIB_HPP__\r
 \r
diff --git a/modules/stitching/include/opencv2/stitching/detail/blenders.hpp b/modules/stitching/include/opencv2/stitching/detail/blenders.hpp
index 1c48785..e147447 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
 //M*/\r
+\r
 #ifndef __OPENCV_STITCHING_BLENDERS_HPP__\r
 #define __OPENCV_STITCHING_BLENDERS_HPP__\r
 \r
@@ -56,7 +57,7 @@ public:
     enum { NO, FEATHER, MULTI_BAND };\r
     static Ptr<Blender> createDefault(int type, bool try_gpu = false);\r
 \r
-    void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes);\r
+    void prepare(const std::vector<Point> &corners, const std::vector<Size> &sizes);    \r
     virtual void prepare(Rect dst_roi);\r
     virtual void feed(const Mat &img, const Mat &mask, Point tl);\r
     virtual void blend(Mat &dst, Mat &dst_mask);\r
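As a reminder of how this interface is driven by the new Stitcher (see stitcher.cpp below): prepare() is called once with all corners/sizes, feed() once per warped image, and blend() once at the end. A hedged sketch; the helper name and argument layout are illustrative.

    #include <vector>
    #include "opencv2/stitching/detail/blenders.hpp"

    // Illustrative helper: blend already-warped CV_16SC3 images with CV_8U masks,
    // positioned at the given panorama-space corners.
    static cv::Mat blendAll(const std::vector<cv::Mat> &imgs_warped_s,
                            const std::vector<cv::Mat> &masks_warped,
                            const std::vector<cv::Point> &corners,
                            const std::vector<cv::Size> &sizes)
    {
        cv::Ptr<cv::detail::Blender> blender =
            cv::detail::Blender::createDefault(cv::detail::Blender::MULTI_BAND, /*try_gpu=*/false);
        blender->prepare(corners, sizes);
        for (size_t i = 0; i < imgs_warped_s.size(); ++i)
            blender->feed(imgs_warped_s[i], masks_warped[i], corners[i]);

        cv::Mat result, result_mask;
        blender->blend(result, result_mask);
        return result;  // still CV_16SC3; convert to CV_8U before saving
    }
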
diff --git a/modules/stitching/include/opencv2/stitching/detail/camera.hpp b/modules/stitching/include/opencv2/stitching/detail/camera.hpp
index 507305b..1638c06 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.
 //
 //M*/
+
 #ifndef __OPENCV_STITCHING_CAMERA_HPP__
 #define __OPENCV_STITCHING_CAMERA_HPP__
 
diff --git a/modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp b/modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp
index 2bf922b..4fb466c 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
 //M*/\r
+\r
 #ifndef __OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP__\r
 #define __OPENCV_STITCHING_EXPOSURE_COMPENSATE_HPP__\r
 \r
diff --git a/modules/stitching/include/opencv2/stitching/detail/matchers.hpp b/modules/stitching/include/opencv2/stitching/detail/matchers.hpp
index 20abd78..6350f13 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
 //M*/\r
+\r
 #ifndef __OPENCV_STITCHING_MATCHERS_HPP__\r
 #define __OPENCV_STITCHING_MATCHERS_HPP__\r
 \r
@@ -51,9 +52,9 @@ namespace detail {
 struct CV_EXPORTS ImageFeatures\r
 {\r
     int img_idx;\r
-    cv::Size img_size;\r
-    std::vector<cv::KeyPoint> keypoints;\r
-    cv::Mat descriptors;\r
+    Size img_size;\r
+    std::vector<KeyPoint> keypoints;\r
+    Mat descriptors;\r
 };\r
 \r
 \r
@@ -61,12 +62,13 @@ class CV_EXPORTS FeaturesFinder
 {\r
 public:\r
     virtual ~FeaturesFinder() {}\r
-    void operator ()(const cv::Mat &image, ImageFeatures &features);\r
+    void operator ()(const Mat &image, ImageFeatures &features);\r
 \r
-    virtual void releaseMemory() {}\r
+    // TODO put it into operator ()\r
+    virtual void collectGarbage() {}\r
 \r
 protected:\r
-    virtual void find(const cv::Mat &image, ImageFeatures &features) = 0;\r
+    virtual void find(const Mat &image, ImageFeatures &features) = 0;\r
 };\r
 \r
 \r
@@ -77,12 +79,12 @@ public:
                        int num_octaves = 3, int num_layers = 4, \r
                        int num_octaves_descr = 4, int num_layers_descr = 2);\r
 \r
-    void releaseMemory();\r
+    void collectGarbage();\r
 \r
 protected:\r
-    void find(const cv::Mat &image, ImageFeatures &features);\r
+    void find(const Mat &image, ImageFeatures &features);\r
 \r
-    cv::Ptr<FeaturesFinder> impl_;\r
+    Ptr<FeaturesFinder> impl_;\r
 };\r
 \r
 \r
@@ -93,10 +95,10 @@ struct CV_EXPORTS MatchesInfo
     const MatchesInfo& operator =(const MatchesInfo &other);\r
 \r
     int src_img_idx, dst_img_idx;       // Images indices (optional)\r
-    std::vector<cv::DMatch> matches;\r
+    std::vector<DMatch> matches;\r
     std::vector<uchar> inliers_mask;    // Geometrically consistent matches mask\r
     int num_inliers;                    // Number of geometrically consistent matches\r
-    cv::Mat H;                          // Estimated homography\r
+    Mat H;                              // Estimated homography\r
     double confidence;                  // Confidence two images are from the same panorama\r
 };\r
 \r
@@ -112,7 +114,7 @@ public:
 \r
     bool isThreadSafe() const { return is_thread_safe_; }\r
 \r
-    virtual void releaseMemory() {}\r
+    virtual void collectGarbage() {}\r
 \r
 protected:\r
     FeaturesMatcher(bool is_thread_safe = false) : is_thread_safe_(is_thread_safe) {}\r
@@ -127,17 +129,17 @@ protected:
 class CV_EXPORTS BestOf2NearestMatcher : public FeaturesMatcher\r
 {\r
 public:\r
-    BestOf2NearestMatcher(bool try_use_gpu = true, float match_conf = 0.55f, int num_matches_thresh1 = 6, \r
+    BestOf2NearestMatcher(bool try_use_gpu = true, float match_conf = 0.65f, int num_matches_thresh1 = 6,\r
                           int num_matches_thresh2 = 6);\r
 \r
-    void releaseMemory();\r
+    void collectGarbage();\r
 \r
 protected:\r
     void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo &matches_info);\r
 \r
     int num_matches_thresh1_;\r
     int num_matches_thresh2_;\r
-    cv::Ptr<FeaturesMatcher> impl_;\r
+    Ptr<FeaturesMatcher> impl_;\r
 };\r
 \r
 } // namespace detail\r
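The hunks above drop the explicit cv:: qualification, rename releaseMemory() to collectGarbage(), and raise the default match_conf from 0.55 to 0.65. A hedged sketch of how the finder/matcher pair is driven, mirroring the loops in the new stitcher.cpp (the helper name is illustrative):

    #include <vector>
    #include "opencv2/stitching/detail/matchers.hpp"

    // Illustrative helper: extract features per image, then match all pairs;
    // note collectGarbage() in place of the old releaseMemory().
    static std::vector<cv::detail::MatchesInfo> matchAll(const std::vector<cv::Mat> &imgs)
    {
        std::vector<cv::detail::ImageFeatures> features(imgs.size());
        cv::detail::SurfFeaturesFinder finder(/*try_use_gpu=*/false);
        for (size_t i = 0; i < imgs.size(); ++i)
        {
            finder(imgs[i], features[i]);
            features[i].img_idx = static_cast<int>(i);
        }
        finder.collectGarbage();

        std::vector<cv::detail::MatchesInfo> pairwise_matches;
        cv::detail::BestOf2NearestMatcher matcher(/*try_use_gpu=*/false);  // match_conf now defaults to 0.65
        matcher(features, pairwise_matches);
        matcher.collectGarbage();
        return pairwise_matches;
    }
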
diff --git a/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp b/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp
index bc7ae3c..b8dd348 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
 //M*/\r
+\r
 #ifndef __OPENCV_STITCHING_MOTION_ESTIMATORS_HPP__\r
 #define __OPENCV_STITCHING_MOTION_ESTIMATORS_HPP__\r
 \r
diff --git a/modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp b/modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp
index 017baf8..52ffd0a 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
 //M*/\r
+\r
 #ifndef __OPENCV_STITCHING_SEAM_FINDERS_HPP__\r
 #define __OPENCV_STITCHING_SEAM_FINDERS_HPP__\r
 \r
diff --git a/modules/stitching/include/opencv2/stitching/detail/util.hpp b/modules/stitching/include/opencv2/stitching/detail/util.hpp
index e21fcfd..0793617 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
 //M*/\r
+\r
 #ifndef __OPENCV_STITCHING_UTIL_HPP__\r
 #define __OPENCV_STITCHING_UTIL_HPP__\r
 \r
@@ -47,6 +48,7 @@
 \r
 #define ENABLE_LOG 1\r
 \r
+// TODO remove LOG macros, add logging class\r
 #if ENABLE_LOG\r
   #include <iostream>\r
   #define LOG(msg) { std::cout << msg; std::cout.flush(); }\r
diff --git a/modules/stitching/include/opencv2/stitching/detail/util_inl.hpp b/modules/stitching/include/opencv2/stitching/detail/util_inl.hpp
index 3c69cc5..1359744 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
 //M*/\r
+\r
 #ifndef __OPENCV_STITCHING_UTIL_INL_HPP__\r
 #define __OPENCV_STITCHING_UTIL_INL_HPP__\r
 \r
diff --git a/modules/stitching/include/opencv2/stitching/detail/warpers.hpp b/modules/stitching/include/opencv2/stitching/detail/warpers.hpp
index 7bd264f..8e9d9a1 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
 //M*/\r
+\r
 #ifndef __OPENCV_STITCHING_WARPERS_HPP__\r
 #define __OPENCV_STITCHING_WARPERS_HPP__\r
 \r
@@ -55,11 +56,13 @@ class CV_EXPORTS Warper
 {\r
 public:\r
     enum { PLANE, CYLINDRICAL, SPHERICAL };\r
+\r
+    // TODO remove this method\r
     static Ptr<Warper> createByCameraFocal(float focal, int type, bool try_gpu = false);\r
 \r
     virtual ~Warper() {}\r
     virtual Point warp(const Mat &src, float focal, const Mat& R, Mat &dst,\r
-                           int interp_mode = INTER_LINEAR, int border_mode = BORDER_REFLECT) = 0;\r
+                       int interp_mode = INTER_LINEAR, int border_mode = BORDER_REFLECT) = 0;\r
     virtual Rect warpRoi(const Size &sz, float focal, const Mat &R) = 0;\r
 };\r
 \r
@@ -81,7 +84,7 @@ class CV_EXPORTS WarperBase : public Warper
 {   \r
 public:\r
     virtual Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
-                           int interp_mode, int border_mode);\r
+                       int interp_mode, int border_mode);\r
 \r
     virtual Rect warpRoi(const Size &sz, float focal, const Mat &R);\r
 \r
@@ -126,7 +129,7 @@ class CV_EXPORTS PlaneWarperGpu : public PlaneWarper
 public:\r
     PlaneWarperGpu(float plane_dist = 1.f, float scale = 1.f) : PlaneWarper(plane_dist, scale) {}\r
     Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
-                   int interp_mode, int border_mode);\r
+               int interp_mode, int border_mode);\r
 \r
 private:\r
     gpu::GpuMat d_xmap_, d_ymap_, d_dst_, d_src_;\r
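For reference, the detail::Warper interface above is used once per image: warp() renders the image into panorama space given its focal length and rotation, and returns the top-left corner of the warped result. A hedged sketch following the usage in the new stitcher.cpp (helper name and signature are illustrative):

    #include "opencv2/core/core.hpp"
    #include "opencv2/imgproc/imgproc.hpp"
    #include "opencv2/stitching/detail/warpers.hpp"

    // Illustrative helper: warp one image plus a full-white mask of the same size.
    static cv::Point warpOne(cv::detail::Warper &warper, const cv::Mat &src,
                             float focal, const cv::Mat &R,
                             cv::Mat &img_warped, cv::Mat &mask_warped)
    {
        cv::Point corner = warper.warp(src, focal, R, img_warped);

        cv::Mat mask(src.size(), CV_8U, cv::Scalar::all(255));
        warper.warp(mask, focal, R, mask_warped, cv::INTER_NEAREST, cv::BORDER_CONSTANT);
        return corner;
    }
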
diff --git a/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp b/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp
index 169155b..551d5d3 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
 //M*/\r
+\r
 #ifndef __OPENCV_STITCHING_WARPERS_INL_HPP__\r
 #define __OPENCV_STITCHING_WARPERS_INL_HPP__\r
 \r
@@ -50,7 +51,7 @@ namespace detail {
 \r
 template <class P>\r
 Point WarperBase<P>::warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
-                              int interp_mode, int border_mode)\r
+                          int interp_mode, int border_mode)\r
 {\r
     src_size_ = src.size();\r
 \r
diff --git a/modules/stitching/include/opencv2/stitching/stitcher.hpp b/modules/stitching/include/opencv2/stitching/stitcher.hpp
new file mode 100644
index 0000000..0b7cccf
--- /dev/null
@@ -0,0 +1,128 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_STITCHER_HPP__
+#define __OPENCV_STITCHING_STITCHER_HPP__
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/features2d/features2d.hpp"
+#include "warpers.hpp"
+#include "detail/matchers.hpp"
+#include "detail/exposure_compensate.hpp"
+#include "detail/seam_finders.hpp"
+#include "detail/blenders.hpp"
+
+namespace cv {
+
+class Stitcher
+{
+public:
+    enum { ORIG_RESOL = -1 };
+    enum Status { OK, ERR_NEED_MORE_IMGS };
+
+    // Creates stitcher with default parameters
+    static Stitcher createDefault(bool try_use_gpu = false);
+
+    // Stitches the biggest found pano. Returns status code.
+    Status stitch(InputArray imgs, OutputArray pano);
+
+    double registrationResol() const { return registr_resol_; }
+    void setRegistrationResol(double resol_mpx) { registr_resol_ = resol_mpx; }
+
+    double seamEstimationResol() const { return seam_est_resol_; }
+    void setSeamEstimationResol(double resol_mpx) { seam_est_resol_ = resol_mpx; }
+
+    double compositingResol() const { return compose_resol_; }
+    void setCompositingResol(double resol_mpx) { compose_resol_ = resol_mpx; }
+
+    double panoConfidenceThresh() const { return conf_thresh_; }
+    void setPanoConfidenceThresh(double conf_thresh) { conf_thresh_ = conf_thresh; }
+
+    bool horizontalStrightening() const { return horiz_stright_; }
+    void setHorizontalStrightening(bool flag) { horiz_stright_ = flag; }
+
+    Ptr<detail::FeaturesFinder> featuresFinder() { return features_finder_; }
+    const Ptr<detail::FeaturesFinder> featuresFinder() const { return features_finder_; }
+    void setFeaturesFinder(Ptr<detail::FeaturesFinder> features_finder)
+        { features_finder_ = features_finder; }
+
+    Ptr<detail::FeaturesMatcher> featuresMatcher() { return features_matcher_; }
+    const Ptr<detail::FeaturesMatcher> featuresMatcher() const { return features_matcher_; }
+    void setFeaturesMatcher(Ptr<detail::FeaturesMatcher> features_matcher)
+        { features_matcher_ = features_matcher; }
+
+    Ptr<WarperCreator> warper() { return warper_; }
+    const Ptr<WarperCreator> warper() const { return warper_; }
+    void setWarper(Ptr<WarperCreator> warper) { warper_ = warper; }
+
+    Ptr<detail::ExposureCompensator> exposureCompensator() { return exposure_comp_; }
+    const Ptr<detail::ExposureCompensator> exposureCompensator() const { return exposure_comp_; }
+    void setExposureCompenstor(Ptr<detail::ExposureCompensator> exposure_comp)
+        { exposure_comp_ = exposure_comp; }
+
+    Ptr<detail::SeamFinder> seamFinder() { return seam_finder_; }
+    const Ptr<detail::SeamFinder> seamFinder() const { return seam_finder_; }
+    void setSeamFinder(Ptr<detail::SeamFinder> seam_finder) { seam_finder_ = seam_finder; }
+
+    Ptr<detail::Blender> blender() { return blender_; }
+    const Ptr<detail::Blender> blender() const { return blender_; }
+    void setBlender(Ptr<detail::Blender> blender) { blender_ = blender; }
+
+private:
+    Stitcher() {}
+
+    double registr_resol_;
+    double seam_est_resol_;
+    double compose_resol_;
+    double conf_thresh_;
+    bool horiz_stright_;
+    Ptr<detail::FeaturesFinder> features_finder_;
+    Ptr<detail::FeaturesMatcher> features_matcher_;
+    Ptr<WarperCreator> warper_;
+    Ptr<detail::ExposureCompensator> exposure_comp_;
+    Ptr<detail::SeamFinder> seam_finder_;
+    Ptr<detail::Blender> blender_;
+};
+
+} // namespace cv
+
+#endif // __OPENCV_STITCHING_STITCHER_HPP__
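The class above wires the whole pipeline together through getters/setters; createDefault() fills them with working defaults, and every stage can be overridden before stitch() is called. A hedged configuration sketch (the specific component choices below are illustrative, not recommendations):

    #include "opencv2/stitching/stitcher.hpp"

    // Illustrative: start from the defaults, then swap a few components/settings.
    static cv::Stitcher makeTunedStitcher()
    {
        cv::Stitcher stitcher = cv::Stitcher::createDefault(/*try_use_gpu=*/false);
        stitcher.setRegistrationResol(0.6);                  // Mpx budget for registration
        stitcher.setCompositingResol(cv::Stitcher::ORIG_RESOL);
        stitcher.setPanoConfidenceThresh(1.0);
        stitcher.setWarper(new cv::CylindricalWarper());     // instead of the spherical default
        stitcher.setBlender(cv::detail::Blender::createDefault(cv::detail::Blender::FEATHER));
        return stitcher;
    }
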
diff --git a/modules/stitching/include/opencv2/stitching/warpers.hpp b/modules/stitching/include/opencv2/stitching/warpers.hpp
new file mode 100644
index 0000000..2f32601
--- /dev/null
@@ -0,0 +1,105 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_STITCHING_WARPER_CREATORS_HPP__
+#define __OPENCV_STITCHING_WARPER_CREATORS_HPP__
+
+#include "detail/warpers.hpp"
+
+namespace cv {
+
+class WarperCreator
+{
+public:
+    virtual ~WarperCreator() {}
+    virtual Ptr<detail::Warper> createByFocalLength(double f) const = 0;
+};
+
+
+class PlaneWarper : public WarperCreator
+{
+public:
+    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::PlaneWarper(f); }
+};
+
+
+class CylindricalWarper: public WarperCreator
+{
+public:
+    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::CylindricalWarper(f); }
+};
+
+
+class SphericalWarper: public WarperCreator
+{
+public:
+    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::SphericalWarper(f); }
+};
+
+
+#ifndef ANDROID
+
+class PlaneWarperGpu: public WarperCreator
+{
+public:
+    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::PlaneWarperGpu(f); }
+};
+
+
+class CylindricalWarperGpu: public WarperCreator
+{
+public:
+    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::CylindricalWarperGpu(f); }
+};
+
+
+class SphericalWarperGpu: public WarperCreator
+{
+public:
+    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::SphericalWarperGpu(f); }
+};
+
+#endif
+
+} // namespace cv
+
+#endif // __OPENCV_STITCHING_WARPER_CREATORS_HPP__
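These creators are thin factories over the corresponding detail::Warper classes, parameterized only by focal length; the Stitcher stores one and calls createByFocalLength() whenever the working scale changes (see stitcher.cpp below). A hedged sketch (the helper name is illustrative):

    #include "opencv2/stitching/warpers.hpp"

    // Illustrative: pick a projection at the public-API level, then materialize
    // the detail-level warper for a given focal length (in pixels).
    static cv::Ptr<cv::detail::Warper> makeWarper(double focal, bool spherical)
    {
        cv::Ptr<cv::WarperCreator> creator;
        if (spherical)
            creator = new cv::SphericalWarper();
        else
            creator = new cv::CylindricalWarper();
        return creator->createByFocalLength(focal);
    }
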
diff --git a/modules/stitching/src/matchers.cpp b/modules/stitching/src/matchers.cpp
index 9b02e35..4d9e16e 100644
@@ -85,7 +85,7 @@ public:
         num_layers_descr_ = num_layers_descr;\r
     }\r
 \r
-    void releaseMemory();\r
+    void collectGarbage();\r
 \r
 protected:\r
     void find(const Mat &image, ImageFeatures &features);\r
@@ -136,7 +136,7 @@ void GpuSurfFeaturesFinder::find(const Mat &image, ImageFeatures &features)
     descriptors_.download(features.descriptors);\r
 }\r
 \r
-void GpuSurfFeaturesFinder::releaseMemory()\r
+void GpuSurfFeaturesFinder::collectGarbage()\r
 {\r
     surf_.releaseMemory();\r
     image_.release();\r
@@ -231,7 +231,7 @@ public:
     GpuMatcher(float match_conf) : match_conf_(match_conf) {}\r
     void match(const ImageFeatures &features1, const ImageFeatures &features2, MatchesInfo& matches_info);\r
 \r
-    void releaseMemory();\r
+    void collectGarbage();\r
 \r
 private:\r
     float match_conf_;\r
@@ -326,7 +326,7 @@ void GpuMatcher::match(const ImageFeatures &features1, const ImageFeatures &feat
     }\r
 }\r
 \r
-void GpuMatcher::releaseMemory()\r
+void GpuMatcher::collectGarbage()\r
 {\r
     descriptors1_.release();\r
     descriptors2_.release();\r
@@ -369,9 +369,9 @@ void SurfFeaturesFinder::find(const Mat &image, ImageFeatures &features)
 }\r
 \r
 \r
-void SurfFeaturesFinder::releaseMemory()\r
+void SurfFeaturesFinder::collectGarbage()\r
 {\r
-    impl_->releaseMemory();\r
+    impl_->collectGarbage();\r
 }\r
 \r
 \r
@@ -511,9 +511,9 @@ void BestOf2NearestMatcher::match(const ImageFeatures &features1, const ImageFea
     matches_info.H = findHomography(src_points, dst_points, CV_RANSAC);\r
 }\r
 \r
-void BestOf2NearestMatcher::releaseMemory()\r
+void BestOf2NearestMatcher::collectGarbage()\r
 {\r
-    impl_->releaseMemory();\r
+    impl_->collectGarbage();\r
 }\r
 \r
 } // namespace detail\r
diff --git a/modules/stitching/src/precomp.hpp b/modules/stitching/src/precomp.hpp
index 2cce1de..3432864 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
 //M*/\r
+\r
 #ifndef __OPENCV_STITCHING_PRECOMP_H__\r
 #define __OPENCV_STITCHING_PRECOMP_H__\r
 \r
@@ -52,6 +53,8 @@
 #include <set>\r
 #include <functional>\r
 #include <sstream>\r
+#include <cmath>\r
+#include "opencv2/stitching/stitcher.hpp"\r
 #include "opencv2/stitching/detail/autocalib.hpp"\r
 #include "opencv2/stitching/detail/blenders.hpp"\r
 #include "opencv2/stitching/detail/camera.hpp"\r
diff --git a/modules/stitching/src/stitcher.cpp b/modules/stitching/src/stitcher.cpp
new file mode 100644
index 0000000..b3679d8
--- /dev/null
@@ -0,0 +1,352 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
+
+using namespace std;
+
+namespace cv {
+
+// TODO put all #ifndef ANDROID here, avoid passing try_use_gpu
+Stitcher Stitcher::createDefault(bool try_use_gpu)
+{
+    Stitcher stitcher;
+    stitcher.setRegistrationResol(0.6);
+    stitcher.setSeamEstimationResol(0.1);
+    stitcher.setCompositingResol(ORIG_RESOL);
+    stitcher.setPanoConfidenceThresh(1);
+    stitcher.setHorizontalStrightening(true);
+    stitcher.setFeaturesFinder(new detail::SurfFeaturesFinder(try_use_gpu));
+    stitcher.setFeaturesMatcher(new detail::BestOf2NearestMatcher(try_use_gpu));
+#ifndef ANDROID
+    bool must_use_gpu = try_use_gpu && (gpu::getCudaEnabledDeviceCount() > 0);
+    stitcher.setWarper(must_use_gpu ? static_cast<WarperCreator*>(new SphericalWarperGpu()) :
+                                      static_cast<WarperCreator*>(new SphericalWarper()));
+#else
+    stitcher.setWarper(new SphericalWarper());
+#endif
+    stitcher.setExposureCompenstor(new detail::BlocksGainCompensator());
+    stitcher.setSeamFinder(new detail::GraphCutSeamFinder());
+    stitcher.setBlender(new detail::MultiBandBlender(try_use_gpu));
+    return stitcher;
+}
+
+
+Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
+{
+    // TODO split this func
+
+    vector<Mat> imgs;
+    imgs_.getMatVector(imgs);
+    Mat &pano = pano_.getMatRef();
+
+    int64 app_start_time = getTickCount();
+
+    int num_imgs = static_cast<int>(imgs.size());
+    if (num_imgs < 2)
+    {
+        LOGLN("Need more images");
+        return ERR_NEED_MORE_IMGS;
+    }
+
+    double work_scale = 1, seam_scale = 1, compose_scale = 1;
+    bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;
+
+    LOGLN("Finding features...");
+    int64 t = getTickCount();
+
+    vector<detail::ImageFeatures> features(num_imgs);
+    Mat full_img, img;
+    vector<Mat> seam_est_imgs(num_imgs);
+    vector<Size> full_img_sizes(num_imgs);
+    double seam_work_aspect = 1;
+
+    for (int i = 0; i < num_imgs; ++i)
+    {
+        full_img = imgs[i];
+        full_img_sizes[i] = full_img.size();
+
+        if (registr_resol_ < 0)
+        {
+            img = full_img;
+            work_scale = 1;
+            is_work_scale_set = true;
+        }
+        else
+        {
+            if (!is_work_scale_set)
+            {
+                work_scale = min(1.0, sqrt(registr_resol_ * 1e6 / full_img.size().area()));
+                is_work_scale_set = true;
+            }
+            resize(full_img, img, Size(), work_scale, work_scale);
+        }
+        if (!is_seam_scale_set)
+        {
+            seam_scale = min(1.0, sqrt(seam_est_resol_ * 1e6 / full_img.size().area()));
+            seam_work_aspect = seam_scale / work_scale;
+            is_seam_scale_set = true;
+        }
+
+        (*features_finder_)(img, features[i]);
+        features[i].img_idx = i;
+        LOGLN("Features in image #" << i+1 << ": " << features[i].keypoints.size());
+
+        resize(full_img, img, Size(), seam_scale, seam_scale);
+        seam_est_imgs[i] = img.clone();
+    }
+
+    features_finder_->collectGarbage();
+    full_img.release();
+    img.release();
+
+    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
+
+    LOG("Pairwise matching");
+    t = getTickCount();
+    vector<detail::MatchesInfo> pairwise_matches;
+    (*features_matcher_)(features, pairwise_matches);
+    features_matcher_->collectGarbage();
+    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
+
+    // Leave only images we are sure are from the same panorama
+    vector<int> indices = detail::leaveBiggestComponent(features, pairwise_matches, conf_thresh_);
+    vector<Mat> seam_est_imgs_subset;
+    vector<Mat> imgs_subset;
+    vector<Size> full_img_sizes_subset;
+    for (size_t i = 0; i < indices.size(); ++i)
+    {
+        imgs_subset.push_back(imgs[indices[i]]);
+        seam_est_imgs_subset.push_back(seam_est_imgs[indices[i]]);
+        full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
+    }
+
+    seam_est_imgs = seam_est_imgs_subset;
+    imgs = imgs_subset;
+    full_img_sizes = full_img_sizes_subset;
+
+    num_imgs = static_cast<int>(imgs.size());
+    if (num_imgs < 2)
+    {
+        LOGLN("Need more images");
+        return ERR_NEED_MORE_IMGS;
+    }
+
+    vector<detail::CameraParams> cameras;
+    detail::HomographyBasedEstimator estimator;
+    estimator(features, pairwise_matches, cameras);
+
+    for (size_t i = 0; i < cameras.size(); ++i)
+    {
+        Mat R;
+        cameras[i].R.convertTo(R, CV_32F);
+        cameras[i].R = R;
+        LOGLN("Initial focal length #" << indices[i]+1 << ": " << cameras[i].focal);
+    }
+
+    detail::BundleAdjuster adjuster(detail::BundleAdjuster::FOCAL_RAY_SPACE, conf_thresh_);
+    adjuster(features, pairwise_matches, cameras);
+
+    // Find median focal length
+    vector<double> focals;
+    for (size_t i = 0; i < cameras.size(); ++i)
+    {
+        LOGLN("Camera #" << indices[i]+1 << " focal length: " << cameras[i].focal);
+        focals.push_back(cameras[i].focal);
+    }
+    nth_element(focals.begin(), focals.begin() + focals.size()/2, focals.end());
+    float warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
+
+    if (horiz_stright_)
+    {
+        vector<Mat> rmats;
+        for (size_t i = 0; i < cameras.size(); ++i)
+            rmats.push_back(cameras[i].R);
+        detail::waveCorrect(rmats);
+        for (size_t i = 0; i < cameras.size(); ++i)
+            cameras[i].R = rmats[i];
+    }
+
+    LOGLN("Warping images (auxiliary)... ");
+    t = getTickCount();
+
+    vector<Point> corners(num_imgs);
+    vector<Mat> masks_warped(num_imgs);
+    vector<Mat> images_warped(num_imgs);
+    vector<Size> sizes(num_imgs);
+    vector<Mat> masks(num_imgs);
+
+    // Prepare image masks
+    for (int i = 0; i < num_imgs; ++i)
+    {
+        masks[i].create(seam_est_imgs[i].size(), CV_8U);
+        masks[i].setTo(Scalar::all(255));
+    }
+
+    // Warp images and their masks
+    Ptr<detail::Warper> warper = warper_->createByFocalLength(warped_image_scale * seam_work_aspect);
+    for (int i = 0; i < num_imgs; ++i)
+    {
+        corners[i] = warper->warp(seam_est_imgs[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
+                                  cameras[i].R, images_warped[i]);
+        sizes[i] = images_warped[i].size();
+        warper->warp(masks[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
+                     cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
+    }
+
+    vector<Mat> images_warped_f(num_imgs);
+    for (int i = 0; i < num_imgs; ++i)
+        images_warped[i].convertTo(images_warped_f[i], CV_32F);
+
+    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
+
+    exposure_comp_->feed(corners, images_warped, masks_warped);
+    seam_finder_->find(images_warped_f, corners, masks_warped);
+
+    // Release unused memory
+    seam_est_imgs.clear();
+    images_warped.clear();
+    images_warped_f.clear();
+    masks.clear();
+
+    LOGLN("Compositing...");
+    t = getTickCount();
+
+    Mat img_warped, img_warped_s;
+    Mat dilated_mask, seam_mask, mask, mask_warped;
+    double compose_seam_aspect = 1;
+    double compose_work_aspect = 1;
+    bool is_blender_prepared = false;
+
+    for (int img_idx = 0; img_idx < num_imgs; ++img_idx)
+    {
+        LOGLN("Compositing image #" << indices[img_idx]+1);
+
+        // Read image and resize it if necessary
+        full_img = imgs[img_idx];
+        if (!is_compose_scale_set)
+        {
+            if (compose_resol_ > 0)
+                compose_scale = min(1.0, sqrt(compose_resol_ * 1e6 / full_img.size().area()));
+            is_compose_scale_set = true;
+
+            // Compute relative scales
+            compose_seam_aspect = compose_scale / seam_scale;
+            compose_work_aspect = compose_scale / work_scale;
+
+            // Update warped image scale
+            warped_image_scale *= static_cast<float>(compose_work_aspect);
+            warper = warper_->createByFocalLength(warped_image_scale);
+
+            // Update corners and sizes
+            for (int i = 0; i < num_imgs; ++i)
+            {
+                // Update camera focal
+                cameras[i].focal *= compose_work_aspect;
+
+                // Update corner and size
+                Size sz = full_img_sizes[i];
+                if (std::abs(compose_scale - 1) > 1e-1)
+                {
+                    sz.width = cvRound(full_img_sizes[i].width * compose_scale);
+                    sz.height = cvRound(full_img_sizes[i].height * compose_scale);
+                }
+
+                Rect roi = warper->warpRoi(sz, static_cast<float>(cameras[i].focal), cameras[i].R);
+                corners[i] = roi.tl();
+                sizes[i] = roi.size();
+            }
+        }
+        if (std::abs(compose_scale - 1) > 1e-1)
+            resize(full_img, img, Size(), compose_scale, compose_scale);
+        else
+            img = full_img;
+        full_img.release();
+        Size img_size = img.size();
+
+        // Warp the current image
+        warper->warp(img, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R,
+                     img_warped);
+
+        // Warp the current image mask
+        mask.create(img_size, CV_8U);
+        mask.setTo(Scalar::all(255));
+        warper->warp(mask, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R, mask_warped,
+                     INTER_NEAREST, BORDER_CONSTANT);
+
+        // Compensate exposure
+        exposure_comp_->apply(img_idx, corners[img_idx], img_warped, mask_warped);
+
+        img_warped.convertTo(img_warped_s, CV_16S);
+        img_warped.release();
+        img.release();
+        mask.release();
+
+        dilate(masks_warped[img_idx], dilated_mask, Mat());
+        resize(dilated_mask, seam_mask, mask_warped.size());
+        mask_warped = seam_mask & mask_warped;
+
+        if (!is_blender_prepared)
+        {
+            blender_->prepare(corners, sizes);
+            is_blender_prepared = true;
+        }
+
+        // Blend the current image
+        blender_->feed(img_warped_s, mask_warped, corners[img_idx]);
+    }
+
+    Mat result, result_mask;
+    blender_->blend(result, result_mask);
+
+    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
+
+    // Preliminary result is in CV_16SC3 format, but all values are in [0,255] range,
+    // so convert it to avoid confusing the user
+    result.convertTo(pano, CV_8U);
+
+    LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
+
+    return OK;
+}
+
+} // namespace cv
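A note on the three resolution parameters used throughout stitch(): they are megapixel budgets that are converted into per-image scale factors with the same formula at each stage. A hedged restatement of that formula (the function name is illustrative):

    #include <algorithm>
    #include <cmath>
    #include "opencv2/core/core.hpp"

    // Illustrative: how registr_resol_, seam_est_resol_ and compose_resol_ become
    // scale factors; a negative budget (ORIG_RESOL) keeps the original resolution.
    static double scaleForBudget(double resol_mpx, cv::Size full_size)
    {
        if (resol_mpx < 0)
            return 1.0;
        return std::min(1.0, std::sqrt(resol_mpx * 1e6 / full_size.area()));
    }
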
diff --git a/modules/stitching/src/util.cpp b/modules/stitching/src/util.cpp
index 57a95f3..3f6cff5 100644
@@ -39,6 +39,7 @@
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
 //M*/\r
+\r
 #include "precomp.hpp"\r
 \r
 using namespace std;\r
diff --git a/samples/cpp/stitching.cpp b/samples/cpp/stitching.cpp
index 813e718..c577dd2 100644
 // 3) Automatic Panoramic Image Stitching using Invariant Features.
 //    Matthew Brown and David G. Lowe. 2007.
 
+#include <iostream>
 #include <fstream>
 #include "opencv2/highgui/highgui.hpp"
-#include "opencv2/stitching/detail/autocalib.hpp"
-#include "opencv2/stitching/detail/blenders.hpp"
-#include "opencv2/stitching/detail/camera.hpp"
-#include "opencv2/stitching/detail/exposure_compensate.hpp"
-#include "opencv2/stitching/detail/matchers.hpp"
-#include "opencv2/stitching/detail/motion_estimators.hpp"
-#include "opencv2/stitching/detail/seam_finders.hpp"
-#include "opencv2/stitching/detail/util.hpp"
-#include "opencv2/stitching/detail/warpers.hpp"
+#include "opencv2/stitching/stitcher.hpp"
 
 using namespace std;
 using namespace cv;
-using namespace cv::detail;
 
 void printUsage()
 {
     cout <<
         "Rotation model images stitcher.\n\n"
-        "stitching img1 img2 [...imgN] [flags]\n\n"
+        "stitching img1 img2 [...imgN]\n\n"
         "Flags:\n"
-        "  --preview\n"
-        "      Run stitching in the preview mode. Works faster than usual mode,\n"
-        "      but output image will have lower resolution.\n"
-        "  --try_gpu (yes|no)\n"
+        "  --try_use_gpu (yes|no)\n"
         "      Try to use GPU. The default value is 'no'. All default values\n"
         "      are for CPU mode.\n"
-        "\nMotion Estimation Flags:\n"
-        "  --work_megapix <float>\n"
-        "      Resolution for image registration step. The default is 0.6 Mpx.\n"
-        "  --match_conf <float>\n"
-        "      Confidence for feature matching step. The default is 0.65.\n"
-        "  --conf_thresh <float>\n"
-        "      Threshold for two images are from the same panorama confidence.\n"
-        "      The default is 1.0.\n"
-        "  --ba (no|ray|focal_ray)\n"
-        "      Bundle adjustment cost function. The default is 'focal_ray'.\n"
-        "  --wave_correct (no|yes)\n"
-        "      Perform wave effect correction. The default is 'yes'.\n"
-        "  --save_graph <file_name>\n"
-        "      Save matches graph represented in DOT language to <file_name> file.\n"
-        "      Labels description: Nm is number of matches, Ni is number of inliers,\n"
-        "      C is confidence.\n"
-        "\nCompositing Flags:\n"
-        "  --warp (plane|cylindrical|spherical)\n"
-        "      Warp surface type. The default is 'spherical'.\n"
-        "  --seam_megapix <float>\n"
-        "      Resolution for seam estimation step. The default is 0.1 Mpx.\n"
-        "  --seam (no|voronoi|gc_color|gc_colorgrad)\n"
-        "      Seam estimation method. The default is 'gc_color'.\n"
-        "  --compose_megapix <float>\n"
-        "      Resolution for compositing step. Use -1 for original resolution.\n"
-        "      The default is -1.\n"
-        "  --expos_comp (no|gain|gain_blocks)\n"
-        "      Exposure compensation method. The default is 'gain_blocks'.\n"
-        "  --blend (no|feather|multiband)\n"
-        "      Blending method. The default is 'multiband'.\n"
-        "  --blend_strength <float>\n"
-        "      Blending strength from [0,100] range. The default is 5.\n"
         "  --output <result_img>\n"
         "      The default is 'result.jpg'.\n";
 }
 
-
-// Default command line args
-vector<string> img_names;
-bool preview = false;
-bool try_gpu = false;
-double work_megapix = 0.6;
-double seam_megapix = 0.1;
-double compose_megapix = -1;
-int ba_space = BundleAdjuster::FOCAL_RAY_SPACE;
-float conf_thresh = 1.f;
-bool wave_correct = true;
-bool save_graph = false;
-std::string save_graph_to;
-int warp_type = Warper::SPHERICAL;
-int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
-float match_conf = 0.65f;
-int seam_find_type = SeamFinder::GC_COLOR;
-int blend_type = Blender::MULTI_BAND;
-float blend_strength = 5;
+bool try_use_gpu = false;
+vector<Mat> imgs;
 string result_name = "result.jpg";
 
 int parseCmdArgs(int argc, char** argv)
@@ -147,165 +87,34 @@ int parseCmdArgs(int argc, char** argv)
             printUsage();
             return -1;
         }
-        else if (string(argv[i]) == "--preview")
-        {
-            preview = true;
-        }
         else if (string(argv[i]) == "--try_gpu")
         {
             if (string(argv[i + 1]) == "no")
-                try_gpu = false;
+                try_use_gpu = false;
             else if (string(argv[i + 1]) == "yes")
-                try_gpu = true;
+                try_use_gpu = true;
             else
             {
-                cout << "Bad --try_gpu flag value\n";
+                cout << "Bad --try_use_gpu flag value\n";
                 return -1;
             }
             i++;
         }
-        else if (string(argv[i]) == "--work_megapix")
-        {
-            work_megapix = atof(argv[i + 1]);
-            i++;
-        }
-        else if (string(argv[i]) == "--seam_megapix")
-        {
-            seam_megapix = atof(argv[i + 1]);
-            i++;
-        }
-        else if (string(argv[i]) == "--compose_megapix")
-        {
-            compose_megapix = atof(argv[i + 1]);
-            i++;
-        }
-        else if (string(argv[i]) == "--result")
+        else if (string(argv[i]) == "--output")
         {
             result_name = argv[i + 1];
             i++;
         }
-        else if (string(argv[i]) == "--match_conf")
-        {
-            match_conf = static_cast<float>(atof(argv[i + 1]));
-            i++;
-        }
-        else if (string(argv[i]) == "--ba")
-        {
-            if (string(argv[i + 1]) == "no")
-                ba_space = BundleAdjuster::NO;
-            else if (string(argv[i + 1]) == "ray")
-                ba_space = BundleAdjuster::RAY_SPACE;
-            else if (string(argv[i + 1]) == "focal_ray")
-                ba_space = BundleAdjuster::FOCAL_RAY_SPACE;
-            else
-            {
-                cout << "Bad bundle adjustment space\n";
-                return -1;
-            }
-            i++;
-        }
-        else if (string(argv[i]) == "--conf_thresh")
-        {
-            conf_thresh = static_cast<float>(atof(argv[i + 1]));
-            i++;
-        }
-        else if (string(argv[i]) == "--wave_correct")
-        {
-            if (string(argv[i + 1]) == "no")
-                wave_correct = false;
-            else if (string(argv[i + 1]) == "yes")
-                wave_correct = true;
-            else
-            {
-                cout << "Bad --wave_correct flag value\n";
-                return -1;
-            }
-            i++;
-        }
-        else if (string(argv[i]) == "--save_graph")
-        {
-            save_graph = true;
-            save_graph_to = argv[i + 1];
-            i++;
-        }
-        else if (string(argv[i]) == "--warp")
-        {
-            if (string(argv[i + 1]) == "plane")
-                warp_type = Warper::PLANE;
-            else if (string(argv[i + 1]) == "cylindrical")
-                warp_type = Warper::CYLINDRICAL;
-            else if (string(argv[i + 1]) == "spherical")
-                warp_type = Warper::SPHERICAL;
-            else
-            {
-                cout << "Bad warping method\n";
-                return -1;
-            }
-            i++;
-        }
-        else if (string(argv[i]) == "--expos_comp")
-        {
-            if (string(argv[i + 1]) == "no")
-                expos_comp_type = ExposureCompensator::NO;
-            else if (string(argv[i + 1]) == "gain")
-                expos_comp_type = ExposureCompensator::GAIN;
-            else if (string(argv[i + 1]) == "gain_blocks")
-                expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
-            else
-            {
-                cout << "Bad exposure compensation method\n";
-                return -1;
-            }
-            i++;
-        }
-        else if (string(argv[i]) == "--seam")
-        {
-            if (string(argv[i + 1]) == "no")
-                seam_find_type = SeamFinder::NO;
-            else if (string(argv[i + 1]) == "voronoi")
-                seam_find_type = SeamFinder::VORONOI;
-            else if (string(argv[i + 1]) == "gc_color")
-                seam_find_type = SeamFinder::GC_COLOR;
-            else if (string(argv[i + 1]) == "gc_colorgrad")
-                seam_find_type = SeamFinder::GC_COLOR_GRAD;
-            else
-            {
-                cout << "Bad seam finding method\n";
-                return -1;
-            }
-            i++;
-        }
-        else if (string(argv[i]) == "--blend")
+        else
         {
-            if (string(argv[i + 1]) == "no")
-                blend_type = Blender::NO;
-            else if (string(argv[i + 1]) == "feather")
-                blend_type = Blender::FEATHER;
-            else if (string(argv[i + 1]) == "multiband")
-                blend_type = Blender::MULTI_BAND;
-            else
+            Mat img = imread(argv[i]);
+            if (img.empty())
             {
-                cout << "Bad blending method\n";
+                cout << "Can't read image '" << argv[i] << "'\n";
                 return -1;
             }
-            i++;
-        }
-        else if (string(argv[i]) == "--blend_strength")
-        {
-            blend_strength = static_cast<float>(atof(argv[i + 1]));
-            i++;
+            imgs.push_back(img);
         }
-        else if (string(argv[i]) == "--output")
-        {
-            result_name = argv[i + 1];
-            i++;
-        }
-        else
-            img_names.push_back(argv[i]);
-    }
-    if (preview)
-    {
-        compose_megapix = 0.6;
     }
     return 0;
 }
@@ -313,314 +122,20 @@ int parseCmdArgs(int argc, char** argv)
 
 int main(int argc, char* argv[])
 {
-    int64 app_start_time = getTickCount();
-    cv::setBreakOnError(true);
-
     int retval = parseCmdArgs(argc, argv);
-    if (retval)
-        return retval;
-
-    // Check if have enough images
-    int num_images = static_cast<int>(img_names.size());
-    if (num_images < 2)
-    {
-        LOGLN("Need more images");
-        return -1;
-    }
-
-    double work_scale = 1, seam_scale = 1, compose_scale = 1;
-    bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;
-
-    LOGLN("Finding features...");
-    int64 t = getTickCount();
-
-    vector<ImageFeatures> features(num_images);
-    SurfFeaturesFinder finder(try_gpu);
-    Mat full_img, img;
+    if (retval) return -1;
 
-    vector<Mat> images(num_images);
-    vector<Size> full_img_sizes(num_images);
-    double seam_work_aspect = 1;
+    Mat pano;
+    Stitcher stitcher = Stitcher::createDefault(try_use_gpu);
+    Stitcher::Status status = stitcher.stitch(imgs, pano);
 
-    for (int i = 0; i < num_images; ++i)
+    if (status != Stitcher::OK)
     {
-        full_img = imread(img_names[i]);
-        full_img_sizes[i] = full_img.size();
-
-        if (full_img.empty())
-        {
-            LOGLN("Can't open image " << img_names[i]);
-            return -1;
-        }
-        if (work_megapix < 0)
-        {
-            img = full_img;
-            work_scale = 1;
-            is_work_scale_set = true;
-        }
-        else
-        {
-            if (!is_work_scale_set)
-            {
-                work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
-                is_work_scale_set = true;
-            }
-            resize(full_img, img, Size(), work_scale, work_scale);
-        }
-        if (!is_seam_scale_set)
-        {
-            seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
-            seam_work_aspect = seam_scale / work_scale;
-            is_seam_scale_set = true;
-        }
-
-        finder(img, features[i]);
-        features[i].img_idx = i;
-        LOGLN("Features in image #" << i+1 << ": " << features[i].keypoints.size());
-
-        resize(full_img, img, Size(), seam_scale, seam_scale);
-        images[i] = img.clone();
-    }
-
-    finder.releaseMemory();
-
-    full_img.release();
-    img.release();
-
-    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
-
-    LOG("Pairwise matching");
-    t = getTickCount();
-    vector<MatchesInfo> pairwise_matches;
-    BestOf2NearestMatcher matcher(try_gpu, match_conf);
-    matcher(features, pairwise_matches);
-    matcher.releaseMemory();
-    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
-
-    // Check if we should save matches graph
-    if (save_graph)
-    {
-        LOGLN("Saving matches graph...");
-        ofstream f(save_graph_to.c_str());
-        f << matchesGraphAsString(img_names, pairwise_matches, conf_thresh);
-    }
-
-    // Leave only images we are sure are from the same panorama
-    vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
-    vector<Mat> img_subset;
-    vector<string> img_names_subset;
-    vector<Size> full_img_sizes_subset;
-    for (size_t i = 0; i < indices.size(); ++i)
-    {
-        img_names_subset.push_back(img_names[indices[i]]);
-        img_subset.push_back(images[indices[i]]);
-        full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
-    }
-
-    images = img_subset;
-    img_names = img_names_subset;
-    full_img_sizes = full_img_sizes_subset;
-
-    // Check if we still have enough images
-    num_images = static_cast<int>(img_names.size());
-    if (num_images < 2)
-    {
-        LOGLN("Need more images");
+        cout << "Can't stitch images, error code = " << status << endl;
         return -1;
     }
 
-    HomographyBasedEstimator estimator;
-    vector<CameraParams> cameras;
-    estimator(features, pairwise_matches, cameras);
-
-    for (size_t i = 0; i < cameras.size(); ++i)
-    {
-        Mat R;
-        cameras[i].R.convertTo(R, CV_32F);
-        cameras[i].R = R;
-        LOGLN("Initial focal length #" << indices[i]+1 << ": " << cameras[i].focal);
-    }
-
-    BundleAdjuster adjuster(ba_space, conf_thresh);
-    adjuster(features, pairwise_matches, cameras);
-
-    // Find median focal length
-    vector<double> focals;
-    for (size_t i = 0; i < cameras.size(); ++i)
-    {
-        LOGLN("Camera #" << indices[i]+1 << " focal length: " << cameras[i].focal);
-        focals.push_back(cameras[i].focal);
-    }
-    nth_element(focals.begin(), focals.begin() + focals.size()/2, focals.end());
-    float warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
-
-    if (wave_correct)
-    {
-        vector<Mat> rmats;
-        for (size_t i = 0; i < cameras.size(); ++i)
-            rmats.push_back(cameras[i].R);
-        waveCorrect(rmats);
-        for (size_t i = 0; i < cameras.size(); ++i)
-            cameras[i].R = rmats[i];
-    }
-
-    LOGLN("Warping images (auxiliary)... ");
-    t = getTickCount();
-
-    vector<Point> corners(num_images);
-    vector<Mat> masks_warped(num_images);
-    vector<Mat> images_warped(num_images);
-    vector<Size> sizes(num_images);
-    vector<Mat> masks(num_images);
-
-    // Preapre images masks
-    for (int i = 0; i < num_images; ++i)
-    {
-        masks[i].create(images[i].size(), CV_8U);
-        masks[i].setTo(Scalar::all(255));
-    }
-
-    // Warp images and their masks
-    Ptr<Warper> warper = Warper::createByCameraFocal(static_cast<float>(warped_image_scale * seam_work_aspect),
-                                                     warp_type, try_gpu);
-    for (int i = 0; i < num_images; ++i)
-    {
-        corners[i] = warper->warp(images[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
-                                  cameras[i].R, images_warped[i]);
-        sizes[i] = images_warped[i].size();
-        warper->warp(masks[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
-                     cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
-    }
-
-    vector<Mat> images_warped_f(num_images);
-    for (int i = 0; i < num_images; ++i)
-        images_warped[i].convertTo(images_warped_f[i], CV_32F);
-
-    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
-
-    Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
-    compensator->feed(corners, images_warped, masks_warped);
-
-    Ptr<SeamFinder> seam_finder = SeamFinder::createDefault(seam_find_type);
-    seam_finder->find(images_warped_f, corners, masks_warped);
-
-    // Release unused memory
-    images.clear();
-    images_warped.clear();
-    images_warped_f.clear();
-    masks.clear();
-
-    LOGLN("Compositing...");
-    t = getTickCount();
-
-    Mat img_warped, img_warped_s;
-    Mat dilated_mask, seam_mask, mask, mask_warped;
-    Ptr<Blender> blender;
-    double compose_seam_aspect = 1;
-    double compose_work_aspect = 1;
-
-    for (int img_idx = 0; img_idx < num_images; ++img_idx)
-    {
-        LOGLN("Compositing image #" << indices[img_idx]+1);
-
-        // Read image and resize it if necessary
-        full_img = imread(img_names[img_idx]);
-        if (!is_compose_scale_set)
-        {
-            if (compose_megapix > 0)
-                compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
-            is_compose_scale_set = true;
-
-            // Compute relative scales
-            compose_seam_aspect = compose_scale / seam_scale;
-            compose_work_aspect = compose_scale / work_scale;
-
-            // Update warped image scale
-            warped_image_scale *= static_cast<float>(compose_work_aspect);
-            warper = Warper::createByCameraFocal(warped_image_scale, warp_type, try_gpu);
-
-            // Update corners and sizes
-            for (int i = 0; i < num_images; ++i)
-            {
-                // Update camera focal
-                cameras[i].focal *= compose_work_aspect;
-
-                // Update corner and size
-                Size sz = full_img_sizes[i];
-                if (abs(compose_scale - 1) > 1e-1)
-                {
-                    sz.width = cvRound(full_img_sizes[i].width * compose_scale);
-                    sz.height = cvRound(full_img_sizes[i].height * compose_scale);
-                }
-
-                Rect roi = warper->warpRoi(sz, static_cast<float>(cameras[i].focal), cameras[i].R);
-                corners[i] = roi.tl();
-                sizes[i] = roi.size();
-            }
-        }
-        if (abs(compose_scale - 1) > 1e-1)
-            resize(full_img, img, Size(), compose_scale, compose_scale);
-        else
-            img = full_img;
-        full_img.release();
-        Size img_size = img.size();
-
-        // Warp the current image
-        warper->warp(img, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R,
-                     img_warped);
-
-        // Warp the current image mask
-        mask.create(img_size, CV_8U);
-        mask.setTo(Scalar::all(255));
-        warper->warp(mask, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R, mask_warped,
-                     INTER_NEAREST, BORDER_CONSTANT);
-
-        // Compensate exposure
-        compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
-
-        img_warped.convertTo(img_warped_s, CV_16S);
-        img_warped.release();
-        img.release();
-        mask.release();
-
-        dilate(masks_warped[img_idx], dilated_mask, Mat());
-        resize(dilated_mask, seam_mask, mask_warped.size());
-        mask_warped = seam_mask & mask_warped;
-
-        if (blender.empty())
-        {
-            blender = Blender::createDefault(blend_type, try_gpu);
-            Size dst_sz = resultRoi(corners, sizes).size();
-            float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
-            if (blend_width < 1.f)
-                blender = Blender::createDefault(Blender::NO, try_gpu);
-            else if (blend_type == Blender::MULTI_BAND)
-            {
-                MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
-                mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
-                LOGLN("Multi-band blender, number of bands: " << mb->numBands());
-            }
-            else if (blend_type == Blender::FEATHER)
-            {
-                FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));
-                fb->setSharpness(1.f/blend_width);
-                LOGLN("Feather blender, sharpness: " << fb->sharpness());
-            }
-            blender->prepare(corners, sizes);
-        }
-
-        // Blend the current image
-        blender->feed(img_warped_s, mask_warped, corners[img_idx]);
-    }
-
-    Mat result, result_mask;
-    blender->blend(result, result_mask);
-
-    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
-
-    imwrite(result_name, result);
-
-    LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
+    imwrite(result_name, pano);
     return 0;
 }
 
diff --git a/samples/cpp/stitching_detailed.cpp b/samples/cpp/stitching_detailed.cpp
new file mode 100644
index 0000000..15d82db
--- /dev/null
@@ -0,0 +1,627 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                          License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+// We follow these papers:
+// 1) Construction of panoramic mosaics with global and local alignment.
+//    Heung-Yeung Shum and Richard Szeliski. 2000.
+// 2) Eliminating Ghosting and Exposure Artifacts in Image Mosaics.
+//    Matthew Uyttendaele, Ashley Eden and Richard Szeliski. 2001.
+// 3) Automatic Panoramic Image Stitching using Invariant Features.
+//    Matthew Brown and David G. Lowe. 2007.
+
+#include <fstream>
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/stitching/detail/autocalib.hpp"
+#include "opencv2/stitching/detail/blenders.hpp"
+#include "opencv2/stitching/detail/camera.hpp"
+#include "opencv2/stitching/detail/exposure_compensate.hpp"
+#include "opencv2/stitching/detail/matchers.hpp"
+#include "opencv2/stitching/detail/motion_estimators.hpp"
+#include "opencv2/stitching/detail/seam_finders.hpp"
+#include "opencv2/stitching/detail/util.hpp"
+#include "opencv2/stitching/detail/warpers.hpp"
+
+using namespace std;
+using namespace cv;
+using namespace cv::detail;
+
+void printUsage()
+{
+    cout <<
+        "Rotation model images stitcher.\n\n"
+        "stitching_detailed img1 img2 [...imgN] [flags]\n\n"
+        "Flags:\n"
+        "  --preview\n"
+        "      Run stitching in the preview mode. Works faster than usual mode,\n"
+        "      but output image will have lower resolution.\n"
+        "  --try_gpu (yes|no)\n"
+        "      Try to use GPU. The default value is 'no'. All default values\n"
+        "      are for CPU mode.\n"
+        "\nMotion Estimation Flags:\n"
+        "  --work_megapix <float>\n"
+        "      Resolution for image registration step. The default is 0.6 Mpx.\n"
+        "  --match_conf <float>\n"
+        "      Confidence for feature matching step. The default is 0.65.\n"
+        "  --conf_thresh <float>\n"
+        "      Threshold for two images are from the same panorama confidence.\n"
+        "      The default is 1.0.\n"
+        "  --ba (no|ray|focal_ray)\n"
+        "      Bundle adjustment cost function. The default is 'focal_ray'.\n"
+        "  --wave_correct (no|yes)\n"
+        "      Perform wave effect correction. The default is 'yes'.\n"
+        "  --save_graph <file_name>\n"
+        "      Save matches graph represented in DOT language to <file_name> file.\n"
+        "      Labels description: Nm is number of matches, Ni is number of inliers,\n"
+        "      C is confidence.\n"
+        "\nCompositing Flags:\n"
+        "  --warp (plane|cylindrical|spherical)\n"
+        "      Warp surface type. The default is 'spherical'.\n"
+        "  --seam_megapix <float>\n"
+        "      Resolution for seam estimation step. The default is 0.1 Mpx.\n"
+        "  --seam (no|voronoi|gc_color|gc_colorgrad)\n"
+        "      Seam estimation method. The default is 'gc_color'.\n"
+        "  --compose_megapix <float>\n"
+        "      Resolution for compositing step. Use -1 for original resolution.\n"
+        "      The default is -1.\n"
+        "  --expos_comp (no|gain|gain_blocks)\n"
+        "      Exposure compensation method. The default is 'gain_blocks'.\n"
+        "  --blend (no|feather|multiband)\n"
+        "      Blending method. The default is 'multiband'.\n"
+        "  --blend_strength <float>\n"
+        "      Blending strength from [0,100] range. The default is 5.\n"
+        "  --output <result_img>\n"
+        "      The default is 'result.jpg'.\n";
+}
+
+
+// Default command line args
+vector<string> img_names;
+bool preview = false;
+bool try_gpu = false;
+double work_megapix = 0.6;
+double seam_megapix = 0.1;
+double compose_megapix = -1;
+int ba_space = BundleAdjuster::FOCAL_RAY_SPACE;
+float conf_thresh = 1.f;
+bool wave_correct = true;
+bool save_graph = false;
+std::string save_graph_to;
+int warp_type = Warper::SPHERICAL;
+int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
+float match_conf = 0.65f;
+int seam_find_type = SeamFinder::GC_COLOR;
+int blend_type = Blender::MULTI_BAND;
+float blend_strength = 5;
+string result_name = "result.jpg";
+
+int parseCmdArgs(int argc, char** argv)
+{
+    if (argc == 1)
+    {
+        printUsage();
+        return -1;
+    }
+    for (int i = 1; i < argc; ++i)
+    {
+        if (string(argv[i]) == "--help" || string(argv[i]) == "/?")
+        {
+            printUsage();
+            return -1;
+        }
+        else if (string(argv[i]) == "--preview")
+        {
+            preview = true;
+        }
+        else if (string(argv[i]) == "--try_gpu")
+        {
+            if (string(argv[i + 1]) == "no")
+                try_gpu = false;
+            else if (string(argv[i + 1]) == "yes")
+                try_gpu = true;
+            else
+            {
+                cout << "Bad --try_gpu flag value\n";
+                return -1;
+            }
+            i++;
+        }
+        else if (string(argv[i]) == "--work_megapix")
+        {
+            work_megapix = atof(argv[i + 1]);
+            i++;
+        }
+        else if (string(argv[i]) == "--seam_megapix")
+        {
+            seam_megapix = atof(argv[i + 1]);
+            i++;
+        }
+        else if (string(argv[i]) == "--compose_megapix")
+        {
+            compose_megapix = atof(argv[i + 1]);
+            i++;
+        }
+        else if (string(argv[i]) == "--result")
+        {
+            result_name = argv[i + 1];
+            i++;
+        }
+        else if (string(argv[i]) == "--match_conf")
+        {
+            match_conf = static_cast<float>(atof(argv[i + 1]));
+            i++;
+        }
+        else if (string(argv[i]) == "--ba")
+        {
+            if (string(argv[i + 1]) == "no")
+                ba_space = BundleAdjuster::NO;
+            else if (string(argv[i + 1]) == "ray")
+                ba_space = BundleAdjuster::RAY_SPACE;
+            else if (string(argv[i + 1]) == "focal_ray")
+                ba_space = BundleAdjuster::FOCAL_RAY_SPACE;
+            else
+            {
+                cout << "Bad bundle adjustment space\n";
+                return -1;
+            }
+            i++;
+        }
+        else if (string(argv[i]) == "--conf_thresh")
+        {
+            conf_thresh = static_cast<float>(atof(argv[i + 1]));
+            i++;
+        }
+        else if (string(argv[i]) == "--wave_correct")
+        {
+            if (string(argv[i + 1]) == "no")
+                wave_correct = false;
+            else if (string(argv[i + 1]) == "yes")
+                wave_correct = true;
+            else
+            {
+                cout << "Bad --wave_correct flag value\n";
+                return -1;
+            }
+            i++;
+        }
+        else if (string(argv[i]) == "--save_graph")
+        {
+            save_graph = true;
+            save_graph_to = argv[i + 1];
+            i++;
+        }
+        else if (string(argv[i]) == "--warp")
+        {
+            if (string(argv[i + 1]) == "plane")
+                warp_type = Warper::PLANE;
+            else if (string(argv[i + 1]) == "cylindrical")
+                warp_type = Warper::CYLINDRICAL;
+            else if (string(argv[i + 1]) == "spherical")
+                warp_type = Warper::SPHERICAL;
+            else
+            {
+                cout << "Bad warping method\n";
+                return -1;
+            }
+            i++;
+        }
+        else if (string(argv[i]) == "--expos_comp")
+        {
+            if (string(argv[i + 1]) == "no")
+                expos_comp_type = ExposureCompensator::NO;
+            else if (string(argv[i + 1]) == "gain")
+                expos_comp_type = ExposureCompensator::GAIN;
+            else if (string(argv[i + 1]) == "gain_blocks")
+                expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
+            else
+            {
+                cout << "Bad exposure compensation method\n";
+                return -1;
+            }
+            i++;
+        }
+        else if (string(argv[i]) == "--seam")
+        {
+            if (string(argv[i + 1]) == "no")
+                seam_find_type = SeamFinder::NO;
+            else if (string(argv[i + 1]) == "voronoi")
+                seam_find_type = SeamFinder::VORONOI;
+            else if (string(argv[i + 1]) == "gc_color")
+                seam_find_type = SeamFinder::GC_COLOR;
+            else if (string(argv[i + 1]) == "gc_colorgrad")
+                seam_find_type = SeamFinder::GC_COLOR_GRAD;
+            else
+            {
+                cout << "Bad seam finding method\n";
+                return -1;
+            }
+            i++;
+        }
+        else if (string(argv[i]) == "--blend")
+        {
+            if (string(argv[i + 1]) == "no")
+                blend_type = Blender::NO;
+            else if (string(argv[i + 1]) == "feather")
+                blend_type = Blender::FEATHER;
+            else if (string(argv[i + 1]) == "multiband")
+                blend_type = Blender::MULTI_BAND;
+            else
+            {
+                cout << "Bad blending method\n";
+                return -1;
+            }
+            i++;
+        }
+        else if (string(argv[i]) == "--blend_strength")
+        {
+            blend_strength = static_cast<float>(atof(argv[i + 1]));
+            i++;
+        }
+        else if (string(argv[i]) == "--output")
+        {
+            result_name = argv[i + 1];
+            i++;
+        }
+        else
+            img_names.push_back(argv[i]);
+    }
+    if (preview)
+    {
+        compose_megapix = 0.6;
+    }
+    return 0;
+}
+
+
+int main(int argc, char* argv[])
+{
+    int64 app_start_time = getTickCount();
+    cv::setBreakOnError(true);
+
+    int retval = parseCmdArgs(argc, argv);
+    if (retval)
+        return retval;
+
+    // Check if we have enough images
+    int num_images = static_cast<int>(img_names.size());
+    if (num_images < 2)
+    {
+        LOGLN("Need more images");
+        return -1;
+    }
+
+    double work_scale = 1, seam_scale = 1, compose_scale = 1;
+    bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;
+
+    LOGLN("Finding features...");
+    int64 t = getTickCount();
+
+    vector<ImageFeatures> features(num_images);
+    SurfFeaturesFinder finder(try_gpu);
+    Mat full_img, img;
+
+    vector<Mat> images(num_images);
+    vector<Size> full_img_sizes(num_images);
+    double seam_work_aspect = 1;
+
+    for (int i = 0; i < num_images; ++i)
+    {
+        full_img = imread(img_names[i]);
+        full_img_sizes[i] = full_img.size();
+
+        if (full_img.empty())
+        {
+            LOGLN("Can't open image " << img_names[i]);
+            return -1;
+        }
+        if (work_megapix < 0)
+        {
+            img = full_img;
+            work_scale = 1;
+            is_work_scale_set = true;
+        }
+        else
+        {
+            if (!is_work_scale_set)
+            {
+                work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
+                is_work_scale_set = true;
+            }
+            resize(full_img, img, Size(), work_scale, work_scale);
+        }
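+        // The seam estimation scale is computed once, from the first image and seam_megapix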
+        if (!is_seam_scale_set)
+        {
+            seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
+            seam_work_aspect = seam_scale / work_scale;
+            is_seam_scale_set = true;
+        }
+
+        finder(img, features[i]);
+        features[i].img_idx = i;
+        LOGLN("Features in image #" << i+1 << ": " << features[i].keypoints.size());
+
+        resize(full_img, img, Size(), seam_scale, seam_scale);
+        images[i] = img.clone();
+    }
+
+    finder.collectGarbage();
+
+    full_img.release();
+    img.release();
+
+    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
+
+    LOG("Pairwise matching");
+    t = getTickCount();
+    vector<MatchesInfo> pairwise_matches;
+    BestOf2NearestMatcher matcher(try_gpu, match_conf);
+    matcher(features, pairwise_matches);
+    matcher.collectGarbage();
+    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
+
+    // Check if we should save matches graph
+    if (save_graph)
+    {
+        LOGLN("Saving matches graph...");
+        ofstream f(save_graph_to.c_str());
+        f << matchesGraphAsString(img_names, pairwise_matches, conf_thresh);
+    }
+
+    // Leave only images we are sure are from the same panorama
+    vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
+    vector<Mat> img_subset;
+    vector<string> img_names_subset;
+    vector<Size> full_img_sizes_subset;
+    for (size_t i = 0; i < indices.size(); ++i)
+    {
+        img_names_subset.push_back(img_names[indices[i]]);
+        img_subset.push_back(images[indices[i]]);
+        full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
+    }
+
+    images = img_subset;
+    img_names = img_names_subset;
+    full_img_sizes = full_img_sizes_subset;
+
+    // Check if we still have enough images
+    num_images = static_cast<int>(img_names.size());
+    if (num_images < 2)
+    {
+        LOGLN("Need more images");
+        return -1;
+    }
+
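+    // Estimate initial camera parameters (rotations and focal lengths) from the pairwise homographies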
+    HomographyBasedEstimator estimator;
+    vector<CameraParams> cameras;
+    estimator(features, pairwise_matches, cameras);
+
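+    // Make sure the rotation matrices are CV_32F before the subsequent steps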
+    for (size_t i = 0; i < cameras.size(); ++i)
+    {
+        Mat R;
+        cameras[i].R.convertTo(R, CV_32F);
+        cameras[i].R = R;
+        LOGLN("Initial focal length #" << indices[i]+1 << ": " << cameras[i].focal);
+    }
+
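+    // Jointly refine camera parameters using bundle adjustment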
+    BundleAdjuster adjuster(ba_space, conf_thresh);
+    adjuster(features, pairwise_matches, cameras);
+
+    // Find median focal length
+    vector<double> focals;
+    for (size_t i = 0; i < cameras.size(); ++i)
+    {
+        LOGLN("Camera #" << indices[i]+1 << " focal length: " << cameras[i].focal);
+        focals.push_back(cameras[i].focal);
+    }
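+    // Use the median focal length as the scale for image warping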
+    nth_element(focals.begin(), focals.begin() + focals.size()/2, focals.end());
+    float warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
+
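+    // Wave correction tries to straighten the panorama by adjusting the camera rotations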
+    if (wave_correct)
+    {
+        vector<Mat> rmats;
+        for (size_t i = 0; i < cameras.size(); ++i)
+            rmats.push_back(cameras[i].R);
+        waveCorrect(rmats);
+        for (size_t i = 0; i < cameras.size(); ++i)
+            cameras[i].R = rmats[i];
+    }
+
+    LOGLN("Warping images (auxiliary)... ");
+    t = getTickCount();
+
+    vector<Point> corners(num_images);
+    vector<Mat> masks_warped(num_images);
+    vector<Mat> images_warped(num_images);
+    vector<Size> sizes(num_images);
+    vector<Mat> masks(num_images);
+
+    // Prepare image masks
+    for (int i = 0; i < num_images; ++i)
+    {
+        masks[i].create(images[i].size(), CV_8U);
+        masks[i].setTo(Scalar::all(255));
+    }
+
+    // Warp images and their masks
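+    // (warping here is done at seam resolution, hence the seam_work_aspect factor)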
+    Ptr<Warper> warper = Warper::createByCameraFocal(static_cast<float>(warped_image_scale * seam_work_aspect),
+                                                     warp_type, try_gpu);
+    for (int i = 0; i < num_images; ++i)
+    {
+        corners[i] = warper->warp(images[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
+                                  cameras[i].R, images_warped[i]);
+        sizes[i] = images_warped[i].size();
+        warper->warp(masks[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
+                     cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
+    }
+
+    vector<Mat> images_warped_f(num_images);
+    for (int i = 0; i < num_images; ++i)
+        images_warped[i].convertTo(images_warped_f[i], CV_32F);
+
+    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
+
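+    // Compensate exposure differences between the warped images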
+    Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
+    compensator->feed(corners, images_warped, masks_warped);
+
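+    // Estimate seams on the warped (seam resolution) images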
+    Ptr<SeamFinder> seam_finder = SeamFinder::createDefault(seam_find_type);
+    seam_finder->find(images_warped_f, corners, masks_warped);
+
+    // Release unused memory
+    images.clear();
+    images_warped.clear();
+    images_warped_f.clear();
+    masks.clear();
+
+    LOGLN("Compositing...");
+    t = getTickCount();
+
+    Mat img_warped, img_warped_s;
+    Mat dilated_mask, seam_mask, mask, mask_warped;
+    Ptr<Blender> blender;
+    double compose_seam_aspect = 1;
+    double compose_work_aspect = 1;
+
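+    // Composite each image at compose resolution: re-read, re-warp, compensate exposure and blend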
+    for (int img_idx = 0; img_idx < num_images; ++img_idx)
+    {
+        LOGLN("Compositing image #" << indices[img_idx]+1);
+
+        // Read image and resize it if necessary
+        full_img = imread(img_names[img_idx]);
+        if (!is_compose_scale_set)
+        {
+            if (compose_megapix > 0)
+                compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
+            is_compose_scale_set = true;
+
+            // Compute relative scales
+            compose_seam_aspect = compose_scale / seam_scale;
+            compose_work_aspect = compose_scale / work_scale;
+
+            // Update warped image scale
+            warped_image_scale *= static_cast<float>(compose_work_aspect);
+            warper = Warper::createByCameraFocal(warped_image_scale, warp_type, try_gpu);
+
+            // Update corners and sizes
+            for (int i = 0; i < num_images; ++i)
+            {
+                // Update camera focal
+                cameras[i].focal *= compose_work_aspect;
+
+                // Update corner and size
+                Size sz = full_img_sizes[i];
+                if (abs(compose_scale - 1) > 1e-1)
+                {
+                    sz.width = cvRound(full_img_sizes[i].width * compose_scale);
+                    sz.height = cvRound(full_img_sizes[i].height * compose_scale);
+                }
+
+                Rect roi = warper->warpRoi(sz, static_cast<float>(cameras[i].focal), cameras[i].R);
+                corners[i] = roi.tl();
+                sizes[i] = roi.size();
+            }
+        }
+        if (abs(compose_scale - 1) > 1e-1)
+            resize(full_img, img, Size(), compose_scale, compose_scale);
+        else
+            img = full_img;
+        full_img.release();
+        Size img_size = img.size();
+
+        // Warp the current image
+        warper->warp(img, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R,
+                     img_warped);
+
+        // Warp the current image mask
+        mask.create(img_size, CV_8U);
+        mask.setTo(Scalar::all(255));
+        warper->warp(mask, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R, mask_warped,
+                     INTER_NEAREST, BORDER_CONSTANT);
+
+        // Compensate exposure
+        compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
+
+        img_warped.convertTo(img_warped_s, CV_16S);
+        img_warped.release();
+        img.release();
+        mask.release();
+
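+        // Seam masks were estimated at seam resolution: dilate and resize them to the warped mask size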
+        dilate(masks_warped[img_idx], dilated_mask, Mat());
+        resize(dilated_mask, seam_mask, mask_warped.size());
+        mask_warped = seam_mask & mask_warped;
+
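+        // Create the blender lazily, once the final corners and sizes are known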
+        if (blender.empty())
+        {
+            blender = Blender::createDefault(blend_type, try_gpu);
+            Size dst_sz = resultRoi(corners, sizes).size();
+            float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
+            if (blend_width < 1.f)
+                blender = Blender::createDefault(Blender::NO, try_gpu);
+            else if (blend_type == Blender::MULTI_BAND)
+            {
+                MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
+                mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
+                LOGLN("Multi-band blender, number of bands: " << mb->numBands());
+            }
+            else if (blend_type == Blender::FEATHER)
+            {
+                FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));
+                fb->setSharpness(1.f/blend_width);
+                LOGLN("Feather blender, sharpness: " << fb->sharpness());
+            }
+            blender->prepare(corners, sizes);
+        }
+
+        // Blend the current image
+        blender->feed(img_warped_s, mask_warped, corners[img_idx]);
+    }
+
+    Mat result, result_mask;
+    blender->blend(result, result_mask);
+
+    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
+
+    imwrite(result_name, result);
+
+    LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
+    return 0;
+}
+
+