fixed bug in opencv_stitching (corrected image resizing step), added matches checking...
author: Alexey Spizhevoy <no@email>
Thu, 9 Jun 2011 10:16:10 +0000 (10:16 +0000)
committer: Alexey Spizhevoy <no@email>
Thu, 9 Jun 2011 10:16:10 +0000 (10:16 +0000)
modules/stitching/blenders.cpp
modules/stitching/main.cpp
modules/stitching/matchers.cpp
modules/stitching/motion_estimators.cpp
modules/stitching/seam_finders.cpp
modules/stitching/warpers.hpp
modules/stitching/warpers_inl.hpp
samples/gpu/driver_api_stereo_multi.cpp

index fe185a1..5797245 100644 (file)
 // or tort (including negligence or otherwise) arising in any way out of\r
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
-//M*/
-#include "blenders.hpp"
-#include "util.hpp"
-
-using namespace std;
-using namespace cv;
-
-static const float WEIGHT_EPS = 1e-5f;
-
-Ptr<Blender> Blender::createDefault(int type)
-{
-    if (type == NO)
-        return new Blender();
-    if (type == FEATHER)
-        return new FeatherBlender();
-    if (type == MULTI_BAND)
-        return new MultiBandBlender();
-    CV_Error(CV_StsBadArg, "unsupported blending method");
-    return NULL;
-}
-
-
-void Blender::prepare(const vector<Point> &corners, const vector<Size> &sizes)
-{
-    prepare(resultRoi(corners, sizes));
-}
-
-
-void Blender::prepare(Rect dst_roi)
-{
-    dst_.create(dst_roi.size(), CV_16SC3);
-    dst_.setTo(Scalar::all(0));
-    dst_mask_.create(dst_roi.size(), CV_8U);
-    dst_mask_.setTo(Scalar::all(0));
-    dst_roi_ = dst_roi;
-}
-
-
-void Blender::feed(const Mat &img, const Mat &mask, Point tl) 
-{
-    CV_Assert(img.type() == CV_16SC3);
-    CV_Assert(mask.type() == CV_8U);
-    int dx = tl.x - dst_roi_.x;
-    int dy = tl.y - dst_roi_.y;
-
-    for (int y = 0; y < img.rows; ++y)
-    {
-        const Point3_<short> *src_row = img.ptr<Point3_<short> >(y);
-        Point3_<short> *dst_row = dst_.ptr<Point3_<short> >(dy + y);
-        const uchar *mask_row = mask.ptr<uchar>(y);
-        uchar *dst_mask_row = dst_mask_.ptr<uchar>(dy + y);
-
-        for (int x = 0; x < img.cols; ++x)
-        {
-            if (mask_row[x]) 
-                dst_row[dx + x] = src_row[x];
-            dst_mask_row[dx + x] |= mask_row[x];
-        }
-    }
-}
-
-
-void Blender::blend(Mat &dst, Mat &dst_mask)
-{
-    dst_.setTo(Scalar::all(0), dst_mask_ == 0);
-    dst = dst_;
-    dst_mask = dst_mask_;
-    dst_.release();
-    dst_mask_.release();
-}
-
-
-void FeatherBlender::prepare(Rect dst_roi)
-{
-    Blender::prepare(dst_roi);
-    dst_weight_map_.create(dst_roi.size(), CV_32F);
-    dst_weight_map_.setTo(0);
-}
-
-
-void FeatherBlender::feed(const Mat &img, const Mat &mask, Point tl)
-{
-    CV_Assert(img.type() == CV_16SC3);
-    CV_Assert(mask.type() == CV_8U);
-
-    createWeightMap(mask, sharpness_, weight_map_);
-    int dx = tl.x - dst_roi_.x;
-    int dy = tl.y - dst_roi_.y;
-
-    for (int y = 0; y < img.rows; ++y)
-    {
-        const Point3_<short>* src_row = img.ptr<Point3_<short> >(y);
-        Point3_<short>* dst_row = dst_.ptr<Point3_<short> >(dy + y);
-        const float* weight_row = weight_map_.ptr<float>(y);
-        float* dst_weight_row = dst_weight_map_.ptr<float>(dy + y);
-
-        for (int x = 0; x < img.cols; ++x)               
-        {
-            dst_row[dx + x].x += static_cast<short>(src_row[x].x * weight_row[x]);
-            dst_row[dx + x].y += static_cast<short>(src_row[x].y * weight_row[x]);
-            dst_row[dx + x].z += static_cast<short>(src_row[x].z * weight_row[x]);
-            dst_weight_row[dx + x] += weight_row[x];
-        }
-    }
-}
-
-
-void FeatherBlender::blend(Mat &dst, Mat &dst_mask)
-{
-    normalize(dst_weight_map_, dst_);
-    dst_mask_ = dst_weight_map_ > WEIGHT_EPS;
-    Blender::blend(dst, dst_mask);
-}
-
-
-void MultiBandBlender::prepare(Rect dst_roi)
-{
-    dst_roi_final_ = dst_roi;
-
-    // Crop unnecessary bands
-    double max_len = static_cast<double>(max(dst_roi.width, dst_roi.height));
-    num_bands_ = min(actual_num_bands_, static_cast<int>(ceil(log(max_len) / log(2.0))));
-
-    // Add border to the final image, to ensure sizes are divided by (1 << num_bands_)
-    dst_roi.width += ((1 << num_bands_) - dst_roi.width % (1 << num_bands_)) % (1 << num_bands_);
-    dst_roi.height += ((1 << num_bands_) - dst_roi.height % (1 << num_bands_)) % (1 << num_bands_);
-
-    Blender::prepare(dst_roi);
-
-    dst_pyr_laplace_.resize(num_bands_ + 1);
-    dst_pyr_laplace_[0] = dst_;
-
-    dst_band_weights_.resize(num_bands_ + 1);
-    dst_band_weights_[0].create(dst_roi.size(), CV_32F);
-    dst_band_weights_[0].setTo(0);
-
-    for (int i = 1; i <= num_bands_; ++i)
-    {
-        dst_pyr_laplace_[i].create((dst_pyr_laplace_[i - 1].rows + 1) / 2, 
-                                   (dst_pyr_laplace_[i - 1].cols + 1) / 2, CV_16SC3);
-        dst_band_weights_[i].create((dst_band_weights_[i - 1].rows + 1) / 2,
-                                    (dst_band_weights_[i - 1].cols + 1) / 2, CV_32F);
-        dst_pyr_laplace_[i].setTo(Scalar::all(0));
-        dst_band_weights_[i].setTo(0);
-    }
-}
-
-
-void MultiBandBlender::feed(const Mat &img, const Mat &mask, Point tl)
-{
-    CV_Assert(img.type() == CV_16SC3);
-    CV_Assert(mask.type() == CV_8U);
-
-    // Keep source image in memory with small border
-    int gap = 3 * (1 << num_bands_);
-    Point tl_new(max(dst_roi_.x, tl.x - gap), 
-                 max(dst_roi_.y, tl.y - gap));
-    Point br_new(min(dst_roi_.br().x, tl.x + img.cols + gap), 
-                 min(dst_roi_.br().y, tl.y + img.rows + gap));
-
-    // Ensure coordinates of top-left, bottom-right corners are divided by (1 << num_bands_). 
-    // After that scale between layers is exactly 2.
-    //
-    // We do it to avoid interpolation problems when keeping sub-images only. There is no such problem when 
-    // image is bordered to have size equal to the final image size, but this is too memory hungry approach.
-    tl_new.x = dst_roi_.x + (((tl_new.x - dst_roi_.x) >> num_bands_) << num_bands_);
-    tl_new.y = dst_roi_.y + (((tl_new.y - dst_roi_.y) >> num_bands_) << num_bands_);
-    int width = br_new.x - tl_new.x;
-    int height = br_new.y - tl_new.y;
-    width += ((1 << num_bands_) - width % (1 << num_bands_)) % (1 << num_bands_);
-    height += ((1 << num_bands_) - height % (1 << num_bands_)) % (1 << num_bands_);
-    br_new.x = tl_new.x + width;
-    br_new.y = tl_new.y + height;
-    int dy = max(br_new.y - dst_roi_.br().y, 0);
-    int dx = max(br_new.x - dst_roi_.br().x, 0);
-    tl_new.x -= dx; br_new.x -= dx;
-    tl_new.y -= dy; br_new.y -= dy;
-
-    int top = tl.y - tl_new.y;
-    int left = tl.x - tl_new.x;
-    int bottom = br_new.y - tl.y - img.rows;
-    int right = br_new.x - tl.x - img.cols;
-
-    // Create the source image Laplacian pyramid
-    vector<Mat> src_pyr_gauss(num_bands_ + 1);
-    copyMakeBorder(img, src_pyr_gauss[0], top, bottom, left, right, 
-                   BORDER_REFLECT);
-    for (int i = 0; i < num_bands_; ++i)
-        pyrDown(src_pyr_gauss[i], src_pyr_gauss[i + 1]);
-    vector<Mat> src_pyr_laplace;
-    createLaplacePyr(src_pyr_gauss, src_pyr_laplace);
-    src_pyr_gauss.clear();
-
-    // Create the weight map Gaussian pyramid
-    Mat weight_map;
-    mask.convertTo(weight_map, CV_32F, 1./255.);
-    vector<Mat> weight_pyr_gauss(num_bands_ + 1);
-    copyMakeBorder(weight_map, weight_pyr_gauss[0], top, bottom, left, right, 
-                   BORDER_CONSTANT);
-    for (int i = 0; i < num_bands_; ++i)
-        pyrDown(weight_pyr_gauss[i], weight_pyr_gauss[i + 1]);
-
-    int y_tl = tl_new.y - dst_roi_.y;
-    int y_br = br_new.y - dst_roi_.y;
-    int x_tl = tl_new.x - dst_roi_.x;
-    int x_br = br_new.x - dst_roi_.x;
-
-    // Add weighted layer of the source image to the final Laplacian pyramid layer
-    for (int i = 0; i <= num_bands_; ++i)
-    {
-        for (int y = y_tl; y < y_br; ++y)
-        {
-            int y_ = y - y_tl;
-            const Point3_<short>* src_row = src_pyr_laplace[i].ptr<Point3_<short> >(y_);
-            Point3_<short>* dst_row = dst_pyr_laplace_[i].ptr<Point3_<short> >(y);
-            const float* weight_row = weight_pyr_gauss[i].ptr<float>(y_);
-            float* dst_weight_row = dst_band_weights_[i].ptr<float>(y);
-
-            for (int x = x_tl; x < x_br; ++x)               
-            {
-                int x_ = x - x_tl;
-                dst_row[x].x += static_cast<short>(src_row[x_].x * weight_row[x_]);
-                dst_row[x].y += static_cast<short>(src_row[x_].y * weight_row[x_]);
-                dst_row[x].z += static_cast<short>(src_row[x_].z * weight_row[x_]);
-                dst_weight_row[x] += weight_row[x_];
-            }
-        }
-        x_tl /= 2; y_tl /= 2; 
-        x_br /= 2; y_br /= 2;
-    }    
-}
-
-
-void MultiBandBlender::blend(Mat &dst, Mat &dst_mask)
-{
-    for (int i = 0; i <= num_bands_; ++i)
-        normalize(dst_band_weights_[i], dst_pyr_laplace_[i]);
-
-    restoreImageFromLaplacePyr(dst_pyr_laplace_);
-
-    dst_ = dst_pyr_laplace_[0];
-    dst_ = dst_(Range(0, dst_roi_final_.height), Range(0, dst_roi_final_.width));
-    dst_mask_ = dst_band_weights_[0] > WEIGHT_EPS;
-    dst_mask_ = dst_mask_(Range(0, dst_roi_final_.height), Range(0, dst_roi_final_.width));
-    dst_pyr_laplace_.clear();
-    dst_band_weights_.clear();
-
-    Blender::blend(dst, dst_mask);
-}
-
-
-//////////////////////////////////////////////////////////////////////////////
-// Auxiliary functions
-
-void normalize(const Mat& weight, Mat& src)
-{
-    CV_Assert(weight.type() == CV_32F);
-    CV_Assert(src.type() == CV_16SC3);
-    for (int y = 0; y < src.rows; ++y)
-    {
-        Point3_<short> *row = src.ptr<Point3_<short> >(y);
-        const float *weight_row = weight.ptr<float>(y);
-
-        for (int x = 0; x < src.cols; ++x)
-        {
-            row[x].x = static_cast<short>(row[x].x / (weight_row[x] + WEIGHT_EPS));
-            row[x].y = static_cast<short>(row[x].y / (weight_row[x] + WEIGHT_EPS));
-            row[x].z = static_cast<short>(row[x].z / (weight_row[x] + WEIGHT_EPS));
-        }
-    }
-}
-
-
-void createWeightMap(const Mat &mask, float sharpness, Mat &weight)
-{
-    CV_Assert(mask.type() == CV_8U);
-    distanceTransform(mask, weight, CV_DIST_L1, 3);
-    threshold(weight * sharpness, weight, 1.f, 1.f, THRESH_TRUNC);
-}
-
-
-void createLaplacePyr(const vector<Mat> &pyr_gauss, vector<Mat> &pyr_laplace)
-{
-    if (pyr_gauss.size() == 0)
-        return;
-    pyr_laplace.resize(pyr_gauss.size());
-    Mat tmp;
-    for (size_t i = 0; i < pyr_laplace.size() - 1; ++i)
-    {
-        pyrUp(pyr_gauss[i + 1], tmp, pyr_gauss[i].size());
-        subtract(pyr_gauss[i], tmp, pyr_laplace[i]);
-    }
-    pyr_laplace[pyr_laplace.size() - 1] = pyr_gauss[pyr_laplace.size() - 1].clone();
-}
-
-
-void restoreImageFromLaplacePyr(vector<Mat> &pyr)
-{
-    if (pyr.size() == 0)
-        return;
-    Mat tmp;
-    for (size_t i = pyr.size() - 1; i > 0; --i)
-    {
-        pyrUp(pyr[i], tmp, pyr[i - 1].size());
-        add(tmp, pyr[i - 1], pyr[i - 1]);
-    }
-}
-
-
+//M*/\r
+#include "blenders.hpp"\r
+#include "util.hpp"\r
+\r
+using namespace std;\r
+using namespace cv;\r
+\r
+static const float WEIGHT_EPS = 1e-5f;\r
+\r
+Ptr<Blender> Blender::createDefault(int type)\r
+{\r
+    if (type == NO)\r
+        return new Blender();\r
+    if (type == FEATHER)\r
+        return new FeatherBlender();\r
+    if (type == MULTI_BAND)\r
+        return new MultiBandBlender();\r
+    CV_Error(CV_StsBadArg, "unsupported blending method");\r
+    return NULL;\r
+}\r
+\r
+\r
+void Blender::prepare(const vector<Point> &corners, const vector<Size> &sizes)\r
+{\r
+    prepare(resultRoi(corners, sizes));\r
+}\r
+\r
+\r
+void Blender::prepare(Rect dst_roi)\r
+{\r
+    dst_.create(dst_roi.size(), CV_16SC3);\r
+    dst_.setTo(Scalar::all(0));\r
+    dst_mask_.create(dst_roi.size(), CV_8U);\r
+    dst_mask_.setTo(Scalar::all(0));\r
+    dst_roi_ = dst_roi;\r
+}\r
+\r
+\r
+void Blender::feed(const Mat &img, const Mat &mask, Point tl) \r
+{\r
+    CV_Assert(img.type() == CV_16SC3);\r
+    CV_Assert(mask.type() == CV_8U);\r
+    int dx = tl.x - dst_roi_.x;\r
+    int dy = tl.y - dst_roi_.y;\r
+\r
+    for (int y = 0; y < img.rows; ++y)\r
+    {\r
+        const Point3_<short> *src_row = img.ptr<Point3_<short> >(y);\r
+        Point3_<short> *dst_row = dst_.ptr<Point3_<short> >(dy + y);\r
+        const uchar *mask_row = mask.ptr<uchar>(y);\r
+        uchar *dst_mask_row = dst_mask_.ptr<uchar>(dy + y);\r
+\r
+        for (int x = 0; x < img.cols; ++x)\r
+        {\r
+            if (mask_row[x]) \r
+                dst_row[dx + x] = src_row[x];\r
+            dst_mask_row[dx + x] |= mask_row[x];\r
+        }\r
+    }\r
+}\r
+\r
+\r
+void Blender::blend(Mat &dst, Mat &dst_mask)\r
+{\r
+    dst_.setTo(Scalar::all(0), dst_mask_ == 0);\r
+    dst = dst_;\r
+    dst_mask = dst_mask_;\r
+    dst_.release();\r
+    dst_mask_.release();\r
+}\r
+\r
+\r
+void FeatherBlender::prepare(Rect dst_roi)\r
+{\r
+    Blender::prepare(dst_roi);\r
+    dst_weight_map_.create(dst_roi.size(), CV_32F);\r
+    dst_weight_map_.setTo(0);\r
+}\r
+\r
+\r
+void FeatherBlender::feed(const Mat &img, const Mat &mask, Point tl)\r
+{\r
+    CV_Assert(img.type() == CV_16SC3);\r
+    CV_Assert(mask.type() == CV_8U);\r
+\r
+    createWeightMap(mask, sharpness_, weight_map_);\r
+    int dx = tl.x - dst_roi_.x;\r
+    int dy = tl.y - dst_roi_.y;\r
+\r
+    for (int y = 0; y < img.rows; ++y)\r
+    {\r
+        const Point3_<short>* src_row = img.ptr<Point3_<short> >(y);\r
+        Point3_<short>* dst_row = dst_.ptr<Point3_<short> >(dy + y);\r
+        const float* weight_row = weight_map_.ptr<float>(y);\r
+        float* dst_weight_row = dst_weight_map_.ptr<float>(dy + y);\r
+\r
+        for (int x = 0; x < img.cols; ++x)               \r
+        {\r
+            dst_row[dx + x].x += static_cast<short>(src_row[x].x * weight_row[x]);\r
+            dst_row[dx + x].y += static_cast<short>(src_row[x].y * weight_row[x]);\r
+            dst_row[dx + x].z += static_cast<short>(src_row[x].z * weight_row[x]);\r
+            dst_weight_row[dx + x] += weight_row[x];\r
+        }\r
+    }\r
+}\r
+\r
+\r
+void FeatherBlender::blend(Mat &dst, Mat &dst_mask)\r
+{\r
+    normalize(dst_weight_map_, dst_);\r
+    dst_mask_ = dst_weight_map_ > WEIGHT_EPS;\r
+    Blender::blend(dst, dst_mask);\r
+}\r
+\r
+\r
+void MultiBandBlender::prepare(Rect dst_roi)\r
+{\r
+    dst_roi_final_ = dst_roi;\r
+\r
+    // Crop unnecessary bands\r
+    double max_len = static_cast<double>(max(dst_roi.width, dst_roi.height));\r
+    num_bands_ = min(actual_num_bands_, static_cast<int>(ceil(log(max_len) / log(2.0))));\r
+\r
+    // Add border to the final image, to ensure sizes are divided by (1 << num_bands_)\r
+    dst_roi.width += ((1 << num_bands_) - dst_roi.width % (1 << num_bands_)) % (1 << num_bands_);\r
+    dst_roi.height += ((1 << num_bands_) - dst_roi.height % (1 << num_bands_)) % (1 << num_bands_);\r
+\r
+    Blender::prepare(dst_roi);\r
+\r
+    dst_pyr_laplace_.resize(num_bands_ + 1);\r
+    dst_pyr_laplace_[0] = dst_;\r
+\r
+    dst_band_weights_.resize(num_bands_ + 1);\r
+    dst_band_weights_[0].create(dst_roi.size(), CV_32F);\r
+    dst_band_weights_[0].setTo(0);\r
+\r
+    for (int i = 1; i <= num_bands_; ++i)\r
+    {\r
+        dst_pyr_laplace_[i].create((dst_pyr_laplace_[i - 1].rows + 1) / 2, \r
+                                   (dst_pyr_laplace_[i - 1].cols + 1) / 2, CV_16SC3);\r
+        dst_band_weights_[i].create((dst_band_weights_[i - 1].rows + 1) / 2,\r
+                                    (dst_band_weights_[i - 1].cols + 1) / 2, CV_32F);\r
+        dst_pyr_laplace_[i].setTo(Scalar::all(0));\r
+        dst_band_weights_[i].setTo(0);\r
+    }\r
+}\r
+\r
+\r
+void MultiBandBlender::feed(const Mat &img, const Mat &mask, Point tl)\r
+{\r
+    CV_Assert(img.type() == CV_16SC3);\r
+    CV_Assert(mask.type() == CV_8U);\r
+\r
+    // Keep source image in memory with small border\r
+    int gap = 3 * (1 << num_bands_);\r
+    Point tl_new(max(dst_roi_.x, tl.x - gap), \r
+                 max(dst_roi_.y, tl.y - gap));\r
+    Point br_new(min(dst_roi_.br().x, tl.x + img.cols + gap), \r
+                 min(dst_roi_.br().y, tl.y + img.rows + gap));\r
+\r
+    // Ensure coordinates of top-left, bottom-right corners are divided by (1 << num_bands_). \r
+    // After that scale between layers is exactly 2.\r
+    //\r
+    // We do it to avoid interpolation problems when keeping sub-images only. There is no such problem when \r
+    // image is bordered to have size equal to the final image size, but this is too memory hungry approach.\r
+    tl_new.x = dst_roi_.x + (((tl_new.x - dst_roi_.x) >> num_bands_) << num_bands_);\r
+    tl_new.y = dst_roi_.y + (((tl_new.y - dst_roi_.y) >> num_bands_) << num_bands_);\r
+    int width = br_new.x - tl_new.x;\r
+    int height = br_new.y - tl_new.y;\r
+    width += ((1 << num_bands_) - width % (1 << num_bands_)) % (1 << num_bands_);\r
+    height += ((1 << num_bands_) - height % (1 << num_bands_)) % (1 << num_bands_);\r
+    br_new.x = tl_new.x + width;\r
+    br_new.y = tl_new.y + height;\r
+    int dy = max(br_new.y - dst_roi_.br().y, 0);\r
+    int dx = max(br_new.x - dst_roi_.br().x, 0);\r
+    tl_new.x -= dx; br_new.x -= dx;\r
+    tl_new.y -= dy; br_new.y -= dy;\r
+\r
+    int top = tl.y - tl_new.y;\r
+    int left = tl.x - tl_new.x;\r
+    int bottom = br_new.y - tl.y - img.rows;\r
+    int right = br_new.x - tl.x - img.cols;\r
+\r
+    // Create the source image Laplacian pyramid\r
+    vector<Mat> src_pyr_gauss(num_bands_ + 1);\r
+    copyMakeBorder(img, src_pyr_gauss[0], top, bottom, left, right, \r
+                   BORDER_REFLECT);\r
+    for (int i = 0; i < num_bands_; ++i)\r
+        pyrDown(src_pyr_gauss[i], src_pyr_gauss[i + 1]);\r
+    vector<Mat> src_pyr_laplace;\r
+    createLaplacePyr(src_pyr_gauss, src_pyr_laplace);\r
+    src_pyr_gauss.clear();\r
+\r
+    // Create the weight map Gaussian pyramid\r
+    Mat weight_map;\r
+    mask.convertTo(weight_map, CV_32F, 1./255.);\r
+    vector<Mat> weight_pyr_gauss(num_bands_ + 1);\r
+    copyMakeBorder(weight_map, weight_pyr_gauss[0], top, bottom, left, right, \r
+                   BORDER_CONSTANT);\r
+    for (int i = 0; i < num_bands_; ++i)\r
+        pyrDown(weight_pyr_gauss[i], weight_pyr_gauss[i + 1]);\r
+\r
+    int y_tl = tl_new.y - dst_roi_.y;\r
+    int y_br = br_new.y - dst_roi_.y;\r
+    int x_tl = tl_new.x - dst_roi_.x;\r
+    int x_br = br_new.x - dst_roi_.x;\r
+\r
+    // Add weighted layer of the source image to the final Laplacian pyramid layer\r
+    for (int i = 0; i <= num_bands_; ++i)\r
+    {\r
+        for (int y = y_tl; y < y_br; ++y)\r
+        {\r
+            int y_ = y - y_tl;\r
+            const Point3_<short>* src_row = src_pyr_laplace[i].ptr<Point3_<short> >(y_);\r
+            Point3_<short>* dst_row = dst_pyr_laplace_[i].ptr<Point3_<short> >(y);\r
+            const float* weight_row = weight_pyr_gauss[i].ptr<float>(y_);\r
+            float* dst_weight_row = dst_band_weights_[i].ptr<float>(y);\r
+\r
+            for (int x = x_tl; x < x_br; ++x)               \r
+            {\r
+                int x_ = x - x_tl;\r
+                dst_row[x].x += static_cast<short>(src_row[x_].x * weight_row[x_]);\r
+                dst_row[x].y += static_cast<short>(src_row[x_].y * weight_row[x_]);\r
+                dst_row[x].z += static_cast<short>(src_row[x_].z * weight_row[x_]);\r
+                dst_weight_row[x] += weight_row[x_];\r
+            }\r
+        }\r
+        x_tl /= 2; y_tl /= 2; \r
+        x_br /= 2; y_br /= 2;\r
+    }    \r
+}\r
+\r
+\r
+void MultiBandBlender::blend(Mat &dst, Mat &dst_mask)\r
+{\r
+    for (int i = 0; i <= num_bands_; ++i)\r
+        normalize(dst_band_weights_[i], dst_pyr_laplace_[i]);\r
+\r
+    restoreImageFromLaplacePyr(dst_pyr_laplace_);\r
+\r
+    dst_ = dst_pyr_laplace_[0];\r
+    dst_ = dst_(Range(0, dst_roi_final_.height), Range(0, dst_roi_final_.width));\r
+    dst_mask_ = dst_band_weights_[0] > WEIGHT_EPS;\r
+    dst_mask_ = dst_mask_(Range(0, dst_roi_final_.height), Range(0, dst_roi_final_.width));\r
+    dst_pyr_laplace_.clear();\r
+    dst_band_weights_.clear();\r
+\r
+    Blender::blend(dst, dst_mask);\r
+}\r
+\r
+\r
+//////////////////////////////////////////////////////////////////////////////\r
+// Auxiliary functions\r
+\r
+void normalize(const Mat& weight, Mat& src)\r
+{\r
+    CV_Assert(weight.type() == CV_32F);\r
+    CV_Assert(src.type() == CV_16SC3);\r
+    for (int y = 0; y < src.rows; ++y)\r
+    {\r
+        Point3_<short> *row = src.ptr<Point3_<short> >(y);\r
+        const float *weight_row = weight.ptr<float>(y);\r
+\r
+        for (int x = 0; x < src.cols; ++x)\r
+        {\r
+            row[x].x = static_cast<short>(row[x].x / (weight_row[x] + WEIGHT_EPS));\r
+            row[x].y = static_cast<short>(row[x].y / (weight_row[x] + WEIGHT_EPS));\r
+            row[x].z = static_cast<short>(row[x].z / (weight_row[x] + WEIGHT_EPS));\r
+        }\r
+    }\r
+}\r
+\r
+\r
+void createWeightMap(const Mat &mask, float sharpness, Mat &weight)\r
+{\r
+    CV_Assert(mask.type() == CV_8U);\r
+    distanceTransform(mask, weight, CV_DIST_L1, 3);\r
+    threshold(weight * sharpness, weight, 1.f, 1.f, THRESH_TRUNC);\r
+}\r
+\r
+\r
+void createLaplacePyr(const vector<Mat> &pyr_gauss, vector<Mat> &pyr_laplace)\r
+{\r
+    if (pyr_gauss.size() == 0)\r
+        return;\r
+    pyr_laplace.resize(pyr_gauss.size());\r
+    Mat tmp;\r
+    for (size_t i = 0; i < pyr_laplace.size() - 1; ++i)\r
+    {\r
+        pyrUp(pyr_gauss[i + 1], tmp, pyr_gauss[i].size());\r
+        subtract(pyr_gauss[i], tmp, pyr_laplace[i]);\r
+    }\r
+    pyr_laplace[pyr_laplace.size() - 1] = pyr_gauss[pyr_laplace.size() - 1].clone();\r
+}\r
+\r
+\r
+void restoreImageFromLaplacePyr(vector<Mat> &pyr)\r
+{\r
+    if (pyr.size() == 0)\r
+        return;\r
+    Mat tmp;\r
+    for (size_t i = pyr.size() - 1; i > 0; --i)\r
+    {\r
+        pyrUp(pyr[i], tmp, pyr[i - 1].size());\r
+        add(tmp, pyr[i - 1], pyr[i - 1]);\r
+    }\r
+}\r
+\r
+\r
index fe952e6..7c49228 100644 (file)
@@ -75,7 +75,7 @@ void printUsage()
         "  --work_megapix <float>\n"\r
         "      Resolution for image registration step. The default is 0.6 Mpx.\n"\r
         "  --match_conf <float>\n"\r
-        "      Confidence for feature matching step. The default is 0.7.\n"\r
+        "      Confidence for feature matching step. The default is 0.65.\n"\r
         "  --conf_thresh <float>\n"\r
         "      Threshold for two images are from the same panorama confidence.\n"\r
         "      The default is 1.0.\n"\r
@@ -320,11 +320,14 @@ int main(int argc, char* argv[])
     Mat full_img, img;\r
 \r
     vector<Mat> images(num_images);\r
+    vector<Size> full_img_sizes(num_images);\r
     double seam_work_aspect = 1;\r
 \r
     for (int i = 0; i < num_images; ++i)\r
     {\r
         full_img = imread(img_names[i]);\r
+        full_img_sizes[i] = full_img.size();\r
+\r
         if (full_img.empty())\r
         {\r
             LOGLN("Can't open image " << img_names[i]);\r
@@ -376,14 +379,17 @@ int main(int argc, char* argv[])
     vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);\r
     vector<Mat> img_subset;\r
     vector<string> img_names_subset;\r
+    vector<Size> full_img_sizes_subset;\r
     for (size_t i = 0; i < indices.size(); ++i)\r
     {\r
         img_names_subset.push_back(img_names[indices[i]]);\r
         img_subset.push_back(images[indices[i]]);\r
+        full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);\r
     }\r
 \r
     images = img_subset;\r
     img_names = img_names_subset;\r
+    full_img_sizes = full_img_sizes_subset;\r
 \r
     // Check if we still have enough images\r
     num_images = static_cast<int>(img_names.size());\r
@@ -519,16 +525,21 @@ int main(int argc, char* argv[])
             warper = Warper::createByCameraFocal(warped_image_scale, warp_type);\r
 \r
             // Update corners and sizes\r
-            Rect dst_roi = resultRoi(corners, sizes);\r
             for (int i = 0; i < num_images; ++i)\r
             {\r
                 // Update camera focal\r
                 cameras[i].focal *= compose_work_aspect;\r
 \r
                 // Update corner and size\r
-                corners[i] = dst_roi.tl() + (corners[i] - dst_roi.tl()) * compose_seam_aspect;\r
-                sizes[i] = Size(static_cast<int>((sizes[i].width + 1) * compose_seam_aspect), \r
-                                static_cast<int>((sizes[i].height + 1) * compose_seam_aspect));\r
+                Size sz = full_img_sizes[i];\r
+                if (abs(compose_scale - 1) > 1e-1)\r
+                {\r
+                    sz.width = cvRound(full_img_sizes[i].width * compose_scale);\r
+                    sz.height = cvRound(full_img_sizes[i].height * compose_scale);\r
+                }\r
+                Rect roi = warper->warpRoi(sz, static_cast<float>(cameras[i].focal), cameras[i].R);\r
+                corners[i] = roi.tl();\r
+                sizes[i] = roi.size();\r
             }\r
         }\r
         if (abs(compose_scale - 1) > 1e-1)\r
@@ -539,7 +550,7 @@ int main(int argc, char* argv[])
         Size img_size = img.size();\r
 \r
         // Warp the current image\r
-        warper->warp(img, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R, \r
+        warper->warp(img, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R,\r
                      img_warped);\r
 \r
         // Warp the current image mask\r
@@ -587,7 +598,7 @@ int main(int argc, char* argv[])
     }\r
    \r
     Mat result, result_mask;\r
-    blender->blend(result, result_mask);\r
+    blender->blend(result, result_mask);    \r
 \r
     LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");\r
 \r
index 07c8fb9..69262eb 100644 (file)
@@ -311,7 +311,7 @@ namespace
             const DMatch& m1 = pair_matches[i][1];\r
             if (m0.distance < (1.f - match_conf_) * m1.distance)\r
             {\r
-                matches_info.matches.push_back(m0);\r
+                //matches_info.matches.push_back(m0);\r
                 matches.insert(make_pair(m0.queryIdx, m0.trainIdx));\r
             }\r
         }\r
@@ -326,7 +326,7 @@ namespace
             const DMatch& m0 = pair_matches[i][0];\r
             const DMatch& m1 = pair_matches[i][1];\r
             if (m0.distance < (1.f - match_conf_) * m1.distance)\r
-                if (matches.find(make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())\r
+                if (matches.find(make_pair(m0.trainIdx, m0.queryIdx)) != matches.end())\r
                     matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));\r
         }\r
     }\r
@@ -352,7 +352,7 @@ namespace
             const DMatch& m1 = pair_matches[i][1];\r
             if (m0.distance < (1.f - match_conf_) * m1.distance)\r
             {\r
-                matches_info.matches.push_back(m0);\r
+                //matches_info.matches.push_back(m0);\r
                 matches.insert(make_pair(m0.queryIdx, m0.trainIdx));\r
             }\r
         }\r
@@ -368,7 +368,7 @@ namespace
             const DMatch& m0 = pair_matches[i][0];\r
             const DMatch& m1 = pair_matches[i][1];\r
             if (m0.distance < (1.f - match_conf_) * m1.distance)\r
-                if (matches.find(make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())\r
+                if (matches.find(make_pair(m0.trainIdx, m0.queryIdx)) != matches.end())\r
                     matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));\r
         }\r
     }\r
index e1fac70..763c4a6 100644 (file)
 // or tort (including negligence or otherwise) arising in any way out of\r
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
-//M*/
-#include <algorithm>
-#include "autocalib.hpp"
-#include "motion_estimators.hpp"
-#include "util.hpp"
-
-using namespace std;
-using namespace cv;
-
-
-//////////////////////////////////////////////////////////////////////////////
-
-CameraParams::CameraParams() : focal(1), R(Mat::eye(3, 3, CV_64F)), t(Mat::zeros(3, 1, CV_64F)) {}
-
-CameraParams::CameraParams(const CameraParams &other) { *this = other; }
-
-const CameraParams& CameraParams::operator =(const CameraParams &other)
-{
-    focal = other.focal;
-    R = other.R.clone();
-    t = other.t.clone();
-    return *this;
-}
-
-
-//////////////////////////////////////////////////////////////////////////////
-
-struct IncDistance
-{
-    IncDistance(vector<int> &dists) : dists(&dists[0]) {}
-    void operator ()(const GraphEdge &edge) { dists[edge.to] = dists[edge.from] + 1; }
-    int* dists;
-};
-
-
-struct CalcRotation
-{
-    CalcRotation(int num_images, const vector<MatchesInfo> &pairwise_matches, vector<CameraParams> &cameras)
-        : num_images(num_images), pairwise_matches(&pairwise_matches[0]), cameras(&cameras[0]) {}
-
-    void operator ()(const GraphEdge &edge)
-    {
-        int pair_idx = edge.from * num_images + edge.to;
-
-        double f_from = cameras[edge.from].focal;
-        double f_to = cameras[edge.to].focal;
-
-        Mat K_from = Mat::eye(3, 3, CV_64F);
-        K_from.at<double>(0, 0) = f_from;
-        K_from.at<double>(1, 1) = f_from;
-
-        Mat K_to = Mat::eye(3, 3, CV_64F);
-        K_to.at<double>(0, 0) = f_to;
-        K_to.at<double>(1, 1) = f_to;
-
-        Mat R = K_from.inv() * pairwise_matches[pair_idx].H.inv() * K_to;
-        cameras[edge.to].R = cameras[edge.from].R * R;
-    }
-
-    int num_images;
-    const MatchesInfo* pairwise_matches;
-    CameraParams* cameras;
-};
-
-
-void HomographyBasedEstimator::estimate(const vector<ImageFeatures> &features, const vector<MatchesInfo> &pairwise_matches, 
-                                        vector<CameraParams> &cameras)
-{
-    const int num_images = static_cast<int>(features.size());
-
-    // Estimate focal length and set it for all cameras
-    vector<double> focals;
-    estimateFocal(features, pairwise_matches, focals);
-    cameras.resize(num_images);
-    for (int i = 0; i < num_images; ++i)
-        cameras[i].focal = focals[i];
-
-    // Restore global motion
-    Graph span_tree;
-    vector<int> span_tree_centers;
-    findMaxSpanningTree(num_images, pairwise_matches, span_tree, span_tree_centers);
-    span_tree.walkBreadthFirst(span_tree_centers[0], CalcRotation(num_images, pairwise_matches, cameras));
-}
-
-
-//////////////////////////////////////////////////////////////////////////////
-
-void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vector<MatchesInfo> &pairwise_matches, 
-                              vector<CameraParams> &cameras)
-{
-    num_images_ = static_cast<int>(features.size());
-    features_ = &features[0];
-    pairwise_matches_ = &pairwise_matches[0];
-
-    // Prepare focals and rotations
-    cameras_.create(num_images_ * 4, 1, CV_64F);
-    SVD svd;
-    for (int i = 0; i < num_images_; ++i)
-    {
-        cameras_.at<double>(i * 4, 0) = cameras[i].focal;
-
-        svd(cameras[i].R, SVD::FULL_UV);
-        Mat R = svd.u * svd.vt;
-        if (determinant(R) < 0) 
-            R *= -1;
-
-        Mat rvec;
-        Rodrigues(R, rvec); CV_Assert(rvec.type() == CV_32F);
-        cameras_.at<double>(i * 4 + 1, 0) = rvec.at<float>(0, 0);
-        cameras_.at<double>(i * 4 + 2, 0) = rvec.at<float>(1, 0);
-        cameras_.at<double>(i * 4 + 3, 0) = rvec.at<float>(2, 0);
-    }
-
-    // Select only consistent image pairs for futher adjustment
-    edges_.clear();
-    for (int i = 0; i < num_images_ - 1; ++i)
-    {
-        for (int j = i + 1; j < num_images_; ++j)
-        {
-            const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];
-            if (matches_info.confidence > conf_thresh_)
-                edges_.push_back(make_pair(i, j));
-        }
-    }
-
-    // Compute number of correspondences
-    total_num_matches_ = 0;
-    for (size_t i = 0; i < edges_.size(); ++i)
-        total_num_matches_ += static_cast<int>(pairwise_matches[edges_[i].first * num_images_ + edges_[i].second].num_inliers);
-
-    CvLevMarq solver(num_images_ * 4, total_num_matches_ * 3,
-                     cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 1000, DBL_EPSILON));
-
-    CvMat matParams = cameras_;
-    cvCopy(&matParams, solver.param);
-
-    int count = 0;
-    for(;;)
-    {
-        const CvMat* _param = 0;
-        CvMat* _J = 0;
-        CvMat* _err = 0;
-
-        bool proceed = solver.update(_param, _J, _err);
-
-        cvCopy( _param, &matParams );
-
-        if( !proceed || !_err )
-            break;
-
-        if( _J )
-        {
-            calcJacobian();
-            CvMat matJ = J_;
-            cvCopy( &matJ, _J );
-        }
-
-        if (_err)
-        {
-            calcError(err_);
-            LOG(".");
-            count++;
-            CvMat matErr = err_;
-            cvCopy( &matErr, _err );
-        }
-    }
-    LOGLN("");
-    LOGLN("Bundle adjustment, final error: " << sqrt(err_.dot(err_)));
-    LOGLN("Bundle adjustment, iterations done: " << count);
-
-    // Obtain global motion
-    for (int i = 0; i < num_images_; ++i)
-    {
-        cameras[i].focal = cameras_.at<double>(i * 4, 0);
-        Mat rvec(3, 1, CV_64F);
-        rvec.at<double>(0, 0) = cameras_.at<double>(i * 4 + 1, 0);
-        rvec.at<double>(1, 0) = cameras_.at<double>(i * 4 + 2, 0);
-        rvec.at<double>(2, 0) = cameras_.at<double>(i * 4 + 3, 0);
-        Rodrigues(rvec, cameras[i].R);
-        Mat Mf;
-        cameras[i].R.convertTo(Mf, CV_32F);
-        cameras[i].R = Mf;
-    }
-
-    // Normalize motion to center image
-    Graph span_tree;
-    vector<int> span_tree_centers;
-    findMaxSpanningTree(num_images_, pairwise_matches, span_tree, span_tree_centers);
-    Mat R_inv = cameras[span_tree_centers[0]].R.inv();
-    for (int i = 0; i < num_images_; ++i)
-        cameras[i].R = R_inv * cameras[i].R;
-}
-
-
-void BundleAdjuster::calcError(Mat &err)
-{
-    err.create(total_num_matches_ * 3, 1, CV_64F);
-
-    int match_idx = 0;
-    for (size_t edge_idx = 0; edge_idx < edges_.size(); ++edge_idx)
-    {
-        int i = edges_[edge_idx].first;
-        int j = edges_[edge_idx].second;
-        double f1 = cameras_.at<double>(i * 4, 0);
-        double f2 = cameras_.at<double>(j * 4, 0);
-        double R1[9], R2[9];
-        Mat R1_(3, 3, CV_64F, R1), R2_(3, 3, CV_64F, R2);
-        Mat rvec(3, 1, CV_64F);
-        rvec.at<double>(0, 0) = cameras_.at<double>(i * 4 + 1, 0);
-        rvec.at<double>(1, 0) = cameras_.at<double>(i * 4 + 2, 0);
-        rvec.at<double>(2, 0) = cameras_.at<double>(i * 4 + 3, 0);
-        Rodrigues(rvec, R1_); CV_Assert(R1_.type() == CV_64F);
-        rvec.at<double>(0, 0) = cameras_.at<double>(j * 4 + 1, 0);
-        rvec.at<double>(1, 0) = cameras_.at<double>(j * 4 + 2, 0);
-        rvec.at<double>(2, 0) = cameras_.at<double>(j * 4 + 3, 0);
-        Rodrigues(rvec, R2_); CV_Assert(R2_.type() == CV_64F);
-
-        const ImageFeatures& features1 = features_[i];
-        const ImageFeatures& features2 = features_[j];
-        const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];
-
-        for (size_t k = 0; k < matches_info.matches.size(); ++k)
-        {
-            if (!matches_info.inliers_mask[k])
-                continue;
-
-            const DMatch& m = matches_info.matches[k];
-
-            Point2d kp1 = features1.keypoints[m.queryIdx].pt;
-            kp1.x -= 0.5 * features1.img_size.width;
-            kp1.y -= 0.5 * features1.img_size.height;
-            Point2d kp2 = features2.keypoints[m.trainIdx].pt;
-            kp2.x -= 0.5 * features2.img_size.width;
-            kp2.y -= 0.5 * features2.img_size.height;
-            double len1 = sqrt(kp1.x * kp1.x + kp1.y * kp1.y + f1 * f1);
-            double len2 = sqrt(kp2.x * kp2.x + kp2.y * kp2.y + f2 * f2);
-            Point3d p1(kp1.x / len1, kp1.y / len1, f1 / len1);
-            Point3d p2(kp2.x / len2, kp2.y / len2, f2 / len2);
-
-            Point3d d1(p1.x * R1[0] + p1.y * R1[1] + p1.z * R1[2],
-                       p1.x * R1[3] + p1.y * R1[4] + p1.z * R1[5],
-                       p1.x * R1[6] + p1.y * R1[7] + p1.z * R1[8]);
-            Point3d d2(p2.x * R2[0] + p2.y * R2[1] + p2.z * R2[2],
-                       p2.x * R2[3] + p2.y * R2[4] + p2.z * R2[5],
-                       p2.x * R2[6] + p2.y * R2[7] + p2.z * R2[8]);
-
-            double mult = 1;
-            if (cost_space_ == FOCAL_RAY_SPACE)
-                mult = sqrt(f1 * f2);
-            err.at<double>(3 * match_idx, 0) = mult * (d1.x - d2.x);
-            err.at<double>(3 * match_idx + 1, 0) = mult * (d1.y - d2.y);
-            err.at<double>(3 * match_idx + 2, 0) = mult * (d1.z - d2.z);
-            match_idx++;
-        }
-    }
-}
-
-
-void calcDeriv(const Mat &err1, const Mat &err2, double h, Mat res)
-{
-    for (int i = 0; i < err1.rows; ++i)
-        res.at<double>(i, 0) = (err2.at<double>(i, 0) - err1.at<double>(i, 0)) / h;
-}
-
-
-void BundleAdjuster::calcJacobian()
-{
-    J_.create(total_num_matches_ * 3, num_images_ * 4, CV_64F);
-
-    double f, r;
-    const double df = 0.001; // Focal length step
-    const double dr = 0.001; // Angle step
-
-    for (int i = 0; i < num_images_; ++i)
-    {
-        f = cameras_.at<double>(i * 4, 0);
-        cameras_.at<double>(i * 4, 0) = f - df;
-        calcError(err1_);
-        cameras_.at<double>(i * 4, 0) = f + df;
-        calcError(err2_);
-        calcDeriv(err1_, err2_, 2 * df, J_.col(i * 4));
-        cameras_.at<double>(i * 4, 0) = f;
-
-        r = cameras_.at<double>(i * 4 + 1, 0);
-        cameras_.at<double>(i * 4 + 1, 0) = r - dr;
-        calcError(err1_);
-        cameras_.at<double>(i * 4 + 1, 0) = r + dr;
-        calcError(err2_);
-        calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 1));
-        cameras_.at<double>(i * 4 + 1, 0) = r;
-
-        r = cameras_.at<double>(i * 4 + 2, 0);
-        cameras_.at<double>(i * 4 + 2, 0) = r - dr;
-        calcError(err1_);
-        cameras_.at<double>(i * 4 + 2, 0) = r + dr;
-        calcError(err2_);
-        calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 2));
-        cameras_.at<double>(i * 4 + 2, 0) = r;
-
-        r = cameras_.at<double>(i * 4 + 3, 0);
-        cameras_.at<double>(i * 4 + 3, 0) = r - dr;
-        calcError(err1_);
-        cameras_.at<double>(i * 4 + 3, 0) = r + dr;
-        calcError(err2_);
-        calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 3));
-        cameras_.at<double>(i * 4 + 3, 0) = r;
-    }
-}
-
-
-//////////////////////////////////////////////////////////////////////////////
-
-void waveCorrect(vector<Mat> &rmats)
-{
-    float data[9];
-    Mat r0(1, 3, CV_32F, data);
-    Mat r1(1, 3, CV_32F, data + 3);
-    Mat r2(1, 3, CV_32F, data + 6);
-    Mat R(3, 3, CV_32F, data);
-
-    Mat cov = Mat::zeros(3, 3, CV_32F);
-    for (size_t i = 0; i < rmats.size(); ++i)
-    {   
-        Mat r0 = rmats[i].col(0);
-        cov += r0 * r0.t();
-    }
-
-    SVD svd;
-    svd(cov, SVD::FULL_UV);
-    svd.vt.row(2).copyTo(r1);
-    if (determinant(svd.vt) < 0) r1 *= -1;
-
-    Mat avgz = Mat::zeros(3, 1, CV_32F);
-    for (size_t i = 0; i < rmats.size(); ++i)
-        avgz += rmats[i].col(2);
-    r1.cross(avgz.t()).copyTo(r0);
-    normalize(r0, r0);
-
-    r1.cross(r0).copyTo(r2);
-    if (determinant(R) < 0) R *= -1;
-
-    for (size_t i = 0; i < rmats.size(); ++i)
-        rmats[i] = R * rmats[i];
-}
-
-
-//////////////////////////////////////////////////////////////////////////////
-
-vector<int> leaveBiggestComponent(vector<ImageFeatures> &features,  vector<MatchesInfo> &pairwise_matches, 
-                                  float conf_threshold)
-{
-    const int num_images = static_cast<int>(features.size());
-
-    DjSets comps(num_images);
-    for (int i = 0; i < num_images; ++i)
-    {
-        for (int j = 0; j < num_images; ++j)
-        {
-            if (pairwise_matches[i*num_images + j].confidence < conf_threshold)
-                continue;
-            int comp1 = comps.find(i);
-            int comp2 = comps.find(j);
-            if (comp1 != comp2) 
-                comps.merge(comp1, comp2);
-        }
-    }
-
-    int max_comp = max_element(comps.size.begin(), comps.size.end()) - comps.size.begin();
-
-    vector<int> indices;
-    vector<int> indices_removed;
-    for (int i = 0; i < num_images; ++i)
-        if (comps.find(i) == max_comp)
-            indices.push_back(i);    
-        else
-            indices_removed.push_back(i);
-
-    vector<ImageFeatures> features_subset;
-    vector<MatchesInfo> pairwise_matches_subset;
-    for (size_t i = 0; i < indices.size(); ++i)
-    {
-        features_subset.push_back(features[indices[i]]);
-        for (size_t j = 0; j < indices.size(); ++j)
-        {
-            pairwise_matches_subset.push_back(pairwise_matches[indices[i]*num_images + indices[j]]);
-            pairwise_matches_subset.back().src_img_idx = i;
-            pairwise_matches_subset.back().dst_img_idx = j;
-        }
-    }
-
-    if (static_cast<int>(features_subset.size()) == num_images)
-        return indices;
-
-    LOG("Removed some images, because can't match them: (");
-    LOG(indices_removed[0]+1);
-    for (size_t i = 1; i < indices_removed.size(); ++i) 
-        LOG(", " << indices_removed[i]+1);
-    LOGLN(")");
-
-    features = features_subset;
-    pairwise_matches = pairwise_matches_subset;
-
-    return indices;
-}
-
-
-void findMaxSpanningTree(int num_images, const vector<MatchesInfo> &pairwise_matches,
-                         Graph &span_tree, vector<int> &centers)
-{
-    Graph graph(num_images);
-    vector<GraphEdge> edges;
-
-    // Construct images graph and remember its edges
-    for (int i = 0; i < num_images; ++i)
-    {
-        for (int j = 0; j < num_images; ++j)
-        {
-            if (pairwise_matches[i * num_images + j].H.empty())
-                continue;
-            float conf = static_cast<float>(pairwise_matches[i * num_images + j].num_inliers);
-            graph.addEdge(i, j, conf);
-            edges.push_back(GraphEdge(i, j, conf));
-        }
-    }
-
-    DjSets comps(num_images);
-    span_tree.create(num_images);
-    vector<int> span_tree_powers(num_images, 0);
-
-    // Find maximum spanning tree
-    sort(edges.begin(), edges.end(), greater<GraphEdge>());
-    for (size_t i = 0; i < edges.size(); ++i)
-    {
-        int comp1 = comps.find(edges[i].from);
-        int comp2 = comps.find(edges[i].to);
-        if (comp1 != comp2)
-        {
-            comps.merge(comp1, comp2);
-            span_tree.addEdge(edges[i].from, edges[i].to, edges[i].weight);
-            span_tree.addEdge(edges[i].to, edges[i].from, edges[i].weight);
-            span_tree_powers[edges[i].from]++;
-            span_tree_powers[edges[i].to]++;
-        }
-    }
-
-    // Find spanning tree leafs
-    vector<int> span_tree_leafs;
-    for (int i = 0; i < num_images; ++i)
-        if (span_tree_powers[i] == 1)
-            span_tree_leafs.push_back(i);
-
-    // Find maximum distance from each spanning tree vertex
-    vector<int> max_dists(num_images, 0);
-    vector<int> cur_dists;
-    for (size_t i = 0; i < span_tree_leafs.size(); ++i)
-    {
-        cur_dists.assign(num_images, 0);
-        span_tree.walkBreadthFirst(span_tree_leafs[i], IncDistance(cur_dists));
-        for (int j = 0; j < num_images; ++j)
-            max_dists[j] = max(max_dists[j], cur_dists[j]);
-    }
-
-    // Find min-max distance
-    int min_max_dist = max_dists[0];
-    for (int i = 1; i < num_images; ++i)
-        if (min_max_dist > max_dists[i])
-            min_max_dist = max_dists[i];
-
-    // Find spanning tree centers
-    centers.clear();
-    for (int i = 0; i < num_images; ++i)
-        if (max_dists[i] == min_max_dist)
-            centers.push_back(i);
-    CV_Assert(centers.size() > 0 && centers.size() <= 2);
-}
+//M*/\r
+#include <algorithm>\r
+#include "autocalib.hpp"\r
+#include "motion_estimators.hpp"\r
+#include "util.hpp"\r
+\r
+using namespace std;\r
+using namespace cv;\r
+\r
+\r
+//////////////////////////////////////////////////////////////////////////////\r
+\r
+CameraParams::CameraParams() : focal(1), R(Mat::eye(3, 3, CV_64F)), t(Mat::zeros(3, 1, CV_64F)) {}\r
+\r
+CameraParams::CameraParams(const CameraParams &other) { *this = other; }\r
+\r
+const CameraParams& CameraParams::operator =(const CameraParams &other)\r
+{\r
+    focal = other.focal;\r
+    R = other.R.clone();\r
+    t = other.t.clone();\r
+    return *this;\r
+}\r
+\r
+\r
+//////////////////////////////////////////////////////////////////////////////\r
+\r
+struct IncDistance\r
+{\r
+    IncDistance(vector<int> &dists) : dists(&dists[0]) {}\r
+    void operator ()(const GraphEdge &edge) { dists[edge.to] = dists[edge.from] + 1; }\r
+    int* dists;\r
+};\r
+\r
+\r
+struct CalcRotation\r
+{\r
+    CalcRotation(int num_images, const vector<MatchesInfo> &pairwise_matches, vector<CameraParams> &cameras)\r
+        : num_images(num_images), pairwise_matches(&pairwise_matches[0]), cameras(&cameras[0]) {}\r
+\r
+    void operator ()(const GraphEdge &edge)\r
+    {\r
+        int pair_idx = edge.from * num_images + edge.to;\r
+\r
+        double f_from = cameras[edge.from].focal;\r
+        double f_to = cameras[edge.to].focal;\r
+\r
+        Mat K_from = Mat::eye(3, 3, CV_64F);\r
+        K_from.at<double>(0, 0) = f_from;\r
+        K_from.at<double>(1, 1) = f_from;\r
+\r
+        Mat K_to = Mat::eye(3, 3, CV_64F);\r
+        K_to.at<double>(0, 0) = f_to;\r
+        K_to.at<double>(1, 1) = f_to;\r
+\r
+        Mat R = K_from.inv() * pairwise_matches[pair_idx].H.inv() * K_to;\r
+        cameras[edge.to].R = cameras[edge.from].R * R;\r
+    }\r
+\r
+    int num_images;\r
+    const MatchesInfo* pairwise_matches;\r
+    CameraParams* cameras;\r
+};\r
+\r
+\r
+void HomographyBasedEstimator::estimate(const vector<ImageFeatures> &features, const vector<MatchesInfo> &pairwise_matches, \r
+                                        vector<CameraParams> &cameras)\r
+{\r
+    const int num_images = static_cast<int>(features.size());\r
+\r
+    // Estimate focal length and set it for all cameras\r
+    vector<double> focals;\r
+    estimateFocal(features, pairwise_matches, focals);\r
+    cameras.resize(num_images);\r
+    for (int i = 0; i < num_images; ++i)\r
+        cameras[i].focal = focals[i];\r
+\r
+    // Restore global motion\r
+    Graph span_tree;\r
+    vector<int> span_tree_centers;\r
+    findMaxSpanningTree(num_images, pairwise_matches, span_tree, span_tree_centers);\r
+    span_tree.walkBreadthFirst(span_tree_centers[0], CalcRotation(num_images, pairwise_matches, cameras));\r
+}\r
+\r
+\r
+//////////////////////////////////////////////////////////////////////////////\r
+\r
+void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vector<MatchesInfo> &pairwise_matches, \r
+                              vector<CameraParams> &cameras)\r
+{\r
+    num_images_ = static_cast<int>(features.size());\r
+    features_ = &features[0];\r
+    pairwise_matches_ = &pairwise_matches[0];\r
+\r
+    // Prepare focals and rotations\r
+    cameras_.create(num_images_ * 4, 1, CV_64F);\r
+    SVD svd;\r
+    for (int i = 0; i < num_images_; ++i)\r
+    {\r
+        cameras_.at<double>(i * 4, 0) = cameras[i].focal;\r
+\r
+        svd(cameras[i].R, SVD::FULL_UV);\r
+        Mat R = svd.u * svd.vt;\r
+        if (determinant(R) < 0) \r
+            R *= -1;\r
+\r
+        Mat rvec;\r
+        Rodrigues(R, rvec); CV_Assert(rvec.type() == CV_32F);\r
+        cameras_.at<double>(i * 4 + 1, 0) = rvec.at<float>(0, 0);\r
+        cameras_.at<double>(i * 4 + 2, 0) = rvec.at<float>(1, 0);\r
+        cameras_.at<double>(i * 4 + 3, 0) = rvec.at<float>(2, 0);\r
+    }\r
+\r
+    // Select only consistent image pairs for further adjustment\r
+    edges_.clear();\r
+    for (int i = 0; i < num_images_ - 1; ++i)\r
+    {\r
+        for (int j = i + 1; j < num_images_; ++j)\r
+        {\r
+            const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];\r
+            if (matches_info.confidence > conf_thresh_)\r
+                edges_.push_back(make_pair(i, j));\r
+        }\r
+    }\r
+\r
+    // Compute number of correspondences\r
+    total_num_matches_ = 0;\r
+    for (size_t i = 0; i < edges_.size(); ++i)\r
+        total_num_matches_ += static_cast<int>(pairwise_matches[edges_[i].first * num_images_ + edges_[i].second].num_inliers);\r
+\r
+    CvLevMarq solver(num_images_ * 4, total_num_matches_ * 3,\r
+                     cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 1000, DBL_EPSILON));\r
+\r
+    CvMat matParams = cameras_;\r
+    cvCopy(&matParams, solver.param);\r
+\r
+    int count = 0;\r
+    for(;;)\r
+    {\r
+        const CvMat* _param = 0;\r
+        CvMat* _J = 0;\r
+        CvMat* _err = 0;\r
+\r
+        bool proceed = solver.update(_param, _J, _err);\r
+\r
+        cvCopy( _param, &matParams );\r
+\r
+        if( !proceed || !_err )\r
+            break;\r
+\r
+        if( _J )\r
+        {\r
+            calcJacobian();\r
+            CvMat matJ = J_;\r
+            cvCopy( &matJ, _J );\r
+        }\r
+\r
+        if (_err)\r
+        {\r
+            calcError(err_);\r
+            LOG(".");\r
+            count++;\r
+            CvMat matErr = err_;\r
+            cvCopy( &matErr, _err );\r
+        }\r
+    }\r
+    LOGLN("");\r
+    LOGLN("Bundle adjustment, final error: " << sqrt(err_.dot(err_)));\r
+    LOGLN("Bundle adjustment, iterations done: " << count);\r
+\r
+    // Obtain global motion\r
+    for (int i = 0; i < num_images_; ++i)\r
+    {\r
+        cameras[i].focal = cameras_.at<double>(i * 4, 0);\r
+        Mat rvec(3, 1, CV_64F);\r
+        rvec.at<double>(0, 0) = cameras_.at<double>(i * 4 + 1, 0);\r
+        rvec.at<double>(1, 0) = cameras_.at<double>(i * 4 + 2, 0);\r
+        rvec.at<double>(2, 0) = cameras_.at<double>(i * 4 + 3, 0);\r
+        Rodrigues(rvec, cameras[i].R);\r
+        Mat Mf;\r
+        cameras[i].R.convertTo(Mf, CV_32F);\r
+        cameras[i].R = Mf;\r
+    }\r
+\r
+    // Normalize motion to center image\r
+    Graph span_tree;\r
+    vector<int> span_tree_centers;\r
+    findMaxSpanningTree(num_images_, pairwise_matches, span_tree, span_tree_centers);\r
+    Mat R_inv = cameras[span_tree_centers[0]].R.inv();\r
+    for (int i = 0; i < num_images_; ++i)\r
+        cameras[i].R = R_inv * cameras[i].R;\r
+}\r
+\r
+\r
+void BundleAdjuster::calcError(Mat &err)\r
+{\r
+    err.create(total_num_matches_ * 3, 1, CV_64F);\r
+\r
+    int match_idx = 0;\r
+    for (size_t edge_idx = 0; edge_idx < edges_.size(); ++edge_idx)\r
+    {\r
+        int i = edges_[edge_idx].first;\r
+        int j = edges_[edge_idx].second;\r
+        double f1 = cameras_.at<double>(i * 4, 0);\r
+        double f2 = cameras_.at<double>(j * 4, 0);\r
+        double R1[9], R2[9];\r
+        Mat R1_(3, 3, CV_64F, R1), R2_(3, 3, CV_64F, R2);\r
+        Mat rvec(3, 1, CV_64F);\r
+        rvec.at<double>(0, 0) = cameras_.at<double>(i * 4 + 1, 0);\r
+        rvec.at<double>(1, 0) = cameras_.at<double>(i * 4 + 2, 0);\r
+        rvec.at<double>(2, 0) = cameras_.at<double>(i * 4 + 3, 0);\r
+        Rodrigues(rvec, R1_); CV_Assert(R1_.type() == CV_64F);\r
+        rvec.at<double>(0, 0) = cameras_.at<double>(j * 4 + 1, 0);\r
+        rvec.at<double>(1, 0) = cameras_.at<double>(j * 4 + 2, 0);\r
+        rvec.at<double>(2, 0) = cameras_.at<double>(j * 4 + 3, 0);\r
+        Rodrigues(rvec, R2_); CV_Assert(R2_.type() == CV_64F);\r
+\r
+        const ImageFeatures& features1 = features_[i];\r
+        const ImageFeatures& features2 = features_[j];\r
+        const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];\r
+\r
+        for (size_t k = 0; k < matches_info.matches.size(); ++k)\r
+        {\r
+            if (!matches_info.inliers_mask[k])\r
+                continue;\r
+\r
+            const DMatch& m = matches_info.matches[k];\r
+\r
+            Point2d kp1 = features1.keypoints[m.queryIdx].pt;\r
+            kp1.x -= 0.5 * features1.img_size.width;\r
+            kp1.y -= 0.5 * features1.img_size.height;\r
+            Point2d kp2 = features2.keypoints[m.trainIdx].pt;\r
+            kp2.x -= 0.5 * features2.img_size.width;\r
+            kp2.y -= 0.5 * features2.img_size.height;\r
+            double len1 = sqrt(kp1.x * kp1.x + kp1.y * kp1.y + f1 * f1);\r
+            double len2 = sqrt(kp2.x * kp2.x + kp2.y * kp2.y + f2 * f2);\r
+            Point3d p1(kp1.x / len1, kp1.y / len1, f1 / len1);\r
+            Point3d p2(kp2.x / len2, kp2.y / len2, f2 / len2);\r
+\r
+            Point3d d1(p1.x * R1[0] + p1.y * R1[1] + p1.z * R1[2],\r
+                       p1.x * R1[3] + p1.y * R1[4] + p1.z * R1[5],\r
+                       p1.x * R1[6] + p1.y * R1[7] + p1.z * R1[8]);\r
+            Point3d d2(p2.x * R2[0] + p2.y * R2[1] + p2.z * R2[2],\r
+                       p2.x * R2[3] + p2.y * R2[4] + p2.z * R2[5],\r
+                       p2.x * R2[6] + p2.y * R2[7] + p2.z * R2[8]);\r
+\r
+            double mult = 1;\r
+            if (cost_space_ == FOCAL_RAY_SPACE)\r
+                mult = sqrt(f1 * f2);\r
+            err.at<double>(3 * match_idx, 0) = mult * (d1.x - d2.x);\r
+            err.at<double>(3 * match_idx + 1, 0) = mult * (d1.y - d2.y);\r
+            err.at<double>(3 * match_idx + 2, 0) = mult * (d1.z - d2.z);\r
+            match_idx++;\r
+        }\r
+    }\r
+}\r
+\r
+\r
+void calcDeriv(const Mat &err1, const Mat &err2, double h, Mat res)\r
+{\r
+    for (int i = 0; i < err1.rows; ++i)\r
+        res.at<double>(i, 0) = (err2.at<double>(i, 0) - err1.at<double>(i, 0)) / h;\r
+}\r
+\r
+\r
+void BundleAdjuster::calcJacobian()\r
+{\r
+    J_.create(total_num_matches_ * 3, num_images_ * 4, CV_64F);\r
+\r
+    double f, r;\r
+    const double df = 0.001; // Focal length step\r
+    const double dr = 0.001; // Angle step\r
+\r
+    for (int i = 0; i < num_images_; ++i)\r
+    {\r
+        f = cameras_.at<double>(i * 4, 0);\r
+        cameras_.at<double>(i * 4, 0) = f - df;\r
+        calcError(err1_);\r
+        cameras_.at<double>(i * 4, 0) = f + df;\r
+        calcError(err2_);\r
+        calcDeriv(err1_, err2_, 2 * df, J_.col(i * 4));\r
+        cameras_.at<double>(i * 4, 0) = f;\r
+\r
+        r = cameras_.at<double>(i * 4 + 1, 0);\r
+        cameras_.at<double>(i * 4 + 1, 0) = r - dr;\r
+        calcError(err1_);\r
+        cameras_.at<double>(i * 4 + 1, 0) = r + dr;\r
+        calcError(err2_);\r
+        calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 1));\r
+        cameras_.at<double>(i * 4 + 1, 0) = r;\r
+\r
+        r = cameras_.at<double>(i * 4 + 2, 0);\r
+        cameras_.at<double>(i * 4 + 2, 0) = r - dr;\r
+        calcError(err1_);\r
+        cameras_.at<double>(i * 4 + 2, 0) = r + dr;\r
+        calcError(err2_);\r
+        calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 2));\r
+        cameras_.at<double>(i * 4 + 2, 0) = r;\r
+\r
+        r = cameras_.at<double>(i * 4 + 3, 0);\r
+        cameras_.at<double>(i * 4 + 3, 0) = r - dr;\r
+        calcError(err1_);\r
+        cameras_.at<double>(i * 4 + 3, 0) = r + dr;\r
+        calcError(err2_);\r
+        calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 3));\r
+        cameras_.at<double>(i * 4 + 3, 0) = r;\r
+    }\r
+}\r
+\r
+\r
+//////////////////////////////////////////////////////////////////////////////\r
+\r
+void waveCorrect(vector<Mat> &rmats)\r
+{\r
+    float data[9];\r
+    Mat r0(1, 3, CV_32F, data);\r
+    Mat r1(1, 3, CV_32F, data + 3);\r
+    Mat r2(1, 3, CV_32F, data + 6);\r
+    Mat R(3, 3, CV_32F, data);\r
+\r
+    Mat cov = Mat::zeros(3, 3, CV_32F);\r
+    for (size_t i = 0; i < rmats.size(); ++i)\r
+    {   \r
+        Mat r0 = rmats[i].col(0);\r
+        cov += r0 * r0.t();\r
+    }\r
+\r
+    SVD svd;\r
+    svd(cov, SVD::FULL_UV);\r
+    svd.vt.row(2).copyTo(r1);\r
+    if (determinant(svd.vt) < 0) r1 *= -1;\r
+\r
+    Mat avgz = Mat::zeros(3, 1, CV_32F);\r
+    for (size_t i = 0; i < rmats.size(); ++i)\r
+        avgz += rmats[i].col(2);\r
+    r1.cross(avgz.t()).copyTo(r0);\r
+    normalize(r0, r0);\r
+\r
+    r1.cross(r0).copyTo(r2);\r
+    if (determinant(R) < 0) R *= -1;\r
+\r
+    for (size_t i = 0; i < rmats.size(); ++i)\r
+        rmats[i] = R * rmats[i];\r
+}\r
+\r
+\r
+//////////////////////////////////////////////////////////////////////////////\r
+\r
+vector<int> leaveBiggestComponent(vector<ImageFeatures> &features,  vector<MatchesInfo> &pairwise_matches, \r
+                                  float conf_threshold)\r
+{\r
+    const int num_images = static_cast<int>(features.size());\r
+\r
+    DjSets comps(num_images);\r
+    for (int i = 0; i < num_images; ++i)\r
+    {\r
+        for (int j = 0; j < num_images; ++j)\r
+        {\r
+            if (pairwise_matches[i*num_images + j].confidence < conf_threshold)\r
+                continue;\r
+            int comp1 = comps.find(i);\r
+            int comp2 = comps.find(j);\r
+            if (comp1 != comp2) \r
+                comps.merge(comp1, comp2);\r
+        }\r
+    }\r
+\r
+    int max_comp = max_element(comps.size.begin(), comps.size.end()) - comps.size.begin();\r
+\r
+    vector<int> indices;\r
+    vector<int> indices_removed;\r
+    for (int i = 0; i < num_images; ++i)\r
+        if (comps.find(i) == max_comp)\r
+            indices.push_back(i);    \r
+        else\r
+            indices_removed.push_back(i);\r
+\r
+    vector<ImageFeatures> features_subset;\r
+    vector<MatchesInfo> pairwise_matches_subset;\r
+    for (size_t i = 0; i < indices.size(); ++i)\r
+    {\r
+        features_subset.push_back(features[indices[i]]);\r
+        for (size_t j = 0; j < indices.size(); ++j)\r
+        {\r
+            pairwise_matches_subset.push_back(pairwise_matches[indices[i]*num_images + indices[j]]);\r
+            pairwise_matches_subset.back().src_img_idx = i;\r
+            pairwise_matches_subset.back().dst_img_idx = j;\r
+        }\r
+    }\r
+\r
+    if (static_cast<int>(features_subset.size()) == num_images)\r
+        return indices;\r
+\r
+    LOG("Removed some images, because can't match them: (");\r
+    LOG(indices_removed[0]+1);\r
+    for (size_t i = 1; i < indices_removed.size(); ++i) \r
+        LOG(", " << indices_removed[i]+1);\r
+    LOGLN("). Try decrease --match_conf value.");\r
+\r
+    features = features_subset;\r
+    pairwise_matches = pairwise_matches_subset;\r
+\r
+    return indices;\r
+}\r
+\r
+\r
+void findMaxSpanningTree(int num_images, const vector<MatchesInfo> &pairwise_matches,\r
+                         Graph &span_tree, vector<int> &centers)\r
+{\r
+    Graph graph(num_images);\r
+    vector<GraphEdge> edges;\r
+\r
+    // Construct images graph and remember its edges\r
+    for (int i = 0; i < num_images; ++i)\r
+    {\r
+        for (int j = 0; j < num_images; ++j)\r
+        {\r
+            if (pairwise_matches[i * num_images + j].H.empty())\r
+                continue;\r
+            float conf = static_cast<float>(pairwise_matches[i * num_images + j].num_inliers);\r
+            graph.addEdge(i, j, conf);\r
+            edges.push_back(GraphEdge(i, j, conf));\r
+        }\r
+    }\r
+\r
+    DjSets comps(num_images);\r
+    span_tree.create(num_images);\r
+    vector<int> span_tree_powers(num_images, 0);\r
+\r
+    // Find maximum spanning tree\r
+    sort(edges.begin(), edges.end(), greater<GraphEdge>());\r
+    for (size_t i = 0; i < edges.size(); ++i)\r
+    {\r
+        int comp1 = comps.find(edges[i].from);\r
+        int comp2 = comps.find(edges[i].to);\r
+        if (comp1 != comp2)\r
+        {\r
+            comps.merge(comp1, comp2);\r
+            span_tree.addEdge(edges[i].from, edges[i].to, edges[i].weight);\r
+            span_tree.addEdge(edges[i].to, edges[i].from, edges[i].weight);\r
+            span_tree_powers[edges[i].from]++;\r
+            span_tree_powers[edges[i].to]++;\r
+        }\r
+    }\r
+\r
+    // Find spanning tree leafs\r
+    vector<int> span_tree_leafs;\r
+    for (int i = 0; i < num_images; ++i)\r
+        if (span_tree_powers[i] == 1)\r
+            span_tree_leafs.push_back(i);\r
+\r
+    // Find maximum distance from each spanning tree vertex\r
+    vector<int> max_dists(num_images, 0);\r
+    vector<int> cur_dists;\r
+    for (size_t i = 0; i < span_tree_leafs.size(); ++i)\r
+    {\r
+        cur_dists.assign(num_images, 0);\r
+        span_tree.walkBreadthFirst(span_tree_leafs[i], IncDistance(cur_dists));\r
+        for (int j = 0; j < num_images; ++j)\r
+            max_dists[j] = max(max_dists[j], cur_dists[j]);\r
+    }\r
+\r
+    // Find min-max distance\r
+    int min_max_dist = max_dists[0];\r
+    for (int i = 1; i < num_images; ++i)\r
+        if (min_max_dist > max_dists[i])\r
+            min_max_dist = max_dists[i];\r
+\r
+    // Find spanning tree centers\r
+    centers.clear();\r
+    for (int i = 0; i < num_images; ++i)\r
+        if (max_dists[i] == min_max_dist)\r
+            centers.push_back(i);\r
+    CV_Assert(centers.size() > 0 && centers.size() <= 2);\r
+}\r
index c2eb8eb..efcd86e 100644 (file)
 // or tort (including negligence or otherwise) arising in any way out of\r
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
-//M*/
-#include "seam_finders.hpp"
-#include "util.hpp"
-
-using namespace std;
-using namespace cv;
-
-
-Ptr<SeamFinder> SeamFinder::createDefault(int type)
-{
-    if (type == NO)
-        return new NoSeamFinder();
-    if (type == VORONOI)
-        return new VoronoiSeamFinder();
-    if (type == GC_COLOR)
-        return new GraphCutSeamFinder(GraphCutSeamFinder::COST_COLOR);
-    if (type == GC_COLOR_GRAD)
-        return new GraphCutSeamFinder(GraphCutSeamFinder::COST_COLOR_GRAD);
-    CV_Error(CV_StsBadArg, "unsupported seam finding method");
-    return NULL;
-}
-
-
-void PairwiseSeamFinder::find(const vector<Mat> &src, const vector<Point> &corners,
-                              vector<Mat> &masks)
-{
-    if (src.size() == 0)
-        return;
-
-    images_ = src;
-    corners_ = corners;
-    masks_ = masks;
-
-    for (size_t i = 0; i < src.size() - 1; ++i)
-    {
-        for (size_t j = i + 1; j < src.size(); ++j)
-        {
-            Rect roi;
-            if (overlapRoi(corners[i], corners[j], src[i].size(), src[j].size(), roi))
-                findInPair(i, j, roi);
-        }
-    }
-}
-
-
-void VoronoiSeamFinder::findInPair(size_t first, size_t second, Rect roi)
-{
-    const int gap = 10;
-    Mat submask1(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);
-    Mat submask2(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);
-
-    Mat img1 = images_[first], img2 = images_[second];
-    Mat mask1 = masks_[first], mask2 = masks_[second];
-    Point tl1 = corners_[first], tl2 = corners_[second];
-
-    // Cut submasks with some gap
-    for (int y = -gap; y < roi.height + gap; ++y)
-    {
-        for (int x = -gap; x < roi.width + gap; ++x)
-        {
-            int y1 = roi.y - tl1.y + y;
-            int x1 = roi.x - tl1.x + x;
-            if (y1 >= 0 && x1 >= 0 && y1 < img1.rows && x1 < img1.cols)
-                submask1.at<uchar>(y + gap, x + gap) = mask1.at<uchar>(y1, x1);
-            else
-                submask1.at<uchar>(y + gap, x + gap) = 0;
-
-            int y2 = roi.y - tl2.y + y;
-            int x2 = roi.x - tl2.x + x;
-            if (y2 >= 0 && x2 >= 0 && y2 < img2.rows && x2 < img2.cols)
-                submask2.at<uchar>(y + gap, x + gap) = mask2.at<uchar>(y2, x2);
-            else
-                submask2.at<uchar>(y + gap, x + gap) = 0;
-        }
-    }
-
-    Mat collision = (submask1 != 0) & (submask2 != 0);
-    Mat unique1 = submask1.clone(); unique1.setTo(0, collision);
-    Mat unique2 = submask2.clone(); unique2.setTo(0, collision);
-
-    Mat dist1, dist2;
-    distanceTransform(unique1 == 0, dist1, CV_DIST_L1, 3);
-    distanceTransform(unique2 == 0, dist2, CV_DIST_L1, 3);
-
-    Mat seam = dist1 < dist2;
-
-    for (int y = 0; y < roi.height; ++y)
-    {
-        for (int x = 0; x < roi.width; ++x)
-        {
-            if (seam.at<uchar>(y + gap, x + gap))
-                mask2.at<uchar>(roi.y - tl2.y + y, roi.x - tl2.x + x) = 0;
-            else
-                mask1.at<uchar>(roi.y - tl1.y + y, roi.x - tl1.x + x) = 0;
-        }
-    }
-}
-
-
-class GraphCutSeamFinder::Impl : public PairwiseSeamFinder
-{
-public:
-    Impl(int cost_type, float terminal_cost, float bad_region_penalty)
-        : cost_type_(cost_type), terminal_cost_(terminal_cost), bad_region_penalty_(bad_region_penalty) {}
-
-    void find(const vector<Mat> &src, const vector<Point> &corners, vector<Mat> &masks);
-    void findInPair(size_t first, size_t second, Rect roi);
-
-private:
+//M*/\r
+#include "seam_finders.hpp"\r
+#include "util.hpp"\r
+\r
+using namespace std;\r
+using namespace cv;\r
+\r
+\r
+Ptr<SeamFinder> SeamFinder::createDefault(int type)\r
+{\r
+    if (type == NO)\r
+        return new NoSeamFinder();\r
+    if (type == VORONOI)\r
+        return new VoronoiSeamFinder();\r
+    if (type == GC_COLOR)\r
+        return new GraphCutSeamFinder(GraphCutSeamFinder::COST_COLOR);\r
+    if (type == GC_COLOR_GRAD)\r
+        return new GraphCutSeamFinder(GraphCutSeamFinder::COST_COLOR_GRAD);\r
+    CV_Error(CV_StsBadArg, "unsupported seam finding method");\r
+    return NULL;\r
+}\r
+\r
+\r
+void PairwiseSeamFinder::find(const vector<Mat> &src, const vector<Point> &corners,\r
+                              vector<Mat> &masks)\r
+{\r
+    if (src.size() == 0)\r
+        return;\r
+\r
+    images_ = src;\r
+    corners_ = corners;\r
+    masks_ = masks;\r
+\r
+    for (size_t i = 0; i < src.size() - 1; ++i)\r
+    {\r
+        for (size_t j = i + 1; j < src.size(); ++j)\r
+        {\r
+            Rect roi;\r
+            if (overlapRoi(corners[i], corners[j], src[i].size(), src[j].size(), roi))\r
+                findInPair(i, j, roi);\r
+        }\r
+    }\r
+}\r
+\r
+\r
+void VoronoiSeamFinder::findInPair(size_t first, size_t second, Rect roi)\r
+{\r
+    const int gap = 10;\r
+    Mat submask1(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);\r
+    Mat submask2(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);\r
+\r
+    Mat img1 = images_[first], img2 = images_[second];\r
+    Mat mask1 = masks_[first], mask2 = masks_[second];\r
+    Point tl1 = corners_[first], tl2 = corners_[second];\r
+\r
+    // Cut submasks with some gap\r
+    for (int y = -gap; y < roi.height + gap; ++y)\r
+    {\r
+        for (int x = -gap; x < roi.width + gap; ++x)\r
+        {\r
+            int y1 = roi.y - tl1.y + y;\r
+            int x1 = roi.x - tl1.x + x;\r
+            if (y1 >= 0 && x1 >= 0 && y1 < img1.rows && x1 < img1.cols)\r
+                submask1.at<uchar>(y + gap, x + gap) = mask1.at<uchar>(y1, x1);\r
+            else\r
+                submask1.at<uchar>(y + gap, x + gap) = 0;\r
+\r
+            int y2 = roi.y - tl2.y + y;\r
+            int x2 = roi.x - tl2.x + x;\r
+            if (y2 >= 0 && x2 >= 0 && y2 < img2.rows && x2 < img2.cols)\r
+                submask2.at<uchar>(y + gap, x + gap) = mask2.at<uchar>(y2, x2);\r
+            else\r
+                submask2.at<uchar>(y + gap, x + gap) = 0;\r
+        }\r
+    }\r
+\r
+    Mat collision = (submask1 != 0) & (submask2 != 0);\r
+    Mat unique1 = submask1.clone(); unique1.setTo(0, collision);\r
+    Mat unique2 = submask2.clone(); unique2.setTo(0, collision);\r
+\r
+    Mat dist1, dist2;\r
+    distanceTransform(unique1 == 0, dist1, CV_DIST_L1, 3);\r
+    distanceTransform(unique2 == 0, dist2, CV_DIST_L1, 3);\r
+\r
+    Mat seam = dist1 < dist2;\r
+\r
+    for (int y = 0; y < roi.height; ++y)\r
+    {\r
+        for (int x = 0; x < roi.width; ++x)\r
+        {\r
+            if (seam.at<uchar>(y + gap, x + gap))\r
+                mask2.at<uchar>(roi.y - tl2.y + y, roi.x - tl2.x + x) = 0;\r
+            else\r
+                mask1.at<uchar>(roi.y - tl1.y + y, roi.x - tl1.x + x) = 0;\r
+        }\r
+    }\r
+}\r
+\r
+\r
+class GraphCutSeamFinder::Impl : public PairwiseSeamFinder\r
+{\r
+public:\r
+    Impl(int cost_type, float terminal_cost, float bad_region_penalty)\r
+        : cost_type_(cost_type), terminal_cost_(terminal_cost), bad_region_penalty_(bad_region_penalty) {}\r
+\r
+    void find(const vector<Mat> &src, const vector<Point> &corners, vector<Mat> &masks);\r
+    void findInPair(size_t first, size_t second, Rect roi);\r
+\r
+private:\r
     void setGraphWeightsColor(const Mat &img1, const Mat &img2, \r
-                              const Mat &mask1, const Mat &mask2, GCGraph<float> &graph);
+                              const Mat &mask1, const Mat &mask2, GCGraph<float> &graph);\r
     void setGraphWeightsColorGrad(const Mat &img1, const Mat &img2, const Mat &dx1, const Mat &dx2, \r
-                                  const Mat &dy1, const Mat &dy2, const Mat &mask1, const Mat &mask2, 
-                                  GCGraph<float> &graph);
-
-    vector<Mat> dx_, dy_;
-    int cost_type_;
-    float terminal_cost_;
-    float bad_region_penalty_;
-};
-
-
-void GraphCutSeamFinder::Impl::find(const vector<Mat> &src, const vector<Point> &corners, 
-                                    vector<Mat> &masks)
-{
-    // Compute gradients
-    dx_.resize(src.size());
-    dy_.resize(src.size());
-    Mat dx, dy;
-    for (size_t i = 0; i < src.size(); ++i)
-    {
-        CV_Assert(src[i].channels() == 3);
-        Sobel(src[i], dx, CV_32F, 1, 0);
-        Sobel(src[i], dy, CV_32F, 0, 1);
-        dx_[i].create(src[i].size(), CV_32F);
-        dy_[i].create(src[i].size(), CV_32F);
-        for (int y = 0; y < src[i].rows; ++y)
-        {
-            const Point3f* dx_row = dx.ptr<Point3f>(y);
-            const Point3f* dy_row = dy.ptr<Point3f>(y);
-            float* dx_row_ = dx_[i].ptr<float>(y);
-            float* dy_row_ = dy_[i].ptr<float>(y);
-            for (int x = 0; x < src[i].cols; ++x)
-            {
-                dx_row_[x] = normL2(dx_row[x]);
-                dy_row_[x] = normL2(dy_row[x]);
-            }
-        }
-    }
-    PairwiseSeamFinder::find(src, corners, masks);
-}
-
-
+                                  const Mat &dy1, const Mat &dy2, const Mat &mask1, const Mat &mask2, \r
+                                  GCGraph<float> &graph);\r
+\r
+    vector<Mat> dx_, dy_;\r
+    int cost_type_;\r
+    float terminal_cost_;\r
+    float bad_region_penalty_;\r
+};\r
+\r
+\r
+void GraphCutSeamFinder::Impl::find(const vector<Mat> &src, const vector<Point> &corners, \r
+                                    vector<Mat> &masks)\r
+{\r
+    // Compute gradients\r
+    dx_.resize(src.size());\r
+    dy_.resize(src.size());\r
+    Mat dx, dy;\r
+    for (size_t i = 0; i < src.size(); ++i)\r
+    {\r
+        CV_Assert(src[i].channels() == 3);\r
+        Sobel(src[i], dx, CV_32F, 1, 0);\r
+        Sobel(src[i], dy, CV_32F, 0, 1);\r
+        dx_[i].create(src[i].size(), CV_32F);\r
+        dy_[i].create(src[i].size(), CV_32F);\r
+        for (int y = 0; y < src[i].rows; ++y)\r
+        {\r
+            const Point3f* dx_row = dx.ptr<Point3f>(y);\r
+            const Point3f* dy_row = dy.ptr<Point3f>(y);\r
+            float* dx_row_ = dx_[i].ptr<float>(y);\r
+            float* dy_row_ = dy_[i].ptr<float>(y);\r
+            for (int x = 0; x < src[i].cols; ++x)\r
+            {\r
+                dx_row_[x] = normL2(dx_row[x]);\r
+                dy_row_[x] = normL2(dy_row[x]);\r
+            }\r
+        }\r
+    }\r
+    PairwiseSeamFinder::find(src, corners, masks);\r
+}\r
+\r
+\r
 void GraphCutSeamFinder::Impl::setGraphWeightsColor(const Mat &img1, const Mat &img2, \r
-                                                    const Mat &mask1, const Mat &mask2, GCGraph<float> &graph)
-{
-    const Size img_size = img1.size();
-
-    // Set terminal weights
-    for (int y = 0; y < img_size.height; ++y)
-    {
-        for (int x = 0; x < img_size.width; ++x)
-        {
-            int v = graph.addVtx();
-            graph.addTermWeights(v, mask1.at<uchar>(y, x) ? terminal_cost_ : 0.f,
-                                    mask2.at<uchar>(y, x) ? terminal_cost_ : 0.f);
-        }
-    }
-
-    // Set regular edge weights
-    const float weight_eps = 1e-3f;
-    for (int y = 0; y < img_size.height; ++y)
-    {
-        for (int x = 0; x < img_size.width; ++x)
-        {
-            int v = y * img_size.width + x;
-            if (x < img_size.width - 1)
-            {
-                float weight = normL2(img1.at<Point3f>(y, x), img2.at<Point3f>(y, x)) +
-                               normL2(img1.at<Point3f>(y, x + 1), img2.at<Point3f>(y, x + 1)) +
-                               weight_eps;
-                if (!mask1.at<uchar>(y, x) || !mask1.at<uchar>(y, x + 1) ||
-                    !mask2.at<uchar>(y, x) || !mask2.at<uchar>(y, x + 1))
-                    weight += bad_region_penalty_;
-                graph.addEdges(v, v + 1, weight, weight);
-            }
-            if (y < img_size.height - 1)
-            {
-                float weight = normL2(img1.at<Point3f>(y, x), img2.at<Point3f>(y, x)) +
-                               normL2(img1.at<Point3f>(y + 1, x), img2.at<Point3f>(y + 1, x)) +
-                               weight_eps;
-                if (!mask1.at<uchar>(y, x) || !mask1.at<uchar>(y + 1, x) ||
-                    !mask2.at<uchar>(y, x) || !mask2.at<uchar>(y + 1, x))
-                    weight += bad_region_penalty_;
-                graph.addEdges(v, v + img_size.width, weight, weight);
-            }
-        }
-    }
-}
-
-
+                                                    const Mat &mask1, const Mat &mask2, GCGraph<float> &graph)\r
+{\r
+    const Size img_size = img1.size();\r
+\r
+    // Set terminal weights\r
+    for (int y = 0; y < img_size.height; ++y)\r
+    {\r
+        for (int x = 0; x < img_size.width; ++x)\r
+        {\r
+            int v = graph.addVtx();\r
+            graph.addTermWeights(v, mask1.at<uchar>(y, x) ? terminal_cost_ : 0.f,\r
+                                    mask2.at<uchar>(y, x) ? terminal_cost_ : 0.f);\r
+        }\r
+    }\r
+\r
+    // Set regular edge weights\r
+    const float weight_eps = 1.f;\r
+    for (int y = 0; y < img_size.height; ++y)\r
+    {\r
+        for (int x = 0; x < img_size.width; ++x)\r
+        {\r
+            int v = y * img_size.width + x;\r
+            if (x < img_size.width - 1)\r
+            {\r
+                float weight = normL2(img1.at<Point3f>(y, x), img2.at<Point3f>(y, x)) +\r
+                               normL2(img1.at<Point3f>(y, x + 1), img2.at<Point3f>(y, x + 1)) +\r
+                               weight_eps;\r
+                if (!mask1.at<uchar>(y, x) || !mask1.at<uchar>(y, x + 1) ||\r
+                    !mask2.at<uchar>(y, x) || !mask2.at<uchar>(y, x + 1))\r
+                    weight += bad_region_penalty_;\r
+                graph.addEdges(v, v + 1, weight, weight);\r
+            }\r
+            if (y < img_size.height - 1)\r
+            {\r
+                float weight = normL2(img1.at<Point3f>(y, x), img2.at<Point3f>(y, x)) +\r
+                               normL2(img1.at<Point3f>(y + 1, x), img2.at<Point3f>(y + 1, x)) +\r
+                               weight_eps;\r
+                if (!mask1.at<uchar>(y, x) || !mask1.at<uchar>(y + 1, x) ||\r
+                    !mask2.at<uchar>(y, x) || !mask2.at<uchar>(y + 1, x))\r
+                    weight += bad_region_penalty_;\r
+                graph.addEdges(v, v + img_size.width, weight, weight);\r
+            }\r
+        }\r
+    }\r
+}\r
+\r
+\r
 void GraphCutSeamFinder::Impl::setGraphWeightsColorGrad(\r
-        const Mat &img1, const Mat &img2, const Mat &dx1, const Mat &dx2, 
-        const Mat &dy1, const Mat &dy2, const Mat &mask1, const Mat &mask2, 
-        GCGraph<float> &graph)
-{
-    const Size img_size = img1.size();
-
-    // Set terminal weights
-    for (int y = 0; y < img_size.height; ++y)
-    {
-        for (int x = 0; x < img_size.width; ++x)
-        {
-            int v = graph.addVtx();
-            graph.addTermWeights(v, mask1.at<uchar>(y, x) ? terminal_cost_ : 0.f,
-                                    mask2.at<uchar>(y, x) ? terminal_cost_ : 0.f);
-        }
-    }
-
-    // Set regular edge weights
-    const float weight_eps = 1e-3f;
-    for (int y = 0; y < img_size.height; ++y)
-    {
-        for (int x = 0; x < img_size.width; ++x)
-        {
-            int v = y * img_size.width + x;
-            if (x < img_size.width - 1)
-            {
+        const Mat &img1, const Mat &img2, const Mat &dx1, const Mat &dx2, \r
+        const Mat &dy1, const Mat &dy2, const Mat &mask1, const Mat &mask2, \r
+        GCGraph<float> &graph)\r
+{\r
+    const Size img_size = img1.size();\r
+\r
+    // Set terminal weights\r
+    for (int y = 0; y < img_size.height; ++y)\r
+    {\r
+        for (int x = 0; x < img_size.width; ++x)\r
+        {\r
+            int v = graph.addVtx();\r
+            graph.addTermWeights(v, mask1.at<uchar>(y, x) ? terminal_cost_ : 0.f,\r
+                                    mask2.at<uchar>(y, x) ? terminal_cost_ : 0.f);\r
+        }\r
+    }\r
+\r
+    // Set regular edge weights\r
+    const float weight_eps = 1.f;\r
+    for (int y = 0; y < img_size.height; ++y)\r
+    {\r
+        for (int x = 0; x < img_size.width; ++x)\r
+        {\r
+            int v = y * img_size.width + x;\r
+            if (x < img_size.width - 1)\r
+            {\r
                 float grad = dx1.at<float>(y, x) + dx1.at<float>(y, x + 1) +\r
-                             dx2.at<float>(y, x) + dx2.at<float>(y, x + 1) + weight_eps;
-                float weight = (normL2(img1.at<Point3f>(y, x), img2.at<Point3f>(y, x)) +
-                                normL2(img1.at<Point3f>(y, x + 1), img2.at<Point3f>(y, x + 1))) / grad + 
-                               weight_eps;
-                if (!mask1.at<uchar>(y, x) || !mask1.at<uchar>(y, x + 1) ||
-                    !mask2.at<uchar>(y, x) || !mask2.at<uchar>(y, x + 1))
-                    weight += bad_region_penalty_;
-                graph.addEdges(v, v + 1, weight, weight);
-            }
-            if (y < img_size.height - 1)
-            {
+                             dx2.at<float>(y, x) + dx2.at<float>(y, x + 1) + weight_eps;\r
+                float weight = (normL2(img1.at<Point3f>(y, x), img2.at<Point3f>(y, x)) +\r
+                                normL2(img1.at<Point3f>(y, x + 1), img2.at<Point3f>(y, x + 1))) / grad + \r
+                               weight_eps;\r
+                if (!mask1.at<uchar>(y, x) || !mask1.at<uchar>(y, x + 1) ||\r
+                    !mask2.at<uchar>(y, x) || !mask2.at<uchar>(y, x + 1))\r
+                    weight += bad_region_penalty_;\r
+                graph.addEdges(v, v + 1, weight, weight);\r
+            }\r
+            if (y < img_size.height - 1)\r
+            {\r
                 float grad = dy1.at<float>(y, x) + dy1.at<float>(y + 1, x) + \r
-                             dy2.at<float>(y, x) + dy2.at<float>(y + 1, x) + weight_eps;
-                float weight = (normL2(img1.at<Point3f>(y, x), img2.at<Point3f>(y, x)) + 
-                                normL2(img1.at<Point3f>(y + 1, x), img2.at<Point3f>(y + 1, x))) / grad + 
-                               weight_eps;
-                if (!mask1.at<uchar>(y, x) || !mask1.at<uchar>(y + 1, x) ||
-                    !mask2.at<uchar>(y, x) || !mask2.at<uchar>(y + 1, x))
-                    weight += bad_region_penalty_;
-                graph.addEdges(v, v + img_size.width, weight, weight);
-            }
-        }
-    }
-}
-
-
-void GraphCutSeamFinder::Impl::findInPair(size_t first, size_t second, Rect roi)
-{
-    Mat img1 = images_[first], img2 = images_[second];
-    Mat dx1 = dx_[first], dx2 = dx_[second];
-    Mat dy1 = dy_[first], dy2 = dy_[second];
-    Mat mask1 = masks_[first], mask2 = masks_[second];
-    Point tl1 = corners_[first], tl2 = corners_[second];
-
-    const int gap = 10;
-    Mat subimg1(roi.height + 2 * gap, roi.width + 2 * gap, CV_32FC3);
-    Mat subimg2(roi.height + 2 * gap, roi.width + 2 * gap, CV_32FC3);
-    Mat submask1(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);
-    Mat submask2(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);
-    Mat subdx1(roi.height + 2 * gap, roi.width + 2 * gap, CV_32F);
-    Mat subdy1(roi.height + 2 * gap, roi.width + 2 * gap, CV_32F);
-    Mat subdx2(roi.height + 2 * gap, roi.width + 2 * gap, CV_32F);
-    Mat subdy2(roi.height + 2 * gap, roi.width + 2 * gap, CV_32F);
-
-    // Cut subimages and submasks with some gap
-    for (int y = -gap; y < roi.height + gap; ++y)
-    {
-        for (int x = -gap; x < roi.width + gap; ++x)
-        {
-            int y1 = roi.y - tl1.y + y;
-            int x1 = roi.x - tl1.x + x;
-            if (y1 >= 0 && x1 >= 0 && y1 < img1.rows && x1 < img1.cols)
-            {
-                subimg1.at<Point3f>(y + gap, x + gap) = img1.at<Point3f>(y1, x1);
-                submask1.at<uchar>(y + gap, x + gap) = mask1.at<uchar>(y1, x1);
-                subdx1.at<float>(y + gap, x + gap) = dx1.at<float>(y1, x1);
-                subdy1.at<float>(y + gap, x + gap) = dy1.at<float>(y1, x1);
-            }
-            else
-            {
-                subimg1.at<Point3f>(y + gap, x + gap) = Point3f(0, 0, 0);
-                submask1.at<uchar>(y + gap, x + gap) = 0;
-                subdx1.at<float>(y + gap, x + gap) = 0.f;
-                subdy1.at<float>(y + gap, x + gap) = 0.f;
-            }
-
-            int y2 = roi.y - tl2.y + y;
-            int x2 = roi.x - tl2.x + x;
-            if (y2 >= 0 && x2 >= 0 && y2 < img2.rows && x2 < img2.cols)
-            {
-                subimg2.at<Point3f>(y + gap, x + gap) = img2.at<Point3f>(y2, x2);
-                submask2.at<uchar>(y + gap, x + gap) = mask2.at<uchar>(y2, x2);
-                subdx2.at<float>(y + gap, x + gap) = dx2.at<float>(y2, x2);
-                subdy2.at<float>(y + gap, x + gap) = dy2.at<float>(y2, x2);
-            }
-            else
-            {
-                subimg2.at<Point3f>(y + gap, x + gap) = Point3f(0, 0, 0);
-                submask2.at<uchar>(y + gap, x + gap) = 0;
-                subdx2.at<float>(y + gap, x + gap) = 0.f;
-                subdy2.at<float>(y + gap, x + gap) = 0.f;
-            }
-        }
-    }
-
-    const int vertex_count = (roi.height + 2 * gap) * (roi.width + 2 * gap);
-    const int edge_count = (roi.height - 1 + 2 * gap) * (roi.width + 2 * gap) +
-                           (roi.width - 1 + 2 * gap) * (roi.height + 2 * gap);
-    GCGraph<float> graph(vertex_count, edge_count);
-
-    switch (cost_type_)
-    {
-    case GraphCutSeamFinder::COST_COLOR:
-        setGraphWeightsColor(subimg1, subimg2, submask1, submask2, graph);
-        break;
-    case GraphCutSeamFinder::COST_COLOR_GRAD:
-        setGraphWeightsColorGrad(subimg1, subimg2, subdx1, subdx2, subdy1, subdy2, 
-                                 submask1, submask2, graph);
-        break;
-    default:
-        CV_Error(CV_StsBadArg, "unsupported pixel similarity measure");
-    }
-
-    graph.maxFlow();
-
-    for (int y = 0; y < roi.height; ++y)
-    {
-        for (int x = 0; x < roi.width; ++x)
-        {
-            if (graph.inSourceSegment((y + gap) * (roi.width + 2 * gap) + x + gap))
-            {
-                if (mask1.at<uchar>(roi.y - tl1.y + y, roi.x - tl1.x + x))
-                    mask2.at<uchar>(roi.y - tl2.y + y, roi.x - tl2.x + x) = 0;
-            }
-            else
-            {
-                if (mask2.at<uchar>(roi.y - tl2.y + y, roi.x - tl2.x + x))
-                    mask1.at<uchar>(roi.y - tl1.y + y, roi.x - tl1.x + x) = 0;
-            }
-        }
-    }
-}
-
-
-GraphCutSeamFinder::GraphCutSeamFinder(int cost_type, float terminal_cost, float bad_region_penalty)
-    : impl_(new Impl(cost_type, terminal_cost, bad_region_penalty)) {}
-
-
-void GraphCutSeamFinder::find(const vector<Mat> &src, const vector<Point> &corners,
-                              vector<Mat> &masks)
-{
-    impl_->find(src, corners, masks);
-}
+                             dy2.at<float>(y, x) + dy2.at<float>(y + 1, x) + weight_eps;\r
+                float weight = (normL2(img1.at<Point3f>(y, x), img2.at<Point3f>(y, x)) + \r
+                                normL2(img1.at<Point3f>(y + 1, x), img2.at<Point3f>(y + 1, x))) / grad + \r
+                               weight_eps;\r
+                if (!mask1.at<uchar>(y, x) || !mask1.at<uchar>(y + 1, x) ||\r
+                    !mask2.at<uchar>(y, x) || !mask2.at<uchar>(y + 1, x))\r
+                    weight += bad_region_penalty_;\r
+                graph.addEdges(v, v + img_size.width, weight, weight);\r
+            }\r
+        }\r
+    }\r
+}\r
+\r
+\r
+void GraphCutSeamFinder::Impl::findInPair(size_t first, size_t second, Rect roi)\r
+{\r
+    Mat img1 = images_[first], img2 = images_[second];\r
+    Mat dx1 = dx_[first], dx2 = dx_[second];\r
+    Mat dy1 = dy_[first], dy2 = dy_[second];\r
+    Mat mask1 = masks_[first], mask2 = masks_[second];\r
+    Point tl1 = corners_[first], tl2 = corners_[second];\r
+\r
+    const int gap = 10;\r
+    Mat subimg1(roi.height + 2 * gap, roi.width + 2 * gap, CV_32FC3);\r
+    Mat subimg2(roi.height + 2 * gap, roi.width + 2 * gap, CV_32FC3);\r
+    Mat submask1(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);\r
+    Mat submask2(roi.height + 2 * gap, roi.width + 2 * gap, CV_8U);\r
+    Mat subdx1(roi.height + 2 * gap, roi.width + 2 * gap, CV_32F);\r
+    Mat subdy1(roi.height + 2 * gap, roi.width + 2 * gap, CV_32F);\r
+    Mat subdx2(roi.height + 2 * gap, roi.width + 2 * gap, CV_32F);\r
+    Mat subdy2(roi.height + 2 * gap, roi.width + 2 * gap, CV_32F);\r
+\r
+    // Cut subimages and submasks with some gap\r
+    for (int y = -gap; y < roi.height + gap; ++y)\r
+    {\r
+        for (int x = -gap; x < roi.width + gap; ++x)\r
+        {\r
+            int y1 = roi.y - tl1.y + y;\r
+            int x1 = roi.x - tl1.x + x;\r
+            if (y1 >= 0 && x1 >= 0 && y1 < img1.rows && x1 < img1.cols)\r
+            {\r
+                subimg1.at<Point3f>(y + gap, x + gap) = img1.at<Point3f>(y1, x1);\r
+                submask1.at<uchar>(y + gap, x + gap) = mask1.at<uchar>(y1, x1);\r
+                subdx1.at<float>(y + gap, x + gap) = dx1.at<float>(y1, x1);\r
+                subdy1.at<float>(y + gap, x + gap) = dy1.at<float>(y1, x1);\r
+            }\r
+            else\r
+            {\r
+                subimg1.at<Point3f>(y + gap, x + gap) = Point3f(0, 0, 0);\r
+                submask1.at<uchar>(y + gap, x + gap) = 0;\r
+                subdx1.at<float>(y + gap, x + gap) = 0.f;\r
+                subdy1.at<float>(y + gap, x + gap) = 0.f;\r
+            }\r
+\r
+            int y2 = roi.y - tl2.y + y;\r
+            int x2 = roi.x - tl2.x + x;\r
+            if (y2 >= 0 && x2 >= 0 && y2 < img2.rows && x2 < img2.cols)\r
+            {\r
+                subimg2.at<Point3f>(y + gap, x + gap) = img2.at<Point3f>(y2, x2);\r
+                submask2.at<uchar>(y + gap, x + gap) = mask2.at<uchar>(y2, x2);\r
+                subdx2.at<float>(y + gap, x + gap) = dx2.at<float>(y2, x2);\r
+                subdy2.at<float>(y + gap, x + gap) = dy2.at<float>(y2, x2);\r
+            }\r
+            else\r
+            {\r
+                subimg2.at<Point3f>(y + gap, x + gap) = Point3f(0, 0, 0);\r
+                submask2.at<uchar>(y + gap, x + gap) = 0;\r
+                subdx2.at<float>(y + gap, x + gap) = 0.f;\r
+                subdy2.at<float>(y + gap, x + gap) = 0.f;\r
+            }\r
+        }\r
+    }\r
+\r
+    const int vertex_count = (roi.height + 2 * gap) * (roi.width + 2 * gap);\r
+    const int edge_count = (roi.height - 1 + 2 * gap) * (roi.width + 2 * gap) +\r
+                           (roi.width - 1 + 2 * gap) * (roi.height + 2 * gap);\r
+    GCGraph<float> graph(vertex_count, edge_count);\r
+\r
+    switch (cost_type_)\r
+    {\r
+    case GraphCutSeamFinder::COST_COLOR:\r
+        setGraphWeightsColor(subimg1, subimg2, submask1, submask2, graph);\r
+        break;\r
+    case GraphCutSeamFinder::COST_COLOR_GRAD:\r
+        setGraphWeightsColorGrad(subimg1, subimg2, subdx1, subdx2, subdy1, subdy2, \r
+                                 submask1, submask2, graph);\r
+        break;\r
+    default:\r
+        CV_Error(CV_StsBadArg, "unsupported pixel similarity measure");\r
+    }\r
+\r
+    graph.maxFlow();\r
+\r
+    for (int y = 0; y < roi.height; ++y)\r
+    {\r
+        for (int x = 0; x < roi.width; ++x)\r
+        {\r
+            if (graph.inSourceSegment((y + gap) * (roi.width + 2 * gap) + x + gap))\r
+            {\r
+                if (mask1.at<uchar>(roi.y - tl1.y + y, roi.x - tl1.x + x))\r
+                    mask2.at<uchar>(roi.y - tl2.y + y, roi.x - tl2.x + x) = 0;\r
+            }\r
+            else\r
+            {\r
+                if (mask2.at<uchar>(roi.y - tl2.y + y, roi.x - tl2.x + x))\r
+                    mask1.at<uchar>(roi.y - tl1.y + y, roi.x - tl1.x + x) = 0;\r
+            }\r
+        }\r
+    }\r
+}\r
+\r
+\r
+GraphCutSeamFinder::GraphCutSeamFinder(int cost_type, float terminal_cost, float bad_region_penalty)\r
+    : impl_(new Impl(cost_type, terminal_cost, bad_region_penalty)) {}\r
+\r
+\r
+void GraphCutSeamFinder::find(const vector<Mat> &src, const vector<Point> &corners,\r
+                              vector<Mat> &masks)\r
+{\r
+    impl_->find(src, corners, masks);\r
+}\r
index 51d6209..597ee4f 100644 (file)
 // or tort (including negligence or otherwise) arising in any way out of\r
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
-//M*/
-#ifndef __OPENCV_WARPERS_HPP__
-#define __OPENCV_WARPERS_HPP__
-
-#include "precomp.hpp"
-
-class Warper
-{
-public:
-    enum { PLANE, CYLINDRICAL, SPHERICAL };
-    static cv::Ptr<Warper> createByCameraFocal(float focal, int type);
-
-    virtual ~Warper() {}
-    virtual cv::Point warp(const cv::Mat &src, float focal, const cv::Mat& R, cv::Mat &dst,
-                           int interp_mode = cv::INTER_LINEAR, int border_mode = cv::BORDER_REFLECT) = 0;
-};
-
-
-struct ProjectorBase
-{
-    void setTransformation(const cv::Mat& R);
-
-    cv::Size size;
-    float focal;
-    float r[9];
-    float rinv[9];
-    float scale;
-};
-
-
-template <class P>
-class WarperBase : public Warper
-{   
-public:
-    cv::Point warp(const cv::Mat &src, float focal, const cv::Mat &R, cv::Mat &dst,
-                   int interp_mode, int border_mode);
-
-protected:
-    // Detects ROI of the destination image. It's correct for any projection.
-    virtual void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br);
-
-    // Detects ROI of the destination image by walking over image border.
-    // Correctness for any projection isn't guaranteed.
-    void detectResultRoiByBorder(cv::Point &dst_tl, cv::Point &dst_br);
-
-    cv::Size src_size_;
-    P projector_;
-};
-
-
-struct PlaneProjector : ProjectorBase
-{
-    void mapForward(float x, float y, float &u, float &v);
-    void mapBackward(float u, float v, float &x, float &y);
-
-    float plane_dist;
-};
-
-
-// Projects image onto z = plane_dist plane
-class PlaneWarper : public WarperBase<PlaneProjector>
-{
-public:
-    PlaneWarper(float plane_dist = 1.f, float scale = 1.f)
-    {
-        projector_.plane_dist = plane_dist;
-        projector_.scale = scale;
-    }
-
-private:
-    void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br);
-};
-
-
-struct SphericalProjector : ProjectorBase
-{
-    void mapForward(float x, float y, float &u, float &v);
-    void mapBackward(float u, float v, float &x, float &y);
-};
-
-
-// Projects image onto unit sphere with origin at (0, 0, 0).
-// Poles are located at (0, -1, 0) and (0, 1, 0) points.
-class SphericalWarper : public WarperBase<SphericalProjector>
-{
-public:
-    SphericalWarper(float scale = 300.f) { projector_.scale = scale; }
-
-private:  
-    void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br);
-};
-
-
-struct CylindricalProjector : ProjectorBase
-{
-    void mapForward(float x, float y, float &u, float &v);
-    void mapBackward(float u, float v, float &x, float &y);
-};
-
-
-// Projects image onto x * x + z * z = 1 cylinder
-class CylindricalWarper : public WarperBase<CylindricalProjector>
-{
-public:
-    CylindricalWarper(float scale = 300.f) { projector_.scale = scale; }
-
-private:
-    void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br)
-    {
-        WarperBase<CylindricalProjector>::detectResultRoiByBorder(dst_tl, dst_br);
-    }
-};
-
-#include "warpers_inl.hpp"
-
-#endif // __OPENCV_WARPERS_HPP__
+//M*/\r
+#ifndef __OPENCV_WARPERS_HPP__\r
+#define __OPENCV_WARPERS_HPP__\r
+\r
+#include "precomp.hpp"\r
+\r
+class Warper\r
+{\r
+public:\r
+    enum { PLANE, CYLINDRICAL, SPHERICAL };\r
+    static cv::Ptr<Warper> createByCameraFocal(float focal, int type);\r
+\r
+    virtual ~Warper() {}\r
+    virtual cv::Point warp(const cv::Mat &src, float focal, const cv::Mat& R, cv::Mat &dst,\r
+                           int interp_mode = cv::INTER_LINEAR, int border_mode = cv::BORDER_REFLECT) = 0;\r
+    virtual cv::Rect warpRoi(const cv::Size &sz, float focal, const cv::Mat &R) = 0;\r
+};\r
+\r
+\r
+struct ProjectorBase\r
+{\r
+    void setTransformation(const cv::Mat& R);\r
+\r
+    cv::Size size;\r
+    float focal;\r
+    float r[9];\r
+    float rinv[9];\r
+    float scale;\r
+};\r
+\r
+\r
+template <class P>\r
+class WarperBase : public Warper\r
+{   \r
+public:\r
+    cv::Point warp(const cv::Mat &src, float focal, const cv::Mat &R, cv::Mat &dst,\r
+                   int interp_mode, int border_mode);\r
+\r
+    cv::Rect warpRoi(const cv::Size &sz, float focal, const cv::Mat &R);\r
+\r
+protected:\r
+    // Detects ROI of the destination image. It's correct for any projection.\r
+    virtual void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br);\r
+\r
+    // Detects ROI of the destination image by walking over image border.\r
+    // Correctness for any projection isn't guaranteed.\r
+    void detectResultRoiByBorder(cv::Point &dst_tl, cv::Point &dst_br);\r
+\r
+    cv::Size src_size_;\r
+    P projector_;\r
+};\r
+\r
+\r
+struct PlaneProjector : ProjectorBase\r
+{\r
+    void mapForward(float x, float y, float &u, float &v);\r
+    void mapBackward(float u, float v, float &x, float &y);\r
+\r
+    float plane_dist;\r
+};\r
+\r
+\r
+// Projects image onto z = plane_dist plane\r
+class PlaneWarper : public WarperBase<PlaneProjector>\r
+{\r
+public:\r
+    PlaneWarper(float plane_dist = 1.f, float scale = 1.f)\r
+    {\r
+        projector_.plane_dist = plane_dist;\r
+        projector_.scale = scale;\r
+    }\r
+\r
+private:\r
+    void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br);\r
+};\r
+\r
+\r
+struct SphericalProjector : ProjectorBase\r
+{\r
+    void mapForward(float x, float y, float &u, float &v);\r
+    void mapBackward(float u, float v, float &x, float &y);\r
+};\r
+\r
+\r
+// Projects image onto unit sphere with origin at (0, 0, 0).\r
+// Poles are located at (0, -1, 0) and (0, 1, 0) points.\r
+class SphericalWarper : public WarperBase<SphericalProjector>\r
+{\r
+public:\r
+    SphericalWarper(float scale = 300.f) { projector_.scale = scale; }\r
+\r
+private:  \r
+    void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br);\r
+};\r
+\r
+\r
+struct CylindricalProjector : ProjectorBase\r
+{\r
+    void mapForward(float x, float y, float &u, float &v);\r
+    void mapBackward(float u, float v, float &x, float &y);\r
+};\r
+\r
+\r
+// Projects image onto x * x + z * z = 1 cylinder\r
+class CylindricalWarper : public WarperBase<CylindricalProjector>\r
+{\r
+public:\r
+    CylindricalWarper(float scale = 300.f) { projector_.scale = scale; }\r
+\r
+private:\r
+    void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br)\r
+    {\r
+        WarperBase<CylindricalProjector>::detectResultRoiByBorder(dst_tl, dst_br);\r
+    }\r
+};\r
+\r
+#include "warpers_inl.hpp"\r
+\r
+#endif // __OPENCV_WARPERS_HPP__\r
index d55558c..e385ff1 100644 (file)
 // or tort (including negligence or otherwise) arising in any way out of\r
 // the use of this software, even if advised of the possibility of such damage.\r
 //\r
-//M*/
-#ifndef __OPENCV_WARPERS_INL_HPP__
-#define __OPENCV_WARPERS_INL_HPP__
-
-#include "warpers.hpp" // Make your IDE see declarations
-
-template <class P>
-cv::Point WarperBase<P>::warp(const cv::Mat &src, float focal, const cv::Mat &R, cv::Mat &dst,
-                              int interp_mode, int border_mode)
-{
-    src_size_ = src.size();
-
-    projector_.size = src.size();
-    projector_.focal = focal;
-    projector_.setTransformation(R);
-
-    cv::Point dst_tl, dst_br;
-    detectResultRoi(dst_tl, dst_br);
-
-    cv::Mat xmap(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
-    cv::Mat ymap(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);
-
-    float x, y;
-    for (int v = dst_tl.y; v <= dst_br.y; ++v)
-    {
-        for (int u = dst_tl.x; u <= dst_br.x; ++u)
-        {
-            projector_.mapBackward(static_cast<float>(u), static_cast<float>(v), x, y);
-            xmap.at<float>(v - dst_tl.y, u - dst_tl.x) = x;
-            ymap.at<float>(v - dst_tl.y, u - dst_tl.x) = y;
-        }
-    }
-
-    dst.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, src.type());
-    remap(src, dst, xmap, ymap, interp_mode, border_mode);
-
-    return dst_tl;
-}
-
-
-template <class P>
-void WarperBase<P>::detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br)
-{
-    float tl_uf = std::numeric_limits<float>::max();
-    float tl_vf = std::numeric_limits<float>::max();
-    float br_uf = -std::numeric_limits<float>::max();
-    float br_vf = -std::numeric_limits<float>::max();
-
-    float u, v;
-    for (int y = 0; y < src_size_.height; ++y)
-    {
-        for (int x = 0; x < src_size_.width; ++x)
-        {
-            projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);
-            tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
-            br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
-        }
-    }
-
-    dst_tl.x = static_cast<int>(tl_uf);
-    dst_tl.y = static_cast<int>(tl_vf);
-    dst_br.x = static_cast<int>(br_uf);
-    dst_br.y = static_cast<int>(br_vf);
-}
-
-
-template <class P>
-void WarperBase<P>::detectResultRoiByBorder(cv::Point &dst_tl, cv::Point &dst_br)
-{
-    float tl_uf = std::numeric_limits<float>::max();
-    float tl_vf = std::numeric_limits<float>::max();
-    float br_uf = -std::numeric_limits<float>::max();
-    float br_vf = -std::numeric_limits<float>::max();
-
-    float u, v;
-    for (float x = 0; x < src_size_.width; ++x)
-    {
-        projector_.mapForward(static_cast<float>(x), 0, u, v);
-        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
-        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
-
-        projector_.mapForward(static_cast<float>(x), static_cast<float>(src_size_.height - 1), u, v);
-        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
-        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
-    }
-    for (int y = 0; y < src_size_.height; ++y)
-    {
-        projector_.mapForward(0, static_cast<float>(y), u, v);
-        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
-        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
-
-        projector_.mapForward(static_cast<float>(src_size_.width - 1), static_cast<float>(y), u, v);
-        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);
-        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);
-    }
-
-    dst_tl.x = static_cast<int>(tl_uf);
-    dst_tl.y = static_cast<int>(tl_vf);
-    dst_br.x = static_cast<int>(br_uf);
-    dst_br.y = static_cast<int>(br_vf);
-}
-
-
-inline
-void PlaneProjector::mapForward(float x, float y, float &u, float &v)
-{
-    x -= size.width * 0.5f;
-    y -= size.height * 0.5f;
-
-    float x_ = r[0] * x + r[1] * y + r[2] * focal;
-    float y_ = r[3] * x + r[4] * y + r[5] * focal;
-    float z_ = r[6] * x + r[7] * y + r[8] * focal;
-
-    u = scale * x_ / z_ * plane_dist;
-    v = scale * y_ / z_ * plane_dist;
-}
-
-
-inline
-void PlaneProjector::mapBackward(float u, float v, float &x, float &y)
-{
-    float x_ = u / scale;
-    float y_ = v / scale;
-
-    float z;
-    x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * plane_dist;
-    y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * plane_dist;
-    z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * plane_dist;
-
-    x = focal * x / z + size.width * 0.5f;
-    y = focal * y / z + size.height * 0.5f;
-}
-
-
-inline
-void SphericalProjector::mapForward(float x, float y, float &u, float &v)
-{
-    x -= size.width * 0.5f;
-    y -= size.height * 0.5f;
-
-    float x_ = r[0] * x + r[1] * y + r[2] * focal;
-    float y_ = r[3] * x + r[4] * y + r[5] * focal;
-    float z_ = r[6] * x + r[7] * y + r[8] * focal;
-
-    u = scale * atan2f(x_, z_);
-    v = scale * (static_cast<float>(CV_PI) - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)));
-}
-
-
-inline
-void SphericalProjector::mapBackward(float u, float v, float &x, float &y)
-{
-    float sinv = sinf(static_cast<float>(CV_PI) - v / scale);
-    float x_ = sinv * sinf(u / scale);
-    float y_ = cosf(static_cast<float>(CV_PI) - v / scale);
-    float z_ = sinv * cosf(u / scale);
-
-    float z;
-    x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * z_;
-    y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * z_;
-    z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * z_;
-
-    x = focal * x / z + size.width * 0.5f;
-    y = focal * y / z + size.height * 0.5f;
-}
-
-
-inline
-void CylindricalProjector::mapForward(float x, float y, float &u, float &v)
-{
-    x -= size.width * 0.5f;
-    y -= size.height * 0.5f;
-
-    float x_ = r[0] * x + r[1] * y + r[2] * focal;
-    float y_ = r[3] * x + r[4] * y + r[5] * focal;
-    float z_ = r[6] * x + r[7] * y + r[8] * focal;
-
-    u = scale * atan2f(x_, z_);
-    v = scale * y_ / sqrtf(x_ * x_ + z_ * z_);
-}
-
-
-inline
-void CylindricalProjector::mapBackward(float u, float v, float &x, float &y)
-{
-    float x_ = sinf(u / scale);
-    float y_ = v / scale;
-    float z_ = cosf(u / scale);
-
-    float z;
-    x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * z_;
-    y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * z_;
-    z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * z_;
-
-    x = focal * x / z + size.width * 0.5f;
-    y = focal * y / z + size.height * 0.5f;
-}
-
-#endif // __OPENCV_WARPERS_INL_HPP__
+//M*/\r
+#ifndef __OPENCV_WARPERS_INL_HPP__\r
+#define __OPENCV_WARPERS_INL_HPP__\r
+\r
+#include "warpers.hpp" // Make your IDE see declarations\r
+\r
+template <class P>\r
+cv::Point WarperBase<P>::warp(const cv::Mat &src, float focal, const cv::Mat &R, cv::Mat &dst,\r
+                              int interp_mode, int border_mode)\r
+{\r
+    src_size_ = src.size();\r
+\r
+    projector_.size = src.size();\r
+    projector_.focal = focal;\r
+    projector_.setTransformation(R);\r
+\r
+    cv::Point dst_tl, dst_br;\r
+    detectResultRoi(dst_tl, dst_br);\r
+\r
+    cv::Mat xmap(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);\r
+    cv::Mat ymap(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, CV_32F);\r
+\r
+    float x, y;\r
+    for (int v = dst_tl.y; v <= dst_br.y; ++v)\r
+    {\r
+        for (int u = dst_tl.x; u <= dst_br.x; ++u)\r
+        {\r
+            projector_.mapBackward(static_cast<float>(u), static_cast<float>(v), x, y);\r
+            xmap.at<float>(v - dst_tl.y, u - dst_tl.x) = x;\r
+            ymap.at<float>(v - dst_tl.y, u - dst_tl.x) = y;\r
+        }\r
+    }\r
+\r
+    dst.create(dst_br.y - dst_tl.y + 1, dst_br.x - dst_tl.x + 1, src.type());\r
+    remap(src, dst, xmap, ymap, interp_mode, border_mode);\r
+\r
+    return dst_tl;\r
+}\r
+\r
+\r
+template <class P>\r
+cv::Rect WarperBase<P>::warpRoi(const cv::Size &sz, float focal, const cv::Mat &R)\r
+{\r
+    src_size_ = sz;\r
+\r
+    projector_.size = sz;\r
+    projector_.focal = focal;\r
+    projector_.setTransformation(R);\r
+\r
+    cv::Point dst_tl, dst_br;\r
+    detectResultRoi(dst_tl, dst_br);\r
+\r
+    return cv::Rect(dst_tl, cv::Point(dst_br.x + 1, dst_br.y + 1));\r
+}\r
+\r
+\r
+template <class P>\r
+void WarperBase<P>::detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br)\r
+{\r
+    float tl_uf = std::numeric_limits<float>::max();\r
+    float tl_vf = std::numeric_limits<float>::max();\r
+    float br_uf = -std::numeric_limits<float>::max();\r
+    float br_vf = -std::numeric_limits<float>::max();\r
+\r
+    float u, v;\r
+    for (int y = 0; y < src_size_.height; ++y)\r
+    {\r
+        for (int x = 0; x < src_size_.width; ++x)\r
+        {\r
+            projector_.mapForward(static_cast<float>(x), static_cast<float>(y), u, v);\r
+            tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);\r
+            br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);\r
+        }\r
+    }\r
+\r
+    dst_tl.x = static_cast<int>(tl_uf);\r
+    dst_tl.y = static_cast<int>(tl_vf);\r
+    dst_br.x = static_cast<int>(br_uf);\r
+    dst_br.y = static_cast<int>(br_vf);\r
+}\r
+\r
+\r
+template <class P>\r
+void WarperBase<P>::detectResultRoiByBorder(cv::Point &dst_tl, cv::Point &dst_br)\r
+{\r
+    float tl_uf = std::numeric_limits<float>::max();\r
+    float tl_vf = std::numeric_limits<float>::max();\r
+    float br_uf = -std::numeric_limits<float>::max();\r
+    float br_vf = -std::numeric_limits<float>::max();\r
+\r
+    float u, v;\r
+    for (float x = 0; x < src_size_.width; ++x)\r
+    {\r
+        projector_.mapForward(static_cast<float>(x), 0, u, v);\r
+        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);\r
+        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);\r
+\r
+        projector_.mapForward(static_cast<float>(x), static_cast<float>(src_size_.height - 1), u, v);\r
+        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);\r
+        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);\r
+    }\r
+    for (int y = 0; y < src_size_.height; ++y)\r
+    {\r
+        projector_.mapForward(0, static_cast<float>(y), u, v);\r
+        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);\r
+        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);\r
+\r
+        projector_.mapForward(static_cast<float>(src_size_.width - 1), static_cast<float>(y), u, v);\r
+        tl_uf = std::min(tl_uf, u); tl_vf = std::min(tl_vf, v);\r
+        br_uf = std::max(br_uf, u); br_vf = std::max(br_vf, v);\r
+    }\r
+\r
+    dst_tl.x = static_cast<int>(tl_uf);\r
+    dst_tl.y = static_cast<int>(tl_vf);\r
+    dst_br.x = static_cast<int>(br_uf);\r
+    dst_br.y = static_cast<int>(br_vf);\r
+}\r
+\r
+\r
+inline\r
+void PlaneProjector::mapForward(float x, float y, float &u, float &v)\r
+{\r
+    x -= size.width * 0.5f;\r
+    y -= size.height * 0.5f;\r
+\r
+    float x_ = r[0] * x + r[1] * y + r[2] * focal;\r
+    float y_ = r[3] * x + r[4] * y + r[5] * focal;\r
+    float z_ = r[6] * x + r[7] * y + r[8] * focal;\r
+\r
+    u = scale * x_ / z_ * plane_dist;\r
+    v = scale * y_ / z_ * plane_dist;\r
+}\r
+\r
+\r
+inline\r
+void PlaneProjector::mapBackward(float u, float v, float &x, float &y)\r
+{\r
+    float x_ = u / scale;\r
+    float y_ = v / scale;\r
+\r
+    float z;\r
+    x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * plane_dist;\r
+    y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * plane_dist;\r
+    z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * plane_dist;\r
+\r
+    x = focal * x / z + size.width * 0.5f;\r
+    y = focal * y / z + size.height * 0.5f;\r
+}\r
+\r
+\r
+inline\r
+void SphericalProjector::mapForward(float x, float y, float &u, float &v)\r
+{\r
+    x -= size.width * 0.5f;\r
+    y -= size.height * 0.5f;\r
+\r
+    float x_ = r[0] * x + r[1] * y + r[2] * focal;\r
+    float y_ = r[3] * x + r[4] * y + r[5] * focal;\r
+    float z_ = r[6] * x + r[7] * y + r[8] * focal;\r
+\r
+    u = scale * atan2f(x_, z_);\r
+    v = scale * (static_cast<float>(CV_PI) - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)));\r
+}\r
+\r
+\r
+inline\r
+void SphericalProjector::mapBackward(float u, float v, float &x, float &y)\r
+{\r
+    float sinv = sinf(static_cast<float>(CV_PI) - v / scale);\r
+    float x_ = sinv * sinf(u / scale);\r
+    float y_ = cosf(static_cast<float>(CV_PI) - v / scale);\r
+    float z_ = sinv * cosf(u / scale);\r
+\r
+    float z;\r
+    x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * z_;\r
+    y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * z_;\r
+    z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * z_;\r
+\r
+    x = focal * x / z + size.width * 0.5f;\r
+    y = focal * y / z + size.height * 0.5f;\r
+}\r
+\r
+\r
+inline\r
+void CylindricalProjector::mapForward(float x, float y, float &u, float &v)\r
+{\r
+    x -= size.width * 0.5f;\r
+    y -= size.height * 0.5f;\r
+\r
+    float x_ = r[0] * x + r[1] * y + r[2] * focal;\r
+    float y_ = r[3] * x + r[4] * y + r[5] * focal;\r
+    float z_ = r[6] * x + r[7] * y + r[8] * focal;\r
+\r
+    u = scale * atan2f(x_, z_);\r
+    v = scale * y_ / sqrtf(x_ * x_ + z_ * z_);\r
+}\r
+\r
+\r
+inline\r
+void CylindricalProjector::mapBackward(float u, float v, float &x, float &y)\r
+{\r
+    float x_ = sinf(u / scale);\r
+    float y_ = v / scale;\r
+    float z_ = cosf(u / scale);\r
+\r
+    float z;\r
+    x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * z_;\r
+    y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * z_;\r
+    z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * z_;\r
+\r
+    x = focal * x / z + size.width * 0.5f;\r
+    y = focal * y / z + size.height * 0.5f;\r
+}\r
+\r
+#endif // __OPENCV_WARPERS_INL_HPP__\r
index 9f65c46..ce2925b 100644 (file)
@@ -32,7 +32,11 @@ int main()
 \r
 #include <cuda.h>\r
 #include <cuda_runtime.h>\r
+#include <GL/gl.h>\r
+#include <cudaGL.h>\r
 #include "opencv2/core/internal.hpp" // For TBB wrappers\r
+#include "tbb/tbb.h"\r
+#include "tbb/mutex.h"\r
 \r
 using namespace std;\r
 using namespace cv;\r
@@ -54,7 +58,7 @@ inline void safeCall_(int code, const char* expr, const char* file, int line)
 }\r
 \r
 // Each GPU is associated with its own context\r
-CUcontext contexts[2];\r
+CUcontext contexts[/*2*/1];\r
 \r
 void inline contextOn(int id) \r
 {\r
@@ -76,6 +80,10 @@ GpuMat d_result[2];
 // CPU result\r
 Mat result;\r
 \r
+int some[2];\r
+\r
+tbb::mutex mutex;\r
+\r
 int main(int argc, char** argv)\r
 {\r
     if (argc < 3)\r
@@ -85,11 +93,11 @@ int main(int argc, char** argv)
     }\r
 \r
     int num_devices = getCudaEnabledDeviceCount();\r
-    if (num_devices < 2)\r
-    {\r
-        std::cout << "Two or more GPUs are required\n";\r
-        return -1;\r
-    }\r
+//    if (num_devices < 2)\r
+//    {\r
+//        std::cout << "Two or more GPUs are required\n";\r
+//        return -1;\r
+//    }\r
 \r
     for (int i = 0; i < num_devices; ++i)\r
     {\r
@@ -123,13 +131,14 @@ int main(int argc, char** argv)
     // Create context for GPU #0\r
     CUdevice device;\r
     safeCall(cuDeviceGet(&device, 0));\r
-    safeCall(cuCtxCreate(&contexts[0], 0, device));\r
+    safeCall(cuGLCtxCreate(&contexts[0], 0, device));\r
+    //safeCall(cuCtxCreate(&contexts[0], 0, device));\r
     contextOff();\r
 \r
-    // Create context for GPU #1\r
-    safeCall(cuDeviceGet(&device, 1));\r
-    safeCall(cuCtxCreate(&contexts[1], 0, device));\r
-    contextOff();\r
+//    // Create context for GPU #1\r
+//    safeCall(cuDeviceGet(&device, 0));\r
+//    safeCall(cuCtxCreate(&contexts[1], 0, device));\r
+//    contextOff();\r
 \r
     // Split source images for processing on GPU #0\r
     contextOn(0);\r
@@ -139,15 +148,20 @@ int main(int argc, char** argv)
     contextOff();\r
 \r
     // Split source images for processing on the GPU #1\r
-    contextOn(1);\r
+    contextOn(0);\r
     d_left[1].upload(left.rowRange(left.rows / 2, left.rows));\r
     d_right[1].upload(right.rowRange(right.rows / 2, right.rows));\r
     bm[1] = new StereoBM_GPU();\r
     contextOff();\r
 \r
+    some[0] = some[1] = 0;\r
     // Execute calculation in two threads using two GPUs\r
-    int devices[] = {0, 1};\r
-    parallel_do(devices, devices + 2, Worker());\r
+    vector<int> devices;\r
+    for (int i = 0; i < 4; ++i)\r
+        devices.push_back(rand()%2);\r
+    tbb::parallel_do(&devices[0], &devices[devices.size() - 1], Worker());\r
+\r
+    cout << some[0] << " " << some[1] << endl;\r
 \r
     // Release the first GPU resources\r
     contextOn(0);\r
@@ -159,7 +173,7 @@ int main(int argc, char** argv)
     contextOff();\r
 \r
     // Release the second GPU resources\r
-    contextOn(1);\r
+    contextOn(0);\r
     imshow("GPU #1 result", Mat(d_result[1]));\r
     d_left[1].release();\r
     d_right[1].release();\r
@@ -175,7 +189,9 @@ int main(int argc, char** argv)
 \r
 void Worker::operator()(int device_id) const\r
 {\r
-    contextOn(device_id);\r
+    mutex.lock();\r
+\r
+    contextOn(0);\r
 \r
     bm[device_id]->operator()(d_left[device_id], d_right[device_id],\r
                               d_result[device_id]);\r
@@ -184,13 +200,16 @@ void Worker::operator()(int device_id) const
         << "): finished\n";\r
 \r
     contextOff();\r
+\r
+    mutex.unlock();\r
 }\r
 \r
 \r
 void destroyContexts()\r
 {\r
     safeCall(cuCtxDestroy(contexts[0]));\r
-    safeCall(cuCtxDestroy(contexts[1]));\r
+    //safeCall(cuCtxDestroy(contexts[1]));\r
 }\r
 \r
 #endif\r
+\r