Updated bundle adjustment in the stitching module: 1) it minimizes reprojection error...
author    Alexey Spizhevoy <no@email>
Fri, 16 Sep 2011 12:25:23 +0000 (12:25 +0000)
committer Alexey Spizhevoy <no@email>
Fri, 16 Sep 2011 12:25:23 +0000 (12:25 +0000)
15 files changed:
modules/gpu/include/opencv2/gpu/gpu.hpp
modules/gpu/perf/perf_imgproc.cpp
modules/gpu/src/cuda/imgproc.cu
modules/gpu/src/imgproc.cpp
modules/stitching/include/opencv2/stitching/detail/camera.hpp
modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp
modules/stitching/include/opencv2/stitching/detail/warpers.hpp
modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp
modules/stitching/include/opencv2/stitching/warpers.hpp
modules/stitching/src/camera.cpp
modules/stitching/src/motion_estimators.cpp
modules/stitching/src/stitcher.cpp
modules/stitching/src/warpers.cpp
samples/cpp/stitching.cpp
samples/cpp/stitching_detailed.cpp
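
Note on the diffs below: BundleAdjuster now packs 7 parameters per camera (focal length, principal point x/y, aspect ratio and a 3-element Rodrigues rotation) and minimizes a 2D reprojection error per match instead of the old 3D ray-space distance. A minimal sketch of the new residual, mirroring the homography chain used in BundleAdjuster::calcError (the helper name and argument types here are illustrative, not part of the commit):

    #include <opencv2/core/core.hpp>

    // Sketch only: two residuals per inlier match, as accumulated in calcError.
    // K1, K2 are 3x3 CV_64F intrinsics; R1, R2 are 3x3 CV_64F rotations.
    static cv::Point2d reprojResidual(const cv::Mat &K1, const cv::Mat &R1,
                                      const cv::Mat &K2, const cv::Mat &R2,
                                      const cv::Point2d &p1, const cv::Point2d &p2)
    {
        cv::Mat_<double> H = K2 * R2.inv() * R1 * K1.inv();
        double x = H(0,0)*p1.x + H(0,1)*p1.y + H(0,2);
        double y = H(1,0)*p1.x + H(1,1)*p1.y + H(1,2);
        double z = H(2,0)*p1.x + H(2,1)*p1.y + H(2,2);
        return cv::Point2d(p2.x - x/z, p2.y - y/z);   // err rows 2*k and 2*k + 1
    }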

modules/gpu/include/opencv2/gpu/gpu.hpp
index 642deee..ffbc0da 100644
@@ -645,15 +645,15 @@ namespace cv
         CV_EXPORTS void warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, Stream& stream = Stream::Null());\r
 \r
         //! builds plane warping maps\r
-        CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s, double dist,\r
+        CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,\r
                                            GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null());\r
 \r
         //! builds cylindrical warping maps\r
-        CV_EXPORTS void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,\r
+        CV_EXPORTS void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,\r
                                                  GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null());\r
 \r
         //! builds spherical warping maps\r
-        CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,\r
+        CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,\r
                                                GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null());\r
 \r
         //! rotate 8bit single or four channel image\r
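
A hedged usage sketch of the reworked map builders declared above: warping is now parameterized by an explicit 3x3 CV_32F intrinsic matrix K, a rotation R and a single projection scale, instead of a rotation plus separate focal/scale (and, for the plane warp, plane-distance) values. The concrete numbers below are illustrative only:

    #include <opencv2/gpu/gpu.hpp>

    void buildSphericalMapsExample()
    {
        // Illustrative intrinsics: 1000 px focal length, principal point (320, 240).
        cv::Mat K = cv::Mat::eye(3, 3, CV_32FC1);
        K.at<float>(0,0) = 1000.f; K.at<float>(0,2) = 320.f;
        K.at<float>(1,1) = 1000.f; K.at<float>(1,2) = 240.f;
        cv::Mat R = cv::Mat::eye(3, 3, CV_32FC1);            // identity rotation

        cv::gpu::GpuMat map_x, map_y;
        cv::gpu::buildWarpSphericalMaps(cv::Size(640, 480),
                                        cv::Rect(0, 0, 1200, 600),   // destination ROI (arbitrary here)
                                        K, R, 1000.f /*scale*/, map_x, map_y);
        // map_x / map_y can then be used to remap the source image on the GPU.
    }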
modules/gpu/perf/perf_imgproc.cpp
index fb72043..b2acf4d 100644
@@ -366,7 +366,8 @@ PERF_TEST_P(DevInfo_Size, buildWarpPlaneMaps, testing::Combine(testing::ValuesIn
 \r
     SIMPLE_TEST_CYCLE()\r
     {\r
-        buildWarpPlaneMaps(size, Rect(0, 0, size.width, size.height), Mat::ones(3, 3, CV_32FC1), 1.0, 1.0, 1.0, map_x, map_y);\r
+        buildWarpPlaneMaps(size, Rect(0, 0, size.width, size.height), Mat::eye(3, 3, CV_32FC1), \r
+                           Mat::ones(3, 3, CV_32FC1), 1.0, map_x, map_y);\r
     }\r
 \r
     Mat map_x_host(map_x);\r
@@ -391,7 +392,8 @@ PERF_TEST_P(DevInfo_Size, buildWarpCylindricalMaps, testing::Combine(testing::Va
 \r
     SIMPLE_TEST_CYCLE()\r
     {\r
-        buildWarpCylindricalMaps(size, Rect(0, 0, size.width, size.height), Mat::ones(3, 3, CV_32FC1), 1.0, 1.0, map_x, map_y);\r
+        buildWarpCylindricalMaps(size, Rect(0, 0, size.width, size.height), Mat::eye(3, 3, CV_32FC1),\r
+                                 Mat::ones(3, 3, CV_32FC1), 1.0, map_x, map_y);\r
     }\r
 \r
     Mat map_x_host(map_x);\r
@@ -402,7 +404,7 @@ PERF_TEST_P(DevInfo_Size, buildWarpCylindricalMaps, testing::Combine(testing::Va
 }\r
 \r
 PERF_TEST_P(DevInfo_Size, buildWarpSphericalMaps, testing::Combine(testing::ValuesIn(devices()),\r
-                                                               testing::Values(GPU_TYPICAL_MAT_SIZES)))\r
+                                                                   testing::Values(GPU_TYPICAL_MAT_SIZES)))\r
 {\r
     DeviceInfo devInfo = std::tr1::get<0>(GetParam());\r
     Size size = std::tr1::get<1>(GetParam());\r
@@ -416,7 +418,8 @@ PERF_TEST_P(DevInfo_Size, buildWarpSphericalMaps, testing::Combine(testing::Valu
 \r
     SIMPLE_TEST_CYCLE()\r
     {\r
-        buildWarpSphericalMaps(size, Rect(0, 0, size.width, size.height), Mat::ones(3, 3, CV_32FC1), 1.0, 1.0, map_x, map_y);\r
+        buildWarpSphericalMaps(size, Rect(0, 0, size.width, size.height), Mat::eye(3, 3, CV_32FC1),\r
+                               Mat::ones(3, 3, CV_32FC1), 1.0, map_x, map_y);\r
     }\r
 \r
     Mat map_x_host(map_x);\r
modules/gpu/src/cuda/imgproc.cu
index 67cfa59..927c383 100644
@@ -787,14 +787,14 @@ namespace cv { namespace gpu { namespace imgproc
     //////////////////////////////////////////////////////////////////////////\r
     // buildWarpMaps\r
 \r
+    // TODO use intrinsics like __sinf and so on\r
+\r
     namespace build_warp_maps\r
     {\r
 \r
-        __constant__ float cr[9];\r
-        __constant__ float crinv[9];\r
-        __constant__ float cf, cs;\r
-        __constant__ float chalf_w, chalf_h;\r
-        __constant__ float cdist;\r
+        __constant__ float ck_rinv[9];\r
+        __constant__ float cr_kinv[9];\r
+        __constant__ float cscale;\r
     }\r
 \r
 \r
@@ -805,16 +805,16 @@ namespace cv { namespace gpu { namespace imgproc
         {\r
             using namespace build_warp_maps;\r
 \r
-            float x_ = u / cs;\r
-            float y_ = v / cs;\r
+            float x_ = u / cscale;\r
+            float y_ = v / cscale;\r
 \r
             float z;\r
-            x = crinv[0]*x_ + crinv[1]*y_ + crinv[2]*cdist;\r
-            y = crinv[3]*x_ + crinv[4]*y_ + crinv[5]*cdist;\r
-            z = crinv[6]*x_ + crinv[7]*y_ + crinv[8]*cdist;\r
+            x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2];\r
+            y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5];\r
+            z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8];\r
 \r
-            x = cf*x/z + chalf_w;\r
-            y = cf*y/z + chalf_h;\r
+            x /= z;\r
+            y /= z;\r
         }\r
     };\r
 \r
@@ -826,18 +826,18 @@ namespace cv { namespace gpu { namespace imgproc
         {\r
             using namespace build_warp_maps;\r
 \r
-            u /= cs;\r
+            u /= cscale;\r
             float x_ = sinf(u);\r
-            float y_ = v / cs;\r
+            float y_ = v / cscale;\r
             float z_ = cosf(u);\r
 \r
             float z;\r
-            x = crinv[0]*x_ + crinv[1]*y_ + crinv[2]*z_;\r
-            y = crinv[3]*x_ + crinv[4]*y_ + crinv[5]*z_;\r
-            z = crinv[6]*x_ + crinv[7]*y_ + crinv[8]*z_;\r
+            x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;\r
+            y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;\r
+            z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;\r
 \r
-            x = cf*x/z + chalf_w;\r
-            y = cf*y/z + chalf_h;\r
+            x /= z;\r
+            y /= z;\r
         }\r
     };\r
 \r
@@ -849,8 +849,8 @@ namespace cv { namespace gpu { namespace imgproc
         {\r
             using namespace build_warp_maps;\r
 \r
-            v /= cs;\r
-            u /= cs;\r
+            v /= cscale;\r
+            u /= cscale;\r
 \r
             float sinv = sinf(v);\r
             float x_ = sinv * sinf(u);\r
@@ -858,12 +858,12 @@ namespace cv { namespace gpu { namespace imgproc
             float z_ = sinv * cosf(u);\r
 \r
             float z;\r
-            x = crinv[0]*x_ + crinv[1]*y_ + crinv[2]*z_;\r
-            y = crinv[3]*x_ + crinv[4]*y_ + crinv[5]*z_;\r
-            z = crinv[6]*x_ + crinv[7]*y_ + crinv[8]*z_;\r
+            x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;\r
+            y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;\r
+            z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;\r
 \r
-            x = cf*x/z + chalf_w;\r
-            y = cf*y/z + chalf_h;\r
+            x /= z;\r
+            y /= z;\r
         }\r
     };\r
 \r
@@ -887,16 +887,12 @@ namespace cv { namespace gpu { namespace imgproc
 \r
 \r
     void buildWarpPlaneMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,\r
-                            const float r[9], const float rinv[9], float f, float s, float dist,\r
-                            float half_w, float half_h, cudaStream_t stream)\r
+                            const float k_rinv[9], const float r_kinv[9], float scale,\r
+                            cudaStream_t stream)\r
     {\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr, r, 9*sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::crinv, rinv, 9*sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cf, &f, sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cs, &s, sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_w, &half_w, sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_h, &half_h, sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cdist, &dist, sizeof(float)));\r
+        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));\r
+        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));\r
+        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));\r
 \r
         int cols = map_x.cols;\r
         int rows = map_x.rows;\r
@@ -912,15 +908,12 @@ namespace cv { namespace gpu { namespace imgproc
 \r
 \r
     void buildWarpCylindricalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,\r
-                                  const float r[9], const float rinv[9], float f, float s,\r
-                                  float half_w, float half_h, cudaStream_t stream)\r
+                            const float k_rinv[9], const float r_kinv[9], float scale,\r
+                            cudaStream_t stream)\r
     {\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr, r, 9*sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::crinv, rinv, 9*sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cf, &f, sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cs, &s, sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_w, &half_w, sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_h, &half_h, sizeof(float)));\r
+        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));\r
+        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));\r
+        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));\r
 \r
         int cols = map_x.cols;\r
         int rows = map_x.rows;\r
@@ -936,15 +929,12 @@ namespace cv { namespace gpu { namespace imgproc
 \r
 \r
     void buildWarpSphericalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,\r
-                                const float r[9], const float rinv[9], float f, float s,\r
-                                float half_w, float half_h, cudaStream_t stream)\r
-    {\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr, r, 9*sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::crinv, rinv, 9*sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cf, &f, sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cs, &s, sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_w, &half_w, sizeof(float)));\r
-        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_h, &half_h, sizeof(float)));\r
+                            const float k_rinv[9], const float r_kinv[9], float scale,\r
+                            cudaStream_t stream)\r
+    {\r
+        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));\r
+        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));\r
+        cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));\r
 \r
         int cols = map_x.cols;\r
         int rows = map_x.rows;\r
modules/gpu/src/imgproc.cpp
index a372fcd..753cc4e 100644
@@ -56,9 +56,9 @@ void cv::gpu::resize(const GpuMat&, GpuMat&, Size, double, double, int, Stream&)
 void cv::gpu::copyMakeBorder(const GpuMat&, GpuMat&, int, int, int, int, const Scalar&, Stream&) { throw_nogpu(); }\r
 void cv::gpu::warpAffine(const GpuMat&, GpuMat&, const Mat&, Size, int, Stream&) { throw_nogpu(); }\r
 void cv::gpu::warpPerspective(const GpuMat&, GpuMat&, const Mat&, Size, int, Stream&) { throw_nogpu(); }\r
-void cv::gpu::buildWarpPlaneMaps(Size, Rect, const Mat&, double, double, double, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
-void cv::gpu::buildWarpCylindricalMaps(Size, Rect, const Mat&, double, double, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
-void cv::gpu::buildWarpSphericalMaps(Size, Rect, const Mat&, double, double, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
+void cv::gpu::buildWarpPlaneMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
+void cv::gpu::buildWarpCylindricalMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
+void cv::gpu::buildWarpSphericalMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
 void cv::gpu::rotate(const GpuMat&, GpuMat&, Size, double, double, double, int, Stream&) { throw_nogpu(); }\r
 void cv::gpu::integral(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
 void cv::gpu::integralBuffered(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
@@ -584,22 +584,25 @@ void cv::gpu::warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size
 namespace cv { namespace gpu { namespace imgproc\r
 {\r
     void buildWarpPlaneMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,\r
-                            const float r[9], const float rinv[9], float f, float s, float dist,\r
-                            float half_w, float half_h, cudaStream_t stream);\r
+                            const float k_rinv[9], const float r_kinv[9], float scale,\r
+                            cudaStream_t stream);\r
 }}}\r
 \r
-void cv::gpu::buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,\r
-                                 double dist, GpuMat& map_x, GpuMat& map_y, Stream& stream)\r
+void cv::gpu::buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,\r
+                                 GpuMat& map_x, GpuMat& map_y, Stream& stream)\r
 {\r
-    CV_Assert(R.size() == Size(3,3) && R.isContinuous() && R.type() == CV_32F);\r
-    Mat Rinv = R.inv();\r
-    CV_Assert(Rinv.isContinuous());\r
+    CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);\r
+    CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);\r
+\r
+    Mat K_Rinv = K * R.t();\r
+    Mat R_Kinv = R * K.inv();\r
+    CV_Assert(K_Rinv.isContinuous());\r
+    CV_Assert(R_Kinv.isContinuous());\r
 \r
     map_x.create(dst_roi.size(), CV_32F);\r
     map_y.create(dst_roi.size(), CV_32F);\r
-    imgproc::buildWarpPlaneMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, R.ptr<float>(), Rinv.ptr<float>(),\r
-                                static_cast<float>(f), static_cast<float>(s), static_cast<float>(dist), \r
-                                0.5f*src_size.width, 0.5f*src_size.height, StreamAccessor::getStream(stream));\r
+    imgproc::buildWarpPlaneMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),\r
+                                scale, StreamAccessor::getStream(stream));\r
 }\r
 \r
 //////////////////////////////////////////////////////////////////////////////\r
@@ -608,22 +611,25 @@ void cv::gpu::buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat& R, doub
 namespace cv { namespace gpu { namespace imgproc\r
 {\r
     void buildWarpCylindricalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,\r
-                                  const float r[9], const float rinv[9], float f, float s,\r
-                                  float half_w, float half_h, cudaStream_t stream);\r
+                                  const float k_rinv[9], const float r_kinv[9], float scale,\r
+                                  cudaStream_t stream);\r
 }}}\r
 \r
-void cv::gpu::buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,\r
+void cv::gpu::buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,\r
                                        GpuMat& map_x, GpuMat& map_y, Stream& stream)\r
 {\r
-    CV_Assert(R.size() == Size(3,3) && R.isContinuous() && R.type() == CV_32F);\r
-    Mat Rinv = R.inv();\r
-    CV_Assert(Rinv.isContinuous());\r
+    CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);\r
+    CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);\r
+\r
+    Mat K_Rinv = K * R.t();\r
+    Mat R_Kinv = R * K.inv();\r
+    CV_Assert(K_Rinv.isContinuous());\r
+    CV_Assert(R_Kinv.isContinuous());\r
 \r
     map_x.create(dst_roi.size(), CV_32F);\r
     map_y.create(dst_roi.size(), CV_32F);\r
-    imgproc::buildWarpCylindricalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, R.ptr<float>(), Rinv.ptr<float>(),\r
-                                      static_cast<float>(f), static_cast<float>(s), 0.5f*src_size.width, 0.5f*src_size.height, \r
-                                      StreamAccessor::getStream(stream));\r
+    imgproc::buildWarpCylindricalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),\r
+                                      scale, StreamAccessor::getStream(stream));\r
 }\r
 \r
 \r
@@ -633,22 +639,25 @@ void cv::gpu::buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat& R
 namespace cv { namespace gpu { namespace imgproc\r
 {\r
     void buildWarpSphericalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,\r
-                                const float r[9], const float rinv[9], float f, float s,\r
-                                float half_w, float half_h, cudaStream_t stream);\r
+                                const float k_rinv[9], const float r_kinv[9], float scale,\r
+                                cudaStream_t stream);\r
 }}}\r
 \r
-void cv::gpu::buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,\r
+void cv::gpu::buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,\r
                                      GpuMat& map_x, GpuMat& map_y, Stream& stream)\r
 {\r
-    CV_Assert(R.size() == Size(3,3) && R.isContinuous() && R.type() == CV_32F);\r
-    Mat Rinv = R.inv();\r
-    CV_Assert(Rinv.isContinuous());\r
+    CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);\r
+    CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);\r
+\r
+    Mat K_Rinv = K * R.t();\r
+    Mat R_Kinv = R * K.inv();\r
+    CV_Assert(K_Rinv.isContinuous());\r
+    CV_Assert(R_Kinv.isContinuous());\r
 \r
     map_x.create(dst_roi.size(), CV_32F);\r
     map_y.create(dst_roi.size(), CV_32F);\r
-    imgproc::buildWarpSphericalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, R.ptr<float>(), Rinv.ptr<float>(),\r
-                                    static_cast<float>(f), static_cast<float>(s), 0.5f*src_size.width, 0.5f*src_size.height, \r
-                                    StreamAccessor::getStream(stream));\r
+    imgproc::buildWarpSphericalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),\r
+                                    scale, StreamAccessor::getStream(stream));\r
 }\r
 \r
 ////////////////////////////////////////////////////////////////////////\r
modules/stitching/include/opencv2/stitching/detail/camera.hpp
index 1638c06..a74abcb 100644
@@ -53,8 +53,12 @@ struct CV_EXPORTS CameraParams
     CameraParams();
     CameraParams(const CameraParams& other);
     const CameraParams& operator =(const CameraParams& other);
+    Mat K() const;
 
     double focal; // Focal length
+    double aspect; // Aspect ratio
+    double ppx; // Principal point X
+    double ppy; // Principal point Y
     Mat R; // Rotation
     Mat t; // Translation
 };
modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp
index b8dd348..2ec1a4e 100644
@@ -58,9 +58,7 @@ public:
 \r
     void operator ()(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, \r
                      std::vector<CameraParams> &cameras)\r
-    {\r
-        estimate(features, pairwise_matches, cameras);\r
-    }\r
+        { estimate(features, pairwise_matches, cameras); }\r
 \r
 protected:\r
     virtual void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, \r
@@ -90,6 +88,8 @@ public:
     BundleAdjuster(int cost_space = FOCAL_RAY_SPACE, float conf_thresh = 1.f) \r
         : cost_space_(cost_space), conf_thresh_(conf_thresh) {}\r
 \r
+    Mat K;\r
+\r
 private:\r
     void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, \r
                   std::vector<CameraParams> &cameras);\r
modules/stitching/include/opencv2/stitching/detail/warpers.hpp
index 8e9d9a1..2a64ef4 100644
@@ -55,27 +55,22 @@ namespace detail {
 class CV_EXPORTS Warper\r
 {\r
 public:\r
-    enum { PLANE, CYLINDRICAL, SPHERICAL };\r
-\r
-    // TODO remove this method\r
-    static Ptr<Warper> createByCameraFocal(float focal, int type, bool try_gpu = false);\r
-\r
     virtual ~Warper() {}\r
-    virtual Point warp(const Mat &src, float focal, const Mat& R, Mat &dst,\r
+    virtual Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
                        int interp_mode = INTER_LINEAR, int border_mode = BORDER_REFLECT) = 0;\r
-    virtual Rect warpRoi(const Size &sz, float focal, const Mat &R) = 0;\r
+    virtual Rect warpRoi(const Size &sz, const Mat &K, const Mat &R) = 0;\r
 };\r
 \r
 \r
 struct CV_EXPORTS ProjectorBase\r
 {\r
-    void setTransformation(const Mat& R);\r
+    void setCameraParams(const Mat &K, const Mat &R);\r
 \r
-    Size size;\r
-    float focal;\r
-    float r[9];\r
-    float rinv[9];\r
     float scale;\r
+    float k[9];\r
+    float rinv[9];\r
+    float r_kinv[9];\r
+    float k_rinv[9];\r
 };\r
 \r
 \r
@@ -83,10 +78,10 @@ template <class P>
 class CV_EXPORTS WarperBase : public Warper\r
 {   \r
 public:\r
-    virtual Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
+    virtual Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
                        int interp_mode, int border_mode);\r
 \r
-    virtual Rect warpRoi(const Size &sz, float focal, const Mat &R);\r
+    virtual Rect warpRoi(const Size &sz, const Mat &K, const Mat &R);\r
 \r
 protected:\r
     // Detects ROI of the destination image. It's correct for any projection.\r
@@ -105,7 +100,6 @@ struct CV_EXPORTS PlaneProjector : ProjectorBase
 {\r
     void mapForward(float x, float y, float &u, float &v);\r
     void mapBackward(float u, float v, float &x, float &y);\r
-    float plane_dist;\r
 };\r
 \r
 \r
@@ -113,11 +107,7 @@ struct CV_EXPORTS PlaneProjector : ProjectorBase
 class CV_EXPORTS PlaneWarper : public WarperBase<PlaneProjector>\r
 {\r
 public:\r
-    PlaneWarper(float plane_dist = 1.f, float scale = 1.f)\r
-    {\r
-        projector_.plane_dist = plane_dist;\r
-        projector_.scale = scale;\r
-    }\r
+    PlaneWarper(float scale = 1.f) { projector_.scale = scale; }\r
 \r
 protected:\r
     void detectResultRoi(Point &dst_tl, Point &dst_br);\r
@@ -127,8 +117,8 @@ protected:
 class CV_EXPORTS PlaneWarperGpu : public PlaneWarper\r
 {\r
 public:\r
-    PlaneWarperGpu(float plane_dist = 1.f, float scale = 1.f) : PlaneWarper(plane_dist, scale) {}\r
-    Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
+    PlaneWarperGpu(float scale = 1.f) : PlaneWarper(scale) {}\r
+    Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
                int interp_mode, int border_mode);\r
 \r
 private:\r
@@ -149,7 +139,7 @@ struct CV_EXPORTS SphericalProjector : ProjectorBase
 class CV_EXPORTS SphericalWarper : public WarperBase<SphericalProjector>\r
 {\r
 public:\r
-    SphericalWarper(float scale = 300.f) { projector_.scale = scale; }\r
+    SphericalWarper(float scale) { projector_.scale = scale; }\r
 \r
 protected:\r
     void detectResultRoi(Point &dst_tl, Point &dst_br);\r
@@ -160,9 +150,9 @@ protected:
 class CV_EXPORTS SphericalWarperGpu : public SphericalWarper\r
 {\r
 public:\r
-    SphericalWarperGpu(float scale = 300.f) : SphericalWarper(scale) {}\r
-    Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
-                   int interp_mode, int border_mode);\r
+    SphericalWarperGpu(float scale) : SphericalWarper(scale) {}\r
+    Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
+               int interp_mode, int border_mode);\r
 \r
 private:\r
     gpu::GpuMat d_xmap_, d_ymap_, d_dst_, d_src_;\r
@@ -181,13 +171,11 @@ struct CV_EXPORTS CylindricalProjector : ProjectorBase
 class CV_EXPORTS CylindricalWarper : public WarperBase<CylindricalProjector>\r
 {\r
 public:\r
-    CylindricalWarper(float scale = 300.f) { projector_.scale = scale; }\r
+    CylindricalWarper(float scale) { projector_.scale = scale; }\r
 \r
 protected:\r
     void detectResultRoi(Point &dst_tl, Point &dst_br)\r
-    {\r
-        WarperBase<CylindricalProjector>::detectResultRoiByBorder(dst_tl, dst_br);\r
-    }\r
+        { WarperBase<CylindricalProjector>::detectResultRoiByBorder(dst_tl, dst_br); }\r
 };\r
 \r
 \r
@@ -195,9 +183,9 @@ protected:
 class CV_EXPORTS CylindricalWarperGpu : public CylindricalWarper\r
 {\r
 public:\r
-    CylindricalWarperGpu(float scale = 300.f) : CylindricalWarper(scale) {}\r
-    Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
-                   int interp_mode, int border_mode);\r
+    CylindricalWarperGpu(float scale) : CylindricalWarper(scale) {}\r
+    Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
+               int interp_mode, int border_mode);\r
 \r
 private:\r
     gpu::GpuMat d_xmap_, d_ymap_, d_dst_, d_src_;\r
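
A hedged sketch of the updated detail::Warper interface shown above: warpers are constructed from the projection scale alone (no default scale any more for the spherical/cylindrical ones), and warp() takes the full 3x3 CV_32F camera matrix instead of a bare focal length. Values here are placeholders:

    #include "opencv2/stitching/detail/warpers.hpp"

    // Sketch only: warp one image with the new K/R based interface.
    cv::Point warpOneImage(const cv::Mat &src, const cv::Mat &K /*3x3 CV_32F*/,
                           const cv::Mat &R /*3x3 CV_32F*/, float scale, cv::Mat &dst)
    {
        cv::detail::SphericalWarper warper(scale);    // scale argument is now required
        return warper.warp(src, K, R, dst, cv::INTER_LINEAR, cv::BORDER_REFLECT);
    }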
modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp
index 551d5d3..40c2543 100644
@@ -50,14 +50,11 @@ namespace cv {
 namespace detail {\r
 \r
 template <class P>\r
-Point WarperBase<P>::warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
+Point WarperBase<P>::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
                           int interp_mode, int border_mode)\r
 {\r
     src_size_ = src.size();\r
-\r
-    projector_.size = src.size();\r
-    projector_.focal = focal;\r
-    projector_.setTransformation(R);\r
+    projector_.setCameraParams(K, R);\r
 \r
     Point dst_tl, dst_br;\r
     detectResultRoi(dst_tl, dst_br);\r
@@ -84,13 +81,10 @@ Point WarperBase<P>::warp(const Mat &src, float focal, const Mat &R, Mat &dst,
 \r
 \r
 template <class P>\r
-Rect WarperBase<P>::warpRoi(const Size &sz, float focal, const Mat &R)\r
+Rect WarperBase<P>::warpRoi(const Size &sz, const Mat &K, const Mat &R)\r
 {\r
     src_size_ = sz;\r
-\r
-    projector_.size = sz;\r
-    projector_.focal = focal;\r
-    projector_.setTransformation(R);\r
+    projector_.setCameraParams(K, R);\r
 \r
     Point dst_tl, dst_br;\r
     detectResultRoi(dst_tl, dst_br);\r
@@ -165,43 +159,37 @@ void WarperBase<P>::detectResultRoiByBorder(Point &dst_tl, Point &dst_br)
 inline\r
 void PlaneProjector::mapForward(float x, float y, float &u, float &v)\r
 {\r
-    x -= size.width * 0.5f;\r
-    y -= size.height * 0.5f;\r
+    float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\r
+    float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\r
+    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\r
 \r
-    float x_ = r[0] * x + r[1] * y + r[2] * focal;\r
-    float y_ = r[3] * x + r[4] * y + r[5] * focal;\r
-    float z_ = r[6] * x + r[7] * y + r[8] * focal;\r
-\r
-    u = scale * x_ / z_ * plane_dist;\r
-    v = scale * y_ / z_ * plane_dist;\r
+    u = scale * x_ / z_;\r
+    v = scale * y_ / z_;\r
 }\r
 \r
 \r
 inline\r
 void PlaneProjector::mapBackward(float u, float v, float &x, float &y)\r
 {\r
-    float x_ = u / scale;\r
-    float y_ = v / scale;\r
+    u /= scale;\r
+    v /= scale;\r
 \r
     float z;\r
-    x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * plane_dist;\r
-    y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * plane_dist;\r
-    z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * plane_dist;\r
+    x = k_rinv[0] * u + k_rinv[1] * v + k_rinv[2];\r
+    y = k_rinv[3] * u + k_rinv[4] * v + k_rinv[5];\r
+    z = k_rinv[6] * u + k_rinv[7] * v + k_rinv[8];\r
 \r
-    x = focal * x / z + size.width * 0.5f;\r
-    y = focal * y / z + size.height * 0.5f;\r
+    x /= z;\r
+    y /= z;\r
 }\r
 \r
 \r
 inline\r
 void SphericalProjector::mapForward(float x, float y, float &u, float &v)\r
-{\r
-    x -= size.width * 0.5f;\r
-    y -= size.height * 0.5f;\r
-\r
-    float x_ = r[0] * x + r[1] * y + r[2] * focal;\r
-    float y_ = r[3] * x + r[4] * y + r[5] * focal;\r
-    float z_ = r[6] * x + r[7] * y + r[8] * focal;\r
+{    \r
+    float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\r
+    float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\r
+    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\r
 \r
     u = scale * atan2f(x_, z_);\r
     v = scale * (static_cast<float>(CV_PI) - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)));\r
@@ -211,30 +199,30 @@ void SphericalProjector::mapForward(float x, float y, float &u, float &v)
 inline\r
 void SphericalProjector::mapBackward(float u, float v, float &x, float &y)\r
 {\r
-    float sinv = sinf(static_cast<float>(CV_PI) - v / scale);\r
-    float x_ = sinv * sinf(u / scale);\r
-    float y_ = cosf(static_cast<float>(CV_PI) - v / scale);\r
-    float z_ = sinv * cosf(u / scale);\r
+    u /= scale;\r
+    v /= scale;\r
+\r
+    float sinv = sinf(static_cast<float>(CV_PI) - v);\r
+    float x_ = sinv * sinf(u);\r
+    float y_ = cosf(static_cast<float>(CV_PI) - v);\r
+    float z_ = sinv * cosf(u);\r
 \r
     float z;\r
-    x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * z_;\r
-    y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * z_;\r
-    z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * z_;\r
+    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\r
+    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\r
+    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\r
 \r
-    x = focal * x / z + size.width * 0.5f;\r
-    y = focal * y / z + size.height * 0.5f;\r
+    x /= z;\r
+    y /= z;\r
 }\r
 \r
 \r
 inline\r
 void CylindricalProjector::mapForward(float x, float y, float &u, float &v)\r
 {\r
-    x -= size.width * 0.5f;\r
-    y -= size.height * 0.5f;\r
-\r
-    float x_ = r[0] * x + r[1] * y + r[2] * focal;\r
-    float y_ = r[3] * x + r[4] * y + r[5] * focal;\r
-    float z_ = r[6] * x + r[7] * y + r[8] * focal;\r
+    float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\r
+    float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\r
+    float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\r
 \r
     u = scale * atan2f(x_, z_);\r
     v = scale * y_ / sqrtf(x_ * x_ + z_ * z_);\r
@@ -244,17 +232,20 @@ void CylindricalProjector::mapForward(float x, float y, float &u, float &v)
 inline\r
 void CylindricalProjector::mapBackward(float u, float v, float &x, float &y)\r
 {\r
-    float x_ = sinf(u / scale);\r
-    float y_ = v / scale;\r
-    float z_ = cosf(u / scale);\r
+    u /= scale;\r
+    v /= scale;\r
+\r
+    float x_ = sinf(u);\r
+    float y_ = v;\r
+    float z_ = cosf(u);\r
 \r
     float z;\r
-    x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * z_;\r
-    y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * z_;\r
-    z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * z_;\r
+    x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\r
+    y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\r
+    z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\r
 \r
-    x = focal * x / z + size.width * 0.5f;\r
-    y = focal * y / z + size.height * 0.5f;\r
+    x /= z;\r
+    y /= z;\r
 }\r
 \r
 } // namespace detail\r
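
A hedged round-trip check of the projector math above, assuming warpers.hpp pulls in these inline definitions as elsewhere in the module. With r_kinv = R*K^-1 and k_rinv = K*R^T set by setCameraParams, mapForward followed by mapBackward should reproduce the original pixel; the intrinsics below are illustrative:

    #include "opencv2/stitching/detail/warpers.hpp"

    void sphericalRoundTrip()
    {
        cv::Mat K = cv::Mat::eye(3, 3, CV_32F), R = cv::Mat::eye(3, 3, CV_32F);
        K.at<float>(0,0) = 1000.f; K.at<float>(0,2) = 320.f;
        K.at<float>(1,1) = 1000.f; K.at<float>(1,2) = 240.f;

        cv::detail::SphericalProjector proj;
        proj.scale = 1000.f;
        proj.setCameraParams(K, R);

        float u, v, x, y;
        proj.mapForward(320.f, 240.f, u, v);  // principal point -> u ~ 0, v ~ scale * CV_PI / 2
        proj.mapBackward(u, v, x, y);         // and back -> x ~ 320, y ~ 240
    }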
modules/stitching/include/opencv2/stitching/warpers.hpp
index a98a499..6f3c944 100644
@@ -51,28 +51,28 @@ class WarperCreator
 {
 public:
     virtual ~WarperCreator() {}
-    virtual Ptr<detail::Warper> createByFocalLength(double f) const = 0;
+    virtual Ptr<detail::Warper> create(float scale) const = 0;
 };
 
 
 class PlaneWarper : public WarperCreator
 {
 public:
-    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::PlaneWarper(f); }
+    Ptr<detail::Warper> create(float scale) const { return new detail::PlaneWarper(scale); }
 };
 
 
 class CylindricalWarper: public WarperCreator
 {
 public:
-    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::CylindricalWarper(f); }
+    Ptr<detail::Warper> create(float scale) const { return new detail::CylindricalWarper(scale); }
 };
 
 
 class SphericalWarper: public WarperCreator
 {
 public:
-    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::SphericalWarper(f); }
+    Ptr<detail::Warper> create(float scale) const { return new detail::SphericalWarper(scale); }
 };
 
 
@@ -80,21 +80,21 @@ public:
 class PlaneWarperGpu: public WarperCreator
 {
 public:
-    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::PlaneWarperGpu(f); }
+    Ptr<detail::Warper> create(float scale) const { return new detail::PlaneWarperGpu(scale); }
 };
 
 
 class CylindricalWarperGpu: public WarperCreator
 {
 public:
-    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::CylindricalWarperGpu(f); }
+    Ptr<detail::Warper> create(float scale) const { return new detail::CylindricalWarperGpu(scale); }
 };
 
 
 class SphericalWarperGpu: public WarperCreator
 {
 public:
-    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::SphericalWarperGpu(f); }
+    Ptr<detail::Warper> create(float scale) const { return new detail::SphericalWarperGpu(scale); }
 };
 #endif
 
modules/stitching/src/camera.cpp
index 79c0282..b20417c 100644
@@ -47,17 +47,29 @@ using namespace std;
 namespace cv {
 namespace detail {
 
-CameraParams::CameraParams() : focal(1), R(Mat::eye(3, 3, CV_64F)), t(Mat::zeros(3, 1, CV_64F)) {}
+CameraParams::CameraParams() : focal(1), aspect(1), ppx(0), ppy(0),
+                               R(Mat::eye(3, 3, CV_64F)), t(Mat::zeros(3, 1, CV_64F)) {}
 
 CameraParams::CameraParams(const CameraParams &other) { *this = other; }
 
 const CameraParams& CameraParams::operator =(const CameraParams &other)
 {
     focal = other.focal;
+    ppx = other.ppx;
+    ppy = other.ppy;
+    aspect = other.aspect;
     R = other.R.clone();
     t = other.t.clone();
     return *this;
 }
 
+Mat CameraParams::K() const 
+{
+    Mat_<double> k = Mat::eye(3, 3, CV_64F);
+    k(0,0) = focal; k(0,2) = ppx;
+    k(1,1) = focal * aspect; k(1,2) = ppy;
+    return k;
+}
+
 } // namespace detail
 } // namespace cv
modules/stitching/src/motion_estimators.cpp
index 786acd7..4d5503b 100644
@@ -142,6 +142,13 @@ void HomographyBasedEstimator::estimate(const vector<ImageFeatures> &features, c
     findMaxSpanningTree(num_images, pairwise_matches, span_tree, span_tree_centers);\r
     span_tree.walkBreadthFirst(span_tree_centers[0], CalcRotation(num_images, pairwise_matches, cameras));\r
 \r
+    // As calculations were performed under assumption that p.p. is in image center\r
+    for (int i = 0; i < num_images; ++i)\r
+    {\r
+        cameras[i].ppx = 0.5 * features[i].img_size.width;\r
+        cameras[i].ppy = 0.5 * features[i].img_size.height;\r
+    }\r
+\r
     LOGLN("Estimating rotations, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");\r
 }\r
 \r
@@ -162,11 +169,14 @@ void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vecto
     pairwise_matches_ = &pairwise_matches[0];\r
 \r
     // Prepare focals and rotations\r
-    cameras_.create(num_images_ * 4, 1, CV_64F);\r
+    cameras_.create(num_images_ * 7, 1, CV_64F);\r
     SVD svd;\r
     for (int i = 0; i < num_images_; ++i)\r
     {\r
-        cameras_.at<double>(i * 4, 0) = cameras[i].focal;\r
+        cameras_.at<double>(i * 7, 0) = cameras[i].focal;\r
+        cameras_.at<double>(i * 7 + 1, 0) = cameras[i].ppx;\r
+        cameras_.at<double>(i * 7 + 2, 0) = cameras[i].ppy;\r
+        cameras_.at<double>(i * 7 + 3, 0) = cameras[i].aspect;\r
 \r
         svd(cameras[i].R, SVD::FULL_UV);\r
         Mat R = svd.u * svd.vt;\r
@@ -175,9 +185,9 @@ void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vecto
 \r
         Mat rvec;\r
         Rodrigues(R, rvec); CV_Assert(rvec.type() == CV_32F);\r
-        cameras_.at<double>(i * 4 + 1, 0) = rvec.at<float>(0, 0);\r
-        cameras_.at<double>(i * 4 + 2, 0) = rvec.at<float>(1, 0);\r
-        cameras_.at<double>(i * 4 + 3, 0) = rvec.at<float>(2, 0);\r
+        cameras_.at<double>(i * 7 + 4, 0) = rvec.at<float>(0, 0);\r
+        cameras_.at<double>(i * 7 + 5, 0) = rvec.at<float>(1, 0);\r
+        cameras_.at<double>(i * 7 + 6, 0) = rvec.at<float>(2, 0);\r
     }\r
 \r
     // Select only consistent image pairs for futher adjustment\r
@@ -197,7 +207,7 @@ void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vecto
     for (size_t i = 0; i < edges_.size(); ++i)\r
         total_num_matches_ += static_cast<int>(pairwise_matches[edges_[i].first * num_images_ + edges_[i].second].num_inliers);\r
 \r
-    CvLevMarq solver(num_images_ * 4, total_num_matches_ * 3,\r
+    CvLevMarq solver(num_images_ * 7, total_num_matches_ * 2,\r
                      cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 1000, DBL_EPSILON));\r
 \r
     CvMat matParams = cameras_;\r
@@ -234,17 +244,20 @@ void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vecto
         }\r
     }\r
     LOGLN("");\r
-    LOGLN("Bundle adjustment, final error: " << sqrt(err_.dot(err_)));\r
+    LOGLN("Bundle adjustment, final error: " << sqrt(err_.dot(err_)) / total_num_matches_);\r
     LOGLN("Bundle adjustment, iterations done: " << count);\r
 \r
     // Obtain global motion\r
     for (int i = 0; i < num_images_; ++i)\r
     {\r
-        cameras[i].focal = cameras_.at<double>(i * 4, 0);\r
+        cameras[i].focal = cameras_.at<double>(i * 7, 0);\r
+        cameras[i].ppx = cameras_.at<double>(i * 7 + 1, 0);\r
+        cameras[i].ppy = cameras_.at<double>(i * 7 + 2, 0);\r
+        cameras[i].aspect = cameras_.at<double>(i * 7 + 3, 0);\r
         Mat rvec(3, 1, CV_64F);\r
-        rvec.at<double>(0, 0) = cameras_.at<double>(i * 4 + 1, 0);\r
-        rvec.at<double>(1, 0) = cameras_.at<double>(i * 4 + 2, 0);\r
-        rvec.at<double>(2, 0) = cameras_.at<double>(i * 4 + 3, 0);\r
+        rvec.at<double>(0, 0) = cameras_.at<double>(i * 7 + 4, 0);\r
+        rvec.at<double>(1, 0) = cameras_.at<double>(i * 7 + 5, 0);\r
+        rvec.at<double>(2, 0) = cameras_.at<double>(i * 7 + 6, 0);\r
         Rodrigues(rvec, cameras[i].R);\r
         Mat Mf;\r
         cameras[i].R.convertTo(Mf, CV_32F);\r
@@ -265,62 +278,65 @@ void BundleAdjuster::estimate(const vector<ImageFeatures> &features, const vecto
 \r
 void BundleAdjuster::calcError(Mat &err)\r
 {\r
-    err.create(total_num_matches_ * 3, 1, CV_64F);\r
+    err.create(total_num_matches_ * 2, 1, CV_64F);\r
 \r
     int match_idx = 0;\r
     for (size_t edge_idx = 0; edge_idx < edges_.size(); ++edge_idx)\r
     {\r
         int i = edges_[edge_idx].first;\r
         int j = edges_[edge_idx].second;\r
-        double f1 = cameras_.at<double>(i * 4, 0);\r
-        double f2 = cameras_.at<double>(j * 4, 0);\r
-        double R1[9], R2[9];\r
-        Mat R1_(3, 3, CV_64F, R1), R2_(3, 3, CV_64F, R2);\r
+        double f1 = cameras_.at<double>(i * 7, 0);\r
+        double f2 = cameras_.at<double>(j * 7, 0);\r
+        double ppx1 = cameras_.at<double>(i * 7 + 1, 0);\r
+        double ppx2 = cameras_.at<double>(j * 7 + 1, 0);\r
+        double ppy1 = cameras_.at<double>(i * 7 + 2, 0);\r
+        double ppy2 = cameras_.at<double>(j * 7 + 2, 0);\r
+        double a1 = cameras_.at<double>(i * 7 + 3, 0);\r
+        double a2 = cameras_.at<double>(j * 7 + 3, 0);\r
+\r
+        double R1[9];\r
+        Mat R1_(3, 3, CV_64F, R1);\r
         Mat rvec(3, 1, CV_64F);\r
-        rvec.at<double>(0, 0) = cameras_.at<double>(i * 4 + 1, 0);\r
-        rvec.at<double>(1, 0) = cameras_.at<double>(i * 4 + 2, 0);\r
-        rvec.at<double>(2, 0) = cameras_.at<double>(i * 4 + 3, 0);\r
-        Rodrigues(rvec, R1_); CV_Assert(R1_.type() == CV_64F);\r
-        rvec.at<double>(0, 0) = cameras_.at<double>(j * 4 + 1, 0);\r
-        rvec.at<double>(1, 0) = cameras_.at<double>(j * 4 + 2, 0);\r
-        rvec.at<double>(2, 0) = cameras_.at<double>(j * 4 + 3, 0);\r
-        Rodrigues(rvec, R2_); CV_Assert(R2_.type() == CV_64F);\r
+        rvec.at<double>(0, 0) = cameras_.at<double>(i * 7 + 4, 0);\r
+        rvec.at<double>(1, 0) = cameras_.at<double>(i * 7 + 5, 0);\r
+        rvec.at<double>(2, 0) = cameras_.at<double>(i * 7 + 6, 0);\r
+        Rodrigues(rvec, R1_);\r
+\r
+        double R2[9];\r
+        Mat R2_(3, 3, CV_64F, R2);\r
+        rvec.at<double>(0, 0) = cameras_.at<double>(j * 7 + 4, 0);\r
+        rvec.at<double>(1, 0) = cameras_.at<double>(j * 7 + 5, 0);\r
+        rvec.at<double>(2, 0) = cameras_.at<double>(j * 7 + 6, 0);\r
+        Rodrigues(rvec, R2_);\r
 \r
         const ImageFeatures& features1 = features_[i];\r
         const ImageFeatures& features2 = features_[j];\r
         const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];\r
 \r
+        Mat_<double> K1 = Mat::eye(3, 3, CV_64F);\r
+        K1(0,0) = f1; K1(0,2) = ppx1;\r
+        K1(1,1) = f1*a1; K1(1,2) = ppy1;\r
+\r
+        Mat_<double> K2 = Mat::eye(3, 3, CV_64F);\r
+        K2(0,0) = f2; K2(0,2) = ppx2;\r
+        K2(1,1) = f2*a2; K2(1,2) = ppy2;\r
+\r
+        Mat_<double> H = K2 * R2_.inv() * R1_ * K1.inv();\r
+\r
         for (size_t k = 0; k < matches_info.matches.size(); ++k)\r
         {\r
             if (!matches_info.inliers_mask[k])\r
                 continue;\r
-\r
             const DMatch& m = matches_info.matches[k];\r
 \r
-            Point2d kp1 = features1.keypoints[m.queryIdx].pt;\r
-            kp1.x -= 0.5 * features1.img_size.width;\r
-            kp1.y -= 0.5 * features1.img_size.height;\r
-            Point2d kp2 = features2.keypoints[m.trainIdx].pt;\r
-            kp2.x -= 0.5 * features2.img_size.width;\r
-            kp2.y -= 0.5 * features2.img_size.height;\r
-            double len1 = sqrt(kp1.x * kp1.x + kp1.y * kp1.y + f1 * f1);\r
-            double len2 = sqrt(kp2.x * kp2.x + kp2.y * kp2.y + f2 * f2);\r
-            Point3d p1(kp1.x / len1, kp1.y / len1, f1 / len1);\r
-            Point3d p2(kp2.x / len2, kp2.y / len2, f2 / len2);\r
-\r
-            Point3d d1(p1.x * R1[0] + p1.y * R1[1] + p1.z * R1[2],\r
-                       p1.x * R1[3] + p1.y * R1[4] + p1.z * R1[5],\r
-                       p1.x * R1[6] + p1.y * R1[7] + p1.z * R1[8]);\r
-            Point3d d2(p2.x * R2[0] + p2.y * R2[1] + p2.z * R2[2],\r
-                       p2.x * R2[3] + p2.y * R2[4] + p2.z * R2[5],\r
-                       p2.x * R2[6] + p2.y * R2[7] + p2.z * R2[8]);\r
-\r
-            double mult = 1; // For cost_space_ == RAY_SPACE\r
-            if (cost_space_ == FOCAL_RAY_SPACE)\r
-                mult = sqrt(f1 * f2);\r
-            err.at<double>(3 * match_idx, 0) = mult * (d1.x - d2.x);\r
-            err.at<double>(3 * match_idx + 1, 0) = mult * (d1.y - d2.y);\r
-            err.at<double>(3 * match_idx + 2, 0) = mult * (d1.z - d2.z);\r
+            Point2d p1 = features1.keypoints[m.queryIdx].pt;\r
+            Point2d p2 = features2.keypoints[m.trainIdx].pt;\r
+            double x = H(0,0)*p1.x + H(0,1)*p1.y + H(0,2);\r
+            double y = H(1,0)*p1.x + H(1,1)*p1.y + H(1,2);\r
+            double z = H(2,0)*p1.x + H(2,1)*p1.y + H(2,2);\r
+\r
+            err.at<double>(2 * match_idx, 0) = p2.x - x/z;\r
+            err.at<double>(2 * match_idx + 1, 0) = p2.y - y/z;\r
             match_idx++;\r
         }\r
     }\r
@@ -329,45 +345,23 @@ void BundleAdjuster::calcError(Mat &err)
 \r
 void BundleAdjuster::calcJacobian()\r
 {\r
-    J_.create(total_num_matches_ * 3, num_images_ * 4, CV_64F);\r
+    J_.create(total_num_matches_ * 2, num_images_ * 7, CV_64F);\r
 \r
-    double f, r;\r
-    const double df = 0.001; // Focal length step\r
-    const double dr = 0.001; // Angle step\r
+    double val;\r
+    const double step = 1e-3;\r
 \r
     for (int i = 0; i < num_images_; ++i)\r
     {\r
-        f = cameras_.at<double>(i * 4, 0);\r
-        cameras_.at<double>(i * 4, 0) = f - df;\r
-        calcError(err1_);\r
-        cameras_.at<double>(i * 4, 0) = f + df;\r
-        calcError(err2_);\r
-        calcDeriv(err1_, err2_, 2 * df, J_.col(i * 4));\r
-        cameras_.at<double>(i * 4, 0) = f;\r
-\r
-        r = cameras_.at<double>(i * 4 + 1, 0);\r
-        cameras_.at<double>(i * 4 + 1, 0) = r - dr;\r
-        calcError(err1_);\r
-        cameras_.at<double>(i * 4 + 1, 0) = r + dr;\r
-        calcError(err2_);\r
-        calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 1));\r
-        cameras_.at<double>(i * 4 + 1, 0) = r;\r
-\r
-        r = cameras_.at<double>(i * 4 + 2, 0);\r
-        cameras_.at<double>(i * 4 + 2, 0) = r - dr;\r
-        calcError(err1_);\r
-        cameras_.at<double>(i * 4 + 2, 0) = r + dr;\r
-        calcError(err2_);\r
-        calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 2));\r
-        cameras_.at<double>(i * 4 + 2, 0) = r;\r
-\r
-        r = cameras_.at<double>(i * 4 + 3, 0);\r
-        cameras_.at<double>(i * 4 + 3, 0) = r - dr;\r
-        calcError(err1_);\r
-        cameras_.at<double>(i * 4 + 3, 0) = r + dr;\r
-        calcError(err2_);\r
-        calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 3));\r
-        cameras_.at<double>(i * 4 + 3, 0) = r;\r
+        for (int j = 0; j < 7; ++j)\r
+        {\r
+            val = cameras_.at<double>(i * 7 + j, 0);\r
+            cameras_.at<double>(i * 7+ j, 0) = val - step;\r
+            calcError(err1_);\r
+            cameras_.at<double>(i * 7 + j, 0) = val + step;\r
+            calcError(err2_);\r
+            calcDeriv(err1_, err2_, 2 * step, J_.col(i * 7 + j));\r
+            cameras_.at<double>(i * 7 + j, 0) = val;\r
+        }\r
     }\r
 }\r
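
calcDeriv itself is not touched by this commit; judging from the call above it fills one Jacobian column with a central difference. A hypothetical equivalent, with an assumed name and signature (not the actual OpenCV code):

    // Assumed sketch: res gets (err2 - err1) / h, where h = 2 * step at the call site.
    static void calcDerivSketch(const cv::Mat &err1, const cv::Mat &err2,
                                double h, cv::Mat res)
    {
        for (int i = 0; i < err1.rows; ++i)
            res.at<double>(i, 0) = (err2.at<double>(i, 0) - err1.at<double>(i, 0)) / h;
    }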
 \r
modules/stitching/src/stitcher.cpp
index 61722ca..45a9f97 100644
@@ -186,7 +186,7 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
         Mat R;
         cameras[i].R.convertTo(R, CV_32F);
         cameras[i].R = R;
-        LOGLN("Initial focal length #" << indices[i]+1 << ": " << cameras[i].focal);
+        LOGLN("Initial intrinsic parameters #" << indices[i]+1 << ":\n " << cameras[i].K());
     }
 
     detail::BundleAdjuster adjuster(detail::BundleAdjuster::FOCAL_RAY_SPACE, conf_thresh_);
@@ -196,7 +196,7 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
     vector<double> focals;
     for (size_t i = 0; i < cameras.size(); ++i)
     {
-        LOGLN("Camera #" << indices[i]+1 << " focal length: " << cameras[i].focal);
+        LOGLN("Camera #" << indices[i]+1 << ":\n" << cameras[i].K());
         focals.push_back(cameras[i].focal);
     }
     nth_element(focals.begin(), focals.begin() + focals.size()/2, focals.end());
@@ -229,14 +229,18 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
     }
 
     // Warp images and their masks
-    Ptr<detail::Warper> warper = warper_->createByFocalLength(warped_image_scale * seam_work_aspect);
+    Ptr<detail::Warper> warper = warper_->create(warped_image_scale * seam_work_aspect);
     for (int i = 0; i < num_imgs; ++i)
     {
-        corners[i] = warper->warp(seam_est_imgs[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
-                                  cameras[i].R, images_warped[i]);
+        Mat_<float> K;
+        cameras[i].K().convertTo(K, CV_32F);
+        K(0,0) *= seam_work_aspect; K(0,2) *= seam_work_aspect;
+        K(1,1) *= seam_work_aspect; K(1,2) *= seam_work_aspect;
+
+        corners[i] = warper->warp(seam_est_imgs[i], K, cameras[i].R, images_warped[i]);
         sizes[i] = images_warped[i].size();
-        warper->warp(masks[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
-                     cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
+
+        warper->warp(masks[i], K, cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
     }
 
     vector<Mat> images_warped_f(num_imgs);
@@ -281,13 +285,15 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
 
             // Update warped image scale
             warped_image_scale *= static_cast<float>(compose_work_aspect);
-            warper = warper_->createByFocalLength(warped_image_scale);
+            warper = warper_->create(warped_image_scale);
 
             // Update corners and sizes
             for (int i = 0; i < num_imgs; ++i)
             {
-                // Update camera focal
+                // Update intrinsics
                 cameras[i].focal *= compose_work_aspect;
+                cameras[i].ppx *= compose_work_aspect;
+                cameras[i].ppy *= compose_work_aspect;
 
                 // Update corner and size
                 Size sz = full_img_sizes[i];
@@ -297,7 +303,9 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
                     sz.height = cvRound(full_img_sizes[i].height * compose_scale);
                 }
 
-                Rect roi = warper->warpRoi(sz, static_cast<float>(cameras[i].focal), cameras[i].R);
+                Mat K;
+                cameras[i].K().convertTo(K, CV_32F);
+                Rect roi = warper->warpRoi(sz, K, cameras[i].R);
                 corners[i] = roi.tl();
                 sizes[i] = roi.size();
             }
@@ -309,15 +317,16 @@ Stitcher::Status Stitcher::stitch(InputArray imgs_, OutputArray pano_)
         full_img.release();
         Size img_size = img.size();
 
+        Mat K;
+        cameras[img_idx].K().convertTo(K, CV_32F);
+
         // Warp the current image
-        warper->warp(img, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R,
-                     img_warped);
+        warper->warp(img, K, cameras[img_idx].R, img_warped);
 
         // Warp the current image mask
         mask.create(img_size, CV_8U);
         mask.setTo(Scalar::all(255));
-        warper->warp(mask, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R, mask_warped,
-                     INTER_NEAREST, BORDER_CONSTANT);
+        warper->warp(mask, K, cameras[img_idx].R, mask_warped, INTER_NEAREST, BORDER_CONSTANT);
 
         // Compensate exposure
         exposure_comp_->apply(img_idx, corners[img_idx], img_warped, mask_warped);
modules/stitching/src/warpers.cpp
index 05733a2..e7dd21e 100644
@@ -47,46 +47,30 @@ using namespace std;
 namespace cv {\r
 namespace detail {\r
 \r
-Ptr<Warper> Warper::createByCameraFocal(float focal, int type, bool try_gpu)\r
+void ProjectorBase::setCameraParams(const Mat &K, const Mat &R)\r
 {\r
-#ifndef ANDROID\r
-    bool can_use_gpu = try_gpu && gpu::getCudaEnabledDeviceCount();\r
-    if (can_use_gpu)\r
-    {\r
-        if (type == PLANE)\r
-            return new PlaneWarperGpu(focal);\r
-        if (type == CYLINDRICAL)\r
-            return new CylindricalWarperGpu(focal);\r
-        if (type == SPHERICAL)\r
-            return new SphericalWarperGpu(focal);\r
-    }\r
-    else\r
-#endif\r
-    {\r
-        if (type == PLANE)\r
-            return new PlaneWarper(focal);\r
-        if (type == CYLINDRICAL)\r
-            return new CylindricalWarper(focal);\r
-        if (type == SPHERICAL)\r
-            return new SphericalWarper(focal);\r
-    }\r
-    CV_Error(CV_StsBadArg, "unsupported warping type");\r
-    return NULL;\r
-}\r
-\r
-\r
-void ProjectorBase::setTransformation(const Mat &R)\r
-{\r
-    CV_Assert(R.size() == Size(3, 3));\r
-    CV_Assert(R.type() == CV_32F);\r
-    r[0] = R.at<float>(0, 0); r[1] = R.at<float>(0, 1); r[2] = R.at<float>(0, 2);\r
-    r[3] = R.at<float>(1, 0); r[4] = R.at<float>(1, 1); r[5] = R.at<float>(1, 2);\r
-    r[6] = R.at<float>(2, 0); r[7] = R.at<float>(2, 1); r[8] = R.at<float>(2, 2);\r
-\r
-    Mat Rinv = R.inv();\r
-    rinv[0] = Rinv.at<float>(0, 0); rinv[1] = Rinv.at<float>(0, 1); rinv[2] = Rinv.at<float>(0, 2);\r
-    rinv[3] = Rinv.at<float>(1, 0); rinv[4] = Rinv.at<float>(1, 1); rinv[5] = Rinv.at<float>(1, 2);\r
-    rinv[6] = Rinv.at<float>(2, 0); rinv[7] = Rinv.at<float>(2, 1); rinv[8] = Rinv.at<float>(2, 2);\r
+    CV_Assert(K.size() == Size(3, 3) && K.type() == CV_32F);\r
+    CV_Assert(R.size() == Size(3, 3) && R.type() == CV_32F);\r
+\r
+    Mat_<float> K_(K);\r
+    k[0] = K_(0,0); k[1] = K_(0,1); k[2] = K_(0,2);\r
+    k[3] = K_(1,0); k[4] = K_(1,1); k[5] = K_(1,2);\r
+    k[6] = K_(2,0); k[7] = K_(2,1); k[8] = K_(2,2);\r
+\r
+    Mat_<float> Rinv = R.t();\r
+    rinv[0] = Rinv(0,0); rinv[1] = Rinv(0,1); rinv[2] = Rinv(0,2);\r
+    rinv[3] = Rinv(1,0); rinv[4] = Rinv(1,1); rinv[5] = Rinv(1,2);\r
+    rinv[6] = Rinv(2,0); rinv[7] = Rinv(2,1); rinv[8] = Rinv(2,2);\r
+\r
+    Mat_<float> R_Kinv = R * K.inv();\r
+    r_kinv[0] = R_Kinv(0,0); r_kinv[1] = R_Kinv(0,1); r_kinv[2] = R_Kinv(0,2); \r
+    r_kinv[3] = R_Kinv(1,0); r_kinv[4] = R_Kinv(1,1); r_kinv[5] = R_Kinv(1,2); \r
+    r_kinv[6] = R_Kinv(2,0); r_kinv[7] = R_Kinv(2,1); r_kinv[8] = R_Kinv(2,2); \r
+\r
+    Mat_<float> K_Rinv = K * Rinv;\r
+    k_rinv[0] = K_Rinv(0,0); k_rinv[1] = K_Rinv(0,1); k_rinv[2] = K_Rinv(0,2); \r
+    k_rinv[3] = K_Rinv(1,0); k_rinv[4] = K_Rinv(1,1); k_rinv[5] = K_Rinv(1,2); \r
+    k_rinv[6] = K_Rinv(2,0); k_rinv[7] = K_Rinv(2,1); k_rinv[8] = K_Rinv(2,2); \r
 }\r
 \r
 \r
@@ -122,18 +106,16 @@ void PlaneWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
 }\r
 \r
 #ifndef ANDROID\r
-Point PlaneWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat &dst, int interp_mode, int border_mode)\r
+Point PlaneWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst, int interp_mode, int border_mode)\r
 {\r
     src_size_ = src.size();\r
-    projector_.size = src.size();\r
-    projector_.focal = focal;\r
-    projector_.setTransformation(R);\r
+    projector_.setCameraParams(K, R);\r
 \r
     Point dst_tl, dst_br;\r
     detectResultRoi(dst_tl, dst_br);\r
 \r
     gpu::buildWarpPlaneMaps(src.size(), Rect(dst_tl, Point(dst_br.x+1, dst_br.y+1)),\r
-                            R, focal, projector_.scale, projector_.plane_dist, d_xmap_, d_ymap_);\r
+                            K, R, projector_.scale, d_xmap_, d_ymap_);\r
 \r
     gpu::ensureSizeIsEnough(src.size(), src.type(), d_src_);\r
     d_src_.upload(src);\r
@@ -163,9 +145,11 @@ void SphericalWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
     float z = projector_.rinv[7];\r
     if (y > 0.f)\r
     {\r
-        x = projector_.focal * x / z + src_size_.width * 0.5f;\r
-        y = projector_.focal * y / z + src_size_.height * 0.5f;\r
-        if (x > 0.f && x < src_size_.width && y > 0.f && y < src_size_.height)\r
+        //x = projector_.focal * x / z + src_size_.width * 0.5f;\r
+        //y = projector_.focal * y / z + src_size_.height * 0.5f;\r
+        float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];\r
+        float y_ = projector_.k[4] * y / z + projector_.k[5];\r
+        if (x_ > 0.f && x_ < src_size_.width && y_ > 0.f && y_ < src_size_.height)\r
         {\r
             tl_uf = min(tl_uf, 0.f); tl_vf = min(tl_vf, static_cast<float>(CV_PI * projector_.scale));\r
             br_uf = max(br_uf, 0.f); br_vf = max(br_vf, static_cast<float>(CV_PI * projector_.scale));\r
@@ -177,9 +161,11 @@ void SphericalWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
     z = projector_.rinv[7];\r
     if (y > 0.f)\r
     {\r
-        x = projector_.focal * x / z + src_size_.width * 0.5f;\r
-        y = projector_.focal * y / z + src_size_.height * 0.5f;\r
-        if (x > 0.f && x < src_size_.width && y > 0.f && y < src_size_.height)\r
+        //x = projector_.focal * x / z + src_size_.width * 0.5f;\r
+        //y = projector_.focal * y / z + src_size_.height * 0.5f;\r
+        float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];\r
+        float y_ = projector_.k[4] * y / z + projector_.k[5];\r
+        if (x_ > 0.f && x_ < src_size_.width && y_ > 0.f && y_ < src_size_.height)\r
         {\r
             tl_uf = min(tl_uf, 0.f); tl_vf = min(tl_vf, static_cast<float>(0));\r
             br_uf = max(br_uf, 0.f); br_vf = max(br_vf, static_cast<float>(0));\r
@@ -193,19 +179,17 @@ void SphericalWarper::detectResultRoi(Point &dst_tl, Point &dst_br)
 }\r
 \r
 #ifndef ANDROID\r
-Point SphericalWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
-                                   int interp_mode, int border_mode)\r
+Point SphericalWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
+                               int interp_mode, int border_mode)\r
 {\r
     src_size_ = src.size();\r
-    projector_.size = src.size();\r
-    projector_.focal = focal;\r
-    projector_.setTransformation(R);\r
+    projector_.setCameraParams(K, R);\r
 \r
     Point dst_tl, dst_br;\r
     detectResultRoi(dst_tl, dst_br);\r
 \r
     gpu::buildWarpSphericalMaps(src.size(), Rect(dst_tl, Point(dst_br.x+1, dst_br.y+1)),\r
-                                R, focal, projector_.scale, d_xmap_, d_ymap_);\r
+                                K, R, projector_.scale, d_xmap_, d_ymap_);\r
 \r
     gpu::ensureSizeIsEnough(src.size(), src.type(), d_src_);\r
     d_src_.upload(src);\r
@@ -220,19 +204,17 @@ Point SphericalWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat &d
 }\r
 \r
 \r
-Point CylindricalWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
-                                     int interp_mode, int border_mode)\r
+Point CylindricalWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
+                                 int interp_mode, int border_mode)\r
 {\r
     src_size_ = src.size();\r
-    projector_.size = src.size();\r
-    projector_.focal = focal;\r
-    projector_.setTransformation(R);\r
+    projector_.setCameraParams(K, R);\r
 \r
     Point dst_tl, dst_br;\r
     detectResultRoi(dst_tl, dst_br);\r
 \r
     gpu::buildWarpCylindricalMaps(src.size(), Rect(dst_tl, Point(dst_br.x+1, dst_br.y+1)),\r
-                                  R, focal, projector_.scale, d_xmap_, d_ymap_);\r
+                                  K, R, projector_.scale, d_xmap_, d_ymap_);\r
 \r
     gpu::ensureSizeIsEnough(src.size(), src.type(), d_src_);\r
     d_src_.upload(src);\r
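The GPU warp path now mirrors the CPU one: set the camera parameters from K and R, detect the destination ROI, then let the reworked gpu::buildWarp*Maps produce the remap tables. A minimal sketch, not part of the patch; it builds the maps on the GPU but, for brevity, downloads them and resamples with cv::remap (the warper classes themselves keep the resampling on the GPU).

    #include <opencv2/gpu/gpu.hpp>
    #include <opencv2/imgproc/imgproc.hpp>

    using namespace cv;

    // Build spherical remap tables from a 3x3 CV_32F camera matrix K and rotation R,
    // then resample the source image into the requested destination ROI.
    static void sphericalWarpSketch(const Mat &src, const Mat &K, const Mat &R,
                                    float scale, Rect dst_roi, Mat &dst)
    {
        gpu::GpuMat map_x, map_y;
        gpu::buildWarpSphericalMaps(src.size(), dst_roi, K, R, scale, map_x, map_y);

        Mat map_x_host(map_x), map_y_host(map_y);   // download CV_32FC1 maps
        remap(src, dst, map_x_host, map_y_host, INTER_LINEAR, BORDER_REFLECT);
    }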
index ab087c5..4908abf 100644 (file)
@@ -103,7 +103,7 @@ int parseCmdArgs(int argc, char** argv)
             printUsage();
             return -1;
         }
-        else if (string(argv[i]) == "--try_gpu")
+        else if (string(argv[i]) == "--try_use_gpu")
         {
             if (string(argv[i + 1]) == "no")
                 try_use_gpu = false;
index 7c907de..fc5653c 100644 (file)
@@ -42,6 +42,7 @@
 //M*/
 
 #include <fstream>
+#include <string>
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/stitching/detail/autocalib.hpp"
 #include "opencv2/stitching/detail/blenders.hpp"
@@ -52,6 +53,7 @@
 #include "opencv2/stitching/detail/seam_finders.hpp"
 #include "opencv2/stitching/detail/util.hpp"
 #include "opencv2/stitching/detail/warpers.hpp"
+#include "opencv2/stitching/warpers.hpp"
 
 using namespace std;
 using namespace cv;
@@ -118,7 +120,7 @@ float conf_thresh = 1.f;
 bool wave_correct = true;
 bool save_graph = false;
 std::string save_graph_to;
-int warp_type = Warper::SPHERICAL;
+string warp_type = "spherical";
 int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
 float match_conf = 0.65f;
 int seam_find_type = SeamFinder::GC_COLOR;
@@ -223,17 +225,7 @@ int parseCmdArgs(int argc, char** argv)
         }
         else if (string(argv[i]) == "--warp")
         {
-            if (string(argv[i + 1]) == "plane")
-                warp_type = Warper::PLANE;
-            else if (string(argv[i + 1]) == "cylindrical")
-                warp_type = Warper::CYLINDRICAL;
-            else if (string(argv[i + 1]) == "spherical")
-                warp_type = Warper::SPHERICAL;
-            else
-            {
-                cout << "Bad warping method\n";
-                return -1;
-            }
+            warp_type = string(argv[i + 1]);
             i++;
         }
         else if (string(argv[i]) == "--expos_comp")
@@ -479,15 +471,42 @@ int main(int argc, char* argv[])
     }
 
     // Warp images and their masks
-    Ptr<Warper> warper = Warper::createByCameraFocal(static_cast<float>(warped_image_scale * seam_work_aspect),
-                                                     warp_type, try_gpu);
+
+    Ptr<WarperCreator> warper_creator;
+#ifndef ANDROID
+    if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
+    {
+        if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
+        else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
+        else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
+    }
+    else
+#endif
+    {
+        if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();
+        else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();
+        else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();
+    }
+
+    if (warper_creator.empty())
+    {
+        cout << "Can't create the following warper '" << warp_type << "'\n";
+        return 1;
+    }
+    
+    Ptr<Warper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
+
     for (int i = 0; i < num_images; ++i)
     {
-        corners[i] = warper->warp(images[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
-                                  cameras[i].R, images_warped[i]);
+        Mat_<float> K;
+        cameras[i].K().convertTo(K, CV_32F);
+        K(0,0) *= seam_work_aspect; K(0,2) *= seam_work_aspect;
+        K(1,1) *= seam_work_aspect; K(1,2) *= seam_work_aspect;
+
+        corners[i] = warper->warp(images[i], K, cameras[i].R, images_warped[i]);
         sizes[i] = images_warped[i].size();
-        warper->warp(masks[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
-                     cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
+
+        warper->warp(masks[i], K, cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
     }
 
     vector<Mat> images_warped_f(num_images);
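The sample now hands the warpers a per-image 3x3 camera matrix instead of a bare focal length, scaled to the seam-estimation resolution. A minimal sketch, not part of the patch, of that intrinsics preparation factored into a helper (the helper name is illustrative); it assumes detail::CameraParams as used above.

    #include <opencv2/stitching/detail/camera.hpp>

    using namespace cv;

    // Convert the estimated intrinsics to the CV_32F 3x3 matrix the warpers expect;
    // focal length and principal point scale linearly with the image size.
    static Mat scaledK(const detail::CameraParams &cam, double seam_work_aspect)
    {
        Mat_<float> K;
        cam.K().convertTo(K, CV_32F);
        K(0,0) *= seam_work_aspect; K(0,2) *= seam_work_aspect;
        K(1,1) *= seam_work_aspect; K(1,2) *= seam_work_aspect;
        return K;
    }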
@@ -535,23 +554,27 @@ int main(int argc, char* argv[])
 
             // Update warped image scale
             warped_image_scale *= static_cast<float>(compose_work_aspect);
-            warper = Warper::createByCameraFocal(warped_image_scale, warp_type, try_gpu);
+            warper = warper_creator->create(warped_image_scale);
 
             // Update corners and sizes
             for (int i = 0; i < num_images; ++i)
             {
-                // Update camera focal
+                // Update intrinsics
                 cameras[i].focal *= compose_work_aspect;
+                cameras[i].ppx *= compose_work_aspect;
+                cameras[i].ppy *= compose_work_aspect;
 
                 // Update corner and size
                 Size sz = full_img_sizes[i];
-                if (abs(compose_scale - 1) > 1e-1)
+                if (std::abs(compose_scale - 1) > 1e-1)
                 {
                     sz.width = cvRound(full_img_sizes[i].width * compose_scale);
                     sz.height = cvRound(full_img_sizes[i].height * compose_scale);
                 }
 
-                Rect roi = warper->warpRoi(sz, static_cast<float>(cameras[i].focal), cameras[i].R);
+                Mat K;
+                cameras[i].K().convertTo(K, CV_32F);
+                Rect roi = warper->warpRoi(sz, K, cameras[i].R);
                 corners[i] = roi.tl();
                 sizes[i] = roi.size();
             }
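Scaling an image by compose_work_aspect scales the focal length and the principal point by the same factor, which is why multiplying focal, ppx and ppy above is enough to recover the full-resolution intrinsics; warpRoi then only reports where the warped image would land and how large it would be, so corners and sizes can be refreshed without warping any pixels. In matrix form, with a = compose_work_aspect:

    K_{full} = \mathrm{diag}(a,\, a,\, 1)\, K_{work}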
@@ -563,15 +586,16 @@ int main(int argc, char* argv[])
         full_img.release();
         Size img_size = img.size();
 
+        Mat K;
+        cameras[img_idx].K().convertTo(K, CV_32F);
+
         // Warp the current image
-        warper->warp(img, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R,
-                     img_warped);
+        warper->warp(img, K, cameras[img_idx].R, img_warped);
 
         // Warp the current image mask
         mask.create(img_size, CV_8U);
         mask.setTo(Scalar::all(255));
-        warper->warp(mask, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R, mask_warped,
-                     INTER_NEAREST, BORDER_CONSTANT);
+        warper->warp(mask, K, cameras[img_idx].R, mask_warped, INTER_NEAREST, BORDER_CONSTANT);
 
         // Compensate exposure
         compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);