CV_EXPORTS void warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR, Stream& stream = Stream::Null());\r
\r
//! builds plane warping maps\r
- CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s, double dist,\r
+ CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,\r
GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null());\r
\r
//! builds cylindrical warping maps\r
- CV_EXPORTS void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,\r
+ CV_EXPORTS void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,\r
GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null());\r
\r
//! builds spherical warping maps\r
- CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,\r
+ CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,\r
GpuMat& map_x, GpuMat& map_y, Stream& stream = Stream::Null());\r
\r
//! rotates an 8-bit single- or four-channel image\r
\r
SIMPLE_TEST_CYCLE()\r
{\r
- buildWarpPlaneMaps(size, Rect(0, 0, size.width, size.height), Mat::ones(3, 3, CV_32FC1), 1.0, 1.0, 1.0, map_x, map_y);\r
+ buildWarpPlaneMaps(size, Rect(0, 0, size.width, size.height), Mat::eye(3, 3, CV_32FC1), \r
+ Mat::ones(3, 3, CV_32FC1), 1.0, map_x, map_y);\r
}\r
\r
Mat map_x_host(map_x);\r
\r
SIMPLE_TEST_CYCLE()\r
{\r
- buildWarpCylindricalMaps(size, Rect(0, 0, size.width, size.height), Mat::ones(3, 3, CV_32FC1), 1.0, 1.0, map_x, map_y);\r
+ buildWarpCylindricalMaps(size, Rect(0, 0, size.width, size.height), Mat::eye(3, 3, CV_32FC1),\r
+ Mat::ones(3, 3, CV_32FC1), 1.0, map_x, map_y);\r
}\r
\r
Mat map_x_host(map_x);\r
}\r
\r
PERF_TEST_P(DevInfo_Size, buildWarpSphericalMaps, testing::Combine(testing::ValuesIn(devices()),\r
- testing::Values(GPU_TYPICAL_MAT_SIZES)))\r
+ testing::Values(GPU_TYPICAL_MAT_SIZES)))\r
{\r
DeviceInfo devInfo = std::tr1::get<0>(GetParam());\r
Size size = std::tr1::get<1>(GetParam());\r
\r
SIMPLE_TEST_CYCLE()\r
{\r
- buildWarpSphericalMaps(size, Rect(0, 0, size.width, size.height), Mat::ones(3, 3, CV_32FC1), 1.0, 1.0, map_x, map_y);\r
+ buildWarpSphericalMaps(size, Rect(0, 0, size.width, size.height), Mat::eye(3, 3, CV_32FC1),\r
+ Mat::ones(3, 3, CV_32FC1), 1.0, map_x, map_y);\r
}\r
\r
Mat map_x_host(map_x);\r
//////////////////////////////////////////////////////////////////////////\r
// buildWarpMaps\r
\r
+ // TODO use intrinsics like __sinf and so on\r
+\r
namespace build_warp_maps\r
{\r
\r
- __constant__ float cr[9];\r
- __constant__ float crinv[9];\r
- __constant__ float cf, cs;\r
- __constant__ float chalf_w, chalf_h;\r
- __constant__ float cdist;\r
+ __constant__ float ck_rinv[9];\r
+ __constant__ float cr_kinv[9];\r
+ __constant__ float cscale;\r
}\r
\r
\r
{\r
using namespace build_warp_maps;\r
\r
- float x_ = u / cs;\r
- float y_ = v / cs;\r
+ float x_ = u / cscale;\r
+ float y_ = v / cscale;\r
\r
float z;\r
- x = crinv[0]*x_ + crinv[1]*y_ + crinv[2]*cdist;\r
- y = crinv[3]*x_ + crinv[4]*y_ + crinv[5]*cdist;\r
- z = crinv[6]*x_ + crinv[7]*y_ + crinv[8]*cdist;\r
+ x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2];\r
+ y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5];\r
+ z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8];\r
\r
- x = cf*x/z + chalf_w;\r
- y = cf*y/z + chalf_h;\r
+ x /= z;\r
+ y /= z;\r
}\r
};\r
\r
{\r
using namespace build_warp_maps;\r
\r
- u /= cs;\r
+ u /= cscale;\r
float x_ = sinf(u);\r
- float y_ = v / cs;\r
+ float y_ = v / cscale;\r
float z_ = cosf(u);\r
\r
float z;\r
- x = crinv[0]*x_ + crinv[1]*y_ + crinv[2]*z_;\r
- y = crinv[3]*x_ + crinv[4]*y_ + crinv[5]*z_;\r
- z = crinv[6]*x_ + crinv[7]*y_ + crinv[8]*z_;\r
+ x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;\r
+ y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;\r
+ z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;\r
\r
- x = cf*x/z + chalf_w;\r
- y = cf*y/z + chalf_h;\r
+ x /= z;\r
+ y /= z;\r
}\r
};\r
\r
{\r
using namespace build_warp_maps;\r
\r
- v /= cs;\r
- u /= cs;\r
+ v /= cscale;\r
+ u /= cscale;\r
\r
float sinv = sinf(v);\r
float x_ = sinv * sinf(u);\r
float z_ = sinv * cosf(u);\r
\r
float z;\r
- x = crinv[0]*x_ + crinv[1]*y_ + crinv[2]*z_;\r
- y = crinv[3]*x_ + crinv[4]*y_ + crinv[5]*z_;\r
- z = crinv[6]*x_ + crinv[7]*y_ + crinv[8]*z_;\r
+ x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;\r
+ y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;\r
+ z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;\r
\r
- x = cf*x/z + chalf_w;\r
- y = cf*y/z + chalf_h;\r
+ x /= z;\r
+ y /= z;\r
}\r
};\r
\r
\r
\r
void buildWarpPlaneMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,\r
- const float r[9], const float rinv[9], float f, float s, float dist,\r
- float half_w, float half_h, cudaStream_t stream)\r
+ const float k_rinv[9], const float r_kinv[9], float scale,\r
+ cudaStream_t stream)\r
{\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr, r, 9*sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::crinv, rinv, 9*sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cf, &f, sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cs, &s, sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_w, &half_w, sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_h, &half_h, sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cdist, &dist, sizeof(float)));\r
+ cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));\r
+ cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));\r
+ cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));\r
\r
int cols = map_x.cols;\r
int rows = map_x.rows;\r
\r
\r
void buildWarpCylindricalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,\r
- const float r[9], const float rinv[9], float f, float s,\r
- float half_w, float half_h, cudaStream_t stream)\r
+ const float k_rinv[9], const float r_kinv[9], float scale,\r
+ cudaStream_t stream)\r
{\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr, r, 9*sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::crinv, rinv, 9*sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cf, &f, sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cs, &s, sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_w, &half_w, sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_h, &half_h, sizeof(float)));\r
+ cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));\r
+ cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));\r
+ cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));\r
\r
int cols = map_x.cols;\r
int rows = map_x.rows;\r
\r
\r
void buildWarpSphericalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,\r
- const float r[9], const float rinv[9], float f, float s,\r
- float half_w, float half_h, cudaStream_t stream)\r
- {\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr, r, 9*sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::crinv, rinv, 9*sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cf, &f, sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cs, &s, sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_w, &half_w, sizeof(float)));\r
- cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::chalf_h, &half_h, sizeof(float)));\r
+ const float k_rinv[9], const float r_kinv[9], float scale,\r
+ cudaStream_t stream)\r
+ {\r
+ cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));\r
+ cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));\r
+ cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));\r
\r
int cols = map_x.cols;\r
int rows = map_x.rows;\r
void cv::gpu::copyMakeBorder(const GpuMat&, GpuMat&, int, int, int, int, const Scalar&, Stream&) { throw_nogpu(); }\r
void cv::gpu::warpAffine(const GpuMat&, GpuMat&, const Mat&, Size, int, Stream&) { throw_nogpu(); }\r
void cv::gpu::warpPerspective(const GpuMat&, GpuMat&, const Mat&, Size, int, Stream&) { throw_nogpu(); }\r
-void cv::gpu::buildWarpPlaneMaps(Size, Rect, const Mat&, double, double, double, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
-void cv::gpu::buildWarpCylindricalMaps(Size, Rect, const Mat&, double, double, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
-void cv::gpu::buildWarpSphericalMaps(Size, Rect, const Mat&, double, double, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
+void cv::gpu::buildWarpPlaneMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
+void cv::gpu::buildWarpCylindricalMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
+void cv::gpu::buildWarpSphericalMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
void cv::gpu::rotate(const GpuMat&, GpuMat&, Size, double, double, double, int, Stream&) { throw_nogpu(); }\r
void cv::gpu::integral(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
void cv::gpu::integralBuffered(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
namespace cv { namespace gpu { namespace imgproc\r
{\r
void buildWarpPlaneMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,\r
- const float r[9], const float rinv[9], float f, float s, float dist,\r
- float half_w, float half_h, cudaStream_t stream);\r
+ const float k_rinv[9], const float r_kinv[9], float scale,\r
+ cudaStream_t stream);\r
}}}\r
\r
// Host wrapper: builds per-pixel remap tables (map_x, map_y, CV_32F) for
// projecting an image onto a plane. This hunk replaces the (R, f, s, dist)
// parametrization with a camera model (K = 3x3 intrinsics, R = rotation, scale),
// precomputes K*R^-1 (backward map) and R*K^-1 (forward map), and passes their
// raw float pointers to the CUDA launcher.
// NOTE(review): R.t() is used as R^-1, which assumes R is orthonormal -- confirm callers guarantee this.
-void cv::gpu::buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,\r
- double dist, GpuMat& map_x, GpuMat& map_y, Stream& stream)\r
+void cv::gpu::buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,\r
+ GpuMat& map_x, GpuMat& map_y, Stream& stream)\r
{\r
- CV_Assert(R.size() == Size(3,3) && R.isContinuous() && R.type() == CV_32F);\r
- Mat Rinv = R.inv();\r
- CV_Assert(Rinv.isContinuous());\r
+ CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);\r
+ CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);\r
+\r
+ Mat K_Rinv = K * R.t();\r
+ Mat R_Kinv = R * K.inv();\r
// Continuity is required because ptr<float>() is handed to the device launcher below.
+ CV_Assert(K_Rinv.isContinuous());\r
+ CV_Assert(R_Kinv.isContinuous());\r
\r
map_x.create(dst_roi.size(), CV_32F);\r
map_y.create(dst_roi.size(), CV_32F);\r
- imgproc::buildWarpPlaneMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, R.ptr<float>(), Rinv.ptr<float>(),\r
- static_cast<float>(f), static_cast<float>(s), static_cast<float>(dist), \r
- 0.5f*src_size.width, 0.5f*src_size.height, StreamAccessor::getStream(stream));\r
+ imgproc::buildWarpPlaneMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),\r
+ scale, StreamAccessor::getStream(stream));\r
}\r
\r
//////////////////////////////////////////////////////////////////////////////\r
namespace cv { namespace gpu { namespace imgproc\r
{\r
void buildWarpCylindricalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,\r
- const float r[9], const float rinv[9], float f, float s,\r
- float half_w, float half_h, cudaStream_t stream);\r
+ const float k_rinv[9], const float r_kinv[9], float scale,\r
+ cudaStream_t stream);\r
}}}\r
\r
// Host wrapper: builds remap tables for a cylindrical projection. Same
// signature migration as the plane variant: (R, f, s) -> (K, R, scale);
// K*R^-1 and R*K^-1 are precomputed on the host and uploaded by the launcher.
// NOTE(review): R.t() as R^-1 assumes R is orthonormal -- confirm.
-void cv::gpu::buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,\r
+void cv::gpu::buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,\r
GpuMat& map_x, GpuMat& map_y, Stream& stream)\r
{\r
- CV_Assert(R.size() == Size(3,3) && R.isContinuous() && R.type() == CV_32F);\r
- Mat Rinv = R.inv();\r
- CV_Assert(Rinv.isContinuous());\r
+ CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);\r
+ CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);\r
+\r
+ Mat K_Rinv = K * R.t();\r
+ Mat R_Kinv = R * K.inv();\r
// ptr<float>() below requires contiguous storage.
+ CV_Assert(K_Rinv.isContinuous());\r
+ CV_Assert(R_Kinv.isContinuous());\r
\r
map_x.create(dst_roi.size(), CV_32F);\r
map_y.create(dst_roi.size(), CV_32F);\r
- imgproc::buildWarpCylindricalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, R.ptr<float>(), Rinv.ptr<float>(),\r
- static_cast<float>(f), static_cast<float>(s), 0.5f*src_size.width, 0.5f*src_size.height, \r
- StreamAccessor::getStream(stream));\r
+ imgproc::buildWarpCylindricalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),\r
+ scale, StreamAccessor::getStream(stream));\r
}\r
\r
\r
namespace cv { namespace gpu { namespace imgproc\r
{\r
void buildWarpSphericalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,\r
- const float r[9], const float rinv[9], float f, float s,\r
- float half_w, float half_h, cudaStream_t stream);\r
+ const float k_rinv[9], const float r_kinv[9], float scale,\r
+ cudaStream_t stream);\r
}}}\r
\r
// Host wrapper: builds remap tables for a spherical projection; mirrors the
// plane/cylindrical wrappers above (same (K, R, scale) migration and the same
// precomputed K*R^-1 / R*K^-1 matrices handed to the CUDA launcher).
-void cv::gpu::buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat& R, double f, double s,\r
+void cv::gpu::buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,\r
GpuMat& map_x, GpuMat& map_y, Stream& stream)\r
{\r
- CV_Assert(R.size() == Size(3,3) && R.isContinuous() && R.type() == CV_32F);\r
- Mat Rinv = R.inv();\r
- CV_Assert(Rinv.isContinuous());\r
+ CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);\r
+ CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);\r
+\r
+ Mat K_Rinv = K * R.t();\r
+ Mat R_Kinv = R * K.inv();\r
// ptr<float>() below requires contiguous storage.
+ CV_Assert(K_Rinv.isContinuous());\r
+ CV_Assert(R_Kinv.isContinuous());\r
\r
map_x.create(dst_roi.size(), CV_32F);\r
map_y.create(dst_roi.size(), CV_32F);\r
- imgproc::buildWarpSphericalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, R.ptr<float>(), Rinv.ptr<float>(),\r
- static_cast<float>(f), static_cast<float>(s), 0.5f*src_size.width, 0.5f*src_size.height, \r
- StreamAccessor::getStream(stream));\r
+ imgproc::buildWarpSphericalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),\r
+ scale, StreamAccessor::getStream(stream));\r
}\r
\r
////////////////////////////////////////////////////////////////////////\r
CameraParams();
CameraParams(const CameraParams& other);
const CameraParams& operator =(const CameraParams& other);
+ Mat K() const;
double focal; // Focal length
+ double aspect; // Aspect ratio
+ double ppx; // Principal point X
+ double ppy; // Principal point Y
Mat R; // Rotation
Mat t; // Translation
};
\r
void operator ()(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, \r
std::vector<CameraParams> &cameras)\r
- {\r
- estimate(features, pairwise_matches, cameras);\r
- }\r
+ { estimate(features, pairwise_matches, cameras); }\r
\r
protected:\r
virtual void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, \r
BundleAdjuster(int cost_space = FOCAL_RAY_SPACE, float conf_thresh = 1.f) \r
: cost_space_(cost_space), conf_thresh_(conf_thresh) {}\r
\r
+ Mat K;\r
+\r
private:\r
void estimate(const std::vector<ImageFeatures> &features, const std::vector<MatchesInfo> &pairwise_matches, \r
std::vector<CameraParams> &cameras);\r
// Abstract interface for image warpers. This hunk removes the enum/factory
// (createByCameraFocal) and changes warp()/warpRoi() to take full camera
// intrinsics K instead of a single focal length.
class CV_EXPORTS Warper\r
{\r
public:\r
- enum { PLANE, CYLINDRICAL, SPHERICAL };\r
-\r
- // TODO remove this method\r
- static Ptr<Warper> createByCameraFocal(float focal, int type, bool try_gpu = false);\r
-\r
virtual ~Warper() {}\r
// Warps src into dst; returns the top-left corner of dst in the warped plane.
- virtual Point warp(const Mat &src, float focal, const Mat& R, Mat &dst,\r
+ virtual Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
int interp_mode = INTER_LINEAR, int border_mode = BORDER_REFLECT) = 0;\r
// Computes the destination ROI for a source of size sz without warping pixels.
- virtual Rect warpRoi(const Size &sz, float focal, const Mat &R) = 0;\r
+ virtual Rect warpRoi(const Size &sz, const Mat &K, const Mat &R) = 0;\r
};\r
\r
\r
// Shared state for projector structs. The hunk replaces per-image size/focal
// plus r/rinv rotation arrays with precomputed row-major 3x3 float products:
// r_kinv = R*K^-1 (used by mapForward) and k_rinv = K*R^-1 (used by mapBackward).
struct CV_EXPORTS ProjectorBase\r
{\r
- void setTransformation(const Mat& R);\r
+ void setCameraParams(const Mat &K, const Mat &R);\r
\r
- Size size;\r
- float focal;\r
- float r[9];\r
- float rinv[9];\r
float scale;\r
+ float k[9];\r
+ float rinv[9];\r
+ float r_kinv[9];\r
+ float k_rinv[9];\r
};\r
\r
\r
class CV_EXPORTS WarperBase : public Warper\r
{ \r
public:\r
- virtual Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
+ virtual Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
int interp_mode, int border_mode);\r
\r
- virtual Rect warpRoi(const Size &sz, float focal, const Mat &R);\r
+ virtual Rect warpRoi(const Size &sz, const Mat &K, const Mat &R);\r
\r
protected:\r
// Detects ROI of the destination image. It's correct for any projection.\r
{\r
void mapForward(float x, float y, float &u, float &v);\r
void mapBackward(float u, float v, float &x, float &y);\r
- float plane_dist;\r
};\r
\r
\r
class CV_EXPORTS PlaneWarper : public WarperBase<PlaneProjector>\r
{\r
public:\r
- PlaneWarper(float plane_dist = 1.f, float scale = 1.f)\r
- {\r
- projector_.plane_dist = plane_dist;\r
- projector_.scale = scale;\r
- }\r
+ PlaneWarper(float scale = 1.f) { projector_.scale = scale; }\r
\r
protected:\r
void detectResultRoi(Point &dst_tl, Point &dst_br);\r
class CV_EXPORTS PlaneWarperGpu : public PlaneWarper\r
{\r
public:\r
- PlaneWarperGpu(float plane_dist = 1.f, float scale = 1.f) : PlaneWarper(plane_dist, scale) {}\r
- Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
+ PlaneWarperGpu(float scale = 1.f) : PlaneWarper(scale) {}\r
+ Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
int interp_mode, int border_mode);\r
\r
private:\r
class CV_EXPORTS SphericalWarper : public WarperBase<SphericalProjector>\r
{\r
public:\r
- SphericalWarper(float scale = 300.f) { projector_.scale = scale; }\r
+ SphericalWarper(float scale) { projector_.scale = scale; }\r
\r
protected:\r
void detectResultRoi(Point &dst_tl, Point &dst_br);\r
class CV_EXPORTS SphericalWarperGpu : public SphericalWarper\r
{\r
public:\r
- SphericalWarperGpu(float scale = 300.f) : SphericalWarper(scale) {}\r
- Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
- int interp_mode, int border_mode);\r
+ SphericalWarperGpu(float scale) : SphericalWarper(scale) {}\r
+ Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
+ int interp_mode, int border_mode);\r
\r
private:\r
gpu::GpuMat d_xmap_, d_ymap_, d_dst_, d_src_;\r
// Cylindrical warper. The hunk drops the 300.f default scale (scale is now a
// required argument) and collapses detectResultRoi to a one-liner that
// delegates to the border-based ROI detection of the base class.
class CV_EXPORTS CylindricalWarper : public WarperBase<CylindricalProjector>\r
{\r
public:\r
- CylindricalWarper(float scale = 300.f) { projector_.scale = scale; }\r
+ CylindricalWarper(float scale) { projector_.scale = scale; }\r
\r
protected:\r
void detectResultRoi(Point &dst_tl, Point &dst_br)\r
- {\r
- WarperBase<CylindricalProjector>::detectResultRoiByBorder(dst_tl, dst_br);\r
- }\r
+ { WarperBase<CylindricalProjector>::detectResultRoiByBorder(dst_tl, dst_br); }\r
};\r
\r
\r
class CV_EXPORTS CylindricalWarperGpu : public CylindricalWarper\r
{\r
public:\r
- CylindricalWarperGpu(float scale = 300.f) : CylindricalWarper(scale) {}\r
- Point warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
- int interp_mode, int border_mode);\r
+ CylindricalWarperGpu(float scale) : CylindricalWarper(scale) {}\r
+ Point warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
+ int interp_mode, int border_mode);\r
\r
private:\r
gpu::GpuMat d_xmap_, d_ymap_, d_dst_, d_src_;\r
namespace detail {\r
\r
template <class P>\r
-Point WarperBase<P>::warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
+Point WarperBase<P>::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
int interp_mode, int border_mode)\r
{\r
src_size_ = src.size();\r
-\r
- projector_.size = src.size();\r
- projector_.focal = focal;\r
- projector_.setTransformation(R);\r
+ projector_.setCameraParams(K, R);\r
\r
Point dst_tl, dst_br;\r
detectResultRoi(dst_tl, dst_br);\r
\r
\r
template <class P>\r
-Rect WarperBase<P>::warpRoi(const Size &sz, float focal, const Mat &R)\r
+Rect WarperBase<P>::warpRoi(const Size &sz, const Mat &K, const Mat &R)\r
{\r
src_size_ = sz;\r
-\r
- projector_.size = sz;\r
- projector_.focal = focal;\r
- projector_.setTransformation(R);\r
+ projector_.setCameraParams(K, R);\r
\r
Point dst_tl, dst_br;\r
detectResultRoi(dst_tl, dst_br);\r
// Forward plane projection: maps a source pixel (x, y) to warped coords (u, v).
// The new form applies r_kinv (= R*K^-1) to the homogeneous pixel and divides
// by z_, so the principal-point shift and focal scaling previously done here
// now live inside K.
inline\r
void PlaneProjector::mapForward(float x, float y, float &u, float &v)\r
{\r
- x -= size.width * 0.5f;\r
- y -= size.height * 0.5f;\r
+ float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\r
+ float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\r
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\r
\r
- float x_ = r[0] * x + r[1] * y + r[2] * focal;\r
- float y_ = r[3] * x + r[4] * y + r[5] * focal;\r
- float z_ = r[6] * x + r[7] * y + r[8] * focal;\r
-\r
- u = scale * x_ / z_ * plane_dist;\r
- v = scale * y_ / z_ * plane_dist;\r
+ u = scale * x_ / z_;\r
+ v = scale * y_ / z_;\r
}\r
\r
\r
// Backward plane projection: maps warped coords (u, v) back to source pixel
// (x, y). Applies k_rinv (= K*R^-1) to the de-scaled point and performs the
// perspective divide; the removed focal/half-size terms are folded into K.
// NOTE(review): no guard against z == 0 (point at infinity) -- same as original.
inline\r
void PlaneProjector::mapBackward(float u, float v, float &x, float &y)\r
{\r
- float x_ = u / scale;\r
- float y_ = v / scale;\r
+ u /= scale;\r
+ v /= scale;\r
\r
float z;\r
- x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * plane_dist;\r
- y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * plane_dist;\r
- z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * plane_dist;\r
+ x = k_rinv[0] * u + k_rinv[1] * v + k_rinv[2];\r
+ y = k_rinv[3] * u + k_rinv[4] * v + k_rinv[5];\r
+ z = k_rinv[6] * u + k_rinv[7] * v + k_rinv[8];\r
\r
- x = focal * x / z + size.width * 0.5f;\r
- y = focal * y / z + size.height * 0.5f;\r
+ x /= z;\r
+ y /= z;\r
}\r
\r
\r
inline\r
void SphericalProjector::mapForward(float x, float y, float &u, float &v)\r
-{\r
- x -= size.width * 0.5f;\r
- y -= size.height * 0.5f;\r
-\r
- float x_ = r[0] * x + r[1] * y + r[2] * focal;\r
- float y_ = r[3] * x + r[4] * y + r[5] * focal;\r
- float z_ = r[6] * x + r[7] * y + r[8] * focal;\r
+{ \r
+ float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\r
+ float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\r
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\r
\r
u = scale * atan2f(x_, z_);\r
v = scale * (static_cast<float>(CV_PI) - acosf(y_ / sqrtf(x_ * x_ + y_ * y_ + z_ * z_)));\r
// Backward spherical projection: (u, v) are longitude/colatitude scaled by
// 'scale'; reconstructs the unit ray on the sphere, rotates it into the
// camera frame with k_rinv (= K*R^-1), then perspective-divides. The hunk
// hoists the /scale out of the trig calls (identical math, fewer divisions).
inline\r
void SphericalProjector::mapBackward(float u, float v, float &x, float &y)\r
{\r
- float sinv = sinf(static_cast<float>(CV_PI) - v / scale);\r
- float x_ = sinv * sinf(u / scale);\r
- float y_ = cosf(static_cast<float>(CV_PI) - v / scale);\r
- float z_ = sinv * cosf(u / scale);\r
+ u /= scale;\r
+ v /= scale;\r
+\r
+ float sinv = sinf(static_cast<float>(CV_PI) - v);\r
+ float x_ = sinv * sinf(u);\r
+ float y_ = cosf(static_cast<float>(CV_PI) - v);\r
+ float z_ = sinv * cosf(u);\r
\r
float z;\r
- x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * z_;\r
- y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * z_;\r
- z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * z_;\r
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\r
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\r
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\r
\r
- x = focal * x / z + size.width * 0.5f;\r
- y = focal * y / z + size.height * 0.5f;\r
+ x /= z;\r
+ y /= z;\r
}\r
\r
\r
inline\r
void CylindricalProjector::mapForward(float x, float y, float &u, float &v)\r
{\r
- x -= size.width * 0.5f;\r
- y -= size.height * 0.5f;\r
-\r
- float x_ = r[0] * x + r[1] * y + r[2] * focal;\r
- float y_ = r[3] * x + r[4] * y + r[5] * focal;\r
- float z_ = r[6] * x + r[7] * y + r[8] * focal;\r
+ float x_ = r_kinv[0] * x + r_kinv[1] * y + r_kinv[2];\r
+ float y_ = r_kinv[3] * x + r_kinv[4] * y + r_kinv[5];\r
+ float z_ = r_kinv[6] * x + r_kinv[7] * y + r_kinv[8];\r
\r
u = scale * atan2f(x_, z_);\r
v = scale * y_ / sqrtf(x_ * x_ + z_ * z_);\r
// Backward cylindrical projection: u is the scaled angle around the cylinder,
// v the scaled height. Reconstructs the cylinder point, rotates with k_rinv
// (= K*R^-1), then perspective-divides. The hunk hoists /scale out of the
// trig calls -- identical math to the removed lines.
inline\r
void CylindricalProjector::mapBackward(float u, float v, float &x, float &y)\r
{\r
- float x_ = sinf(u / scale);\r
- float y_ = v / scale;\r
- float z_ = cosf(u / scale);\r
+ u /= scale;\r
+ v /= scale;\r
+\r
+ float x_ = sinf(u);\r
+ float y_ = v;\r
+ float z_ = cosf(u);\r
\r
float z;\r
- x = rinv[0] * x_ + rinv[1] * y_ + rinv[2] * z_;\r
- y = rinv[3] * x_ + rinv[4] * y_ + rinv[5] * z_;\r
- z = rinv[6] * x_ + rinv[7] * y_ + rinv[8] * z_;\r
+ x = k_rinv[0] * x_ + k_rinv[1] * y_ + k_rinv[2] * z_;\r
+ y = k_rinv[3] * x_ + k_rinv[4] * y_ + k_rinv[5] * z_;\r
+ z = k_rinv[6] * x_ + k_rinv[7] * y_ + k_rinv[8] * z_;\r
\r
- x = focal * x / z + size.width * 0.5f;\r
- y = focal * y / z + size.height * 0.5f;\r
+ x /= z;\r
+ y /= z;\r
}\r
\r
} // namespace detail\r
{
public:
virtual ~WarperCreator() {}
- virtual Ptr<detail::Warper> createByFocalLength(double f) const = 0;
+ virtual Ptr<detail::Warper> create(float scale) const = 0;
};
// Factory creating a detail::PlaneWarper; the hunk renames
// createByFocalLength(double) to create(float scale) across all creators.
class PlaneWarper : public WarperCreator
{
public:
-    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::PlaneWarper(f); }
+    Ptr<detail::Warper> create(float scale) const { return new detail::PlaneWarper(scale); }
};
// Factory creating a detail::CylindricalWarper with the given scale.
class CylindricalWarper: public WarperCreator
{
public:
-    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::CylindricalWarper(f); }
+    Ptr<detail::Warper> create(float scale) const { return new detail::CylindricalWarper(scale); }
};
// Factory creating a detail::SphericalWarper with the given scale.
class SphericalWarper: public WarperCreator
{
public:
-    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::SphericalWarper(f); }
+    Ptr<detail::Warper> create(float scale) const { return new detail::SphericalWarper(scale); }
};
// Factory creating the GPU-accelerated plane warper with the given scale.
class PlaneWarperGpu: public WarperCreator
{
public:
-    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::PlaneWarperGpu(f); }
+    Ptr<detail::Warper> create(float scale) const { return new detail::PlaneWarperGpu(scale); }
};
// Factory creating the GPU-accelerated cylindrical warper with the given scale.
class CylindricalWarperGpu: public WarperCreator
{
public:
-    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::CylindricalWarperGpu(f); }
+    Ptr<detail::Warper> create(float scale) const { return new detail::CylindricalWarperGpu(scale); }
};
// Factory creating the GPU-accelerated spherical warper with the given scale.
class SphericalWarperGpu: public WarperCreator
{
public:
-    Ptr<detail::Warper> createByFocalLength(double f) const { return new detail::SphericalWarperGpu(f); }
+    Ptr<detail::Warper> create(float scale) const { return new detail::SphericalWarperGpu(scale); }
};
#endif
namespace cv {
namespace detail {
// Default camera: unit focal, square pixels (aspect = 1), principal point at
// the origin, identity rotation, zero translation. The hunk adds the new
// aspect/ppx/ppy intrinsics introduced by CameraParams::K().
-CameraParams::CameraParams() : focal(1), R(Mat::eye(3, 3, CV_64F)), t(Mat::zeros(3, 1, CV_64F)) {}
+CameraParams::CameraParams() : focal(1), aspect(1), ppx(0), ppy(0),
+                               R(Mat::eye(3, 3, CV_64F)), t(Mat::zeros(3, 1, CV_64F)) {}
// Copy constructor: delegates to operator= so the deep-copy logic for the
// R and t matrices is implemented in exactly one place.
CameraParams::CameraParams(const CameraParams &other)
{
    *this = other;
}
// Assignment: copies scalar intrinsics and deep-copies (clones) R and t so the
// two CameraParams never share Mat storage. The hunk adds the new
// ppx/ppy/aspect fields to keep the copy complete.
const CameraParams& CameraParams::operator =(const CameraParams &other)
{
    focal = other.focal;
+    ppx = other.ppx;
+    ppy = other.ppy;
+    aspect = other.aspect;
    R = other.R.clone();
    t = other.t.clone();
    return *this;
}
// Assembles the 3x3 CV_64F intrinsic matrix from the stored scalars:
// [ focal      0      ppx ]
// [   0   focal*aspect ppy ]
// [   0        0       1  ]
+Mat CameraParams::K() const
+{
+    Mat_<double> k = Mat::eye(3, 3, CV_64F);
+    k(0,0) = focal; k(0,2) = ppx;
+    k(1,1) = focal * aspect; k(1,2) = ppy;
+    return k;
+}
+
} // namespace detail
} // namespace cv
findMaxSpanningTree(num_images, pairwise_matches, span_tree, span_tree_centers);\r
span_tree.walkBreadthFirst(span_tree_centers[0], CalcRotation(num_images, pairwise_matches, cameras));\r
\r
+ // As calculations were performed under assumption that p.p. is in image center\r
+ for (int i = 0; i < num_images; ++i)\r
+ {\r
+ cameras[i].ppx = 0.5 * features[i].img_size.width;\r
+ cameras[i].ppy = 0.5 * features[i].img_size.height;\r
+ }\r
+\r
LOGLN("Estimating rotations, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");\r
}\r
\r
pairwise_matches_ = &pairwise_matches[0];\r
\r
// Prepare focals and rotations\r
- cameras_.create(num_images_ * 4, 1, CV_64F);\r
+ cameras_.create(num_images_ * 7, 1, CV_64F);\r
SVD svd;\r
for (int i = 0; i < num_images_; ++i)\r
{\r
- cameras_.at<double>(i * 4, 0) = cameras[i].focal;\r
+ cameras_.at<double>(i * 7, 0) = cameras[i].focal;\r
+ cameras_.at<double>(i * 7 + 1, 0) = cameras[i].ppx;\r
+ cameras_.at<double>(i * 7 + 2, 0) = cameras[i].ppy;\r
+ cameras_.at<double>(i * 7 + 3, 0) = cameras[i].aspect;\r
\r
svd(cameras[i].R, SVD::FULL_UV);\r
Mat R = svd.u * svd.vt;\r
\r
Mat rvec;\r
Rodrigues(R, rvec); CV_Assert(rvec.type() == CV_32F);\r
- cameras_.at<double>(i * 4 + 1, 0) = rvec.at<float>(0, 0);\r
- cameras_.at<double>(i * 4 + 2, 0) = rvec.at<float>(1, 0);\r
- cameras_.at<double>(i * 4 + 3, 0) = rvec.at<float>(2, 0);\r
+ cameras_.at<double>(i * 7 + 4, 0) = rvec.at<float>(0, 0);\r
+ cameras_.at<double>(i * 7 + 5, 0) = rvec.at<float>(1, 0);\r
+ cameras_.at<double>(i * 7 + 6, 0) = rvec.at<float>(2, 0);\r
}\r
\r
// Select only consistent image pairs for further adjustment\r
for (size_t i = 0; i < edges_.size(); ++i)\r
total_num_matches_ += static_cast<int>(pairwise_matches[edges_[i].first * num_images_ + edges_[i].second].num_inliers);\r
\r
- CvLevMarq solver(num_images_ * 4, total_num_matches_ * 3,\r
+ CvLevMarq solver(num_images_ * 7, total_num_matches_ * 2,\r
cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 1000, DBL_EPSILON));\r
\r
CvMat matParams = cameras_;\r
}\r
}\r
LOGLN("");\r
- LOGLN("Bundle adjustment, final error: " << sqrt(err_.dot(err_)));\r
+ LOGLN("Bundle adjustment, final error: " << sqrt(err_.dot(err_)) / total_num_matches_);\r
LOGLN("Bundle adjustment, iterations done: " << count);\r
\r
// Obtain global motion\r
for (int i = 0; i < num_images_; ++i)\r
{\r
- cameras[i].focal = cameras_.at<double>(i * 4, 0);\r
+ cameras[i].focal = cameras_.at<double>(i * 7, 0);\r
+ cameras[i].ppx = cameras_.at<double>(i * 7 + 1, 0);\r
+ cameras[i].ppy = cameras_.at<double>(i * 7 + 2, 0);\r
+ cameras[i].aspect = cameras_.at<double>(i * 7 + 3, 0);\r
Mat rvec(3, 1, CV_64F);\r
- rvec.at<double>(0, 0) = cameras_.at<double>(i * 4 + 1, 0);\r
- rvec.at<double>(1, 0) = cameras_.at<double>(i * 4 + 2, 0);\r
- rvec.at<double>(2, 0) = cameras_.at<double>(i * 4 + 3, 0);\r
+ rvec.at<double>(0, 0) = cameras_.at<double>(i * 7 + 4, 0);\r
+ rvec.at<double>(1, 0) = cameras_.at<double>(i * 7 + 5, 0);\r
+ rvec.at<double>(2, 0) = cameras_.at<double>(i * 7 + 6, 0);\r
Rodrigues(rvec, cameras[i].R);\r
Mat Mf;\r
cameras[i].R.convertTo(Mf, CV_32F);\r
\r
// Residual vector for bundle adjustment: for every inlier feature match the
// keypoint in image j is compared against the image-i keypoint projected
// through the pairwise homography H = K2 * R2^-1 * R1 * K1^-1.
// NOTE(review): this chunk is diff-style text; added comments carry the same
// +/- markers as the surrounding lines.
void BundleAdjuster::calcError(Mat &err)\r
{\r
- err.create(total_num_matches_ * 3, 1, CV_64F);\r
+ // Two rows per match (x and y pixel residuals), replacing the former 3-row ray error.\r
+ err.create(total_num_matches_ * 2, 1, CV_64F);\r
\r
int match_idx = 0;\r
for (size_t edge_idx = 0; edge_idx < edges_.size(); ++edge_idx)\r
{\r
int i = edges_[edge_idx].first;\r
int j = edges_[edge_idx].second;\r
- double f1 = cameras_.at<double>(i * 4, 0);\r
- double f2 = cameras_.at<double>(j * 4, 0);\r
- double R1[9], R2[9];\r
- Mat R1_(3, 3, CV_64F, R1), R2_(3, 3, CV_64F, R2);\r
+ // Camera parameter layout is now 7 doubles per camera:\r
+ // [focal, ppx, ppy, aspect, rvec_x, rvec_y, rvec_z].\r
+ double f1 = cameras_.at<double>(i * 7, 0);\r
+ double f2 = cameras_.at<double>(j * 7, 0);\r
+ double ppx1 = cameras_.at<double>(i * 7 + 1, 0);\r
+ double ppx2 = cameras_.at<double>(j * 7 + 1, 0);\r
+ double ppy1 = cameras_.at<double>(i * 7 + 2, 0);\r
+ double ppy2 = cameras_.at<double>(j * 7 + 2, 0);\r
+ double a1 = cameras_.at<double>(i * 7 + 3, 0);\r
+ double a2 = cameras_.at<double>(j * 7 + 3, 0);\r
+\r
+ // Rotation of camera i, recovered from its rotation vector via Rodrigues.\r
+ double R1[9];\r
+ Mat R1_(3, 3, CV_64F, R1);\r
Mat rvec(3, 1, CV_64F);\r
- rvec.at<double>(0, 0) = cameras_.at<double>(i * 4 + 1, 0);\r
- rvec.at<double>(1, 0) = cameras_.at<double>(i * 4 + 2, 0);\r
- rvec.at<double>(2, 0) = cameras_.at<double>(i * 4 + 3, 0);\r
- Rodrigues(rvec, R1_); CV_Assert(R1_.type() == CV_64F);\r
- rvec.at<double>(0, 0) = cameras_.at<double>(j * 4 + 1, 0);\r
- rvec.at<double>(1, 0) = cameras_.at<double>(j * 4 + 2, 0);\r
- rvec.at<double>(2, 0) = cameras_.at<double>(j * 4 + 3, 0);\r
- Rodrigues(rvec, R2_); CV_Assert(R2_.type() == CV_64F);\r
+ rvec.at<double>(0, 0) = cameras_.at<double>(i * 7 + 4, 0);\r
+ rvec.at<double>(1, 0) = cameras_.at<double>(i * 7 + 5, 0);\r
+ rvec.at<double>(2, 0) = cameras_.at<double>(i * 7 + 6, 0);\r
+ Rodrigues(rvec, R1_);\r
+\r
+ // Rotation of camera j.\r
+ double R2[9];\r
+ Mat R2_(3, 3, CV_64F, R2);\r
+ rvec.at<double>(0, 0) = cameras_.at<double>(j * 7 + 4, 0);\r
+ rvec.at<double>(1, 0) = cameras_.at<double>(j * 7 + 5, 0);\r
+ rvec.at<double>(2, 0) = cameras_.at<double>(j * 7 + 6, 0);\r
+ Rodrigues(rvec, R2_);\r
\r
const ImageFeatures& features1 = features_[i];\r
const ImageFeatures& features2 = features_[j];\r
const MatchesInfo& matches_info = pairwise_matches_[i * num_images_ + j];\r
\r
+ // Intrinsic matrices assembled from the current parameter estimates;\r
+ // a1/a2 are the aspect ratios applied to the y focal length.\r
+ Mat_<double> K1 = Mat::eye(3, 3, CV_64F);\r
+ K1(0,0) = f1; K1(0,2) = ppx1;\r
+ K1(1,1) = f1*a1; K1(1,2) = ppy1;\r
+\r
+ Mat_<double> K2 = Mat::eye(3, 3, CV_64F);\r
+ K2(0,0) = f2; K2(0,2) = ppx2;\r
+ K2(1,1) = f2*a2; K2(1,2) = ppy2;\r
+\r
+ // Homography taking image-i pixel coordinates to image-j pixel coordinates.\r
+ Mat_<double> H = K2 * R2_.inv() * R1_ * K1.inv();\r
+\r
for (size_t k = 0; k < matches_info.matches.size(); ++k)\r
{\r
if (!matches_info.inliers_mask[k])\r
continue;\r
-\r
const DMatch& m = matches_info.matches[k];\r
\r
- Point2d kp1 = features1.keypoints[m.queryIdx].pt;\r
- kp1.x -= 0.5 * features1.img_size.width;\r
- kp1.y -= 0.5 * features1.img_size.height;\r
- Point2d kp2 = features2.keypoints[m.trainIdx].pt;\r
- kp2.x -= 0.5 * features2.img_size.width;\r
- kp2.y -= 0.5 * features2.img_size.height;\r
- double len1 = sqrt(kp1.x * kp1.x + kp1.y * kp1.y + f1 * f1);\r
- double len2 = sqrt(kp2.x * kp2.x + kp2.y * kp2.y + f2 * f2);\r
- Point3d p1(kp1.x / len1, kp1.y / len1, f1 / len1);\r
- Point3d p2(kp2.x / len2, kp2.y / len2, f2 / len2);\r
-\r
- Point3d d1(p1.x * R1[0] + p1.y * R1[1] + p1.z * R1[2],\r
- p1.x * R1[3] + p1.y * R1[4] + p1.z * R1[5],\r
- p1.x * R1[6] + p1.y * R1[7] + p1.z * R1[8]);\r
- Point3d d2(p2.x * R2[0] + p2.y * R2[1] + p2.z * R2[2],\r
- p2.x * R2[3] + p2.y * R2[4] + p2.z * R2[5],\r
- p2.x * R2[6] + p2.y * R2[7] + p2.z * R2[8]);\r
-\r
- double mult = 1; // For cost_space_ == RAY_SPACE\r
- if (cost_space_ == FOCAL_RAY_SPACE)\r
- mult = sqrt(f1 * f2);\r
- err.at<double>(3 * match_idx, 0) = mult * (d1.x - d2.x);\r
- err.at<double>(3 * match_idx + 1, 0) = mult * (d1.y - d2.y);\r
- err.at<double>(3 * match_idx + 2, 0) = mult * (d1.z - d2.z);\r
+ // Project keypoint 1 through H (homogeneous divide by z) and take the\r
+ // residual against the measured keypoint 2.\r
+ Point2d p1 = features1.keypoints[m.queryIdx].pt;\r
+ Point2d p2 = features2.keypoints[m.trainIdx].pt;\r
+ double x = H(0,0)*p1.x + H(0,1)*p1.y + H(0,2);\r
+ double y = H(1,0)*p1.x + H(1,1)*p1.y + H(1,2);\r
+ double z = H(2,0)*p1.x + H(2,1)*p1.y + H(2,2);\r
+\r
+ err.at<double>(2 * match_idx, 0) = p2.x - x/z;\r
+ err.at<double>(2 * match_idx + 1, 0) = p2.y - y/z;\r
match_idx++;\r
}\r
}\r
\r
// Numerical Jacobian of the residual vector w.r.t. all camera parameters,
// computed by central differences: J[:, c] = (err(p_c + h) - err(p_c - h)) / 2h.
void BundleAdjuster::calcJacobian()\r
{\r
- J_.create(total_num_matches_ * 3, num_images_ * 4, CV_64F);\r
+ // 2 residual rows per match; 7 parameter columns per camera.\r
+ J_.create(total_num_matches_ * 2, num_images_ * 7, CV_64F);\r
\r
- double f, r;\r
- const double df = 0.001; // Focal length step\r
- const double dr = 0.001; // Angle step\r
+ double val;\r
+ // Single finite-difference step shared by every parameter\r
+ // (focal, principal point, aspect, rotation vector).\r
+ const double step = 1e-3;\r
\r
for (int i = 0; i < num_images_; ++i)\r
{\r
- f = cameras_.at<double>(i * 4, 0);\r
- cameras_.at<double>(i * 4, 0) = f - df;\r
- calcError(err1_);\r
- cameras_.at<double>(i * 4, 0) = f + df;\r
- calcError(err2_);\r
- calcDeriv(err1_, err2_, 2 * df, J_.col(i * 4));\r
- cameras_.at<double>(i * 4, 0) = f;\r
-\r
- r = cameras_.at<double>(i * 4 + 1, 0);\r
- cameras_.at<double>(i * 4 + 1, 0) = r - dr;\r
- calcError(err1_);\r
- cameras_.at<double>(i * 4 + 1, 0) = r + dr;\r
- calcError(err2_);\r
- calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 1));\r
- cameras_.at<double>(i * 4 + 1, 0) = r;\r
-\r
- r = cameras_.at<double>(i * 4 + 2, 0);\r
- cameras_.at<double>(i * 4 + 2, 0) = r - dr;\r
- calcError(err1_);\r
- cameras_.at<double>(i * 4 + 2, 0) = r + dr;\r
- calcError(err2_);\r
- calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 2));\r
- cameras_.at<double>(i * 4 + 2, 0) = r;\r
-\r
- r = cameras_.at<double>(i * 4 + 3, 0);\r
- cameras_.at<double>(i * 4 + 3, 0) = r - dr;\r
- calcError(err1_);\r
- cameras_.at<double>(i * 4 + 3, 0) = r + dr;\r
- calcError(err2_);\r
- calcDeriv(err1_, err2_, 2 * dr, J_.col(i * 4 + 3));\r
- cameras_.at<double>(i * 4 + 3, 0) = r;\r
+ // Perturb parameter j of camera i in both directions, evaluate the\r
+ // residuals, take the central difference, then restore the value.\r
+ for (int j = 0; j < 7; ++j)\r
+ {\r
+ val = cameras_.at<double>(i * 7 + j, 0);\r
+ cameras_.at<double>(i * 7+ j, 0) = val - step;\r
+ calcError(err1_);\r
+ cameras_.at<double>(i * 7 + j, 0) = val + step;\r
+ calcError(err2_);\r
+ calcDeriv(err1_, err2_, 2 * step, J_.col(i * 7 + j));\r
+ cameras_.at<double>(i * 7 + j, 0) = val;\r
+ }\r
}\r
}\r
\r
Mat R;
cameras[i].R.convertTo(R, CV_32F);
cameras[i].R = R;
- LOGLN("Initial focal length #" << indices[i]+1 << ": " << cameras[i].focal);
+ LOGLN("Initial intrinsic parameters #" << indices[i]+1 << ":\n " << cameras[i].K());
}
detail::BundleAdjuster adjuster(detail::BundleAdjuster::FOCAL_RAY_SPACE, conf_thresh_);
vector<double> focals;
for (size_t i = 0; i < cameras.size(); ++i)
{
- LOGLN("Camera #" << indices[i]+1 << " focal length: " << cameras[i].focal);
+ LOGLN("Camera #" << indices[i]+1 << ":\n" << cameras[i].K());
focals.push_back(cameras[i].focal);
}
nth_element(focals.begin(), focals.begin() + focals.size()/2, focals.end());
}
// Warp images and their masks
- Ptr<detail::Warper> warper = warper_->createByFocalLength(warped_image_scale * seam_work_aspect);
+ Ptr<detail::Warper> warper = warper_->create(warped_image_scale * seam_work_aspect);
for (int i = 0; i < num_imgs; ++i)
{
- corners[i] = warper->warp(seam_est_imgs[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
- cameras[i].R, images_warped[i]);
+ Mat_<float> K;
+ cameras[i].K().convertTo(K, CV_32F);
+ K(0,0) *= seam_work_aspect; K(0,2) *= seam_work_aspect;
+ K(1,1) *= seam_work_aspect; K(1,2) *= seam_work_aspect;
+
+ corners[i] = warper->warp(seam_est_imgs[i], K, cameras[i].R, images_warped[i]);
sizes[i] = images_warped[i].size();
- warper->warp(masks[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
- cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
+
+ warper->warp(masks[i], K, cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
}
vector<Mat> images_warped_f(num_imgs);
// Update warped image scale
warped_image_scale *= static_cast<float>(compose_work_aspect);
- warper = warper_->createByFocalLength(warped_image_scale);
+ warper = warper_->create(warped_image_scale);
// Update corners and sizes
for (int i = 0; i < num_imgs; ++i)
{
- // Update camera focal
+ // Update intrinsics
cameras[i].focal *= compose_work_aspect;
+ cameras[i].ppx *= compose_work_aspect;
+ cameras[i].ppy *= compose_work_aspect;
// Update corner and size
Size sz = full_img_sizes[i];
sz.height = cvRound(full_img_sizes[i].height * compose_scale);
}
- Rect roi = warper->warpRoi(sz, static_cast<float>(cameras[i].focal), cameras[i].R);
+ Mat K;
+ cameras[i].K().convertTo(K, CV_32F);
+ Rect roi = warper->warpRoi(sz, K, cameras[i].R);
corners[i] = roi.tl();
sizes[i] = roi.size();
}
full_img.release();
Size img_size = img.size();
+ Mat K;
+ cameras[img_idx].K().convertTo(K, CV_32F);
+
// Warp the current image
- warper->warp(img, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R,
- img_warped);
+ warper->warp(img, K, cameras[img_idx].R, img_warped);
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
- warper->warp(mask, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R, mask_warped,
- INTER_NEAREST, BORDER_CONSTANT);
+ warper->warp(mask, K, cameras[img_idx].R, mask_warped, INTER_NEAREST, BORDER_CONSTANT);
// Compensate exposure
exposure_comp_->apply(img_idx, corners[img_idx], img_warped, mask_warped);
namespace cv {\r
namespace detail {\r
\r
// Replaces the old focal-only setTransformation(): caches the full camera
// intrinsics K together with the rotation R for use by the projectors.
// (The removed createByCameraFocal factory is superseded by per-warper
// WarperCreator classes elsewhere in this change.)
-Ptr<Warper> Warper::createByCameraFocal(float focal, int type, bool try_gpu)\r
+void ProjectorBase::setCameraParams(const Mat &K, const Mat &R)\r
{\r
-#ifndef ANDROID\r
- bool can_use_gpu = try_gpu && gpu::getCudaEnabledDeviceCount();\r
- if (can_use_gpu)\r
- {\r
- if (type == PLANE)\r
- return new PlaneWarperGpu(focal);\r
- if (type == CYLINDRICAL)\r
- return new CylindricalWarperGpu(focal);\r
- if (type == SPHERICAL)\r
- return new SphericalWarperGpu(focal);\r
- }\r
- else\r
-#endif\r
- {\r
- if (type == PLANE)\r
- return new PlaneWarper(focal);\r
- if (type == CYLINDRICAL)\r
- return new CylindricalWarper(focal);\r
- if (type == SPHERICAL)\r
- return new SphericalWarper(focal);\r
- }\r
- CV_Error(CV_StsBadArg, "unsupported warping type");\r
- return NULL;\r
-}\r
-\r
-\r
-void ProjectorBase::setTransformation(const Mat &R)\r
-{\r
- CV_Assert(R.size() == Size(3, 3));\r
- CV_Assert(R.type() == CV_32F);\r
- r[0] = R.at<float>(0, 0); r[1] = R.at<float>(0, 1); r[2] = R.at<float>(0, 2);\r
- r[3] = R.at<float>(1, 0); r[4] = R.at<float>(1, 1); r[5] = R.at<float>(1, 2);\r
- r[6] = R.at<float>(2, 0); r[7] = R.at<float>(2, 1); r[8] = R.at<float>(2, 2);\r
-\r
- Mat Rinv = R.inv();\r
- rinv[0] = Rinv.at<float>(0, 0); rinv[1] = Rinv.at<float>(0, 1); rinv[2] = Rinv.at<float>(0, 2);\r
- rinv[3] = Rinv.at<float>(1, 0); rinv[4] = Rinv.at<float>(1, 1); rinv[5] = Rinv.at<float>(1, 2);\r
- rinv[6] = Rinv.at<float>(2, 0); rinv[7] = Rinv.at<float>(2, 1); rinv[8] = Rinv.at<float>(2, 2);\r
+ // Both matrices must be 3x3 single-precision floats.\r
+ CV_Assert(K.size() == Size(3, 3) && K.type() == CV_32F);\r
+ CV_Assert(R.size() == Size(3, 3) && R.type() == CV_32F);\r
+\r
+ // Cache K, R^-1, R*K^-1 and K*R^-1 as flat float[9] arrays in row-major\r
+ // order for fast per-pixel use in the projector map functions.\r
+ Mat_<float> K_(K);\r
+ k[0] = K_(0,0); k[1] = K_(0,1); k[2] = K_(0,2);\r
+ k[3] = K_(1,0); k[4] = K_(1,1); k[5] = K_(1,2);\r
+ k[6] = K_(2,0); k[7] = K_(2,1); k[8] = K_(2,2);\r
+\r
+ // R is assumed orthonormal, so its inverse is its transpose.\r
+ Mat_<float> Rinv = R.t();\r
+ rinv[0] = Rinv(0,0); rinv[1] = Rinv(0,1); rinv[2] = Rinv(0,2);\r
+ rinv[3] = Rinv(1,0); rinv[4] = Rinv(1,1); rinv[5] = Rinv(1,2);\r
+ rinv[6] = Rinv(2,0); rinv[7] = Rinv(2,1); rinv[8] = Rinv(2,2);\r
+\r
+ // Pixel -> rotated ray (used by forward mapping).\r
+ Mat_<float> R_Kinv = R * K.inv();\r
+ r_kinv[0] = R_Kinv(0,0); r_kinv[1] = R_Kinv(0,1); r_kinv[2] = R_Kinv(0,2); \r
+ r_kinv[3] = R_Kinv(1,0); r_kinv[4] = R_Kinv(1,1); r_kinv[5] = R_Kinv(1,2); \r
+ r_kinv[6] = R_Kinv(2,0); r_kinv[7] = R_Kinv(2,1); r_kinv[8] = R_Kinv(2,2); \r
+\r
+ // Ray -> pixel (used by backward mapping).\r
+ Mat_<float> K_Rinv = K * Rinv;\r
+ k_rinv[0] = K_Rinv(0,0); k_rinv[1] = K_Rinv(0,1); k_rinv[2] = K_Rinv(0,2); \r
+ k_rinv[3] = K_Rinv(1,0); k_rinv[4] = K_Rinv(1,1); k_rinv[5] = K_Rinv(1,2); \r
+ k_rinv[6] = K_Rinv(2,0); k_rinv[7] = K_Rinv(2,1); k_rinv[8] = K_Rinv(2,2); \r
}\r
\r
\r
}\r
\r
#ifndef ANDROID\r
-Point PlaneWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat &dst, int interp_mode, int border_mode)\r
+Point PlaneWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst, int interp_mode, int border_mode)\r
{\r
src_size_ = src.size();\r
- projector_.size = src.size();\r
- projector_.focal = focal;\r
- projector_.setTransformation(R);\r
+ projector_.setCameraParams(K, R);\r
\r
Point dst_tl, dst_br;\r
detectResultRoi(dst_tl, dst_br);\r
\r
gpu::buildWarpPlaneMaps(src.size(), Rect(dst_tl, Point(dst_br.x+1, dst_br.y+1)),\r
- R, focal, projector_.scale, projector_.plane_dist, d_xmap_, d_ymap_);\r
+ K, R, projector_.scale, d_xmap_, d_ymap_);\r
\r
gpu::ensureSizeIsEnough(src.size(), src.type(), d_src_);\r
d_src_.upload(src);\r
float z = projector_.rinv[7];\r
if (y > 0.f)\r
{\r
- x = projector_.focal * x / z + src_size_.width * 0.5f;\r
- y = projector_.focal * y / z + src_size_.height * 0.5f;\r
- if (x > 0.f && x < src_size_.width && y > 0.f && y < src_size_.height)\r
+ //x = projector_.focal * x / z + src_size_.width * 0.5f;\r
+ //y = projector_.focal * y / z + src_size_.height * 0.5f;\r
+ float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];\r
+ float y_ = projector_.k[4] * y / z + projector_.k[5];\r
+ if (x_ > 0.f && x_ < src_size_.width && y_ > 0.f && y_ < src_size_.height)\r
{\r
tl_uf = min(tl_uf, 0.f); tl_vf = min(tl_vf, static_cast<float>(CV_PI * projector_.scale));\r
br_uf = max(br_uf, 0.f); br_vf = max(br_vf, static_cast<float>(CV_PI * projector_.scale));\r
z = projector_.rinv[7];\r
if (y > 0.f)\r
{\r
- x = projector_.focal * x / z + src_size_.width * 0.5f;\r
- y = projector_.focal * y / z + src_size_.height * 0.5f;\r
- if (x > 0.f && x < src_size_.width && y > 0.f && y < src_size_.height)\r
+ //x = projector_.focal * x / z + src_size_.width * 0.5f;\r
+ //y = projector_.focal * y / z + src_size_.height * 0.5f;\r
+ float x_ = (projector_.k[0] * x + projector_.k[1] * y) / z + projector_.k[2];\r
+ float y_ = projector_.k[4] * y / z + projector_.k[5];\r
+ if (x_ > 0.f && x_ < src_size_.width && y_ > 0.f && y_ < src_size_.height)\r
{\r
tl_uf = min(tl_uf, 0.f); tl_vf = min(tl_vf, static_cast<float>(0));\r
br_uf = max(br_uf, 0.f); br_vf = max(br_vf, static_cast<float>(0));\r
}\r
\r
#ifndef ANDROID\r
-Point SphericalWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
- int interp_mode, int border_mode)\r
+Point SphericalWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
+ int interp_mode, int border_mode)\r
{\r
src_size_ = src.size();\r
- projector_.size = src.size();\r
- projector_.focal = focal;\r
- projector_.setTransformation(R);\r
+ projector_.setCameraParams(K, R);\r
\r
Point dst_tl, dst_br;\r
detectResultRoi(dst_tl, dst_br);\r
\r
gpu::buildWarpSphericalMaps(src.size(), Rect(dst_tl, Point(dst_br.x+1, dst_br.y+1)),\r
- R, focal, projector_.scale, d_xmap_, d_ymap_);\r
+ K, R, projector_.scale, d_xmap_, d_ymap_);\r
\r
gpu::ensureSizeIsEnough(src.size(), src.type(), d_src_);\r
d_src_.upload(src);\r
}\r
\r
\r
-Point CylindricalWarperGpu::warp(const Mat &src, float focal, const Mat &R, Mat &dst,\r
- int interp_mode, int border_mode)\r
+Point CylindricalWarperGpu::warp(const Mat &src, const Mat &K, const Mat &R, Mat &dst,\r
+ int interp_mode, int border_mode)\r
{\r
src_size_ = src.size();\r
- projector_.size = src.size();\r
- projector_.focal = focal;\r
- projector_.setTransformation(R);\r
+ projector_.setCameraParams(K, R);\r
\r
Point dst_tl, dst_br;\r
detectResultRoi(dst_tl, dst_br);\r
\r
gpu::buildWarpCylindricalMaps(src.size(), Rect(dst_tl, Point(dst_br.x+1, dst_br.y+1)),\r
- R, focal, projector_.scale, d_xmap_, d_ymap_);\r
+ K, R, projector_.scale, d_xmap_, d_ymap_);\r
\r
gpu::ensureSizeIsEnough(src.size(), src.type(), d_src_);\r
d_src_.upload(src);\r
printUsage();
return -1;
}
- else if (string(argv[i]) == "--try_gpu")
+ else if (string(argv[i]) == "--try_use_gpu")
{
if (string(argv[i + 1]) == "no")
try_use_gpu = false;
//M*/
#include <fstream>
+#include <string>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
+#include "opencv2/stitching/warpers.hpp"
using namespace std;
using namespace cv;
bool wave_correct = true;
bool save_graph = false;
std::string save_graph_to;
-int warp_type = Warper::SPHERICAL;
+string warp_type = "spherical";
int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
float match_conf = 0.65f;
int seam_find_type = SeamFinder::GC_COLOR;
}
else if (string(argv[i]) == "--warp")
{
- if (string(argv[i + 1]) == "plane")
- warp_type = Warper::PLANE;
- else if (string(argv[i + 1]) == "cylindrical")
- warp_type = Warper::CYLINDRICAL;
- else if (string(argv[i + 1]) == "spherical")
- warp_type = Warper::SPHERICAL;
- else
- {
- cout << "Bad warping method\n";
- return -1;
- }
+ warp_type = string(argv[i + 1]);
i++;
}
else if (string(argv[i]) == "--expos_comp")
}
// Warp images and their masks
- Ptr<Warper> warper = Warper::createByCameraFocal(static_cast<float>(warped_image_scale * seam_work_aspect),
- warp_type, try_gpu);
+
+ Ptr<WarperCreator> warper_creator;
+#ifndef ANDROID
+ // Bug fix: the GPU and CPU branches were swapped — the CUDA-capable path
+ // constructed CPU warpers while the fallback constructed the *Gpu warpers
+ // (which require a CUDA device). Use the GPU warpers only when one exists.
+ if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
+ {
+ if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();
+ else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();
+ else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();
+ }
+ else
+#endif
+ {
+ // CPU warpers: the default path, and the only path on Android builds.
+ if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
+ else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
+ else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
+ }
+
+ if (warper_creator.empty())
+ {
+ // Unrecognized --warp argument: report it and exit with an error code.
+ cout << "Can't create the following warper '" << warp_type << "'\n";
+ return 1;
+ }
+
+ Ptr<Warper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
+
for (int i = 0; i < num_images; ++i)
{
- corners[i] = warper->warp(images[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
- cameras[i].R, images_warped[i]);
+ Mat_<float> K;
+ cameras[i].K().convertTo(K, CV_32F);
+ K(0,0) *= seam_work_aspect; K(0,2) *= seam_work_aspect;
+ K(1,1) *= seam_work_aspect; K(1,2) *= seam_work_aspect;
+
+ corners[i] = warper->warp(images[i], K, cameras[i].R, images_warped[i]);
sizes[i] = images_warped[i].size();
- warper->warp(masks[i], static_cast<float>(cameras[i].focal * seam_work_aspect),
- cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
+
+ warper->warp(masks[i], K, cameras[i].R, masks_warped[i], INTER_NEAREST, BORDER_CONSTANT);
}
vector<Mat> images_warped_f(num_images);
// Update warped image scale
warped_image_scale *= static_cast<float>(compose_work_aspect);
- warper = Warper::createByCameraFocal(warped_image_scale, warp_type, try_gpu);
+ warper = warper_creator->create(warped_image_scale);
// Update corners and sizes
for (int i = 0; i < num_images; ++i)
{
- // Update camera focal
+ // Update intrinsics
cameras[i].focal *= compose_work_aspect;
+ cameras[i].ppx *= compose_work_aspect;
+ cameras[i].ppy *= compose_work_aspect;
// Update corner and size
Size sz = full_img_sizes[i];
- if (abs(compose_scale - 1) > 1e-1)
+ if (std::abs(compose_scale - 1) > 1e-1)
{
sz.width = cvRound(full_img_sizes[i].width * compose_scale);
sz.height = cvRound(full_img_sizes[i].height * compose_scale);
}
- Rect roi = warper->warpRoi(sz, static_cast<float>(cameras[i].focal), cameras[i].R);
+ Mat K;
+ cameras[i].K().convertTo(K, CV_32F);
+ Rect roi = warper->warpRoi(sz, K, cameras[i].R);
corners[i] = roi.tl();
sizes[i] = roi.size();
}
full_img.release();
Size img_size = img.size();
+ Mat K;
+ cameras[img_idx].K().convertTo(K, CV_32F);
+
// Warp the current image
- warper->warp(img, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R,
- img_warped);
+ warper->warp(img, K, cameras[img_idx].R, img_warped);
// Warp the current image mask
mask.create(img_size, CV_8U);
mask.setTo(Scalar::all(255));
- warper->warp(mask, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R, mask_warped,
- INTER_NEAREST, BORDER_CONSTANT);
+ warper->warp(mask, K, cameras[img_idx].R, mask_warped, INTER_NEAREST, BORDER_CONSTANT);
// Compensate exposure
compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);