1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
10 // Intel License Agreement
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000, Intel Corporation, all rights reserved.
14 // Third party copyrights are property of their respective owners.
16 // Redistribution and use in source and binary forms, with or without modification,
17 // are permitted provided that the following conditions are met:
19 // * Redistribution's of source code must retain the above copyright notice,
20 // this list of conditions and the following disclaimer.
22 // * Redistribution's in binary form must reproduce the above copyright notice,
23 // this list of conditions and the following disclaimer in the documentation
24 // and/or other materials provided with the distribution.
26 // * The name of Intel Corporation may not be used to endorse or promote products
27 // derived from this software without specific prior written permission.
29 // This software is provided by the copyright holders and contributors "as is" and
30 // any express or implied warranties, including, but not limited to, the implied
31 // warranties of merchantability and fitness for a particular purpose are disclaimed.
32 // In no event shall the Intel Corporation or contributors be liable for any direct,
33 // indirect, incidental, special, exemplary, or consequential damages
34 // (including, but not limited to, procurement of substitute goods or services;
35 // loss of use, data, or profits; or business interruption) however caused
36 // and on any theory of liability, whether in contract, strict liability,
37 // or tort (including negligence or otherwise) arising in any way out of
38 // the use of this software, even if advised of the possibility of such damage.
43 #include "precomp.hpp"
/* NOTE(review): this whole chunk is an elided extract — the leading number on
   each line is the original file's line number and the numbers jump, so
   statements are missing throughout.  Code is kept byte-identical; only
   comments are added. */
/* Forward declarations of helpers defined later in the original file:
   essential-matrix computation from R/t, essential->fundamental conversion,
   epipole extraction from F, and a point-in-sector test (icvTestPoint).
   Parameter lists are partially elided here. */
53 int cvComputeEssentialMatrix( CvMatr32f rotMatr,
57 int cvConvertEssential2Fundamental( CvMatr32f essMatr,
59 CvMatr32f cameraMatr1,
60 CvMatr32f cameraMatr2);
62 int cvComputeEpipolesFromFundMatrix(CvMatr32f fundMatr,
63 CvPoint3D32f* epipole1,
64 CvPoint3D32f* epipole2);
66 void icvTestPoint( CvPoint2D64d testPoint,
67 CvVect64d line1,CvVect64d line2,
68 CvPoint2D64d basePoint,
/* Computes a 3D point symmetric to point2 with respect to pointCorner along
   the direction pointCorner->point1.  Visible code measures the two segment
   lengths and places pointSym2 at pointCorner + alpha*(point1 - pointCorner);
   the definition of alpha is elided — presumably alpha = len2/len1, so that
   |pointCorner - pointSym2| == |pointCorner - point2| — TODO confirm against
   the full source. */
73 int icvGetSymPoint3D( CvPoint3D64d pointCorner,
76 CvPoint3D64d *pointSym2)
80 icvGetPieceLength3D(pointCorner,point1,&len1);
85 icvGetPieceLength3D(pointCorner,point2,&len2);
88 pointSym2->x = pointCorner.x + alpha*(point1.x - pointCorner.x);
89 pointSym2->y = pointCorner.y + alpha*(point1.y - pointCorner.y);
90 pointSym2->z = pointCorner.z + alpha*(point1.z - pointCorner.z);
94 /* author Valery Mosyagin */
96 /* Compute 3D point for scanline and alpha betta */
/* Evaluates a bilinear-in-(alpha,betta) parametrization stored in coeffs and
   divides by (alpha - betta) to recover the 3D point:
       point = (C + A*alpha + B*betta + AB*alpha*betta) / (alpha - betta)
   per coordinate.  Returns CV_BADFACTOR_ERR when alpha - betta is too close
   to zero (degenerate: the comment says alpha must be > betta); the success
   return value is on an elided line. */
97 int icvCompute3DPoint( double alpha,double betta,
98 CvStereoLineCoeff* coeffs,
108 double alphabetta = alpha*betta;
110 partAll = alpha - betta;
111 if( fabs(partAll) > 0.00001 ) /* alpha must be > betta */
114 partX = coeffs->Xcoef + coeffs->XcoefA *alpha +
115 coeffs->XcoefB*betta + coeffs->XcoefAB*alphabetta;
117 partY = coeffs->Ycoef + coeffs->YcoefA *alpha +
118 coeffs->YcoefB*betta + coeffs->YcoefAB*alphabetta;
120 partZ = coeffs->Zcoef + coeffs->ZcoefA *alpha +
121 coeffs->ZcoefB*betta + coeffs->ZcoefAB*alphabetta;
/* Multiply by the reciprocal once instead of dividing three times. */
123 invPartAll = 1.0 / partAll;
125 point->x = partX * invPartAll;
126 point->y = partY * invPartAll;
127 point->z = partZ * invPartAll;
132 return CV_BADFACTOR_ERR;
136 /*--------------------------------------------------------------------------------------*/
138 /* Compute rotate matrix and trans vector for change system */
/* Builds the relative transform between two camera frames given each frame's
   (R, t): inverts rotMatr2, composes convRotMatr from rotMatr1 and the
   inverse (middle operands elided), and derives convTransVect as
   transVect1 - convRotMatr*transVect2 via the tmpVect product/subtraction
   visible below.  Outputs go to convRotMatr (3x3) and convTransVect (3). */
139 int icvCreateConvertMatrVect( CvMatr64d rotMatr1,
140 CvMatr64d transVect1,
142 CvMatr64d transVect2,
143 CvMatr64d convRotMatr,
144 CvMatr64d convTransVect)
146 double invRotMatr2[9];
150 icvInvertMatrix_64d(rotMatr2,3,invRotMatr2);
153 icvMulMatrix_64d( rotMatr1,
159 icvMulMatrix_64d( convRotMatr,
165 icvSubVector_64d(transVect1,tmpVect,convTransVect,3);
171 /*--------------------------------------------------------------------------------------*/
173 /* Compute point coordinates in other system */
/* Transforms point M2 into another coordinate system: M1 = R*M2 + t.
   The rotMatr/transVect/M1 parameters and the tmpVect declaration are on
   elided lines; the visible calls show the multiply-then-add structure. */
174 int icvConvertPointSystem(CvPoint3D64d M2,
182 icvMulMatrix_64d( rotMatr,
188 icvAddVector_64d(tmpVect,transVect,(double*)M1,3);
192 /*--------------------------------------------------------------------------------------*/
/* For each of numScanlines scanlines, linearly interpolates the endpoints of
   the line across both rectification quads (quad1 for image 1, quad2 for
   image 2) and calls icvComCoeffForLine to fill startCoeffs[currLine].
   Interpolation runs from edge quad[0]-quad[1] (alpha=0) to quad[3]-quad[2]
   (alpha=1); the "maybe - 1" comment marks an unresolved fence-post question
   in the alpha denominator. */
193 static int icvComputeCoeffForStereoV3( double quad1[4][2],
198 CvMatr64d transVect1,
201 CvMatr64d transVect2,
202 CvStereoLineCoeff* startCoeffs,
206 /* In this function we must define position of cameras */
215 for( currLine = 0; currLine < numScanlines; currLine++ )
218 double alpha = ((double)currLine)/((double)(numScanlines)); /* maybe - 1 */
220 point1.x = (1.0 - alpha) * quad1[0][0] + alpha * quad1[3][0];
221 point1.y = (1.0 - alpha) * quad1[0][1] + alpha * quad1[3][1];
223 point2.x = (1.0 - alpha) * quad1[1][0] + alpha * quad1[2][0];
224 point2.y = (1.0 - alpha) * quad1[1][1] + alpha * quad1[2][1];
226 point3.x = (1.0 - alpha) * quad2[0][0] + alpha * quad2[3][0];
227 point3.y = (1.0 - alpha) * quad2[0][1] + alpha * quad2[3][1];
229 point4.x = (1.0 - alpha) * quad2[1][0] + alpha * quad2[2][0];
230 point4.y = (1.0 - alpha) * quad2[1][1] + alpha * quad2[2][1];
232 /* We can compute coeffs for this line */
233 icvComCoeffForLine( point1,
243 &startCoeffs[currLine],
248 /*--------------------------------------------------------------------------------------*/
/* 32f wrapper around icvComputeCoeffForStereoV3: converts camera matrices,
   rotation and translation to 64d, sets the second camera's pose to the
   identity frame (rotMatr2_64d assignment elided; transVect2_64d zeroed
   below), then delegates.  Returns the delegate's status. */
249 static int icvComputeCoeffForStereoNew( double quad1[4][2],
254 CvMatr32f transVect1,
256 CvStereoLineCoeff* startCoeffs,
261 double camMatr1_64d[9];
262 double camMatr2_64d[9];
264 double rotMatr1_64d[9];
265 double transVect1_64d[3];
267 double rotMatr2_64d[9];
268 double transVect2_64d[3];
270 icvCvt_32f_64d(camMatr1,camMatr1_64d,9);
271 icvCvt_32f_64d(camMatr2,camMatr2_64d,9);
273 icvCvt_32f_64d(rotMatr1,rotMatr1_64d,9);
274 icvCvt_32f_64d(transVect1,transVect1_64d,3);
/* Second camera placed at the origin of the (already relative) transform. */
286 transVect2_64d[0] = 0;
287 transVect2_64d[1] = 0;
288 transVect2_64d[2] = 0;
290 int status = icvComputeCoeffForStereoV3( quad1,
306 /*--------------------------------------------------------------------------------------*/
/* Struct-level wrapper: unpacks the two rectification quads from
   stereoCamera->quad into plain double arrays and forwards all stereo-camera
   fields (camera matrices, relative R/t, scanline count = warp height,
   output line coefficients and the needSwapCameras flag) to
   icvComputeCoeffForStereoNew. */
307 int icvComputeCoeffForStereo( CvStereoCamera* stereoCamera)
313 for( i = 0; i < 4; i++ )
315 quad1[i][0] = stereoCamera->quad[0][i].x;
316 quad1[i][1] = stereoCamera->quad[0][i].y;
318 quad2[i][0] = stereoCamera->quad[1][i].x;
319 quad2[i][1] = stereoCamera->quad[1][i].y;
322 icvComputeCoeffForStereoNew( quad1,
324 stereoCamera->warpSize.height,
325 stereoCamera->camera[0]->matrix,
326 stereoCamera->rotMatrix,
327 stereoCamera->transVector,
328 stereoCamera->camera[1]->matrix,
329 stereoCamera->lineCoeffs,
330 &(stereoCamera->needSwapCameras));
335 /*--------------------------------------------------------------------------------------*/
/* Computes the CvStereoLineCoeff parametrization for one pair of
   corresponding scanlines.  Visible steps: back-project the four 2D line
   endpoints to viewing rays (icvGetDirectionForPoint), move camera 2's rays
   and center into camera 1's frame (icvCreateConvertMatrVect +
   icvConvertPointSystem), intersect rays to get a base point B
   (icvGetCrossLines, retried with other rays if B lands behind the camera),
   build symmetric points A and C (icvGetSymPoint3D, with the camera order
   swapped in the "change cameras" branch), then measure |AB| and |BC| and
   hand everything to icvComputeStereoLineCoeffs.  Many intermediate
   declarations/uses (xA..zC etc.) are on elided lines. */
336 int icvComCoeffForLine( CvPoint2D64d point1,
342 CvMatr64d transVect1,
345 CvMatr64d transVect2,
346 CvStereoLineCoeff* coeffs,
349 /* Get direction for all points */
350 /* Direction for camera 1 */
352 CvPoint3D64f direct1;
353 CvPoint3D64f direct2;
354 CvPoint3D64f camPoint1;
356 CvPoint3D64f directS3;
357 CvPoint3D64f directS4;
358 CvPoint3D64f direct3;
359 CvPoint3D64f direct4;
360 CvPoint3D64f camPoint2;
362 icvGetDirectionForPoint( point1,
366 icvGetDirectionForPoint( point2,
370 /* Direction for camera 2 */
372 icvGetDirectionForPoint( point3,
376 icvGetDirectionForPoint( point4,
380 /* Create convertion for camera 2: two direction and camera point */
382 double convRotMatr[9];
383 double convTransVect[3];
385 icvCreateConvertMatrVect( rotMatr1,
392 CvPoint3D64f zeroVect;
393 zeroVect.x = zeroVect.y = zeroVect.z = 0.0;
394 camPoint1.x = camPoint1.y = camPoint1.z = 0.0;
396 icvConvertPointSystem(directS3,&direct3,convRotMatr,convTransVect);
397 icvConvertPointSystem(directS4,&direct4,convRotMatr,convTransVect);
398 icvConvertPointSystem(zeroVect,&camPoint2,convRotMatr,convTransVect);
405 /* Compute point B: xB,yB,zB */
406 icvGetCrossLines(camPoint1,direct2,
410 if( pointB.z < 0 )/* If negative use other lines for cross */
413 icvGetCrossLines(camPoint1,direct1,
418 CvPoint3D64d pointNewA;
419 CvPoint3D64d pointNewC;
421 pointNewA.x = pointNewA.y = pointNewA.z = 0;
422 pointNewC.x = pointNewC.y = pointNewC.z = 0;
426 icvGetSymPoint3D( camPoint1,
431 icvGetSymPoint3D( camPoint2,
437 {/* In this case we must change cameras */
439 icvGetSymPoint3D( camPoint2,
444 icvGetSymPoint3D( camPoint1,
470 len1 = sqrt( (xA-xB)*(xA-xB) + (yA-yB)*(yA-yB) + (zA-zB)*(zA-zB) );
471 len2 = sqrt( (xB-xC)*(xB-xC) + (yB-yC)*(yB-yC) + (zB-zC)*(zB-zC) );
474 icvComputeStereoLineCoeffs( pointNewA,
484 /*--------------------------------------------------------------------------------------*/
/* Back-projects an image point to a viewing-ray direction: inverts the 3x3
   camera matrix and multiplies it by the homogeneous pixel coordinates
   (vector construction elided between the two visible calls).  The
   "TEST FOR ERRORS" comment suggests the inversion result is unchecked. */
486 int icvGetDirectionForPoint( CvPoint2D64d point,
488 CvPoint3D64d* direct)
495 icvInvertMatrix_64d(camMatr,3,invMatr);
496 /* TEST FOR ERRORS */
504 icvMulMatrix_64d( invMatr,
513 /*--------------------------------------------------------------------------------------*/
/* Finds the midpoint of the common perpendicular of two 3D lines, line 1
   through (point11,point12) and line 2 through (point21,point22).  Solves the
   2x2 normal system for parameters alpha (on AB) and betta (on CD) by
   Cramer's rule, computes the closest points M and N on each line, and
   returns their average in midPoint.  Near-parallel lines (|delta| < EPS64D)
   take an elided early-exit branch.  The xA..zD locals are unpacked from the
   arguments on elided lines. */
515 int icvGetCrossLines(CvPoint3D64d point11,CvPoint3D64d point12,
516 CvPoint3D64d point21,CvPoint3D64d point22,
517 CvPoint3D64d* midPoint)
544 double a11,a12,a21,a22;
547 a11 = (xB-xA)*(xB-xA)+(yB-yA)*(yB-yA)+(zB-zA)*(zB-zA);
548 a12 = -(xD-xC)*(xB-xA)-(yD-yC)*(yB-yA)-(zD-zC)*(zB-zA);
549 a21 = (xB-xA)*(xD-xC)+(yB-yA)*(yD-yC)+(zB-zA)*(zD-zC);
550 a22 = -(xD-xC)*(xD-xC)-(yD-yC)*(yD-yC)-(zD-zC)*(zD-zC);
551 b1 = -( (xA-xC)*(xB-xA)+(yA-yC)*(yB-yA)+(zA-zC)*(zB-zA) );
552 b2 = -( (xA-xC)*(xD-xC)+(yA-yC)*(yD-yC)+(zA-zC)*(zD-zC) );
555 double deltaA,deltaB;
558 delta = a11*a22-a12*a21;
560 if( fabs(delta) < EPS64D )
565 deltaA = b1*a22-b2*a12;
566 deltaB = a11*b2-b1*a21;
568 alpha = deltaA / delta;
569 betta = deltaB / delta;
571 xM = xA+alpha*(xB-xA);
572 yM = yA+alpha*(yB-yA);
573 zM = zA+alpha*(zB-zA);
575 xN = xC+betta*(xD-xC);
576 yN = yC+betta*(yD-yC);
577 zN = zC+betta*(zD-zC);
579 /* Compute middle point */
580 midPoint->x = (xM + xN) * 0.5;
581 midPoint->y = (yM + yN) * 0.5;
582 midPoint->z = (zM + zN) * 0.5;
587 /*--------------------------------------------------------------------------------------*/
/* Fills the bilinear coefficients consumed by icvCompute3DPoint from the
   points A/B and a camera point (x1,y1,z1), with a mixing factor gamma
   (defined on elided lines).  Two symmetric branches: the second negates all
   coefficients and swaps the A/B coefficient roles — presumably selected by
   the sign of gamma or the camera order; the branch condition is elided —
   TODO confirm against the full source. */
589 int icvComputeStereoLineCoeffs( CvPoint3D64d pointA,
591 CvPoint3D64d pointCam1,
593 CvStereoLineCoeff* coeffs)
614 coeffs->Xcoef = -x1 + xA;
615 coeffs->XcoefA = xB + x1 - xA;
616 coeffs->XcoefB = -xA - gamma * x1 + gamma * xA;
617 coeffs->XcoefAB = -xB + xA + gamma * xB - gamma * xA;
619 coeffs->Ycoef = -y1 + yA;
620 coeffs->YcoefA = yB + y1 - yA;
621 coeffs->YcoefB = -yA - gamma * y1 + gamma * yA;
622 coeffs->YcoefAB = -yB + yA + gamma * yB - gamma * yA;
624 coeffs->Zcoef = -z1 + zA;
625 coeffs->ZcoefA = zB + z1 - zA;
626 coeffs->ZcoefB = -zA - gamma * z1 + gamma * zA;
627 coeffs->ZcoefAB = -zB + zA + gamma * zB - gamma * zA;
/* Mirrored branch: negated coefficients, A/B roles exchanged. */
632 coeffs->Xcoef = -( -x1 + xA);
633 coeffs->XcoefB = -( xB + x1 - xA);
634 coeffs->XcoefA = -( -xA - gamma * x1 + gamma * xA);
635 coeffs->XcoefAB = -( -xB + xA + gamma * xB - gamma * xA);
637 coeffs->Ycoef = -( -y1 + yA);
638 coeffs->YcoefB = -( yB + y1 - yA);
639 coeffs->YcoefA = -( -yA - gamma * y1 + gamma * yA);
640 coeffs->YcoefAB = -( -yB + yA + gamma * yB - gamma * yA);
642 coeffs->Zcoef = -( -z1 + zA);
643 coeffs->ZcoefB = -( zB + z1 - zA);
644 coeffs->ZcoefA = -( -zA - gamma * z1 + gamma * zA);
645 coeffs->ZcoefAB = -( -zB + zA + gamma * zB - gamma * zA);
652 /*--------------------------------------------------------------------------------------*/
655 /*---------------------------------------------------------------------------------------*/
657 /* This function get minimum angle started at point which contains rect */
/* Given an (epipole) startPoint and the image rectangle, picks the two image
   corners point1/point2 that bound the minimal angular sector containing the
   whole rectangle.  Corners: pa=(0,0) [implicit], pb=(w-1,0), pc=(0,h-1),
   pd=(w-1,h-1).  The 9-region case analysis (outside left/right/top/bottom,
   corners, or inside the image) selects the pair; the actual assignments in
   each branch are elided. */
658 int icvGetAngleLine( CvPoint2D64d startPoint, CvSize imageSize,CvPoint2D64d *point1,CvPoint2D64d *point2)
660 /* Get crosslines with image corners */
662 /* Find four lines */
664 CvPoint2D64d pa,pb,pc,pd;
669 pb.x = imageSize.width-1;
672 pd.x = imageSize.width-1;
673 pd.y = imageSize.height-1;
676 pc.y = imageSize.height-1;
678 /* We can compute points for angle */
679 /* Test for place section */
681 if( startPoint.x < 0 )
683 if( startPoint.y < 0)
688 else if( startPoint.y > imageSize.height-1 )
699 else if ( startPoint.x > imageSize.width-1 )
701 if( startPoint.y < 0 )
706 else if ( startPoint.y > imageSize.height-1 )
719 if( startPoint.y < 0 )
721 if( startPoint.x < imageSize.width/2 )
732 else if( startPoint.y > imageSize.height-1 )
734 if( startPoint.x < imageSize.width/2 )
746 {/* 5 - point in the image */
753 /*---------------------------------------------------------------------------------------*/
/* Computes line coefficients (a,b,c) for the line a*x + b*y + c = 0 through
   p_start and p_end via Cramer's rule with the normalization row (1,1,1):
   det is the 3x3 determinant |xs ys 1; xe ye 1; 1 1 1| expanded inline.
   Degenerate input (|det| < EPS64D) takes an elided error branch; the final
   a/b/c assignments (detX*invDet etc.) are also elided. */
755 void icvGetCoefForPiece( CvPoint2D64d p_start,CvPoint2D64d p_end,
756 double *a,double *b,double *c,
760 double detA,detB,detC;
762 det = p_start.x*p_end.y+p_end.x+p_start.y-p_end.y-p_start.y*p_end.x-p_start.x;
763 if( fabs(det) < EPS64D)/* Error */
769 detA = p_start.y - p_end.y;
770 detB = p_end.x - p_start.x;
771 detC = p_start.x*p_end.y - p_end.x*p_start.y;
773 double invDet = 1.0 / det;
782 /*---------------------------------------------------------------------------------------*/
784 /* Get common area of rectifying */
785 static void icvGetCommonArea( CvSize imageSize,
786 CvPoint3D64d epipole1,CvPoint3D64d epipole2,
788 CvVect64d coeff11,CvVect64d coeff12,
789 CvVect64d coeff21,CvVect64d coeff22,
793 CvPoint2D64d point11;
794 CvPoint2D64d point12;
795 CvPoint2D64d point21;
796 CvPoint2D64d point22;
808 double transFundMatr[3*3];
809 /* Compute transpose of fundamental matrix */
810 icvTransposeMatrix_64d( fundMatr, 3, 3, transFundMatr );
812 CvPoint2D64d epipole1_2d;
813 CvPoint2D64d epipole2_2d;
815 if( fabs(epipole1.z) < 1e-8 )
816 {/* epipole1 in infinity */
820 epipole1_2d.x = epipole1.x / epipole1.z;
821 epipole1_2d.y = epipole1.y / epipole1.z;
823 if( fabs(epipole2.z) < 1e-8 )
824 {/* epipole2 in infinity */
828 epipole2_2d.x = epipole2.x / epipole2.z;
829 epipole2_2d.y = epipole2.y / epipole2.z;
831 int stat = icvGetAngleLine( epipole1_2d, imageSize,&point11,&point12);
839 stat = icvGetAngleLine( epipole2_2d, imageSize,&point21,&point22);
847 /* ============= Computation for line 1 ================ */
848 /* Find correspondence line for angle points11 */
849 /* corr21 = Fund'*p1 */
851 pointW11[0] = point11.x;
852 pointW11[1] = point11.y;
855 icvTransformVector_64d( transFundMatr, /* !!! Modified from not transposed */
860 /* Find crossing of line with image 2 */
863 icvGetCrossRectDirect( imageSize,
864 corr21[0],corr21[1],corr21[2],
869 {/* We have not cross */
870 /* We must define new angle */
872 pointW21[0] = point21.x;
873 pointW21[1] = point21.y;
876 /* Find correspondence line for this angle points */
877 /* We know point and try to get corr line */
879 /* corr11 = Fund * p21 */
881 icvTransformVector_64d( fundMatr, /* !!! Modified */
886 /* We have cross. And it's result cross for up line. Set result coefs */
888 /* Set coefs for line 1 image 1 */
889 coeff11[0] = corr11[0];
890 coeff11[1] = corr11[1];
891 coeff11[2] = corr11[2];
893 /* Set coefs for line 1 image 2 */
894 icvGetCoefForPiece( epipole2_2d,point21,
895 &coeff21[0],&coeff21[1],&coeff21[2],
904 {/* Line 1 cross image 2 */
905 /* Set coefs for line 1 image 1 */
906 icvGetCoefForPiece( epipole1_2d,point11,
907 &coeff11[0],&coeff11[1],&coeff11[2],
915 /* Set coefs for line 1 image 2 */
916 coeff21[0] = corr21[0];
917 coeff21[1] = corr21[1];
918 coeff21[2] = corr21[2];
922 /* ============= Computation for line 2 ================ */
923 /* Find correspondence line for angle points11 */
924 /* corr22 = Fund*p2 */
926 pointW12[0] = point12.x;
927 pointW12[1] = point12.y;
930 icvTransformVector_64d( transFundMatr,
935 /* Find crossing of line with image 2 */
936 icvGetCrossRectDirect( imageSize,
937 corr22[0],corr22[1],corr22[2],
942 {/* We have not cross */
943 /* We must define new angle */
945 pointW22[0] = point22.x;
946 pointW22[1] = point22.y;
949 /* Find correspondence line for this angle points */
950 /* We know point and try to get corr line */
952 /* corr2 = Fund' * p1 */
954 icvTransformVector_64d( fundMatr,
960 /* We have cross. And it's result cross for down line. Set result coefs */
962 /* Set coefs for line 2 image 1 */
963 coeff12[0] = corr12[0];
964 coeff12[1] = corr12[1];
965 coeff12[2] = corr12[2];
967 /* Set coefs for line 1 image 2 */
968 icvGetCoefForPiece( epipole2_2d,point22,
969 &coeff22[0],&coeff22[1],&coeff22[2],
978 {/* Line 2 cross image 2 */
979 /* Set coefs for line 2 image 1 */
980 icvGetCoefForPiece( epipole1_2d,point12,
981 &coeff12[0],&coeff12[1],&coeff12[2],
989 /* Set coefs for line 1 image 2 */
990 coeff22[0] = corr22[0];
991 coeff22[1] = corr22[1];
992 coeff22[2] = corr22[2];
996 /* Now we know common area */
1000 }/* GetCommonArea */
1002 /*---------------------------------------------------------------------------------------*/
1004 /* Get cross for direction1 and direction2 */
1005 /* Result = 1 - cross */
1006 /* Result = 2 - parallel and not equal */
1007 /* Result = 3 - parallel and equal */
/* Intersects two lines given as coefficient triples (a,b,c) by Cramer's
   rule; writes the intersection to *cross when the 2x2 determinant is
   non-degenerate, otherwise distinguishes distinct-parallel from coincident
   lines via detx (result assignments on elided lines). */
1009 void icvGetCrossDirectDirect( CvVect64d direct1,CvVect64d direct2,
1010 CvPoint2D64d *cross,int* result)
1012 double det = direct1[0]*direct2[1] - direct2[0]*direct1[1];
1013 double detx = -direct1[2]*direct2[1] + direct1[1]*direct2[2];
1015 if( fabs(det) > EPS64D )
1017 cross->x = detx/det;
1018 cross->y = (-direct1[0]*direct2[2] + direct2[0]*direct1[2])/det;
1022 {/* may be parallel */
1023 if( fabs(detx) > EPS64D )
1024 {/* parallel and not equal */
1036 /*---------------------------------------------------------------------------------------*/
1038 /* Get cross for piece p1,p2 and direction a,b,c */
1039 /* Result = 0 - no cross */
1040 /* Result = 1 - cross */
1041 /* Result = 2 - parallel and not equal */
1042 /* Result = 3 - parallel and equal */
/* Intersects the segment [p_start,p_end] with the line a*x+b*y+c=0.  The
   sign test on the two endpoint evaluations detects whether the segment
   straddles the line; parallel/degenerate cases key off det; the crossing
   point itself comes from the detxc/detyc Cramer expressions. */
1044 void icvGetCrossPieceDirect( CvPoint2D64d p_start,CvPoint2D64d p_end,
1045 double a,double b,double c,
1046 CvPoint2D64d *cross,int* result)
1049 if( (a*p_start.x + b*p_start.y + c) * (a*p_end.x + b*p_end.y + c) <= 0 )
1054 det = a * (p_end.x - p_start.x) + b * (p_end.y - p_start.y);
1056 if( fabs(det) < EPS64D )
1057 {/* lines are parallel and may be equal or line is point */
1058 if( fabs(a*p_start.x + b*p_start.y + c) < EPS64D )
1059 {/* line is point or not diff */
1070 detxc = b*(p_end.y*p_start.x - p_start.y*p_end.x) + c*(p_start.x - p_end.x);
1071 detyc = a*(p_end.x*p_start.y - p_start.x*p_end.y) + c*(p_start.y - p_end.y);
1073 cross->x = detxc / det;
1074 cross->y = detyc / det;
1084 /*--------------------------------------------------------------------------------------*/
/* Intersects two segments [p1_start,p1_end] and [p2_start,p2_end].  Solves
   for the segment parameters alpha/betta; if either lies outside [0,1] the
   segments do not cross (elided early exit), otherwise the crossing point is
   computed via the delX/delY determinants.  The ex/ey/px/py locals are
   unpacked from the endpoints on elided lines. */
1086 void icvGetCrossPiecePiece( CvPoint2D64d p1_start,CvPoint2D64d p1_end,
1087 CvPoint2D64d p2_start,CvPoint2D64d p2_end,
1088 CvPoint2D64d* cross,
1091 double ex1,ey1,ex2,ey2;
1092 double px1,py1,px2,py2;
1094 double delA,delB,delX,delY;
1107 del = (py1-py2)*(ex1-ex2)-(px1-px2)*(ey1-ey2);
1108 if( fabs(del) <= EPS64D )
1109 {/* May be they are parallel !!! */
1114 delA = (ey1-ey2)*(ex1-px1) + (ex1-ex2)*(py1-ey1);
1115 delB = (py1-py2)*(ex1-px1) + (px1-px2)*(py1-ey1);
1120 if( alpha < 0 || alpha > 1.0 || betta < 0 || betta > 1.0)
1126 delX = (px1-px2)*(ey1*(ex1-ex2)-ex1*(ey1-ey2))+
1127 (ex1-ex2)*(px1*(py1-py2)-py1*(px1-px2));
1129 delY = (py1-py2)*(ey1*(ex1-ex2)-ex1*(ey1-ey2))+
1130 (ey1-ey2)*(px1*(py1-py2)-py1*(px1-px2));
1132 cross->x = delX / del;
1133 cross->y = delY / del;
1140 /*---------------------------------------------------------------------------------------*/
/* Euclidean distance between two 2D points, written through *dist. */
1142 void icvGetPieceLength(CvPoint2D64d point1,CvPoint2D64d point2,double* dist)
1144 double dx = point2.x - point1.x;
1145 double dy = point2.y - point1.y;
1146 *dist = sqrt( dx*dx + dy*dy );
1150 /*---------------------------------------------------------------------------------------*/
/* Euclidean distance between two 3D points, written through *dist. */
1152 void icvGetPieceLength3D(CvPoint3D64d point1,CvPoint3D64d point2,double* dist)
1154 double dx = point2.x - point1.x;
1155 double dy = point2.y - point1.y;
1156 double dz = point2.z - point1.z;
1157 *dist = sqrt( dx*dx + dy*dy + dz*dz );
1161 /*---------------------------------------------------------------------------------------*/
1163 /* Find line from epipole which cross image rect */
1164 /* Find points of cross 0 or 1 or 2. Return number of points in cross */
/* Clips the line a*x+b*y+c=0 against the image rectangle: tests it against
   each of the four frame edges with icvGetCrossPieceDirect, then picks the
   two crossing points farthest apart (guards against duplicate corner hits)
   as *start / *end.  The frameBeg/frameEnd resets between edges and the
   maxDist bookkeeping are partly elided. */
1165 void icvGetCrossRectDirect( CvSize imageSize,
1166 double a,double b,double c,
1167 CvPoint2D64d *start,CvPoint2D64d *end,
1170 CvPoint2D64d frameBeg;
1171 CvPoint2D64d frameEnd;
1172 CvPoint2D64d cross[4];
1182 frameEnd.x = imageSize.width;
1185 icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[0],&haveCross[0]);
1187 frameBeg.x = imageSize.width;
1189 frameEnd.x = imageSize.width;
1190 frameEnd.y = imageSize.height;
1191 icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[1],&haveCross[1]);
1193 frameBeg.x = imageSize.width;
1194 frameBeg.y = imageSize.height;
1196 frameEnd.y = imageSize.height;
1197 icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[2],&haveCross[2]);
1200 frameBeg.y = imageSize.height;
1203 icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[3],&haveCross[3]);
/* Choose the pair of edge crossings with maximal separation. */
1216 for( i = 0; i < 3; i++ )
1218 if( haveCross[i] == 1 )
1220 for( j = i + 1; j < 4; j++ )
1222 if( haveCross[j] == 1)
1224 icvGetPieceLength(cross[i],cross[j],&distance);
1225 if( distance > maxDist )
1237 {/* We have cross */
1238 *start = cross[maxI];
1252 }/* GetCrossRectDirect */
1254 /*---------------------------------------------------------------------------------------*/
/* Projects a 3D point into the image: tmp = R*point + t, then applies the
   camera matrix and dehomogenizes by the third coordinate.  No guard against
   tmpVect1[2] == 0 is visible (point on the camera plane). */
1255 void icvProjectPointToImage( CvPoint3D64d point,
1256 CvMatr64d camMatr,CvMatr64d rotMatr,CvVect64d transVect,
1257 CvPoint2D64d* projPoint)
1263 icvMulMatrix_64d ( rotMatr,
1269 icvAddVector_64d ( tmpVect1, transVect,tmpVect2, 3);
1271 icvMulMatrix_64d ( camMatr,
1277 projPoint->x = tmpVect1[0] / tmpVect1[2];
1278 projPoint->y = tmpVect1[1] / tmpVect1[2];
1283 /*---------------------------------------------------------------------------------------*/
1284 /* Get quads for transform images */
/* Main rectification-geometry routine.  Pipeline visible in this extract:
   (1) build the relative transform and, via the 32f legacy helpers, the
   essential matrix -> fundamental matrix -> epipoles; (2) find the common
   area (icvGetCommonArea) and cut pieces (icvGetCutPiece) to produce the two
   warp quads plus per-image widths/heights; (3) a "new additional way":
   re-project quad edges between the images through the infinite homography
   (icvComputeeInfiniteProject1/2) and keep whichever quad pair spans the
   larger distance; (4) derive warpSize — though the final two assignments
   deliberately override it with the source image size ("just for test no
   warp", per the inline author note). */
1285 void icvGetQuadsTransform(
1289 CvVect64d transVect1,
1292 CvVect64d transVect2,
1297 CvPoint3D64d* epipole1,
1298 CvPoint3D64d* epipole2
1301 /* First compute fundamental matrix and epipoles */
1305 /* Compute epipoles and fundamental matrix using new functions */
1307 double convRotMatr[9];
1308 double convTransVect[3];
1310 icvCreateConvertMatrVect( rotMatr1,
1316 float convRotMatr_32f[9];
1317 float convTransVect_32f[3];
1319 icvCvt_64d_32f(convRotMatr,convRotMatr_32f,9);
1320 icvCvt_64d_32f(convTransVect,convTransVect_32f,3);
1322 /* We know R and t */
1323 /* Compute essential matrix */
1325 float fundMatr_32f[9];
1327 float camMatr1_32f[9];
1328 float camMatr2_32f[9];
1330 icvCvt_64d_32f(camMatr1,camMatr1_32f,9);
1331 icvCvt_64d_32f(camMatr2,camMatr2_32f,9);
1333 cvComputeEssentialMatrix( convRotMatr_32f,
1337 cvConvertEssential2Fundamental( essMatr,
1342 CvPoint3D32f epipole1_32f;
1343 CvPoint3D32f epipole2_32f;
1345 cvComputeEpipolesFromFundMatrix( fundMatr_32f,
1348 /* copy to 64d epipoles */
1349 epipole1->x = epipole1_32f.x;
1350 epipole1->y = epipole1_32f.y;
1351 epipole1->z = epipole1_32f.z;
1353 epipole2->x = epipole2_32f.x;
1354 epipole2->y = epipole2_32f.y;
1355 epipole2->z = epipole2_32f.z;
1357 /* Convert fundamental matrix */
1358 icvCvt_32f_64d(fundMatr_32f,fundMatr,9);
1366 icvGetCommonArea( imageSize,
1367 *epipole1,*epipole2,
1373 CvPoint2D64d point11, point12,point21, point22;
1374 double width1,width2;
1375 double height1,height2;
1376 double tmpHeight1,tmpHeight2;
1378 CvPoint2D64d epipole1_2d;
1379 CvPoint2D64d epipole2_2d;
1381 /* ----- Image 1 ----- */
1382 if( fabs(epipole1->z) < 1e-8 )
1386 epipole1_2d.x = epipole1->x / epipole1->z;
1387 epipole1_2d.y = epipole1->y / epipole1->z;
1389 icvGetCutPiece( coeff11,coeff12,
1396 /* Compute distance */
1397 icvGetPieceLength(point11,point21,&width1);
1398 icvGetPieceLength(point11,point12,&tmpHeight1);
1399 icvGetPieceLength(point21,point22,&tmpHeight2);
1400 height1 = MAX(tmpHeight1,tmpHeight2);
1402 quad1[0][0] = point11.x;
1403 quad1[0][1] = point11.y;
1405 quad1[1][0] = point21.x;
1406 quad1[1][1] = point21.y;
1408 quad1[2][0] = point22.x;
1409 quad1[2][1] = point22.y;
1411 quad1[3][0] = point12.x;
1412 quad1[3][1] = point12.y;
1414 /* ----- Image 2 ----- */
1415 if( fabs(epipole2->z) < 1e-8 )
1419 epipole2_2d.x = epipole2->x / epipole2->z;
1420 epipole2_2d.y = epipole2->y / epipole2->z;
1422 icvGetCutPiece( coeff21,coeff22,
1429 /* Compute distance */
1430 icvGetPieceLength(point11,point21,&width2);
1431 icvGetPieceLength(point11,point12,&tmpHeight1);
1432 icvGetPieceLength(point21,point22,&tmpHeight2);
1433 height2 = MAX(tmpHeight1,tmpHeight2);
1435 quad2[0][0] = point11.x;
1436 quad2[0][1] = point11.y;
1438 quad2[1][0] = point21.x;
1439 quad2[1][1] = point21.y;
1441 quad2[2][0] = point22.x;
1442 quad2[2][1] = point22.y;
1444 quad2[3][0] = point12.x;
1445 quad2[3][1] = point12.y;
1448 /*=======================================================*/
1449 /* This is a new additional way to compute quads. */
1450 /* We must correct quads */
1452 double convRotMatr[9];
1453 double convTransVect[3];
1455 double newQuad1[4][2];
1456 double newQuad2[4][2];
1459 icvCreateConvertMatrVect( rotMatr1,
1466 /* -------------Compute for first image-------------- */
1467 CvPoint2D32f pointb1;
1468 CvPoint2D32f pointe1;
1470 CvPoint2D32f pointb2;
1471 CvPoint2D32f pointe2;
1473 pointb1.x = (float)quad1[0][0];
1474 pointb1.y = (float)quad1[0][1];
1476 pointe1.x = (float)quad1[3][0];
1477 pointe1.y = (float)quad1[3][1];
1479 icvComputeeInfiniteProject1(convRotMatr,
1485 icvComputeeInfiniteProject1(convRotMatr,
1491 /* JUST TEST FOR POINT */
1493 /* Compute distances */
1496 double distOld,distNew;
1498 dxOld = quad2[1][0] - quad2[0][0];
1499 dyOld = quad2[1][1] - quad2[0][1];
1500 distOld = dxOld*dxOld + dyOld*dyOld;
1502 dxNew = quad2[1][0] - pointb2.x;
1503 dyNew = quad2[1][1] - pointb2.y;
1504 distNew = dxNew*dxNew + dyNew*dyNew;
1506 if( distNew > distOld )
1507 {/* Get new points for second quad */
1508 newQuad2[0][0] = pointb2.x;
1509 newQuad2[0][1] = pointb2.y;
1510 newQuad2[3][0] = pointe2.x;
1511 newQuad2[3][1] = pointe2.y;
1512 newQuad1[0][0] = quad1[0][0];
1513 newQuad1[0][1] = quad1[0][1];
1514 newQuad1[3][0] = quad1[3][0];
1515 newQuad1[3][1] = quad1[3][1];
1518 {/* Get new points for first quad */
1520 pointb2.x = (float)quad2[0][0];
1521 pointb2.y = (float)quad2[0][1];
1523 pointe2.x = (float)quad2[3][0];
1524 pointe2.y = (float)quad2[3][1];
1526 icvComputeeInfiniteProject2(convRotMatr,
1532 icvComputeeInfiniteProject2(convRotMatr,
1539 /* JUST TEST FOR POINT */
1541 newQuad2[0][0] = quad2[0][0];
1542 newQuad2[0][1] = quad2[0][1];
1543 newQuad2[3][0] = quad2[3][0];
1544 newQuad2[3][1] = quad2[3][1];
1546 newQuad1[0][0] = pointb1.x;
1547 newQuad1[0][1] = pointb1.y;
1548 newQuad1[3][0] = pointe1.x;
1549 newQuad1[3][1] = pointe1.y;
1552 /* -------------Compute for second image-------------- */
1553 pointb1.x = (float)quad1[1][0];
1554 pointb1.y = (float)quad1[1][1];
1556 pointe1.x = (float)quad1[2][0];
1557 pointe1.y = (float)quad1[2][1];
1559 icvComputeeInfiniteProject1(convRotMatr,
1565 icvComputeeInfiniteProject1(convRotMatr,
1571 /* Compute distances */
1573 dxOld = quad2[0][0] - quad2[1][0];
1574 dyOld = quad2[0][1] - quad2[1][1];
1575 distOld = dxOld*dxOld + dyOld*dyOld;
1577 dxNew = quad2[0][0] - pointb2.x;
1578 dyNew = quad2[0][1] - pointb2.y;
1579 distNew = dxNew*dxNew + dyNew*dyNew;
1581 if( distNew > distOld )
1582 {/* Get new points for second quad */
1583 newQuad2[1][0] = pointb2.x;
1584 newQuad2[1][1] = pointb2.y;
1585 newQuad2[2][0] = pointe2.x;
1586 newQuad2[2][1] = pointe2.y;
1587 newQuad1[1][0] = quad1[1][0];
1588 newQuad1[1][1] = quad1[1][1];
1589 newQuad1[2][0] = quad1[2][0];
1590 newQuad1[2][1] = quad1[2][1];
1593 {/* Get new points for first quad */
1595 pointb2.x = (float)quad2[1][0];
1596 pointb2.y = (float)quad2[1][1];
1598 pointe2.x = (float)quad2[2][0];
1599 pointe2.y = (float)quad2[2][1];
1601 icvComputeeInfiniteProject2(convRotMatr,
1607 icvComputeeInfiniteProject2(convRotMatr,
1613 newQuad2[1][0] = quad2[1][0];
1614 newQuad2[1][1] = quad2[1][1];
1615 newQuad2[2][0] = quad2[2][0];
1616 newQuad2[2][1] = quad2[2][1];
1618 newQuad1[1][0] = pointb1.x;
1619 newQuad1[1][1] = pointb1.y;
1620 newQuad1[2][0] = pointe1.x;
1621 newQuad1[2][1] = pointe1.y;
1626 /*-------------------------------------------------------------------------------*/
1628 /* Copy new quads to old quad */
1630 for( i = 0; i < 4; i++ )
1633 quad1[i][0] = newQuad1[i][0];
1634 quad1[i][1] = newQuad1[i][1];
1635 quad2[i][0] = newQuad2[i][0];
1636 quad2[i][1] = newQuad2[i][1];
1640 /*=======================================================*/
1642 double warpWidth,warpHeight;
1644 warpWidth = MAX(width1,width2);
1645 warpHeight = MAX(height1,height2);
1647 warpSize->width = (int)warpWidth;
1648 warpSize->height = (int)warpHeight;
1650 warpSize->width = cvRound(warpWidth-1);
1651 warpSize->height = cvRound(warpHeight-1);
/* NOTE(review): the next two assignments override the computed warp size
   with the raw image size — intentional per the author's comment below. */
1653 /* !!! by Valery Mosyagin. this lines added just for test no warp */
1654 warpSize->width = imageSize.width;
1655 warpSize->height = imageSize.height;
1661 /*---------------------------------------------------------------------------------------*/
/* 32f wrapper for icvGetQuadsTransform: converts camera matrices and the
   first pose to 64d, fixes the second pose at identity rotation and zero
   translation (since rotMatr1/transVect1 already hold the relative
   transform), delegates, then converts the resulting epipoles and
   fundamental matrix back to 32f for the caller. */
1663 static void icvGetQuadsTransformNew( CvSize imageSize,
1667 CvVect32f transVect1,
1672 CvPoint3D32f* epipole1,
1673 CvPoint3D32f* epipole2
1677 /* Convert camera matrix */
1678 double camMatr1_64d[9];
1679 double camMatr2_64d[9];
1680 double rotMatr1_64d[9];
1681 double transVect1_64d[3];
1682 double rotMatr2_64d[9];
1683 double transVect2_64d[3];
1684 double fundMatr_64d[9];
1685 CvPoint3D64d epipole1_64d;
1686 CvPoint3D64d epipole2_64d;
1688 icvCvt_32f_64d(camMatr1,camMatr1_64d,9);
1689 icvCvt_32f_64d(camMatr2,camMatr2_64d,9);
1690 icvCvt_32f_64d(rotMatr1,rotMatr1_64d,9);
1691 icvCvt_32f_64d(transVect1,transVect1_64d,3);
1693 /* Create vector and matrix */
/* Identity rotation, zero translation for the second (reference) camera. */
1695 rotMatr2_64d[0] = 1;
1696 rotMatr2_64d[1] = 0;
1697 rotMatr2_64d[2] = 0;
1698 rotMatr2_64d[3] = 0;
1699 rotMatr2_64d[4] = 1;
1700 rotMatr2_64d[5] = 0;
1701 rotMatr2_64d[6] = 0;
1702 rotMatr2_64d[7] = 0;
1703 rotMatr2_64d[8] = 1;
1705 transVect2_64d[0] = 0;
1706 transVect2_64d[1] = 0;
1707 transVect2_64d[2] = 0;
1709 icvGetQuadsTransform( imageSize,
1724 /* Convert epipoles */
1725 epipole1->x = (float)(epipole1_64d.x);
1726 epipole1->y = (float)(epipole1_64d.y);
1727 epipole1->z = (float)(epipole1_64d.z);
1729 epipole2->x = (float)(epipole2_64d.x);
1730 epipole2->y = (float)(epipole2_64d.y);
1731 epipole2->z = (float)(epipole2_64d.z);
1733 /* Convert fundamental matrix */
1734 icvCvt_64d_32f(fundMatr_64d,fundMatr,9);
1739 /*---------------------------------------------------------------------------------------*/
/* Struct-level wrapper for icvGetQuadsTransformNew: pulls image size, camera
   matrices, relative R/t from stereoCamera, receives warp size, quads,
   fundamental matrix and epipoles back, and copies the quads into
   stereoCamera->quad as CvPoint2D32f. */
1740 void icvGetQuadsTransformStruct( CvStereoCamera* stereoCamera)
1742 /* Wrapper for icvGetQuadsTransformNew */
1748 icvGetQuadsTransformNew( cvSize(cvRound(stereoCamera->camera[0]->imgSize[0]),cvRound(stereoCamera->camera[0]->imgSize[1])),
1749 stereoCamera->camera[0]->matrix,
1750 stereoCamera->camera[1]->matrix,
1751 stereoCamera->rotMatrix,
1752 stereoCamera->transVector,
1753 &(stereoCamera->warpSize),
1756 stereoCamera->fundMatr,
1757 &(stereoCamera->epipole[0]),
1758 &(stereoCamera->epipole[1])
1762 for( i = 0; i < 4; i++ )
1764 stereoCamera->quad[0][i] = cvPoint2D32f(quad1[i][0],quad1[i][1]);
1765 stereoCamera->quad[1][i] = cvPoint2D32f(quad2[i][0],quad2[i][1]);
1771 /*---------------------------------------------------------------------------------------*/
/* From the per-camera intrinsic/extrinsic parameters, derives the remaining
   stereo parameters: computes the relative rotation/translation between the
   two cameras (icvCreateConvertMatrVect), stores them as 32f in the
   stereoCamera struct, then computes quads/fundamental matrix/epipoles
   (icvGetQuadsTransformStruct) and the rest (icvComputeRestStereoParams). */
1772 void icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera)
1774 /* For given intrinsic and extrinsic parameters computes rest parameters
1775 ** such as fundamental matrix. warping coeffs, epipoles, ...
1779 /* compute rotate matrix and translate vector */
1783 double transVect1[3];
1784 double transVect2[3];
1786 double convRotMatr[9];
1787 double convTransVect[3];
1790 icvCvt_32f_64d(stereoCamera->camera[0]->rotMatr,rotMatr1,9);
1791 icvCvt_32f_64d(stereoCamera->camera[1]->rotMatr,rotMatr2,9);
1793 icvCvt_32f_64d(stereoCamera->camera[0]->transVect,transVect1,3);
1794 icvCvt_32f_64d(stereoCamera->camera[1]->transVect,transVect2,3);
1796 icvCreateConvertMatrVect( rotMatr1,
1803 /* copy to stereo camera params */
1804 icvCvt_64d_32f(convRotMatr,stereoCamera->rotMatrix,9);
1805 icvCvt_64d_32f(convTransVect,stereoCamera->transVector,3);
1808 icvGetQuadsTransformStruct(stereoCamera);
1809 icvComputeRestStereoParams(stereoCamera);
1814 /*---------------------------------------------------------------------------------------*/
1816 /* Get cut line for one image */
/* Computes the two "cut" lines bounding, inside the image rectangle, the
 * sector defined by areaLineCoef1/areaLineCoef2 through the epipole, and
 * returns their end points (point11/point12 on the near cut line,
 * point21/point22 on the far one). Lines are (a,b,c): a*x + b*y + c = 0. */
void icvGetCutPiece( CvVect64d areaLineCoef1,CvVect64d areaLineCoef2,
                     CvPoint2D64d epipole,
                     CvPoint2D64d* point11,CvPoint2D64d* point12,
                     CvPoint2D64d* point21,CvPoint2D64d* point22,
    /* Compute nearest cut line to epipole */
    /* Get corners inside sector */
    /* Collect all candidate point */
    CvPoint2D64d candPoints[8];   /* image corners + border crossings */
    CvPoint2D64d midPoint;
    /* Find middle line of sector */
    double midLine[3]={0,0,0};
    CvPoint2D64d pointOnLine1; pointOnLine1.x = pointOnLine1.y = 0;
    CvPoint2D64d pointOnLine2; pointOnLine2.x = pointOnLine2.y = 0;
    CvPoint2D64d start1,end1;
    /* Pick one in-image point on each sector boundary line. */
    icvGetCrossRectDirect( imageSize,
                           areaLineCoef1[0],areaLineCoef1[1],areaLineCoef1[2],
                           &start1,&end1,&res);
    pointOnLine1 = start1;
    icvGetCrossRectDirect( imageSize,
                           areaLineCoef2[0],areaLineCoef2[1],areaLineCoef2[2],
                           &start1,&end1,&res);
    pointOnLine2 = start1;
    /* Bisector of the sector angle, as a line through the epipole. */
    icvGetMiddleAnglePoint(epipole,pointOnLine1,pointOnLine2,&midPoint);
    icvGetCoefForPiece(epipole,midPoint,&midLine[0],&midLine[1],&midLine[2],&res);
    /* Test corner points */
    CvPoint2D64d cornerPoint;
    CvPoint2D64d tmpPoints[2];
    /* Keep each image corner that falls inside the sector. */
    icvTestPoint( cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res);
    candPoints[numPoints] = cornerPoint;
    cornerPoint.x = imageSize.width;
    icvTestPoint( cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res);
    candPoints[numPoints] = cornerPoint;
    cornerPoint.x = imageSize.width;
    cornerPoint.y = imageSize.height;
    icvTestPoint( cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res);
    candPoints[numPoints] = cornerPoint;
    cornerPoint.y = imageSize.height;
    icvTestPoint( cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res);
    candPoints[numPoints] = cornerPoint;
    /* Find cross line 1 with image border */
    icvGetCrossRectDirect( imageSize,
                           areaLineCoef1[0],areaLineCoef1[1],areaLineCoef1[2],
                           &tmpPoints[0], &tmpPoints[1],
    for( i = 0; i < res; i++ )
        candPoints[numPoints++] = tmpPoints[i];
    /* Find cross line 2 with image border */
    icvGetCrossRectDirect( imageSize,
                           areaLineCoef2[0],areaLineCoef2[1],areaLineCoef2[2],
                           &tmpPoints[0], &tmpPoints[1],
    for( i = 0; i < res; i++ )
        candPoints[numPoints++] = tmpPoints[i];
    return;/* Error. Not enought points */
    /* Project all points to middle line and get max and min */
    CvPoint2D64d projPoint;
    CvPoint2D64d minPoint; minPoint.x = minPoint.y = FLT_MAX;
    CvPoint2D64d maxPoint; maxPoint.x = maxPoint.y = -FLT_MAX;
    double minDist = 10000000;
    for( i = 0; i < numPoints; i++ )
        /* Rank candidates by distance from the epipole along the bisector. */
        icvProjectPointToDirect(candPoints[i], midLine, &projPoint);
        icvGetPieceLength(epipole,projPoint,&dist);
        minPoint = projPoint;
        maxPoint = projPoint;
    /* We know maximum and minimum points. Now we can compute cut lines */
    icvGetNormalDirect(midLine,minPoint,cutLine1);
    icvGetNormalDirect(midLine,maxPoint,cutLine2);
    /* Test for begin of line. */
    CvPoint2D64d tmpPoint2;
    /* Get cross with */
    icvGetCrossDirectDirect(areaLineCoef1,cutLine1,point11,&res);
    icvGetCrossDirectDirect(areaLineCoef2,cutLine1,point12,&res);
    icvGetCrossDirectDirect(areaLineCoef1,cutLine2,point21,&res);
    icvGetCrossDirectDirect(areaLineCoef2,cutLine2,point22,&res);
    /* Keep a consistent near/far ordering relative to the epipole side. */
    if( epipole.x > imageSize.width * 0.5 )
    {/* Need to change points */
        tmpPoint2 = *point11;
        *point11 = *point21;
        *point21 = tmpPoint2;
        tmpPoint2 = *point12;
        *point12 = *point22;
        *point22 = tmpPoint2;
1990 /*---------------------------------------------------------------------------------------*/
1991 /* Get middle angle */
/* Finds the point on segment point1-point2 lying on the bisector of the
 * angle at basePoint: scales each leg by the ratio of the two leg lengths
 * and intersects the resulting pieces. */
void icvGetMiddleAnglePoint( CvPoint2D64d basePoint,
                             CvPoint2D64d point1,CvPoint2D64d point2,
                             CvPoint2D64d* midPoint)
{/* !!! May be need to return error */
    icvGetPieceLength(basePoint,point1,&dist1);
    icvGetPieceLength(basePoint,point2,&dist2);
    CvPoint2D64d pointNew1;
    CvPoint2D64d pointNew2;
    /* NOTE(review): divides by dist1 -- degenerate if point1 == basePoint. */
    double alpha = dist2/dist1;
    pointNew1.x = basePoint.x + (1.0/alpha) * ( point2.x - basePoint.x );
    pointNew1.y = basePoint.y + (1.0/alpha) * ( point2.y - basePoint.y );
    pointNew2.x = basePoint.x + alpha * ( point1.x - basePoint.x );
    pointNew2.y = basePoint.y + alpha * ( point1.y - basePoint.y );
    icvGetCrossPiecePiece(point1,point2,pointNew1,pointNew2,midPoint,&res);
2017 /*---------------------------------------------------------------------------------------*/
2018 /* Get normal direct to direct in line */
2019 void icvGetNormalDirect(CvVect64d direct,CvPoint2D64d point,CvVect64d normDirect)
2021 normDirect[0] = direct[1];
2022 normDirect[1] = - direct[0];
2023 normDirect[2] = -(normDirect[0]*point.x + normDirect[1]*point.y);
2027 /*---------------------------------------------------------------------------------------*/
2028 CV_IMPL double icvGetVect(CvPoint2D64d basePoint,CvPoint2D64d point1,CvPoint2D64d point2)
2030 return (point1.x - basePoint.x)*(point2.y - basePoint.y) -
2031 (point2.x - basePoint.x)*(point1.y - basePoint.y);
2033 /*---------------------------------------------------------------------------------------*/
2034 /* Test for point in sector */
2035 /* Return 0 - point not inside sector */
2036 /* Return 1 - point inside sector */
/* Reports (through its trailing result parameter: 1 inside / 0 outside)
 * whether testPoint lies inside the sector with apex basePoint bounded by
 * line1 and line2. Works by projecting testPoint onto both lines and
 * comparing cross-product signs around the apex. */
void icvTestPoint( CvPoint2D64d testPoint,
                   CvVect64d line1,CvVect64d line2,
                   CvPoint2D64d basePoint,
    CvPoint2D64d point1,point2;
    /* Feet of the perpendiculars from the query point onto each line. */
    icvProjectPointToDirect(testPoint,line1,&point1);
    icvProjectPointToDirect(testPoint,line2,&point2);
    /* Matching signs mean testPoint is on the sector side of line1. */
    double sign1 = icvGetVect(basePoint,point1,point2);
    double sign2 = icvGetVect(basePoint,point1,testPoint);
    if( sign1 * sign2 > 0 )
    {/* Correct for first line */
        sign2 = icvGetVect(basePoint,point2,testPoint);
        if( sign1 * sign2 > 0 )
        {/* Correct for both lines */
2070 /*---------------------------------------------------------------------------------------*/
2071 /* Project point to line */
2072 void icvProjectPointToDirect( CvPoint2D64d point,CvVect64d lineCoeff,
2073 CvPoint2D64d* projectPoint)
2075 double a = lineCoeff[0];
2076 double b = lineCoeff[1];
2078 double det = 1.0 / ( a*a + b*b );
2079 double delta = a*point.y - b*point.x;
2081 projectPoint->x = ( -a*lineCoeff[2] - b * delta ) * det;
2082 projectPoint->y = ( -b*lineCoeff[2] + a * delta ) * det ;
2087 /*---------------------------------------------------------------------------------------*/
2088 /* Get distance from point to direction */
2089 void icvGetDistanceFromPointToDirect( CvPoint2D64d point,CvVect64d lineCoef,double*dist)
2091 CvPoint2D64d tmpPoint;
2092 icvProjectPointToDirect(point,lineCoef,&tmpPoint);
2093 double dx = point.x - tmpPoint.x;
2094 double dy = point.y - tmpPoint.y;
2095 *dist = sqrt(dx*dx+dy*dy);
2098 /*---------------------------------------------------------------------------------------*/
/* Returns an image matching src in size with the requested depth and channel
 * count: reuses dst when it already matches, otherwise releases it,
 * allocates a fresh image and sets a full-image ROI on it. */
CV_IMPL IplImage* icvCreateIsometricImage( IplImage* src, IplImage* dst,
                                           int desired_depth, int desired_num_channels )
    src_size.width = src->width;
    src_size.height = src->height;
    CvSize dst_size = src_size;
    dst_size.width = dst->width;
    dst_size.height = dst->height;
    /* Reallocate when dst is absent or any property mismatches. */
    if( !dst || dst->depth != desired_depth ||
        dst->nChannels != desired_num_channels ||
        dst_size.width != src_size.width ||
        dst_size.height != src_size.height )
        cvReleaseImage( &dst );
        dst = cvCreateImage( src_size, desired_depth, desired_num_channels );
        CvRect rect = cvRect(0,0,src_size.width,src_size.height);
        cvSetImageROI( dst, rect );
/* Widening copy of `size` floats into doubles.
 * Returns an error status on invalid arguments, CV_OK otherwise. */
icvCvt_32f_64d( float *src, double *dst, int size )
    return CV_NULLPTR_ERR;   /* presumably guarded by a NULL-pointer check */
    return CV_BADRANGE_ERR;  /* presumably guarded by a size <= 0 check */
    for( t = 0; t < size; t++ )
        dst[t] = (double) (src[t]);
2148 /*======================================================================================*/
2149 /* Type conversion double -> float */
/* Narrowing copy of `size` doubles into floats (precision is lost).
 * Returns an error status on invalid arguments, CV_OK otherwise. */
icvCvt_64d_32f( double *src, float *dst, int size )
    return CV_NULLPTR_ERR;   /* presumably guarded by a NULL-pointer check */
    return CV_BADRANGE_ERR;  /* presumably guarded by a size <= 0 check */
    for( t = 0; t < size; t++ )
        dst[t] = (float) (src[t]);
2168 /*----------------------------------------------------------------------------------*/
2171 /* Find line which cross frame by line(a,b,c) */
/* Clips the infinite line a*x + b*y + c = 0 against the image rectangle:
 * intersects it with the four borders, then returns as start/end the
 * crossings closest to and farthest from the image center. */
static void FindLineForEpiline( CvSize imageSize,
                                float a,float b,float c,
                                CvPoint2D32f *start,CvPoint2D32f *end,
    CvPoint2D32f frameBeg;
    CvPoint2D32f frameEnd;
    CvPoint2D32f cross[4];
    /* Intersect the line with each of the four image borders in turn. */
    frameEnd.x = (float)(imageSize.width);
    haveCross[0] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[0]);
    frameBeg.x = (float)(imageSize.width);
    frameEnd.x = (float)(imageSize.width);
    frameEnd.y = (float)(imageSize.height);
    haveCross[1] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[1]);
    frameBeg.x = (float)(imageSize.width);
    frameBeg.y = (float)(imageSize.height);
    frameEnd.y = (float)(imageSize.height);
    haveCross[2] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[2]);
    frameBeg.y = (float)(imageSize.height);
    haveCross[3] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[3]);
    float minDist = (float)(INT_MAX);
    float maxDist = (float)(INT_MIN);
    double midPointX = imageSize.width / 2.0;
    double midPointY = imageSize.height / 2.0;
    /* Rank valid crossings by squared distance to the image center. */
    for( n = 0; n < 4; n++ )
        if( haveCross[n] > 0 )
            dist = (float)((midPointX - cross[n].x)*(midPointX - cross[n].x) +
                           (midPointY - cross[n].y)*(midPointY - cross[n].y));
            if( dist < minDist )
            if( dist > maxDist )
    /* Only emit a segment when two distinct crossings were found. */
    if( minN >= 0 && maxN >= 0 && (minN != maxN) )
        *start = cross[minN];
2262 /*----------------------------------------------------------------------------------*/
/* Classifies the epipole's location relative to the image rectangle by
 * testing its coordinates against the image bounds (corner / edge / inside
 * cases) and derives the corresponding sector angle points.
 * NOTE(review): most of the branch bodies are not visible here -- the exact
 * outputs per placement should be confirmed against the full source. */
static int GetAngleLinee( CvPoint2D32f epipole, CvSize imageSize,CvPoint2D32f point1,CvPoint2D32f point2)
    float width = (float)(imageSize.width);
    float height = (float)(imageSize.height);
    /* Get crosslines with image corners */
    /* Find four lines */
    CvPoint2D32f pa,pb,pc,pd;
    /* We can compute points for angle */
    /* Test for place section */
    else if( y > height )
    else if ( x > width )
    else if ( y > height )
    else if( y > height )
    {/* 5 - point in the image */
2353 /*--------------------------------------------------------------------------------------*/
/* Computes the 3x3 perspective (homography) coefficients mapping srcQuad to
 * dstQuad by solving the standard 8x8 linear system built from the four
 * point correspondences. */
static void icvComputePerspectiveCoeffs(const CvPoint2D32f srcQuad[4],const CvPoint2D32f dstQuad[4],double coeffs[3][3])
{/* Computes perspective coeffs for transformation from src to dst quad */
    CV_FUNCNAME( "icvComputePerspectiveCoeffs" );
    /* Two equation rows per correspondence. */
    for( i = 0; i < 4; i++ )
        double x = dstQuad[i].x;
        double y = dstQuad[i].y;
        /* NOTE(review): both (x,y) and (X,Y) read dstQuad here; for a
         * src->dst mapping one pair is expected to come from srcQuad --
         * confirm against the full source / upstream history. */
        double X = dstQuad[i].x;
        double Y = dstQuad[i].y;
        double* a = A + i*16;
    CvMat matA = cvMat( 8, 8, CV_64F, A );
    CvMat matInvA = cvMat( 8, 8, CV_64F, invA );
    CvMat matB = cvMat( 8, 1, CV_64F, b );
    CvMat matX = cvMat( 8, 1, CV_64F, c );
    /* Solve A*c = b via the pseudo-inverse. */
    CV_CALL( cvPseudoInverse( &matA, &matInvA ));
    CV_CALL( cvMatMulAdd( &matInvA, &matB, 0, &matX ));
    /* Repack the eight solved coefficients into the 3x3 matrix. */
    coeffs[0][0] = c[0];
    coeffs[0][1] = c[1];
    coeffs[0][2] = c[2];
    coeffs[1][0] = c[3];
    coeffs[1][1] = c[4];
    coeffs[1][2] = c[5];
    coeffs[2][0] = c[6];
    coeffs[2][1] = c[7];
2439 /*--------------------------------------------------------------------------------------*/
/* Fills rectMapX/rectMapY (both CV_32FC1, same size) with the source
 * coordinates obtained by applying the 3x3 homography `c` to every
 * destination pixel (j,i). */
CV_IMPL void cvComputePerspectiveMap(const double c[3][3], CvArr* rectMapX, CvArr* rectMapY )
    CV_FUNCNAME( "cvComputePerspectiveMap" );
    CvMat stubx, *mapx = (CvMat*)rectMapX;
    CvMat stuby, *mapy = (CvMat*)rectMapY;
    CV_CALL( mapx = cvGetMat( mapx, &stubx ));
    CV_CALL( mapy = cvGetMat( mapy, &stuby ));
    if( CV_MAT_TYPE( mapx->type ) != CV_32FC1 || CV_MAT_TYPE( mapy->type ) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "" );
    size = cvGetMatSize(mapx);
    /* Homography must be normalized so that c[2][2] == 1 (the loop below
     * hard-codes the +1 in the denominator). */
    assert( fabs(c[2][2] - 1.) < FLT_EPSILON );
    for( i = 0; i < size.height; i++ )
        float* mx = (float*)(mapx->data.ptr + mapx->step*i);
        float* my = (float*)(mapy->data.ptr + mapy->step*i);
        for( j = 0; j < size.width; j++ )
            /* Perspective divide; (x,y) are then stored into mx/my. */
            double w = 1./(c[2][0]*j + c[2][1]*i + 1.);
            double x = (c[0][0]*j + c[0][1]*i + c[0][2])*w;
            double y = (c[1][0]*j + c[1][1]*i + c[1][2])*w;
2480 /*--------------------------------------------------------------------------------------*/
/* Computes the perspective transform between the image rectangle of `size`
 * and the target quad, writing the 3x3 coefficients into `matrix` and,
 * when rectMap is supplied, filling it (CV_32FC2, same size) with the
 * mapped coordinate of every pixel. */
CV_IMPL void cvInitPerspectiveTransform( CvSize size, const CvPoint2D32f quad[4], double matrix[3][3],
/* Computes Perspective Transform coeffs and map if need
   for given image size and given result quad */
    CV_FUNCNAME( "cvInitPerspectiveTransform" );
    CvMat mapstub, *map = (CvMat*)rectMap;
    CV_CALL( map = cvGetMat( map, &mapstub ));
    if( CV_MAT_TYPE( map->type ) != CV_32FC2 )
        CV_ERROR( CV_StsUnsupportedFormat, "" );
    if( map->width != size.width || map->height != size.height )
        CV_ERROR( CV_StsUnmatchedSizes, "" );
    /* Source rectangle corners in clockwise order. */
    pt[0] = cvPoint2D32f( 0, 0 );
    pt[1] = cvPoint2D32f( size.width, 0 );
    pt[2] = cvPoint2D32f( size.width, size.height );
    pt[3] = cvPoint2D32f( 0, size.height );
    /* Two equation rows per corner correspondence. */
    for( i = 0; i < 4; i++ )
        double x = quad[i].x;
        double y = quad[i].y;
        /* NOTE(review): (x,y) and (X,Y) both read quad[i]; one pair is
         * expected to come from pt[i] -- confirm in the full source. */
        double X = quad[i].x;
        double Y = quad[i].y;
        double* a = A + i*16;
    CvMat matA = cvMat( 8, 8, CV_64F, A );
    CvMat matInvA = cvMat( 8, 8, CV_64F, invA );
    CvMat matB = cvMat( 8, 1, CV_64F, b );
    CvMat matX = cvMat( 8, 1, CV_64F, c );
    /* Solve A*c = b via the pseudo-inverse. */
    CV_CALL( cvPseudoInverse( &matA, &matInvA ));
    CV_CALL( cvMatMulAdd( &matInvA, &matB, 0, &matX ));
    /* Repack the eight solved coefficients into the 3x3 matrix. */
    matrix[0][0] = c[0];
    matrix[0][1] = c[1];
    matrix[0][2] = c[2];
    matrix[1][0] = c[3];
    matrix[1][1] = c[4];
    matrix[1][2] = c[5];
    matrix[2][0] = c[6];
    matrix[2][1] = c[7];
    /* Optionally rasterize the transform into the per-pixel map. */
    for( i = 0; i < size.height; i++ )
        CvPoint2D32f* maprow = (CvPoint2D32f*)(map->data.ptr + map->step*i);
        for( j = 0; j < size.width; j++ )
            /* Perspective divide with c[8] implicitly 1. */
            double w = 1./(c[6]*j + c[7]*i + 1.);
            double x = (c[0]*j + c[1]*i + c[2])*w;
            double y = (c[3]*j + c[4]*i + c[5])*w;
            maprow[j].x = (float)x;
            maprow[j].y = (float)y;
2597 /*-----------------------------------------------------------------------*/
2598 /* Compute projected infinite point for second image if first image point is known */
/* Given a pixel in image 1, back-projects it to a viewing direction
 * (point at infinity), rotates that direction into camera 2's frame with
 * the inverse rotation, and projects it into image 2. */
void icvComputeeInfiniteProject1( CvMatr64d rotMatr,
                                  CvPoint2D32f point1,
                                  CvPoint2D32f* point2)
    icvInvertMatrix_64d(camMatr1,3,invMatr1);
    p1[0] = (double)(point1.x);
    p1[1] = (double)(point1.y);
    /* Ray direction in camera 1: invK1 * (x, y, 1)^T. */
    icvMulMatrix_64d( invMatr1,
    /* Rotation matrices are orthonormal: transpose == inverse. */
    icvTransposeMatrix_64d( rotMatr, 3, 3, invR );
    /* Change system 1 to system 2 */
    icvMulMatrix_64d( invR,
    /* Now we can project this point to image 2 */
    icvMulMatrix_64d( camMatr2,
    /* Perspective divide back to pixel coordinates. */
    point2->x = (float)(projP[0] / projP[2]);
    point2->y = (float)(projP[1] / projP[2]);
2645 /*-----------------------------------------------------------------------*/
2646 /* Compute projected infinite point for second image if first image point is known */
/* Mirror of icvComputeeInfiniteProject1: given a pixel in image 2,
 * back-projects it to a direction, rotates it into camera 1's frame with
 * the forward rotation, and projects it into image 1. */
void icvComputeeInfiniteProject2( CvMatr64d rotMatr,
                                  CvPoint2D32f* point1,
                                  CvPoint2D32f point2)
    icvInvertMatrix_64d(camMatr2,3,invMatr2);
    p2[0] = (double)(point2.x);
    p2[1] = (double)(point2.y);
    /* Ray direction in camera 2: invK2 * (x, y, 1)^T. */
    icvMulMatrix_64d( invMatr2,
    /* Change system 1 to system 2 */
    icvMulMatrix_64d( rotMatr,
    /* Now we can project this point to image 2 */
    icvMulMatrix_64d( camMatr1,
    /* Perspective divide back to pixel coordinates. */
    point1->x = (float)(projP[0] / projP[2]);
    point1->y = (float)(projP[1] / projP[2]);
2691 /* Select best R and t for given cameras, points, ... */
2692 /* For both cameras */
/* From the per-image calibration extrinsics of both cameras, evaluates each
 * candidate relative pose [R|t] (one per calibration image) against every
 * image pair by reprojection error, and returns through bestRotMatr /
 * bestTransVect the pose with the smallest average error. */
static int icvSelectBestRt( int numImages,
                            CvPoint2D32f* imagePoints1,
                            CvPoint2D32f* imagePoints2,
                            CvPoint3D32f* objectPoints,
                            CvMatr32f cameraMatrix1,
                            CvVect32f distortion1,
                            CvMatr32f rotMatrs1,
                            CvVect32f transVects1,
                            CvMatr32f cameraMatrix2,
                            CvVect32f distortion2,
                            CvMatr32f rotMatrs2,
                            CvVect32f transVects2,
                            CvMatr32f bestRotMatr,
                            CvVect32f bestTransVect
    /* Need to convert input data 32 -> 64 */
    CvPoint3D64d* objectPoints_64d;
    double* rotMatrs1_64d;
    double* rotMatrs2_64d;
    double* transVects1_64d;
    double* transVects2_64d;
    double cameraMatrix1_64d[9];
    double cameraMatrix2_64d[9];
    double distortion1_64d[4];
    double distortion2_64d[4];
    /* allocate memory for 64d data */
    /* Total point count across all images. */
    for(int i = 0; i < numImages; i++ )
        totalNum += numPoints[i];
    /* NOTE(review): these calloc results are not checked before use. */
    objectPoints_64d = (CvPoint3D64d*)calloc(totalNum,sizeof(CvPoint3D64d));
    rotMatrs1_64d = (double*)calloc(numImages,sizeof(double)*9);
    rotMatrs2_64d = (double*)calloc(numImages,sizeof(double)*9);
    transVects1_64d = (double*)calloc(numImages,sizeof(double)*3);
    transVects2_64d = (double*)calloc(numImages,sizeof(double)*3);
    /* Convert input data to 64d */
    icvCvt_32f_64d((float*)objectPoints, (double*)objectPoints_64d, totalNum*3);
    icvCvt_32f_64d(rotMatrs1, rotMatrs1_64d, numImages*9);
    icvCvt_32f_64d(rotMatrs2, rotMatrs2_64d, numImages*9);
    icvCvt_32f_64d(transVects1, transVects1_64d, numImages*3);
    icvCvt_32f_64d(transVects2, transVects2_64d, numImages*3);
    /* Convert to arrays */
    icvCvt_32f_64d(cameraMatrix1, cameraMatrix1_64d, 9);
    icvCvt_32f_64d(cameraMatrix2, cameraMatrix2_64d, 9);
    icvCvt_32f_64d(distortion1, distortion1_64d, 4);
    icvCvt_32f_64d(distortion2, distortion2_64d, 4);
    /* for each R and t compute error for image pair */
    errors = (float*)calloc(numImages*numImages,sizeof(float));
    /* NOTE(review): buffers allocated above appear to leak on this
     * early out-of-memory return -- confirm in the full source. */
    return CV_OUTOFMEM_ERR;
    for( currRt = 0; currRt < numImages; currRt++ )
        for(currImagePair = 0; currImagePair < numImages; currImagePair++ )
            /* For current R,t R,t compute relative position of cameras */
            double convRotMatr[9];
            double convTransVect[3];
            icvCreateConvertMatrVect( rotMatrs1_64d + currRt*9,
                                      transVects1_64d + currRt*3,
                                      rotMatrs2_64d + currRt*9,
                                      transVects2_64d + currRt*3,
            /* Project points using relative position of cameras */
            double convRotMatr2[9];
            double convTransVect2[3];
            /* Identity rotation + zero translation (reference frame). */
            convRotMatr2[0] = 1;
            convRotMatr2[1] = 0;
            convRotMatr2[2] = 0;
            convRotMatr2[3] = 0;
            convRotMatr2[4] = 1;
            convRotMatr2[5] = 0;
            convRotMatr2[6] = 0;
            convRotMatr2[7] = 0;
            convRotMatr2[8] = 1;
            convTransVect2[0] = 0;
            convTransVect2[1] = 0;
            convTransVect2[2] = 0;
            /* Compute error for given pair and Rt */
            /* We must project points to image and compute error */
            CvPoint2D64d* projImagePoints1;
            CvPoint2D64d* projImagePoints2;
            CvPoint3D64d* points1;
            CvPoint3D64d* points2;
            numberPnt = numPoints[currImagePair];
            projImagePoints1 = (CvPoint2D64d*)calloc(numberPnt,sizeof(CvPoint2D64d));
            projImagePoints2 = (CvPoint2D64d*)calloc(numberPnt,sizeof(CvPoint2D64d));
            points1 = (CvPoint3D64d*)calloc(numberPnt,sizeof(CvPoint3D64d));
            points2 = (CvPoint3D64d*)calloc(numberPnt,sizeof(CvPoint3D64d));
            /* Transform object points to first camera position */
            for(int i = 0; i < numberPnt; i++ )
                /* Create second camera point */
                CvPoint3D64d tmpPoint;
                tmpPoint.x = (double)(objectPoints[i].x);
                tmpPoint.y = (double)(objectPoints[i].y);
                tmpPoint.z = (double)(objectPoints[i].z);
                icvConvertPointSystem( tmpPoint,
                                       rotMatrs2_64d + currImagePair*9,
                                       transVects2_64d + currImagePair*3);
                /* Create first camera point using R, t */
                icvConvertPointSystem( points2[i],
                CvPoint3D64d tmpPoint2 = { 0, 0, 0 };
                icvConvertPointSystem( tmpPoint,
                                       rotMatrs1_64d + currImagePair*9,
                                       transVects1_64d + currImagePair*3);
                /* The lines below are the tail of a commented-out
                 * consistency check (note the closing comment marker). */
                dx = tmpPoint2.x - points1[i].x;
                dy = tmpPoint2.y - points1[i].y;
                dz = tmpPoint2.z - points1[i].z;
                err = sqrt(dx*dx + dy*dy + dz*dz);*/
            /* Reproject the object points into both images with the
             * candidate pose currRt. */
            cvProjectPointsSimple( numPoints[currImagePair],
                                   objectPoints_64d + begPoint,
                                   rotMatrs1_64d + currRt*9,
                                   transVects1_64d + currRt*3,
            cvProjectPointsSimple( numPoints[currImagePair],
                                   objectPoints_64d + begPoint,
                                   rotMatrs2_64d + currRt*9,
                                   transVects2_64d + currRt*3,
            /* Project with no translate and no rotation */
            double nodist[4] = {0,0,0,0};
            cvProjectPointsSimple( numPoints[currImagePair],
            cvProjectPointsSimple( numPoints[currImagePair],
            cvProjectPointsSimple( numPoints[currImagePair],
            cvProjectPointsSimple( numPoints[currImagePair],
            /* points are projected. Compute error */
            for( currPoint = 0; currPoint < numberPnt; currPoint++ )
                /* Pixel residual in image 1... */
                dx1 = imagePoints1[begPoint+currPoint].x - projImagePoints1[currPoint].x;
                dy1 = imagePoints1[begPoint+currPoint].y - projImagePoints1[currPoint].y;
                len1 = sqrt(dx1*dx1 + dy1*dy1);
                /* ...and in image 2. */
                dx2 = imagePoints2[begPoint+currPoint].x - projImagePoints2[currPoint].x;
                dy2 = imagePoints2[begPoint+currPoint].y - projImagePoints2[currPoint].y;
                len2 = sqrt(dx2*dx2 + dy2*dy2);
            /* Mean per-point error in each image, averaged across the pair. */
            err1 /= (float)(numberPnt);
            err2 /= (float)(numberPnt);
            err = (err1+err2) * 0.5;
            begPoint += numberPnt;
            /* Set this error to */
            errors[numImages*currImagePair+currRt] = (float)err;
            free(projImagePoints1);
            free(projImagePoints2);
    /* Just select R and t with minimal average error */
    float minError = 0;/* Just for no warnings. Uses 'first' flag. */
    for( currRt = 0; currRt < numImages; currRt++ )
        for(currImagePair = 0; currImagePair < numImages; currImagePair++ )
            avErr += errors[numImages*currImagePair+currRt];
        avErr /= (float)(numImages);
        if( avErr < minError )
    /* Convert the winning absolute poses into the relative [R|t] outputs. */
    double bestRotMatr_64d[9];
    double bestTransVect_64d[3];
    icvCreateConvertMatrVect( rotMatrs1_64d + bestnumRt * 9,
                              transVects1_64d + bestnumRt * 3,
                              rotMatrs2_64d + bestnumRt * 9,
                              transVects2_64d + bestnumRt * 3,
    icvCvt_64d_32f(bestRotMatr_64d,bestRotMatr,9);
    icvCvt_64d_32f(bestTransVect_64d,bestTransVect,3);
3009 /* ----------------- Stereo calibration functions --------------------- */
3011 float icvDefinePointPosition(CvPoint2D32f point1,CvPoint2D32f point2,CvPoint2D32f point)
3013 float ax = point2.x - point1.x;
3014 float ay = point2.y - point1.y;
3016 float bx = point.x - point1.x;
3017 float by = point.y - point1.y;
3019 return (ax*by - ay*bx);
3022 /* Convert function for stereo warping */
/* Maps a point through the 3x3 warp `coeffs` in the requested direction.
 * Returns CV_BADFACTOR_ERR for an invalid direction flag (and, presumably,
 * when the denominator is near zero -- the elided branches should confirm). */
int icvConvertWarpCoordinates(double coeffs[3][3],
                              CvPoint2D32f* cameraPoint,
                              CvPoint2D32f* warpPoint,
    if( direction == CV_WARP_TO_CAMERA )
    {/* convert from camera image to warped image coordinates */
        /* Forward projective map: divide by the third homogeneous coord. */
        det = (coeffs[2][0] * x + coeffs[2][1] * y + coeffs[2][2]);
        if( fabs(det) > 1e-8 )
            cameraPoint->x = (float)((coeffs[0][0] * x + coeffs[0][1] * y + coeffs[0][2]) / det);
            cameraPoint->y = (float)((coeffs[1][0] * x + coeffs[1][1] * y + coeffs[1][2]) / det);
    else if( direction == CV_CAMERA_TO_WARP )
    {/* convert from warped image to camera image coordinates */
        /* Inverse map solved in closed form via a 2x2 Cramer's rule. */
        det = (coeffs[2][0]*x-coeffs[0][0])*(coeffs[2][1]*y-coeffs[1][1])-(coeffs[2][1]*x-coeffs[0][1])*(coeffs[2][0]*y-coeffs[1][0]);
        if( fabs(det) > 1e-8 )
            warpPoint->x = (float)(((coeffs[0][2]-coeffs[2][2]*x)*(coeffs[2][1]*y-coeffs[1][1])-(coeffs[2][1]*x-coeffs[0][1])*(coeffs[1][2]-coeffs[2][2]*y))/det);
            warpPoint->y = (float)(((coeffs[2][0]*x-coeffs[0][0])*(coeffs[1][2]-coeffs[2][2]*y)-(coeffs[0][2]-coeffs[2][2]*x)*(coeffs[2][0]*y-coeffs[1][0]))/det);
    return CV_BADFACTOR_ERR;
3061 /* Compute stereo params using some camera params */
3062 /* by Valery Mosyagin. int ComputeRestStereoParams(StereoParams *stereoparams) */
/* Completes a CvStereoCamera whose intrinsics/extrinsics are already set:
 * rectification quads, perspective warp coefficients for both cameras,
 * warped-image borders, and per-scanline epiline coefficients. */
int icvComputeRestStereoParams(CvStereoCamera *stereoparams)
    icvGetQuadsTransformStruct(stereoparams);
    /* Warp coefficients mapping each camera image onto its quad. */
    cvInitPerspectiveTransform( stereoparams->warpSize,
                                stereoparams->quad[0],
                                stereoparams->coeffs[0],
    cvInitPerspectiveTransform( stereoparams->warpSize,
                                stereoparams->quad[1],
                                stereoparams->coeffs[1],
    /* Create border for warped images */
    CvPoint2D32f corns[4];
    /* Image corners (corns[0] presumably (0,0); its init is above). */
    corns[1].x = (float)(stereoparams->camera[0]->imgSize[0]-1);
    corns[2].x = (float)(stereoparams->camera[0]->imgSize[0]-1);
    corns[2].y = (float)(stereoparams->camera[0]->imgSize[1]-1);
    corns[3].y = (float)(stereoparams->camera[0]->imgSize[1]-1);
    for(int i = 0; i < 4; i++ )
        /* For first camera */
        icvConvertWarpCoordinates( stereoparams->coeffs[0],
                                   stereoparams->border[0]+i,
        /* For second camera */
        icvConvertWarpCoordinates( stereoparams->coeffs[1],
                                   stereoparams->border[1]+i,
    /* Corners of the warped image rectangle. */
    CvPoint2D32f warpPoints[4];
    warpPoints[0] = cvPoint2D32f(0,0);
    warpPoints[1] = cvPoint2D32f(stereoparams->warpSize.width-1,0);
    warpPoints[2] = cvPoint2D32f(stereoparams->warpSize.width-1,stereoparams->warpSize.height-1);
    warpPoints[3] = cvPoint2D32f(0,stereoparams->warpSize.height-1);
    CvPoint2D32f camPoints1[4];
    CvPoint2D32f camPoints2[4];
    /* Map the warped corners back into each camera image. */
    for( int i = 0; i < 4; i++ )
        icvConvertWarpCoordinates(stereoparams->coeffs[0],
        icvConvertWarpCoordinates(stereoparams->coeffs[1],
    /* Allocate memory for scanlines coeffs */
    stereoparams->lineCoeffs = (CvStereoLineCoeff*)calloc(stereoparams->warpSize.height,sizeof(CvStereoLineCoeff));
    /* Compute coeffs for epilines */
    icvComputeCoeffForStereo( stereoparams);
    /* all coeffs are known */
3146 /*-------------------------------------------------------------------------------------------*/
/* Full stereo calibration: calibrates each camera independently with
 * cvCalibrateCamera, selects the best relative [R|t] across all images,
 * then derives the remaining stereo parameters. */
int icvStereoCalibration( int numImages,
                          CvPoint2D32f* imagePoints1,
                          CvPoint2D32f* imagePoints2,
                          CvPoint3D32f* objectPoints,
                          CvStereoCamera* stereoparams
    /* Firstly we must calibrate both cameras */
    /* Alocate memory for data */
    /* Allocate for translate vectors */
    /* NOTE(review): allocation results are not checked here. */
    transVects1 = (float*)calloc(numImages,sizeof(float)*3);
    transVects2 = (float*)calloc(numImages,sizeof(float)*3);
    rotMatrs1 = (float*)calloc(numImages,sizeof(float)*9);
    rotMatrs2 = (float*)calloc(numImages,sizeof(float)*9);
    /* Calibrate first camera */
    cvCalibrateCamera( numImages,
                       stereoparams->camera[0]->distortion,
                       stereoparams->camera[0]->matrix,
    /* Calibrate second camera */
    cvCalibrateCamera( numImages,
                       stereoparams->camera[1]->distortion,
                       stereoparams->camera[1]->matrix,
    /* Cameras are calibrated */
    /* Record the image size (stored as float[2]) on both cameras. */
    stereoparams->camera[0]->imgSize[0] = (float)imageSize.width;
    stereoparams->camera[0]->imgSize[1] = (float)imageSize.height;
    stereoparams->camera[1]->imgSize[0] = (float)imageSize.width;
    stereoparams->camera[1]->imgSize[1] = (float)imageSize.height;
    /* Pick the relative pose minimizing average reprojection error. */
    icvSelectBestRt( numImages,
                     stereoparams->camera[0]->matrix,
                     stereoparams->camera[0]->distortion,
                     stereoparams->camera[1]->matrix,
                     stereoparams->camera[1]->distortion,
                     stereoparams->rotMatrix,
                     stereoparams->transVector
    icvComputeRestStereoParams(stereoparams);
3232 /* Find line from epipole */
/* Clips the ray defined by the epipole and `point` against the image
 * rectangle: intersects it with the four borders and returns as start/end
 * the crossings nearest to and farthest from the epipole. */
static void FindLine(CvPoint2D32f epipole,CvSize imageSize,CvPoint2D32f point,CvPoint2D32f *start,CvPoint2D32f *end)
    CvPoint2D32f frameBeg;
    CvPoint2D32f frameEnd;
    CvPoint2D32f cross[4];
    /* Intersect the epipole->point ray with each of the four borders. */
    frameEnd.x = (float)(imageSize.width);
    haveCross[0] = icvGetCrossPieceVector(frameBeg,frameEnd,epipole,point,&cross[0]);
    frameBeg.x = (float)(imageSize.width);
    frameEnd.x = (float)(imageSize.width);
    frameEnd.y = (float)(imageSize.height);
    haveCross[1] = icvGetCrossPieceVector(frameBeg,frameEnd,epipole,point,&cross[1]);
    frameBeg.x = (float)(imageSize.width);
    frameBeg.y = (float)(imageSize.height);
    frameEnd.y = (float)(imageSize.height);
    haveCross[2] = icvGetCrossPieceVector(frameBeg,frameEnd,epipole,point,&cross[2]);
    frameBeg.y = (float)(imageSize.height);
    haveCross[3] = icvGetCrossPieceVector(frameBeg,frameEnd,epipole,point,&cross[3]);
    float minDist = (float)(INT_MAX);
    float maxDist = (float)(INT_MIN);
    /* Rank valid crossings by squared distance to the epipole. */
    for( n = 0; n < 4; n++ )
        if( haveCross[n] > 0 )
            dist = (epipole.x - cross[n].x)*(epipole.x - cross[n].x) +
                   (epipole.y - cross[n].y)*(epipole.y - cross[n].y);
            if( dist < minDist )
            if( dist > maxDist )
    /* Only emit a segment when two distinct crossings were found. */
    if( minN >= 0 && maxN >= 0 && (minN != maxN) )
        *start = cross[minN];
3314 /* Find line which cross frame by line(a,b,c) */
/* Clips the infinite line a*x + b*y + c = 0 against the image rectangle,
 * returning as start/end the border crossings closest to and farthest from
 * the image center. (Near-duplicate of the earlier FindLineForEpiline
 * overload with the extra output parameter.) */
static void FindLineForEpiline(CvSize imageSize,float a,float b,float c,CvPoint2D32f *start,CvPoint2D32f *end)
    CvPoint2D32f frameBeg;
    CvPoint2D32f frameEnd;
    CvPoint2D32f cross[4];
    /* Intersect the line with each of the four image borders in turn. */
    frameEnd.x = (float)(imageSize.width);
    haveCross[0] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[0]);
    frameBeg.x = (float)(imageSize.width);
    frameEnd.x = (float)(imageSize.width);
    frameEnd.y = (float)(imageSize.height);
    haveCross[1] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[1]);
    frameBeg.x = (float)(imageSize.width);
    frameBeg.y = (float)(imageSize.height);
    frameEnd.y = (float)(imageSize.height);
    haveCross[2] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[2]);
    frameBeg.y = (float)(imageSize.height);
    haveCross[3] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[3]);
    float minDist = (float)(INT_MAX);
    float maxDist = (float)(INT_MIN);
    double midPointX = imageSize.width / 2.0;
    double midPointY = imageSize.height / 2.0;
    /* Rank valid crossings by squared distance to the image center. */
    for( n = 0; n < 4; n++ )
        if( haveCross[n] > 0 )
            dist = (float)((midPointX - cross[n].x)*(midPointX - cross[n].x) +
                           (midPointY - cross[n].y)*(midPointY - cross[n].y));
            if( dist < minDist )
            if( dist > maxDist )
    /* Only emit a segment when two distinct crossings were found. */
    if( minN >= 0 && maxN >= 0 && (minN != maxN) )
        *start = cross[minN];
/* Intersects two line SEGMENTS via Cramer's rule. Both parameters alpha and
 * betta must fall in [0,1] for the crossing to lie on both segments; the
 * crossing (when valid) is written to *cross. Return value indicates
 * whether a valid crossing was found. */
static int GetCrossLines(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f p2_start,CvPoint2D32f p2_end,CvPoint2D32f *cross)
    double ex1,ey1,ex2,ey2;
    double px1,py1,px2,py2;
    double delA,delB,delX,delY;
    /* Main determinant; zero means the segments are parallel. */
    del = (ex1-ex2)*(py2-py1)+(ey2-ey1)*(px2-px1);
    delA = (px1-ex1)*(py1-py2) + (ey1-py1)*(px1-px2);
    delB = (ex1-px1)*(ey1-ey2) + (py1-ey1)*(ex1-ex2);
    betta = -delB / del;
    /* Crossing must lie within both segments. */
    if( alpha < 0 || alpha > 1.0 || betta < 0 || betta > 1.0)
    delX = (ex1-ex2)*(py1*(px1-px2)-px1*(py1-py2))+
           (px1-px2)*(ex1*(ey1-ey2)-ey1*(ex1-ex2));
    delY = (ey1-ey2)*(px1*(py1-py2)-py1*(px1-px2))+
           (py1-py2)*(ey1*(ex1-ex2)-ex1*(ey1-ey2));
    cross->x = (float)( delX / del);
    cross->y = (float)(-delY / del);
/* Intersects the segment p1_start-p1_end with the infinite line through
 * v2_start-v2_end (only alpha, the parameter on the segment, is range
 * checked -- betta is deliberately left unchecked, see the commented-out
 * lines). Writes the crossing to *cross when it exists. */
int icvGetCrossPieceVector(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f v2_start,CvPoint2D32f v2_end,CvPoint2D32f *cross)
    double ex1 = p1_start.x;
    double ey1 = p1_start.y;
    double ex2 = p1_end.x;
    double ey2 = p1_end.y;
    double px1 = v2_start.x;
    double py1 = v2_start.y;
    double px2 = v2_end.x;
    double py2 = v2_end.y;
    /* Main determinant; zero means the directions are parallel. */
    double del = (ex1-ex2)*(py2-py1)+(ey2-ey1)*(px2-px1);
    double delA = (px1-ex1)*(py1-py2) + (ey1-py1)*(px1-px2);
    //double delB = (ex1-px1)*(ey1-ey2) + (py1-ey1)*(ex1-ex2);
    double alpha = delA / del;
    //double betta = -delB / del;
    /* Crossing must lie within the first segment. */
    if( alpha < 0 || alpha > 1.0 )
    delX = (ex1-ex2)*(py1*(px1-px2)-px1*(py1-py2))+
           (px1-px2)*(ex1*(ey1-ey2)-ey1*(ex1-ex2));
    double delY = (ey1-ey2)*(px1*(py1-py2)-py1*(px1-px2))+
           (py1-py2)*(ey1*(ex1-ex2)-ex1*(ey1-ey2));
    cross->x = (float)( delX / del);
    cross->y = (float)(-delY / del);
/* Intersect the segment [p1,p2] with the infinite line a*x + b*y + c = 0.
 * Returns -1 when the segment does not cross the line; otherwise the
 * crossing point is written to *cross.
 * NOTE(review): elided chunk -- the opening brace, the copies of p1/p2
 * into px*/py*, the declarations of del/alpha/X/Y and the final return
 * are not visible here. */
3489 int icvGetCrossLineDirect(CvPoint2D32f p1,CvPoint2D32f p2,float a,float b,float c,CvPoint2D32f* cross)
3492 double delX,delY,delA;
3494 double px1,px2,py1,py2;
/* Dot product of the segment direction with the line normal (a,b);
 * zero would mean the segment is parallel to the line. */
3503 del = a * (px2 - px1) + b * (py2-py1);
/* Numerator for the crossing parameter; alpha (assignment elided) is
 * presumably delA/del, the position along [p1,p2] -- TODO confirm. */
3509 delA = - c - a*px1 - b*py1;
/* Crossing must fall inside the segment. */
3512 if( alpha < 0 || alpha > 1.0 )
3514 return -1;/* no cross */
/* Cramer numerators for the crossing coordinates; the divisions by del
 * producing X and Y are elided from this view. */
3517 delX = b * (py1*(px1-px2) - px1*(py1-py2)) + c * (px1-px2);
3518 delY = a * (px1*(py1-py2) - py1*(px1-px2)) + c * (py1-py2);
3523 cross->x = (float)X;
3524 cross->y = (float)Y;
/* Compute the epipoles of a stereo pair from each camera's intrinsics
 * (camMatr*), rotation (rotMatr*) and translation (transVect*).
 * For each camera it forms P = K*R and p = K*t, then computes
 *   e1 = p1 - P1*inv(P2)*p2   and   e2 = p2 - P2*inv(P1)*p1
 * as homogeneous 3-vectors written through the cepipole* wrappers.
 * NOTE(review): elided chunk -- the tail of the parameter list (the
 * epipole1/epipole2 output buffers, used at orig line 3545/3546), most of
 * the cvmFree cleanup calls and the return statement are not visible. */
3530 static int cvComputeEpipoles( CvMatr32f camMatr1, CvMatr32f camMatr2,
3531 CvMatr32f rotMatr1, CvMatr32f rotMatr2,
3532 CvVect32f transVect1,CvVect32f transVect2,
/* Wrap the caller's raw float buffers in CvMat headers (no copies). */
3539 CvMat ccamMatr1 = cvMat(3,3,CV_MAT32F,camMatr1);
3540 CvMat ccamMatr2 = cvMat(3,3,CV_MAT32F,camMatr2);
3541 CvMat crotMatr1 = cvMat(3,3,CV_MAT32F,rotMatr1);
3542 CvMat crotMatr2 = cvMat(3,3,CV_MAT32F,rotMatr2);
3543 CvMat ctransVect1 = cvMat(3,1,CV_MAT32F,transVect1);
3544 CvMat ctransVect2 = cvMat(3,1,CV_MAT32F,transVect2);
3545 CvMat cepipole1 = cvMat(3,1,CV_MAT32F,epipole1);
3546 CvMat cepipole2 = cvMat(3,1,CV_MAT32F,epipole2);
/* Temporaries: each is heap-backed via cvmAlloc and must be released
 * with cvmFree before returning (most cvmFree calls are elided below). */
3549 CvMat cmatrP1 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&cmatrP1);
3550 CvMat cmatrP2 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&cmatrP2);
3551 CvMat cvectp1 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&cvectp1);
3552 CvMat cvectp2 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&cvectp2);
3553 CvMat ctmpF1 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&ctmpF1);
3554 CvMat ctmpM1 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&ctmpM1);
3555 CvMat ctmpM2 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&ctmpM2);
3556 CvMat cinvP1 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&cinvP1);
3557 CvMat cinvP2 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&cinvP2);
3558 CvMat ctmpMatr = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&ctmpMatr);
3559 CvMat ctmpVect1 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&ctmpVect1);
3560 CvMat ctmpVect2 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&ctmpVect2);
3561 CvMat cmatrF1 = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&cmatrF1);
3562 CvMat ctmpF = cvMat(3,3,CV_MAT32F,0); cvmAlloc(&ctmpF);
3563 CvMat ctmpE1 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&ctmpE1);
3564 CvMat ctmpE2 = cvMat(3,1,CV_MAT32F,0); cvmAlloc(&ctmpE2);
/* First camera: P1 = K1*R1, inv(P1), p1 = K1*t1. */
3567 cvmMul( &ccamMatr1, &crotMatr1, &cmatrP1);
3568 cvmInvert( &cmatrP1,&cinvP1 );
3569 cvmMul( &ccamMatr1, &ctransVect1, &cvectp1 );
3571 /* Compute second */
3572 cvmMul( &ccamMatr2, &crotMatr2, &cmatrP2 );
3573 cvmInvert( &cmatrP2,&cinvP2 );
3574 cvmMul( &ccamMatr2, &ctransVect2, &cvectp2 );
/* Epipole 1: e1 = p1 - (P1*inv(P2))*p2. */
3576 cvmMul( &cmatrP1, &cinvP2, &ctmpM1);
3577 cvmMul( &ctmpM1, &cvectp2, &ctmpVect1);
3578 cvmSub( &cvectp1,&ctmpVect1,&ctmpE1);
/* Epipole 2: e2 = p2 - (P2*inv(P1))*p1. */
3580 cvmMul( &cmatrP2, &cinvP1, &ctmpM2);
3581 cvmMul( &ctmpM2, &cvectp1, &ctmpVect2);
3582 cvmSub( &cvectp2, &ctmpVect2, &ctmpE2);
/* Copy results to the output vectors (scale factor 1.0 == plain copy). */
3587 cvmScale(&ctmpE1,&cepipole1,1.0);
3588 cvmScale(&ctmpE2,&cepipole2,1.0);
/* Cleanup (the cvmFree calls for the other fourteen temporaries are
 * elided from this view -- verify they exist in the full file). */
3600 cvmFree(&ctmpVect1);
3601 cvmFree(&ctmpVect2);
3608 }/* cvComputeEpipoles */
3611 /* Compute epipoles for fundamental matrix */
/* Extract the two epipoles of a fundamental matrix F via SVD (A = U W V').
 * Since a valid F has rank 2, the singular vectors belonging to the
 * smallest singular value span the null spaces of F' and F; those are
 * returned as homogeneous 3-vectors in epipole1 and epipole2.
 * NOTE(review): elided chunk -- the opening brace and the return
 * statement are not visible here. */
3612 int cvComputeEpipolesFromFundMatrix(CvMatr32f fundMatr,
3613 CvPoint3D32f* epipole1,
3614 CvPoint3D32f* epipole2)
3616 /* Decompose fundamental matrix using SVD ( A = U W V') */
3617 CvMat fundMatrC = cvMat(3,3,CV_MAT32F,fundMatr);
3619 CvMat* matrW = cvCreateMat(3,3,CV_MAT32F);
3620 CvMat* matrU = cvCreateMat(3,3,CV_MAT32F);
3621 CvMat* matrV = cvCreateMat(3,3,CV_MAT32F);
3623 /* From svd we need just last vector of U and V or last row from U' and V' */
3624 /* We get transposed matrices U and V */
/* CV_SVD_U_T|CV_SVD_V_T makes cvSVD return U' and V' directly, so the
 * last ROW (flat elements 6..8 of a 3x3) is the wanted null vector. */
3625 cvSVD(&fundMatrC,matrW,matrU,matrV,CV_SVD_V_T|CV_SVD_U_T);
3627 /* Get last row from U' and compute epipole1 */
3628 epipole1->x = matrU->data.fl[6];
3629 epipole1->y = matrU->data.fl[7];
3630 epipole1->z = matrU->data.fl[8];
3632 /* Get last row from V' and compute epipole2 */
3633 epipole2->x = matrV->data.fl[6];
3634 epipole2->y = matrV->data.fl[7];
3635 epipole2->z = matrV->data.fl[8];
/* Release the SVD output matrices. */
3637 cvReleaseMat(&matrW);
3638 cvReleaseMat(&matrU);
3639 cvReleaseMat(&matrV);
/* Convert an essential matrix E to a fundamental matrix:
 *   F = inv(K1') * E * inv(K2)
 * then normalise F so its bottom-right element F[2][2] equals 1.
 * NOTE(review): elided chunk -- the fundMatr output parameter (used at
 * orig line 3650), the declaration of `scale` and the return statement
 * are not visible here.
 * NOTE(review): the normalisation divides by fundMatrC.data.fl[8] with
 * no zero check; a (near-)zero F[2][2] yields inf/NaN -- confirm the
 * callers guarantee a non-degenerate result. */
3643 int cvConvertEssential2Fundamental( CvMatr32f essMatr,
3645 CvMatr32f cameraMatr1,
3646 CvMatr32f cameraMatr2)
3647 {/* Fund = inv(CM1') * Ess * inv(CM2) */
/* Wrap the caller's raw buffers in CvMat headers (no copies). */
3649 CvMat essMatrC = cvMat(3,3,CV_MAT32F,essMatr);
3650 CvMat fundMatrC = cvMat(3,3,CV_MAT32F,fundMatr);
3651 CvMat cameraMatr1C = cvMat(3,3,CV_MAT32F,cameraMatr1);
3652 CvMat cameraMatr2C = cvMat(3,3,CV_MAT32F,cameraMatr2);
3654 CvMat* invCM2 = cvCreateMat(3,3,CV_MAT32F);
3655 CvMat* tmpMatr = cvCreateMat(3,3,CV_MAT32F);
3656 CvMat* invCM1T = cvCreateMat(3,3,CV_MAT32F);
/* invCM1T = inv(K1'); tmpMatr = inv(K1')*E; F = tmpMatr*inv(K2).
 * (tmpMatr is reused: first holds K1', then the partial product.) */
3658 cvTranspose(&cameraMatr1C,tmpMatr);
3659 cvInvert(tmpMatr,invCM1T);
3660 cvmMul(invCM1T,&essMatrC,tmpMatr);
3661 cvInvert(&cameraMatr2C,invCM2);
3662 cvmMul(tmpMatr,invCM2,&fundMatrC);
3664 /* Scale fundamental matrix */
3666 scale = 1.0/fundMatrC.data.fl[8];
3667 cvConvertScale(&fundMatrC,&fundMatrC,scale);
/* Release temporaries. */
3669 cvReleaseMat(&invCM2);
3670 cvReleaseMat(&tmpMatr);
3671 cvReleaseMat(&invCM1T);
3676 /* Compute essential matrix */
3678 int cvComputeEssentialMatrix( CvMatr32f rotMatr,
3679 CvMatr32f transVect,
3684 /* Make antisymmetric matrix from transpose vector */
3686 transMatr[1] = - transVect[2];
3687 transMatr[2] = transVect[1];
3689 transMatr[3] = transVect[2];
3691 transMatr[5] = - transVect[0];
3693 transMatr[6] = - transVect[1];
3694 transMatr[7] = transVect[0];
3697 icvMulMatrix_32f(transMatr,3,3,rotMatr,3,3,essMatr);