--- /dev/null
+//Yannick Verdie 2010\r
+\r
+//--- Please read me: ---\r
+\r
+//1). This demo is mainly based on work from Javier Barandiaran Martirena\r
+//See this page http://opencv.willowgarage.com/wiki/Posit.\r
+//2). This is a demo to illustrate how to use **OpenGL Callback**.\r
+//3). You need Qt binding to compile this sample with OpenGL support enabled.\r
//4). The feature detection is very basic and could be greatly improved (simple thresholding tuned for this specific video), but that is beside the point of 2).
+\r
+\r
+#include <iostream>\r
+#include <vector>\r
+\r
+#include <opencv/highgui.h>\r
+#include <gl/GL.h>\r
+#include <opencv/cxcore.h>\r
+#include <opencv/cv.h>\r
+\r
+\r
+#define FOCAL_LENGTH 600\r
+#define CUBE_SIZE 10\r
+\r
+using namespace std;\r
+using namespace cv;\r
+\r
+void renderCube(float size)\r
+{\r
+ glBegin(GL_QUADS);\r
+ // Front Face\r
+ glNormal3f( 0.0f, 0.0f, 1.0f);\r
+ glVertex3f( 0.0f, 0.0f, 0.0f);\r
+ glVertex3f( size, 0.0f, 0.0f);\r
+ glVertex3f( size, size, 0.0f);\r
+ glVertex3f( 0.0f, size, 0.0f);\r
+ // Back Face\r
+ glNormal3f( 0.0f, 0.0f,-1.0f);\r
+ glVertex3f( 0.0f, 0.0f, size);\r
+ glVertex3f( 0.0f, size, size);\r
+ glVertex3f( size, size, size);\r
+ glVertex3f( size, 0.0f, size); \r
+ // Top Face\r
+ glNormal3f( 0.0f, 1.0f, 0.0f);\r
+ glVertex3f( 0.0f, size, 0.0f);\r
+ glVertex3f( size, size, 0.0f);\r
+ glVertex3f( size, size, size);\r
+ glVertex3f( 0.0f, size, size);\r
+ // Bottom Face\r
+ glNormal3f( 0.0f,-1.0f, 0.0f);\r
+ glVertex3f( 0.0f, 0.0f, 0.0f);\r
+ glVertex3f( 0.0f, 0.0f, size);\r
+ glVertex3f( size, 0.0f, size);\r
+ glVertex3f( size, 0.0f, 0.0f);\r
+ // Right face\r
+ glNormal3f( 1.0f, 0.0f, 0.0f);\r
+ glVertex3f( size, 0.0f, 0.0f);\r
+ glVertex3f( size, 0.0f, size);\r
+ glVertex3f( size, size, size);\r
+ glVertex3f( size, size, 0.0f);\r
+ // Left Face\r
+ glNormal3f(-1.0f, 0.0f, 0.0f);\r
+ glVertex3f( 0.0f, 0.0f, 0.0f);\r
+ glVertex3f( 0.0f, size, 0.0f);\r
+ glVertex3f( 0.0f, size, size);\r
+ glVertex3f( 0.0f, 0.0f, size);\r
+ glEnd();\r
+}\r
+\r
+\r
+void on_opengl(void* param)\r
+{\r
+ //Draw the object with the estimated pose\r
+ glLoadIdentity();\r
+ glScalef( 1.0f, 1.0f, -1.0f);\r
+ glMultMatrixf( (float*)param );\r
+ glEnable( GL_LIGHTING );\r
+ glEnable( GL_LIGHT0 );\r
+ glEnable( GL_BLEND );\r
+ glBlendFunc(GL_SRC_ALPHA, GL_ONE);\r
+ renderCube( CUBE_SIZE );\r
+ glDisable(GL_BLEND);\r
+ glDisable( GL_LIGHTING );\r
+}\r
+\r
+void initPOSIT(std::vector<CvPoint3D32f> *modelPoints)\r
+{\r
+ //Create the model pointss\r
+ modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, 0.0f)); //The first must be (0,0,0)\r
+ modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, CUBE_SIZE));\r
+ modelPoints->push_back(cvPoint3D32f(CUBE_SIZE, 0.0f, 0.0f));\r
+ modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));\r
+}\r
+\r
+void foundCorners(vector<CvPoint2D32f> *srcImagePoints,IplImage* source, IplImage* grayImage)\r
+{\r
+ cvCvtColor(source,grayImage,CV_RGB2GRAY);\r
+ cvSmooth( grayImage, grayImage,CV_GAUSSIAN,11);\r
+ cvNormalize(grayImage, grayImage, 0, 255, CV_MINMAX);\r
+ cvThreshold( grayImage, grayImage, 26, 255, CV_THRESH_BINARY_INV);//25\r
+\r
+ Mat MgrayImage = grayImage;\r
+ //For debug\r
+ //MgrayImage = MgrayImage.clone();//deep copy\r
+ vector<vector<Point> > contours;\r
+ vector<Vec4i> hierarchy;\r
+ findContours(MgrayImage, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);\r
+\r
+ Point p;\r
+ vector<CvPoint2D32f> srcImagePoints_temp(4,cvPoint2D32f(0,0));\r
+\r
+ if (contours.size() == srcImagePoints_temp.size())\r
+ {\r
+\r
+ for(int i = 0 ; i<contours.size(); i++ )\r
+ {\r
+\r
+ p.x = p.y = 0;\r
+\r
+ for(int j = 0 ; j<contours[i].size(); j++ )\r
+ p+=contours[i][j];\r
+\r
+ srcImagePoints_temp.at(i)=cvPoint2D32f(float(p.x)/contours[i].size(),float(p.y)/contours[i].size());\r
+ }\r
+\r
+ //Need to keep the same order\r
+ //> y = 0\r
+ //> x = 1\r
+ //< x = 2\r
+ //< y = 3\r
+\r
+ //get point 0;\r
+ int index = 0;\r
+ for(int i = 1 ; i<srcImagePoints_temp.size(); i++ )\r
+ {\r
+ if (srcImagePoints_temp.at(i).y > srcImagePoints_temp.at(index).y)\r
+ index = i;\r
+ }\r
+ srcImagePoints->at(0) = srcImagePoints_temp.at(index);\r
+\r
+ //get point 1;\r
+ index = 0;\r
+ for(int i = 1 ; i<srcImagePoints_temp.size(); i++ )\r
+ {\r
+ if (srcImagePoints_temp.at(i).x > srcImagePoints_temp.at(index).x)\r
+ index = i;\r
+ }\r
+ srcImagePoints->at(1) = srcImagePoints_temp.at(index);\r
+\r
+ //get point 2;\r
+ index = 0;\r
+ for(int i = 1 ; i<srcImagePoints_temp.size(); i++ )\r
+ {\r
+ if (srcImagePoints_temp.at(i).x < srcImagePoints_temp.at(index).x)\r
+ index = i;\r
+ }\r
+ srcImagePoints->at(2) = srcImagePoints_temp.at(index);\r
+\r
+ //get point 3;\r
+ index = 0;\r
+ for(int i = 1 ; i<srcImagePoints_temp.size(); i++ )\r
+ {\r
+ if (srcImagePoints_temp.at(i).y < srcImagePoints_temp.at(index).y)\r
+ index = i;\r
+ }\r
+ srcImagePoints->at(3) = srcImagePoints_temp.at(index);\r
+\r
+ Mat Msource = source;\r
+ stringstream ss;\r
+ for(int i = 0 ; i<srcImagePoints_temp.size(); i++ )\r
+ {\r
+ ss<<i;\r
+ circle(Msource,srcImagePoints->at(i),5,CV_RGB(255,0,0));\r
+ putText( Msource, ss.str(), srcImagePoints->at(i),CV_FONT_HERSHEY_SIMPLEX,1,CV_RGB(255,0,0));\r
+ ss.str("");\r
+\r
+ //new coordinate system in the middle of the frame and reversed (camera coordinate system)\r
+ srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints_temp.at(i).x-source->width/2,source->height/2-srcImagePoints_temp.at(i).y);\r
+ }\r
+ }\r
+\r
+}\r
+\r
+void createOpenGLMatrixFrom(float *posePOSIT,const CvMatr32f &rotationMatrix, const CvVect32f &translationVector)\r
+{\r
+\r
+\r
+ //coordinate system returned is relative to the first 3D input point \r
+ for (int f=0; f<3; f++)\r
+ {\r
+ for (int c=0; c<3; c++)\r
+ {\r
+ posePOSIT[c*4+f] = rotationMatrix[f*3+c]; //transposed\r
+ }\r
+ } \r
+ posePOSIT[3] = 0.0;\r
+ posePOSIT[7] = 0.0; \r
+ posePOSIT[11] = 0.0;\r
+ posePOSIT[12] = translationVector[0];\r
+ posePOSIT[13] = translationVector[1]; \r
+ posePOSIT[14] = translationVector[2];\r
+ posePOSIT[15] = 1.0; \r
+}\r
+\r
+int main(int argc, char *argv[])\r
+{\r
+ CvCapture* video = cvCaptureFromFile("cube4.avi");\r
+ CV_Assert(video);\r
+\r
+ IplImage* source = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,3);\r
+ IplImage* grayImage = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,1);\r
+\r
+ cvNamedWindow("original",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);\r
+ cvNamedWindow("POSIT",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);\r
+ displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear). This demo is only to illustrate how to use OpenGL callback.\n -- Press ESC to exit.", 10000);\r
+ //For debug\r
+ //cvNamedWindow("tempGray",CV_WINDOW_AUTOSIZE);\r
+ float OpenGLMatrix[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};\r
+ cvCreateOpenGLCallback("POSIT",on_opengl,OpenGLMatrix);\r
+\r
+ vector<CvPoint3D32f> modelPoints;\r
+ initPOSIT(&modelPoints);\r
+\r
+ //Create the POSIT object with the model points\r
+ CvPOSITObject* positObject = cvCreatePOSITObject( &modelPoints[0], (int)modelPoints.size() );\r
+\r
+ CvMatr32f rotation_matrix = new float[9];\r
+ CvVect32f translation_vector = new float[3]; \r
+ CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1.0e-4f);\r
+\r
+ vector<CvPoint2D32f> srcImagePoints(4,cvPoint2D32f(0,0));\r
+\r
+\r
+ while(cvWaitKey(33) != 27)\r
+ {\r
+ source=cvQueryFrame(video);\r
+ cvShowImage("original",source);\r
+\r
+ foundCorners(&srcImagePoints,source,grayImage);\r
+ cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );\r
+ createOpenGLMatrixFrom(OpenGLMatrix,rotation_matrix,translation_vector);\r
+\r
+ cvShowImage("POSIT",source);\r
+ //For debug\r
+ //cvShowImage("tempGray",grayImage);\r
+\r
+ if (cvGetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO)>0.99)\r
+ cvSetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO,0);\r
+ }\r
+\r
+ cvDestroyAllWindows();\r
+ cvReleaseImage(&grayImage);\r
+ cvReleaseCapture(&video);\r
+ cvReleasePOSITObject(&positObject);\r
+\r
+ return 0;\r
+}
\ No newline at end of file