// samples/cpp/Qt_sample/main.cpp
//Yannick Verdie 2010

//--- Please read help() below: ---

#include <iostream>
#include <vector>

#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/legacy/compat.hpp>
#include <opencv2/calib3d/calib3d_c.h>

#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>

#if defined WIN32 || defined _WIN32 || defined WINCE
    #include <windows.h>
    #undef small
    #undef min
    #undef max
    #undef abs
#endif

#ifdef __APPLE__
    #include <OpenGL/gl.h>
#else
    #include <GL/gl.h>
#endif

using namespace std;
using namespace cv;

static void help()
{
    cout << "\nThis demo demonstrates the use of the Qt enhanced version of the highgui GUI interface\n"
            "  and dang if it doesn't throw in the use of the POSIT 3D tracking algorithm too\n"
            "It works off of the video: cube4.avi\n"
            "Using OpenCV version " << CV_VERSION << "\n\n"
" 1) This demo is mainly based on work from Javier Barandiaran Martirena.\n"
"    See this page: http://code.opencv.org/projects/opencv/wiki/Posit.\n"
" 2) This is a demo to illustrate how to use the **OpenGL Callback**.\n"
" 3) You need the Qt binding to compile this sample with OpenGL support enabled.\n"
" 4) The feature detection is very basic and could be greatly improved\n"
"    (basic thresholding tuned for this specific video), but see 2).\n"
" 5) THANKS TO Google Summer of Code 2010 for supporting this work!\n" << endl;
}
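
//A rough build sketch (an assumption, not part of the original sample): with an OpenCV
//build that has Qt and OpenGL support enabled and is visible to pkg-config, something like
//    g++ main.cpp -o qt_posit_sample `pkg-config --cflags --libs opencv` -lGL
//should work on Linux; adjust the package name and the OpenGL library for your platform.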

#define FOCAL_LENGTH 600
#define CUBE_SIZE 10
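//FOCAL_LENGTH is the focal length, in pixels, that cvPOSIT assumes for the camera
//(a fixed value used here rather than a calibrated one). CUBE_SIZE is the edge length
//of the model cube, in the same arbitrary units as the model points and the translation
//returned by POSIT.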

static void renderCube(float size)
{
    glBegin(GL_QUADS);
    // Front Face
    glNormal3f( 0.0f, 0.0f, 1.0f);
    glVertex3f( 0.0f,  0.0f,  0.0f);
    glVertex3f( size,  0.0f,  0.0f);
    glVertex3f( size,  size,  0.0f);
    glVertex3f( 0.0f,  size,  0.0f);
    // Back Face
    glNormal3f( 0.0f, 0.0f,-1.0f);
    glVertex3f( 0.0f,  0.0f, size);
    glVertex3f( 0.0f,  size, size);
    glVertex3f( size,  size, size);
    glVertex3f( size,  0.0f, size);
    // Top Face
    glNormal3f( 0.0f, 1.0f, 0.0f);
    glVertex3f( 0.0f,  size,  0.0f);
    glVertex3f( size,  size,  0.0f);
    glVertex3f( size,  size, size);
    glVertex3f( 0.0f,  size, size);
    // Bottom Face
    glNormal3f( 0.0f,-1.0f, 0.0f);
    glVertex3f( 0.0f,  0.0f,  0.0f);
    glVertex3f( 0.0f,  0.0f, size);
    glVertex3f( size,  0.0f, size);
    glVertex3f( size,  0.0f,  0.0f);
    // Right Face
    glNormal3f( 1.0f, 0.0f, 0.0f);
    glVertex3f( size,  0.0f, 0.0f);
    glVertex3f( size,  0.0f, size);
    glVertex3f( size,  size, size);
    glVertex3f( size,  size, 0.0f);
    // Left Face
    glNormal3f(-1.0f, 0.0f, 0.0f);
    glVertex3f( 0.0f,  0.0f, 0.0f);
    glVertex3f( 0.0f,  size, 0.0f);
    glVertex3f( 0.0f,  size, size);
    glVertex3f( 0.0f,  0.0f, size);
    glEnd();
}


static void on_opengl(void* param)
{
    //Draw the object with the estimated pose
    glLoadIdentity();
    glScalef( 1.0f, 1.0f, -1.0f);
    glMultMatrixf( (float*)param );
    glEnable( GL_LIGHTING );
    glEnable( GL_LIGHT0 );
    glEnable( GL_BLEND );
    glBlendFunc(GL_SRC_ALPHA, GL_ONE);
    renderCube( CUBE_SIZE );
    glDisable(GL_BLEND);
    glDisable( GL_LIGHTING );
}
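//Note on the glScalef(1, 1, -1) above: POSIT expresses the pose in a camera frame whose
//Z axis points into the scene, while OpenGL's default eye space looks down -Z, so the
//Z axis is flipped before the pose matrix is applied.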

static void initPOSIT(std::vector<CvPoint3D32f> *modelPoints)
{
    //Create the model points
    modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, 0.0f)); //The first must be (0,0,0)
    modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, CUBE_SIZE));
    modelPoints->push_back(cvPoint3D32f(CUBE_SIZE, 0.0f, 0.0f));
    modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));
}
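//Note: cvPOSIT needs at least 4 non-coplanar model points, and the first one is the
//origin of the object frame; the cube corner at the origin plus its three neighbours
//satisfy both conditions.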

static void foundCorners(vector<CvPoint2D32f> *srcImagePoints, IplImage* source, IplImage* grayImage)
{
    cvCvtColor(source, grayImage, CV_BGR2GRAY); //frames from VideoCapture are BGR
    cvSmooth( grayImage, grayImage, CV_GAUSSIAN, 11 );
    cvNormalize(grayImage, grayImage, 0, 255, CV_MINMAX);
    cvThreshold( grayImage, grayImage, 26, 255, CV_THRESH_BINARY_INV );//25

    Mat MgrayImage = cv::cvarrToMat(grayImage);
    //For debug
    //MgrayImage = MgrayImage.clone();//deep copy
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(MgrayImage, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE);

    Point p;
    vector<CvPoint2D32f> srcImagePoints_temp(4, cvPoint2D32f(0,0));

    if (contours.size() == srcImagePoints_temp.size())
    {
        for(size_t i = 0 ; i<contours.size(); i++ )
        {
            p.x = p.y = 0;

            for(size_t j = 0 ; j<contours[i].size(); j++ )
                p += contours[i][j];

            srcImagePoints_temp.at(i) = cvPoint2D32f(float(p.x)/contours[i].size(), float(p.y)/contours[i].size());
        }

        //Need to keep the same order
        //> y = 0
        //> x = 1
        //< x = 2
        //< y = 3

        //get point 0;
        size_t index = 0;
        for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
        {
            if (srcImagePoints_temp.at(i).y > srcImagePoints_temp.at(index).y)
                index = i;
        }
        srcImagePoints->at(0) = srcImagePoints_temp.at(index);

        //get point 1;
        index = 0;
        for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
        {
            if (srcImagePoints_temp.at(i).x > srcImagePoints_temp.at(index).x)
                index = i;
        }
        srcImagePoints->at(1) = srcImagePoints_temp.at(index);

        //get point 2;
        index = 0;
        for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
        {
            if (srcImagePoints_temp.at(i).x < srcImagePoints_temp.at(index).x)
                index = i;
        }
        srcImagePoints->at(2) = srcImagePoints_temp.at(index);

        //get point 3;
        index = 0;
        for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
        {
            if (srcImagePoints_temp.at(i).y < srcImagePoints_temp.at(index).y)
                index = i;
        }
        srcImagePoints->at(3) = srcImagePoints_temp.at(index);

        Mat Msource = cv::cvarrToMat(source);
        stringstream ss;
        for(size_t i = 0 ; i<srcImagePoints_temp.size(); i++ )
        {
            ss << i;
            circle(Msource, srcImagePoints->at(i), 5, CV_RGB(255,0,0));
            putText(Msource, ss.str(), srcImagePoints->at(i), CV_FONT_HERSHEY_SIMPLEX, 1, CV_RGB(255,0,0));
            ss.str("");

            //new coordinate system in the middle of the frame and reversed (camera coordinate system);
            //recentre the ordered points so the 2D-3D correspondence established above is preserved
            srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints->at(i).x - source->width/2,
                                                 source->height/2 - srcImagePoints->at(i).y);
        }
    }

}
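//The "corner" detection above is deliberately simple: it assumes the thresholded image
//contains exactly four dark blobs and uses their centroids as image points. If any other
//number of contours is found, the points are not updated for that frame (the red circles
//disappear) and the last detected set is reused.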

static void createOpenGLMatrixFrom(float *posePOSIT, const CvMatr32f &rotationMatrix, const CvVect32f &translationVector)
{
    //coordinate system returned is relative to the first 3D input point
    for (int f=0; f<3; f++)
    {
        for (int c=0; c<3; c++)
        {
            posePOSIT[c*4+f] = rotationMatrix[f*3+c];   //row-major (OpenCV) to column-major (OpenGL) storage
        }
    }
    posePOSIT[3] = 0.0;
    posePOSIT[7] = 0.0;
    posePOSIT[11] = 0.0;
    posePOSIT[12] = translationVector[0];
    posePOSIT[13] = translationVector[1];
    posePOSIT[14] = translationVector[2];
    posePOSIT[15] = 1.0;
}
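
//For reference, the resulting 4x4, read row by row (with R the POSIT rotation and T the
//translation), is
//   [ R00 R01 R02 Tx ]
//   [ R10 R11 R12 Ty ]
//   [ R20 R21 R22 Tz ]
//   [  0   0   0   1 ]
//stored in the column-major order that glMultMatrixf() expects.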

int main(void)
{
    help();
    VideoCapture video("cube4.avi");
    CV_Assert(video.isOpened());

    Mat frame; video >> frame;

    IplImage* grayImage = cvCreateImage(frame.size(), 8, 1);

    namedWindow("original", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
    namedWindow("POSIT", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
    displayOverlay("POSIT", "The detection of the 4 corners is lost quite often (the red circles disappear). This demo only illustrates how to use the OpenGL callback.\n -- Press ESC to exit.", 10000);
    //For debug
    //cvNamedWindow("tempGray",CV_WINDOW_AUTOSIZE);
    float OpenGLMatrix[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    setOpenGlDrawCallback("POSIT", on_opengl, OpenGLMatrix);

    vector<CvPoint3D32f> modelPoints;
    initPOSIT(&modelPoints);

    //Create the POSIT object with the model points
    CvPOSITObject* positObject = cvCreatePOSITObject( &modelPoints[0], (int)modelPoints.size() );

    CvMatr32f rotation_matrix = new float[9];
    CvVect32f translation_vector = new float[3];
    CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1.0e-4f);

    vector<CvPoint2D32f> srcImagePoints(4, cvPoint2D32f(0,0));

    while(waitKey(33) != 27)
    {
        video >> frame;
        if (frame.empty()) //should not happen while the video loops, but stay safe
            break;
        imshow("original", frame);

        IplImage source = frame;
        foundCorners(&srcImagePoints, &source, grayImage);
        cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );
        createOpenGLMatrixFrom(OpenGLMatrix, rotation_matrix, translation_vector);

        imshow("POSIT", frame);
        //For debug
        //cvShowImage("tempGray",grayImage);

        if (video.get(CAP_PROP_POS_AVI_RATIO) > 0.99)
            video.set(CAP_PROP_POS_AVI_RATIO, 0);
    }

    destroyAllWindows();
    cvReleaseImage(&grayImage);
    video.release();
    cvReleasePOSITObject(&positObject);
    delete[] rotation_matrix;      //allocated with new[] above
    delete[] translation_vector;   //idem; avoid leaking the POSIT output buffers

    return 0;
}