// Merged from branch 'origin/2.4' (merge-2.4)
// samples/cpp/Qt_sample/main.cpp -- OpenCV repository
1 //Yannick Verdie 2010
2
3 //--- Please read help() below: ---
4
#include <algorithm>
#include <iostream>
#include <vector>
#include <opencv2/core/core_c.h>
#include <opencv2/calib3d/calib3d_c.h>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/legacy/compat.hpp>
12
13 #if defined WIN32 || defined _WIN32 || defined WINCE
14     #include <windows.h>
15     #undef small
16     #undef min
17     #undef max
18     #undef abs
19 #endif
20
21 #ifdef __APPLE__
22     #include <OpenGL/gl.h>
23 #else
24     #include <GL/gl.h>
25 #endif
26
27 using namespace std;
28 using namespace cv;
29
30 static void help()
31 {
32     cout << "\nThis demo demonstrates the use of the Qt enhanced version of the highgui GUI interface\n"
33             "  and dang if it doesn't throw in the use of of the POSIT 3D tracking algorithm too\n"
34             "It works off of the video: cube4.avi\n"
35             "Using OpenCV version %s\n" << CV_VERSION << "\n\n"
36 " 1). This demo is mainly based on work from Javier Barandiaran Martirena\n"
37 "     See this page http://code.opencv.org/projects/opencv/wiki/Posit.\n"
38 " 2). This is a demo to illustrate how to use **OpenGL Callback**.\n"
39 " 3). You need Qt binding to compile this sample with OpenGL support enabled.\n"
40 " 4). The features' detection is very basic and could highly be improved \n"
41 "     (basic thresholding tuned for the specific video) but 2).\n"
42 " 5) THANKS TO Google Summer of Code 2010 for supporting this work!\n" << endl;
43 }
44
45 #define FOCAL_LENGTH 600
46 #define CUBE_SIZE 10
47
48 static void renderCube(float size)
49 {
50     glBegin(GL_QUADS);
51     // Front Face
52     glNormal3f( 0.0f, 0.0f, 1.0f);
53     glVertex3f( 0.0f,  0.0f,  0.0f);
54     glVertex3f( size,  0.0f,  0.0f);
55     glVertex3f( size,  size,  0.0f);
56     glVertex3f( 0.0f,  size,  0.0f);
57     // Back Face
58     glNormal3f( 0.0f, 0.0f,-1.0f);
59     glVertex3f( 0.0f,  0.0f, size);
60     glVertex3f( 0.0f,  size, size);
61     glVertex3f( size,  size, size);
62     glVertex3f( size,  0.0f, size);
63     // Top Face
64     glNormal3f( 0.0f, 1.0f, 0.0f);
65     glVertex3f( 0.0f,  size,  0.0f);
66     glVertex3f( size,  size,  0.0f);
67     glVertex3f( size,  size, size);
68     glVertex3f( 0.0f,  size, size);
69     // Bottom Face
70     glNormal3f( 0.0f,-1.0f, 0.0f);
71     glVertex3f( 0.0f,  0.0f,  0.0f);
72     glVertex3f( 0.0f,  0.0f, size);
73     glVertex3f( size,  0.0f, size);
74     glVertex3f( size,  0.0f,  0.0f);
75     // Right face
76     glNormal3f( 1.0f, 0.0f, 0.0f);
77     glVertex3f( size,  0.0f, 0.0f);
78     glVertex3f( size,  0.0f, size);
79     glVertex3f( size,  size, size);
80     glVertex3f( size,  size, 0.0f);
81     // Left Face
82     glNormal3f(-1.0f, 0.0f, 0.0f);
83     glVertex3f( 0.0f,  0.0f, 0.0f);
84     glVertex3f( 0.0f,  size, 0.0f);
85     glVertex3f( 0.0f,  size, size);
86     glVertex3f( 0.0f,  0.0f, size);
87     glEnd();
88 }
89
90
// highgui OpenGL draw callback (installed in main() via
// setOpenGlDrawCallback). Renders the cube with the pose estimated by POSIT.
// `param` points to a 16-float 4x4 matrix (filled by createOpenGLMatrixFrom)
// that is multiplied onto the modelview stack. The GL state changes below
// are strictly order-dependent.
static void on_opengl(void* param)
{
    //Draw the object with the estimated pose
    glLoadIdentity();
    // Flip the Z axis -- presumably to convert between the POSIT camera
    // frame and OpenGL's viewing convention; TODO confirm intent.
    glScalef( 1.0f, 1.0f, -1.0f);
    glMultMatrixf( (float*)param );
    // Lit, additively blended rendering of the cube.
    glEnable( GL_LIGHTING );
    glEnable( GL_LIGHT0 );
    glEnable( GL_BLEND );
    glBlendFunc(GL_SRC_ALPHA, GL_ONE);
    renderCube( CUBE_SIZE );
    // Restore state so the video frame underneath is drawn normally.
    glDisable(GL_BLEND);
    glDisable( GL_LIGHTING );
}
105
106 static void initPOSIT(std::vector<CvPoint3D32f> *modelPoints)
107 {
108     //Create the model pointss
109     modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, 0.0f)); //The first must be (0,0,0)
110     modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, CUBE_SIZE));
111     modelPoints->push_back(cvPoint3D32f(CUBE_SIZE, 0.0f, 0.0f));
112     modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));
113 }
114
115 static void foundCorners(vector<CvPoint2D32f> *srcImagePoints, const Mat& source, Mat& grayImage)
116 {
117     cvtColor(source, grayImage, COLOR_RGB2GRAY);
118     GaussianBlur(grayImage, grayImage, Size(11,11), 0, 0);
119     normalize(grayImage, grayImage, 0, 255, NORM_MINMAX);
120     threshold(grayImage, grayImage, 26, 255, THRESH_BINARY_INV); //25
121
122     vector<vector<Point> > contours;
123     vector<Vec4i> hierarchy;
124     findContours(grayImage, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE);
125
126     Point p;
127     vector<CvPoint2D32f> srcImagePoints_temp(4,cvPoint2D32f(0,0));
128
129     if (contours.size() == srcImagePoints_temp.size())
130     {
131
132         for(size_t i = 0 ; i<contours.size(); i++ )
133         {
134
135             p.x = p.y = 0;
136
137             for(size_t j = 0 ; j<contours[i].size(); j++ )
138                 p+=contours[i][j];
139
140             srcImagePoints_temp.at(i)=cvPoint2D32f(float(p.x)/contours[i].size(),float(p.y)/contours[i].size());
141         }
142
143         //Need to keep the same order
144         //> y = 0
145         //> x = 1
146         //< x = 2
147         //< y = 3
148
149         //get point 0;
150         size_t index = 0;
151         for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
152         {
153             if (srcImagePoints_temp.at(i).y > srcImagePoints_temp.at(index).y)
154                 index = i;
155         }
156         srcImagePoints->at(0) = srcImagePoints_temp.at(index);
157
158         //get point 1;
159         index = 0;
160         for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
161         {
162             if (srcImagePoints_temp.at(i).x > srcImagePoints_temp.at(index).x)
163                 index = i;
164         }
165         srcImagePoints->at(1) = srcImagePoints_temp.at(index);
166
167         //get point 2;
168         index = 0;
169         for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
170         {
171             if (srcImagePoints_temp.at(i).x < srcImagePoints_temp.at(index).x)
172                 index = i;
173         }
174         srcImagePoints->at(2) = srcImagePoints_temp.at(index);
175
176         //get point 3;
177         index = 0;
178         for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
179         {
180             if (srcImagePoints_temp.at(i).y < srcImagePoints_temp.at(index).y)
181                 index = i;
182         }
183         srcImagePoints->at(3) = srcImagePoints_temp.at(index);
184
185         Mat Msource = source;
186         stringstream ss;
187         for(size_t i = 0 ; i<srcImagePoints_temp.size(); i++ )
188         {
189             ss<<i;
190             circle(Msource,srcImagePoints->at(i),5,Scalar(0,0,255));
191             putText(Msource,ss.str(),srcImagePoints->at(i),FONT_HERSHEY_SIMPLEX,1,Scalar(0,0,255));
192             ss.str("");
193
194             //new coordinate system in the middle of the frame and reversed (camera coordinate system)
195             srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints_temp.at(i).x-source.cols/2,source.rows/2-srcImagePoints_temp.at(i).y);
196         }
197     }
198
199 }
200
201 static void createOpenGLMatrixFrom(float *posePOSIT,const CvMatr32f &rotationMatrix, const CvVect32f &translationVector)
202 {
203
204
205     //coordinate system returned is relative to the first 3D input point
206     for (int f=0; f<3; f++)
207     {
208         for (int c=0; c<3; c++)
209         {
210             posePOSIT[c*4+f] = rotationMatrix[f*3+c];   //transposed
211         }
212     }
213     posePOSIT[3] = 0.0;
214     posePOSIT[7] = 0.0;
215     posePOSIT[11] = 0.0;
216     posePOSIT[12] = translationVector[0];
217     posePOSIT[13] = translationVector[1];
218     posePOSIT[14] = translationVector[2];
219     posePOSIT[15] = 1.0;
220 }
221
222 int main(void)
223 {
224     help();
225     VideoCapture video("cube4.avi");
226     CV_Assert(video.isOpened());
227
228     Mat source, grayImage;
229
230     video >> source;
231
232     namedWindow("original", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
233     namedWindow("POSIT", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
234     displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear). This demo is only to illustrate how to use OpenGL callback.\n -- Press ESC to exit.", 10000);
235
236     float OpenGLMatrix[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
237     setOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);
238
239     vector<CvPoint3D32f> modelPoints;
240     initPOSIT(&modelPoints);
241
242     //Create the POSIT object with the model points
243     CvPOSITObject* positObject = cvCreatePOSITObject( &modelPoints[0], (int)modelPoints.size() );
244
245     CvMatr32f rotation_matrix = new float[9];
246     CvVect32f translation_vector = new float[3];
247     CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1.0e-4f);
248
249     vector<CvPoint2D32f> srcImagePoints(4,cvPoint2D32f(0,0));
250
251
252     while(waitKey(33) != 27)
253     {
254         video >> source;
255         imshow("original",source);
256
257         foundCorners(&srcImagePoints, source, grayImage);
258         cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );
259         createOpenGLMatrixFrom(OpenGLMatrix,rotation_matrix,translation_vector);
260
261         imshow("POSIT",source);
262
263         if (video.get(CAP_PROP_POS_AVI_RATIO) > 0.99)
264             video.set(CAP_PROP_POS_AVI_RATIO, 0);
265     }
266
267     destroyAllWindows();
268     cvReleasePOSITObject(&positObject);
269
270     return 0;
271 }