samples/cpp/Qt_sample/main.cpp
//Yannick Verdie 2010

//--- Please read help() below: ---

#include <iostream>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>

#if defined WIN32 || defined _WIN32 || defined WINCE
    #include <windows.h>
    #undef small
    #undef min
    #undef max
    #undef abs
#endif

#ifdef __APPLE__
    #include <OpenGL/gl.h>
#else
    #include <GL/gl.h>
#endif

using namespace std;
using namespace cv;

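// Note: this sample targets the OpenCV 2.x API (it still uses the legacy CvPOSIT C interface)
// and relies on the Qt-enhanced highgui with OpenGL support (an OpenCV build configured with
// WITH_QT=ON and WITH_OPENGL=ON) for displayOverlay() and setOpenGlDrawCallback().
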
static void help()
{
    cout << "\nThis demo demonstrates the use of the Qt enhanced version of the highgui GUI interface\n"
            "  and dang if it doesn't throw in the use of the POSIT 3D tracking algorithm too\n"
            "It works off of the video: cube4.avi\n"
            "Using OpenCV version " << CV_VERSION << "\n\n"
            " 1) This demo is mainly based on work from Javier Barandiaran Martirena.\n"
            "    See this page http://code.opencv.org/projects/opencv/wiki/Posit.\n"
            " 2) This is a demo to illustrate how to use the **OpenGL Callback**.\n"
            " 3) You need the Qt bindings to compile this sample with OpenGL support enabled.\n"
            " 4) The feature detection is very basic and could be improved a lot\n"
            "    (basic thresholding tuned for this specific video), but that is not the point here; see 2).\n"
            " 5) THANKS TO Google Summer of Code 2010 for supporting this work!\n" << endl;
}

#define FOCAL_LENGTH 600
#define CUBE_SIZE 10

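// FOCAL_LENGTH is the focal length, in pixels, passed to cvPOSIT() below; POSIT works with a
// simple pinhole model whose principal point sits at the image centre, which is why
// foundCorners() re-expresses the detected points relative to the middle of the frame.
// CUBE_SIZE is the edge length of the tracked cube in model units, used both for the POSIT
// model points and for the OpenGL cube drawn on top of the video.
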
static void renderCube(float size)
{
    glBegin(GL_QUADS);
    // Front Face
    glNormal3f( 0.0f, 0.0f, 1.0f);
    glVertex3f( 0.0f,  0.0f,  0.0f);
    glVertex3f( size,  0.0f,  0.0f);
    glVertex3f( size,  size,  0.0f);
    glVertex3f( 0.0f,  size,  0.0f);
    // Back Face
    glNormal3f( 0.0f, 0.0f,-1.0f);
    glVertex3f( 0.0f,  0.0f, size);
    glVertex3f( 0.0f,  size, size);
    glVertex3f( size,  size, size);
    glVertex3f( size,  0.0f, size);
    // Top Face
    glNormal3f( 0.0f, 1.0f, 0.0f);
    glVertex3f( 0.0f,  size,  0.0f);
    glVertex3f( size,  size,  0.0f);
    glVertex3f( size,  size, size);
    glVertex3f( 0.0f,  size, size);
    // Bottom Face
    glNormal3f( 0.0f,-1.0f, 0.0f);
    glVertex3f( 0.0f,  0.0f,  0.0f);
    glVertex3f( 0.0f,  0.0f, size);
    glVertex3f( size,  0.0f, size);
    glVertex3f( size,  0.0f,  0.0f);
    // Right Face
    glNormal3f( 1.0f, 0.0f, 0.0f);
    glVertex3f( size,  0.0f, 0.0f);
    glVertex3f( size,  0.0f, size);
    glVertex3f( size,  size, size);
    glVertex3f( size,  size, 0.0f);
    // Left Face
    glNormal3f(-1.0f, 0.0f, 0.0f);
    glVertex3f( 0.0f,  0.0f, 0.0f);
    glVertex3f( 0.0f,  size, 0.0f);
    glVertex3f( 0.0f,  size, size);
    glVertex3f( 0.0f,  0.0f, size);
    glEnd();
}
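
// renderCube() draws the cube with one corner at the origin and edges of length `size` along
// the +x, +y and +z axes, which matches the POSIT model points set up in initPOSIT(). It uses
// legacy immediate-mode OpenGL (glBegin/glEnd), which is sufficient for the default context
// that highgui's OpenGL windows provide.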

static void on_opengl(void* param)
{
    //Draw the object with the estimated pose
    glLoadIdentity();
    glScalef( 1.0f, 1.0f, -1.0f);
    glMultMatrixf( (float*)param );
    glEnable( GL_LIGHTING );
    glEnable( GL_LIGHT0 );
    glEnable( GL_BLEND );
    glBlendFunc(GL_SRC_ALPHA, GL_ONE);
    renderCube( CUBE_SIZE );
    glDisable(GL_BLEND);
    glDisable( GL_LIGHTING );
}
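
// on_opengl() is the draw callback registered via setOpenGlDrawCallback() in main(); `param`
// is the user pointer supplied at registration time, i.e. the 4x4 pose matrix filled in by
// createOpenGLMatrixFrom() each frame. glMultMatrixf() expects that matrix in column-major
// order, and the glScalef(1, 1, -1) flips the z axis so the camera-style frame produced by
// POSIT (z pointing into the scene) matches OpenGL's convention (z pointing toward the viewer).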

static void initPOSIT(std::vector<CvPoint3D32f> *modelPoints)
{
    //Create the model points
    modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, 0.0f)); //The first must be (0,0,0)
    modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, CUBE_SIZE));
    modelPoints->push_back(cvPoint3D32f(CUBE_SIZE, 0.0f, 0.0f));
    modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));
}
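
// POSIT needs at least four non-coplanar model points, and the first one is taken as the
// origin of the object frame; here the points are one cube corner plus its three neighbours
// along the coordinate axes. The image points later passed to cvPOSIT() must be listed in
// exactly this order, which is what the sorting in foundCorners() takes care of.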

static void foundCorners(vector<CvPoint2D32f> *srcImagePoints, const Mat& source, Mat& grayImage)
{
    cvtColor(source, grayImage, COLOR_RGB2GRAY);
    GaussianBlur(grayImage, grayImage, Size(11,11), 0, 0);
    normalize(grayImage, grayImage, 0, 255, NORM_MINMAX);
    threshold(grayImage, grayImage, 26, 255, THRESH_BINARY_INV); //25

    Mat MgrayImage = grayImage;
    //For debug
    //MgrayImage = MgrayImage.clone();//deep copy
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(MgrayImage, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    Point p;
    vector<CvPoint2D32f> srcImagePoints_temp(4,cvPoint2D32f(0,0));

    if (contours.size() == srcImagePoints_temp.size())
    {
        //Use the centroid of each contour as an image point
        for(size_t i = 0 ; i<contours.size(); i++ )
        {
            p.x = p.y = 0;

            for(size_t j = 0 ; j<contours[i].size(); j++ )
                p += contours[i][j];

            srcImagePoints_temp.at(i) = cvPoint2D32f(float(p.x)/contours[i].size(), float(p.y)/contours[i].size());
        }

        //Need to keep the same order
        //> y = 0
        //> x = 1
        //< x = 2
        //< y = 3

        //get point 0;
        size_t index = 0;
        for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
        {
            if (srcImagePoints_temp.at(i).y > srcImagePoints_temp.at(index).y)
                index = i;
        }
        srcImagePoints->at(0) = srcImagePoints_temp.at(index);

        //get point 1;
        index = 0;
        for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
        {
            if (srcImagePoints_temp.at(i).x > srcImagePoints_temp.at(index).x)
                index = i;
        }
        srcImagePoints->at(1) = srcImagePoints_temp.at(index);

        //get point 2;
        index = 0;
        for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
        {
            if (srcImagePoints_temp.at(i).x < srcImagePoints_temp.at(index).x)
                index = i;
        }
        srcImagePoints->at(2) = srcImagePoints_temp.at(index);

        //get point 3;
        index = 0;
        for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
        {
            if (srcImagePoints_temp.at(i).y < srcImagePoints_temp.at(index).y)
                index = i;
        }
        srcImagePoints->at(3) = srcImagePoints_temp.at(index);

        Mat Msource = source;
        stringstream ss;
        for(size_t i = 0 ; i<srcImagePoints_temp.size(); i++ )
        {
            ss<<i;
            circle(Msource, srcImagePoints->at(i), 5, Scalar(0,0,255));
            putText(Msource, ss.str(), srcImagePoints->at(i), FONT_HERSHEY_SIMPLEX, 1, Scalar(0,0,255));
            ss.str("");

            //new coordinate system in the middle of the frame and reversed (camera coordinate system),
            //keeping the ordering established above
            srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints->at(i).x - source.cols/2, source.rows/2 - srcImagePoints->at(i).y);
        }
    }
}
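
// foundCorners() implements the (deliberately simple) detection: grayscale, blur, normalize
// and inverse-threshold the frame so the dark corner markers become white blobs, take the
// external contours, and, if exactly four are found, use each contour's centroid as an image
// point. The centroids are then ordered by their extreme y/x coordinates so they always map
// to the same model points, and finally shifted into a frame-centred coordinate system (y up),
// which is what POSIT expects.
//
// A hypothetical alternative for the centroid step (only a sketch, not what this sample does)
// would be cv::moments():
//
//     Moments m = moments(contours[i]);
//     Point2f c(float(m.m10 / m.m00), float(m.m01 / m.m00));
//
// which gives the area centroid of the blob instead of the mean of its boundary points.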

static void createOpenGLMatrixFrom(float *posePOSIT, const CvMatr32f &rotationMatrix, const CvVect32f &translationVector)
{
    //coordinate system returned is relative to the first 3D input point
    for (int f=0; f<3; f++)
    {
        for (int c=0; c<3; c++)
        {
            posePOSIT[c*4+f] = rotationMatrix[f*3+c];   //transposed
        }
    }
    posePOSIT[3] = 0.0;
    posePOSIT[7] = 0.0;
    posePOSIT[11] = 0.0;
    posePOSIT[12] = translationVector[0];
    posePOSIT[13] = translationVector[1];
    posePOSIT[14] = translationVector[2];
    posePOSIT[15] = 1.0;
}
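
// glMultMatrixf() reads its 4x4 matrix in column-major order (element [c*4 + r] is row r of
// column c), while POSIT returns the 3x3 rotation row-major; swapping the indices in the copy
// above (the "transposed" line) accounts for that, so the matrix OpenGL ends up seeing is
//
//     | R00 R01 R02 Tx |
//     | R10 R11 R12 Ty |
//     | R20 R21 R22 Tz |
//     |  0   0   0   1 |
//
// i.e. the rigid transform [R | t] that maps model coordinates into the camera frame.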

int main(void)
{
    help();
    VideoCapture video("cube4.avi");
    CV_Assert(video.isOpened());

    Mat source, grayImage;

    video >> source;

    namedWindow("original", WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
    namedWindow("POSIT", WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
    displayOverlay("POSIT", "The detection of the 4 corners is quite often lost (the red circles disappear). This demo only illustrates how to use the OpenGL callback.\n -- Press ESC to exit.", 10000);

    //Pose matrix handed to the OpenGL draw callback; it is filled in every frame below
    float OpenGLMatrix[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
    setOpenGlDrawCallback("POSIT", on_opengl, OpenGLMatrix);

    vector<CvPoint3D32f> modelPoints;
    initPOSIT(&modelPoints);

    //Create the POSIT object with the model points
    CvPOSITObject* positObject = cvCreatePOSITObject( &modelPoints[0], (int)modelPoints.size() );

    CvMatr32f rotation_matrix = new float[9];
    CvVect32f translation_vector = new float[3];
    CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1.0e-4f);
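
    // cvPOSIT and CvPOSITObject belong to the legacy C API that was dropped in OpenCV 3.x.
    // A rough modern counterpart of this pose-estimation step would be cv::solvePnP with a
    // pinhole camera matrix built from FOCAL_LENGTH; the lines below are only a sketch, with
    // hypothetical objectPoints3f/imagePoints2f holding vector<Point3f>/vector<Point2f>
    // copies of the points used in this sample:
    //
    //     Mat K = (Mat_<double>(3, 3) << FOCAL_LENGTH, 0, 0,
    //                                    0, FOCAL_LENGTH, 0,
    //                                    0, 0, 1);
    //     Mat rvec, tvec;
    //     solvePnP(objectPoints3f, imagePoints2f, K, Mat(), rvec, tvec);
    //     Rodrigues(rvec, rotationMat3x3);  // 3x3 rotation comparable to rotation_matrix
    //
    // The principal point can stay at (0, 0) here because foundCorners() already centres the
    // image points on the middle of the frame.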

    vector<CvPoint2D32f> srcImagePoints(4, cvPoint2D32f(0,0));

    while(waitKey(33) != 27)
    {
        video >> source;
        imshow("original", source);

        foundCorners(&srcImagePoints, source, grayImage);
        cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );
        createOpenGLMatrixFrom(OpenGLMatrix, rotation_matrix, translation_vector);

        imshow("POSIT", source);

        //Rewind before the video ends so the demo loops
        if (video.get(CV_CAP_PROP_POS_AVI_RATIO) > 0.99)
            video.set(CV_CAP_PROP_POS_AVI_RATIO, 0.);
    }
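
    // Note: whether seeking via CV_CAP_PROP_POS_AVI_RATIO really rewinds the stream depends on
    // the capture backend; if looping does not work on a given platform, reopening the file
    // with video.open("cube4.avi") is a simple fallback.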

    destroyAllWindows();
    cvReleasePOSITObject(&positObject);
    delete[] rotation_matrix;
    delete[] translation_vector;

    return 0;
}