1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 //  By downloading, copying, installing or using the software you agree to this license.
6 //  If you do not agree to this license, do not download, install,
7 //  copy or use the software.
8 //
9 //
10 //                        Intel License Agreement
11 //                For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2000, Intel Corporation, all rights reserved.
14 // Third party copyrights are property of their respective owners.
15 //
16 // Redistribution and use in source and binary forms, with or without modification,
17 // are permitted provided that the following conditions are met:
18 //
19 //   * Redistribution's of source code must retain the above copyright notice,
20 //     this list of conditions and the following disclaimer.
21 //
22 //   * Redistribution's in binary form must reproduce the above copyright notice,
23 //     this list of conditions and the following disclaimer in the documentation
24 //     and/or other materials provided with the distribution.
25 //
26 //   * The name of Intel Corporation may not be used to endorse or promote products
27 //     derived from this software without specific prior written permission.
28 //
29 // This software is provided by the copyright holders and contributors "as is" and
30 // any express or implied warranties, including, but not limited to, the implied
31 // warranties of merchantability and fitness for a particular purpose are disclaimed.
32 // In no event shall the Intel Corporation or contributors be liable for any direct,
33 // indirect, incidental, special, exemplary, or consequential damages
34 // (including, but not limited to, procurement of substitute goods or services;
35 // loss of use, data, or profits; or business interruption) however caused
36 // and on any theory of liability, whether in contract, strict liability,
37 // or tort (including negligence or otherwise) arising in any way out of
38 // the use of this software, even if advised of the possibility of such damage.
39 //
40 //M*/
41
42 #ifndef __OPENCV_LEGACY_HPP__
43 #define __OPENCV_LEGACY_HPP__
44
45 #include "opencv2/imgproc/imgproc.hpp"
46 #include "opencv2/imgproc/imgproc_c.h"
47 #include "opencv2/features2d/features2d.hpp"
48 #include "opencv2/calib3d/calib3d.hpp"
49 #include "opencv2/ml/ml.hpp"
50
51 #ifdef __cplusplus
52 extern "C" {
53 #endif
54
55 CVAPI(CvSeq*) cvSegmentImage( const CvArr* srcarr, CvArr* dstarr,
56                                     double canny_threshold,
57                                     double ffill_threshold,
58                                     CvMemStorage* storage );
59
60 /****************************************************************************************\
61 *                                  Eigen objects                                         *
62 \****************************************************************************************/
63
64 typedef int (CV_CDECL * CvCallback)(int index, void* buffer, void* user_data);
65 typedef union
66 {
67     CvCallback callback;
68     void* data;
69 }
70 CvInput;
71
72 #define CV_EIGOBJ_NO_CALLBACK     0
73 #define CV_EIGOBJ_INPUT_CALLBACK  1
74 #define CV_EIGOBJ_OUTPUT_CALLBACK 2
75 #define CV_EIGOBJ_BOTH_CALLBACK   3
76
77 /* Calculates the covariance matrix of a set of arrays */
78 CVAPI(void)  cvCalcCovarMatrixEx( int nObjects, void* input, int ioFlags,
79                                   int ioBufSize, uchar* buffer, void* userData,
80                                   IplImage* avg, float* covarMatrix );
81
82 /* Calculates eigenvalues and eigenvectors of the covariance matrix of a set of
83    arrays */
84 CVAPI(void)  cvCalcEigenObjects( int nObjects, void* input, void* output,
85                                  int ioFlags, int ioBufSize, void* userData,
86                                  CvTermCriteria* calcLimit, IplImage* avg,
87                                  float* eigVals );
88
89 /* Calculates the dot product (obj - avg) * eigObj (i.e. projects the image onto the eigenvector) */
90 CVAPI(double)  cvCalcDecompCoeff( IplImage* obj, IplImage* eigObj, IplImage* avg );
91
92 /* Projects an image onto the eigen space (finds all decomposition coefficients) */
93 CVAPI(void)  cvEigenDecomposite( IplImage* obj, int nEigObjs, void* eigInput,
94                                  int ioFlags, void* userData, IplImage* avg,
95                                  float* coeffs );
96
97 /* Projects original objects used to calculate eigen space basis to that space */
98 CVAPI(void)  cvEigenProjection( void* eigInput, int nEigObjs, int ioFlags,
99                                 void* userData, float* coeffs, IplImage* avg,
100                                 IplImage* proj );
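
/* A minimal, hypothetical eigenface-style sketch of the functions above (no I/O
   callbacks). `faces` is assumed to be an array of nObjects 8-bit single-channel
   images of identical size, `size` their common CvSize, and `probe` a further image
   of the same size.

       int i, nEigens = nObjects - 1;
       IplImage** eigObjs = (IplImage**)cvAlloc( nEigens*sizeof(eigObjs[0]) );
       IplImage*  avg     = cvCreateImage( size, IPL_DEPTH_32F, 1 );
       float* eigVals = (float*)cvAlloc( nEigens*sizeof(eigVals[0]) );
       float* coeffs  = (float*)cvAlloc( nEigens*sizeof(coeffs[0]) );
       CvTermCriteria limit = cvTermCriteria( CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, nEigens, 0.01 );

       for( i = 0; i < nEigens; i++ )
           eigObjs[i] = cvCreateImage( size, IPL_DEPTH_32F, 1 );

       // build the eigen space from the training images
       cvCalcEigenObjects( nObjects, (void*)faces, (void*)eigObjs, CV_EIGOBJ_NO_CALLBACK,
                           0, 0, &limit, avg, eigVals );
       // project a probe image onto that space
       cvEigenDecomposite( probe, nEigens, (void*)eigObjs, CV_EIGOBJ_NO_CALLBACK,
                           0, avg, coeffs );
*/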
101
102 /****************************************************************************************\
103 *                                       1D/2D HMM                                        *
104 \****************************************************************************************/
105
106 typedef struct CvImgObsInfo
107 {
108     int obs_x;
109     int obs_y;
110     int obs_size;
111     float* obs; /* consecutive observations */
112
113     int* state;/* array of superstate/state pairs to which each observation belongs */
114     int* mix;  /* index of the mixture to which each observation belongs */
115
116 } CvImgObsInfo;/*struct for 1 image*/
117
118 typedef CvImgObsInfo Cv1DObsInfo;
119
120 typedef struct CvEHMMState
121 {
122     int num_mix;        /*number of mixtures in this state*/
123     float* mu;          /*mean vectors corresponding to each mixture*/
124     float* inv_var;     /* square roots of the inverse variances corresponding to each mixture */
125     float* log_var_val; /* sum of 0.5 (LN2PI + ln(variance[i]) ) for i=1,n */
126     float* weight;      /* array of mixture weights; the sum of all weights in a state is 1 */
127
128 } CvEHMMState;
129
130 typedef struct CvEHMM
131 {
132     int level; /* 0 - lowest(i.e its states are real states), ..... */
133     int num_states; /* number of HMM states */
134     float*  transP;/*transition probab. matrices for states */
135     float** obsProb; /* if level == 0 - array of prob. matrices corresponding to hmm
136                         if level == 1 - matrix of matrices */
137     union
138     {
139         CvEHMMState* state; /* if level == 0 points to real states array,
140                                if not - points to embedded hmms */
141         struct CvEHMM* ehmm; /* pointer to an embedded model or NULL, if it is a leaf */
142     } u;
143
144 } CvEHMM;
145
146 /*CVAPI(int)  icvCreate1DHMM( CvEHMM** this_hmm,
147                                    int state_number, int* num_mix, int obs_size );
148
149 CVAPI(int)  icvRelease1DHMM( CvEHMM** phmm );
150
151 CVAPI(int)  icvUniform1DSegm( Cv1DObsInfo* obs_info, CvEHMM* hmm );
152
153 CVAPI(int)  icvInit1DMixSegm( Cv1DObsInfo** obs_info_array, int num_img, CvEHMM* hmm);
154
155 CVAPI(int)  icvEstimate1DHMMStateParams( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm);
156
157 CVAPI(int)  icvEstimate1DObsProb( CvImgObsInfo* obs_info, CvEHMM* hmm );
158
159 CVAPI(int)  icvEstimate1DTransProb( Cv1DObsInfo** obs_info_array,
160                                            int num_seq,
161                                            CvEHMM* hmm );
162
163 CVAPI(float)  icvViterbi( Cv1DObsInfo* obs_info, CvEHMM* hmm);
164
165 CVAPI(int)  icv1DMixSegmL2( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm );*/
166
167 /*********************************** Embedded HMMs *************************************/
168
169 /* Creates 2D HMM */
170 CVAPI(CvEHMM*)  cvCreate2DHMM( int* stateNumber, int* numMix, int obsSize );
171
172 /* Releases HMM */
173 CVAPI(void)  cvRelease2DHMM( CvEHMM** hmm );
174
175 #define CV_COUNT_OBS(roi, win, delta, numObs )                                       \
176 {                                                                                    \
177    (numObs)->width  =((roi)->width  -(win)->width  +(delta)->width)/(delta)->width;  \
178    (numObs)->height =((roi)->height -(win)->height +(delta)->height)/(delta)->height;\
179 }
180
181 /* Creates storage for observation vectors */
182 CVAPI(CvImgObsInfo*)  cvCreateObsInfo( CvSize numObs, int obsSize );
183
184 /* Releases storage for observation vectors */
185 CVAPI(void)  cvReleaseObsInfo( CvImgObsInfo** obs_info );
186
187
188 /* The function takes an image as input and returns the sequence of observations
189    to be used with an embedded HMM; each observation is the top-left block of the DCT
190    coefficient matrix */
191 CVAPI(void)  cvImgToObs_DCT( const CvArr* arr, float* obs, CvSize dctSize,
192                              CvSize obsSize, CvSize delta );
193
194
195 /* Uniformly segments all observation vectors extracted from image */
196 CVAPI(void)  cvUniformImgSegm( CvImgObsInfo* obs_info, CvEHMM* ehmm );
197
198 /* Does mixture segmentation of the states of embedded HMM */
199 CVAPI(void)  cvInitMixSegm( CvImgObsInfo** obs_info_array,
200                             int num_img, CvEHMM* hmm );
201
202 /* Calculates the means, variances and weights of every Gaussian mixture
203    of every low-level state of the embedded HMM */
204 CVAPI(void)  cvEstimateHMMStateParams( CvImgObsInfo** obs_info_array,
205                                        int num_img, CvEHMM* hmm );
206
207 /* Function computes transition probability matrices of embedded HMM
208    given observations segmentation */
209 CVAPI(void)  cvEstimateTransProb( CvImgObsInfo** obs_info_array,
210                                   int num_img, CvEHMM* hmm );
211
212 /* Computes the probability of each observation at every state
213    (i.e. computes P(obs|state) for every pair (obs,state)) */
214 CVAPI(void)  cvEstimateObsProb( CvImgObsInfo* obs_info,
215                                 CvEHMM* hmm );
216
217 /* Runs Viterbi algorithm for embedded HMM */
218 CVAPI(float)  cvEViterbi( CvImgObsInfo* obs_info, CvEHMM* hmm );
219
220
221 /* Clusters observation vectors from several images
222    given the observation segmentation.
223    Euclidean distance is used for clustering the vectors.
224    The cluster centers are the means of the corresponding mixtures */
225 CVAPI(void)  cvMixSegmL2( CvImgObsInfo** obs_info_array,
226                           int num_img, CvEHMM* hmm );
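
/* A rough, hypothetical sketch of the embedded-HMM training loop built from the
   functions above. The state/mixture counts, DCT window sizes and iteration count are
   illustrative only; `imgs` is assumed to be an array of num_img grayscale IplImage*.

       int stateNumber[] = { 5, 3, 6, 6, 6, 3 };   // 5 superstates, then states per superstate
       int numMix[24];                             // one entry per low-level state (3+6+6+6+3)
       CvSize dctSize = cvSize(12,12), obsSize = cvSize(3,3), delta = cvSize(4,4);
       CvImgObsInfo** obs = (CvImgObsInfo**)cvAlloc( num_img*sizeof(obs[0]) );
       CvEHMM* hmm;
       int i, iter;

       for( i = 0; i < 24; i++ ) numMix[i] = 3;
       hmm = cvCreate2DHMM( stateNumber, numMix, obsSize.width*obsSize.height );

       for( i = 0; i < num_img; i++ )
       {
           CvSize roi = cvGetSize( imgs[i] ), numObs;
           CV_COUNT_OBS( &roi, &dctSize, &delta, &numObs );
           obs[i] = cvCreateObsInfo( numObs, obsSize.width*obsSize.height );
           cvImgToObs_DCT( imgs[i], obs[i]->obs, dctSize, obsSize, delta );
           cvUniformImgSegm( obs[i], hmm );
       }
       cvInitMixSegm( obs, num_img, hmm );
       for( iter = 0; iter < 10; iter++ )           // fixed iteration count for brevity
       {
           cvEstimateHMMStateParams( obs, num_img, hmm );
           cvEstimateTransProb( obs, num_img, hmm );
           for( i = 0; i < num_img; i++ )
           {
               cvEstimateObsProb( obs[i], hmm );
               cvEViterbi( obs[i], hmm );
           }
           cvMixSegmL2( obs, num_img, hmm );
       }
*/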
227
228 /****************************************************************************************\
229 *                 A few functions from old stereo gesture recognition demos               *
230 \****************************************************************************************/
231
232 /* Creates hand mask image given several points on the hand */
233 CVAPI(void)  cvCreateHandMask( CvSeq* hand_points,
234                                    IplImage *img_mask, CvRect *roi);
235
236 /* Finds hand region in range image data */
237 CVAPI(void)  cvFindHandRegion (CvPoint3D32f* points, int count,
238                                 CvSeq* indexs,
239                                 float* line, CvSize2D32f size, int flag,
240                                 CvPoint3D32f* center,
241                                 CvMemStorage* storage, CvSeq **numbers);
242
243 /* Finds hand region in range image data (advanced version) */
244 CVAPI(void)  cvFindHandRegionA( CvPoint3D32f* points, int count,
245                                 CvSeq* indexs,
246                                 float* line, CvSize2D32f size, int jc,
247                                 CvPoint3D32f* center,
248                                 CvMemStorage* storage, CvSeq **numbers);
249
250 /* Calculates the coefficients of the homography matrix */
251 CVAPI(void)  cvCalcImageHomography( float* line, CvPoint3D32f* center,
252                                     float* intrinsic, float* homography );
253
254 /****************************************************************************************\
255 *                           More operations on sequences                                 *
256 \****************************************************************************************/
257
258 /*****************************************************************************************/
259
260 #define CV_CURRENT_INT( reader ) (*((int *)(reader).ptr))
261 #define CV_PREV_INT( reader ) (*((int *)(reader).prev_elem))
262
263 #define  CV_GRAPH_WEIGHTED_VERTEX_FIELDS() CV_GRAPH_VERTEX_FIELDS()\
264     float weight;
265
266 #define  CV_GRAPH_WEIGHTED_EDGE_FIELDS() CV_GRAPH_EDGE_FIELDS()
267
268 typedef struct CvGraphWeightedVtx
269 {
270     CV_GRAPH_WEIGHTED_VERTEX_FIELDS()
271 } CvGraphWeightedVtx;
272
273 typedef struct CvGraphWeightedEdge
274 {
275     CV_GRAPH_WEIGHTED_EDGE_FIELDS()
276 } CvGraphWeightedEdge;
277
278 typedef enum CvGraphWeightType
279 {
280     CV_NOT_WEIGHTED,
281     CV_WEIGHTED_VTX,
282     CV_WEIGHTED_EDGE,
283     CV_WEIGHTED_ALL
284 } CvGraphWeightType;
285
286
287 /* Calculates histogram of a contour */
288 CVAPI(void)  cvCalcPGH( const CvSeq* contour, CvHistogram* hist );
289
290 #define CV_DOMINANT_IPAN 1
291
292 /* Finds high-curvature points of the contour */
293 CVAPI(CvSeq*) cvFindDominantPoints( CvSeq* contour, CvMemStorage* storage,
294                                    int method CV_DEFAULT(CV_DOMINANT_IPAN),
295                                    double parameter1 CV_DEFAULT(0),
296                                    double parameter2 CV_DEFAULT(0),
297                                    double parameter3 CV_DEFAULT(0),
298                                    double parameter4 CV_DEFAULT(0));
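
/* A hypothetical usage sketch for the IPAN detector above. The four parameters are the
   minimal distance, maximal distance, neighbourhood distance and maximal angle; the
   values below are plausible defaults, not values mandated by the API.

       CvMemStorage* storage = cvCreateMemStorage(0);
       CvSeq* contour = 0;
       cvFindContours( binaryImg, storage, &contour, sizeof(CvContour),
                       CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0,0) );
       if( contour )
       {
           CvSeq* corners = cvFindDominantPoints( contour, storage, CV_DOMINANT_IPAN,
                                                  7, 9, 9, 150 );
           // `corners` describes the high-curvature (dominant) points of `contour`
       }
       cvReleaseMemStorage( &storage );
*/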
299
300 /*****************************************************************************************/
301
302
303 /*******************************Stereo correspondence*************************************/
304
305 typedef struct CvCliqueFinder
306 {
307     CvGraph* graph;
308     int**    adj_matr;
309     int N; //graph size
310
311     // stacks, counters, etc.
312     int k; //stack size
313     int* current_comp;
314     int** All;
315
316     int* ne;
317     int* ce;
318     int* fixp; //node with minimal disconnections
319     int* nod;
320     int* s; //for selected candidate
321     int status;
322     int best_score;
323     int weighted;
324     int weighted_edges;
325     float best_weight;
326     float* edge_weights;
327     float* vertex_weights;
328     float* cur_weight;
329     float* cand_weight;
330
331 } CvCliqueFinder;
332
333 #define CLIQUE_TIME_OFF 2
334 #define CLIQUE_FOUND 1
335 #define CLIQUE_END   0
336
337 /*CVAPI(void) cvStartFindCliques( CvGraph* graph, CvCliqueFinder* finder, int reverse,
338                                    int weighted CV_DEFAULT(0),  int weighted_edges CV_DEFAULT(0));
339 CVAPI(int) cvFindNextMaximalClique( CvCliqueFinder* finder, int* clock_rest CV_DEFAULT(0) );
340 CVAPI(void) cvEndFindCliques( CvCliqueFinder* finder );
341
342 CVAPI(void) cvBronKerbosch( CvGraph* graph );*/
343
344
345 /*F///////////////////////////////////////////////////////////////////////////////////////
346 //
347 //    Name:    cvSubgraphWeight
348 //    Purpose: finds weight of subgraph in a graph
349 //    Context:
350 //    Parameters:
351 //      graph - input graph.
352 //      subgraph - sequence of pairwise different ints.  These are indices of vertices of subgraph.
353 //      weight_type - describes the way we measure weight.
354 //            one of the following:
355 //            CV_NOT_WEIGHTED - weight of a clique is simply its size
356 //            CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices
357 //            CV_WEIGHTED_EDGE - the same but edges
358 //            CV_WEIGHTED_ALL - the same but both edges and vertices
359 //      weight_vtx - optional vector of floats, with size = graph->total.
360 //            If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL
361 //            weights of vertices must be provided.  If weight_vtx is not zero,
362 //            the weights are taken from it; otherwise the function assumes
363 //            that the vertices of the graph are inherited from CvGraphWeightedVtx.
364 //      weight_edge - optional matrix of floats, of width and height = graph->total.
365 //            If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL
366 //            weights of edges ought to be supplied.  If weight_edge is not zero
367 //            the weights are taken from it; otherwise the function expects
368 //            the edges of the graph to be inherited from CvGraphWeightedEdge.
369 //            If this parameter is not zero, the structure of the graph is determined from the matrix
370 //            rather than from CvGraphEdge's.  In particular, elements corresponding to
371 //            absent edges should be zero.
372 //    Returns:
373 //      weight of subgraph.
374 //    Notes:
375 //F*/
376 /*CVAPI(float) cvSubgraphWeight( CvGraph *graph, CvSeq *subgraph,
377                                   CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED),
378                                   CvVect32f weight_vtx CV_DEFAULT(0),
379                                   CvMatr32f weight_edge CV_DEFAULT(0) );*/
380
381
382 /*F///////////////////////////////////////////////////////////////////////////////////////
383 //
384 //    Name:    cvFindCliqueEx
385 //    Purpose: tries to find clique with maximum possible weight in a graph
386 //    Context:
387 //    Parameters:
388 //      graph - input graph.
389 //      storage - memory storage to be used by the result.
390 //      is_complementary - optional flag showing whether function should seek for clique
391 //            in complementary graph.
392 //      weight_type - describes our notion about weight.
393 //            one of the following:
394 //            CV_NOT_WEIGHTED - weight of a clique is simply its size
395 //            CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices
396 //            CV_WEIGHTED_EDGE - the same but edges
397 //            CV_WEIGHTED_ALL - the same but both edges and vertices
398 //      weight_vtx - optional vector of floats, with size = graph->total.
399 //            If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL
400 //            weights of vertices must be provided.  If weight_vtx is not zero,
401 //            the weights are taken from it; otherwise the function assumes
402 //            that vertices of graph are inherited from CvGraphWeightedVtx.
403 //      weight_edge - optional matrix of floats, of width and height = graph->total.
404 //            If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL
405 //            weights of edges ought to be supplied.  If weight_edge is not zero
406 //            the weights are taken from it; otherwise the function expects
407 //            edges of graph to be inherited from CvGraphWeightedEdge.
408 //            Note that in case of CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL
409 //            nonzero is_complementary implies nonzero weight_edge.
410 //      start_clique - optional sequence of pairwise different ints.  They are indices of
411 //            vertices that shall be present in the output clique.
412 //      subgraph_of_ban - optional sequence of (maybe equal) ints.  They are indices of
413 //            vertices that shall not be present in the output clique.
414 //      clique_weight_ptr - optional output parameter.  Weight of found clique stored here.
415 //      num_generations - optional number of generations in evolutionary part of algorithm,
416 //            zero forces to return first found clique.
417 //      quality - optional parameter determining degree of required quality/speed tradeoff.
418 //            Must be in the range from 0 to 9.
419 //            0 is fast and dirty, 9 is slow but hopefully yields good clique.
420 //    Returns:
421 //      sequence of pairwise different ints.
422 //      These are indices of vertices that form found clique.
423 //    Notes:
424 //      in cases of CV_WEIGHTED_EDGE and CV_WEIGHTED_ALL weights should be nonnegative.
425 //      start_clique has a priority over subgraph_of_ban.
426 //F*/
427 /*CVAPI(CvSeq*) cvFindCliqueEx( CvGraph *graph, CvMemStorage *storage,
428                                  int is_complementary CV_DEFAULT(0),
429                                  CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED),
430                                  CvVect32f weight_vtx CV_DEFAULT(0),
431                                  CvMatr32f weight_edge CV_DEFAULT(0),
432                                  CvSeq *start_clique CV_DEFAULT(0),
433                                  CvSeq *subgraph_of_ban CV_DEFAULT(0),
434                                  float *clique_weight_ptr CV_DEFAULT(0),
435                                  int num_generations CV_DEFAULT(3),
436                                  int quality CV_DEFAULT(2) );*/
437
438
439 #define CV_UNDEF_SC_PARAM         12345 //default value of parameters
440
441 #define CV_IDP_BIRCHFIELD_PARAM1  25
442 #define CV_IDP_BIRCHFIELD_PARAM2  5
443 #define CV_IDP_BIRCHFIELD_PARAM3  12
444 #define CV_IDP_BIRCHFIELD_PARAM4  15
445 #define CV_IDP_BIRCHFIELD_PARAM5  25
446
447
448 #define  CV_DISPARITY_BIRCHFIELD  0
449
450
451 /*F///////////////////////////////////////////////////////////////////////////
452 //
453 //    Name:    cvFindStereoCorrespondence
454 //    Purpose: find stereo correspondence on stereo-pair
455 //    Context:
456 //    Parameters:
457 //      leftImage - left image of stereo-pair (format 8uC1).
458 //      rightImage - right image of stereo-pair (format 8uC1).
459 //   mode - mode of correspondence retrieval (now CV_DISPARITY_BIRCHFIELD only)
460 //      dispImage - destination disparity image
461 //      maxDisparity - maximal disparity
462 //      param1, param2, param3, param4, param5 - parameters of algorithm
463 //    Returns:
464 //    Notes:
465 //      Images must be rectified.
466 //      All images must have format 8uC1.
467 //F*/
468 CVAPI(void)
469 cvFindStereoCorrespondence(
470                    const  CvArr* leftImage, const  CvArr* rightImage,
471                    int     mode,
472                    CvArr*  dispImage,
473                    int     maxDisparity,
474                    double  param1 CV_DEFAULT(CV_UNDEF_SC_PARAM),
475                    double  param2 CV_DEFAULT(CV_UNDEF_SC_PARAM),
476                    double  param3 CV_DEFAULT(CV_UNDEF_SC_PARAM),
477                    double  param4 CV_DEFAULT(CV_UNDEF_SC_PARAM),
478                    double  param5 CV_DEFAULT(CV_UNDEF_SC_PARAM) );
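
/* A minimal hypothetical call of the Birchfield matcher declared above. Both inputs
   must be rectified 8-bit single-channel images of equal size; the remaining algorithm
   parameters keep their default values here.

       IplImage* disparity = cvCreateImage( cvGetSize(left), IPL_DEPTH_8U, 1 );
       cvFindStereoCorrespondence( left, right, CV_DISPARITY_BIRCHFIELD,
                                   disparity, 50 );   // maxDisparity = 50
*/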
479
480 /*****************************************************************************************/
481 /************ Epiline functions *******************/
482
483
484
485 typedef struct CvStereoLineCoeff
486 {
487     double Xcoef;
488     double XcoefA;
489     double XcoefB;
490     double XcoefAB;
491
492     double Ycoef;
493     double YcoefA;
494     double YcoefB;
495     double YcoefAB;
496
497     double Zcoef;
498     double ZcoefA;
499     double ZcoefB;
500     double ZcoefAB;
501 }CvStereoLineCoeff;
502
503
504 typedef struct CvCamera
505 {
506     float   imgSize[2]; /* size of the camera view, used during calibration */
507     float   matrix[9]; /* intrinsic camera parameters:  [ fx 0 cx; 0 fy cy; 0 0 1 ] */
508     float   distortion[4]; /* distortion coefficients - two coefficients for radial distortion
509                               and another two for tangential: [ k1 k2 p1 p2 ] */
510     float   rotMatr[9];
511     float   transVect[3]; /* rotation matrix and translation vector relative
512                              to some reference point in space. */
513 } CvCamera;
514
515 typedef struct CvStereoCamera
516 {
517     CvCamera* camera[2]; /* two individual camera parameters */
518     float fundMatr[9]; /* fundamental matrix */
519
520     /* New part for stereo */
521     CvPoint3D32f epipole[2];
522     CvPoint2D32f quad[2][4]; /* coordinates of destination quadrangle after
523                                 epipolar geometry rectification */
524     double coeffs[2][3][3];/* coefficients for transformation */
525     CvPoint2D32f border[2][4];
526     CvSize warpSize;
527     CvStereoLineCoeff* lineCoeffs;
528     int needSwapCameras;/* set to 1 if the cameras need to be swapped for good reconstruction */
529     float rotMatrix[9];
530     float transVector[3];
531 } CvStereoCamera;
532
533
534 typedef struct CvContourOrientation
535 {
536     float egvals[2];
537     float egvects[4];
538
539     float max, min; // maximum and minimum projections
540     int imax, imin;
541 } CvContourOrientation;
542
543 #define CV_CAMERA_TO_WARP 1
544 #define CV_WARP_TO_CAMERA 2
545
546 CVAPI(int) icvConvertWarpCoordinates(double coeffs[3][3],
547                                 CvPoint2D32f* cameraPoint,
548                                 CvPoint2D32f* warpPoint,
549                                 int direction);
550
551 CVAPI(int) icvGetSymPoint3D(  CvPoint3D64f pointCorner,
552                             CvPoint3D64f point1,
553                             CvPoint3D64f point2,
554                             CvPoint3D64f *pointSym2);
555
556 CVAPI(void) icvGetPieceLength3D(CvPoint3D64f point1,CvPoint3D64f point2,double* dist);
557
558 CVAPI(int) icvCompute3DPoint(    double alpha,double betta,
559                             CvStereoLineCoeff* coeffs,
560                             CvPoint3D64f* point);
561
562 CVAPI(int) icvCreateConvertMatrVect( double*     rotMatr1,
563                                 double*     transVect1,
564                                 double*     rotMatr2,
565                                 double*     transVect2,
566                                 double*     convRotMatr,
567                                 double*     convTransVect);
568
569 CVAPI(int) icvConvertPointSystem(CvPoint3D64f  M2,
570                             CvPoint3D64f* M1,
571                             double*     rotMatr,
572                             double*     transVect
573                             );
574
575 CVAPI(int) icvComputeCoeffForStereo(  CvStereoCamera* stereoCamera);
576
577 CVAPI(int) icvGetCrossPieceVector(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f v2_start,CvPoint2D32f v2_end,CvPoint2D32f *cross);
578 CVAPI(int) icvGetCrossLineDirect(CvPoint2D32f p1,CvPoint2D32f p2,float a,float b,float c,CvPoint2D32f* cross);
579 CVAPI(float) icvDefinePointPosition(CvPoint2D32f point1,CvPoint2D32f point2,CvPoint2D32f point);
580 CVAPI(int) icvStereoCalibration( int numImages,
581                             int* nums,
582                             CvSize imageSize,
583                             CvPoint2D32f* imagePoints1,
584                             CvPoint2D32f* imagePoints2,
585                             CvPoint3D32f* objectPoints,
586                             CvStereoCamera* stereoparams
587                            );
588
589
590 CVAPI(int) icvComputeRestStereoParams(CvStereoCamera *stereoparams);
591
592 CVAPI(void) cvComputePerspectiveMap( const double coeffs[3][3], CvArr* rectMapX, CvArr* rectMapY );
593
594 CVAPI(int) icvComCoeffForLine(   CvPoint2D64f point1,
595                             CvPoint2D64f point2,
596                             CvPoint2D64f point3,
597                             CvPoint2D64f point4,
598                             double*    camMatr1,
599                             double*    rotMatr1,
600                             double*    transVect1,
601                             double*    camMatr2,
602                             double*    rotMatr2,
603                             double*    transVect2,
604                             CvStereoLineCoeff*    coeffs,
605                             int* needSwapCameras);
606
607 CVAPI(int) icvGetDirectionForPoint(  CvPoint2D64f point,
608                                 double* camMatr,
609                                 CvPoint3D64f* direct);
610
611 CVAPI(int) icvGetCrossLines(CvPoint3D64f point11,CvPoint3D64f point12,
612                        CvPoint3D64f point21,CvPoint3D64f point22,
613                        CvPoint3D64f* midPoint);
614
615 CVAPI(int) icvComputeStereoLineCoeffs(   CvPoint3D64f pointA,
616                                     CvPoint3D64f pointB,
617                                     CvPoint3D64f pointCam1,
618                                     double gamma,
619                                     CvStereoLineCoeff*    coeffs);
620
621 /*CVAPI(int) icvComputeFundMatrEpipoles ( double* camMatr1,
622                                     double*     rotMatr1,
623                                     double*     transVect1,
624                                     double*     camMatr2,
625                                     double*     rotMatr2,
626                                     double*     transVect2,
627                                     CvPoint2D64f* epipole1,
628                                     CvPoint2D64f* epipole2,
629                                     double*     fundMatr);*/
630
631 CVAPI(int) icvGetAngleLine( CvPoint2D64f startPoint, CvSize imageSize,CvPoint2D64f *point1,CvPoint2D64f *point2);
632
633 CVAPI(void) icvGetCoefForPiece(   CvPoint2D64f p_start,CvPoint2D64f p_end,
634                         double *a,double *b,double *c,
635                         int* result);
636
637 /*CVAPI(void) icvGetCommonArea( CvSize imageSize,
638                     CvPoint2D64f epipole1,CvPoint2D64f epipole2,
639                     double* fundMatr,
640                     double* coeff11,double* coeff12,
641                     double* coeff21,double* coeff22,
642                     int* result);*/
643
644 CVAPI(void) icvComputeeInfiniteProject1(double*    rotMatr,
645                                      double*    camMatr1,
646                                      double*    camMatr2,
647                                      CvPoint2D32f point1,
648                                      CvPoint2D32f *point2);
649
650 CVAPI(void) icvComputeeInfiniteProject2(double*    rotMatr,
651                                      double*    camMatr1,
652                                      double*    camMatr2,
653                                      CvPoint2D32f* point1,
654                                      CvPoint2D32f point2);
655
656 CVAPI(void) icvGetCrossDirectDirect(  double* direct1,double* direct2,
657                             CvPoint2D64f *cross,int* result);
658
659 CVAPI(void) icvGetCrossPieceDirect(   CvPoint2D64f p_start,CvPoint2D64f p_end,
660                             double a,double b,double c,
661                             CvPoint2D64f *cross,int* result);
662
663 CVAPI(void) icvGetCrossPiecePiece( CvPoint2D64f p1_start,CvPoint2D64f p1_end,
664                             CvPoint2D64f p2_start,CvPoint2D64f p2_end,
665                             CvPoint2D64f* cross,
666                             int* result);
667
668 CVAPI(void) icvGetPieceLength(CvPoint2D64f point1,CvPoint2D64f point2,double* dist);
669
670 CVAPI(void) icvGetCrossRectDirect(    CvSize imageSize,
671                             double a,double b,double c,
672                             CvPoint2D64f *start,CvPoint2D64f *end,
673                             int* result);
674
675 CVAPI(void) icvProjectPointToImage(   CvPoint3D64f point,
676                             double* camMatr,double* rotMatr,double* transVect,
677                             CvPoint2D64f* projPoint);
678
679 CVAPI(void) icvGetQuadsTransform( CvSize        imageSize,
680                         double*     camMatr1,
681                         double*     rotMatr1,
682                         double*     transVect1,
683                         double*     camMatr2,
684                         double*     rotMatr2,
685                         double*     transVect2,
686                         CvSize*       warpSize,
687                         double quad1[4][2],
688                         double quad2[4][2],
689                         double*     fundMatr,
690                         CvPoint3D64f* epipole1,
691                         CvPoint3D64f* epipole2
692                         );
693
694 CVAPI(void) icvGetQuadsTransformStruct(  CvStereoCamera* stereoCamera);
695
696 CVAPI(void) icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera);
697
698 CVAPI(void) icvGetCutPiece(   double* areaLineCoef1,double* areaLineCoef2,
699                     CvPoint2D64f epipole,
700                     CvSize imageSize,
701                     CvPoint2D64f* point11,CvPoint2D64f* point12,
702                     CvPoint2D64f* point21,CvPoint2D64f* point22,
703                     int* result);
704
705 CVAPI(void) icvGetMiddleAnglePoint(   CvPoint2D64f basePoint,
706                             CvPoint2D64f point1,CvPoint2D64f point2,
707                             CvPoint2D64f* midPoint);
708
709 CVAPI(void) icvGetNormalDirect(double* direct,CvPoint2D64f point,double* normDirect);
710
711 CVAPI(double) icvGetVect(CvPoint2D64f basePoint,CvPoint2D64f point1,CvPoint2D64f point2);
712
713 CVAPI(void) icvProjectPointToDirect(  CvPoint2D64f point,double* lineCoeff,
714                             CvPoint2D64f* projectPoint);
715
716 CVAPI(void) icvGetDistanceFromPointToDirect( CvPoint2D64f point,double* lineCoef,double*dist);
717
718 CVAPI(IplImage*) icvCreateIsometricImage( IplImage* src, IplImage* dst,
719                               int desired_depth, int desired_num_channels );
720
721 CVAPI(void) cvDeInterlace( const CvArr* frame, CvArr* fieldEven, CvArr* fieldOdd );
722
723 /*CVAPI(int) icvSelectBestRt(           int           numImages,
724                                     int*          numPoints,
725                                     CvSize        imageSize,
726                                     CvPoint2D32f* imagePoints1,
727                                     CvPoint2D32f* imagePoints2,
728                                     CvPoint3D32f* objectPoints,
729
730                                     CvMatr32f     cameraMatrix1,
731                                     CvVect32f     distortion1,
732                                     CvMatr32f     rotMatrs1,
733                                     CvVect32f     transVects1,
734
735                                     CvMatr32f     cameraMatrix2,
736                                     CvVect32f     distortion2,
737                                     CvMatr32f     rotMatrs2,
738                                     CvVect32f     transVects2,
739
740                                     CvMatr32f     bestRotMatr,
741                                     CvVect32f     bestTransVect
742                                     );*/
743
744
745 /****************************************************************************************\
746 *                                     Contour Tree                                       *
747 \****************************************************************************************/
748
749 /* Contour tree header */
750 typedef struct CvContourTree
751 {
752     CV_SEQUENCE_FIELDS()
753     CvPoint p1;            /* the first point of the binary tree root segment */
754     CvPoint p2;            /* the last point of the binary tree root segment */
755 } CvContourTree;
756
757 /* Builds a hierarchical representation of a contour */
758 CVAPI(CvContourTree*)  cvCreateContourTree( const CvSeq* contour,
759                                             CvMemStorage* storage,
760                                             double threshold );
761
762 /* Reconstructs (completely or partially) a contour from its contour tree */
763 CVAPI(CvSeq*)  cvContourFromContourTree( const CvContourTree* tree,
764                                          CvMemStorage* storage,
765                                          CvTermCriteria criteria );
766
767 /* Compares two contour trees */
768 enum { CV_CONTOUR_TREES_MATCH_I1 = 1 };
769
770 CVAPI(double)  cvMatchContourTrees( const CvContourTree* tree1,
771                                     const CvContourTree* tree2,
772                                     int method, double threshold );
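
/* Hypothetical sketch: compare two contours through their tree representations.
   `c1` and `c2` are assumed to be point sequences obtained e.g. from cvFindContours;
   a threshold of 0 builds the full tree.

       CvMemStorage* storage = cvCreateMemStorage(0);
       CvContourTree* t1 = cvCreateContourTree( c1, storage, 0 );
       CvContourTree* t2 = cvCreateContourTree( c2, storage, 0 );
       double dissimilarity = cvMatchContourTrees( t1, t2, CV_CONTOUR_TREES_MATCH_I1, 10 );
       cvReleaseMemStorage( &storage );
*/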
773
774 /****************************************************************************************\
775 *                                   Contour Morphing                                     *
776 \****************************************************************************************/
777
778 /* finds correspondence between two contours */
779 CvSeq* cvCalcContoursCorrespondence( const CvSeq* contour1,
780                                      const CvSeq* contour2,
781                                      CvMemStorage* storage);
782
783 /* morphs contours using the pre-calculated correspondence:
784    alpha=0 ~ contour1, alpha=1 ~ contour2 */
785 CvSeq* cvMorphContours( const CvSeq* contour1, const CvSeq* contour2,
786                         CvSeq* corr, double alpha,
787                         CvMemStorage* storage );
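
/* Hypothetical sketch of the two functions above: build an intermediate contour halfway
   between contour1 and contour2 (alpha = 0.5).

       CvMemStorage* storage = cvCreateMemStorage(0);
       CvSeq* corr = cvCalcContoursCorrespondence( contour1, contour2, storage );
       CvSeq* mid  = cvMorphContours( contour1, contour2, corr, 0.5, storage );
       cvReleaseMemStorage( &storage );
*/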
788
789
790 /****************************************************************************************\
791 *                                   Active Contours                                      *
792 \****************************************************************************************/
793
794 #define  CV_VALUE  1
795 #define  CV_ARRAY  2
796 /* Updates active contour in order to minimize its cumulative
797    (internal and external) energy. */
798 CVAPI(void)  cvSnakeImage( const IplImage* image, CvPoint* points,
799                            int  length, float* alpha,
800                            float* beta, float* gamma,
801                            int coeff_usage, CvSize  win,
802                            CvTermCriteria criteria, int calc_gradient CV_DEFAULT(1));
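
/* Hypothetical sketch: relax a polygonal contour on a grayscale image. With CV_VALUE
   each energy coefficient is a single float shared by all points (with CV_ARRAY each
   would be an array of `length` floats); the coefficient values below are illustrative.

       CvPoint pts[40];   // initial contour vertices, filled elsewhere
       float alpha = 0.1f, beta = 0.4f, gamma = 0.5f;
       cvSnakeImage( grayImg, pts, 40, &alpha, &beta, &gamma, CV_VALUE,
                     cvSize(7,7), cvTermCriteria(CV_TERMCRIT_ITER, 100, 0.0), 1 );
       // pts now holds the updated snake vertices
*/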
803
804 /****************************************************************************************\
805 *                                    Texture Descriptors                                 *
806 \****************************************************************************************/
807
808 #define CV_GLCM_OPTIMIZATION_NONE                   -2
809 #define CV_GLCM_OPTIMIZATION_LUT                    -1
810 #define CV_GLCM_OPTIMIZATION_HISTOGRAM              0
811
812 #define CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST    10
813 #define CV_GLCMDESC_OPTIMIZATION_ALLOWTRIPLENEST    11
814 #define CV_GLCMDESC_OPTIMIZATION_HISTOGRAM          4
815
816 #define CV_GLCMDESC_ENTROPY                         0
817 #define CV_GLCMDESC_ENERGY                          1
818 #define CV_GLCMDESC_HOMOGENITY                      2
819 #define CV_GLCMDESC_CONTRAST                        3
820 #define CV_GLCMDESC_CLUSTERTENDENCY                 4
821 #define CV_GLCMDESC_CLUSTERSHADE                    5
822 #define CV_GLCMDESC_CORRELATION                     6
823 #define CV_GLCMDESC_CORRELATIONINFO1                7
824 #define CV_GLCMDESC_CORRELATIONINFO2                8
825 #define CV_GLCMDESC_MAXIMUMPROBABILITY              9
826
827 #define CV_GLCM_ALL                                 0
828 #define CV_GLCM_GLCM                                1
829 #define CV_GLCM_DESC                                2
830
831 typedef struct CvGLCM CvGLCM;
832
833 CVAPI(CvGLCM*) cvCreateGLCM( const IplImage* srcImage,
834                                 int stepMagnitude,
835                                 const int* stepDirections CV_DEFAULT(0),
836                                 int numStepDirections CV_DEFAULT(0),
837                                 int optimizationType CV_DEFAULT(CV_GLCM_OPTIMIZATION_NONE));
838
839 CVAPI(void) cvReleaseGLCM( CvGLCM** GLCM, int flag CV_DEFAULT(CV_GLCM_ALL));
840
841 CVAPI(void) cvCreateGLCMDescriptors( CvGLCM* destGLCM,
842                                         int descriptorOptimizationType
843                                         CV_DEFAULT(CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST));
844
845 CVAPI(double) cvGetGLCMDescriptor( CvGLCM* GLCM, int step, int descriptor );
846
847 CVAPI(void) cvGetGLCMDescriptorStatistics( CvGLCM* GLCM, int descriptor,
848                                               double* average, double* standardDeviation );
849
850 CVAPI(IplImage*) cvCreateGLCMImage( CvGLCM* GLCM, int step );
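
/* Hypothetical sketch of the GLCM API above: build a co-occurrence matrix for an 8-bit
   grayscale image with step magnitude 1 and the default step directions, then query one
   descriptor for the first direction.

       CvGLCM* glcm = cvCreateGLCM( grayImg, 1, 0, 0, CV_GLCM_OPTIMIZATION_LUT );
       cvCreateGLCMDescriptors( glcm, CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST );
       double entropy = cvGetGLCMDescriptor( glcm, 0, CV_GLCMDESC_ENTROPY );
       cvReleaseGLCM( &glcm, CV_GLCM_ALL );
*/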
851
852 /****************************************************************************************\
853 *                                  Face eyes&mouth tracking                              *
854 \****************************************************************************************/
855
856
857 typedef struct CvFaceTracker CvFaceTracker;
858
859 #define CV_NUM_FACE_ELEMENTS    3
860 enum CV_FACE_ELEMENTS
861 {
862     CV_FACE_MOUTH = 0,
863     CV_FACE_LEFT_EYE = 1,
864     CV_FACE_RIGHT_EYE = 2
865 };
866
867 CVAPI(CvFaceTracker*) cvInitFaceTracker(CvFaceTracker* pFaceTracking, const IplImage* imgGray,
868                                                 CvRect* pRects, int nRects);
869 CVAPI(int) cvTrackFace( CvFaceTracker* pFaceTracker, IplImage* imgGray,
870                               CvRect* pRects, int nRects,
871                               CvPoint* ptRotate, double* dbAngleRotate);
872 CVAPI(void) cvReleaseFaceTracker(CvFaceTracker** ppFaceTracker);
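
/* Hypothetical sketch of the tracking loop above: initialize from three detected
   rectangles (ordered per CV_FACE_ELEMENTS), then update once per grayscale frame.

       CvRect rects[CV_NUM_FACE_ELEMENTS];   // filled by a separate detector
       CvFaceTracker* ft = cvInitFaceTracker( 0, firstGrayFrame, rects, CV_NUM_FACE_ELEMENTS );
       // per frame:
       CvPoint rotCenter; double angle;
       if( cvTrackFace( ft, grayFrame, rects, CV_NUM_FACE_ELEMENTS, &rotCenter, &angle ) )
       {
           // rects now holds the updated mouth/eye positions
       }
       cvReleaseFaceTracker( &ft );
*/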
873
874
875 typedef struct CvFace
876 {
877     CvRect MouthRect;
878     CvRect LeftEyeRect;
879     CvRect RightEyeRect;
880 } CvFaceData;
881
882 CvSeq * cvFindFace(IplImage * Image,CvMemStorage* storage);
883 CvSeq * cvPostBoostingFindFace(IplImage * Image,CvMemStorage* storage);
884
885
886 /****************************************************************************************\
887 *                                         3D Tracker                                     *
888 \****************************************************************************************/
889
890 typedef unsigned char CvBool;
891
892 typedef struct Cv3dTracker2dTrackedObject
893 {
894     int id;
895     CvPoint2D32f p; // pgruebele: So we do not lose precision, this needs to be float
896 } Cv3dTracker2dTrackedObject;
897
898 CV_INLINE Cv3dTracker2dTrackedObject cv3dTracker2dTrackedObject(int id, CvPoint2D32f p)
899 {
900     Cv3dTracker2dTrackedObject r;
901     r.id = id;
902     r.p = p;
903     return r;
904 }
905
906 typedef struct Cv3dTrackerTrackedObject
907 {
908     int id;
909     CvPoint3D32f p;             // location of the tracked object
910 } Cv3dTrackerTrackedObject;
911
912 CV_INLINE Cv3dTrackerTrackedObject cv3dTrackerTrackedObject(int id, CvPoint3D32f p)
913 {
914     Cv3dTrackerTrackedObject r;
915     r.id = id;
916     r.p = p;
917     return r;
918 }
919
920 typedef struct Cv3dTrackerCameraInfo
921 {
922     CvBool valid;
923     float mat[4][4];              /* maps camera coordinates to world coordinates */
924     CvPoint2D32f principal_point; /* copied from intrinsics so this structure */
925                                   /* has all the info we need */
926 } Cv3dTrackerCameraInfo;
927
928 typedef struct Cv3dTrackerCameraIntrinsics
929 {
930     CvPoint2D32f principal_point;
931     float focal_length[2];
932     float distortion[4];
933 } Cv3dTrackerCameraIntrinsics;
934
935 CVAPI(CvBool) cv3dTrackerCalibrateCameras(int num_cameras,
936                      const Cv3dTrackerCameraIntrinsics camera_intrinsics[], /* size is num_cameras */
937                      CvSize etalon_size,
938                      float square_size,
939                      IplImage *samples[],                                   /* size is num_cameras */
940                      Cv3dTrackerCameraInfo camera_info[]);                  /* size is num_cameras */
941
942 CVAPI(int)  cv3dTrackerLocateObjects(int num_cameras, int num_objects,
943                    const Cv3dTrackerCameraInfo camera_info[],        /* size is num_cameras */
944                    const Cv3dTracker2dTrackedObject tracking_info[], /* size is num_objects*num_cameras */
945                    Cv3dTrackerTrackedObject tracked_objects[]);      /* size is num_objects */
946 /****************************************************************************************
947  tracking_info is a rectangular array; one row per camera, num_objects elements per row.
948  The id field of any unused slots must be -1. Ids need not be ordered or consecutive. On
949  completion, the return value is the number of objects located; i.e., the number of objects
950  visible to more than one camera. The id field of any unused slots in tracked_objects is
951  set to -1.
952 ****************************************************************************************/
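
/* Hypothetical sketch for two cameras and two objects, following the layout described
   above: one row of tracking_info per camera, num_objects entries per row, unused
   slots carrying id == -1.

       Cv3dTrackerCameraInfo cam_info[2];    // filled earlier by cv3dTrackerCalibrateCameras
       Cv3dTracker2dTrackedObject info[2*2];
       Cv3dTrackerTrackedObject objects[2];
       info[0] = cv3dTracker2dTrackedObject( 7, cvPoint2D32f(120.f,  80.f) );  // camera 0
       info[1] = cv3dTracker2dTrackedObject( 9, cvPoint2D32f(200.f, 150.f) );
       info[2] = cv3dTracker2dTrackedObject( 7, cvPoint2D32f(118.f,  90.f) );  // camera 1
       info[3] = cv3dTracker2dTrackedObject( 9, cvPoint2D32f(210.f, 160.f) );
       int found = cv3dTrackerLocateObjects( 2, 2, cam_info, info, objects );
*/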
953
954
955 /****************************************************************************************\
956 *                           Skeletons and Linear-Contour Models                          *
957 \****************************************************************************************/
958
959 typedef enum CvLeeParameters
960 {
961     CV_LEE_INT = 0,
962     CV_LEE_FLOAT = 1,
963     CV_LEE_DOUBLE = 2,
964     CV_LEE_AUTO = -1,
965     CV_LEE_ERODE = 0,
966     CV_LEE_ZOOM = 1,
967     CV_LEE_NON = 2
968 } CvLeeParameters;
969
970 #define CV_NEXT_VORONOISITE2D( SITE ) ((SITE)->edge[0]->site[((SITE)->edge[0]->site[0] == (SITE))])
971 #define CV_PREV_VORONOISITE2D( SITE ) ((SITE)->edge[1]->site[((SITE)->edge[1]->site[0] == (SITE))])
972 #define CV_FIRST_VORONOIEDGE2D( SITE ) ((SITE)->edge[0])
973 #define CV_LAST_VORONOIEDGE2D( SITE ) ((SITE)->edge[1])
974 #define CV_NEXT_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[(EDGE)->site[0] != (SITE)])
975 #define CV_PREV_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[2 + ((EDGE)->site[0] != (SITE))])
976 #define CV_VORONOIEDGE2D_BEGINNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] != (SITE))])
977 #define CV_VORONOIEDGE2D_ENDNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] == (SITE))])
978 #define CV_TWIN_VORONOISITE2D( SITE, EDGE ) ( (EDGE)->site[((EDGE)->site[0] == (SITE))])
979
980 #define CV_VORONOISITE2D_FIELDS()    \
981     struct CvVoronoiNode2D *node[2]; \
982     struct CvVoronoiEdge2D *edge[2];
983
984 typedef struct CvVoronoiSite2D
985 {
986     CV_VORONOISITE2D_FIELDS()
987     struct CvVoronoiSite2D *next[2];
988 } CvVoronoiSite2D;
989
990 #define CV_VORONOIEDGE2D_FIELDS()    \
991     struct CvVoronoiNode2D *node[2]; \
992     struct CvVoronoiSite2D *site[2]; \
993     struct CvVoronoiEdge2D *next[4];
994
995 typedef struct CvVoronoiEdge2D
996 {
997     CV_VORONOIEDGE2D_FIELDS()
998 } CvVoronoiEdge2D;
999
1000 #define CV_VORONOINODE2D_FIELDS()       \
1001     CV_SET_ELEM_FIELDS(CvVoronoiNode2D) \
1002     CvPoint2D32f pt;                    \
1003     float radius;
1004
1005 typedef struct CvVoronoiNode2D
1006 {
1007     CV_VORONOINODE2D_FIELDS()
1008 } CvVoronoiNode2D;
1009
1010 #define CV_VORONOIDIAGRAM2D_FIELDS() \
1011     CV_GRAPH_FIELDS()                \
1012     CvSet *sites;
1013
1014 typedef struct CvVoronoiDiagram2D
1015 {
1016     CV_VORONOIDIAGRAM2D_FIELDS()
1017 } CvVoronoiDiagram2D;
1018
1019 /* Computes Voronoi Diagram for given polygons with holes */
1020 CVAPI(int)  cvVoronoiDiagramFromContour(CvSeq* ContourSeq,
1021                                            CvVoronoiDiagram2D** VoronoiDiagram,
1022                                            CvMemStorage* VoronoiStorage,
1023                                            CvLeeParameters contour_type CV_DEFAULT(CV_LEE_INT),
1024                                            int contour_orientation CV_DEFAULT(-1),
1025                                            int attempt_number CV_DEFAULT(10));
1026
1027 /* Computes Voronoi Diagram for domains in given image */
1028 CVAPI(int)  cvVoronoiDiagramFromImage(IplImage* pImage,
1029                                          CvSeq** ContourSeq,
1030                                          CvVoronoiDiagram2D** VoronoiDiagram,
1031                                          CvMemStorage* VoronoiStorage,
1032                                          CvLeeParameters regularization_method CV_DEFAULT(CV_LEE_NON),
1033                                          float approx_precision CV_DEFAULT(CV_LEE_AUTO));
1034
1035 /* Deallocates the storage */
1036 CVAPI(void) cvReleaseVoronoiStorage(CvVoronoiDiagram2D* VoronoiDiagram,
1037                                           CvMemStorage** pVoronoiStorage);
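
/* Hypothetical sketch: build the Voronoi diagram (skeleton) of a closed integer-point
   contour and release the storage afterwards.

       CvMemStorage* vorStorage = cvCreateMemStorage(0);
       CvVoronoiDiagram2D* diagram = 0;
       if( cvVoronoiDiagramFromContour( contour, &diagram, vorStorage,
                                        CV_LEE_INT, -1, 10 ) )
       {
           // traverse diagram->sites and their edges with the CV_*_VORONOI* macros above
       }
       cvReleaseVoronoiStorage( diagram, &vorStorage );
*/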
1038
1039 /*********************** Linear-Contour Model ****************************/
1040
1041 struct CvLCMEdge;
1042 struct CvLCMNode;
1043
1044 typedef struct CvLCMEdge
1045 {
1046     CV_GRAPH_EDGE_FIELDS()
1047     CvSeq* chain;
1048     float width;
1049     int index1;
1050     int index2;
1051 } CvLCMEdge;
1052
1053 typedef struct CvLCMNode
1054 {
1055     CV_GRAPH_VERTEX_FIELDS()
1056     CvContour* contour;
1057 } CvLCMNode;
1058
1059
1060 /* Computes hybrid model from Voronoi Diagram */
1061 CVAPI(CvGraph*) cvLinearContorModelFromVoronoiDiagram(CvVoronoiDiagram2D* VoronoiDiagram,
1062                                                          float maxWidth);
1063
1064 /* Releases hybrid model storage */
1065 CVAPI(int) cvReleaseLinearContorModelStorage(CvGraph** Graph);
1066
1067
1068 /* two stereo-related functions */
1069
1070 CVAPI(void) cvInitPerspectiveTransform( CvSize size, const CvPoint2D32f vertex[4], double matrix[3][3],
1071                                               CvArr* rectMap );
1072
1073 /*CVAPI(void) cvInitStereoRectification( CvStereoCamera* params,
1074                                              CvArr* rectMap1, CvArr* rectMap2,
1075                                              int do_undistortion );*/
1076
1077 /*************************** View Morphing Functions ************************/
1078
1079 typedef struct CvMatrix3
1080 {
1081     float m[3][3];
1082 } CvMatrix3;
1083
1084 /* The order of the functions corresponds to the order in which they should appear
1085    in the view morphing pipeline */
1086
1087 /* Finds ending points of scanlines on left and right images of stereo-pair */
1088 CVAPI(void)  cvMakeScanlines( const CvMatrix3* matrix, CvSize  img_size,
1089                               int*  scanlines1, int*  scanlines2,
1090                               int*  lengths1, int*  lengths2,
1091                               int*  line_count );
1092
1093 /* Grabs pixel values from the scanlines and stores them sequentially
1094    (some sort of perspective image transform) */
1095 CVAPI(void)  cvPreWarpImage( int       line_count,
1096                              IplImage* img,
1097                              uchar*    dst,
1098                              int*      dst_nums,
1099                              int*      scanlines);
1100
1101 /* Approximate each grabbed scanline by a sequence of runs
1102    (lossy run-length compression) */
1103 CVAPI(void)  cvFindRuns( int    line_count,
1104                          uchar* prewarp1,
1105                          uchar* prewarp2,
1106                          int*   line_lengths1,
1107                          int*   line_lengths2,
1108                          int*   runs1,
1109                          int*   runs2,
1110                          int*   num_runs1,
1111                          int*   num_runs2);
1112
1113 /* Compares two sets of compressed scanlines */
1114 CVAPI(void)  cvDynamicCorrespondMulti( int  line_count,
1115                                        int* first,
1116                                        int* first_runs,
1117                                        int* second,
1118                                        int* second_runs,
1119                                        int* first_corr,
1120                                        int* second_corr);
1121
1122 /* Finds scanline ending coordinates for some intermediate "virtual" camera position */
1123 CVAPI(void)  cvMakeAlphaScanlines( int*  scanlines1,
1124                                    int*  scanlines2,
1125                                    int*  scanlinesA,
1126                                    int*  lengths,
1127                                    int   line_count,
1128                                    float alpha);
1129
1130 /* Blends data of the left and right image scanlines to get
1131    pixel values of "virtual" image scanlines */
1132 CVAPI(void)  cvMorphEpilinesMulti( int    line_count,
1133                                    uchar* first_pix,
1134                                    int*   first_num,
1135                                    uchar* second_pix,
1136                                    int*   second_num,
1137                                    uchar* dst_pix,
1138                                    int*   dst_num,
1139                                    float  alpha,
1140                                    int*   first,
1141                                    int*   first_runs,
1142                                    int*   second,
1143                                    int*   second_runs,
1144                                    int*   first_corr,
1145                                    int*   second_corr);
1146
1147 /* Does reverse warping of the morphing result to make
1148    it fill the destination image rectangle */
1149 CVAPI(void)  cvPostWarpImage( int       line_count,
1150                               uchar*    src,
1151                               int*      src_nums,
1152                               IplImage* img,
1153                               int*      scanlines);
1154
1155 /* Deletes moire (missing pixels that appear due to discretization) */
1156 CVAPI(void)  cvDeleteMoire( IplImage*  img );
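
/* A rough outline of how the pipeline above is intended to be chained (not a complete
   program): cvMakeScanlines (first with null buffers to obtain line_count, then with
   allocated buffers) -> cvPreWarpImage for both source images -> cvFindRuns ->
   cvDynamicCorrespondMulti -> cvMakeAlphaScanlines -> cvMorphEpilinesMulti ->
   cvPostWarpImage -> cvDeleteMoire. */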
1157
1158
1159 typedef struct CvConDensation
1160 {
1161     int MP;
1162     int DP;
1163     float* DynamMatr;       /* Matrix of the linear Dynamics system  */
1164     float* State;           /* Vector of State                       */
1165     int SamplesNum;         /* Number of the Samples                 */
1166     float** flSamples;      /* arr of the Sample Vectors             */
1167     float** flNewSamples;   /* temporary array of the Sample Vectors */
1168     float* flConfidence;    /* Confidence for each Sample            */
1169     float* flCumulative;    /* Cumulative confidence                 */
1170     float* Temp;            /* Temporary vector                      */
1171     float* RandomSample;    /* RandomVector to update sample set     */
1172     struct CvRandState* RandS; /* Array of structures to generate random vectors */
1173 } CvConDensation;
1174
1175 /* Creates ConDensation filter state */
1176 CVAPI(CvConDensation*)  cvCreateConDensation( int dynam_params,
1177                                              int measure_params,
1178                                              int sample_count );
1179
1180 /* Releases ConDensation filter state */
1181 CVAPI(void)  cvReleaseConDensation( CvConDensation** condens );
1182
1183 /* Updates ConDensation filter by time (predict future state of the system) */
1184 CVAPI(void)  cvConDensUpdateByTime( CvConDensation* condens);
1185
1186 /* Initializes ConDensation filter samples  */
1187 CVAPI(void)  cvConDensInitSampleSet( CvConDensation* condens, CvMat* lower_bound, CvMat* upper_bound );
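
/* Hypothetical sketch of one ConDensation (particle filter) cycle for a 2D state in a
   640x480 image. The measurement model is application specific: flConfidence must be
   filled by the caller before every cvConDensUpdateByTime call.

       CvConDensation* cd = cvCreateConDensation( 2, 2, 100 );
       CvMat* lo = cvCreateMat( 2, 1, CV_32FC1 );
       CvMat* hi = cvCreateMat( 2, 1, CV_32FC1 );
       cvmSet( lo, 0, 0, 0 );   cvmSet( lo, 1, 0, 0 );
       cvmSet( hi, 0, 0, 640 ); cvmSet( hi, 1, 0, 480 );
       cvConDensInitSampleSet( cd, lo, hi );
       // cd->DynamMatr (DP x DP, row-major) should also be filled, e.g. with an identity
       for( int i = 0; i < cd->SamplesNum; i++ )
           cd->flConfidence[i] = 1.f;               // replace with a real measurement likelihood
       cvConDensUpdateByTime( cd );
       // cd->State now holds the confidence-weighted state estimate
       cvReleaseConDensation( &cd );
       cvReleaseMat( &lo ); cvReleaseMat( &hi );
*/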
1188
1189 CV_INLINE int iplWidth( const IplImage* img )
1190 {
1191     return !img ? 0 : !img->roi ? img->width : img->roi->width;
1192 }
1193
1194 CV_INLINE int iplHeight( const IplImage* img )
1195 {
1196     return !img ? 0 : !img->roi ? img->height : img->roi->height;
1197 }
1198
1199 #ifdef __cplusplus
1200 }
1201 #endif
1202
1203 #ifdef __cplusplus
1204
1205 /****************************************************************************************\
1206 *                                   Calibration engine                                   *
1207 \****************************************************************************************/
1208
1209 typedef enum CvCalibEtalonType
1210 {
1211     CV_CALIB_ETALON_USER = -1,
1212     CV_CALIB_ETALON_CHESSBOARD = 0,
1213     CV_CALIB_ETALON_CHECKERBOARD = CV_CALIB_ETALON_CHESSBOARD
1214 }
1215 CvCalibEtalonType;
1216
1217 class CV_EXPORTS CvCalibFilter
1218 {
1219 public:
1220     /* Constructor & destructor */
1221     CvCalibFilter();
1222     virtual ~CvCalibFilter();
1223
1224     /* Sets etalon type - one for all cameras.
1225        etalonParams is used in case of pre-defined etalons (such as chessboard).
1226        Number of elements in etalonParams is determined by etalonType.
1227        E.g., if the etalon type is CV_CALIB_ETALON_CHESSBOARD then:
1228          etalonParams[0] is the number of squares along one side of the etalon
1229          etalonParams[1] is the number of squares along the other side of the etalon
1230          etalonParams[2] is linear size of squares in the board in arbitrary units.
1231        pointCount & points are used in case of
1232        CV_CALIB_ETALON_USER (user-defined) etalon. */
1233     virtual bool
1234         SetEtalon( CvCalibEtalonType etalonType, double* etalonParams,
1235                    int pointCount = 0, CvPoint2D32f* points = 0 );
1236
1237     /* Retrieves etalon parameters and/or points */
1238     virtual CvCalibEtalonType
1239         GetEtalon( int* paramCount = 0, const double** etalonParams = 0,
1240                    int* pointCount = 0, const CvPoint2D32f** etalonPoints = 0 ) const;
1241
1242     /* Sets number of cameras calibrated simultaneously. It is equal to 1 initially */
1243     virtual void SetCameraCount( int cameraCount );
1244
1245     /* Retrieves number of cameras */
1246     int GetCameraCount() const { return cameraCount; }
1247
1248     /* Starts calibration of the cameras */
1249     virtual bool SetFrames( int totalFrames );
1250
1251     /* Stops calibration of the cameras */
1252     virtual void Stop( bool calibrate = false );
1253
1254     /* Returns true if the cameras have been calibrated */
1255     bool IsCalibrated() const { return isCalibrated; }
1256
1257     /* Feeds another series of snapshots (one per camera) to the filter.
1258        Etalon points on these images are found automatically.
1259        If the function cannot locate the points, it returns false */
1260     virtual bool FindEtalon( IplImage** imgs );
1261
1262     /* The same but takes matrices */
1263     virtual bool FindEtalon( CvMat** imgs );
1264
1265     /* Lower-level function for feeding filter with already found etalon points.
1266        Array of point arrays for each camera is passed. */
1267     virtual bool Push( const CvPoint2D32f** points = 0 );
1268
1269     /* Returns total number of accepted frames and, optionally,
1270        total number of frames to collect */
1271     virtual int GetFrameCount( int* framesTotal = 0 ) const;
1272
1273     /* Retrieves camera parameters for specified camera.
1274        If camera is not calibrated the function returns 0 */
1275     virtual const CvCamera* GetCameraParams( int idx = 0 ) const;
1276
1277     virtual const CvStereoCamera* GetStereoParams() const;
1278
1279     /* Sets camera parameters for all cameras */
1280     virtual bool SetCameraParams( CvCamera* params );
1281
1282     /* Saves all camera parameters to file */
1283     virtual bool SaveCameraParams( const char* filename );
1284
1285     /* Loads all camera parameters from file */
1286     virtual bool LoadCameraParams( const char* filename );
1287
1288     /* Undistorts images using camera parameters. Some of src pointers can be NULL. */
1289     virtual bool Undistort( IplImage** src, IplImage** dst );
1290
1291     /* Undistorts images using camera parameters. Some of src pointers can be NULL. */
1292     virtual bool Undistort( CvMat** src, CvMat** dst );
1293
1294     /* Returns the array of etalon points detected/partially detected
1295        on the latest frame for the idx-th camera */
1296     virtual bool GetLatestPoints( int idx, CvPoint2D32f** pts,
1297                                                   int* count, bool* found );
1298
1299     /* Draw the latest detected/partially detected etalon */
1300     virtual void DrawPoints( IplImage** dst );
1301
1302     /* Draw the latest detected/partially detected etalon */
1303     virtual void DrawPoints( CvMat** dst );
1304
1305     virtual bool Rectify( IplImage** srcarr, IplImage** dstarr );
1306     virtual bool Rectify( CvMat** srcarr, CvMat** dstarr );
1307
1308 protected:
1309
1310     enum { MAX_CAMERAS = 3 };
1311
1312     /* etalon data */
1313     CvCalibEtalonType  etalonType;
1314     int     etalonParamCount;
1315     double* etalonParams;
1316     int     etalonPointCount;
1317     CvPoint2D32f* etalonPoints;
1318     CvSize  imgSize;
1319     CvMat*  grayImg;
1320     CvMat*  tempImg;
1321     CvMemStorage* storage;
1322
1323     /* camera data */
1324     int     cameraCount;
1325     CvCamera cameraParams[MAX_CAMERAS];
1326     CvStereoCamera stereo;
1327     CvPoint2D32f* points[MAX_CAMERAS];
1328     CvMat*  undistMap[MAX_CAMERAS][2];
1329     CvMat*  undistImg;
1330     int     latestCounts[MAX_CAMERAS];
1331     CvPoint2D32f* latestPoints[MAX_CAMERAS];
1332     CvMat*  rectMap[MAX_CAMERAS][2];
1333
1334     /* Added by Valery */
1335     //CvStereoCamera stereoParams;
1336
1337     int     maxPoints;
1338     int     framesTotal;
1339     int     framesAccepted;
1340     bool    isCalibrated;
1341 };
1342
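/* Example use of CvCalibFilter (illustrative sketch only): calibrating a single
   camera with a chessboard etalon. grabFrame() is a hypothetical frame source,
   and Push() is assumed to accumulate the points found by the last FindEtalon().

       CvCalibFilter calib;
       double etalonParams[] = { 8, 6, 25 };               // 8x6 squares, square size 25 units
       calib.SetEtalon( CV_CALIB_ETALON_CHESSBOARD, etalonParams );
       calib.SetCameraCount( 1 );
       calib.SetFrames( 15 );                              // number of views to collect
       while( !calib.IsCalibrated() )
       {
           IplImage* frame = grabFrame();
           IplImage* imgs[] = { frame };
           if( calib.FindEtalon( imgs ) )
               calib.Push();
           calib.DrawPoints( imgs );                       // optional visualization
       }
       calib.SaveCameraParams( "camera.yml" );
       const CvCamera* cam = calib.GetCameraParams( 0 );
*/
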
1343 #include <iosfwd>
1344 #include <limits>
1345
1346 class CV_EXPORTS CvImage
1347 {
1348 public:
1349     CvImage() : image(0), refcount(0) {}
1350     CvImage( CvSize _size, int _depth, int _channels )
1351     {
1352         image = cvCreateImage( _size, _depth, _channels );
1353         refcount = image ? new int(1) : 0;
1354     }
1355
1356     CvImage( IplImage* img ) : image(img)
1357     {
1358         refcount = image ? new int(1) : 0;
1359     }
1360
1361     CvImage( const CvImage& img ) : image(img.image), refcount(img.refcount)
1362     {
1363         if( refcount ) ++(*refcount);
1364     }
1365
1366     CvImage( const char* filename, const char* imgname=0, int color=-1 ) : image(0), refcount(0)
1367     { load( filename, imgname, color ); }
1368
1369     CvImage( CvFileStorage* fs, const char* mapname, const char* imgname ) : image(0), refcount(0)
1370     { read( fs, mapname, imgname ); }
1371
1372     CvImage( CvFileStorage* fs, const char* seqname, int idx ) : image(0), refcount(0)
1373     { read( fs, seqname, idx ); }
1374
1375     ~CvImage()
1376     {
1377         if( refcount && !(--*refcount) )
1378         {
1379             cvReleaseImage( &image );
1380             delete refcount;
1381         }
1382     }
1383
1384     CvImage clone() { return CvImage(image ? cvCloneImage(image) : 0); }
1385
1386     void create( CvSize _size, int _depth, int _channels )
1387     {
1388         if( !image || !refcount ||
1389            image->width != _size.width || image->height != _size.height ||
1390            image->depth != _depth || image->nChannels != _channels )
1391             attach( cvCreateImage( _size, _depth, _channels ));
1392     }
1393
1394     void release() { detach(); }
1395     void clear() { detach(); }
1396
1397     void attach( IplImage* img, bool use_refcount=true )
1398     {
1399         if( refcount && --*refcount == 0 )
1400         {
1401             cvReleaseImage( &image );
1402             delete refcount;
1403         }
1404         image = img;
1405         refcount = use_refcount && image ? new int(1) : 0;
1406     }
1407
1408     void detach()
1409     {
1410         if( refcount && --*refcount == 0 )
1411         {
1412             cvReleaseImage( &image );
1413             delete refcount;
1414         }
1415         image = 0;
1416         refcount = 0;
1417     }
1418
1419     bool load( const char* filename, const char* imgname=0, int color=-1 );
1420     bool read( CvFileStorage* fs, const char* mapname, const char* imgname );
1421     bool read( CvFileStorage* fs, const char* seqname, int idx );
1422     void save( const char* filename, const char* imgname, const int* params=0 );
1423     void write( CvFileStorage* fs, const char* imgname );
1424
1425     void show( const char* window_name );
1426     bool is_valid() { return image != 0; }
1427
1428     int width() const { return image ? image->width : 0; }
1429     int height() const { return image ? image->height : 0; }
1430
1431     CvSize size() const { return image ? cvSize(image->width, image->height) : cvSize(0,0); }
1432
1433     CvSize roi_size() const
1434     {
1435         return !image ? cvSize(0,0) :
1436         !image->roi ? cvSize(image->width,image->height) :
1437         cvSize(image->roi->width, image->roi->height);
1438     }
1439
1440     CvRect roi() const
1441     {
1442         return !image ? cvRect(0,0,0,0) :
1443         !image->roi ? cvRect(0,0,image->width,image->height) :
1444         cvRect(image->roi->xOffset,image->roi->yOffset,
1445                image->roi->width,image->roi->height);
1446     }
1447
1448     int coi() const { return !image || !image->roi ? 0 : image->roi->coi; }
1449
1450     void set_roi(CvRect _roi) { cvSetImageROI(image,_roi); }
1451     void reset_roi() { cvResetImageROI(image); }
1452     void set_coi(int _coi) { cvSetImageCOI(image,_coi); }
1453     int depth() const { return image ? image->depth : 0; }
1454     int channels() const { return image ? image->nChannels : 0; }
1455     int pix_size() const { return image ? ((image->depth & 255)>>3)*image->nChannels : 0; }
1456
1457     uchar* data() { return image ? (uchar*)image->imageData : 0; }
1458     const uchar* data() const { return image ? (const uchar*)image->imageData : 0; }
1459     int step() const { return image ? image->widthStep : 0; }
1460     int origin() const { return image ? image->origin : 0; }
1461
1462     uchar* roi_row(int y)
1463     {
1464         assert(0<=y);
1465         assert(!image ?
1466                1 : image->roi ?
1467                y<image->roi->height : y<image->height);
1468
1469         return !image ? 0 :
1470         !image->roi ?
1471         (uchar*)(image->imageData + y*image->widthStep) :
1472         (uchar*)(image->imageData + (y+image->roi->yOffset)*image->widthStep +
1473                  image->roi->xOffset*((image->depth & 255)>>3)*image->nChannels);
1474     }
1475
1476     const uchar* roi_row(int y) const
1477     {
1478         assert(0<=y);
1479         assert(!image ?
1480                1 : image->roi ?
1481                y<image->roi->height : y<image->height);
1482
1483         return !image ? 0 :
1484         !image->roi ?
1485         (const uchar*)(image->imageData + y*image->widthStep) :
1486         (const uchar*)(image->imageData + (y+image->roi->yOffset)*image->widthStep +
1487                        image->roi->xOffset*((image->depth & 255)>>3)*image->nChannels);
1488     }
1489
1490     operator const IplImage* () const { return image; }
1491     operator IplImage* () { return image; }
1492
1493     CvImage& operator = (const CvImage& img)
1494     {
1495         if( img.refcount )
1496             ++*img.refcount;
1497         if( refcount && !(--*refcount) )
1498             cvReleaseImage( &image );
1499         image=img.image;
1500         refcount=img.refcount;
1501         return *this;
1502     }
1503
1504 protected:
1505     IplImage* image;
1506     int* refcount;
1507 };
1508
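/* Example use of CvImage (illustrative sketch only): a reference-counted
   wrapper around IplImage that converts implicitly to IplImage* wherever a
   C-API function expects one.

       CvImage img( cvSize(640, 480), IPL_DEPTH_8U, 3 );
       cvZero( img );                               // implicit conversion to IplImage*
       CvImage gray;
       gray.create( img.size(), IPL_DEPTH_8U, 1 );
       cvCvtColor( img, gray, CV_BGR2GRAY );
       CvImage alias = img;                         // shares the same IplImage (refcounted)
*/
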
1509
1510 class CV_EXPORTS CvMatrix
1511 {
1512 public:
1513     CvMatrix() : matrix(0) {}
1514     CvMatrix( int _rows, int _cols, int _type )
1515     { matrix = cvCreateMat( _rows, _cols, _type ); }
1516
1517     CvMatrix( int _rows, int _cols, int _type, CvMat* hdr,
1518              void* _data=0, int _step=CV_AUTOSTEP )
1519     { matrix = cvInitMatHeader( hdr, _rows, _cols, _type, _data, _step ); }
1520
1521     CvMatrix( int rows, int cols, int type, CvMemStorage* storage, bool alloc_data=true );
1522
1523     CvMatrix( int _rows, int _cols, int _type, void* _data, int _step=CV_AUTOSTEP )
1524     { matrix = cvCreateMatHeader( _rows, _cols, _type );
1525         cvSetData( matrix, _data, _step ); }
1526
1527     CvMatrix( CvMat* m )
1528     { matrix = m; }
1529
1530     CvMatrix( const CvMatrix& m )
1531     {
1532         matrix = m.matrix;
1533         addref();
1534     }
1535
1536     CvMatrix( const char* filename, const char* matname=0, int color=-1 ) : matrix(0)
1537     {  load( filename, matname, color ); }
1538
1539     CvMatrix( CvFileStorage* fs, const char* mapname, const char* matname ) : matrix(0)
1540     {  read( fs, mapname, matname ); }
1541
1542     CvMatrix( CvFileStorage* fs, const char* seqname, int idx ) : matrix(0)
1543     {  read( fs, seqname, idx ); }
1544
1545     ~CvMatrix()
1546     {
1547         release();
1548     }
1549
1550     CvMatrix clone() { return CvMatrix(matrix ? cvCloneMat(matrix) : 0); }
1551
1552     void set( CvMat* m, bool add_ref )
1553     {
1554         release();
1555         matrix = m;
1556         if( add_ref )
1557             addref();
1558     }
1559
1560     void create( int _rows, int _cols, int _type )
1561     {
1562         if( !matrix || !matrix->refcount ||
1563            matrix->rows != _rows || matrix->cols != _cols ||
1564            CV_MAT_TYPE(matrix->type) != _type )
1565             set( cvCreateMat( _rows, _cols, _type ), false );
1566     }
1567
1568     void addref() const
1569     {
1570         if( matrix )
1571         {
1572             if( matrix->hdr_refcount )
1573                 ++matrix->hdr_refcount;
1574             else if( matrix->refcount )
1575                 ++*matrix->refcount;
1576         }
1577     }
1578
1579     void release()
1580     {
1581         if( matrix )
1582         {
1583             if( matrix->hdr_refcount )
1584             {
1585                 if( --matrix->hdr_refcount == 0 )
1586                     cvReleaseMat( &matrix );
1587             }
1588             else if( matrix->refcount )
1589             {
1590                 if( --*matrix->refcount == 0 )
1591                     cvFree( &matrix->refcount );
1592             }
1593             matrix = 0;
1594         }
1595     }
1596
1597     void clear()
1598     {
1599         release();
1600     }
1601
1602     bool load( const char* filename, const char* matname=0, int color=-1 );
1603     bool read( CvFileStorage* fs, const char* mapname, const char* matname );
1604     bool read( CvFileStorage* fs, const char* seqname, int idx );
1605     void save( const char* filename, const char* matname, const int* params=0 );
1606     void write( CvFileStorage* fs, const char* matname );
1607
1608     void show( const char* window_name );
1609
1610     bool is_valid() { return matrix != 0; }
1611
1612     int rows() const { return matrix ? matrix->rows : 0; }
1613     int cols() const { return matrix ? matrix->cols : 0; }
1614
1615     CvSize size() const
1616     {
1617         return !matrix ? cvSize(0,0) : cvSize(matrix->cols,matrix->rows); /* CvSize is (width, height) = (cols, rows) */
1618     }
1619
1620     int type() const { return matrix ? CV_MAT_TYPE(matrix->type) : 0; }
1621     int depth() const { return matrix ? CV_MAT_DEPTH(matrix->type) : 0; }
1622     int channels() const { return matrix ? CV_MAT_CN(matrix->type) : 0; }
1623     int pix_size() const { return matrix ? CV_ELEM_SIZE(matrix->type) : 0; }
1624
1625     uchar* data() { return matrix ? matrix->data.ptr : 0; }
1626     const uchar* data() const { return matrix ? matrix->data.ptr : 0; }
1627     int step() const { return matrix ? matrix->step : 0; }
1628
1629     void set_data( void* _data, int _step=CV_AUTOSTEP )
1630     { cvSetData( matrix, _data, _step ); }
1631
1632     uchar* row(int i) { return !matrix ? 0 : matrix->data.ptr + i*matrix->step; }
1633     const uchar* row(int i) const
1634     { return !matrix ? 0 : matrix->data.ptr + i*matrix->step; }
1635
1636     operator const CvMat* () const { return matrix; }
1637     operator CvMat* () { return matrix; }
1638
1639     CvMatrix& operator = (const CvMatrix& _m)
1640     {
1641         _m.addref();
1642         release();
1643         matrix = _m.matrix;
1644         return *this;
1645     }
1646
1647 protected:
1648     CvMat* matrix;
1649 };
1650
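/* Example use of CvMatrix (illustrative sketch only): the CvMat counterpart of
   CvImage, converting implicitly to CvMat* for the C API.

       CvMatrix A( 3, 3, CV_32FC1 );
       cvSetIdentity( A );                          // implicit conversion to CvMat*
       CvMatrix B = A.clone();                      // deep copy
       cvScaleAdd( A, cvScalarAll(2), B, B );       // B = 2*A + B
       float* row0 = (float*)B.row(0);
*/
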
1651 /****************************************************************************************\
1652  *                                       CamShiftTracker                                  *
1653  \****************************************************************************************/
1654
1655 class CV_EXPORTS CvCamShiftTracker
1656 {
1657 public:
1658
1659     CvCamShiftTracker();
1660     virtual ~CvCamShiftTracker();
1661
1662     /**** Characteristics of the object that are calculated by track_object method *****/
1663     float   get_orientation() const // orientation of the object in degrees
1664     { return m_box.angle; }
1665     float   get_length() const // the larger linear size of the object
1666     { return m_box.size.height; }
1667     float   get_width() const // the smaller linear size of the object
1668     { return m_box.size.width; }
1669     CvPoint2D32f get_center() const // center of the object
1670     { return m_box.center; }
1671     CvRect get_window() const // bounding rectangle for the object
1672     { return m_comp.rect; }
1673
1674     /*********************** Tracking parameters ************************/
1675     int     get_threshold() const // threshold value that is applied to the back projection
1676     { return m_threshold; }
1677
1678     int     get_hist_dims( int* dims = 0 ) const // returns the number of histogram dimensions and, optionally, the size of each dimension
1679     { return m_hist ? cvGetDims( m_hist->bins, dims ) : 0; }
1680
1681     int     get_min_ch_val( int channel ) const // get the minimum allowed value of the specified channel
1682     { return m_min_ch_val[channel]; }
1683
1684     int     get_max_ch_val( int channel ) const // get the maximum allowed value of the specified channel
1685     { return m_max_ch_val[channel]; }
1686
1687     // set initial object rectangle (must be called before initial calculation of the histogram)
1688     bool    set_window( CvRect window)
1689     { m_comp.rect = window; return true; }
1690
1691     bool    set_threshold( int threshold ) // threshold applied to the histogram bins
1692     { m_threshold = threshold; return true; }
1693
1694     bool    set_hist_bin_range( int dim, int min_val, int max_val );
1695
1696     bool    set_hist_dims( int c_dims, int* dims ); // sets the histogram dimensions (number of dimensions and bin counts)
1697
1698     bool    set_min_ch_val( int channel, int val ) // set the minimum allowed value of the specified channel
1699     { m_min_ch_val[channel] = val; return true; }
1700     bool    set_max_ch_val( int channel, int val ) // set the maximum allowed value of the specified channel
1701     { m_max_ch_val[channel] = val; return true; }
1702
1703     /************************ The processing methods *********************************/
1704     // update object position
1705     virtual bool  track_object( const IplImage* cur_frame );
1706
1707     // update object histogram
1708     virtual bool  update_histogram( const IplImage* cur_frame );
1709
1710     // reset histogram
1711     virtual void  reset_histogram();
1712
1713     /************************ Retrieving internal data *******************************/
1714     // get back project image
1715     virtual IplImage* get_back_project()
1716     { return m_back_project; }
1717
1718     float query( int* bin ) const
1719     { return m_hist ? (float)cvGetRealND(m_hist->bins, bin) : 0.f; }
1720
1721 protected:
1722
1723     // internal method for color conversion: fills m_color_planes group
1724     virtual void color_transform( const IplImage* img );
1725
1726     CvHistogram* m_hist;
1727
1728     CvBox2D    m_box;
1729     CvConnectedComp m_comp;
1730
1731     float      m_hist_ranges_data[CV_MAX_DIM][2];
1732     float*     m_hist_ranges[CV_MAX_DIM];
1733
1734     int        m_min_ch_val[CV_MAX_DIM];
1735     int        m_max_ch_val[CV_MAX_DIM];
1736     int        m_threshold;
1737
1738     IplImage*  m_color_planes[CV_MAX_DIM];
1739     IplImage*  m_back_project;
1740     IplImage*  m_temp;
1741     IplImage*  m_mask;
1742 };
1743
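/* Example use of CvCamShiftTracker (illustrative sketch only): a typical
   tracking loop. grabFrame() and the initial rectangle are placeholders.

       CvCamShiftTracker tracker;
       int dims[] = { 32 };                                 // 1D hue histogram with 32 bins
       tracker.set_hist_dims( 1, dims );
       tracker.set_window( cvRect(100, 100, 50, 50) );      // initial object position
       tracker.update_histogram( grabFrame() );             // build the object color model
       for(;;)
       {
           const IplImage* frame = grabFrame();
           if( !tracker.track_object( frame ) )
               break;
           CvRect win = tracker.get_window();
           float angle = tracker.get_orientation();
       }
*/
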
1744 /****************************************************************************************\
1745 *                              Expectation - Maximization                                *
1746 \****************************************************************************************/
1747 struct CV_EXPORTS_W_MAP CvEMParams
1748 {
1749     CvEMParams();
1750     CvEMParams( int nclusters, int cov_mat_type=cv::EM::COV_MAT_DIAGONAL,
1751                 int start_step=cv::EM::START_AUTO_STEP,
1752                 CvTermCriteria term_crit=cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON),
1753                 const CvMat* probs=0, const CvMat* weights=0, const CvMat* means=0, const CvMat** covs=0 );
1754
1755     CV_PROP_RW int nclusters;
1756     CV_PROP_RW int cov_mat_type;
1757     CV_PROP_RW int start_step;
1758     const CvMat* probs;
1759     const CvMat* weights;
1760     const CvMat* means;
1761     const CvMat** covs;
1762     CV_PROP_RW CvTermCriteria term_crit;
1763 };
1764
1765
1766 class CV_EXPORTS_W CvEM : public CvStatModel
1767 {
1768 public:
1769     // Type of covariance matrices
1770     enum { COV_MAT_SPHERICAL=cv::EM::COV_MAT_SPHERICAL,
1771            COV_MAT_DIAGONAL =cv::EM::COV_MAT_DIAGONAL,
1772            COV_MAT_GENERIC  =cv::EM::COV_MAT_GENERIC };
1773
1774     // The initial step
1775     enum { START_E_STEP=cv::EM::START_E_STEP,
1776            START_M_STEP=cv::EM::START_M_STEP,
1777            START_AUTO_STEP=cv::EM::START_AUTO_STEP };
1778
1779     CV_WRAP CvEM();
1780     CvEM( const CvMat* samples, const CvMat* sampleIdx=0,
1781           CvEMParams params=CvEMParams(), CvMat* labels=0 );
1782
1783     virtual ~CvEM();
1784
1785     virtual bool train( const CvMat* samples, const CvMat* sampleIdx=0,
1786                         CvEMParams params=CvEMParams(), CvMat* labels=0 );
1787
1788     virtual float predict( const CvMat* sample, CV_OUT CvMat* probs ) const;
1789
1790 #ifndef SWIG
1791     CV_WRAP CvEM( const cv::Mat& samples, const cv::Mat& sampleIdx=cv::Mat(),
1792                   CvEMParams params=CvEMParams() );
1793
1794     CV_WRAP virtual bool train( const cv::Mat& samples,
1795                                 const cv::Mat& sampleIdx=cv::Mat(),
1796                                 CvEMParams params=CvEMParams(),
1797                                 CV_OUT cv::Mat* labels=0 );
1798
1799     CV_WRAP virtual float predict( const cv::Mat& sample, CV_OUT cv::Mat* probs=0 ) const;
1800     CV_WRAP virtual double calcLikelihood( const cv::Mat &sample ) const;
1801
1802     CV_WRAP int getNClusters() const;
1803     CV_WRAP cv::Mat getMeans() const;
1804     CV_WRAP void getCovs(CV_OUT std::vector<cv::Mat>& covs) const;
1805     CV_WRAP cv::Mat getWeights() const;
1806     CV_WRAP cv::Mat getProbs() const;
1807
1808     CV_WRAP inline double getLikelihood() const { return emObj.isTrained() ? logLikelihood : DBL_MAX; }
1809 #endif
1810
1811     CV_WRAP virtual void clear();
1812
1813     int get_nclusters() const;
1814     const CvMat* get_means() const;
1815     const CvMat** get_covs() const;
1816     const CvMat* get_weights() const;
1817     const CvMat* get_probs() const;
1818
1819     inline double get_log_likelihood() const { return getLikelihood(); }
1820
1821     virtual void read( CvFileStorage* fs, CvFileNode* node );
1822     virtual void write( CvFileStorage* fs, const char* name ) const;
1823
1824 protected:
1825     void set_mat_hdrs();
1826
1827     cv::EM emObj;
1828     cv::Mat probs;
1829     double logLikelihood;
1830
1831     CvMat meansHdr;
1832     std::vector<CvMat> covsHdrs;
1833     std::vector<CvMat*> covsPtrs;
1834     CvMat weightsHdr;
1835     CvMat probsHdr;
1836 };
1837
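/* Example use of CvEM (illustrative sketch only): fitting a 3-component
   Gaussian mixture to 2D samples stored row-wise in a CV_32FC1 matrix.

       CvEMParams params( 3, CvEM::COV_MAT_DIAGONAL, CvEM::START_AUTO_STEP );
       cv::Mat samples( 1000, 2, CV_32FC1 );        // filled by the caller
       cv::Mat labels;
       CvEM em;
       em.train( samples, cv::Mat(), params, &labels );
       cv::Mat means = em.getMeans();
       float cluster = em.predict( samples.row(0), 0 );
*/
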
1838 namespace cv
1839 {
1840
1841 typedef CvEMParams EMParams;
1842 typedef CvEM ExpectationMaximization;
1843
1844 /*!
1845  The Patch Generator class
1846  */
1847 class CV_EXPORTS PatchGenerator
1848 {
1849 public:
1850     PatchGenerator();
1851     PatchGenerator(double _backgroundMin, double _backgroundMax,
1852                    double _noiseRange, bool _randomBlur=true,
1853                    double _lambdaMin=0.6, double _lambdaMax=1.5,
1854                    double _thetaMin=-CV_PI, double _thetaMax=CV_PI,
1855                    double _phiMin=-CV_PI, double _phiMax=CV_PI );
1856     void operator()(const Mat& image, Point2f pt, Mat& patch, Size patchSize, RNG& rng) const;
1857     void operator()(const Mat& image, const Mat& transform, Mat& patch,
1858                     Size patchSize, RNG& rng) const;
1859     void warpWholeImage(const Mat& image, Mat& matT, Mat& buf,
1860                         CV_OUT Mat& warped, int border, RNG& rng) const;
1861     void generateRandomTransform(Point2f srcCenter, Point2f dstCenter,
1862                                  CV_OUT Mat& transform, RNG& rng,
1863                                  bool inverse=false) const;
1864     void setAffineParam(double lambda, double theta, double phi);
1865
1866     double backgroundMin, backgroundMax;
1867     double noiseRange;
1868     bool randomBlur;
1869     double lambdaMin, lambdaMax;
1870     double thetaMin, thetaMax;
1871     double phiMin, phiMax;
1872 };
1873
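/* Example use of PatchGenerator (illustrative sketch only): synthesizing a
   randomly warped view of a patch around a keypoint, e.g. for training.

       PatchGenerator gen;                          // default warp/noise ranges
       RNG rng(0x12345);
       Mat image = imread("object.png", 0);         // hypothetical grayscale input
       Mat patch;
       gen( image, Point2f(120.f, 80.f), patch, Size(31, 31), rng );
*/
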
1874
1875 class CV_EXPORTS LDetector
1876 {
1877 public:
1878     LDetector();
1879     LDetector(int _radius, int _threshold, int _nOctaves,
1880               int _nViews, double _baseFeatureSize, double _clusteringDistance);
1881     void operator()(const Mat& image,
1882                     CV_OUT vector<KeyPoint>& keypoints,
1883                     int maxCount=0, bool scaleCoords=true) const;
1884     void operator()(const vector<Mat>& pyr,
1885                     CV_OUT vector<KeyPoint>& keypoints,
1886                     int maxCount=0, bool scaleCoords=true) const;
1887     void getMostStable2D(const Mat& image, CV_OUT vector<KeyPoint>& keypoints,
1888                          int maxCount, const PatchGenerator& patchGenerator) const;
1889     void setVerbose(bool verbose);
1890
1891     void read(const FileNode& node);
1892     void write(FileStorage& fs, const String& name=String()) const;
1893
1894     int radius;
1895     int threshold;
1896     int nOctaves;
1897     int nViews;
1898     bool verbose;
1899
1900     double baseFeatureSize;
1901     double clusteringDistance;
1902 };
1903
1904 typedef LDetector YAPE;
1905
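/* Example use of LDetector (illustrative sketch only): detecting up to 200
   keypoints with the default detector settings.

       LDetector ldet;
       vector<KeyPoint> keypoints;
       Mat image = imread("scene.png", 0);          // hypothetical grayscale input
       ldet( image, keypoints, 200 );               // keep at most 200 keypoints
*/
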
1906 class CV_EXPORTS FernClassifier
1907 {
1908 public:
1909     FernClassifier();
1910     FernClassifier(const FileNode& node);
1911     FernClassifier(const vector<vector<Point2f> >& points,
1912                    const vector<Mat>& refimgs,
1913                    const vector<vector<int> >& labels=vector<vector<int> >(),
1914                    int _nclasses=0, int _patchSize=PATCH_SIZE,
1915                    int _signatureSize=DEFAULT_SIGNATURE_SIZE,
1916                    int _nstructs=DEFAULT_STRUCTS,
1917                    int _structSize=DEFAULT_STRUCT_SIZE,
1918                    int _nviews=DEFAULT_VIEWS,
1919                    int _compressionMethod=COMPRESSION_NONE,
1920                    const PatchGenerator& patchGenerator=PatchGenerator());
1921     virtual ~FernClassifier();
1922     virtual void read(const FileNode& n);
1923     virtual void write(FileStorage& fs, const String& name=String()) const;
1924     virtual void trainFromSingleView(const Mat& image,
1925                                      const vector<KeyPoint>& keypoints,
1926                                      int _patchSize=PATCH_SIZE,
1927                                      int _signatureSize=DEFAULT_SIGNATURE_SIZE,
1928                                      int _nstructs=DEFAULT_STRUCTS,
1929                                      int _structSize=DEFAULT_STRUCT_SIZE,
1930                                      int _nviews=DEFAULT_VIEWS,
1931                                      int _compressionMethod=COMPRESSION_NONE,
1932                                      const PatchGenerator& patchGenerator=PatchGenerator());
1933     virtual void train(const vector<vector<Point2f> >& points,
1934                        const vector<Mat>& refimgs,
1935                        const vector<vector<int> >& labels=vector<vector<int> >(),
1936                        int _nclasses=0, int _patchSize=PATCH_SIZE,
1937                        int _signatureSize=DEFAULT_SIGNATURE_SIZE,
1938                        int _nstructs=DEFAULT_STRUCTS,
1939                        int _structSize=DEFAULT_STRUCT_SIZE,
1940                        int _nviews=DEFAULT_VIEWS,
1941                        int _compressionMethod=COMPRESSION_NONE,
1942                        const PatchGenerator& patchGenerator=PatchGenerator());
1943     virtual int operator()(const Mat& img, Point2f kpt, vector<float>& signature) const;
1944     virtual int operator()(const Mat& patch, vector<float>& signature) const;
1945     virtual void clear();
1946     virtual bool empty() const;
1947     void setVerbose(bool verbose);
1948
1949     int getClassCount() const;
1950     int getStructCount() const;
1951     int getStructSize() const;
1952     int getSignatureSize() const;
1953     int getCompressionMethod() const;
1954     Size getPatchSize() const;
1955
1956     struct Feature
1957     {
1958         uchar x1, y1, x2, y2;
1959         Feature() : x1(0), y1(0), x2(0), y2(0) {}
1960         Feature(int _x1, int _y1, int _x2, int _y2)
1961         : x1((uchar)_x1), y1((uchar)_y1), x2((uchar)_x2), y2((uchar)_y2)
1962         {}
1963         template<typename _Tp> bool operator ()(const Mat_<_Tp>& patch) const
1964         { return patch(y1,x1) > patch(y2, x2); }
1965     };
1966
1967     enum
1968     {
1969         PATCH_SIZE = 31,
1970         DEFAULT_STRUCTS = 50,
1971         DEFAULT_STRUCT_SIZE = 9,
1972         DEFAULT_VIEWS = 5000,
1973         DEFAULT_SIGNATURE_SIZE = 176,
1974         COMPRESSION_NONE = 0,
1975         COMPRESSION_RANDOM_PROJ = 1,
1976         COMPRESSION_PCA = 2,
1977         DEFAULT_COMPRESSION_METHOD = COMPRESSION_NONE
1978     };
1979
1980 protected:
1981     virtual void prepare(int _nclasses, int _patchSize, int _signatureSize,
1982                          int _nstructs, int _structSize,
1983                          int _nviews, int _compressionMethod);
1984     virtual void finalize(RNG& rng);
1985     virtual int getLeaf(int fidx, const Mat& patch) const;
1986
1987     bool verbose;
1988     int nstructs;
1989     int structSize;
1990     int nclasses;
1991     int signatureSize;
1992     int compressionMethod;
1993     int leavesPerStruct;
1994     Size patchSize;
1995     vector<Feature> features;
1996     vector<int> classCounters;
1997     vector<float> posteriors;
1998 };
1999
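/* Example use of FernClassifier (illustrative sketch only): training from a
   single model view and classifying a patch around a test point.

       Mat object = imread("object.png", 0);        // hypothetical model image
       LDetector ldet;
       vector<KeyPoint> modelPoints;
       ldet.getMostStable2D( object, modelPoints, 100, PatchGenerator() );

       FernClassifier fern;
       fern.trainFromSingleView( object, modelPoints );

       Mat scene = imread("scene.png", 0);
       vector<float> signature;
       int bestClass = fern( scene, Point2f(200.f, 150.f), signature );
*/
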
2000
2001 /****************************************************************************************\
2002  *                                 Calonder Classifier                                    *
2003  \****************************************************************************************/
2004
2005 struct RTreeNode;
2006
2007 struct CV_EXPORTS BaseKeypoint
2008 {
2009     int x;
2010     int y;
2011     IplImage* image;
2012
2013     BaseKeypoint()
2014     : x(0), y(0), image(NULL)
2015     {}
2016
2017     BaseKeypoint(int _x, int _y, IplImage* _image)
2018     : x(_x), y(_y), image(_image)
2019     {}
2020 };
2021
2022 class CV_EXPORTS RandomizedTree
2023 {
2024 public:
2025     friend class RTreeClassifier;
2026
2027     static const uchar PATCH_SIZE = 32;
2028     static const int DEFAULT_DEPTH = 9;
2029     static const int DEFAULT_VIEWS = 5000;
2030     static const size_t DEFAULT_REDUCED_NUM_DIM = 176;
2031     static float GET_LOWER_QUANT_PERC() { return .03f; }
2032     static float GET_UPPER_QUANT_PERC() { return .92f; }
2033
2034     RandomizedTree();
2035     ~RandomizedTree();
2036
2037     void train(vector<BaseKeypoint> const& base_set, RNG &rng,
2038                int depth, int views, size_t reduced_num_dim, int num_quant_bits);
2039     void train(vector<BaseKeypoint> const& base_set, RNG &rng,
2040                PatchGenerator &make_patch, int depth, int views, size_t reduced_num_dim,
2041                int num_quant_bits);
2042
2043     // the following two functions are EXPERIMENTAL (do not use unless you know exactly what you are doing)
2044     static void quantizeVector(float *vec, int dim, int N, float bnds[2], int clamp_mode=0);
2045     static void quantizeVector(float *src, int dim, int N, float bnds[2], uchar *dst);
2046
2047     // patch_data must be a 32x32 array (no row padding)
2048     float* getPosterior(uchar* patch_data);
2049     const float* getPosterior(uchar* patch_data) const;
2050     uchar* getPosterior2(uchar* patch_data);
2051     const uchar* getPosterior2(uchar* patch_data) const;
2052
2053     void read(const char* file_name, int num_quant_bits);
2054     void read(std::istream &is, int num_quant_bits);
2055     void write(const char* file_name) const;
2056     void write(std::ostream &os) const;
2057
2058     int classes() { return classes_; }
2059     int depth() { return depth_; }
2060
2061     //void setKeepFloatPosteriors(bool b) { keep_float_posteriors_ = b; }
2062     void discardFloatPosteriors() { freePosteriors(1); }
2063
2064     inline void applyQuantization(int num_quant_bits) { makePosteriors2(num_quant_bits); }
2065
2066     // debug
2067     void savePosteriors(std::string url, bool append=false);
2068     void savePosteriors2(std::string url, bool append=false);
2069
2070 private:
2071     int classes_;
2072     int depth_;
2073     int num_leaves_;
2074     vector<RTreeNode> nodes_;
2075     float **posteriors_;      // 16-byte aligned posteriors
2076     uchar **posteriors2_;     // 16-byte aligned posteriors
2077     vector<int> leaf_counts_;
2078
2079     void createNodes(int num_nodes, RNG &rng);
2080     void allocPosteriorsAligned(int num_leaves, int num_classes);
2081     void freePosteriors(int which);    // which: 1=posteriors_, 2=posteriors2_, 3=both
2082     void init(int classes, int depth, RNG &rng);
2083     void addExample(int class_id, uchar* patch_data);
2084     void finalize(size_t reduced_num_dim, int num_quant_bits);
2085     int getIndex(uchar* patch_data) const;
2086     inline float* getPosteriorByIndex(int index);
2087     inline const float* getPosteriorByIndex(int index) const;
2088     inline uchar* getPosteriorByIndex2(int index);
2089     inline const uchar* getPosteriorByIndex2(int index) const;
2090     //void makeRandomMeasMatrix(float *cs_phi, PHI_DISTR_TYPE dt, size_t reduced_num_dim);
2091     void convertPosteriorsToChar();
2092     void makePosteriors2(int num_quant_bits);
2093     void compressLeaves(size_t reduced_num_dim);
2094     void estimateQuantPercForPosteriors(float perc[2]);
2095 };
2096
2097
2098 inline uchar* getData(IplImage* image)
2099 {
2100     return reinterpret_cast<uchar*>(image->imageData);
2101 }
2102
2103 inline float* RandomizedTree::getPosteriorByIndex(int index)
2104 {
2105     return const_cast<float*>(const_cast<const RandomizedTree*>(this)->getPosteriorByIndex(index));
2106 }
2107
2108 inline const float* RandomizedTree::getPosteriorByIndex(int index) const
2109 {
2110     return posteriors_[index];
2111 }
2112
2113 inline uchar* RandomizedTree::getPosteriorByIndex2(int index)
2114 {
2115     return const_cast<uchar*>(const_cast<const RandomizedTree*>(this)->getPosteriorByIndex2(index));
2116 }
2117
2118 inline const uchar* RandomizedTree::getPosteriorByIndex2(int index) const
2119 {
2120     return posteriors2_[index];
2121 }
2122
2123 struct CV_EXPORTS RTreeNode
2124 {
2125     short offset1, offset2;
2126
2127     RTreeNode() {}
2128     RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2)
2129     : offset1(y1*RandomizedTree::PATCH_SIZE + x1),
2130     offset2(y2*RandomizedTree::PATCH_SIZE + x2)
2131     {}
2132
2133     //! Left child on 0, right child on 1
2134     inline bool operator() (uchar* patch_data) const
2135     {
2136         return patch_data[offset1] > patch_data[offset2];
2137     }
2138 };
2139
2140 class CV_EXPORTS RTreeClassifier
2141 {
2142 public:
2143     static const int DEFAULT_TREES = 48;
2144     static const size_t DEFAULT_NUM_QUANT_BITS = 4;
2145
2146     RTreeClassifier();
2147     void train(vector<BaseKeypoint> const& base_set,
2148                RNG &rng,
2149                int num_trees = RTreeClassifier::DEFAULT_TREES,
2150                int depth = RandomizedTree::DEFAULT_DEPTH,
2151                int views = RandomizedTree::DEFAULT_VIEWS,
2152                size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM,
2153                int num_quant_bits = DEFAULT_NUM_QUANT_BITS);
2154     void train(vector<BaseKeypoint> const& base_set,
2155                RNG &rng,
2156                PatchGenerator &make_patch,
2157                int num_trees = RTreeClassifier::DEFAULT_TREES,
2158                int depth = RandomizedTree::DEFAULT_DEPTH,
2159                int views = RandomizedTree::DEFAULT_VIEWS,
2160                size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM,
2161                int num_quant_bits = DEFAULT_NUM_QUANT_BITS);
2162
2163     // sig must point to a memory block of at least classes()*sizeof(float|uchar) bytes
2164     void getSignature(IplImage *patch, uchar *sig) const;
2165     void getSignature(IplImage *patch, float *sig) const;
2166     void getSparseSignature(IplImage *patch, float *sig, float thresh) const;
2167     // TODO: deprecated in favor of getSignature overload, remove
2168     void getFloatSignature(IplImage *patch, float *sig) const { getSignature(patch, sig); }
2169
2170     static int countNonZeroElements(float *vec, int n, double tol=1e-10);
2171     static inline void safeSignatureAlloc(uchar **sig, int num_sig=1, int sig_len=176);
2172     static inline uchar* safeSignatureAlloc(int num_sig=1, int sig_len=176);
2173
2174     inline int classes() const { return classes_; }
2175     inline int original_num_classes() const { return original_num_classes_; }
2176
2177     void setQuantization(int num_quant_bits);
2178     void discardFloatPosteriors();
2179
2180     void read(const char* file_name);
2181     void read(std::istream &is);
2182     void write(const char* file_name) const;
2183     void write(std::ostream &os) const;
2184
2185     // experimental and debug
2186     void saveAllFloatPosteriors(std::string file_url);
2187     void saveAllBytePosteriors(std::string file_url);
2188     void setFloatPosteriorsFromTextfile_176(std::string url);
2189     float countZeroElements();
2190
2191     vector<RandomizedTree> trees_;
2192
2193 private:
2194     int classes_;
2195     int num_quant_bits_;
2196     mutable uchar **posteriors_;
2197     mutable unsigned short *ptemp_;
2198     int original_num_classes_;
2199     bool keep_floats_;
2200 };
2201
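/* Example use of RTreeClassifier (illustrative sketch only): training on a few
   base keypoints of a model image and computing a signature for a test patch.
   The image files and keypoint coordinates are placeholders.

       IplImage* object = cvLoadImage( "object.png", 0 );
       vector<BaseKeypoint> base_set;
       base_set.push_back( BaseKeypoint(50, 60, object) );
       base_set.push_back( BaseKeypoint(120, 90, object) );

       RNG rng( 0x34985739 );
       RTreeClassifier classifier;
       classifier.train( base_set, rng );                   // default trees/depth/views

       IplImage* patch = cvLoadImage( "patch.png", 0 );     // 32x32 test patch
       float* sig = new float[classifier.classes()];
       classifier.getSignature( patch, sig );
       delete[] sig;
*/
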
2202 /****************************************************************************************\
2203 *                                     One-Way Descriptor                                 *
2204 \****************************************************************************************/
2205
2206 // CvAffinePose: defines a parameterized affine transformation of an image patch.
2207 // An image patch is rotated by angle phi (in degrees), then scaled lambda1 times
2208 // along the horizontal direction and lambda2 times along the vertical direction,
2209 // and then rotated again by angle (theta - phi).
2210 class CV_EXPORTS CvAffinePose
2211 {
2212 public:
2213     float phi;
2214     float theta;
2215     float lambda1;
2216     float lambda2;
2217 };
2218
2219 class CV_EXPORTS OneWayDescriptor
2220 {
2221 public:
2222     OneWayDescriptor();
2223     ~OneWayDescriptor();
2224
2225     // allocates memory for given descriptor parameters
2226     void Allocate(int pose_count, CvSize size, int nChannels);
2227
2228     // GenerateSamples: generates affine-transformed patches, averaging them over small transformation variations.
2229     // If external poses and transforms were specified, uses them instead of generating random ones
2230     // - pose_count: the number of poses to be generated
2231     // - frontal: the input patch (can be a roi in a larger image)
2232     // - norm: if nonzero, normalizes the output patch so that the sum of pixel intensities is 1
2233     void GenerateSamples(int pose_count, IplImage* frontal, int norm = 0);
2234
2235     // GenerateSamplesFast: generates affine-transformed patches, averaging them over small transformation variations.
2236     // Uses precalculated transformed pca components.
2237     // - frontal: the input patch (can be a roi in a larger image)
2238     // - pca_hr_avg: pca average vector
2239     // - pca_hr_eigenvectors: pca eigenvectors
2240     // - pca_descriptors: an array of precomputed descriptors of pca components containing their affine transformations
2241     //   pca_descriptors[0] corresponds to the average, pca_descriptors[1]-pca_descriptors[pca_dim] correspond to eigenvectors
2242     void GenerateSamplesFast(IplImage* frontal, CvMat* pca_hr_avg,
2243                              CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors);
2244
2245     // sets the poses and corresponding transforms
2246     void SetTransforms(CvAffinePose* poses, CvMat** transforms);
2247
2248     // Initialize: builds a descriptor.
2249     // - pose_count: the number of poses to build. If poses were set externally, uses them rather than generating random ones
2250     // - frontal: input patch. Can be a roi in a larger image
2251     // - feature_name: the feature name to be associated with the descriptor
2252     // - norm: if 1, the affine transformed patches are normalized so that their sum is 1
2253     void Initialize(int pose_count, IplImage* frontal, const char* feature_name = 0, int norm = 0);
2254
2255     // InitializeFast: builds a descriptor using precomputed descriptors of pca components
2256     // - pose_count: the number of poses to build
2257     // - frontal: input patch. Can be a roi in a larger image
2258     // - feature_name: the feature name to be associated with the descriptor
2259     // - pca_hr_avg: average vector for PCA
2260     // - pca_hr_eigenvectors: PCA eigenvectors (one vector per row)
2261     // - pca_descriptors: precomputed descriptors of PCA components, the first descriptor for the average vector
2262     // followed by the descriptors for eigenvectors
2263     void InitializeFast(int pose_count, IplImage* frontal, const char* feature_name,
2264                         CvMat* pca_hr_avg, CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors);
2265
2266     // ProjectPCASample: unwarps an image patch into a vector and projects it into PCA space
2267     // - patch: input image patch
2268     // - avg: PCA average vector
2269     // - eigenvectors: PCA eigenvectors, one per row
2270     // - pca_coeffs: output PCA coefficients
2271     void ProjectPCASample(IplImage* patch, CvMat* avg, CvMat* eigenvectors, CvMat* pca_coeffs) const;
2272
2273     // InitializePCACoeffs: projects all warped patches into PCA space
2274     // - avg: PCA average vector
2275     // - eigenvectors: PCA eigenvectors, one per row
2276     void InitializePCACoeffs(CvMat* avg, CvMat* eigenvectors);
2277
2278     // EstimatePose: finds the closest match between an input patch and a set of patches with different poses
2279     // - patch: input image patch
2280     // - pose_idx: the output index of the closest pose
2281     // - distance: the distance to the closest pose (L2 distance)
2282     void EstimatePose(IplImage* patch, int& pose_idx, float& distance) const;
2283
2284     // EstimatePosePCA: finds the closest match between an input patch and a set of patches with different poses.
2285     // The distance between patches is computed in PCA space
2286     // - patch: input image patch
2287     // - pose_idx: the output index of the closest pose
2288     // - distance: distance to the closest pose (L2 distance in PCA space)
2289     // - avg: PCA average vector. If 0, matching without PCA is used
2290     // - eigenvectors: PCA eigenvectors, one per row
2291     void EstimatePosePCA(CvArr* patch, int& pose_idx, float& distance, CvMat* avg, CvMat* eigenvalues) const;
2292
2293     // GetPatchSize: returns the size of each image patch after warping (2 times smaller than the input patch)
2294     CvSize GetPatchSize() const
2295     {
2296         return m_patch_size;
2297     }
2298
2299     // GetInputPatchSize: returns the required size of the patch that the descriptor is built from
2300     // (2 times larger than the patch after warping)
2301     CvSize GetInputPatchSize() const
2302     {
2303         return cvSize(m_patch_size.width*2, m_patch_size.height*2);
2304     }
2305
2306     // GetPatch: returns a patch corresponding to specified pose index
2307     // - index: pose index
2308     // - return value: the patch corresponding to specified pose index
2309     IplImage* GetPatch(int index);
2310
2311     // GetPose: returns a pose corresponding to specified pose index
2312     // - index: pose index
2313     // - return value: the pose corresponding to specified pose index
2314     CvAffinePose GetPose(int index) const;
2315
2316     // Save: saves all patches with different poses to a specified path
2317     void Save(const char* path);
2318
2319     // ReadByName: reads a descriptor from a file storage
2320     // - fs: file storage
2321     // - parent: parent node
2322     // - name: node name
2323     // - return value: 1 if succeeded, 0 otherwise
2324     int ReadByName(CvFileStorage* fs, CvFileNode* parent, const char* name);
2325
2326     // ReadByName: reads a descriptor from a file node
2327     // - parent: parent node
2328     // - name: node name
2329     // - return value: 1 if succeeded, 0 otherwise
2330     int ReadByName(const FileNode &parent, const char* name);
2331
2332     // Write: writes a descriptor into a file storage
2333     // - fs: file storage
2334     // - name: node name
2335     void Write(CvFileStorage* fs, const char* name);
2336
2337     // GetFeatureName: returns a name corresponding to a feature
2338     const char* GetFeatureName() const;
2339
2340     // GetCenter: returns the center of the feature
2341     CvPoint GetCenter() const;
2342
2343     void SetPCADimHigh(int pca_dim_high) {m_pca_dim_high = pca_dim_high;};
2344     void SetPCADimLow(int pca_dim_low) {m_pca_dim_low = pca_dim_low;};
2345
2346     int GetPCADimLow() const;
2347     int GetPCADimHigh() const;
2348
2349     CvMat** GetPCACoeffs() const {return m_pca_coeffs;}
2350
2351 protected:
2352     int m_pose_count; // the number of poses
2353     CvSize m_patch_size; // size of each image
2354     IplImage** m_samples; // an array of length m_pose_count containing the patch in different poses
2355     IplImage* m_input_patch;
2356     IplImage* m_train_patch;
2357     CvMat** m_pca_coeffs; // an array of length m_pose_count containing pca decomposition of the patch in different poses
2358     CvAffinePose* m_affine_poses; // an array of poses
2359     CvMat** m_transforms; // an array of affine transforms corresponding to poses
2360
2361     string m_feature_name; // the name of the feature associated with the descriptor
2362     CvPoint m_center; // the coordinates of the feature (the center of the input image ROI)
2363
2364     int m_pca_dim_high; // the number of descriptor pca components to use for generating affine poses
2365     int m_pca_dim_low; // the number of pca components to use for comparison
2366 };
2367
2368
2369 // OneWayDescriptorBase: encapsulates functionality for training/loading a set of one-way descriptors
2370 // and finding the closest descriptor to an input feature
2371 class CV_EXPORTS OneWayDescriptorBase
2372 {
2373 public:
2374
2375     // creates an instance of OneWayDescriptor from a set of training files
2376     // - patch_size: size of the input (large) patch
2377     // - pose_count: the number of poses to generate for each descriptor
2378     // - train_path: path to training files
2379     // - pca_config: the name of the file that contains PCA for small patches (2 times smaller
2380     // than patch_size in each dimension)
2381     // - pca_hr_config: the name of the file that contains PCA for large patches (of patch_size size)
2382     // - pca_desc_config: the name of the file that contains descriptors of PCA components
2383     OneWayDescriptorBase(CvSize patch_size, int pose_count, const char* train_path = 0, const char* pca_config = 0,
2384                          const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1,
2385                          int pca_dim_high = 100, int pca_dim_low = 100);
2386
2387     OneWayDescriptorBase(CvSize patch_size, int pose_count, const string &pca_filename, const string &train_path = string(), const string &images_list = string(),
2388                          float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1,
2389                          int pca_dim_high = 100, int pca_dim_low = 100);
2390
2391
2392     virtual ~OneWayDescriptorBase();
2393     void clear ();
2394
2395
2396     // Allocate: allocates memory for a given number of descriptors
2397     void Allocate(int train_feature_count);
2398
2399     // AllocatePCADescriptors: allocates memory for pca descriptors
2400     void AllocatePCADescriptors();
2401
2402     // returns patch size
2403     CvSize GetPatchSize() const {return m_patch_size;};
2404     // returns the number of poses for each descriptor
2405     int GetPoseCount() const {return m_pose_count;};
2406
2407     // returns the number of pyramid levels
2408     int GetPyrLevels() const {return m_pyr_levels;};
2409
2410     // returns the number of descriptors
2411     int GetDescriptorCount() const {return m_train_feature_count;};
2412
2413     // CreateDescriptorsFromImage: creates descriptors for each of the input features
2414     // - src: input image
2415     // - features: input features
2416     // - pyr_levels: the number of pyramid levels
2417     void CreateDescriptorsFromImage(IplImage* src, const vector<KeyPoint>& features);
2418
2419     // CreatePCADescriptors: generates descriptors for PCA components, needed for fast generation of feature descriptors
2420     void CreatePCADescriptors();
2421
2422     // returns a feature descriptor by feature index
2423     const OneWayDescriptor* GetDescriptor(int desc_idx) const {return &m_descriptors[desc_idx];};
2424
2425     // FindDescriptor: finds the closest descriptor
2426     // - patch: input image patch
2427     // - desc_idx: output index of the closest descriptor to the input patch
2428     // - pose_idx: output index of the closest pose of the closest descriptor to the input patch
2429     // - distance: distance from the input patch to the closest feature pose
2430     // - _scales: scales of the input patch for each descriptor
2431     // - scale_ranges: input scales variation (float[2])
2432     void FindDescriptor(IplImage* patch, int& desc_idx, int& pose_idx, float& distance, float* _scale = 0, float* scale_ranges = 0) const;
2433
2434     // - patch: input image patch
2435     // - n: number of the closest indexes
2436     // - desc_idxs: output indexes of the closest descriptor to the input patch (n)
2437     // - pose_idx: output indexes of the closest pose of the closest descriptor to the input patch (n)
2438     // - distances: distance from the input patch to the closest feature pose (n)
2439     // - _scales: scales of the input patch
2440     // - scale_ranges: input scales variation (float[2])
2441     void FindDescriptor(IplImage* patch, int n, vector<int>& desc_idxs, vector<int>& pose_idxs,
2442                         vector<float>& distances, vector<float>& _scales, float* scale_ranges = 0) const;
2443
2444     // FindDescriptor: finds the closest descriptor
2445     // - src: input image
2446     // - pt: center of the feature
2447     // - desc_idx: output index of the closest descriptor to the input patch
2448     // - pose_idx: output index of the closest pose of the closest descriptor to the input patch
2449     // - distance: distance from the input patch to the closest feature pose
2450     void FindDescriptor(IplImage* src, cv::Point2f pt, int& desc_idx, int& pose_idx, float& distance) const;
2451
2452     // InitializePoses: generates random poses
2453     void InitializePoses();
2454
2455     // InitializeTransformsFromPoses: generates 2x3 affine matrices from poses (initializes m_transforms)
2456     void InitializeTransformsFromPoses();
2457
2458     // InitializePoseTransforms: subsequently calls InitializePoses and InitializeTransformsFromPoses
2459     void InitializePoseTransforms();
2460
2461     // InitializeDescriptor: initializes a descriptor
2462     // - desc_idx: descriptor index
2463     // - train_image: image patch (ROI is supported)
2464     // - feature_label: feature textual label
2465     void InitializeDescriptor(int desc_idx, IplImage* train_image, const char* feature_label);
2466
2467     void InitializeDescriptor(int desc_idx, IplImage* train_image, const KeyPoint& keypoint, const char* feature_label);
2468
2469     // InitializeDescriptors: load features from an image and create descriptors for each of them
2470     void InitializeDescriptors(IplImage* train_image, const vector<KeyPoint>& features,
2471                                const char* feature_label = "", int desc_start_idx = 0);
2472
2473     // Write: writes this object to a file storage
2474     // - fs: output filestorage
2475     void Write (FileStorage &fs) const;
2476
2477     // Read: reads OneWayDescriptorBase object from a file node
2478     // - fn: input file node
2479     void Read (const FileNode &fn);
2480
2481     // LoadPCADescriptors: loads PCA descriptors from a file
2482     // - filename: input filename
2483     int LoadPCADescriptors(const char* filename);
2484
2485     // LoadPCADescriptors: loads PCA descriptors from a file node
2486     // - fn: input file node
2487     int LoadPCADescriptors(const FileNode &fn);
2488
2489     // SavePCADescriptors: saves PCA descriptors to a file
2490     // - filename: output filename
2491     void SavePCADescriptors(const char* filename);
2492
2493     // SavePCADescriptors: saves PCA descriptors to a file storage
2494     // - fs: output file storage
2495     void SavePCADescriptors(CvFileStorage* fs) const;
2496
2497     // GeneratePCA: calculate and save PCA components and descriptors
2498     // - img_path: path to training PCA images directory
2499     // - images_list: filename with filenames of training PCA images
2500     void GeneratePCA(const char* img_path, const char* images_list, int pose_count=500);
2501
2502     // SetPCAHigh: sets the high resolution pca matrices (copied to internal structures)
2503     void SetPCAHigh(CvMat* avg, CvMat* eigenvectors);
2504
2505     // SetPCALow: sets the low resolution pca matrices (copied to internal structures)
2506     void SetPCALow(CvMat* avg, CvMat* eigenvectors);
2507
2508     int GetLowPCA(CvMat** avg, CvMat** eigenvectors)
2509     {
2510         *avg = m_pca_avg;
2511         *eigenvectors = m_pca_eigenvectors;
2512         return m_pca_dim_low;
2513     };
2514
2515     int GetPCADimLow() const {return m_pca_dim_low;};
2516     int GetPCADimHigh() const {return m_pca_dim_high;};
2517
2518     void ConvertDescriptorsArrayToTree(); // converts the pca_descriptors array to a KD tree
2519
2520     // GetPCAFilename: get default PCA filename
2521     static string GetPCAFilename () { return "pca.yml"; }
2522
2523     virtual bool empty() const { return m_train_feature_count <= 0; }
2524
2525 protected:
2526     CvSize m_patch_size; // patch size
2527     int m_pose_count; // the number of poses for each descriptor
2528     int m_train_feature_count; // the number of the training features
2529     OneWayDescriptor* m_descriptors; // array of train feature descriptors
2530     CvMat* m_pca_avg; // PCA average Vector for small patches
2531     CvMat* m_pca_eigenvectors; // PCA eigenvectors for small patches
2532     CvMat* m_pca_hr_avg; // PCA average Vector for large patches
2533     CvMat* m_pca_hr_eigenvectors; // PCA eigenvectors for large patches
2534     OneWayDescriptor* m_pca_descriptors; // an array of PCA descriptors
2535
2536     cv::flann::Index* m_pca_descriptors_tree;
2537     CvMat* m_pca_descriptors_matrix;
2538
2539     CvAffinePose* m_poses; // array of poses
2540     CvMat** m_transforms; // array of affine transformations corresponding to poses
2541
2542     int m_pca_dim_high;
2543     int m_pca_dim_low;
2544
2545     int m_pyr_levels;
2546     float scale_min;
2547     float scale_max;
2548     float scale_step;
2549
2550     // SavePCAall: saves PCA components and descriptors to a file storage
2551     // - fs: output file storage
2552     void SavePCAall (FileStorage &fs) const;
2553
2554     // LoadPCAall: loads PCA components and descriptors from a file node
2555     // - fn: input file node
2556     void LoadPCAall (const FileNode &fn);
2557 };
2558
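/* Example use of OneWayDescriptorBase (illustrative sketch only): building
   descriptors for the keypoints of a training image and matching a test patch.
   Assumes a precomputed PCA file (see GetPCAFilename()) and a keypoint
   detector that fills `features`; file names are placeholders.

       OneWayDescriptorBase base( cvSize(64, 64), 50, "pca.yml" );
       IplImage* train_img = cvLoadImage( "object.png", 0 );
       vector<KeyPoint> features;                           // filled by any detector
       base.CreateDescriptorsFromImage( train_img, features );

       IplImage* patch = cvLoadImage( "patch.png", 0 );
       int desc_idx = -1, pose_idx = -1;
       float distance = 0;
       base.FindDescriptor( patch, desc_idx, pose_idx, distance );
*/
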
2559 class CV_EXPORTS OneWayDescriptorObject : public OneWayDescriptorBase
2560 {
2561 public:
2562     // creates an instance of OneWayDescriptorObject from a set of training files
2563     // - patch_size: size of the input (large) patch
2564     // - pose_count: the number of poses to generate for each descriptor
2565     // - train_path: path to training files
2566     // - pca_config: the name of the file that contains PCA for small patches (2 times smaller
2567     //   than patch_size in each dimension)
2568     // - pca_hr_config: the name of the file that contains PCA for large patches (of patch_size size)
2569     // - pca_desc_config: the name of the file that contains descriptors of PCA components
2570     OneWayDescriptorObject(CvSize patch_size, int pose_count, const char* train_path, const char* pca_config,
2571                            const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1);
2572
2573     OneWayDescriptorObject(CvSize patch_size, int pose_count, const string &pca_filename,
2574                            const string &train_path = string (), const string &images_list = string (),
2575                            float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1);
2576
2577
2578     virtual ~OneWayDescriptorObject();
2579
2580     // Allocate: allocates memory for a given number of features
2581     // - train_feature_count: the total number of features
2582     // - object_feature_count: the number of features extracted from the object
2583     void Allocate(int train_feature_count, int object_feature_count);
2584
2585
2586     void SetLabeledFeatures(const vector<KeyPoint>& features) { m_train_features = features; }
2587     vector<KeyPoint>& GetLabeledFeatures() { return m_train_features; }
2588     const vector<KeyPoint>& GetLabeledFeatures() const { return m_train_features; }
2589     vector<KeyPoint> _GetLabeledFeatures() const;
2590
2591     // IsDescriptorObject: returns 1 if descriptor with specified index is positive, otherwise 0
2592     int IsDescriptorObject(int desc_idx) const;
2593
2594     // MatchPointToPart: returns the part number of a feature if it matches one of the object parts, otherwise -1
2595     int MatchPointToPart(CvPoint pt) const;
2596
2597     // GetDescriptorPart: returns the part number of the feature corresponding to a specified descriptor
2598     // - desc_idx: descriptor index
2599     int GetDescriptorPart(int desc_idx) const;
2600
2601
2602     void InitializeObjectDescriptors(IplImage* train_image, const vector<KeyPoint>& features,
2603                                      const char* feature_label, int desc_start_idx = 0, float scale = 1.0f,
2604                                      int is_background = 0);
2605
2606     // GetObjectFeatureCount: returns the number of object features
2607     int GetObjectFeatureCount() const { return m_object_feature_count; }
2608
2609 protected:
2610     int* m_part_id; // contains part id for each of object descriptors
2611     vector<KeyPoint> m_train_features; // train features
2612     int m_object_feature_count; // the number of the positive features
2613
2614 };
2615
2616
2617 /*
2618  *  OneWayDescriptorMatcher
2619  */
2620 class OneWayDescriptorMatcher;
2621 typedef OneWayDescriptorMatcher OneWayDescriptorMatch;
2622
2623 class CV_EXPORTS OneWayDescriptorMatcher : public GenericDescriptorMatcher
2624 {
2625 public:
2626     class CV_EXPORTS Params
2627     {
2628     public:
2629         static const int POSE_COUNT = 500;
2630         static const int PATCH_WIDTH = 24;
2631         static const int PATCH_HEIGHT = 24;
2632         static float GET_MIN_SCALE() { return 0.7f; }
2633         static float GET_MAX_SCALE() { return 1.5f; }
2634         static float GET_STEP_SCALE() { return 1.2f; }
2635
2636         Params( int poseCount = POSE_COUNT,
2637                Size patchSize = Size(PATCH_WIDTH, PATCH_HEIGHT),
2638                string pcaFilename = string(),
2639                string trainPath = string(), string trainImagesList = string(),
2640                float minScale = GET_MIN_SCALE(), float maxScale = GET_MAX_SCALE(),
2641                float stepScale = GET_STEP_SCALE() );
2642
2643         int poseCount;
2644         Size patchSize;
2645         string pcaFilename;
2646         string trainPath;
2647         string trainImagesList;
2648
2649         float minScale, maxScale, stepScale;
2650     };
2651
2652     OneWayDescriptorMatcher( const Params& params=Params() );
2653     virtual ~OneWayDescriptorMatcher();
2654
2655     void initialize( const Params& params, const Ptr<OneWayDescriptorBase>& base=Ptr<OneWayDescriptorBase>() );
2656
2657     // Clears the keypoints stored in the collection and in the OneWayDescriptorBase
2658     virtual void clear();
2659
2660     virtual void train();
2661
2662     virtual bool isMaskSupported();
2663
2664     virtual void read( const FileNode &fn );
2665     virtual void write( FileStorage& fs ) const;
2666
2667     virtual bool empty() const;
2668
2669     virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
2670
2671 protected:
2672     // Matches a set of keypoints from a single image of the training set. A rectangle centered at each
2673     // keypoint, of size (patch_width/2*scale, patch_height/2*scale), is cropped from the source image
2674     // for each keypoint; scale is iterated from Params::minScale to Params::maxScale.
2675     // The minimum distance to each training patch over all of its affine poses is found over all scales.
2676     // The class ID of the match is returned for each keypoint. The distance is computed over the PCA
2677     // components loaded by the underlying OneWayDescriptorBase; a kd-tree is used to find the minimum distances.
2678     virtual void knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
2679                               vector<vector<DMatch> >& matches, int k,
2680                               const vector<Mat>& masks, bool compactResult );
2681     virtual void radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
2682                                  vector<vector<DMatch> >& matches, float maxDistance,
2683                                  const vector<Mat>& masks, bool compactResult );
2684
2685     Ptr<OneWayDescriptorBase> base;
2686     Params params;
2687     int prevTrainCount;
2688 };
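
/* Usage sketch (illustrative only): configuring a OneWayDescriptorMatcher and matching
   keypoints through the GenericDescriptorMatcher interface from features2d. The PCA file
   name, image names and the use of FastFeatureDetector/imread (highgui) are assumptions
   made for the example, not requirements of this header.

    OneWayDescriptorMatcher::Params params;
    params.pcaFilename = "pca.yml";                     // produced beforehand, e.g. by GeneratePCA()
    params.poseCount   = OneWayDescriptorMatcher::Params::POSE_COUNT;

    Ptr<GenericDescriptorMatcher> matcher = new OneWayDescriptorMatcher(params);

    Mat trainImg = imread("train.png", 0), queryImg = imread("query.png", 0);
    vector<KeyPoint> trainKp, queryKp;
    FastFeatureDetector detector(30);
    detector.detect(trainImg, trainKp);
    detector.detect(queryImg, queryKp);

    vector<DMatch> matches;
    matcher->match(queryImg, queryKp, trainImg, trainKp, matches);
*/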
2689
2690 /*
2691  *  FernDescriptorMatcher
2692  */
2693 class FernDescriptorMatcher;
2694 typedef FernDescriptorMatcher FernDescriptorMatch;
2695
2696 class CV_EXPORTS FernDescriptorMatcher : public GenericDescriptorMatcher
2697 {
2698 public:
2699     class CV_EXPORTS Params
2700     {
2701     public:
2702         Params( int nclasses=0,
2703                int patchSize=FernClassifier::PATCH_SIZE,
2704                int signatureSize=FernClassifier::DEFAULT_SIGNATURE_SIZE,
2705                int nstructs=FernClassifier::DEFAULT_STRUCTS,
2706                int structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
2707                int nviews=FernClassifier::DEFAULT_VIEWS,
2708                int compressionMethod=FernClassifier::COMPRESSION_NONE,
2709                const PatchGenerator& patchGenerator=PatchGenerator() );
2710
2711         Params( const string& filename );
2712
2713         int nclasses;
2714         int patchSize;
2715         int signatureSize;
2716         int nstructs;
2717         int structSize;
2718         int nviews;
2719         int compressionMethod;
2720         PatchGenerator patchGenerator;
2721
2722         string filename;
2723     };
2724
2725     FernDescriptorMatcher( const Params& params=Params() );
2726     virtual ~FernDescriptorMatcher();
2727
2728     virtual void clear();
2729
2730     virtual void train();
2731
2732     virtual bool isMaskSupported();
2733
2734     virtual void read( const FileNode &fn );
2735     virtual void write( FileStorage& fs ) const;
2736     virtual bool empty() const;
2737
2738     virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
2739
2740 protected:
2741     virtual void knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
2742                               vector<vector<DMatch> >& matches, int k,
2743                               const vector<Mat>& masks, bool compactResult );
2744     virtual void radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
2745                                  vector<vector<DMatch> >& matches, float maxDistance,
2746                                  const vector<Mat>& masks, bool compactResult );
2747
2748     void trainFernClassifier();
2749     void calcBestProbAndMatchIdx( const Mat& image, const Point2f& pt,
2750                                  float& bestProb, int& bestMatchIdx, vector<float>& signature );
2751     Ptr<FernClassifier> classifier;
2752     Params params;
2753     int prevTrainCount;
2754 };
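
/* Usage sketch (illustrative only): a FernDescriptorMatcher configured with a reduced
   number of ferns. The concrete values below are assumptions chosen for the example.

    FernDescriptorMatcher::Params fernParams;
    fernParams.nstructs   = 30;                         // fewer ferns than the default
    fernParams.structSize = FernClassifier::DEFAULT_STRUCT_SIZE;
    fernParams.nviews     = 500;

    Ptr<GenericDescriptorMatcher> matcher = new FernDescriptorMatcher(fernParams);
    // Training images and keypoints are supplied through the GenericDescriptorMatcher
    // interface (add()/train()) before calling match().
*/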
2755
2756
2757 /*
2758  * CalonderDescriptorExtractor
2759  */
2760 template<typename T>
2761 class CV_EXPORTS CalonderDescriptorExtractor : public DescriptorExtractor
2762 {
2763 public:
2764     CalonderDescriptorExtractor( const string& classifierFile );
2765
2766     virtual void read( const FileNode &fn );
2767     virtual void write( FileStorage &fs ) const;
2768
2769     virtual int descriptorSize() const { return classifier_.classes(); }
2770     virtual int descriptorType() const { return DataType<T>::type; }
2771
2772     virtual bool empty() const;
2773
2774 protected:
2775     virtual void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
2776
2777     RTreeClassifier classifier_;
2778     static const int BORDER_SIZE = 16;
2779 };
2780
2781 template<typename T>
2782 CalonderDescriptorExtractor<T>::CalonderDescriptorExtractor(const std::string& classifier_file)
2783 {
2784     classifier_.read( classifier_file.c_str() );
2785 }
2786
2787 template<typename T>
2788 void CalonderDescriptorExtractor<T>::computeImpl( const Mat& image,
2789                                                  vector<KeyPoint>& keypoints,
2790                                                  Mat& descriptors) const
2791 {
2792     // Cannot compute descriptors for keypoints on the image border.
2793     KeyPointsFilter::runByImageBorder(keypoints, image.size(), BORDER_SIZE);
2794
2795     /// @todo Check 16-byte aligned
2796     descriptors.create((int)keypoints.size(), classifier_.classes(), cv::DataType<T>::type);
2797
2798     int patchSize = RandomizedTree::PATCH_SIZE;
2799     int offset = patchSize / 2;
2800     for (size_t i = 0; i < keypoints.size(); ++i)
2801     {
2802         cv::Point2f pt = keypoints[i].pt;
2803         IplImage ipl = image( Rect((int)(pt.x - offset), (int)(pt.y - offset), patchSize, patchSize) );
2804         classifier_.getSignature( &ipl, descriptors.ptr<T>((int)i));
2805     }
2806 }
2807
2808 template<typename T>
2809 void CalonderDescriptorExtractor<T>::read( const FileNode& )
2810 {}
2811
2812 template<typename T>
2813 void CalonderDescriptorExtractor<T>::write( FileStorage& ) const
2814 {}
2815
2816 template<typename T>
2817 bool CalonderDescriptorExtractor<T>::empty() const
2818 {
2819     return classifier_.trees_.empty();
2820 }
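
/* Usage sketch (illustrative only): computing Calonder descriptors for detected keypoints.
   The classifier file name, image name and the use of FastFeatureDetector/imread are
   assumptions; a trained RTreeClassifier must be available on disk.

    CalonderDescriptorExtractor<float> extractor("rtrees.xml");

    Mat img = imread("scene.png", 0);
    vector<KeyPoint> keypoints;
    FastFeatureDetector detector(30);
    detector.detect(img, keypoints);

    Mat descriptors;                                    // keypoints.size() x classes(), CV_32F
    extractor.compute(img, keypoints, descriptors);
*/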
2821
2822
2823 ////////////////////// Brute Force Matcher //////////////////////////
2824
2825 template<class Distance>
2826 class CV_EXPORTS BruteForceMatcher : public BFMatcher
2827 {
2828 public:
2829     BruteForceMatcher( Distance d = Distance() ) : BFMatcher(Distance::normType, false) {}
2830     virtual ~BruteForceMatcher() {}
2831 };
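
/* Usage sketch (illustrative only): the legacy BruteForceMatcher is now a thin wrapper
   over BFMatcher; the distance functor only selects the norm type. The descriptor
   matrices are assumed to come from some DescriptorExtractor.

    Mat descriptors1, descriptors2;                     // filled elsewhere, one descriptor per row
    BruteForceMatcher<L2<float> > matcher;              // equivalent to BFMatcher(NORM_L2)
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);
*/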
2832
2833
2834 /****************************************************************************************\
2835 *                                Planar Object Detection                                 *
2836 \****************************************************************************************/
2837
2838 class CV_EXPORTS PlanarObjectDetector
2839 {
2840 public:
2841     PlanarObjectDetector();
2842     PlanarObjectDetector(const FileNode& node);
2843     PlanarObjectDetector(const vector<Mat>& pyr, int _npoints=300,
2844                          int _patchSize=FernClassifier::PATCH_SIZE,
2845                          int _nstructs=FernClassifier::DEFAULT_STRUCTS,
2846                          int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
2847                          int _nviews=FernClassifier::DEFAULT_VIEWS,
2848                          const LDetector& detector=LDetector(),
2849                          const PatchGenerator& patchGenerator=PatchGenerator());
2850     virtual ~PlanarObjectDetector();
2851     virtual void train(const vector<Mat>& pyr, int _npoints=300,
2852                        int _patchSize=FernClassifier::PATCH_SIZE,
2853                        int _nstructs=FernClassifier::DEFAULT_STRUCTS,
2854                        int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
2855                        int _nviews=FernClassifier::DEFAULT_VIEWS,
2856                        const LDetector& detector=LDetector(),
2857                        const PatchGenerator& patchGenerator=PatchGenerator());
2858     virtual void train(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
2859                        int _patchSize=FernClassifier::PATCH_SIZE,
2860                        int _nstructs=FernClassifier::DEFAULT_STRUCTS,
2861                        int _structSize=FernClassifier::DEFAULT_STRUCT_SIZE,
2862                        int _nviews=FernClassifier::DEFAULT_VIEWS,
2863                        const LDetector& detector=LDetector(),
2864                        const PatchGenerator& patchGenerator=PatchGenerator());
2865     Rect getModelROI() const;
2866     vector<KeyPoint> getModelPoints() const;
2867     const LDetector& getDetector() const;
2868     const FernClassifier& getClassifier() const;
2869     void setVerbose(bool verbose);
2870
2871     void read(const FileNode& node);
2872     void write(FileStorage& fs, const String& name=String()) const;
2873     bool operator()(const Mat& image, CV_OUT Mat& H, CV_OUT vector<Point2f>& corners) const;
2874     bool operator()(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
2875                     CV_OUT Mat& H, CV_OUT vector<Point2f>& corners,
2876                     CV_OUT vector<int>* pairs=0) const;
2877
2878 protected:
2879     bool verbose;
2880     Rect modelROI;
2881     vector<KeyPoint> modelPoints;
2882     LDetector ldetector;
2883     FernClassifier fernClassifier;
2884 };
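
/* Usage sketch (illustrative only): training a PlanarObjectDetector on a pyramid built
   from a reference image of a planar object and detecting it in a test frame. The file
   names, pyramid depth and point count are assumptions.

    Mat object = imread("object.png", 0), frame = imread("frame.png", 0);

    vector<Mat> objPyr;
    buildPyramid(object, objPyr, 4);

    PlanarObjectDetector detector;
    detector.train(objPyr, 300);                        // 300 model points, default ferns

    Mat H;
    vector<Point2f> corners;
    if( detector(frame, H, corners) )
    {
        // H is the model->frame homography; 'corners' holds the projected
        // corners of the model ROI in 'frame'.
    }
*/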
2885
2886 }
2887
2888 // 2009-01-12, Xavier Delacour <xavier.delacour@gmail.com>
2889
2890 struct lsh_hash {
2891     int h1, h2;
2892 };
2893
2894 struct CvLSHOperations
2895 {
2896     virtual ~CvLSHOperations() {}
2897
2898     virtual int vector_add(const void* data) = 0;
2899     virtual void vector_remove(int i) = 0;
2900     virtual const void* vector_lookup(int i) = 0;
2901     virtual void vector_reserve(int n) = 0;
2902     virtual unsigned int vector_count() = 0;
2903
2904     virtual void hash_insert(lsh_hash h, int l, int i) = 0;
2905     virtual void hash_remove(lsh_hash h, int l, int i) = 0;
2906     virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0;
2907 };
2908
2909 #endif
2910
2911 #ifdef __cplusplus
2912 extern "C" {
2913 #endif
2914
2915 /* Splits a color or grayscale image into multiple connected components
2916  of nearly the same color/brightness using a modification of the Burt algorithm.
2917  comp will contain a pointer to a sequence (CvSeq)
2918  of connected components (CvConnectedComp) */
2919 CVAPI(void) cvPyrSegmentation( IplImage* src, IplImage* dst,
2920                               CvMemStorage* storage, CvSeq** comp,
2921                               int level, double threshold1,
2922                               double threshold2 );
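
/* Usage sketch (illustrative only): segmenting an image with cvPyrSegmentation. The file
   name, pyramid level and thresholds are assumptions; the image dimensions must be
   divisible by 2^level (crop or set an ROI if necessary).

    IplImage* src = cvLoadImage("input.png", CV_LOAD_IMAGE_COLOR);
    IplImage* dst = cvCloneImage(src);
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* comp = 0;

    cvPyrSegmentation(src, dst, storage, &comp, 4, 200, 50);
    // 'comp' now points to a CvSeq of CvConnectedComp describing the segments.

    cvReleaseMemStorage(&storage);
    cvReleaseImage(&dst);
    cvReleaseImage(&src);
*/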
2923
2924 /****************************************************************************************\
2925 *                              Planar subdivisions                                       *
2926 \****************************************************************************************/
2927
2928 typedef size_t CvSubdiv2DEdge;
2929
2930 #define CV_QUADEDGE2D_FIELDS()     \
2931     int flags;                     \
2932     struct CvSubdiv2DPoint* pt[4]; \
2933     CvSubdiv2DEdge  next[4];
2934
2935 #define CV_SUBDIV2D_POINT_FIELDS()\
2936     int            flags;      \
2937     CvSubdiv2DEdge first;      \
2938     CvPoint2D32f   pt;         \
2939     int id;
2940
2941 #define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30)
2942
2943 typedef struct CvQuadEdge2D
2944 {
2945     CV_QUADEDGE2D_FIELDS()
2946 }
2947 CvQuadEdge2D;
2948
2949 typedef struct CvSubdiv2DPoint
2950 {
2951     CV_SUBDIV2D_POINT_FIELDS()
2952 }
2953 CvSubdiv2DPoint;
2954
2955 #define CV_SUBDIV2D_FIELDS()    \
2956     CV_GRAPH_FIELDS()           \
2957     int  quad_edges;            \
2958     int  is_geometry_valid;     \
2959     CvSubdiv2DEdge recent_edge; \
2960     CvPoint2D32f  topleft;      \
2961     CvPoint2D32f  bottomright;
2962
2963 typedef struct CvSubdiv2D
2964 {
2965     CV_SUBDIV2D_FIELDS()
2966 }
2967 CvSubdiv2D;
2968
2969 typedef enum CvSubdiv2DPointLocation
2970 {
2971     CV_PTLOC_ERROR = -2,
2972     CV_PTLOC_OUTSIDE_RECT = -1,
2973     CV_PTLOC_INSIDE = 0,
2974     CV_PTLOC_VERTEX = 1,
2975     CV_PTLOC_ON_EDGE = 2
2976 }
2977 CvSubdiv2DPointLocation;
2978
2979 typedef enum CvNextEdgeType
2980 {
2981     CV_NEXT_AROUND_ORG   = 0x00,
2982     CV_NEXT_AROUND_DST   = 0x22,
2983     CV_PREV_AROUND_ORG   = 0x11,
2984     CV_PREV_AROUND_DST   = 0x33,
2985     CV_NEXT_AROUND_LEFT  = 0x13,
2986     CV_NEXT_AROUND_RIGHT = 0x31,
2987     CV_PREV_AROUND_LEFT  = 0x20,
2988     CV_PREV_AROUND_RIGHT = 0x02
2989 }
2990 CvNextEdgeType;
2991
2992 /* get the next edge with the same origin point (counterclockwise) */
2993 #define  CV_SUBDIV2D_NEXT_EDGE( edge )  (((CvQuadEdge2D*)((edge) & ~3))->next[(edge)&3])
2994
2995
2996 /* Initializes Delaunay triangulation */
2997 CVAPI(void)  cvInitSubdivDelaunay2D( CvSubdiv2D* subdiv, CvRect rect );
2998
2999 /* Creates new subdivision */
3000 CVAPI(CvSubdiv2D*)  cvCreateSubdiv2D( int subdiv_type, int header_size,
3001                                      int vtx_size, int quadedge_size,
3002                                      CvMemStorage* storage );
3003
3004 /************************* high-level subdivision functions ***************************/
3005
3006 /* Simplified Delaunay diagram creation */
3007 CV_INLINE  CvSubdiv2D* cvCreateSubdivDelaunay2D( CvRect rect, CvMemStorage* storage )
3008 {
3009     CvSubdiv2D* subdiv = cvCreateSubdiv2D( CV_SEQ_KIND_SUBDIV2D, sizeof(*subdiv),
3010                                           sizeof(CvSubdiv2DPoint), sizeof(CvQuadEdge2D), storage );
3011
3012     cvInitSubdivDelaunay2D( subdiv, rect );
3013     return subdiv;
3014 }
3015
3016
3017 /* Inserts a new point into the Delaunay triangulation */
3018 CVAPI(CvSubdiv2DPoint*)  cvSubdivDelaunay2DInsert( CvSubdiv2D* subdiv, CvPoint2D32f pt);
3019
3020 /* Locates a point within the Delaunay triangulation (finds the edge
3021  the point is to the left of or belongs to, or the triangulation point the given
3022  point coincides with) */
3023 CVAPI(CvSubdiv2DPointLocation)  cvSubdiv2DLocate(
3024                                                  CvSubdiv2D* subdiv, CvPoint2D32f pt,
3025                                                  CvSubdiv2DEdge* edge,
3026                                                  CvSubdiv2DPoint** vertex CV_DEFAULT(NULL) );
3027
3028 /* Calculates the Voronoi tessellation (i.e. coordinates of the Voronoi points) */
3029 CVAPI(void)  cvCalcSubdivVoronoi2D( CvSubdiv2D* subdiv );
3030
3031
3032 /* Removes all Voronoi points from the tessellation */
3033 CVAPI(void)  cvClearSubdivVoronoi2D( CvSubdiv2D* subdiv );
3034
3035
3036 /* Finds the subdivision vertex nearest to the given point. */
3037 CVAPI(CvSubdiv2DPoint*) cvFindNearestPoint2D( CvSubdiv2D* subdiv, CvPoint2D32f pt );
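
/* Usage sketch (illustrative only): building a Delaunay subdivision over a 600x600
   rectangle, inserting random points and computing the Voronoi diagram. The point count
   and rectangle size are assumptions.

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSubdiv2D* subdiv = cvCreateSubdivDelaunay2D(cvRect(0, 0, 600, 600), storage);

    CvRNG rng = cvRNG(-1);
    for( int i = 0; i < 100; i++ )
        cvSubdivDelaunay2DInsert(subdiv, cvPoint2D32f(cvRandReal(&rng)*600, cvRandReal(&rng)*600));

    cvCalcSubdivVoronoi2D(subdiv);                       // adds the virtual Voronoi points

    CvSubdiv2DPoint* nearest = cvFindNearestPoint2D(subdiv, cvPoint2D32f(300, 300));
    // 'nearest' is the subdivision vertex closest to (300, 300).

    cvReleaseMemStorage(&storage);                       // the subdivision lives in 'storage'
*/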
3038
3039
3040 /************ Basic quad-edge navigation and operations ************/
3041
3042 CV_INLINE  CvSubdiv2DEdge  cvSubdiv2DNextEdge( CvSubdiv2DEdge edge )
3043 {
3044     return  CV_SUBDIV2D_NEXT_EDGE(edge);
3045 }
3046
3047
3048 CV_INLINE  CvSubdiv2DEdge  cvSubdiv2DRotateEdge( CvSubdiv2DEdge edge, int rotate )
3049 {
3050     return  (edge & ~3) + ((edge + rotate) & 3);
3051 }
3052
3053 CV_INLINE  CvSubdiv2DEdge  cvSubdiv2DSymEdge( CvSubdiv2DEdge edge )
3054 {
3055     return edge ^ 2;
3056 }
3057
3058 CV_INLINE  CvSubdiv2DEdge  cvSubdiv2DGetEdge( CvSubdiv2DEdge edge, CvNextEdgeType type )
3059 {
3060     CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3);
3061     edge = e->next[(edge + (int)type) & 3];
3062     return  (edge & ~3) + ((edge + ((int)type >> 4)) & 3);
3063 }
3064
3065
3066 CV_INLINE  CvSubdiv2DPoint*  cvSubdiv2DEdgeOrg( CvSubdiv2DEdge edge )
3067 {
3068     CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3);
3069     return (CvSubdiv2DPoint*)e->pt[edge & 3];
3070 }
3071
3072
3073 CV_INLINE  CvSubdiv2DPoint*  cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge )
3074 {
3075     CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3);
3076     return (CvSubdiv2DPoint*)e->pt[(edge + 2) & 3];
3077 }
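
/* Usage sketch (illustrative only): walking counterclockwise around the origin vertex of
   an edge with the quad-edge helpers above. 'start' stands for any valid CvSubdiv2DEdge,
   e.g. one returned by cvSubdiv2DLocate.

    CvSubdiv2DEdge edge = start;
    do
    {
        CvSubdiv2DPoint* org = cvSubdiv2DEdgeOrg(edge);      // the same vertex on every iteration
        CvSubdiv2DPoint* dst = cvSubdiv2DEdgeDst(edge);      // one neighbouring vertex per iteration
        edge = cvSubdiv2DGetEdge(edge, CV_NEXT_AROUND_ORG);  // step to the next edge around org
    }
    while( edge != start );
*/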
3078
3079 /****************************************************************************************\
3080 *                           Additional operations on Subdivisions                        *
3081 \****************************************************************************************/
3082
3083 // paints the Voronoi diagram; just a demo function
3084 CVAPI(void)  icvDrawMosaic( CvSubdiv2D* subdiv, IplImage* src, IplImage* dst );
3085
3086 // checks planar subdivision for correctness. It is not an absolute check,
3087 // but it verifies some relations between quad-edges
3088 CVAPI(int)   icvSubdiv2DCheck( CvSubdiv2D* subdiv );
3089
3090 // returns squared distance between two 2D points with floating-point coordinates.
3091 CV_INLINE double icvSqDist2D32f( CvPoint2D32f pt1, CvPoint2D32f pt2 )
3092 {
3093     double dx = pt1.x - pt2.x;
3094     double dy = pt1.y - pt2.y;
3095
3096     return dx*dx + dy*dy;
3097 }
3098
3099
3100
3101
3102 CV_INLINE  double  cvTriangleArea( CvPoint2D32f a, CvPoint2D32f b, CvPoint2D32f c )
3103 {
3104     return ((double)b.x - a.x) * ((double)c.y - a.y) - ((double)b.y - a.y) * ((double)c.x - a.x);
3105 }
3106
3107
3108 /* Constructs kd-tree from set of feature descriptors */
3109 CVAPI(struct CvFeatureTree*) cvCreateKDTree(CvMat* desc);
3110
3111 /* Constructs spill-tree from set of feature descriptors */
3112 CVAPI(struct CvFeatureTree*) cvCreateSpillTree( const CvMat* raw_data,
3113                                                const int naive CV_DEFAULT(50),
3114                                                const double rho CV_DEFAULT(.7),
3115                                                const double tau CV_DEFAULT(.1) );
3116
3117 /* Release feature tree */
3118 CVAPI(void) cvReleaseFeatureTree(struct CvFeatureTree* tr);
3119
3120 /* Searches feature tree for k nearest neighbors of given reference points,
3121  searching (in case of kd-tree/bbf) at most emax leaves. */
3122 CVAPI(void) cvFindFeatures(struct CvFeatureTree* tr, const CvMat* query_points,
3123                            CvMat* indices, CvMat* dist, int k, int emax CV_DEFAULT(20));
3124
3125 /* Searches the feature tree for all points lying inside the given rect region.
3126  Only implemented for kd-trees */
3127 CVAPI(int) cvFindFeaturesBoxed(struct CvFeatureTree* tr,
3128                                CvMat* bounds_min, CvMat* bounds_max,
3129                                CvMat* out_indices);
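
/* Usage sketch (illustrative only): indexing descriptors with a kd-tree and querying the
   two nearest neighbours of each query descriptor. The matrix sizes and descriptor
   dimensionality (64) are assumptions.

    CvMat* train = cvCreateMat(1000, 64, CV_32FC1);      // one descriptor per row
    CvMat* query = cvCreateMat(10,   64, CV_32FC1);
    // ... fill 'train' and 'query' ...

    struct CvFeatureTree* tree = cvCreateKDTree(train);  // 'train' must outlive the tree

    CvMat* indices = cvCreateMat(10, 2, CV_32SC1);       // k = 2 neighbours per query point
    CvMat* dist    = cvCreateMat(10, 2, CV_64FC1);
    cvFindFeatures(tree, query, indices, dist, 2, 20);   // search at most 20 leaves

    cvReleaseFeatureTree(tree);
*/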
3130
3131
3132 /* Construct a Locality Sensitive Hash (LSH) table, for indexing d-dimensional vectors of
3133  given type. Vectors will be hashed L times with k-dimensional p-stable (p=2) functions. */
3134 CVAPI(struct CvLSH*) cvCreateLSH(struct CvLSHOperations* ops, int d,
3135                                  int L CV_DEFAULT(10), int k CV_DEFAULT(10),
3136                                  int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4),
3137                                  int64 seed CV_DEFAULT(-1));
3138
3139 /* Construct in-memory LSH table, with n bins. */
3140 CVAPI(struct CvLSH*) cvCreateMemoryLSH(int d, int n, int L CV_DEFAULT(10), int k CV_DEFAULT(10),
3141                                        int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4),
3142                                        int64 seed CV_DEFAULT(-1));
3143
3144 /* Free the given LSH structure. */
3145 CVAPI(void) cvReleaseLSH(struct CvLSH** lsh);
3146
3147 /* Return the number of vectors in the LSH. */
3148 CVAPI(unsigned int) LSHSize(struct CvLSH* lsh);
3149
3150 /* Add vectors to the LSH structure, optionally returning indices. */
3151 CVAPI(void) cvLSHAdd(struct CvLSH* lsh, const CvMat* data, CvMat* indices CV_DEFAULT(0));
3152
3153 /* Remove vectors from LSH, as addressed by given indices. */
3154 CVAPI(void) cvLSHRemove(struct CvLSH* lsh, const CvMat* indices);
3155
3156 /* Query the LSH n times for at most k nearest points; data is n x d,
3157  indices and dist are n x k. At most emax stored points will be accessed. */
3158 CVAPI(void) cvLSHQuery(struct CvLSH* lsh, const CvMat* query_points,
3159                        CvMat* indices, CvMat* dist, int k, int emax);
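
/* Usage sketch (illustrative only): an in-memory LSH table over 64-dimensional float
   vectors. The table size, L, k, r and the matrix sizes are assumptions.

    struct CvLSH* lsh = cvCreateMemoryLSH(64, 1 << 16, 10, 10, CV_32FC1, 4.0);

    CvMat* data = cvCreateMat(1000, 64, CV_32FC1);       // vectors to index, one per row
    // ... fill 'data' ...
    cvLSHAdd(lsh, data, 0);

    CvMat* query   = cvCreateMat(5, 64, CV_32FC1);
    CvMat* indices = cvCreateMat(5, 10, CV_32SC1);       // k = 10 neighbours per query
    CvMat* dist    = cvCreateMat(5, 10, CV_64FC1);
    cvLSHQuery(lsh, query, indices, dist, 10, 100);      // probe at most 100 stored points

    cvReleaseLSH(&lsh);
*/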
3160
3161 /* Kolmogorov-Zabih stereo-correspondence algorithm (a.k.a. KZ1) */
3162 #define CV_STEREO_GC_OCCLUDED  SHRT_MAX
3163
3164 typedef struct CvStereoGCState
3165 {
3166     int Ithreshold;
3167     int interactionRadius;
3168     float K, lambda, lambda1, lambda2;
3169     int occlusionCost;
3170     int minDisparity;
3171     int numberOfDisparities;
3172     int maxIters;
3173
3174     CvMat* left;
3175     CvMat* right;
3176     CvMat* dispLeft;
3177     CvMat* dispRight;
3178     CvMat* ptrLeft;
3179     CvMat* ptrRight;
3180     CvMat* vtxBuf;
3181     CvMat* edgeBuf;
3182 } CvStereoGCState;
3183
3184 CVAPI(CvStereoGCState*) cvCreateStereoGCState( int numberOfDisparities, int maxIters );
3185 CVAPI(void) cvReleaseStereoGCState( CvStereoGCState** state );
3186
3187 CVAPI(void) cvFindStereoCorrespondenceGC( const CvArr* left, const CvArr* right,
3188                                          CvArr* disparityLeft, CvArr* disparityRight,
3189                                          CvStereoGCState* state,
3190                                          int useDisparityGuess CV_DEFAULT(0) );
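
/* Usage sketch (illustrative only): graph-cut stereo correspondence on a rectified pair.
   The file names, disparity range and iteration count are assumptions; cvLoadImageM
   comes from highgui.

    CvMat* left  = cvLoadImageM("left.png",  CV_LOAD_IMAGE_GRAYSCALE);
    CvMat* right = cvLoadImageM("right.png", CV_LOAD_IMAGE_GRAYSCALE);

    CvMat* dispLeft  = cvCreateMat(left->rows, left->cols, CV_16SC1);
    CvMat* dispRight = cvCreateMat(left->rows, left->cols, CV_16SC1);

    CvStereoGCState* state = cvCreateStereoGCState(16, 2);   // 16 disparities, 2 iterations
    cvFindStereoCorrespondenceGC(left, right, dispLeft, dispRight, state, 0);

    cvReleaseStereoGCState(&state);
*/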
3191
3192 /* Calculates optical flow for 2 images using classical Lucas & Kanade algorithm */
3193 CVAPI(void)  cvCalcOpticalFlowLK( const CvArr* prev, const CvArr* curr,
3194                                  CvSize win_size, CvArr* velx, CvArr* vely );
3195
3196 /* Calculates optical flow for 2 images using block matching algorithm */
3197 CVAPI(void)  cvCalcOpticalFlowBM( const CvArr* prev, const CvArr* curr,
3198                                  CvSize block_size, CvSize shift_size,
3199                                  CvSize max_range, int use_previous,
3200                                  CvArr* velx, CvArr* vely );
3201
3202 /* Calculates optical flow for 2 images using the Horn & Schunck algorithm */
3203 CVAPI(void)  cvCalcOpticalFlowHS( const CvArr* prev, const CvArr* curr,
3204                                  int use_previous, CvArr* velx, CvArr* vely,
3205                                  double lambda, CvTermCriteria criteria );
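
/* Usage sketch (illustrative only): dense Lucas & Kanade and Horn & Schunck flow between
   two consecutive grayscale frames. The file names, window size, lambda and termination
   criteria are assumptions; cvLoadImage comes from highgui.

    IplImage* prev = cvLoadImage("frame0.png", CV_LOAD_IMAGE_GRAYSCALE);
    IplImage* curr = cvLoadImage("frame1.png", CV_LOAD_IMAGE_GRAYSCALE);

    CvSize sz = cvGetSize(prev);
    IplImage* velx = cvCreateImage(sz, IPL_DEPTH_32F, 1);    // horizontal flow component
    IplImage* vely = cvCreateImage(sz, IPL_DEPTH_32F, 1);    // vertical flow component

    cvCalcOpticalFlowLK(prev, curr, cvSize(15, 15), velx, vely);

    // Horn & Schunck on the same pair; lambda weights the smoothness term.
    cvCalcOpticalFlowHS(prev, curr, 0, velx, vely, 0.001,
                        cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 100, 0.01));
*/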
3206
3207
3208 /****************************************************************************************\
3209 *                           Background/foreground segmentation                           *
3210 \****************************************************************************************/
3211
3212 /* We discriminate between foreground and background pixels
3213  * by building and maintaining a model of the background.
3214  * Any pixel which does not fit this model is then deemed
3215  * to be foreground.
3216  *
3217  * At present we support two core background models,
3218  * one of which has two variations:
3219  *
3220  *  o CV_BG_MODEL_FGD: latest and greatest algorithm, described in
3221  *
3222  *       Foreground Object Detection from Videos Containing Complex Background.
3223  *       Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
3224  *       ACM MM2003 9p
3225  *
3226  *  o CV_BG_MODEL_FGD_SIMPLE:
3227  *       A code comment describes this as a simplified version of the above,
3228  *       but the code is in fact currently identical
3229  *
3230  *  o CV_BG_MODEL_MOG: "Mixture of Gaussians", older algorithm, described in
3231  *
3232  *       Moving target classification and tracking from real-time video.
3233  *       A. Lipton, H. Fujiyoshi, R. Patil
3234  *       Proceedings IEEE Workshop on Application of Computer Vision pp 8-14 1998
3235  *
3236  *       Learning patterns of activity using real-time tracking
3237  *       C Stauffer and W Grimson  August 2000
3238  *       IEEE Transactions on Pattern Analysis and Machine Intelligence 22(8):747-757
3239  */
3240
3241
3242 #define CV_BG_MODEL_FGD         0
3243 #define CV_BG_MODEL_MOG         1                       /* "Mixture of Gaussians".      */
3244 #define CV_BG_MODEL_FGD_SIMPLE  2
3245
3246 struct CvBGStatModel;
3247
3248 typedef void (CV_CDECL * CvReleaseBGStatModel)( struct CvBGStatModel** bg_model );
3249 typedef int (CV_CDECL * CvUpdateBGStatModel)( IplImage* curr_frame, struct CvBGStatModel* bg_model,
3250                                              double learningRate );
3251
3252 #define CV_BG_STAT_MODEL_FIELDS()                                               \
3253 int             type; /*type of BG model*/                                      \
3254 CvReleaseBGStatModel release;                                                   \
3255 CvUpdateBGStatModel update;                                                     \
3256 IplImage*       background;   /*8UC3 reference background image*/               \
3257 IplImage*       foreground;   /*8UC1 foreground image*/                         \
3258 IplImage**      layers;       /*8UC3 reference background image, can be null */ \
3259 int             layer_count;  /* can be zero */                                 \
3260 CvMemStorage*   storage;      /*storage for foreground_regions*/                \
3261 CvSeq*          foreground_regions /*foreground object contours*/
3262
3263 typedef struct CvBGStatModel
3264 {
3265     CV_BG_STAT_MODEL_FIELDS();
3266 } CvBGStatModel;
3267
3268 //
3269
3270 // Releases memory used by BGStatModel
3271 CVAPI(void) cvReleaseBGStatModel( CvBGStatModel** bg_model );
3272
3273 // Updates statistical model and returns number of found foreground regions
3274 CVAPI(int) cvUpdateBGStatModel( IplImage* current_frame, CvBGStatModel*  bg_model,
3275                                double learningRate CV_DEFAULT(-1));
3276
3277 // Performs FG post-processing using segmentation
3278 // (all pixels of a region will be classified as foreground if the majority of the region's pixels are FG).
3279 // parameters:
3280 //      segments - pointer to result of segmentation (for example MeanShiftSegmentation)
3281 //      bg_model - pointer to CvBGStatModel structure
3282 CVAPI(void) cvRefineForegroundMaskBySegm( CvSeq* segments, CvBGStatModel*  bg_model );
3283
3284 /* Common use change detection function */
3285 CVAPI(int)  cvChangeDetection( IplImage*  prev_frame,
3286                               IplImage*  curr_frame,
3287                               IplImage*  change_mask );
3288
3289 /*
3290  Interface of ACM MM2003 algorithm
3291  */
3292
3293 /* Default parameters of foreground detection algorithm: */
3294 #define  CV_BGFG_FGD_LC              128
3295 #define  CV_BGFG_FGD_N1C             15
3296 #define  CV_BGFG_FGD_N2C             25
3297
3298 #define  CV_BGFG_FGD_LCC             64
3299 #define  CV_BGFG_FGD_N1CC            25
3300 #define  CV_BGFG_FGD_N2CC            40
3301
3302 /* Background reference image update parameter: */
3303 #define  CV_BGFG_FGD_ALPHA_1         0.1f
3304
3305 /* stat model update parameter
3306  * 0.002f ~ 1K frames (~45 sec), 0.005 ~ 18 sec (assuming 25 fps and an absolutely static BG)
3307  */
3308 #define  CV_BGFG_FGD_ALPHA_2         0.005f
3309
3310 /* start value for the alpha parameter (to quickly initialize the statistical model) */
3311 #define  CV_BGFG_FGD_ALPHA_3         0.1f
3312
3313 #define  CV_BGFG_FGD_DELTA           2
3314
3315 #define  CV_BGFG_FGD_T               0.9f
3316
3317 #define  CV_BGFG_FGD_MINAREA         15.f
3318
3319 #define  CV_BGFG_FGD_BG_UPDATE_TRESH 0.5f
3320
3321 /* See the above-referenced Li/Huang/Gu/Tian paper
3322  * for a full description of these background-model
3323  * tuning parameters.
3324  *
3325  * Nomenclature:  'c'  == "color", a three-component red/green/blue vector.
3326  *                         We use histograms of these to model the range of
3327  *                         colors we've seen at a given background pixel.
3328  *
3329  *                'cc' == "color co-occurrence", a six-component vector giving
3330  *                         RGB color for both this frame and preceding frame.
3331  *                             We use histograms of these to model the range of
3332  *                         color CHANGES we've seen at a given background pixel.
3333  */
3334 typedef struct CvFGDStatModelParams
3335 {
3336     int    Lc;                  /* Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.                               */
3337     int    N1c;                 /* Number of color vectors used to model normal background color variation at a given pixel.                    */
3338     int    N2c;                 /* Number of color vectors retained at given pixel.  Must be > N1c, typically ~ 5/3 of N1c.                     */
3339     /* Used to allow the first N1c vectors to adapt over time to changing background.                           */
3340
3341     int    Lcc;                 /* Quantized levels per 'color co-occurrence' component.  Power of two, typically 16, 32 or 64.                 */
3342     int    N1cc;                /* Number of color co-occurrence vectors used to model normal background color variation at a given pixel.      */
3343     int    N2cc;                /* Number of color co-occurrence vectors retained at given pixel.  Must be > N1cc, typically ~ 5/3 of N1cc.     */
3344     /* Used to allow the first N1cc vectors to adapt over time to changing background.                          */
3345
3346     int    is_obj_without_holes;/* If TRUE we ignore holes within foreground blobs. Defaults to TRUE.                                           */
3347     int    perform_morphing;    /* Number of erode-dilate-erode foreground-blob cleanup iterations.                                             */
3348     /* These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.                    */
3349
3350     float  alpha1;              /* How quickly we forget old background pixel values seen.  Typically set to 0.1                                */
3351     float  alpha2;              /* "Controls speed of feature learning". Depends on T. Typical value circa 0.005.                               */
3352     float  alpha3;              /* Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.                         */
3353
3354     float  delta;               /* Affects color and color co-occurrence quantization, typically set to 2.                                      */
3355     float  T;                   /* "A percentage value which determines when new features can be recognized as new background." (Typically 0.9).*/
3356     float  minArea;             /* Discard foreground blobs whose bounding box is smaller than this threshold.                                  */
3357 } CvFGDStatModelParams;
3358
3359 typedef struct CvBGPixelCStatTable
3360 {
3361     float          Pv, Pvb;
3362     uchar          v[3];
3363 } CvBGPixelCStatTable;
3364
3365 typedef struct CvBGPixelCCStatTable
3366 {
3367     float          Pv, Pvb;
3368     uchar          v[6];
3369 } CvBGPixelCCStatTable;
3370
3371 typedef struct CvBGPixelStat
3372 {
3373     float                 Pbc;
3374     float                 Pbcc;
3375     CvBGPixelCStatTable*  ctable;
3376     CvBGPixelCCStatTable* cctable;
3377     uchar                 is_trained_st_model;
3378     uchar                 is_trained_dyn_model;
3379 } CvBGPixelStat;
3380
3381
3382 typedef struct CvFGDStatModel
3383 {
3384     CV_BG_STAT_MODEL_FIELDS();
3385     CvBGPixelStat*         pixel_stat;
3386     IplImage*              Ftd;
3387     IplImage*              Fbd;
3388     IplImage*              prev_frame;
3389     CvFGDStatModelParams   params;
3390 } CvFGDStatModel;
3391
3392 /* Creates FGD model */
3393 CVAPI(CvBGStatModel*) cvCreateFGDStatModel( IplImage* first_frame,
3394                                            CvFGDStatModelParams* parameters CV_DEFAULT(NULL));
3395
3396 /*
3397  Interface of Gaussian mixture algorithm
3398
3399  "An improved adaptive background mixture model for real-time tracking with shadow detection"
3400  P. KadewTraKuPong and R. Bowden,
3401  Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001.
3402  http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
3403  */
3404
3405 /* Note:  "MOG" == "Mixture Of Gaussians": */
3406
3407 #define CV_BGFG_MOG_MAX_NGAUSSIANS 500
3408
3409 /* default parameters of gaussian background detection algorithm */
3410 #define CV_BGFG_MOG_BACKGROUND_THRESHOLD     0.7     /* threshold sum of weights for background test */
3411 #define CV_BGFG_MOG_STD_THRESHOLD            2.5     /* lambda=2.5 is 99% */
3412 #define CV_BGFG_MOG_WINDOW_SIZE              200     /* Learning rate; alpha = 1/CV_BGFG_MOG_WINDOW_SIZE */
3413 #define CV_BGFG_MOG_NGAUSSIANS               5       /* = K = number of Gaussians in mixture */
3414 #define CV_BGFG_MOG_WEIGHT_INIT              0.05
3415 #define CV_BGFG_MOG_SIGMA_INIT               30
3416 #define CV_BGFG_MOG_MINAREA                  15.f
3417
3418
3419 #define CV_BGFG_MOG_NCOLORS                  3
3420
3421 typedef struct CvGaussBGStatModelParams
3422 {
3423     int     win_size;               /* = 1/alpha */
3424     int     n_gauss;
3425     double  bg_threshold, std_threshold, minArea;
3426     double  weight_init, variance_init;
3427 }CvGaussBGStatModelParams;
3428
3429 typedef struct CvGaussBGValues
3430 {
3431     int         match_sum;
3432     double      weight;
3433     double      variance[CV_BGFG_MOG_NCOLORS];
3434     double      mean[CV_BGFG_MOG_NCOLORS];
3435 } CvGaussBGValues;
3436
3437 typedef struct CvGaussBGPoint
3438 {
3439     CvGaussBGValues* g_values;
3440 } CvGaussBGPoint;
3441
3442
3443 typedef struct CvGaussBGModel
3444 {
3445     CV_BG_STAT_MODEL_FIELDS();
3446     CvGaussBGStatModelParams   params;
3447     CvGaussBGPoint*            g_point;
3448     int                        countFrames;
3449     void*                      mog;
3450 } CvGaussBGModel;
3451
3452
3453 /* Creates Gaussian mixture background model */
3454 CVAPI(CvBGStatModel*) cvCreateGaussianBGModel( IplImage* first_frame,
3455                                               CvGaussBGStatModelParams* parameters CV_DEFAULT(NULL));
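
/* Usage sketch (illustrative only): background subtraction with the Gaussian mixture
   model. The capture source is an assumption; cvCaptureFromCAM/cvQueryFrame come from
   highgui.

    CvCapture* cap = cvCaptureFromCAM(0);
    IplImage* frame = cvQueryFrame(cap);

    CvBGStatModel* bgModel = cvCreateGaussianBGModel(frame, NULL);   // default parameters

    while( (frame = cvQueryFrame(cap)) != NULL )
    {
        cvUpdateBGStatModel(frame, bgModel, -1);                     // default learning rate
        // bgModel->foreground is the 8UC1 foreground mask,
        // bgModel->background the current background estimate.
    }

    cvReleaseBGStatModel(&bgModel);
    cvReleaseCapture(&cap);
*/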
3456
3457
3458 typedef struct CvBGCodeBookElem
3459 {
3460     struct CvBGCodeBookElem* next;
3461     int tLastUpdate;
3462     int stale;
3463     uchar boxMin[3];
3464     uchar boxMax[3];
3465     uchar learnMin[3];
3466     uchar learnMax[3];
3467 } CvBGCodeBookElem;
3468
3469 typedef struct CvBGCodeBookModel
3470 {
3471     CvSize size;
3472     int t;
3473     uchar cbBounds[3];
3474     uchar modMin[3];
3475     uchar modMax[3];
3476     CvBGCodeBookElem** cbmap;
3477     CvMemStorage* storage;
3478     CvBGCodeBookElem* freeList;
3479 } CvBGCodeBookModel;
3480
3481 CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel( void );
3482 CVAPI(void) cvReleaseBGCodeBookModel( CvBGCodeBookModel** model );
3483
3484 CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image,
3485                                CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
3486                                const CvArr* mask CV_DEFAULT(0) );
3487
3488 CVAPI(int) cvBGCodeBookDiff( const CvBGCodeBookModel* model, const CvArr* image,
3489                             CvArr* fgmask, CvRect roi CV_DEFAULT(cvRect(0,0,0,0)) );
3490
3491 CVAPI(void) cvBGCodeBookClearStale( CvBGCodeBookModel* model, int staleThresh,
3492                                    CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
3493                                    const CvArr* mask CV_DEFAULT(0) );
3494
3495 CVAPI(CvSeq*) cvSegmentFGMask( CvArr *fgmask, int poly1Hull0 CV_DEFAULT(1),
3496                               float perimScale CV_DEFAULT(4.f),
3497                               CvMemStorage* storage CV_DEFAULT(0),
3498                               CvPoint offset CV_DEFAULT(cvPoint(0,0)));
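
/* Usage sketch (illustrative only): codebook background model, learned on an initial
   batch of frames and then used to produce a foreground mask. The video name, frame
   counts and bound/mod parameters are assumptions; the OpenCV codebook sample converts
   frames to YCrCb before feeding them to the model.

    CvCapture* cap = cvCaptureFromFile("video.avi");
    IplImage* frame = cvQueryFrame(cap);
    IplImage* yuv = cvCloneImage(frame);

    CvBGCodeBookModel* model = cvCreateBGCodeBookModel();
    model->modMin[0] = model->modMin[1] = model->modMin[2] = 3;
    model->modMax[0] = model->modMax[1] = model->modMax[2] = 10;
    model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;

    for( int i = 0; i < 50; i++ )                        // learn the background on 50 frames
    {
        frame = cvQueryFrame(cap);
        cvCvtColor(frame, yuv, CV_BGR2YCrCb);
        cvBGCodeBookUpdate(model, yuv);
    }
    cvBGCodeBookClearStale(model, model->t / 2);         // drop rarely confirmed entries

    IplImage* fgmask = cvCreateImage(cvGetSize(yuv), IPL_DEPTH_8U, 1);
    frame = cvQueryFrame(cap);
    cvCvtColor(frame, yuv, CV_BGR2YCrCb);
    cvBGCodeBookDiff(model, yuv, fgmask);
    cvSegmentFGMask(fgmask);                             // connected-component cleanup

    cvReleaseBGCodeBookModel(&model);
    cvReleaseCapture(&cap);
*/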
3499
3500 #ifdef __cplusplus
3501 }
3502 #endif
3503
3504 #endif
3505
3506 /* End of file. */