Set stricter warning rules for gcc
author      Andrey Kamaev <no@email>
            Thu, 7 Jun 2012 17:21:29 +0000
committer   Andrey Kamaev <no@email>
            Thu, 7 Jun 2012 17:21:29 +0000
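
Most of this commit is mechanical fallout from the stricter diagnostics: cmake/OpenCVCompilerOptions.cmake (in the list below) turns the extra gcc warnings on, and the source changes then silence them, mainly by stripping trailing whitespace and by giving internal linkage (static) to helpers that are only used inside their own translation unit. The exact flag list is in the CMake diff, which this page does not reproduce; the stand-alone sketch below, with hypothetical file and function names, only illustrates the kind of warning involved and the one-word fix:

    // warn_demo.cpp -- illustrative only, not part of the commit.
    // Build with something like: g++ -Wall -Wextra -Wmissing-declarations -c warn_demo.cpp

    // External linkage and no previous prototype: gcc flags this
    // under -Wmissing-declarations ("no previous declaration for ...").
    float evalStump( float value, float threshold )
    {
        return value < threshold ? -1.0f : 1.0f;
    }

    // Internal linkage: the definition is its own declaration, so the
    // warning stays quiet. This is the change applied to the icv*
    // helpers throughout the haartraining sources shown further down.
    static float evalStumpFixed( float value, float threshold )
    {
        return value < threshold ? -1.0f : 1.0f;
    }
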
241 files changed:
apps/haartraining/_cvcommon.h
apps/haartraining/cvboost.cpp
apps/haartraining/cvhaarclassifier.cpp
apps/haartraining/cvhaartraining.cpp
apps/haartraining/cvsamples.cpp
apps/haartraining/performance.cpp
apps/traincascade/HOGfeatures.cpp
apps/traincascade/boost.cpp
apps/traincascade/cascadeclassifier.cpp
apps/traincascade/features.cpp
apps/traincascade/haarfeatures.cpp
apps/traincascade/imagestorage.cpp
apps/traincascade/lbpfeatures.cpp
apps/traincascade/traincascade.cpp
cmake/OpenCVCompilerOptions.cmake
cmake/OpenCVPCHSupport.cmake
modules/calib3d/perf/perf_precomp.hpp
modules/calib3d/src/checkchessboard.cpp
modules/calib3d/src/circlesgrid.cpp
modules/calib3d/src/precomp.hpp
modules/calib3d/src/quadsubpix.cpp
modules/calib3d/src/solvepnp.cpp
modules/contrib/src/ba.cpp
modules/contrib/src/chamfermatching.cpp
modules/contrib/src/colormap.cpp
modules/contrib/src/detection_based_tracker.cpp
modules/contrib/src/gencolors.cpp
modules/contrib/src/lda.cpp
modules/contrib/src/octree.cpp
modules/contrib/src/precomp.hpp
modules/core/include/opencv2/core/core.hpp
modules/core/include/opencv2/core/internal.hpp
modules/core/include/opencv2/core/operations.hpp
modules/core/include/opencv2/core/types_c.h
modules/core/perf/perf_precomp.hpp
modules/core/perf/perf_stat.cpp
modules/core/src/alloc.cpp
modules/core/src/arithm.cpp
modules/core/src/cmdparser.cpp
modules/core/src/mathfuncs.cpp
modules/core/src/matrix.cpp
modules/core/src/out.cpp
modules/core/src/stat.cpp
modules/core/src/system.cpp
modules/core/test/test_ds.cpp
modules/features2d/perf/perf_precomp.hpp
modules/features2d/src/brief.cpp
modules/features2d/src/descriptors.cpp
modules/features2d/src/detectors.cpp
modules/features2d/src/features2d_init.cpp
modules/features2d/src/matchers.cpp
modules/features2d/src/precomp.hpp
modules/features2d/test/test_features2d.cpp
modules/flann/include/opencv2/flann/any.h
modules/flann/include/opencv2/flann/dynamic_bitset.h
modules/flann/include/opencv2/flann/lsh_table.h
modules/gpu/perf/perf_precomp.hpp
modules/gpu/perf_cpu/perf_cpu_precomp.hpp
modules/gpu/src/precomp.hpp
modules/highgui/CMakeLists.txt
modules/highgui/include/opencv2/highgui/highgui_c.h
modules/highgui/perf/perf_precomp.hpp
modules/highgui/src/cap.cpp
modules/highgui/src/cap_ffmpeg.cpp
modules/highgui/src/cap_ffmpeg_impl.hpp
modules/highgui/src/cap_openni.cpp
modules/highgui/src/grfmt_jpeg.cpp
modules/highgui/src/loadsave.cpp
modules/highgui/src/precomp.hpp
modules/highgui/src/window_QT.cpp
modules/highgui/src/window_gtk.cpp
modules/imgproc/perf/perf_precomp.hpp
modules/imgproc/src/_list.h
modules/imgproc/src/floodfill.cpp
modules/imgproc/src/grabcut.cpp
modules/imgproc/src/hough.cpp
modules/imgproc/src/phasecorr.cpp
modules/imgproc/src/smooth.cpp
modules/imgproc/src/sumpixels.cpp
modules/legacy/include/opencv2/legacy/blobtrack.hpp
modules/legacy/include/opencv2/legacy/legacy.hpp
modules/legacy/src/3dtracker.cpp
modules/legacy/src/_kdtree.hpp
modules/legacy/src/blobtrackanalysishist.cpp
modules/legacy/src/blobtrackingkalman.cpp
modules/legacy/src/blobtrackingmsfg.cpp
modules/legacy/src/blobtrackingmsfgs.cpp
modules/legacy/src/blobtrackpostprockalman.cpp
modules/legacy/src/blobtrackpostproclinear.cpp
modules/legacy/src/calibfilter.cpp
modules/legacy/src/corrimages.cpp
modules/legacy/src/dpstereo.cpp
modules/legacy/src/eigenobjects.cpp
modules/legacy/src/enteringblobdetection.cpp
modules/legacy/src/enteringblobdetectionreal.cpp
modules/legacy/src/epilines.cpp
modules/legacy/src/kdtree.cpp
modules/legacy/src/lee.cpp
modules/legacy/src/levmar.cpp
modules/legacy/src/levmarprojbandle.cpp
modules/legacy/src/levmartrif.cpp
modules/legacy/src/morphcontours.cpp
modules/legacy/src/oneway.cpp
modules/legacy/src/precomp.hpp
modules/legacy/src/testseq.cpp
modules/legacy/src/trifocal.cpp
modules/legacy/src/vecfacetracking.cpp
modules/ml/src/gbt.cpp
modules/ml/src/inner_functions.cpp
modules/ml/src/precomp.hpp
modules/ml/src/svm.cpp
modules/ml/src/testset.cpp
modules/nonfree/perf/perf_precomp.hpp
modules/nonfree/src/precomp.hpp
modules/nonfree/src/surf.cpp
modules/objdetect/perf/perf_precomp.hpp
modules/objdetect/src/distancetransform.cpp
modules/objdetect/src/featurepyramid.cpp
modules/objdetect/src/fft.cpp
modules/objdetect/src/haar.cpp
modules/objdetect/src/latentsvm.cpp
modules/objdetect/src/latentsvmdetector.cpp
modules/objdetect/src/linemod.cpp
modules/objdetect/src/lsvmparser.cpp
modules/objdetect/src/matching.cpp
modules/objdetect/src/precomp.hpp
modules/photo/perf/perf_precomp.hpp
modules/photo/src/inpaint.cpp
modules/photo/src/precomp.hpp
modules/python/src2/cv2.cpp
modules/python/src2/cv2.cv.hpp
modules/stitching/include/opencv2/stitching/detail/util.hpp
modules/stitching/perf/perf_precomp.hpp
modules/stitching/perf/perf_stich.cpp
modules/ts/include/opencv2/ts/ts.hpp
modules/ts/include/opencv2/ts/ts_gtest.h
modules/ts/src/precomp.hpp
modules/ts/src/ts.cpp
modules/ts/src/ts_func.cpp
modules/ts/src/ts_gtest.cpp
modules/ts/src/ts_perf.cpp
modules/video/include/opencv2/video/video.hpp
modules/video/perf/perf_precomp.hpp
modules/video/src/precomp.hpp
modules/video/src/video_init.cpp
modules/videostab/include/opencv2/videostab/optical_flow.hpp
modules/videostab/src/clp.hpp
samples/c/adaptiveskindetector.cpp
samples/c/bgfg_codebook.cpp
samples/c/contours.c
samples/c/convert_cascade.c
samples/c/delaunay.c
samples/c/facedetect.cpp
samples/c/fback_c.c
samples/c/find_obj.cpp
samples/c/find_obj_calonder.cpp
samples/c/find_obj_ferns.cpp
samples/c/latentsvmdetect.cpp
samples/c/morphology.c
samples/c/motempl.c
samples/c/mser_sample.cpp
samples/c/mushroom.cpp
samples/c/one_way_sample.cpp
samples/c/polar_transforms.c
samples/c/pyramid_segmentation.c
samples/c/tree_engine.cpp
samples/cpp/3calibration.cpp
samples/cpp/OpenEXRimages_HighDynamicRange_Retina_toneMapping.cpp
samples/cpp/OpenEXRimages_HighDynamicRange_Retina_toneMapping_video.cpp
samples/cpp/bagofwords_classification.cpp
samples/cpp/bgfg_segm.cpp
samples/cpp/brief_match_test.cpp
samples/cpp/build3dmodel.cpp
samples/cpp/calibration.cpp
samples/cpp/calibration_artificial.cpp
samples/cpp/camshiftdemo.cpp
samples/cpp/chamfer.cpp
samples/cpp/connected_components.cpp
samples/cpp/contours2.cpp
samples/cpp/convexhull.cpp
samples/cpp/cout_mat.cpp
samples/cpp/delaunay2.cpp
samples/cpp/demhist.cpp
samples/cpp/descriptor_extractor_matcher.cpp
samples/cpp/detection_based_tracker_sample.cpp
samples/cpp/detector_descriptor_evaluation.cpp
samples/cpp/dft.cpp
samples/cpp/distrans.cpp
samples/cpp/drawing.cpp
samples/cpp/edge.cpp
samples/cpp/facerec_demo.cpp
samples/cpp/fback.cpp
samples/cpp/ffilldemo.cpp
samples/cpp/filestorage.cpp
samples/cpp/fitellipse.cpp
samples/cpp/generic_descriptor_match.cpp
samples/cpp/grabcut.cpp
samples/cpp/houghcircles.cpp
samples/cpp/houghlines.cpp
samples/cpp/hybridtrackingsample.cpp
samples/cpp/image.cpp
samples/cpp/imagelist_creator.cpp
samples/cpp/inpaint.cpp
samples/cpp/kalman.cpp
samples/cpp/kmeans.cpp
samples/cpp/laplace.cpp
samples/cpp/latentsvm_multidetect.cpp
samples/cpp/letter_recog.cpp
samples/cpp/linemod.cpp
samples/cpp/lkdemo.cpp
samples/cpp/logpolar_bsm.cpp
samples/cpp/matcher_simple.cpp
samples/cpp/matching_to_many_images.cpp
samples/cpp/meanshift_segmentation.cpp
samples/cpp/minarea.cpp
samples/cpp/morphology2.cpp
samples/cpp/multicascadeclassifier.cpp
samples/cpp/openni_capture.cpp
samples/cpp/peopledetect.cpp
samples/cpp/point_cloud.cpp
samples/cpp/points_classifier.cpp
samples/cpp/retinaDemo.cpp
samples/cpp/segment_objects.cpp
samples/cpp/select3dobj.cpp
samples/cpp/squares.cpp
samples/cpp/stereo_calib.cpp
samples/cpp/stereo_match.cpp
samples/cpp/stitching_detailed.cpp
samples/cpp/video_homography.cpp
samples/cpp/watershed.cpp
samples/gpu/cascadeclassifier.cpp
samples/gpu/cascadeclassifier_nvidia_api.cpp
samples/gpu/farneback_optical_flow.cpp
samples/gpu/hog.cpp
samples/gpu/morfology.cpp
samples/gpu/opticalflow_nvidia_api.cpp
samples/gpu/performance/performance.cpp
samples/gpu/performance/tests.cpp
samples/gpu/pyrlk_optical_flow.cpp
samples/gpu/stereo_match.cpp
samples/gpu/surf_keypoint_matcher.cpp

diff --git a/apps/haartraining/_cvcommon.h b/apps/haartraining/_cvcommon.h
index 688e3b2..e4f1081 100644
@@ -42,6 +42,9 @@
 #ifndef __CVCOMMON_H_
 #define __CVCOMMON_H_
 
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
 #include "cxcore.h"
 #include "cv.h"
 #include "cxmisc.h"
diff --git a/apps/haartraining/cvboost.cpp b/apps/haartraining/cvboost.cpp
index 2ee3637..88ada60 100644
@@ -80,11 +80,11 @@ typedef struct CvValArray
     ( *( (float*) (aux->data + ((int) (idx1)) * aux->step ) ) <  \
       *( (float*) (aux->data + ((int) (idx2)) * aux->step ) ) )
 
-CV_IMPLEMENT_QSORT_EX( icvSortIndexedValArray_16s, short, CMP_VALUES, CvValArray* )
+static CV_IMPLEMENT_QSORT_EX( icvSortIndexedValArray_16s, short, CMP_VALUES, CvValArray* )
 
-CV_IMPLEMENT_QSORT_EX( icvSortIndexedValArray_32s, int,   CMP_VALUES, CvValArray* )
+static CV_IMPLEMENT_QSORT_EX( icvSortIndexedValArray_32s, int,   CMP_VALUES, CvValArray* )
 
-CV_IMPLEMENT_QSORT_EX( icvSortIndexedValArray_32f, float, CMP_VALUES, CvValArray* )
+static CV_IMPLEMENT_QSORT_EX( icvSortIndexedValArray_32f, float, CMP_VALUES, CvValArray* )
 
 CV_BOOST_IMPL
 void cvGetSortedIndices( CvMat* val, CvMat* idx, int sortcols )
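
A note on the change just above: CV_IMPLEMENT_QSORT_EX expands to a complete function definition, so writing static in front of the invocation simply places the storage-class specifier in front of the generated function and gives it internal linkage. A reduced, hypothetical stand-in for the macro shows where the keyword lands (the real macro in the OpenCV core headers has the same shape, with more machinery):

    // Hypothetical, simplified analogue of CV_IMPLEMENT_QSORT_EX.
    #define ICV_IMPLEMENT_SORT( name, T )                             \
    void name( T* arr, int n )                                        \
    {                                                                 \
        for( int i = 1; i < n; i++ )                                  \
            for( int j = i; j > 0 && arr[j] < arr[j-1]; j-- )         \
            {                                                         \
                T t = arr[j]; arr[j] = arr[j-1]; arr[j-1] = t;        \
            }                                                         \
    }

    // The prefix becomes part of the expansion, i.e.
    //   static void icvSortFloats( float* arr, int n ) { ... }
    static ICV_IMPLEMENT_SORT( icvSortFloats, float )
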
@@ -181,16 +181,16 @@ float cvEvalStumpClassifier( CvClassifier* classifier, CvMat* sample )
     assert( classifier != NULL );
     assert( sample != NULL );
     assert( CV_MAT_TYPE( sample->type ) == CV_32FC1 );
-    
+
     if( (CV_MAT_ELEM( (*sample), float, 0,
             ((CvStumpClassifier*) classifier)->compidx )) <
-        ((CvStumpClassifier*) classifier)->threshold ) 
+        ((CvStumpClassifier*) classifier)->threshold )
         return ((CvStumpClassifier*) classifier)->left;
     return ((CvStumpClassifier*) classifier)->right;
 }
 
 #define ICV_DEF_FIND_STUMP_THRESHOLD( suffix, type, error )                              \
-CV_BOOST_IMPL int icvFindStumpThreshold_##suffix(                                              \
+static int icvFindStumpThreshold_##suffix(                                               \
         uchar* data, size_t datastep,                                                    \
         uchar* wdata, size_t wstep,                                                      \
         uchar* ydata, size_t ystep,                                                      \
@@ -430,13 +430,13 @@ CvClassifier* cvCreateStumpClassifier( CvMat* trainData,
     int ystep    = 0;
     uchar* idxdata = NULL;
     int idxstep    = 0;
-    int l = 0; /* number of indices */     
+    int l = 0; /* number of indices */
     uchar* wdata = NULL;
     int wstep    = 0;
 
     int* idx = NULL;
     int i = 0;
-    
+
     float sumw   = FLT_MAX;
     float sumwy  = FLT_MAX;
     float sumwyy = FLT_MAX;
@@ -553,7 +553,7 @@ CvClassifier* cvCreateStumpClassifier( CvMat* trainData,
               ( data + i * ((size_t) cstep), sstep,
                 wdata, wstep, ydata, ystep, (uchar*) idx, sizeof( int ), l,
                 &(stump->lerror), &(stump->rerror),
-                &(stump->threshold), &(stump->left), &(stump->right), 
+                &(stump->threshold), &(stump->left), &(stump->right),
                 &sumw, &sumwy, &sumwyy ) )
         {
             stump->compidx = i;
@@ -601,7 +601,7 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
     size_t ystep = 0;
     uchar* idxdata = NULL;
     size_t idxstep = 0;
-    int    l = 0; /* number of indices */     
+    int    l = 0; /* number of indices */
     uchar* wdata = NULL;
     size_t wstep = 0;
 
@@ -614,7 +614,7 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
 
     char* filter = NULL;
     int i = 0;
-    
+
     int compidx = 0;
     int stumperror;
     int portion;
@@ -635,7 +635,7 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
 
     int t_compidx;
     int t_n;
-    
+
     int ti;
     int tj;
     int tk;
@@ -722,7 +722,7 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
         if( ((CvMTStumpTrainParams*) trainParams)->getTrainData != NULL )
         {
             n = ((CvMTStumpTrainParams*) trainParams)->numcomp;
-        }        
+        }
     }
     assert( datan <= n );
 
@@ -755,14 +755,14 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
     memset( (void*) stump, 0, sizeof( CvStumpClassifier ) );
 
     portion = ((CvMTStumpTrainParams*)trainParams)->portion;
-    
+
     if( portion < 1 )
     {
         /* auto portion */
         portion = n;
         #ifdef _OPENMP
-        portion /= omp_get_max_threads();        
-        #endif /* _OPENMP */        
+        portion /= omp_get_max_threads();
+        #endif /* _OPENMP */
     }
 
     stump->eval = cvEvalStumpClassifier;
@@ -796,7 +796,7 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
 
         t_compidx = 0;
         t_n = 0;
-        
+
         ti = 0;
         tj = 0;
         tk = 0;
@@ -811,7 +811,7 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
         t_idx = NULL;
 
         mat.data.ptr = NULL;
-        
+
         if( datan < n )
         {
             /* prepare matrix for callback */
@@ -848,7 +848,7 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
                     {
                         t_idx[ti] = ti;
                     }
-                }                
+                }
             }
         }
 
@@ -902,12 +902,12 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
                                         t_idx[tk++] = curidx;
                                     }
                                 }
-                                if( findStumpThreshold_32s[stumperror]( 
+                                if( findStumpThreshold_32s[stumperror](
                                         t_data + ti * t_cstep, t_sstep,
                                         wdata, wstep, ydata, ystep,
                                         (uchar*) t_idx, sizeof( int ), tk,
                                         &lerror, &rerror,
-                                        &threshold, &left, &right, 
+                                        &threshold, &left, &right,
                                         &sumw, &sumwy, &sumwyy ) )
                                 {
                                     optcompidx = ti;
@@ -927,12 +927,12 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
                                         t_idx[tk++] = curidx;
                                     }
                                 }
-                                if( findStumpThreshold_32s[stumperror]( 
+                                if( findStumpThreshold_32s[stumperror](
                                         t_data + ti * t_cstep, t_sstep,
                                         wdata, wstep, ydata, ystep,
                                         (uchar*) t_idx, sizeof( int ), tk,
                                         &lerror, &rerror,
-                                        &threshold, &left, &right, 
+                                        &threshold, &left, &right,
                                         &sumw, &sumwy, &sumwyy ) )
                                 {
                                     optcompidx = ti;
@@ -952,12 +952,12 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
                                         t_idx[tk++] = curidx;
                                     }
                                 }
-                                if( findStumpThreshold_32s[stumperror]( 
+                                if( findStumpThreshold_32s[stumperror](
                                         t_data + ti * t_cstep, t_sstep,
                                         wdata, wstep, ydata, ystep,
                                         (uchar*) t_idx, sizeof( int ), tk,
                                         &lerror, &rerror,
-                                        &threshold, &left, &right, 
+                                        &threshold, &left, &right,
                                         &sumw, &sumwy, &sumwyy ) )
                                 {
                                     optcompidx = ti;
@@ -977,12 +977,12 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
                         case CV_16SC1:
                             for( ti = t_compidx; ti < MIN( sortedn, t_compidx + t_n ); ti++ )
                             {
-                                if( findStumpThreshold_16s[stumperror]( 
+                                if( findStumpThreshold_16s[stumperror](
                                         t_data + ti * t_cstep, t_sstep,
                                         wdata, wstep, ydata, ystep,
                                         sorteddata + ti * sortedcstep, sortedsstep, sortedm,
                                         &lerror, &rerror,
-                                        &threshold, &left, &right, 
+                                        &threshold, &left, &right,
                                         &sumw, &sumwy, &sumwyy ) )
                                 {
                                     optcompidx = ti;
@@ -992,12 +992,12 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
                         case CV_32SC1:
                             for( ti = t_compidx; ti < MIN( sortedn, t_compidx + t_n ); ti++ )
                             {
-                                if( findStumpThreshold_32s[stumperror]( 
+                                if( findStumpThreshold_32s[stumperror](
                                         t_data + ti * t_cstep, t_sstep,
                                         wdata, wstep, ydata, ystep,
                                         sorteddata + ti * sortedcstep, sortedsstep, sortedm,
                                         &lerror, &rerror,
-                                        &threshold, &left, &right, 
+                                        &threshold, &left, &right,
                                         &sumw, &sumwy, &sumwyy ) )
                                 {
                                     optcompidx = ti;
@@ -1007,12 +1007,12 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
                         case CV_32FC1:
                             for( ti = t_compidx; ti < MIN( sortedn, t_compidx + t_n ); ti++ )
                             {
-                                if( findStumpThreshold_32f[stumperror]( 
+                                if( findStumpThreshold_32f[stumperror](
                                         t_data + ti * t_cstep, t_sstep,
                                         wdata, wstep, ydata, ystep,
                                         sorteddata + ti * sortedcstep, sortedsstep, sortedm,
                                         &lerror, &rerror,
-                                        &threshold, &left, &right, 
+                                        &threshold, &left, &right,
                                         &sumw, &sumwy, &sumwyy ) )
                                 {
                                     optcompidx = ti;
@@ -1032,12 +1032,12 @@ CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,
                 va.data = t_data + ti * t_cstep;
                 va.step = t_sstep;
                 icvSortIndexedValArray_32s( t_idx, l, &va );
-                if( findStumpThreshold_32s[stumperror]( 
+                if( findStumpThreshold_32s[stumperror](
                         t_data + ti * t_cstep, t_sstep,
                         wdata, wstep, ydata, ystep,
                         (uchar*)t_idx, sizeof( int ), l,
                         &lerror, &rerror,
-                        &threshold, &left, &right, 
+                        &threshold, &left, &right,
                         &sumw, &sumwy, &sumwyy ) )
                 {
                     optcompidx = ti;
@@ -1117,7 +1117,7 @@ float cvEvalCARTClassifier( CvClassifier* classifier, CvMat* sample )
         {
             if( (CV_MAT_ELEM( (*sample), float, 0,
                     ((CvCARTClassifier*) classifier)->compidx[idx] )) <
-                ((CvCARTClassifier*) classifier)->threshold[idx] ) 
+                ((CvCARTClassifier*) classifier)->threshold[idx] )
             {
                 idx = ((CvCARTClassifier*) classifier)->left[idx];
             }
@@ -1133,7 +1133,7 @@ float cvEvalCARTClassifier( CvClassifier* classifier, CvMat* sample )
         {
             if( (CV_MAT_ELEM( (*sample), float,
                     ((CvCARTClassifier*) classifier)->compidx[idx], 0 )) <
-                ((CvCARTClassifier*) classifier)->threshold[idx] ) 
+                ((CvCARTClassifier*) classifier)->threshold[idx] )
             {
                 idx = ((CvCARTClassifier*) classifier)->left[idx];
             }
@@ -1142,14 +1142,14 @@ float cvEvalCARTClassifier( CvClassifier* classifier, CvMat* sample )
                 idx = ((CvCARTClassifier*) classifier)->right[idx];
             }
         } while( idx > 0 );
-    } 
+    }
 
     __END__;
 
     return ((CvCARTClassifier*) classifier)->val[-idx];
 }
 
-CV_BOOST_IMPL
+static
 float cvEvalCARTClassifierIdx( CvClassifier* classifier, CvMat* sample )
 {
     CV_FUNCNAME( "cvEvalCARTClassifierIdx" );
@@ -1170,7 +1170,7 @@ float cvEvalCARTClassifierIdx( CvClassifier* classifier, CvMat* sample )
         {
             if( (CV_MAT_ELEM( (*sample), float, 0,
                     ((CvCARTClassifier*) classifier)->compidx[idx] )) <
-                ((CvCARTClassifier*) classifier)->threshold[idx] ) 
+                ((CvCARTClassifier*) classifier)->threshold[idx] )
             {
                 idx = ((CvCARTClassifier*) classifier)->left[idx];
             }
@@ -1186,7 +1186,7 @@ float cvEvalCARTClassifierIdx( CvClassifier* classifier, CvMat* sample )
         {
             if( (CV_MAT_ELEM( (*sample), float,
                     ((CvCARTClassifier*) classifier)->compidx[idx], 0 )) <
-                ((CvCARTClassifier*) classifier)->threshold[idx] ) 
+                ((CvCARTClassifier*) classifier)->threshold[idx] )
             {
                 idx = ((CvCARTClassifier*) classifier)->left[idx];
             }
@@ -1195,7 +1195,7 @@ float cvEvalCARTClassifierIdx( CvClassifier* classifier, CvMat* sample )
                 idx = ((CvCARTClassifier*) classifier)->right[idx];
             }
         } while( idx > 0 );
-    } 
+    }
 
     __END__;
 
@@ -1209,7 +1209,7 @@ void cvReleaseCARTClassifier( CvClassifier** classifier )
     *classifier = NULL;
 }
 
-void CV_CDECL icvDefaultSplitIdx_R( int compidx, float threshold,
+static void CV_CDECL icvDefaultSplitIdx_R( int compidx, float threshold,
                                     CvMat* idx, CvMat** left, CvMat** right,
                                     void* userdata )
 {
@@ -1258,7 +1258,7 @@ void CV_CDECL icvDefaultSplitIdx_R( int compidx, float threshold,
     }
 }
 
-void CV_CDECL icvDefaultSplitIdx_C( int compidx, float threshold,
+static void CV_CDECL icvDefaultSplitIdx_C( int compidx, float threshold,
                                     CvMat* idx, CvMat** left, CvMat** right,
                                     void* userdata )
 {
@@ -1333,13 +1333,13 @@ CvClassifier* cvCreateCARTClassifier( CvMat* trainData,
     int count = 0;
     int i = 0;
     int j = 0;
-    
+
     CvCARTNode* intnode = NULL;
     CvCARTNode* list = NULL;
     int listcount = 0;
     CvMat* lidx = NULL;
     CvMat* ridx = NULL;
-    
+
     float maxerrdrop = 0.0F;
     int idx = 0;
 
@@ -1349,17 +1349,17 @@ CvClassifier* cvCreateCARTClassifier( CvMat* trainData,
     void* userdata;
 
     count = ((CvCARTTrainParams*) trainParams)->count;
-    
+
     assert( count > 0 );
 
-    datasize = sizeof( *cart ) + (sizeof( float ) + 3 * sizeof( int )) * count + 
+    datasize = sizeof( *cart ) + (sizeof( float ) + 3 * sizeof( int )) * count +
         sizeof( float ) * (count + 1);
-    
+
     cart = (CvCARTClassifier*) cvAlloc( datasize );
     memset( cart, 0, datasize );
-    
+
     cart->count = count;
-    
+
     cart->eval = cvEvalCARTClassifier;
     cart->save = NULL;
     cart->release = cvReleaseCARTClassifier;
@@ -1399,7 +1399,7 @@ CvClassifier* cvCreateCARTClassifier( CvMat* trainData,
         /* split last added node */
         splitIdxCallback( intnode[i-1].stump->compidx, intnode[i-1].stump->threshold,
             intnode[i-1].sampleIdx, &lidx, &ridx, userdata );
-        
+
         if( intnode[i-1].stump->lerror != 0.0F )
         {
             list[listcount].sampleIdx = lidx;
@@ -1436,7 +1436,7 @@ CvClassifier* cvCreateCARTClassifier( CvMat* trainData,
         {
             cvReleaseMat( &ridx );
         }
-        
+
         if( listcount == 0 ) break;
 
         /* find the best node to be added to the tree */
@@ -1474,7 +1474,7 @@ CvClassifier* cvCreateCARTClassifier( CvMat* trainData,
         cart->count++;
         cart->compidx[i] = intnode[i].stump->compidx;
         cart->threshold[i] = intnode[i].stump->threshold;
-        
+
         /* leaves */
         if( cart->left[i] <= 0 )
         {
@@ -1489,7 +1489,7 @@ CvClassifier* cvCreateCARTClassifier( CvMat* trainData,
             j++;
         }
     }
-    
+
     /* CLEAN UP */
     for( i = 0; i < count && (intnode[i].stump != NULL); i++ )
     {
@@ -1504,7 +1504,7 @@ CvClassifier* cvCreateCARTClassifier( CvMat* trainData,
         list[i].stump->release( (CvClassifier**) &(list[i].stump) );
         cvReleaseMat( &(list[i].sampleIdx) );
     }
-    
+
     cvFree( &intnode );
 
     return (CvClassifier*) cart;
@@ -1529,7 +1529,7 @@ typedef struct CvBoostTrainer
  * using ANY appropriate weak classifier
  */
 
-CV_BOOST_IMPL
+static
 CvBoostTrainer* icvBoostStartTraining( CvMat* trainClasses,
                                        CvMat* weakTrainVals,
                                        CvMat* /*weights*/,
@@ -1569,7 +1569,7 @@ CvBoostTrainer* icvBoostStartTraining( CvMat* trainClasses,
     {
         CV_MAT2VEC( *sampleIdx, idxdata, idxstep, idxnum );
     }
-        
+
     datasize = sizeof( *ptr ) + sizeof( *ptr->idx ) * idxnum;
     ptr = (CvBoostTrainer*) cvAlloc( datasize );
     memset( ptr, 0, datasize );
@@ -1578,7 +1578,7 @@ CvBoostTrainer* icvBoostStartTraining( CvMat* trainClasses,
 
     ptr->count = m;
     ptr->type = type;
-    
+
     if( idxnum > 0 )
     {
         CvScalar s;
@@ -1595,7 +1595,7 @@ CvBoostTrainer* icvBoostStartTraining( CvMat* trainClasses,
     {
         idx = (ptr->idx) ? ptr->idx[i] : i;
 
-        *((float*) (traindata + idx * trainstep)) = 
+        *((float*) (traindata + idx * trainstep)) =
             2.0F * (*((float*) (ydata + idx * ystep))) - 1.0F;
     }
 
@@ -1607,7 +1607,7 @@ CvBoostTrainer* icvBoostStartTraining( CvMat* trainClasses,
  * Discrete AdaBoost functions
  *
  */
-CV_BOOST_IMPL
+static
 float icvBoostNextWeakClassifierDAB( CvMat* weakEvalVals,
                                      CvMat* trainClasses,
                                      CvMat* /*weakTrainVals*/,
@@ -1651,18 +1651,18 @@ float icvBoostNextWeakClassifierDAB( CvMat* weakEvalVals,
 
         sumw += *((float*) (wdata + idx*wstep));
         err += (*((float*) (wdata + idx*wstep))) *
-            ( (*((float*) (evaldata + idx*evalstep))) != 
+            ( (*((float*) (evaldata + idx*evalstep))) !=
                 2.0F * (*((float*) (ydata + idx*ystep))) - 1.0F );
     }
     err /= sumw;
     err = -cvLogRatio( err );
-    
+
     for( i = 0; i < trainer->count; i++ )
     {
         idx = (trainer->idx) ? trainer->idx[i] : i;
 
-        *((float*) (wdata + idx*wstep)) *= expf( err * 
-            ((*((float*) (evaldata + idx*evalstep))) != 
+        *((float*) (wdata + idx*wstep)) *= expf( err *
+            ((*((float*) (evaldata + idx*evalstep))) !=
                 2.0F * (*((float*) (ydata + idx*ystep))) - 1.0F) );
         sumw += *((float*) (wdata + idx*wstep));
     }
@@ -1672,7 +1672,7 @@ float icvBoostNextWeakClassifierDAB( CvMat* weakEvalVals,
 
         *((float*) (wdata + idx * wstep)) /= sumw;
     }
-    
+
     return err;
 }
 
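
For readers following the boosting code: the two loops above perform the standard Discrete AdaBoost step. Assuming cvLogRatio(p) computes log(p / (1 - p)), the line err = -cvLogRatio(err) is the usual coefficient alpha = log((1 - err) / err). A compact sketch on plain arrays, not a line-for-line transcription of icvBoostNextWeakClassifierDAB (which works through strided matrices and an optional index set):

    #include <math.h>

    // Discrete AdaBoost reweighting. pred[i] is the weak classifier's
    // output in {-1,+1}; y[i] is the label in {0,1}, mapped to {-1,+1}
    // via 2*y - 1 exactly as in the loops above.
    static float dabUpdate( const float* pred, const float* y, float* w, int n )
    {
        float sumw = 0.f, err = 0.f;
        for( int i = 0; i < n; i++ )
        {
            sumw += w[i];
            err  += w[i] * (pred[i] != 2.f * y[i] - 1.f);   // weighted error
        }
        err /= sumw;
        float alpha = logf( (1.f - err) / err );            // == -cvLogRatio(err)

        sumw = 0.f;
        for( int i = 0; i < n; i++ )
        {
            // Up-weight the samples the current weak classifier got wrong.
            w[i] *= expf( alpha * (pred[i] != 2.f * y[i] - 1.f) );
            sumw += w[i];
        }
        for( int i = 0; i < n; i++ )
            w[i] /= sumw;                                   // renormalize
        return alpha;
    }
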
@@ -1681,7 +1681,7 @@ float icvBoostNextWeakClassifierDAB( CvMat* weakEvalVals,
  * Real AdaBoost functions
  *
  */
-CV_BOOST_IMPL
+static
 float icvBoostNextWeakClassifierRAB( CvMat* weakEvalVals,
                                      CvMat* trainClasses,
                                      CvMat* /*weakTrainVals*/,
@@ -1731,7 +1731,7 @@ float icvBoostNextWeakClassifierRAB( CvMat* weakEvalVals,
 
         *((float*) (wdata + idx*wstep)) /= sumw;
     }
-    
+
     return 1.0F;
 }
 
@@ -1743,7 +1743,7 @@ float icvBoostNextWeakClassifierRAB( CvMat* weakEvalVals,
 #define CV_LB_PROB_THRESH      0.01F
 #define CV_LB_WEIGHT_THRESHOLD 0.0001F
 
-CV_BOOST_IMPL
+static
 void icvResponsesAndWeightsLB( int num, uchar* wdata, int wstep,
                                uchar* ydata, int ystep,
                                uchar* fdata, int fstep,
@@ -1761,18 +1761,18 @@ void icvResponsesAndWeightsLB( int num, uchar* wdata, int wstep,
         *((float*) (wdata + idx*wstep)) = MAX( p * (1.0F - p), CV_LB_WEIGHT_THRESHOLD );
         if( *((float*) (ydata + idx*ystep)) == 1.0F )
         {
-            *((float*) (traindata + idx*trainstep)) = 
+            *((float*) (traindata + idx*trainstep)) =
                 1.0F / (MAX( p, CV_LB_PROB_THRESH ));
         }
         else
         {
-            *((float*) (traindata + idx*trainstep)) = 
+            *((float*) (traindata + idx*trainstep)) =
                 -1.0F / (MAX( 1.0F - p, CV_LB_PROB_THRESH ));
         }
     }
 }
 
-CV_BOOST_IMPL
+static
 CvBoostTrainer* icvBoostStartTrainingLB( CvMat* trainClasses,
                                          CvMat* weakTrainVals,
                                          CvMat* weights,
@@ -1819,7 +1819,7 @@ CvBoostTrainer* icvBoostStartTrainingLB( CvMat* trainClasses,
     {
         CV_MAT2VEC( *sampleIdx, idxdata, idxstep, idxnum );
     }
-        
+
     datasize = sizeof( *ptr ) + sizeof( *ptr->F ) * m + sizeof( *ptr->idx ) * idxnum;
     ptr = (CvBoostTrainer*) cvAlloc( datasize );
     memset( ptr, 0, datasize );
@@ -1828,7 +1828,7 @@ CvBoostTrainer* icvBoostStartTrainingLB( CvMat* trainClasses,
 
     ptr->count = m;
     ptr->type = type;
-    
+
     if( idxnum > 0 )
     {
         CvScalar s;
@@ -1854,7 +1854,7 @@ CvBoostTrainer* icvBoostStartTrainingLB( CvMat* trainClasses,
     return ptr;
 }
 
-CV_BOOST_IMPL
+static
 float icvBoostNextWeakClassifierLB( CvMat* weakEvalVals,
                                     CvMat* trainClasses,
                                     CvMat* weakTrainVals,
@@ -1900,7 +1900,7 @@ float icvBoostNextWeakClassifierLB( CvMat* weakEvalVals,
 
         trainer->F[idx] += *((float*) (evaldata + idx * evalstep));
     }
-    
+
     icvResponsesAndWeightsLB( trainer->count, wdata, wstep, ydata, ystep,
                               (uchar*) trainer->F, sizeof( *trainer->F ),
                               traindata, trainstep, trainer->idx );
@@ -1913,7 +1913,7 @@ float icvBoostNextWeakClassifierLB( CvMat* weakEvalVals,
  * Gentle AdaBoost
  *
  */
-CV_BOOST_IMPL
+static
 float icvBoostNextWeakClassifierGAB( CvMat* weakEvalVals,
                                      CvMat* trainClasses,
                                      CvMat* /*weakTrainVals*/,
@@ -1952,12 +1952,12 @@ float icvBoostNextWeakClassifierGAB( CvMat* weakEvalVals,
     {
         idx = (trainer->idx) ? trainer->idx[i] : i;
 
-        *((float*) (wdata + idx*wstep)) *= 
+        *((float*) (wdata + idx*wstep)) *=
             expf( -(*((float*) (evaldata + idx*evalstep)))
                   * ( 2.0F * (*((float*) (ydata + idx*ystep))) - 1.0F ) );
         sumw += *((float*) (wdata + idx*wstep));
     }
-    
+
     for( i = 0; i < trainer->count; i++ )
     {
         idx = (trainer->idx) ? trainer->idx[i] : i;
@@ -2033,10 +2033,10 @@ float cvBoostNextWeakClassifier( CvMat* weakEvalVals,
 
 typedef struct CvBtTrainer
 {
-    /* {{ external */    
+    /* {{ external */
     CvMat* trainData;
     int flags;
-    
+
     CvMat* trainClasses;
     int m;
     uchar* ydata;
@@ -2044,7 +2044,7 @@ typedef struct CvBtTrainer
 
     CvMat* sampleIdx;
     int numsamples;
-    
+
     float param[2];
     CvBoostType type;
     int numclasses;
@@ -2071,7 +2071,7 @@ typedef struct CvBtTrainer
 typedef void (*CvZeroApproxFunc)( float* approx, CvBtTrainer* trainer );
 
 /* Mean zero approximation */
-void icvZeroApproxMean( float* approx, CvBtTrainer* trainer )
+static void icvZeroApproxMean( float* approx, CvBtTrainer* trainer )
 {
     int i;
     int idx;
@@ -2088,7 +2088,7 @@ void icvZeroApproxMean( float* approx, CvBtTrainer* trainer )
 /*
  * Median zero approximation
  */
-void icvZeroApproxMed( float* approx, CvBtTrainer* trainer )
+static void icvZeroApproxMed( float* approx, CvBtTrainer* trainer )
 {
     int i;
     int idx;
@@ -2098,7 +2098,7 @@ void icvZeroApproxMed( float* approx, CvBtTrainer* trainer )
         idx = icvGetIdxAt( trainer->sampleIdx, i );
         trainer->f[i] = *((float*) (trainer->ydata + idx * trainer->ystep));
     }
-    
+
     icvSort_32f( trainer->f, trainer->numsamples, 0 );
     approx[0] = trainer->f[trainer->numsamples / 2];
 }
@@ -2106,7 +2106,7 @@ void icvZeroApproxMed( float* approx, CvBtTrainer* trainer )
 /*
  * 0.5 * log( mean(y) / (1 - mean(y)) ) where y in {0, 1}
  */
-void icvZeroApproxLog( float* approx, CvBtTrainer* trainer )
+static void icvZeroApproxLog( float* approx, CvBtTrainer* trainer )
 {
     float y_mean;
 
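
The comment above is the zero-order fit for the logistic case: the best constant model for labels y in {0, 1} is the half log-odds of the positive rate, the usual LogitBoost-style initialization. The same formula as a free-standing sketch (hypothetical helper; the real icvZeroApproxLog reads its labels through the trainer struct):

    #include <math.h>

    // F_0 = 0.5 * log( mean(y) / (1 - mean(y)) ), y in {0, 1}.
    static float zeroApproxLog( const float* y, int n )
    {
        float mean = 0.f;
        for( int i = 0; i < n; i++ )
            mean += y[i];
        mean /= (float) n;
        return 0.5f * logf( mean / (1.f - mean) );
    }
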
@@ -2117,7 +2117,7 @@ void icvZeroApproxLog( float* approx, CvBtTrainer* trainer )
 /*
  * 0 zero approximation
  */
-void icvZeroApprox0( float* approx, CvBtTrainer* trainer )
+static void icvZeroApprox0( float* approx, CvBtTrainer* trainer )
 {
     int i;
 
@@ -2143,7 +2143,7 @@ static CvZeroApproxFunc icvZeroApproxFunc[] =
 CV_BOOST_IMPL
 void cvBtNext( CvCARTClassifier** trees, CvBtTrainer* trainer );
 
-CV_BOOST_IMPL
+static
 CvBtTrainer* cvBtStart( CvCARTClassifier** trees,
                         CvMat* trainData,
                         int flags,
@@ -2164,13 +2164,13 @@ CvBtTrainer* cvBtStart( CvCARTClassifier** trees,
     float* zero_approx;
     int m;
     int i, j;
-    
+
     if( trees == NULL )
     {
         CV_ERROR( CV_StsNullPtr, "Invalid trees parameter" );
     }
-    
-    if( type < CV_DABCLASS || type > CV_MREG ) 
+
+    if( type < CV_DABCLASS || type > CV_MREG )
     {
         CV_ERROR( CV_StsUnsupportedFormat, "Unsupported type parameter" );
     }
@@ -2198,7 +2198,7 @@ CvBtTrainer* cvBtStart( CvCARTClassifier** trees,
     ptr->flags = flags;
     ptr->trainClasses = trainClasses;
     CV_MAT2VEC( *trainClasses, ptr->ydata, ptr->ystep, ptr->m );
-    
+
     memset( &(ptr->cartParams), 0, sizeof( ptr->cartParams ) );
     memset( &(ptr->stumpParams), 0, sizeof( ptr->stumpParams ) );
 
@@ -2229,10 +2229,10 @@ CvBtTrainer* cvBtStart( CvCARTClassifier** trees,
     ptr->sampleIdx = sampleIdx;
     ptr->numsamples = ( sampleIdx == NULL ) ? ptr->m
                              : MAX( sampleIdx->rows, sampleIdx->cols );
-    
+
     ptr->weights = cvCreateMat( 1, m, CV_32FC1 );
-    cvSet( ptr->weights, cvScalar( 1.0 ) );    
-    
+    cvSet( ptr->weights, cvScalar( 1.0 ) );
+
     if( type <= CV_GABCLASS )
     {
         ptr->boosttrainer = cvBoostStartTraining( ptr->trainClasses, ptr->y,
@@ -2261,7 +2261,7 @@ CvBtTrainer* cvBtStart( CvCARTClassifier** trees,
             {
                 trees[i]->val[j] += zero_approx[i];
             }
-        }    
+        }
         CV_CALL( cvFree( &zero_approx ) );
     }
 
@@ -2270,14 +2270,14 @@ CvBtTrainer* cvBtStart( CvCARTClassifier** trees,
     return ptr;
 }
 
-void icvBtNext_LSREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
+static void icvBtNext_LSREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
 {
     int i;
 
     /* yhat_i = y_i - F_(m-1)(x_i) */
     for( i = 0; i < trainer->m; i++ )
     {
-        trainer->y->data.fl[i] = 
+        trainer->y->data.fl[i] =
             *((float*) (trainer->ydata + i * trainer->ystep)) - trainer->f[i];
     }
 
@@ -2288,7 +2288,7 @@ void icvBtNext_LSREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
 }
 
 
-void icvBtNext_LADREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
+static void icvBtNext_LADREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
 {
     CvCARTClassifier* ptr;
     int i, j;
@@ -2296,7 +2296,7 @@ void icvBtNext_LADREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
     int sample_step;
     uchar* sample_data;
     int index;
-    
+
     int data_size;
     int* idx;
     float* resp;
@@ -2356,19 +2356,19 @@ void icvBtNext_LADREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
 
     cvFree( &idx );
     cvFree( &resp );
-    
+
     trees[0] = ptr;
 }
 
 
-void icvBtNext_MREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
+static void icvBtNext_MREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
 {
     CvCARTClassifier* ptr;
     int i, j;
     CvMat sample;
     int sample_step;
     uchar* sample_data;
-    
+
     int data_size;
     int* idx;
     float* resid;
@@ -2395,7 +2395,7 @@ void icvBtNext_MREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
         /* for delta */
         resp[i] = (float) fabs( resid[index] );
     }
-    
+
     /* delta = quantile_alpha{abs(resid_i)} */
     icvSort_32f( resp, trainer->numsamples, 0 );
     delta = resp[(int)(trainer->param[1] * (trainer->numsamples - 1))];
@@ -2407,7 +2407,7 @@ void icvBtNext_MREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
         trainer->y->data.fl[index] = MIN( delta, ((float) fabs( resid[index] )) ) *
                                  CV_SIGN( resid[index] );
     }
-    
+
     ptr = (CvCARTClassifier*) cvCreateCARTClassifier( trainer->trainData, trainer->flags,
         trainer->y, NULL, NULL, NULL, trainer->sampleIdx, trainer->weights,
         (CvClassifierTrainParams*) &trainer->cartParams );
@@ -2439,7 +2439,7 @@ void icvBtNext_MREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
             /* rhat = median(y_i - F_(m-1)(x_i)) */
             icvSort_32f( resp, respnum, 0 );
             rhat = resp[respnum / 2];
-            
+
             /* val = sum{sign(r_i - rhat_i) * min(delta, abs(r_i - rhat_i)}
              * r_i = y_i - F_(m-1)(x_i)
              */
@@ -2464,7 +2464,7 @@ void icvBtNext_MREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
     cvFree( &resid );
     cvFree( &resp );
     cvFree( &idx );
-    
+
     trees[0] = ptr;
 }
 
@@ -2476,14 +2476,14 @@ void icvBtNext_MREG( CvCARTClassifier** trees, CvBtTrainer* trainer )
 
 #define CV_LOG_VAL_MAX 18.0
 
-void icvBtNext_L2CLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
+static void icvBtNext_L2CLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
 {
     CvCARTClassifier* ptr;
     int i, j;
     CvMat sample;
     int sample_step;
     uchar* sample_data;
-    
+
     int data_size;
     int* idx;
     int respnum;
@@ -2505,7 +2505,7 @@ void icvBtNext_L2CLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
     weights = (float*) cvAlloc( data_size );
     data_size = trainer->m * sizeof( *sorted_weights );
     sorted_weights = (float*) cvAlloc( data_size );
-    
+
     /* yhat_i = (4 * y_i - 2) / ( 1 + exp( (4 * y_i - 2) * F_(m-1)(x_i) ) ).
      *   y_i in {0, 1}
      */
@@ -2523,32 +2523,32 @@ void icvBtNext_L2CLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
         sorted_weights[i] = weights[index];
         sum_weights += sorted_weights[i];
     }
-    
+
     trimmed_idx = NULL;
     sample_idx = trainer->sampleIdx;
     trimmed_num = trainer->numsamples;
     if( trainer->param[1] < 1.0F )
     {
         /* perform weight trimming */
-        
+
         float threshold;
         int count;
-        
+
         icvSort_32f( sorted_weights, trainer->numsamples, 0 );
 
         sum_weights *= (1.0F - trainer->param[1]);
-        
+
         i = -1;
         do { sum_weights -= sorted_weights[++i]; }
         while( sum_weights > 0.0F && i < (trainer->numsamples - 1) );
-        
+
         threshold = sorted_weights[i];
 
         while( i > 0 && sorted_weights[i-1] == threshold ) i--;
 
         if( i > 0 )
         {
-            trimmed_num = trainer->numsamples - i;            
+            trimmed_num = trainer->numsamples - i;
             trimmed_idx = cvCreateMat( 1, trimmed_num, CV_32FC1 );
             count = 0;
             for( i = 0; i < trainer->numsamples; i++ )
@@ -2560,12 +2560,12 @@ void icvBtNext_L2CLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
                     count++;
                 }
             }
-            
+
             assert( count == trimmed_num );
 
             sample_idx = trimmed_idx;
 
-            printf( "Used samples %%: %g\n", 
+            printf( "Used samples %%: %g\n",
                 (float) trimmed_num / (float) trainer->numsamples * 100.0F );
         }
     }
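
The block above (and its twin in icvBtNext_LKCLASS further down) implements weight trimming: the lightest samples, carrying at most a (1 - param[1]) share of the total weight, are dropped from the next tree's training set. A sketch of the rule on a plain weight vector (hypothetical helper; the real code writes the surviving indices into a CvMat):

    #include <algorithm>
    #include <vector>

    static std::vector<int> trimByWeight( const std::vector<float>& w,
                                          float keep_frac )      // e.g. 0.90
    {
        if( w.empty() )
            return std::vector<int>();

        std::vector<float> sorted( w );
        std::sort( sorted.begin(), sorted.end() );               // ascending

        float total = 0.f;
        for( size_t i = 0; i < sorted.size(); i++ )
            total += sorted[i];

        // Walk up from the lightest samples until dropping one more would
        // exceed the (1 - keep_frac) budget; that weight value becomes the
        // trimming threshold.
        float budget = (1.f - keep_frac) * total;
        size_t cut = 0;
        while( cut + 1 < sorted.size() && budget > sorted[cut] )
            budget -= sorted[cut++];
        float threshold = sorted[cut];

        std::vector<int> kept;
        for( size_t k = 0; k < w.size(); k++ )
            if( w[k] >= threshold )
                kept.push_back( (int) k );
        return kept;
    }
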
@@ -2608,22 +2608,22 @@ void icvBtNext_L2CLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
         }
         ptr->val[j] = val;
     }
-    
+
     if( trimmed_idx != NULL ) cvReleaseMat( &trimmed_idx );
     cvFree( &sorted_weights );
     cvFree( &weights );
     cvFree( &idx );
-    
+
     trees[0] = ptr;
 }
 
-void icvBtNext_LKCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
+static void icvBtNext_LKCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
 {
     int i, j, k, kk, num;
     CvMat sample;
     int sample_step;
     uchar* sample_data;
-    
+
     int data_size;
     int* idx;
     int respnum;
@@ -2673,7 +2673,7 @@ void icvBtNext_LKCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
                 sum_exp_f += exp_f;
             }
 
-            val = (float) ( (*((float*) (trainer->ydata + index * trainer->ystep))) 
+            val = (float) ( (*((float*) (trainer->ydata + index * trainer->ystep)))
                             == (float) k );
             val -= (float) ( (sum_exp_f == CV_VAL_MAX) ? 0.0 : ( 1.0 / sum_exp_f ) );
 
@@ -2692,25 +2692,25 @@ void icvBtNext_LKCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
         if( trainer->param[1] < 1.0F )
         {
             /* perform weight trimming */
-        
+
             float threshold;
             int count;
-        
+
             icvSort_32f( sorted_weights, trainer->numsamples, 0 );
 
             sum_weights *= (1.0F - trainer->param[1]);
-        
+
             i = -1;
             do { sum_weights -= sorted_weights[++i]; }
             while( sum_weights > 0.0F && i < (trainer->numsamples - 1) );
-        
+
             threshold = sorted_weights[i];
 
             while( i > 0 && sorted_weights[i-1] == threshold ) i--;
 
             if( i > 0 )
             {
-                trimmed_num = trainer->numsamples - i;            
+                trimmed_num = trainer->numsamples - i;
                 trimmed_idx->cols = trimmed_num;
                 count = 0;
                 for( i = 0; i < trainer->numsamples; i++ )
@@ -2722,12 +2722,12 @@ void icvBtNext_LKCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
                         count++;
                     }
                 }
-            
+
                 assert( count == trimmed_num );
 
                 sample_idx = trimmed_idx;
 
-                printf( "k: %d Used samples %%: %g\n", k, 
+                printf( "k: %d Used samples %%: %g\n", k,
                     (float) trimmed_num / (float) trainer->numsamples * 100.0F );
             }
         } /* weight trimming */
@@ -2773,7 +2773,7 @@ void icvBtNext_LKCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
             trees[k]->val[j] = val;
         }
     } /* for each class */
-    
+
     cvReleaseMat( &trimmed_idx );
     cvFree( &sorted_weights );
     cvFree( &weights );
@@ -2781,7 +2781,7 @@ void icvBtNext_LKCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
 }
 
 
-void icvBtNext_XXBCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
+static void icvBtNext_XXBCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
 {
     float alpha;
     int i;
@@ -2799,19 +2799,19 @@ void icvBtNext_XXBCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
     num_samples = ( sample_idx == NULL )
         ? trainer->m : MAX( sample_idx->rows, sample_idx->cols );
 
-    printf( "Used samples %%: %g\n", 
+    printf( "Used samples %%: %g\n",
         (float) num_samples / (float) trainer->numsamples * 100.0F );
 
     trees[0] = (CvCARTClassifier*) cvCreateCARTClassifier( trainer->trainData,
         trainer->flags, trainer->y, NULL, NULL, NULL,
         sample_idx, trainer->weights,
         (CvClassifierTrainParams*) &trainer->cartParams );
-    
+
     /* evaluate samples */
     CV_GET_SAMPLE( *trainer->trainData, trainer->flags, 0, sample );
     CV_GET_SAMPLE_STEP( *trainer->trainData, trainer->flags, sample_step );
     sample_data = sample.data.ptr;
-    
+
     for( i = 0; i < trainer->m; i++ )
     {
         sample.data.ptr = sample_data + i * sample_step;
@@ -2820,7 +2820,7 @@ void icvBtNext_XXBCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
 
     alpha = cvBoostNextWeakClassifier( weak_eval_vals, trainer->trainClasses,
         trainer->y, trainer->weights, trainer->boosttrainer );
-    
+
     /* multiply tree by alpha */
     for( i = 0; i <= trees[0]->count; i++ )
     {
@@ -2833,7 +2833,7 @@ void icvBtNext_XXBCLASS( CvCARTClassifier** trees, CvBtTrainer* trainer )
             trees[0]->val[i] = cvLogRatio( trees[0]->val[i] );
         }
     }
-    
+
     if( sample_idx != NULL && sample_idx != trainer->sampleIdx )
     {
         cvReleaseMat( &sample_idx );
@@ -2865,7 +2865,7 @@ void cvBtNext( CvCARTClassifier** trees, CvBtTrainer* trainer )
     int sample_step;
     uchar* sample_data;
 
-    icvBtNextFunc[trainer->type]( trees, trainer );        
+    icvBtNextFunc[trainer->type]( trees, trainer );
 
     /* shrinkage */
     if( trainer->param[0] != 1.0F )
@@ -2890,26 +2890,26 @@ void cvBtNext( CvCARTClassifier** trees, CvBtTrainer* trainer )
             index = icvGetIdxAt( trainer->sampleIdx, i );
             sample.data.ptr = sample_data + index * sample_step;
             for( j = 0; j < trainer->numclasses; j++ )
-            {            
-                trainer->f[index * trainer->numclasses + j] += 
+            {
+                trainer->f[index * trainer->numclasses + j] +=
                     trees[j]->eval( (CvClassifier*) (trees[j]), &sample );
             }
         }
     }
 }
 
-CV_BOOST_IMPL
+static
 void cvBtEnd( CvBtTrainer** trainer )
 {
     CV_FUNCNAME( "cvBtEnd" );
-    
+
     __BEGIN__;
-    
+
     if( trainer == NULL || (*trainer) == NULL )
     {
         CV_ERROR( CV_StsNullPtr, "Invalid trainer parameter" );
     }
-    
+
     if( (*trainer)->y != NULL )
     {
         CV_CALL( cvReleaseMat( &((*trainer)->y) ) );
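
cvBtNext, above, ends an iteration by adding the freshly fitted trees' outputs to the cached per-sample responses; when the shrinkage factor nu = trainer->param[0] differs from 1 the new trees are presumably damped first in the block marked /* shrinkage */ (not fully shown in this hunk), so the net update is F_m(x) = F_{m-1}(x) + nu * h_m(x). Written out on plain arrays for a single output (hypothetical helper):

    // Advance the boosted model's cached responses by one stage.
    static void accumulateResponses( float* f, const float* tree_out,
                                     float nu, int n )
    {
        for( int i = 0; i < n; i++ )
            f[i] += nu * tree_out[i];
    }
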
@@ -2931,7 +2931,7 @@ void cvBtEnd( CvBtTrainer** trainer )
 *                         Boosted tree model as a classifier                             *
 \****************************************************************************************/
 
-CV_BOOST_IMPL
+static
 float cvEvalBtClassifier( CvClassifier* classifier, CvMat* sample )
 {
     float val;
@@ -2939,7 +2939,7 @@ float cvEvalBtClassifier( CvClassifier* classifier, CvMat* sample )
     CV_FUNCNAME( "cvEvalBtClassifier" );
 
     __BEGIN__;
-    
+
     int i;
 
     val = 0.0F;
@@ -2972,7 +2972,7 @@ float cvEvalBtClassifier( CvClassifier* classifier, CvMat* sample )
     return val;
 }
 
-CV_BOOST_IMPL
+static
 float cvEvalBtClassifier2( CvClassifier* classifier, CvMat* sample )
 {
     float val;
@@ -2980,7 +2980,7 @@ float cvEvalBtClassifier2( CvClassifier* classifier, CvMat* sample )
     CV_FUNCNAME( "cvEvalBtClassifier2" );
 
     __BEGIN__;
-    
+
     CV_CALL( val = cvEvalBtClassifier( classifier, sample ) );
 
     __END__;
@@ -2988,7 +2988,7 @@ float cvEvalBtClassifier2( CvClassifier* classifier, CvMat* sample )
     return (float) (val >= 0.0F);
 }
 
-CV_BOOST_IMPL
+static
 float cvEvalBtClassifierK( CvClassifier* classifier, CvMat* sample )
 {
     int cls = 0;
@@ -2996,7 +2996,7 @@ float cvEvalBtClassifierK( CvClassifier* classifier, CvMat* sample )
     CV_FUNCNAME( "cvEvalBtClassifierK" );
 
     __BEGIN__;
-    
+
     int i, k;
     float max_val;
     int numclasses;
@@ -3072,7 +3072,7 @@ static CvEvalBtClassifier icvEvalBtClassifier[] =
     cvEvalBtClassifier
 };
 
-CV_BOOST_IMPL
+static
 int cvSaveBtClassifier( CvClassifier* classifier, const char* filename )
 {
     CV_FUNCNAME( "cvSaveBtClassifier" );
@@ -3087,7 +3087,7 @@ int cvSaveBtClassifier( CvClassifier* classifier, const char* filename )
 
     CV_ASSERT( classifier );
     CV_ASSERT( filename );
-    
+
     if( !icvMkDir( filename ) || (file = fopen( filename, "w" )) == 0 )
     {
         CV_ERROR( CV_StsError, "Unable to create file" );
@@ -3101,7 +3101,7 @@ int cvSaveBtClassifier( CvClassifier* classifier, const char* filename )
                                       ((CvBtClassifier*) classifier)->numclasses,
                                       ((CvBtClassifier*) classifier)->numfeatures,
                                       ((CvBtClassifier*) classifier)->numiter );
-    
+
     for( i = 0; i < ((CvBtClassifier*) classifier)->numclasses *
                     ((CvBtClassifier*) classifier)->numiter; i++ )
     {
@@ -3137,7 +3137,7 @@ int cvSaveBtClassifier( CvClassifier* classifier, const char* filename )
 }
 
 
-CV_BOOST_IMPL
+static
 void cvReleaseBtClassifier( CvClassifier** ptr )
 {
     CV_FUNCNAME( "cvReleaseBtClassifier" );
@@ -3183,7 +3183,7 @@ void cvReleaseBtClassifier( CvClassifier** ptr )
     __END__;
 }
 
-void cvTuneBtClassifier( CvClassifier* classifier, CvMat*, int flags,
+static void cvTuneBtClassifier( CvClassifier* classifier, CvMat*, int flags,
                          CvMat*, CvMat* , CvMat*, CvMat*, CvMat* )
 {
     CV_FUNCNAME( "cvTuneBtClassifier" );
@@ -3231,7 +3231,7 @@ void cvTuneBtClassifier( CvClassifier* classifier, CvMat*, int flags,
                 ((CvBtClassifier*) classifier)->seq->total;
             CV_CALL( ptr = cvAlloc( data_size ) );
             CV_CALL( cvCvtSeqToArray( ((CvBtClassifier*) classifier)->seq, ptr ) );
-            CV_CALL( cvReleaseMemStorage( 
+            CV_CALL( cvReleaseMemStorage(
                     &(((CvBtClassifier*) classifier)->seq->storage) ) );
             ((CvBtClassifier*) classifier)->trees = (CvCARTClassifier**) ptr;
             classifier->flags &= ~CV_TUNABLE;
@@ -3244,7 +3244,7 @@ void cvTuneBtClassifier( CvClassifier* classifier, CvMat*, int flags,
     __END__;
 }
 
-CvBtClassifier* icvAllocBtClassifier( CvBoostType type, int flags, int numclasses,
+static CvBtClassifier* icvAllocBtClassifier( CvBoostType type, int flags, int numclasses,
                                       int numiter )
 {
     CvBtClassifier* ptr;
@@ -3317,7 +3317,7 @@ CvClassifier* cvCreateBtClassifier( CvMat* trainData,
     CV_ASSERT( trainParams != NULL );
 
     type = ((CvBtClassifierTrainParams*) trainParams)->type;
-    
+
     if( type >= CV_DABCLASS && type <= CV_GABCLASS && sampleIdx )
     {
         CV_ERROR( CV_StsBadArg, "Sample indices are not supported for this type" );
@@ -3330,7 +3330,7 @@ CvClassifier* cvCreateBtClassifier( CvMat* trainData,
 
         cvMinMaxLoc( trainClasses, &min_val, &max_val );
         num_classes = (int) (max_val + 1.0);
-        
+
         CV_ASSERT( num_classes >= 2 );
     }
     else
@@ -3338,12 +3338,12 @@ CvClassifier* cvCreateBtClassifier( CvMat* trainData,
         num_classes = 1;
     }
     num_iter = ((CvBtClassifierTrainParams*) trainParams)->numiter;
-    
+
     CV_ASSERT( num_iter > 0 );
 
     ptr = icvAllocBtClassifier( type, CV_TUNABLE | flags, num_classes, num_iter );
     ptr->numfeatures = (CV_IS_ROW_SAMPLE( flags )) ? trainData->cols : trainData->rows;
-    
+
     i = 0;
 
     printf( "Iteration %d\n", 1 );
@@ -3358,7 +3358,7 @@ CvClassifier* cvCreateBtClassifier( CvMat* trainData,
     CV_CALL( cvSeqPushMulti( ptr->seq, trees, ptr->numclasses ) );
     CV_CALL( cvFree( &trees ) );
     ptr->numiter++;
-    
+
     for( i = 1; i < num_iter; i++ )
     {
         ptr->tune( (CvClassifier*) ptr, NULL, CV_TUNABLE, NULL, NULL, NULL, NULL, NULL );
@@ -3380,7 +3380,7 @@ CvClassifier* cvCreateBtClassifierFromFile( const char* filename )
     CvBtClassifier* ptr = 0;
 
     CV_FUNCNAME( "cvCreateBtClassifierFromFile" );
-    
+
     __BEGIN__;
 
     FILE* file;
@@ -3400,7 +3400,7 @@ CvClassifier* cvCreateBtClassifierFromFile( const char* filename )
     {
         CV_ERROR( CV_StsError, "Unable to open file" );
     }
-    
+
     values_read = fscanf( file, "%d %d %d %d", &type, &num_classes, &num_features, &num_classifiers );
     CV_Assert(values_read == 4);
 
@@ -3414,7 +3414,7 @@ CvClassifier* cvCreateBtClassifierFromFile( const char* filename )
     }
     ptr = icvAllocBtClassifier( (CvBoostType) type, 0, num_classes, num_classifiers );
     ptr->numfeatures = num_features;
-    
+
     for( i = 0; i < num_classes * num_classifiers; i++ )
     {
         int count;
@@ -3532,7 +3532,7 @@ CvMat* cvTrimWeights( CvMat* weights, CvMat* idx, float factor )
                     count++;
                 }
             }
-        
+
             assert( count == ptr->cols );
         }
         cvFree( &sorted_weights );
@@ -3572,7 +3572,7 @@ void cvReadTrainData( const char* filename, int flags,
     {
         CV_ERROR( CV_StsNullPtr, "trainClasses must be not NULL" );
     }
-    
+
     *trainData = NULL;
     *trainClasses = NULL;
     file = fopen( filename, "r" );
@@ -3592,7 +3592,7 @@ void cvReadTrainData( const char* filename, int flags,
     {
         CV_CALL( *trainData = cvCreateMat( n, m, CV_32FC1 ) );
     }
-    
+
     CV_CALL( *trainClasses = cvCreateMat( 1, m, CV_32FC1 ) );
 
     for( i = 0; i < m; i++ )
@@ -3618,7 +3618,7 @@ void cvReadTrainData( const char* filename, int flags,
     fclose( file );
 
     __END__;
-    
+
 }
 
 CV_BOOST_IMPL
@@ -3665,7 +3665,7 @@ void cvWriteTrainData( const char* filename, int flags,
     {
         CV_ERROR( CV_StsUnmatchedSizes, "Incorrect trainData and trainClasses sizes" );
     }
-    
+
     if( sampleIdx != NULL )
     {
         count = (sampleIdx->rows == 1) ? sampleIdx->cols : sampleIdx->rows;
@@ -3674,7 +3674,7 @@ void cvWriteTrainData( const char* filename, int flags,
     {
         count = m;
     }
-    
+
 
     file = fopen( filename, "w" );
     if( !file )
@@ -3705,7 +3705,7 @@ void cvWriteTrainData( const char* filename, int flags,
         for( j = 0; j < n; j++ )
         {
             fprintf( file, "%g ", ( (CV_IS_ROW_SAMPLE( flags ))
-                                    ? CV_MAT_ELEM( *trainData, float, idx, j ) 
+                                    ? CV_MAT_ELEM( *trainData, float, idx, j )
                                     : CV_MAT_ELEM( *trainData, float, j, idx ) ) );
         }
         fprintf( file, "%g\n", ( (clsrow)
@@ -3714,13 +3714,13 @@ void cvWriteTrainData( const char* filename, int flags,
     }
 
     fclose( file );
-    
+
     __END__;
 }
 
 
 #define ICV_RAND_SHUFFLE( suffix, type )                                                 \
-void icvRandShuffle_##suffix( uchar* data, size_t step, int num )                        \
+static void icvRandShuffle_##suffix( uchar* data, size_t step, int num )                 \
 {                                                                                        \
     time_t seed;                                                                         \
     type tmp;                                                                            \
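
ICV_RAND_SHUFFLE is another macro-generated family of helpers being made static here (the generated icvRandShuffle_* functions are only used inside this file). The macro body is cut off by the hunk boundary; a plausible strided shuffle of the kind it generates could look like the hypothetical template below (illustration only, not the actual expansion):

    #include <stdlib.h>

    // Fisher-Yates shuffle over num elements of type T stored with a byte
    // stride of step, matching the (data, step, num) signature above.
    template <typename T>
    static void randShuffleStrided( unsigned char* data, size_t step, int num )
    {
        for( int i = num - 1; i > 0; i-- )
        {
            int j = rand() % (i + 1);
            T* a = (T*) (data + (size_t) i * step);
            T* b = (T*) (data + (size_t) j * step);
            T tmp = *a; *a = *b; *b = tmp;
        }
    }
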
diff --git a/apps/haartraining/cvhaarclassifier.cpp b/apps/haartraining/cvhaarclassifier.cpp
index 458712b..af69bf1 100644
@@ -394,7 +394,7 @@ void icvSaveStageHaarClassifier( CvIntHaarClassifier* classifier, FILE* file )
 
 
 
-CvIntHaarClassifier* icvLoadCARTStageHaarClassifierF( FILE* file, int step )
+static CvIntHaarClassifier* icvLoadCARTStageHaarClassifierF( FILE* file, int step )
 {
     CvStageHaarClassifier* ptr = NULL;
 
diff --git a/apps/haartraining/cvhaartraining.cpp b/apps/haartraining/cvhaartraining.cpp
index dc9f3ac..253ffe6 100644
@@ -108,7 +108,7 @@ CvBackgroundData* cvbgdata = NULL;
 
 
 /*
- * get sum image offsets for <rect> corner points 
+ * get sum image offsets for <rect> corner points
  * step - row step (measured in image pixels!) of sum image
  */
 #define CV_SUM_OFFSETS( p0, p1, p2, p3, rect, step )                      \
@@ -122,7 +122,7 @@ CvBackgroundData* cvbgdata = NULL;
     (p3) = (rect).x + (rect).width + (step) * ((rect).y + (rect).height);
 
 /*
- * get tilted image offsets for <rect> corner points 
+ * get tilted image offsets for <rect> corner points
  * step - row step (measured in image pixels!) of tilted image
  */
 #define CV_TILTED_OFFSETS( p0, p1, p2, p3, rect, step )                   \
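
CV_SUM_OFFSETS and CV_TILTED_OFFSETS above only precompute the flattened indices of a rectangle's four corners inside the (width+1) x (height+1) sum images; the rectangle sum over the upright sum image is then the usual four-lookup integral-image expression used further down in this file (sum[p0] - sum[p1] - sum[p2] + sum[p3]). A small self-contained sketch of that pattern, with made-up names (rect_t, rect_sum) standing in for the macros:

    /* integral_demo.c -- illustrative only; mirrors the p0..p3 pattern above. */
    #include <stdio.h>

    typedef struct { int x, y, width, height; } rect_t;

    /* sum is a (W+1) x (H+1) integral image, row-major with row length `step`:
     * sum[r*step + c] holds the total of all image pixels above-left of (c, r). */
    static int rect_sum(const int *sum, int step, rect_t r)
    {
        int p0 = r.x           + step *  r.y;               /* top-left     */
        int p1 = r.x + r.width + step *  r.y;               /* top-right    */
        int p2 = r.x           + step * (r.y + r.height);   /* bottom-left  */
        int p3 = r.x + r.width + step * (r.y + r.height);   /* bottom-right */
        return sum[p0] - sum[p1] - sum[p2] + sum[p3];
    }

    int main(void)
    {
        enum { W = 3, H = 3, STEP = W + 1 };
        int img[H][W], sum[(H + 1) * STEP];
        for (int y = 0; y < H; y++)
            for (int x = 0; x < W; x++)
                img[y][x] = 1;                    /* 3x3 image of ones       */
        for (int c = 0; c < STEP; c++)
            sum[c] = 0;                           /* zero top border         */
        for (int r = 1; r <= H; r++) {
            sum[r * STEP] = 0;                    /* zero left border        */
            for (int c = 1; c <= W; c++)
                sum[r * STEP + c] = img[r - 1][c - 1]
                                  + sum[(r - 1) * STEP + c]
                                  + sum[r * STEP + c - 1]
                                  - sum[(r - 1) * STEP + c - 1];
        }
        rect_t roi = { 1, 1, 2, 2 };
        printf("%d\n", rect_sum(sum, STEP, roi)); /* prints 4: the 2x2 block */
        return 0;
    }
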
@@ -154,7 +154,7 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
 {
     CvIntHaarFeatures* features = NULL;
     CvTHaarFeature haarFeature;
-    
+
     CvMemStorage* storage = NULL;
     CvSeq* seq = NULL;
     CvSeqWriter writer;
@@ -172,7 +172,7 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
     float factor = 1.0F;
 
     factor = ((float) winsize.width) * winsize.height / (24 * 24);
-#if 0    
+#if 0
     s0 = (int) (s0 * factor);
     s1 = (int) (s1 * factor);
     s2 = (int) (s2 * factor);
@@ -252,7 +252,7 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
                                 CV_WRITE_SEQ_ELEM( haarFeature, writer );
                             }
                         }
-                            
+
                         // haar_y4
                         if ( (x+dx <= winsize.width ) && (y+dy*4 <= winsize.height) ) {
                             if (dx*4*dy < s0) continue;
@@ -277,7 +277,7 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
                         }
                     }
 
-                    if (mode != 0 /*BASIC*/) {                
+                    if (mode != 0 /*BASIC*/) {
                         // point
                         if ( (x+dx*3 <= winsize.width) && (y+dy*3 <= winsize.height) ) {
                             if (dx*9*dy < s0) continue;
@@ -289,12 +289,12 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
                             }
                         }
                     }
-                    
-                    if (mode == 2 /*ALL*/) {                
+
+                    if (mode == 2 /*ALL*/) {
                         // tilted haar_x2                                      (x, y, w, h, b, weight)
                         if ( (x+2*dx <= winsize.width) && (y+2*dx+dy <= winsize.height) && (x-dy>= 0) ) {
                             if (dx*2*dy < s1) continue;
-                            
+
                             if (!symmetric || (x <= (winsize.width / 2) )) {
                                 haarFeature = cvHaarFeature( "tilted_haar_x2",
                                     x, y, dx*2, dy, -1,
@@ -302,11 +302,11 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
                                 CV_WRITE_SEQ_ELEM( haarFeature, writer );
                             }
                         }
-                        
+
                         // tilted haar_y2                                      (x, y, w, h, b, weight)
                         if ( (x+dx <= winsize.width) && (y+dx+2*dy <= winsize.height) && (x-2*dy>= 0) ) {
                             if (dx*2*dy < s1) continue;
-                            
+
                             if (!symmetric || (x <= (winsize.width / 2) )) {
                                 haarFeature = cvHaarFeature( "tilted_haar_y2",
                                     x, y, dx, 2*dy, -1,
@@ -314,11 +314,11 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
                                 CV_WRITE_SEQ_ELEM( haarFeature, writer );
                             }
                         }
-                        
+
                         // tilted haar_x3                                   (x, y, w, h, b, weight)
                         if ( (x+3*dx <= winsize.width) && (y+3*dx+dy <= winsize.height) && (x-dy>= 0) ) {
                             if (dx*3*dy < s2) continue;
-                            
+
                             if (!symmetric || (x <= (winsize.width / 2) )) {
                                 haarFeature = cvHaarFeature( "tilted_haar_x3",
                                     x,    y,    dx*3, dy, -1,
@@ -326,11 +326,11 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
                                 CV_WRITE_SEQ_ELEM( haarFeature, writer );
                             }
                         }
-                        
+
                         // tilted haar_y3                                      (x, y, w, h, b, weight)
                         if ( (x+dx <= winsize.width) && (y+dx+3*dy <= winsize.height) && (x-3*dy>= 0) ) {
                             if (dx*3*dy < s2) continue;
-                            
+
                             if (!symmetric || (x <= (winsize.width / 2) )) {
                                 haarFeature = cvHaarFeature( "tilted_haar_y3",
                                     x,    y,    dx, 3*dy, -1,
@@ -338,12 +338,12 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
                                 CV_WRITE_SEQ_ELEM( haarFeature, writer );
                             }
                         }
-                        
-                        
+
+
                         // tilted haar_x4                                   (x, y, w, h, b, weight)
                         if ( (x+4*dx <= winsize.width) && (y+4*dx+dy <= winsize.height) && (x-dy>= 0) ) {
                             if (dx*4*dy < s3) continue;
-                            
+
                             if (!symmetric || (x <= (winsize.width / 2) )) {
                                 haarFeature = cvHaarFeature( "tilted_haar_x4",
 
@@ -353,11 +353,11 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
                                 CV_WRITE_SEQ_ELEM( haarFeature, writer );
                             }
                         }
-                        
+
                         // tilted haar_y4                                      (x, y, w, h, b, weight)
                         if ( (x+dx <= winsize.width) && (y+dx+4*dy <= winsize.height) && (x-4*dy>= 0) ) {
                             if (dx*4*dy < s3) continue;
-                            
+
                             if (!symmetric || (x <= (winsize.width / 2) )) {
                                 haarFeature = cvHaarFeature( "tilted_haar_y4",
                                     x,    y,    dx, 4*dy, -1,
@@ -365,10 +365,10 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
                                 CV_WRITE_SEQ_ELEM( haarFeature, writer );
                             }
                         }
-                        
+
 
                         /*
-                        
+
                           // tilted point
                           if ( (x+dx*3 <= winsize.width - 1) && (y+dy*3 <= winsize.height - 1) && (x-3*dy>= 0)) {
                           if (dx*9*dy < 36) continue;
@@ -395,10 +395,10 @@ CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
     features->winsize = winsize;
     cvCvtSeqToArray( seq, (CvArr*) features->feature );
     cvReleaseMemStorage( &storage );
-    
+
     icvConvertToFastHaarFeature( features->feature, features->fastfeature,
                                  features->count, (winsize.width + 1) );
-    
+
     return features;
 }
 
@@ -438,7 +438,7 @@ void icvConvertToFastHaarFeature( CvTHaarFeature* haarFeature,
                                 fastHaarFeature[i].rect[j].p3,
                                 haarFeature[i].rect[j].r, step )
             }
-            
+
         }
         else
         {
@@ -469,15 +469,15 @@ static
 CvHaarTrainigData* icvCreateHaarTrainingData( CvSize winsize, int maxnumsamples )
 {
     CvHaarTrainigData* data;
-    
+
     CV_FUNCNAME( "icvCreateHaarTrainingData" );
-    
+
     __BEGIN__;
 
     data = NULL;
     uchar* ptr = NULL;
     size_t datasize = 0;
-    
+
     datasize = sizeof( CvHaarTrainigData ) +
           /* sum and tilted */
         ( 2 * (winsize.width + 1) * (winsize.height + 1) * sizeof( sum_type ) +
@@ -548,7 +548,7 @@ void icvGetTrainingDataCallback( CvMat* mat, CvMat* sampleIdx, CvMat*,
     int j = 0;
     float val = 0.0F;
     float normfactor = 0.0F;
-    
+
     CvHaarTrainingData* training_data;
     CvIntHaarFeatures* haar_features;
 
@@ -639,7 +639,7 @@ void icvGetTrainingDataCallback( CvMat* mat, CvMat* sampleIdx, CvMat*,
 #if 0 /*def CV_VERBOSE*/
     if( first % 5000 == 0 )
     {
-        fprintf( stderr, "%3d%%\r", (int) (100.0 * first / 
+        fprintf( stderr, "%3d%%\r", (int) (100.0 * first /
             haar_features->count) );
         fflush( stderr );
     }
@@ -692,7 +692,7 @@ void icvPrecalculate( CvHaarTrainingData* data, CvIntHaarFeatures* haarFeatures,
             t_data = *data->valcache;
             t_idx = *data->idxcache;
             t_portion = MIN( portion, (numprecalculated - first) );
-            
+
             /* indices */
             t_idx.rows = t_portion;
             t_idx.data.ptr = data->idxcache->data.ptr + first * ((size_t)t_idx.step);
@@ -766,7 +766,7 @@ void icvSplitIndicesCallback( int compidx, float threshold,
         {
             if( cvEvalFastHaarFeature( fastfeature,
                     (sum_type*) (data->sum.data.ptr + i * data->sum.step),
-                    (sum_type*) (data->tilted.data.ptr + i * data->tilted.step) ) 
+                    (sum_type*) (data->tilted.data.ptr + i * data->tilted.step) )
                 < threshold * data->normfactor.data.fl[i] )
             {
                 (*left)->data.fl[(*left)->cols++] = (float) i;
@@ -792,7 +792,7 @@ void icvSplitIndicesCallback( int compidx, float threshold,
             index = (int) *((float*) (idxdata + i * idxstep));
             if( cvEvalFastHaarFeature( fastfeature,
                     (sum_type*) (data->sum.data.ptr + index * data->sum.step),
-                    (sum_type*) (data->tilted.data.ptr + index * data->tilted.step) ) 
+                    (sum_type*) (data->tilted.data.ptr + index * data->tilted.step) )
                 < threshold * data->normfactor.data.fl[index] )
             {
                 (*left)->data.fl[(*left)->cols++] = (float) index;
@@ -858,7 +858,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
     float sum_stage = 0.0F;
     float threshold = 0.0F;
     float falsealarm = 0.0F;
-    
+
     //CvMat* sampleIdx = NULL;
     CvMat* trimmedIdx;
     //float* idxdata = NULL;
@@ -871,7 +871,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
     int idx;
     int numsamples;
     int numtrimmed;
-    
+
     CvCARTHaarClassifier* classifier;
     CvSeq* seq = NULL;
     CvMemStorage* storage = NULL;
@@ -885,7 +885,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
     printf( "|  N |%%SMP|F|  ST.THR |    HR   |    FA   | EXP. ERR|\n" );
     printf( "+----+----+-+---------+---------+---------+---------+\n" );
 #endif /* CV_VERBOSE */
-    
+
     n = haarFeatures->count;
     m = data->sum.rows;
     numsamples = (sampleIdx) ? MAX( sampleIdx->rows, sampleIdx->cols ) : m;
@@ -909,7 +909,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
     trainParams.userdata = &userdata;
 
     eval = cvMat( 1, m, CV_32FC1, cvAlloc( sizeof( float ) * m ) );
-    
+
     storage = cvCreateMemStorage();
     seq = cvCreateSeq( 0, sizeof( *seq ), sizeof( classifier ), storage );
 
@@ -919,7 +919,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
     num_splits = 0;
     sumalpha = 0.0F;
     do
-    {     
+    {
 
 #ifdef CV_VERBOSE
         int v_wt = 0;
@@ -947,12 +947,12 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
         num_splits += classifier->count;
 
         cart->release( (CvClassifier**) &cart );
-        
+
         if( symmetric && (seq->total % 2) )
         {
             float normfactor = 0.0F;
             CvStumpClassifier* stump;
-            
+
             /* flip haar features */
             for( i = 0; i < classifier->count; i++ )
             {
@@ -961,9 +961,9 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
                     for( j = 0; j < CV_HAAR_FEATURE_MAX &&
                                     classifier->feature[i].rect[j].weight != 0.0F; j++ )
                     {
-                        classifier->feature[i].rect[j].r.x = data->winsize.width - 
+                        classifier->feature[i].rect[j].r.x = data->winsize.width -
                             classifier->feature[i].rect[j].r.x -
-                            classifier->feature[i].rect[j].r.width;                
+                            classifier->feature[i].rect[j].r.width;
                     }
                 }
                 else
@@ -975,7 +975,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
                     for( j = 0; j < CV_HAAR_FEATURE_MAX &&
                                     classifier->feature[i].rect[j].weight != 0.0F; j++ )
                     {
-                        classifier->feature[i].rect[j].r.x = data->winsize.width - 
+                        classifier->feature[i].rect[j].r.x = data->winsize.width -
                             classifier->feature[i].rect[j].r.x;
                         CV_SWAP( classifier->feature[i].rect[j].r.width,
                                  classifier->feature[i].rect[j].r.height, tmp );
@@ -1010,7 +1010,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
                     weakTrainVals, 0, 0, 0, trimmedIdx,
                     &(data->weights),
                     trainParams.stumpTrainParams );
-            
+
                 classifier->threshold[i] = stump->threshold;
                 if( classifier->left[i] <= 0 )
                 {
@@ -1021,8 +1021,8 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
                     classifier->val[-classifier->right[i]] = stump->right;
                 }
 
-                stump->release( (CvClassifier**) &stump );        
-                
+                stump->release( (CvClassifier**) &stump );
+
             }
 
             stumpTrainParams.getTrainData = icvGetTrainingDataCallback;
@@ -1040,7 +1040,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
             cvReleaseMat( &trimmedIdx );
             trimmedIdx = NULL;
         }
-        
+
         for( i = 0; i < numsamples; i++ )
         {
             idx = icvGetIdxAt( sampleIdx, i );
@@ -1054,10 +1054,10 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
         alpha = cvBoostNextWeakClassifier( &eval, &data->cls, weakTrainVals,
                                            &data->weights, trainer );
         sumalpha += alpha;
-        
+
         for( i = 0; i <= classifier->count; i++ )
         {
-            if( boosttype == CV_RABCLASS ) 
+            if( boosttype == CV_RABCLASS )
             {
                 classifier->val[i] = cvLogRatio( classifier->val[i] );
             }
@@ -1077,7 +1077,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
                 for( j = 0; j < seq->total; j++ )
                 {
                     classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
-                    eval.data.fl[numpos] += classifier->eval( 
+                    eval.data.fl[numpos] += classifier->eval(
                         (CvIntHaarClassifier*) classifier,
                         (sum_type*) (data->sum.data.ptr + idx * data->sum.step),
                         (sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
@@ -1163,7 +1163,7 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
             fflush( stdout );
         }
 #endif /* CV_VERBOSE */
-        
+
     } while( falsealarm > maxfalsealarm && (!maxsplits || (num_splits < maxsplits) ) );
     cvBoostEndTraining( &trainer );
 
@@ -1177,12 +1177,12 @@ CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
                                                                        threshold );
         cvCvtSeqToArray( seq, (CvArr*) stage->classifier );
     }
-    
+
     /* CLEANUP */
     cvReleaseMemStorage( &storage );
     cvReleaseMat( &weakTrainVals );
     cvFree( &(eval.data.ptr) );
-    
+
     return (CvIntHaarClassifier*) stage;
 }
 
@@ -1192,7 +1192,7 @@ CvBackgroundData* icvCreateBackgroundData( const char* filename, CvSize winsize
 {
     CvBackgroundData* data = NULL;
 
-    const char* dir = NULL;    
+    const char* dir = NULL;
     char full[PATH_MAX];
     char* imgfilename = NULL;
     size_t datasize = 0;
@@ -1202,7 +1202,7 @@ CvBackgroundData* icvCreateBackgroundData( const char* filename, CvSize winsize
     int    len   = 0;
 
     assert( filename != NULL );
-    
+
     dir = strrchr( filename, '\\' );
     if( dir == NULL )
     {
@@ -1223,7 +1223,7 @@ CvBackgroundData* icvCreateBackgroundData( const char* filename, CvSize winsize
     {
         count = 0;
         datasize = 0;
-        
+
         /* count */
         while( !feof( input ) )
         {
@@ -1257,11 +1257,11 @@ CvBackgroundData* icvCreateBackgroundData( const char* filename, CvSize winsize
             while( !feof( input ) )
             {
                 *imgfilename = '\0';
-                               if( !fgets( imgfilename, PATH_MAX - (int)(imgfilename - full) - 1, input ))
+                if( !fgets( imgfilename, PATH_MAX - (int)(imgfilename - full) - 1, input ))
                     break;
                 len = (int)strlen( imgfilename );
-                               if( len > 0 && imgfilename[len-1] == '\n' )
-                                       imgfilename[len-1] = 0, len--;
+                if( len > 0 && imgfilename[len-1] == '\n' )
+                    imgfilename[len-1] = 0, len--;
                 if( len > 0 )
                 {
                     if( (*imgfilename) == '#' ) continue; /* comment */
@@ -1351,14 +1351,14 @@ void icvGetNextFromBackgroundData( CvBackgroundData* data,
         {
             round = data->round;
 
-//#ifdef CV_VERBOSE 
+//#ifdef CV_VERBOSE
 //            printf( "Open background image: %s\n", data->filename[data->last] );
 //#endif /* CV_VERBOSE */
-          
+
             data->last = rand() % data->count;
             data->last %= data->count;
             img = cvLoadImage( data->filename[data->last], 0 );
-            if( !img )                         
+            if( !img )
                 continue;
             data->round += data->last / data->count;
             data->round = data->round % (data->winsize.width * data->winsize.height);
@@ -1368,7 +1368,7 @@ void icvGetNextFromBackgroundData( CvBackgroundData* data,
 
             offset.x = MIN( offset.x, img->width - data->winsize.width );
             offset.y = MIN( offset.y, img->height - data->winsize.height );
-            
+
             if( img != NULL && img->depth == IPL_DEPTH_8U && img->nChannels == 1 &&
                 offset.x >= 0 && offset.y >= 0 )
             {
@@ -1403,7 +1403,7 @@ void icvGetNextFromBackgroundData( CvBackgroundData* data,
     reader->scale = MAX(
         ((float) data->winsize.width + reader->point.x) / ((float) reader->src.cols),
         ((float) data->winsize.height + reader->point.y) / ((float) reader->src.rows) );
-    
+
     reader->img = cvMat( (int) (reader->scale * reader->src.rows + 0.5F),
                          (int) (reader->scale * reader->src.cols + 0.5F),
                           CV_8UC1, (void*) cvAlloc( datasize ) );
@@ -1576,11 +1576,11 @@ void icvGetAuxImages( CvMat* img, CvMat* sum, CvMat* tilted,
     sum_type   valsum   = 0;
     sqsum_type valsqsum = 0;
     double area = 0.0;
-    
+
     cvIntegral( img, sum, sqsum, tilted );
     normrect = cvRect( 1, 1, img->cols - 2, img->rows - 2 );
     CV_SUM_OFFSETS( p0, p1, p2, p3, normrect, img->cols + 1 )
-    
+
     area = normrect.width * normrect.height;
     valsum = ((sum_type*) (sum->data.ptr))[p0] - ((sum_type*) (sum->data.ptr))[p1]
            - ((sum_type*) (sum->data.ptr))[p2] + ((sum_type*) (sum->data.ptr))[p3];
@@ -1621,28 +1621,28 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
     int i = 0;
     ccounter_t getcount = 0;
     ccounter_t thread_getcount = 0;
-    ccounter_t consumed_count; 
+    ccounter_t consumed_count;
     ccounter_t thread_consumed_count;
-    
+
     /* private variables */
     CvMat img;
     CvMat sum;
     CvMat tilted;
     CvMat sqsum;
-    
+
     sum_type* sumdata;
     sum_type* tilteddata;
     float*    normfactor;
-    
+
     /* end private variables */
-    
+
     assert( data != NULL );
     assert( first + count <= data->maxnum );
     assert( cascade != NULL );
     assert( callback != NULL );
-    
+
     // if( !cvbgdata ) return 0; this check needs to be done in the callback for BG
-    
+
     CCOUNTER_SET_ZERO(getcount);
     CCOUNTER_SET_ZERO(thread_getcount);
     CCOUNTER_SET_ZERO(consumed_count);
@@ -1691,14 +1691,14 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
                 normfactor = data->normfactor.data.fl + i;
                 sum.data.ptr = (uchar*) sumdata;
                 tilted.data.ptr = (uchar*) tilteddata;
-                icvGetAuxImages( &img, &sum, &tilted, &sqsum, normfactor );            
+                icvGetAuxImages( &img, &sum, &tilted, &sqsum, normfactor );
                 if( cascade->eval( cascade, sumdata, tilteddata, *normfactor ) != 0.0F )
                 {
                     CCOUNTER_INC(thread_getcount);
                     break;
                 }
             }
-            
+
 #ifdef CV_VERBOSE
             if( (i - first) % 500 == 0 )
             {
@@ -1720,7 +1720,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
             CCOUNTER_ADD(consumed_count, thread_consumed_count);
         }
     } /* omp parallel */
-    
+
     if( consumed != NULL )
     {
         *consumed = (int)consumed_count;
@@ -1731,7 +1731,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
         /* *acceptance_ratio = ((double) count) / consumed_count; */
         *acceptance_ratio = CCOUNTER_DIV(count, consumed_count);
     }
-    
+
     return static_cast<int>(getcount);
 }
 
@@ -1791,7 +1791,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
 //                       CV_SQSUM_MAT_TYPE,
 //                       cvAlloc( sizeof( sqsum_type ) * (data->winsize.height + 1)
 //                                                     * (data->winsize.width + 1) ) );
-//        
+//
 //        #ifdef CV_OPENMP
 //        #pragma omp for schedule(static, 1)
 //        #endif /* CV_OPENMP */
@@ -1800,7 +1800,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
 //            for( ; ; )
 //            {
 //                icvGetBackgroundImage( cvbgdata, cvbgreader, &img );
-//                
+//
 //                CCOUNTER_INC(thread_consumed_count);
 //
 //                sumdata = (sum_type*) (data->sum.data.ptr + i * data->sum.step);
@@ -1808,7 +1808,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
 //                normfactor = data->normfactor.data.fl + i;
 //                sum.data.ptr = (uchar*) sumdata;
 //                tilted.data.ptr = (uchar*) tilteddata;
-//                icvGetAuxImages( &img, &sum, &tilted, &sqsum, normfactor );            
+//                icvGetAuxImages( &img, &sum, &tilted, &sqsum, normfactor );
 //                if( cascade->eval( cascade, sumdata, tilteddata, *normfactor ) != 0.0F )
 //                {
 //                    break;
@@ -1822,7 +1822,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
 //                fflush( stderr );
 //            }
 //#endif /* CV_VERBOSE */
-//            
+//
 //        }
 //
 //        cvFree( &(img.data.ptr) );
@@ -1842,7 +1842,7 @@ int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count,
 //        /* *acceptance_ratio = ((double) count) / consumed_count; */
 //        *acceptance_ratio = CCOUNTER_DIV(count, consumed_count);
 //    }
-//    
+//
 //    return count;
 //}
 
@@ -1853,24 +1853,24 @@ int icvGetHaarTraininDataFromVecCallback( CvMat* img, void* userdata )
     int c = 0;
 
     assert( img->rows * img->cols == ((CvVecFile*) userdata)->vecsize );
-    
+
     size_t elements_read = fread( &tmp, sizeof( tmp ), 1, ((CvVecFile*) userdata)->input );
     CV_Assert(elements_read == 1);
     elements_read = fread( ((CvVecFile*) userdata)->vector, sizeof( short ),
            ((CvVecFile*) userdata)->vecsize, ((CvVecFile*) userdata)->input );
     CV_Assert(elements_read == (size_t)((CvVecFile*) userdata)->vecsize);
-    
-    if( feof( ((CvVecFile*) userdata)->input ) || 
+
+    if( feof( ((CvVecFile*) userdata)->input ) ||
         (((CvVecFile*) userdata)->last)++ >= ((CvVecFile*) userdata)->count )
     {
         return 0;
     }
-    
+
     for( r = 0; r < img->rows; r++ )
     {
         for( c = 0; c < img->cols; c++ )
         {
-            CV_MAT_ELEM( *img, uchar, r, c ) = 
+            CV_MAT_ELEM( *img, uchar, r, c ) =
                 (uchar) ( ((CvVecFile*) userdata)->vector[r * img->cols + c] );
         }
     }
@@ -1878,14 +1878,14 @@ int icvGetHaarTraininDataFromVecCallback( CvMat* img, void* userdata )
     return 1;
 }
 
-int icvGetHaarTrainingDataFromBGCallback ( CvMat* img, void* /*userdata*/ )
+static int icvGetHaarTrainingDataFromBGCallback ( CvMat* img, void* /*userdata*/ )
 {
     if (! cvbgdata)
       return 0;
-    
+
     if (! cvbgreader)
       return 0;
-    
+
     // just in case icvGetBackgroundImage is not thread-safe ...
     #ifdef CV_OPENMP
     #pragma omp critical (get_background_image_callback)
@@ -1893,7 +1893,7 @@ int icvGetHaarTrainingDataFromBGCallback ( CvMat* img, void* /*userdata*/ )
     {
       icvGetBackgroundImage( cvbgdata, cvbgreader, img );
     }
-    
+
     return 1;
 }
 
@@ -1902,7 +1902,7 @@ int icvGetHaarTrainingDataFromBGCallback ( CvMat* img, void* /*userdata*/ )
  * Get training data from .vec file
  */
 static
-int icvGetHaarTrainingDataFromVec( CvHaarTrainingData* data, int first, int count,                                   
+int icvGetHaarTrainingDataFromVec( CvHaarTrainingData* data, int first, int count,
                                    CvIntHaarClassifier* cascade,
                                    const char* filename,
                                    int* consumed )
@@ -1914,8 +1914,8 @@ int icvGetHaarTrainingDataFromVec( CvHaarTrainingData* data, int first, int coun
     __BEGIN__;
 
     CvVecFile file;
-    short tmp = 0;    
-    
+    short tmp = 0;
+
     file.input = NULL;
     if( filename ) file.input = fopen( filename, "rb" );
 
@@ -1967,8 +1967,8 @@ int icvGetHaarTrainingDataFromBG( CvHaarTrainingData* data, int first, int count
     if (filename)
     {
         CvVecFile file;
-        short tmp = 0;    
-        
+        short tmp = 0;
+
         file.input = NULL;
         if( filename ) file.input = fopen( filename, "rb" );
 
@@ -2009,7 +2009,7 @@ int icvGetHaarTrainingDataFromBG( CvHaarTrainingData* data, int first, int count
 
 void cvCreateCascadeClassifier( const char* dirname,
                                 const char* vecfilename,
-                                const char* bgfilename, 
+                                const char* bgfilename,
                                 int npos, int nneg, int nstages,
                                 int numprecalculated,
                                 int numsplits,
@@ -2048,7 +2048,7 @@ void cvCreateCascadeClassifier( const char* dirname,
 
     cascade = (CvCascadeHaarClassifier*) icvCreateCascadeHaarClassifier( nstages );
     cascade->count = 0;
-    
+
     if( icvInitBackgroundReaders( bgfilename, winsize ) )
     {
         data = icvCreateHaarTrainingData( winsize, npos + nneg );
@@ -2061,7 +2061,7 @@ void cvCreateCascadeClassifier( const char* dirname,
         for( i = 0; i < nstages; i++, cascade->count++ )
         {
             sprintf( stagename, "%s%d/%s", dirname, i, CV_STAGE_CART_FILE_NAME );
-            cascade->classifier[i] = 
+            cascade->classifier[i] =
                 icvLoadCARTStageHaarClassifier( stagename, winsize.width + 1 );
 
             if( !icvMkDir( stagename ) )
@@ -2129,7 +2129,7 @@ void cvCreateCascadeClassifier( const char* dirname,
             data->sum.rows = data->tilted.rows = poscount + negcount;
             data->normfactor.cols = data->weights.cols = data->cls.cols =
                     poscount + negcount;
-        
+
             posweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F / poscount);
             negweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F / negcount);
             for( j = 0; j < poscount; j++ )
@@ -2169,7 +2169,7 @@ void cvCreateCascadeClassifier( const char* dirname,
             file = fopen( stagename, "w" );
             if( file != NULL )
             {
-                cascade->classifier[i]->save( 
+                cascade->classifier[i]->save(
                     (CvIntHaarClassifier*) cascade->classifier[i], file );
                 fclose( file );
             }
@@ -2207,7 +2207,7 @@ void cvCreateCascadeClassifier( const char* dirname,
         printf( "FAILED TO INITIALIZE BACKGROUND READERS\n" );
 #endif /* CV_VERBOSE */
     }
-    
+
     /* CLEAN UP */
     icvDestroyBackgroundReaders();
     cascade->release( (CvIntHaarClassifier**) &cascade );
@@ -2215,7 +2215,7 @@ void cvCreateCascadeClassifier( const char* dirname,
 
 /* tree cascade classifier */
 
-int icvNumSplits( CvStageHaarClassifier* stage )
+static int icvNumSplits( CvStageHaarClassifier* stage )
 {
     int i;
     int num;
@@ -2229,7 +2229,7 @@ int icvNumSplits( CvStageHaarClassifier* stage )
     return num;
 }
 
-void icvSetNumSamples( CvHaarTrainingData* training_data, int num )
+static void icvSetNumSamples( CvHaarTrainingData* training_data, int num )
 {
     assert( num <= training_data->maxnum );
 
@@ -2238,7 +2238,7 @@ void icvSetNumSamples( CvHaarTrainingData* training_data, int num )
     training_data->cls.cols = training_data->weights.cols = num;
 }
 
-void icvSetWeightsAndClasses( CvHaarTrainingData* training_data,
+static void icvSetWeightsAndClasses( CvHaarTrainingData* training_data,
                               int num1, float weight1, float cls1,
                               int num2, float weight2, float cls2 )
 {
@@ -2258,7 +2258,7 @@ void icvSetWeightsAndClasses( CvHaarTrainingData* training_data,
     }
 }
 
-CvMat* icvGetUsedValues( CvHaarTrainingData* training_data,
+static CvMat* icvGetUsedValues( CvHaarTrainingData* training_data,
                          int start, int num,
                          CvIntHaarFeatures* haar_features,
                          CvStageHaarClassifier* stage )
@@ -2302,7 +2302,7 @@ CvMat* icvGetUsedValues( CvHaarTrainingData* training_data,
     }
     total = last + 1;
     CV_CALL( ptr = cvCreateMat( num, total, CV_32FC1 ) );
-    
+
 
     #ifdef CV_OPENMP
     #pragma omp parallel for
@@ -2351,7 +2351,7 @@ typedef struct CvSplit
 
 void cvCreateTreeCascadeClassifier( const char* dirname,
                                     const char* vecfilename,
-                                    const char* bgfilename, 
+                                    const char* bgfilename,
                                     int npos, int nneg, int nstages,
                                     int numprecalculated,
                                     int numsplits,
@@ -2425,11 +2425,11 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
 
     sprintf( stage_name, "%s/", dirname );
     suffix = stage_name + strlen( stage_name );
-    
+
     if (! bg_vecfile)
       if( !icvInitBackgroundReaders( bgfilename, winsize ) && nstages > 0 )
           CV_ERROR( CV_StsError, "Unable to read negative images" );
-    
+
     if( nstages > 0 )
     {
         /* width-first search in the tree */
@@ -2438,7 +2438,7 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
             CvSplit* first_split;
             CvSplit* last_split;
             CvSplit* cur_split;
-            
+
             CvTreeCascadeNode* parent;
             CvTreeCascadeNode* cur_node;
             CvTreeCascadeNode* last_node;
@@ -2447,7 +2447,7 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
             parent = leaves;
             leaves = NULL;
             do
-            {                
+            {
                 int best_clusters; /* best selected number of clusters */
                 float posweight, negweight;
                 double leaf_fa_rate;
@@ -2536,7 +2536,7 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
                     multiple_clusters = NULL;
 
                     printf( "Number of used features: %d\n", single_num );
-                    
+
                     if( maxtreesplits >= 0 )
                     {
                         max_clusters = MIN( max_clusters, maxtreesplits - total_splits + 1 );
@@ -2594,7 +2594,7 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
                             printf( "Clusters are too small. Clustering aborted.\n" );
                             break;
                         }
-                        
+
                         cur_num = 0;
                         cur_node = last_node = NULL;
                         for( cluster = 0; (cluster < k) && (cur_num < best_num); cluster++ )
@@ -2676,7 +2676,7 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
 
                     CV_CALL( cur_split = (CvSplit*) cvAlloc( sizeof( *cur_split ) ) );
                     CV_ZERO_OBJ( cur_split );
-                    
+
                     if( last_split ) last_split->next = cur_split;
                     else first_split = cur_split;
                     last_split = cur_split;
@@ -2734,7 +2734,7 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
                     ? last_split->multiple_clusters : last_split->single_cluster;
                 parent = last_split->parent;
                 if( parent ) parent->child = cur_node;
-                
+
                 /* connect leaves via next_same_level and save them */
                 for( ; cur_node; cur_node = cur_node->next )
                 {
@@ -2768,14 +2768,14 @@ void cvCreateTreeCascadeClassifier( const char* dirname,
                 printf( "\nParent node: %s\n", buf );
                 printf( "Chosen number of splits: %d\n\n", (last_split->multiple_clusters)
                     ? (last_split->num_clusters - 1) : 0 );
-                
+
                 cur_split = last_split;
                 last_split = last_split->next;
                 cvFree( &cur_split );
             } /* for each split point */
 
             printf( "Total number of splits: %d\n", total_splits );
-            
+
             if( !(tcc->root) ) tcc->root = leaves;
             CV_CALL( icvPrintTreeCascade( tcc->root ) );
 
@@ -2903,7 +2903,7 @@ void cvCreateTrainingSamples( const char* filename,
                     inverse = (rand() > (RAND_MAX/2));
                 }
                 icvPlaceDistortedSample( &sample, inverse, maxintensitydev,
-                    maxxangle, maxyangle, maxzangle, 
+                    maxxangle, maxyangle, maxzangle,
                     0   /* nonzero means placing image without cut offs */,
                     0.0 /* nozero adds random shifting                  */,
                     0.0 /* nozero adds random scaling                   */,
@@ -2931,13 +2931,13 @@ void cvCreateTrainingSamples( const char* filename,
             cvFree( &(sample.data.ptr) );
             fclose( output );
         } /* if( output != NULL ) */
-        
+
         icvEndSampleDistortion( &data );
     }
-    
+
 #ifdef CV_VERBOSE
     printf( "\r      \r" );
-#endif /* CV_VERBOSE */ 
+#endif /* CV_VERBOSE */
 
 }
 
@@ -2986,7 +2986,7 @@ void cvCreateTestSamples( const char* infoname,
             {
                 cvNamedWindow( "Image", CV_WINDOW_AUTOSIZE );
             }
-            
+
             info = fopen( infoname, "w" );
             strcpy( fullname, infoname );
             filename = strrchr( fullname, '\\' );
@@ -3008,7 +3008,7 @@ void cvCreateTestSamples( const char* infoname,
             for( i = 0; i < count; i++ )
             {
                 icvGetNextFromBackgroundData( cvbgdata, cvbgreader );
-                
+
                 maxscale = MIN( 0.7F * cvbgreader->src.cols / winwidth,
                                    0.7F * cvbgreader->src.rows / winheight );
                 if( maxscale < 1.0F ) continue;
@@ -3025,14 +3025,14 @@ void cvCreateTestSamples( const char* infoname,
                     inverse = (rand() > (RAND_MAX/2));
                 }
                 icvPlaceDistortedSample( &win, inverse, maxintensitydev,
-                                         maxxangle, maxyangle, maxzangle, 
+                                         maxxangle, maxyangle, maxzangle,
                                          1, 0.0, 0.0, &data );
-                
-                
+
+
                 sprintf( filename, "%04d_%04d_%04d_%04d_%04d.jpg",
                          (i + 1), x, y, width, height );
-                
-                if( info ) 
+
+                if( info )
                 {
                     fprintf( info, "%s %d %d %d %d %d\n",
                         filename, 1, x, y, width, height );
index 2702384..3b4c872 100644
@@ -83,7 +83,7 @@
  *        cij - coeffs[i][j], coeffs[2][2] = 1
  *   (ui, vi) - rectangle vertices
  */
-void cvGetPerspectiveTransform( CvSize src_size, double quad[4][2],
+static void cvGetPerspectiveTransform( CvSize src_size, double quad[4][2],
                                 double coeffs[3][3] )
 {
     //CV_FUNCNAME( "cvWarpPerspective" );
@@ -130,7 +130,7 @@ void cvGetPerspectiveTransform( CvSize src_size, double quad[4][2],
 }
 
 /* Warps source into destination by a perspective transform */
-void cvWarpPerspective( CvArr* src, CvArr* dst, double quad[4][2] )
+static void cvWarpPerspective( CvArr* src, CvArr* dst, double quad[4][2] )
 {
     CV_FUNCNAME( "cvWarpPerspective" );
 
index c14e41c..2341f3a 100644
@@ -44,6 +44,9 @@
  *
  * Measure performance of classifier
  */
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
 #include "cv.h"
 #include "highgui.h"
 
index 68c1943..69f5b4a 100644
@@ -1,3 +1,6 @@
+#include "opencv2/core/core.hpp"\r
+#include "opencv2/core/internal.hpp"\r
+\r
 #include "HOGfeatures.h"\r
 #include "cascadeclassifier.h"\r
 \r
@@ -54,7 +57,7 @@ void CvHOGEvaluator::writeFeatures( FileStorage &fs, const Mat& featureMap ) con
             features[featIdx].write( fs, componentIdx );\r
             fs << "}";\r
         }\r
-    fs << "]";  \r
+    fs << "]";\r
 }\r
 \r
 void CvHOGEvaluator::generateFeatures()\r
@@ -85,11 +88,11 @@ void CvHOGEvaluator::generateFeatures()
             }\r
         }\r
         w = 4*t;\r
-        h = 2*t; \r
+        h = 2*t;\r
         for (x = 0; x <= winSize.width - w; x += blockStep.width)\r
         {\r
             for (y = 0; y <= winSize.height - h; y += blockStep.height)\r
-            {     \r
+            {\r
                 features.push_back(Feature(offset, x, y, 2*t, t));\r
             }\r
         }\r
@@ -136,7 +139,7 @@ void CvHOGEvaluator::Feature::write(FileStorage &fs) const
 //    int cellIdx = featComponent / N_BINS;\r
 //    int binIdx = featComponent % N_BINS;\r
 //\r
-//    fs << CC_RECTS << "[:" << rect[cellIdx].x << rect[cellIdx].y << \r
+//    fs << CC_RECTS << "[:" << rect[cellIdx].x << rect[cellIdx].y <<\r
 //        rect[cellIdx].width << rect[cellIdx].height << binIdx << "]";\r
 //}\r
 \r
@@ -144,7 +147,7 @@ void CvHOGEvaluator::Feature::write(FileStorage &fs) const
 //All block is nessesary for block normalization\r
 void CvHOGEvaluator::Feature::write(FileStorage &fs, int featComponentIdx) const\r
 {\r
-    fs << CC_RECT << "[:" << rect[0].x << rect[0].y << \r
+    fs << CC_RECT << "[:" << rect[0].x << rect[0].y <<\r
         rect[0].width << rect[0].height << featComponentIdx << "]";\r
 }\r
 \r
@@ -228,7 +231,7 @@ void CvHOGEvaluator::integralHistogram(const Mat &img, vector<Mat> &histogram, M
         memset( histBuf, 0, histSize.width * sizeof(histBuf[0]) );\r
         histBuf += histStep + 1;\r
         for( y = 0; y < qangle.rows; y++ )\r
-        { \r
+        {\r
             histBuf[-1] = 0.f;\r
             float strSum = 0.f;\r
             for( x = 0; x < qangle.cols; x++ )\r
index f05f458..18165fd 100644
@@ -1,3 +1,6 @@
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
 #include "boost.h"
 #include "cascadeclassifier.h"
 #include <queue>
@@ -139,7 +142,7 @@ static CvMat* cvPreprocessIndexArray( const CvMat* idx_arr, int data_arr_size, b
 //----------------------------- CascadeBoostParams -------------------------------------------------
 
 CvCascadeBoostParams::CvCascadeBoostParams() : minHitRate( 0.995F), maxFalseAlarm( 0.5F )
-{  
+{
     boost_type = CvBoost::GENTLE;
     use_surrogates = use_1se_rule = truncate_pruned_tree = false;
 }
@@ -157,7 +160,7 @@ CvCascadeBoostParams::CvCascadeBoostParams( int _boostType,
 
 void CvCascadeBoostParams::write( FileStorage &fs ) const
 {
-    String boostTypeStr = boost_type == CvBoost::DISCRETE ? CC_DISCRETE_BOOST : 
+    String boostTypeStr = boost_type == CvBoost::DISCRETE ? CC_DISCRETE_BOOST :
                           boost_type == CvBoost::REAL ? CC_REAL_BOOST :
                           boost_type == CvBoost::LOGIT ? CC_LOGIT_BOOST :
                           boost_type == CvBoost::GENTLE ? CC_GENTLE_BOOST : String();
@@ -197,7 +200,7 @@ bool CvCascadeBoostParams::read( const FileNode &node )
 void CvCascadeBoostParams::printDefaults() const
 {
     cout << "--boostParams--" << endl;
-    cout << "  [-bt <{" << CC_DISCRETE_BOOST << ", " 
+    cout << "  [-bt <{" << CC_DISCRETE_BOOST << ", "
                         << CC_REAL_BOOST << ", "
                         << CC_LOGIT_BOOST ", "
                         << CC_GENTLE_BOOST << "(default)}>]" << endl;
@@ -210,7 +213,7 @@ void CvCascadeBoostParams::printDefaults() const
 
 void CvCascadeBoostParams::printAttrs() const
 {
-    String boostTypeStr = boost_type == CvBoost::DISCRETE ? CC_DISCRETE_BOOST : 
+    String boostTypeStr = boost_type == CvBoost::DISCRETE ? CC_DISCRETE_BOOST :
                           boost_type == CvBoost::REAL ? CC_REAL_BOOST :
                           boost_type == CvBoost::LOGIT  ? CC_LOGIT_BOOST :
                           boost_type == CvBoost::GENTLE ? CC_GENTLE_BOOST : String();
@@ -259,7 +262,7 @@ bool CvCascadeBoostParams::scanAttr( const String prmName, const String val)
     else
         res = false;
 
-    return res;        
+    return res;
 }
 
 CvDTreeNode* CvCascadeBoostTrainData::subsample_data( const CvMat* _subsample_idx )
@@ -440,7 +443,7 @@ CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _fea
     set_params( _params );
     max_c_count = MAX( 2, featureEvaluator->getMaxCatCount() );
     var_type = cvCreateMat( 1, var_count + 2, CV_32SC1 );
-    if ( featureEvaluator->getMaxCatCount() > 0 ) 
+    if ( featureEvaluator->getMaxCatCount() > 0 )
     {
         numPrecalcIdx = 0;
         cat_var_count = var_count;
@@ -448,7 +451,7 @@ CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _fea
         for( int vi = 0; vi < var_count; vi++ )
         {
             var_type->data.i[vi] = vi;
-        }    
+        }
     }
     else
     {
@@ -457,8 +460,8 @@ CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _fea
         for( int vi = 1; vi <= var_count; vi++ )
         {
             var_type->data.i[vi-1] = -vi;
-        }        
-    }    
+        }
+    }
     var_type->data.i[var_count] = cat_var_count;
     var_type->data.i[var_count+1] = cat_var_count+1;
 
@@ -467,7 +470,7 @@ CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _fea
     treeBlockSize = MAX(treeBlockSize + BlockSizeDelta, MinBlockSize);
     tree_storage = cvCreateMemStorage( treeBlockSize );
     node_heap = cvCreateSet( 0, sizeof(node_heap[0]), sizeof(CvDTreeNode), tree_storage );
-    split_heap = cvCreateSet( 0, sizeof(split_heap[0]), maxSplitSize, tree_storage );  
+    split_heap = cvCreateSet( 0, sizeof(split_heap[0]), maxSplitSize, tree_storage );
 }
 
 CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _featureEvaluator,
@@ -477,15 +480,15 @@ CvCascadeBoostTrainData::CvCascadeBoostTrainData( const CvFeatureEvaluator* _fea
 {
     setData( _featureEvaluator, _numSamples, _precalcValBufSize, _precalcIdxBufSize, _params );
 }
+
 void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluator,
                                       int _numSamples,
                                       int _precalcValBufSize, int _precalcIdxBufSize,
-                                                                         const CvDTreeParams& _params )
-{    
+                                      const CvDTreeParams& _params )
+{
     int* idst = 0;
     unsigned short* udst = 0;
-        
+
     clear();
     shared = true;
     have_labels = true;
@@ -503,16 +506,16 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
     _resp = featureEvaluator->getCls();
     responses = &_resp;
     // TODO: check responses: elements must be 0 or 1
-    
-       if( _precalcValBufSize < 0 || _precalcIdxBufSize < 0)
+
+    if( _precalcValBufSize < 0 || _precalcIdxBufSize < 0)
         CV_Error( CV_StsOutOfRange, "_numPrecalcVal and _numPrecalcIdx must be positive or 0" );
 
-       var_count = var_all = featureEvaluator->getNumFeatures() * featureEvaluator->getFeatureSize();
+    var_count = var_all = featureEvaluator->getNumFeatures() * featureEvaluator->getFeatureSize();
     sample_count = _numSamples;
-    
-    is_buf_16u = false;     
-    if (sample_count < 65536) 
-        is_buf_16u = true; 
+
+    is_buf_16u = false;
+    if (sample_count < 65536)
+        is_buf_16u = true;
 
     numPrecalcVal = min( cvRound((double)_precalcValBufSize*1048576. / (sizeof(float)*sample_count)), var_count );
     numPrecalcIdx = min( cvRound((double)_precalcIdxBufSize*1048576. /
@@ -522,8 +525,8 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
 
     valCache.create( numPrecalcVal, sample_count, CV_32FC1 );
     var_type = cvCreateMat( 1, var_count + 2, CV_32SC1 );
-    
-    if ( featureEvaluator->getMaxCatCount() > 0 ) 
+
+    if ( featureEvaluator->getMaxCatCount() > 0 )
     {
         numPrecalcIdx = 0;
         cat_var_count = var_count;
@@ -531,7 +534,7 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
         for( int vi = 0; vi < var_count; vi++ )
         {
             var_type->data.i[vi] = vi;
-        }    
+        }
     }
     else
     {
@@ -540,14 +543,14 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
         for( int vi = 1; vi <= var_count; vi++ )
         {
             var_type->data.i[vi-1] = -vi;
-        }        
+        }
     }
     var_type->data.i[var_count] = cat_var_count;
     var_type->data.i[var_count+1] = cat_var_count+1;
     work_var_count = ( cat_var_count ? 0 : numPrecalcIdx ) + 1/*cv_lables*/;
     buf_size = (work_var_count + 1) * sample_count/*sample_indices*/;
     buf_count = 2;
-    
+
     if ( is_buf_16u )
         buf = cvCreateMat( buf_count, buf_size, CV_16UC1 );
     else
@@ -556,7 +559,7 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
     cat_count = cvCreateMat( 1, cat_var_count + 1, CV_32SC1 );
 
     // precalculate valCache and set indices in buf
-       precalculate();
+    precalculate();
 
     // now calculate the maximum size of split,
     // create memory storage that will keep nodes and splits of the decision tree
@@ -574,7 +577,7 @@ void CvCascadeBoostTrainData::setData( const CvFeatureEvaluator* _featureEvaluat
     tempBlockSize = MAX( tempBlockSize + BlockSizeDelta, MinBlockSize );
     temp_storage = cvCreateMemStorage( tempBlockSize );
     nv_heap = cvCreateSet( 0, sizeof(*nv_heap), nvSize, temp_storage );
-    
+
     data_root = new_node( 0, sample_count, 0, 0 );
 
     // set sample labels
@@ -617,7 +620,7 @@ void CvCascadeBoostTrainData::free_train_data()
 
 const int* CvCascadeBoostTrainData::get_class_labels( CvDTreeNode* n, int* labelsBuf)
 {
-    int nodeSampleCount = n->sample_count; 
+    int nodeSampleCount = n->sample_count;
     int rStep = CV_IS_MAT_CONT( responses->type ) ? 1 : responses->step / CV_ELEM_SIZE( responses->type );
 
     int* sampleIndicesBuf = labelsBuf; //
@@ -626,7 +629,7 @@ const int* CvCascadeBoostTrainData::get_class_labels( CvDTreeNode* n, int* label
     {
         int sidx = sampleIndices[si];
         labelsBuf[si] = (int)responses->data.fl[sidx*rStep];
-    }    
+    }
     return labelsBuf;
 }
 
@@ -643,9 +646,9 @@ const int* CvCascadeBoostTrainData::get_cv_labels( CvDTreeNode* n, int* labels_b
 void CvCascadeBoostTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* ordValuesBuf, int* sortedIndicesBuf,
         const float** ordValues, const int** sortedIndices, int* sampleIndicesBuf )
 {
-    int nodeSampleCount = n->sample_count; 
+    int nodeSampleCount = n->sample_count;
     const int* sampleIndices = get_sample_indices(n, sampleIndicesBuf);
-    
+
     if ( vi < numPrecalcIdx )
     {
         if( !is_buf_16u )
@@ -659,7 +662,7 @@ void CvCascadeBoostTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* o
 
             *sortedIndices = sortedIndicesBuf;
         }
-               
+
         if( vi < numPrecalcVal )
         {
             for( int i = 0; i < nodeSampleCount; i++ )
@@ -705,10 +708,10 @@ void CvCascadeBoostTrainData::get_ord_var_data( CvDTreeNode* n, int vi, float* o
             ordValuesBuf[i] = (&sampleValues[0])[sortedIndicesBuf[i]];
         *sortedIndices = sortedIndicesBuf;
     }
-       
+
     *ordValues = ordValuesBuf;
 }
+
 const int* CvCascadeBoostTrainData::get_cat_var_data( CvDTreeNode* n, int vi, int* catValuesBuf )
 {
     int nodeSampleCount = n->sample_count;
@@ -739,8 +742,8 @@ const int* CvCascadeBoostTrainData::get_cat_var_data( CvDTreeNode* n, int vi, in
 float CvCascadeBoostTrainData::getVarValue( int vi, int si )
 {
     if ( vi < numPrecalcVal && !valCache.empty() )
-           return valCache.at<float>( vi, si );
-       return (*featureEvaluator)( vi, si );
+        return valCache.at<float>( vi, si );
+    return (*featureEvaluator)( vi, si );
 }
 
 
@@ -858,7 +861,7 @@ CvDTreeNode* CvCascadeBoostTree::predict( int sampleIdx ) const
     CvDTreeNode* node = root;
     if( !node )
         CV_Error( CV_StsError, "The tree has not been trained yet" );
-   
+
     if ( ((CvCascadeBoostTrainData*)data)->featureEvaluator->getMaxCatCount() == 0 ) // ordered
     {
         while( node->left )
@@ -946,7 +949,7 @@ void CvCascadeBoostTree::read( const FileNode &node, CvBoost* _ensemble,
     int maxCatCount = ((CvCascadeBoostTrainData*)_data)->featureEvaluator->getMaxCatCount();
     int subsetN = (maxCatCount + 31)/32;
     int step = 3 + ( maxCatCount>0 ? subsetN : 1 );
-    
+
     queue<CvDTreeNode*> internalNodesQueue;
     FileNodeIterator internalNodesIt, leafValsuesIt;
     CvDTreeNode* prntNode, *cldNode;
@@ -986,11 +989,11 @@ void CvCascadeBoostTree::read( const FileNode &node, CvBoost* _ensemble,
         {
             prntNode->right = cldNode = data->new_node( 0, 0, 0, 0 );
             *leafValsuesIt >> cldNode->value; leafValsuesIt--;
-            cldNode->parent = prntNode;            
+            cldNode->parent = prntNode;
         }
         else
         {
-            prntNode->right = internalNodesQueue.front(); 
+            prntNode->right = internalNodesQueue.front();
             prntNode->right->parent = prntNode;
             internalNodesQueue.pop();
         }
@@ -999,7 +1002,7 @@ void CvCascadeBoostTree::read( const FileNode &node, CvBoost* _ensemble,
         {
             prntNode->left = cldNode = data->new_node( 0, 0, 0, 0 );
             *leafValsuesIt >> cldNode->value; leafValsuesIt--;
-            cldNode->parent = prntNode;            
+            cldNode->parent = prntNode;
         }
         else
         {
@@ -1089,7 +1092,7 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
                 }
             }
             CV_Assert( n1 == n );
-        }   
+        }
         else
         {
             int *ldst, *rdst;
@@ -1116,7 +1119,7 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
                 }
             }
             CV_Assert( n1 == n );
-        }  
+        }
     }
 
     // split cv_labels using newIdx relocation table
@@ -1171,7 +1174,7 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
             }
         }
     }
-    
+
     // split sample indices
     int *sampleIdx_src_buf = tempBuf + n;
     const int* sampleIdx_src = data->get_sample_indices(node, sampleIdx_src_buf);
@@ -1181,9 +1184,9 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
 
     if (data->is_buf_16u)
     {
-        unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols + 
+        unsigned short* ldst = (unsigned short*)(buf->data.s + left->buf_idx*buf->cols +
             workVarCount*scount + left->offset);
-        unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols + 
+        unsigned short* rdst = (unsigned short*)(buf->data.s + right->buf_idx*buf->cols +
             workVarCount*scount + right->offset);
         for (int i = 0; i < n; i++)
         {
@@ -1202,9 +1205,9 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
     }
     else
     {
-        int* ldst = buf->data.i + left->buf_idx*buf->cols + 
+        int* ldst = buf->data.i + left->buf_idx*buf->cols +
             workVarCount*scount + left->offset;
-        int* rdst = buf->data.i + right->buf_idx*buf->cols + 
+        int* rdst = buf->data.i + right->buf_idx*buf->cols +
             workVarCount*scount + right->offset;
         for (int i = 0; i < n; i++)
         {
@@ -1229,10 +1232,10 @@ void CvCascadeBoostTree::split_node_data( CvDTreeNode* node )
     }
 
     // deallocate the parent node data that is not needed anymore
-    data->free_node_data(node); 
+    data->free_node_data(node);
 }
 
-void auxMarkFeaturesInMap( const CvDTreeNode* node, Mat& featureMap)
+static void auxMarkFeaturesInMap( const CvDTreeNode* node, Mat& featureMap)
 {
     if ( node && node->split )
     {
@@ -1265,7 +1268,7 @@ bool CvCascadeBoost::train( const CvFeatureEvaluator* _featureEvaluator,
     set_params( _params );
     if ( (_params.boost_type == LOGIT) || (_params.boost_type == GENTLE) )
         data->do_responses_copy();
-    
+
     update_weights( 0 );
 
     cout << "+----+---------+---------+" << endl;
@@ -1316,7 +1319,7 @@ bool CvCascadeBoost::set_params( const CvBoostParams& _params )
     minHitRate = ((CvCascadeBoostParams&)_params).minHitRate;
     maxFalseAlarm = ((CvCascadeBoostParams&)_params).maxFalseAlarm;
     return ( ( minHitRate > 0 ) && ( minHitRate < 1) &&
-        ( maxFalseAlarm > 0 ) && ( maxFalseAlarm < 1) && 
+        ( maxFalseAlarm > 0 ) && ( maxFalseAlarm < 1) &&
         CvBoost::set_params( _params ));
 }
 
@@ -1364,7 +1367,7 @@ void CvCascadeBoost::update_weights( CvBoostTree* tree )
 
         if (data->is_buf_16u)
         {
-            unsigned short* labels = (unsigned short*)(buf->data.s + data->data_root->buf_idx*buf->cols + 
+            unsigned short* labels = (unsigned short*)(buf->data.s + data->data_root->buf_idx*buf->cols +
                 data->data_root->offset + (data->work_var_count-1)*data->sample_count);
             for( int i = 0; i < n; i++ )
             {
@@ -1382,7 +1385,7 @@ void CvCascadeBoost::update_weights( CvBoostTree* tree )
         }
         else
         {
-            int* labels = buf->data.i + data->data_root->buf_idx*buf->cols + 
+            int* labels = buf->data.i + data->data_root->buf_idx*buf->cols +
                 data->data_root->offset + (data->work_var_count-1)*data->sample_count;
 
             for( int i = 0; i < n; i++ )
@@ -1425,7 +1428,7 @@ void CvCascadeBoost::update_weights( CvBoostTree* tree )
         {
             // invert the subsample mask
             cvXorS( subsample_mask, cvScalar(1.), subsample_mask );
-            
+
             // run tree through all the non-processed samples
             for( int i = 0; i < n; i++ )
                 if( subsample_mask->data.ptr[i] )
@@ -1565,7 +1568,7 @@ bool CvCascadeBoost::isErrDesired()
     int sCount = data->sample_count,
         numPos = 0, numNeg = 0, numFalse = 0, numPosTrue = 0;
     vector<float> eval(sCount);
-    
+
     for( int i = 0; i < sCount; i++ )
         if( ((CvCascadeBoostTrainData*)data)->featureEvaluator->getCls( i ) == 1.0F )
             eval[numPos++] = predict( i, true );
@@ -1625,7 +1628,7 @@ bool CvCascadeBoost::read( const FileNode &node,
     set_params( _params );
 
     node[CC_STAGE_THRESHOLD] >> threshold;
-    FileNode rnode = node[CC_WEAK_CLASSIFIERS]; 
+    FileNode rnode = node[CC_WEAK_CLASSIFIERS];
 
     storage = cvCreateMemStorage();
     weak = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvBoostTree*), storage );
index 3433d74..ef6d181 100644 (file)
@@ -1,3 +1,6 @@
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
 #include "cascadeclassifier.h"
 #include <queue>
 
@@ -6,14 +9,14 @@ using namespace std;
 static const char* stageTypes[] = { CC_BOOST };
 static const char* featureTypes[] = { CC_HAAR, CC_LBP, CC_HOG };
 
-CvCascadeParams::CvCascadeParams() : stageType( defaultStageType ), 
+CvCascadeParams::CvCascadeParams() : stageType( defaultStageType ),
     featureType( defaultFeatureType ), winSize( cvSize(24, 24) )
-{ 
-    name = CC_CASCADE_PARAMS; 
+{
+    name = CC_CASCADE_PARAMS;
 }
 CvCascadeParams::CvCascadeParams( int _stageType, int _featureType ) : stageType( _stageType ),
     featureType( _featureType ), winSize( cvSize(24, 24) )
-{ 
+{
     name = CC_CASCADE_PARAMS;
 }
 
@@ -25,7 +28,7 @@ void CvCascadeParams::write( FileStorage &fs ) const
     CV_Assert( !stageTypeStr.empty() );
     fs << CC_STAGE_TYPE << stageTypeStr;
     String featureTypeStr = featureType == CvFeatureParams::HAAR ? CC_HAAR :
-                            featureType == CvFeatureParams::LBP ? CC_LBP : 
+                            featureType == CvFeatureParams::LBP ? CC_LBP :
                             featureType == CvFeatureParams::HOG ? CC_HOG :
                             0;
     CV_Assert( !stageTypeStr.empty() );
@@ -51,7 +54,7 @@ bool CvCascadeParams::read( const FileNode &node )
         return false;
     rnode >> featureTypeStr;
     featureType = !featureTypeStr.compare( CC_HAAR ) ? CvFeatureParams::HAAR :
-                  !featureTypeStr.compare( CC_LBP ) ? CvFeatureParams::LBP : 
+                  !featureTypeStr.compare( CC_LBP ) ? CvFeatureParams::LBP :
                   !featureTypeStr.compare( CC_HOG ) ? CvFeatureParams::HOG :
                   -1;
     if (featureType == -1)
@@ -125,15 +128,15 @@ bool CvCascadeParams::scanAttr( const String prmName, const String val )
 
 bool CvCascadeClassifier::train( const String _cascadeDirName,
                                 const String _posFilename,
-                                const String _negFilename, 
-                                int _numPos, int _numNeg, 
+                                const String _negFilename,
+                                int _numPos, int _numNeg,
                                 int _precalcValBufSize, int _precalcIdxBufSize,
                                 int _numStages,
                                 const CvCascadeParams& _cascadeParams,
                                 const CvFeatureParams& _featureParams,
                                 const CvCascadeBoostParams& _stageParams,
                                 bool baseFormatSave )
-{   
+{
     if( _cascadeDirName.empty() || _posFilename.empty() || _negFilename.empty() )
         CV_Error( CV_StsBadArg, "_cascadeDirName or _bgfileName or _vecFileName is NULL" );
 
@@ -181,17 +184,17 @@ bool CvCascadeClassifier::train( const String _cascadeDirName,
         cout << endl << "Stages 0-" << startNumStages-1 << " are loaded" << endl;
     else if ( startNumStages == 1)
         cout << endl << "Stage 0 is loaded" << endl;
-    
+
     double requiredLeafFARate = pow( (double) stageParams->maxFalseAlarm, (double) numStages ) /
                                 (double)stageParams->max_depth;
     double tempLeafFARate;
-    
+
     for( int i = startNumStages; i < numStages; i++ )
     {
         cout << endl << "===== TRAINING " << i << "-stage =====" << endl;
         cout << "<BEGIN" << endl;
 
-        if ( !updateTrainingSet( tempLeafFARate ) ) 
+        if ( !updateTrainingSet( tempLeafFARate ) )
         {
             cout << "Train dataset for temp stage can not be filled. "
                 "Branch training terminated." << endl;
@@ -211,10 +214,10 @@ bool CvCascadeClassifier::train( const String _cascadeDirName,
         stageClassifiers.push_back( tempStage );
 
         cout << "END>" << endl;
-        
+
         // save params
         String filename;
-        if ( i == 0) 
+        if ( i == 0)
         {
             filename = dirName + CC_PARAMS_FILENAME;
             FileStorage fs( filename, FileStorage::WRITE);
@@ -289,7 +292,7 @@ int CvCascadeClassifier::fillPassedSamples( int first, int count, bool isPositiv
         {
             bool isGetImg = isPositive ? imgReader.getPos( img ) :
                                            imgReader.getNeg( img );
-            if( !isGetImg ) 
+            if( !isGetImg )
                 return getcount;
             consumed++;
 
@@ -313,14 +316,14 @@ void CvCascadeClassifier::writeParams( FileStorage &fs ) const
 
 void CvCascadeClassifier::writeFeatures( FileStorage &fs, const Mat& featureMap ) const
 {
-    ((CvFeatureEvaluator*)((Ptr<CvFeatureEvaluator>)featureEvaluator))->writeFeatures( fs, featureMap ); 
+    ((CvFeatureEvaluator*)((Ptr<CvFeatureEvaluator>)featureEvaluator))->writeFeatures( fs, featureMap );
 }
 
 void CvCascadeClassifier::writeStages( FileStorage &fs, const Mat& featureMap ) const
 {
     char cmnt[30];
     int i = 0;
-    fs << CC_STAGES << "["; 
+    fs << CC_STAGES << "[";
     for( vector< Ptr<CvCascadeBoost> >::const_iterator it = stageClassifiers.begin();
         it != stageClassifiers.end(); it++, i++ )
     {
@@ -337,17 +340,17 @@ bool CvCascadeClassifier::readParams( const FileNode &node )
 {
     if ( !node.isMap() || !cascadeParams.read( node ) )
         return false;
-    
+
     stageParams = new CvCascadeBoostParams;
     FileNode rnode = node[CC_STAGE_PARAMS];
     if ( !stageParams->read( rnode ) )
         return false;
-    
+
     featureParams = CvFeatureParams::create(cascadeParams.featureType);
     rnode = node[CC_FEATURE_PARAMS];
     if ( !featureParams->read( rnode ) )
         return false;
-    return true;    
+    return true;
 }
 
 bool CvCascadeClassifier::readStages( const FileNode &node)
@@ -396,7 +399,7 @@ void CvCascadeClassifier::save( const String filename, bool baseFormat )
     fs << FileStorage::getDefaultObjectName(filename) << "{";
     if ( !baseFormat )
     {
-        Mat featureMap; 
+        Mat featureMap;
         getUsedFeaturesIdxMap( featureMap );
         writeParams( fs );
         fs << CC_STAGE_NUM << (int)stageClassifiers.size();
@@ -409,7 +412,7 @@ void CvCascadeClassifier::save( const String filename, bool baseFormat )
         CvSeq* weak;
         if ( cascadeParams.featureType != CvFeatureParams::HAAR )
             CV_Error( CV_StsBadFunc, "old file format is used for Haar-like features only");
-        fs << ICV_HAAR_SIZE_NAME << "[:" << cascadeParams.winSize.width << 
+        fs << ICV_HAAR_SIZE_NAME << "[:" << cascadeParams.winSize.width <<
             cascadeParams.winSize.height << "]";
         fs << ICV_HAAR_STAGES_NAME << "[";
         for( size_t si = 0; si < stageClassifiers.size(); si++ )
@@ -424,16 +427,16 @@ void CvCascadeClassifier::save( const String filename, bool baseFormat )
                 int inner_node_idx = -1, total_inner_node_idx = -1;
                 queue<const CvDTreeNode*> inner_nodes_queue;
                 CvCascadeBoostTree* tree = *((CvCascadeBoostTree**) cvGetSeqElem( weak, wi ));
-                
+
                 fs << "[";
                 /*sprintf( buf, "tree %d", wi );
                 CV_CALL( cvWriteComment( fs, buf, 1 ) );*/
 
                 const CvDTreeNode* tempNode;
-                
+
                 inner_nodes_queue.push( tree->get_root() );
                 total_inner_node_idx++;
-                
+
                 while (!inner_nodes_queue.empty())
                 {
                     tempNode = inner_nodes_queue.front();
@@ -498,7 +501,7 @@ bool CvCascadeClassifier::load( const String cascadeDirName )
         node = fs.getFirstTopLevelNode();
         if ( !fs.isOpened() )
             break;
-        CvCascadeBoost *tempStage = new CvCascadeBoost; 
+        CvCascadeBoost *tempStage = new CvCascadeBoost;
 
         if ( !tempStage->read( node, (CvFeatureEvaluator*)featureEvaluator, *((CvCascadeBoostParams*)stageParams )) )
         {
@@ -516,11 +519,11 @@ void CvCascadeClassifier::getUsedFeaturesIdxMap( Mat& featureMap )
     int varCount = featureEvaluator->getNumFeatures() * featureEvaluator->getFeatureSize();
     featureMap.create( 1, varCount, CV_32SC1 );
     featureMap.setTo(Scalar(-1));
-    
+
     for( vector< Ptr<CvCascadeBoost> >::const_iterator it = stageClassifiers.begin();
         it != stageClassifiers.end(); it++ )
         ((CvCascadeBoost*)((Ptr<CvCascadeBoost>)(*it)))->markUsedFeaturesInMap( featureMap );
-    
+
     for( int fi = 0, idx = 0; fi < varCount; fi++ )
         if ( featureMap.at<int>(0, fi) >= 0 )
             featureMap.ptr<int>(0)[fi] = idx++;
index c117a99..8ecdfcc 100644 (file)
@@ -1,3 +1,6 @@
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
 #include "traincascade_features.h"
 #include "cascadeclassifier.h"
 
@@ -28,7 +31,7 @@ bool CvParams::scanAttr( const String prmName, const String val ) { return false
 
 CvFeatureParams::CvFeatureParams() : maxCatCount( 0 ), featSize( 1 )
 {
-    name = CC_FEATURE_PARAMS; 
+    name = CC_FEATURE_PARAMS;
 }
 
 void CvFeatureParams::init( const CvFeatureParams& fp )
@@ -55,7 +58,7 @@ bool CvFeatureParams::read( const FileNode &node )
 Ptr<CvFeatureParams> CvFeatureParams::create( int featureType )
 {
     return featureType == HAAR ? Ptr<CvFeatureParams>(new CvHaarFeatureParams) :
-        featureType == LBP ? Ptr<CvFeatureParams>(new CvLBPFeatureParams) : 
+        featureType == LBP ? Ptr<CvFeatureParams>(new CvLBPFeatureParams) :
         featureType == HOG ? Ptr<CvFeatureParams>(new CvHOGFeatureParams) :
         Ptr<CvFeatureParams>();
 }
@@ -84,7 +87,7 @@ void CvFeatureEvaluator::setImage(const Mat &img, uchar clsLabel, int idx)
 Ptr<CvFeatureEvaluator> CvFeatureEvaluator::create(int type)
 {
     return type == CvFeatureParams::HAAR ? Ptr<CvFeatureEvaluator>(new CvHaarEvaluator) :
-        type == CvFeatureParams::LBP ? Ptr<CvFeatureEvaluator>(new CvLBPEvaluator) : 
+        type == CvFeatureParams::LBP ? Ptr<CvFeatureEvaluator>(new CvLBPEvaluator) :
         type == CvFeatureParams::HOG ? Ptr<CvFeatureEvaluator>(new CvHOGEvaluator) :
         Ptr<CvFeatureEvaluator>();
 }
index 6344af5..0298b42 100644 (file)
@@ -1,16 +1,19 @@
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
 #include "haarfeatures.h"
 #include "cascadeclassifier.h"
 
 using namespace std;
 
 CvHaarFeatureParams::CvHaarFeatureParams() : mode(BASIC)
-{ 
+{
     name = HFP_NAME;
 }
 
 CvHaarFeatureParams::CvHaarFeatureParams( int _mode ) : mode( _mode )
 {
-    name = HFP_NAME; 
+    name = HFP_NAME;
 }
 
 void CvHaarFeatureParams::init( const CvFeatureParams& fp )
@@ -22,7 +25,7 @@ void CvHaarFeatureParams::init( const CvFeatureParams& fp )
 void CvHaarFeatureParams::write( FileStorage &fs ) const
 {
     CvFeatureParams::write( fs );
-    String modeStr = mode == BASIC ? CC_MODE_BASIC : 
+    String modeStr = mode == BASIC ? CC_MODE_BASIC :
                      mode == CORE ? CC_MODE_CORE :
                      mode == ALL ? CC_MODE_ALL : String();
     CV_Assert( !modeStr.empty() );
@@ -55,7 +58,7 @@ void CvHaarFeatureParams::printDefaults() const
 void CvHaarFeatureParams::printAttrs() const
 {
     CvFeatureParams::printAttrs();
-    String mode_str = mode == BASIC ? CC_MODE_BASIC : 
+    String mode_str = mode == BASIC ? CC_MODE_BASIC :
                        mode == CORE ? CC_MODE_CORE :
                        mode == ALL ? CC_MODE_ALL : 0;
     cout << "mode: " <<  mode_str << endl;
@@ -156,7 +159,7 @@ void CvHaarEvaluator::generateFeatures()
                     if( mode != CvHaarFeatureParams::BASIC )
                     {
                         // haar_x4
-                        if ( (x+dx*4 <= winSize.width) && (y+dy <= winSize.height) ) 
+                        if ( (x+dx*4 <= winSize.width) && (y+dy <= winSize.height) )
                         {
                             features.push_back( Feature( offset, false,
                                 x,    y, dx*4, dy, -1,
@@ -171,61 +174,61 @@ void CvHaarEvaluator::generateFeatures()
                         }
                     }
                     // x2_y2
-                    if ( (x+dx*2 <= winSize.width) && (y+dy*2 <= winSize.height) ) 
+                    if ( (x+dx*2 <= winSize.width) && (y+dy*2 <= winSize.height) )
                     {
                         features.push_back( Feature( offset, false,
                             x,    y,    dx*2, dy*2, -1,
                             x,    y,    dx,   dy,   +2,
                             x+dx, y+dy, dx,   dy,   +2 ) );
                     }
-                    if (mode != CvHaarFeatureParams::BASIC) 
-                    {                
-                        if ( (x+dx*3 <= winSize.width) && (y+dy*3 <= winSize.height) ) 
+                    if (mode != CvHaarFeatureParams::BASIC)
+                    {
+                        if ( (x+dx*3 <= winSize.width) && (y+dy*3 <= winSize.height) )
                         {
                             features.push_back( Feature( offset, false,
                                 x   , y   , dx*3, dy*3, -1,
                                 x+dx, y+dy, dx  , dy  , +9) );
                         }
                     }
-                    if (mode == CvHaarFeatureParams::ALL) 
-                    {                
+                    if (mode == CvHaarFeatureParams::ALL)
+                    {
                         // tilted haar_x2
-                        if ( (x+2*dx <= winSize.width) && (y+2*dx+dy <= winSize.height) && (x-dy>= 0) ) 
+                        if ( (x+2*dx <= winSize.width) && (y+2*dx+dy <= winSize.height) && (x-dy>= 0) )
                         {
                             features.push_back( Feature( offset, true,
                                 x, y, dx*2, dy, -1,
                                 x, y, dx,   dy, +2 ) );
                         }
                         // tilted haar_y2
-                        if ( (x+dx <= winSize.width) && (y+dx+2*dy <= winSize.height) && (x-2*dy>= 0) ) 
+                        if ( (x+dx <= winSize.width) && (y+dx+2*dy <= winSize.height) && (x-2*dy>= 0) )
                         {
                             features.push_back( Feature( offset, true,
                                 x, y, dx, 2*dy, -1,
                                 x, y, dx, dy,   +2 ) );
                         }
                         // tilted haar_x3
-                        if ( (x+3*dx <= winSize.width) && (y+3*dx+dy <= winSize.height) && (x-dy>= 0) ) 
+                        if ( (x+3*dx <= winSize.width) && (y+3*dx+dy <= winSize.height) && (x-dy>= 0) )
                         {
                             features.push_back( Feature( offset, true,
                                 x,    y,    dx*3, dy, -1,
                                 x+dx, y+dx, dx,   dy, +3 ) );
                         }
                         // tilted haar_y3
-                        if ( (x+dx <= winSize.width) && (y+dx+3*dy <= winSize.height) && (x-3*dy>= 0) ) 
+                        if ( (x+dx <= winSize.width) && (y+dx+3*dy <= winSize.height) && (x-3*dy>= 0) )
                         {
                             features.push_back( Feature( offset, true,
                                 x,    y,    dx, 3*dy, -1,
                                 x-dy, y+dy, dx, dy,   +3 ) );
                         }
                         // tilted haar_x4
-                        if ( (x+4*dx <= winSize.width) && (y+4*dx+dy <= winSize.height) && (x-dy>= 0) ) 
+                        if ( (x+4*dx <= winSize.width) && (y+4*dx+dy <= winSize.height) && (x-dy>= 0) )
                         {
                             features.push_back( Feature( offset, true,
                                 x,    y,    dx*4, dy, -1,
                                 x+dx, y+dx, dx*2, dy, +2 ) );
                         }
                         // tilted haar_y4
-                        if ( (x+dx <= winSize.width) && (y+dx+4*dy <= winSize.height) && (x-4*dy>= 0) ) 
+                        if ( (x+dx <= winSize.width) && (y+dx+4*dy <= winSize.height) && (x-4*dy>= 0) )
                         {
                             features.push_back( Feature( offset, true,
                                 x,    y,    dx, 4*dy, -1,
@@ -296,7 +299,7 @@ void CvHaarEvaluator::Feature::write( FileStorage &fs ) const
     fs << CC_RECTS << "[";
     for( int ri = 0; ri < CV_HAAR_FEATURE_MAX && rect[ri].r.width != 0; ++ri )
     {
-        fs << "[:" << rect[ri].r.x << rect[ri].r.y << 
+        fs << "[:" << rect[ri].r.x << rect[ri].r.y <<
             rect[ri].r.width << rect[ri].r.height << rect[ri].weight << "]";
     }
     fs << "]" << CC_TILTED << tilted;
index 64089c6..3830a4b 100644 (file)
@@ -1,3 +1,6 @@
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
 #include "cv.h"
 #include "imagestorage.h"
 #include <stdio.h>
@@ -55,7 +58,7 @@ bool CvCascadeImageReader::NegReader::nextImg()
     for( size_t i = 0; i < count; i++ )
     {
         src = imread( imgFilenames[last++], 0 );
-        if( src.empty() ) 
+        if( src.empty() )
             continue;
         round += last / count;
         round = round % (winSize.width * winSize.height);
@@ -63,7 +66,7 @@ bool CvCascadeImageReader::NegReader::nextImg()
 
         _offset.x = min( (int)round % winSize.width, src.cols - winSize.width );
         _offset.y = min( (int)round / winSize.width, src.rows - winSize.height );
-        if( !src.empty() && src.type() == CV_8UC1 
+        if( !src.empty() && src.type() == CV_8UC1
                 && offset.x >= 0 && offset.y >= 0 )
             break;
     }
@@ -73,7 +76,7 @@ bool CvCascadeImageReader::NegReader::nextImg()
     point = offset = _offset;
     scale = max( ((float)winSize.width + point.x) / ((float)src.cols),
                  ((float)winSize.height + point.y) / ((float)src.rows) );
-    
+
     Size sz( (int)(scale*src.cols + 0.5F), (int)(scale*src.rows + 0.5F) );
     resize( src, img, sz );
     return true;
@@ -87,7 +90,7 @@ bool CvCascadeImageReader::NegReader::get( Mat& _img )
     CV_Assert( _img.rows == winSize.height );
 
     if( img.empty() )
-        if ( !nextImg() ) 
+        if ( !nextImg() )
             return false;
 
     Mat mat( winSize.height, winSize.width, CV_8UC1,
@@ -109,7 +112,7 @@ bool CvCascadeImageReader::NegReader::get( Mat& _img )
                 resize( src, img, Size( (int)(scale*src.cols), (int)(scale*src.rows) ) );
             else
             {
-                if ( !nextImg() ) 
+                if ( !nextImg() )
                     return false;
             }
         }
@@ -131,7 +134,7 @@ bool CvCascadeImageReader::PosReader::create( const String _filename )
 
     if( !file )
         return false;
-    short tmp = 0;  
+    short tmp = 0;
     if( fread( &count, sizeof( count ), 1, file ) != 1 ||
         fread( &vecSize, sizeof( vecSize ), 1, file ) != 1 ||
         fread( &tmp, sizeof( tmp ), 1, file ) != 1 ||
index ac1d23c..cf9bb7b 100644 (file)
@@ -1,3 +1,6 @@
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
 #include "lbpfeatures.h"
 #include "cascadeclassifier.h"
 
index 07dbe3e..5a969f4 100644 (file)
@@ -1,3 +1,6 @@
+#include "opencv2/core/core.hpp"
+#include "opencv2/core/internal.hpp"
+
 #include "cv.h"
 #include "cascadeclassifier.h"
 
@@ -13,11 +16,11 @@ int main( int argc, char* argv[] )
     int precalcValBufSize = 256,
         precalcIdxBufSize = 256;
     bool baseFormatSave = false;
-    
+
     CvCascadeParams cascadeParams;
     CvCascadeBoostParams stageParams;
     Ptr<CvFeatureParams> featureParams[] = { Ptr<CvFeatureParams>(new CvHaarFeatureParams),
-                                             Ptr<CvFeatureParams>(new CvLBPFeatureParams), 
+                                             Ptr<CvFeatureParams>(new CvLBPFeatureParams),
                                              Ptr<CvFeatureParams>(new CvHOGFeatureParams)
                                            };
     int fc = sizeof(featureParams)/sizeof(featureParams[0]);
@@ -85,7 +88,7 @@ int main( int argc, char* argv[] )
         {
             for( int fi = 0; fi < fc; fi++ )
             {
-                set = featureParams[fi]->scanAttr(argv[i], argv[i+1]);          
+                set = featureParams[fi]->scanAttr(argv[i], argv[i+1]);
                 if ( !set )
                 {
                     i++;
@@ -94,11 +97,11 @@ int main( int argc, char* argv[] )
             }
         }
     }
-  
+
     classifier.train( cascadeDirName,
                       vecName,
-                      bgName, 
-                      numPos, numNeg, 
+                      bgName,
+                      numPos, numNeg,
                       precalcValBufSize, precalcIdxBufSize,
                       numStages,
                       cascadeParams,
index e00128a..3a45b6d 100644 (file)
@@ -47,6 +47,9 @@ if(CMAKE_COMPILER_IS_GNUCXX)
   # High level of warnings.
   set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -Wall")
 
+  set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Werror=format-security")
+  set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -Wmissing-declarations -Wcast-align -Wundef -Winit-self -Wpointer-arith") #-Wstrict-aliasing=2
+
   # The -Wno-long-long is required in 64bit systems when including system headers.
   if(X86_64)
     set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} -Wno-long-long")
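
The two added flag lines above drive most of the source edits in this commit: the -Werror= group promotes a few high-value warnings to hard errors, while -Wmissing-declarations (and, for C, -Wmissing-prototypes added later in this file) warns whenever a function with external linkage is defined without a prior declaration. That is why so many file-local helpers in this diff gain static or inline. A minimal sketch of the pattern, using a hypothetical helper name:

    // g++ -Wall -Wmissing-declarations -c helpers.cpp
    #include <vector>

    int useIt();  // declared up front (normally in a header), so no warning here

    // File-local helper: 'static' gives it internal linkage, so the new
    // -Wmissing-declarations flag does not demand a header declaration for it.
    static int sumAll(const std::vector<int>& v)
    {
        int s = 0;
        for (size_t i = 0; i < v.size(); i++)
            s += v[i];
        return s;
    }

    int useIt()
    {
        // Dropping 'static' above would trigger something like:
        //   warning: no previous declaration for 'int sumAll(const std::vector<int>&)'
        return sumAll(std::vector<int>(3, 1));
    }
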
@@ -171,18 +174,18 @@ if(MSVC)
       set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE2")
     endif()
   endif()
-  
+
   if(ENABLE_SSE3)
     set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE3")
   endif()
   if(ENABLE_SSE4_1)
     set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /arch:SSE4.1")
   endif()
-  
+
   if(ENABLE_SSE OR ENABLE_SSE2 OR ENABLE_SSE3 OR ENABLE_SSE4_1)
     set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /Oi")
   endif()
-  
+
   if(X86 OR X86_64)
     if(CMAKE_SIZEOF_VOID_P EQUAL 4 AND ENABLE_SSE2)
       set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /fp:fast")# !! important - be on the same wave with x64 compilers
@@ -217,6 +220,10 @@ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OPENCV_EXTRA_EXE_LINKER_
 set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} ${OPENCV_EXTRA_EXE_LINKER_FLAGS_RELEASE}")
 set(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} ${OPENCV_EXTRA_EXE_LINKER_FLAGS_DEBUG}")
 
+if(CMAKE_COMPILER_IS_GNUCXX)
+  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wmissing-prototypes -Wstrict-prototypes")
+endif()
+
 if(MSVC)
   # avoid warnings from MSVC about overriding the /W* option
   # we replace /W3 with /W4 only for C++ files,
@@ -225,12 +232,12 @@ if(MSVC)
   string(REPLACE "/W3" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
   string(REPLACE "/W3" "/W4" CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}")
   string(REPLACE "/W3" "/W4" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}")
-  
+
   # allow extern "C" functions throw exceptions
   foreach(flags CMAKE_C_FLAGS CMAKE_C_FLAGS_RELEASE CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_DEBUG)
     string(REPLACE "/EHsc-" "/EHs" ${flags} "${${flags}}")
     string(REPLACE "/EHsc" "/EHs" ${flags} "${${flags}}")
-    
+
     string(REPLACE "/Zm1000" "" ${flags} "${${flags}}")
   endforeach()
 
index 7e7ef15..c22a44a 100644 (file)
@@ -72,6 +72,7 @@ MACRO(_PCH_WRITE_PCHDEP_CXX _targetName _include_file _dephelp)
         ADD_CUSTOM_COMMAND(
           OUTPUT "${${_dephelp}}"
           COMMAND ${CMAKE_COMMAND} -E echo "#include \\\"${_include_file}\\\"" >  "${${_dephelp}}"
+          COMMAND ${CMAKE_COMMAND} -E echo "int testfunction();"               >> "${${_dephelp}}"
           COMMAND ${CMAKE_COMMAND} -E echo "int testfunction()"                >> "${${_dephelp}}"
           COMMAND ${CMAKE_COMMAND} -E echo "{"                                 >> "${${_dephelp}}"
           COMMAND ${CMAKE_COMMAND} -E echo "    return 0;"                     >> "${${_dephelp}}"
@@ -82,6 +83,7 @@ MACRO(_PCH_WRITE_PCHDEP_CXX _targetName _include_file _dephelp)
         ADD_CUSTOM_COMMAND(
           OUTPUT "${${_dephelp}}"
           COMMAND ${CMAKE_COMMAND} -E echo "\\#include \\\"${_include_file}\\\"" >  "${${_dephelp}}"
+          COMMAND ${CMAKE_COMMAND} -E echo "int testfunction\\(\\)\\;"         >> "${${_dephelp}}"
           COMMAND ${CMAKE_COMMAND} -E echo "int testfunction\\(\\)"            >> "${${_dephelp}}"
           COMMAND ${CMAKE_COMMAND} -E echo "{"                                 >> "${${_dephelp}}"
           COMMAND ${CMAKE_COMMAND} -E echo "    \\return 0\\;"                 >> "${${_dephelp}}"
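
The extra echo commands add a prototype ahead of the definition so that the generated PCH dependency helper stays quiet under the new -Wmissing-declarations / -Wmissing-prototypes flags. After this change the generated helper file should look roughly like the following (the included header name is just a placeholder):

    #include "precomp.hpp"
    int testfunction();
    int testfunction()
    {
        return 0;
    }
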
index ccadd24..1616273 100644 (file)
@@ -6,7 +6,7 @@
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/imgproc/imgproc.hpp"
 
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
 #endif
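
The switch from #if to #ifdef is presumably tied to the newly enabled -Wundef: evaluating an identifier that was never defined inside an #if expression now produces a warning, whereas #ifdef only tests whether the macro exists. A small illustration with a hypothetical macro name:

    // g++ -Wundef -c example.cpp
    #if MY_FEATURE        // warns: "MY_FEATURE" is not defined
    #endif

    #ifdef MY_FEATURE     // fine: merely checks whether the macro is defined
    #endif
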
 
index 61e44a7..60e275d 100644 (file)
 #  endif
 #endif
 
-void icvGetQuadrangleHypotheses(CvSeq* contours, std::vector<std::pair<float, int> >& quads, int class_id)
+static void icvGetQuadrangleHypotheses(CvSeq* contours, std::vector<std::pair<float, int> >& quads, int class_id)
 {
     const float min_aspect_ratio = 0.3f;
     const float max_aspect_ratio = 3.0f;
     const float min_box_size = 10.0f;
-    
+
     for(CvSeq* seq = contours; seq != NULL; seq = seq->h_next)
     {
         CvBox2D box = cvMinAreaRect2(seq);
@@ -75,12 +75,12 @@ void icvGetQuadrangleHypotheses(CvSeq* contours, std::vector<std::pair<float, in
         {
             continue;
         }
-        
+
         quads.push_back(std::pair<float, int>(box_size, class_id));
     }
 }
 
-void countClasses(const std::vector<std::pair<float, int> >& pairs, size_t idx1, size_t idx2, std::vector<int>& counts)
+static void countClasses(const std::vector<std::pair<float, int> >& pairs, size_t idx1, size_t idx2, std::vector<int>& counts)
 {
     counts.assign(2, 0);
     for(size_t i = idx1; i != idx2; i++)
@@ -89,36 +89,36 @@ void countClasses(const std::vector<std::pair<float, int> >& pairs, size_t idx1,
     }
 }
 
-bool less_pred(const std::pair<float, int>& p1, const std::pair<float, int>& p2)
+inline bool less_pred(const std::pair<float, int>& p1, const std::pair<float, int>& p2)
 {
     return p1.first < p2.first;
 }
 
-// does a fast check if a chessboard is in the input image. This is a workaround to 
+// does a fast check if a chessboard is in the input image. This is a workaround to
 // a problem of cvFindChessboardCorners being slow on images with no chessboard
 // - src: input image
 // - size: chessboard size
-// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called, 
+// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
 // 0 if there is no chessboard, -1 in case of error
 int cvCheckChessboard(IplImage* src, CvSize size)
 {
     if(src->nChannels > 1)
     {
-        cvError(CV_BadNumChannels, "cvCheckChessboard", "supports single-channel images only", 
+        cvError(CV_BadNumChannels, "cvCheckChessboard", "supports single-channel images only",
                 __FILE__, __LINE__);
     }
-    
+
     if(src->depth != 8)
     {
-        cvError(CV_BadDepth, "cvCheckChessboard", "supports depth=8 images only", 
+        cvError(CV_BadDepth, "cvCheckChessboard", "supports depth=8 images only",
                 __FILE__, __LINE__);
     }
-    
+
     const int erosion_count = 1;
     const float black_level = 20.f;
     const float white_level = 130.f;
     const float black_white_gap = 70.f;
-    
+
 #if defined(DEBUG_WINDOWS)
     cvNamedWindow("1", 1);
     cvShowImage("1", src);
@@ -126,46 +126,46 @@ int cvCheckChessboard(IplImage* src, CvSize size)
 #endif //DEBUG_WINDOWS
 
     CvMemStorage* storage = cvCreateMemStorage();
-    
+
     IplImage* white = cvCloneImage(src);
     IplImage* black = cvCloneImage(src);
-        
+
     cvErode(white, white, NULL, erosion_count);
     cvDilate(black, black, NULL, erosion_count);
     IplImage* thresh = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
-    
+
     int result = 0;
     for(float thresh_level = black_level; thresh_level < white_level && !result; thresh_level += 20.0f)
     {
         cvThreshold(white, thresh, thresh_level + black_white_gap, 255, CV_THRESH_BINARY);
-        
+
 #if defined(DEBUG_WINDOWS)
         cvShowImage("1", thresh);
         cvWaitKey(0);
 #endif //DEBUG_WINDOWS
-        
+
         CvSeq* first = 0;
         std::vector<std::pair<float, int> > quads;
-        cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);        
+        cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
         icvGetQuadrangleHypotheses(first, quads, 1);
-        
+
         cvThreshold(black, thresh, thresh_level, 255, CV_THRESH_BINARY_INV);
-        
+
 #if defined(DEBUG_WINDOWS)
         cvShowImage("1", thresh);
         cvWaitKey(0);
 #endif //DEBUG_WINDOWS
-        
+
         cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
         icvGetQuadrangleHypotheses(first, quads, 0);
-        
+
         const size_t min_quads_count = size.width*size.height/2;
         std::sort(quads.begin(), quads.end(), less_pred);
-        
+
         // now check if there are many hypotheses with similar sizes
         // do this by floodfill-style algorithm
         const float size_rel_dev = 0.4f;
-        
+
         for(size_t i = 0; i < quads.size(); i++)
         {
             size_t j = i + 1;
@@ -176,7 +176,7 @@ int cvCheckChessboard(IplImage* src, CvSize size)
                     break;
                 }
             }
-            
+
             if(j + 1 > min_quads_count + i)
             {
                 // check the number of black and white squares
@@ -194,12 +194,12 @@ int cvCheckChessboard(IplImage* src, CvSize size)
             }
         }
     }
-    
-    
+
+
     cvReleaseImage(&thresh);
     cvReleaseImage(&white);
     cvReleaseImage(&black);
     cvReleaseMemStorage(&storage);
-    
+
     return result;
 }
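
The comment block in this file states the intended workflow: call cvCheckChessboard() first and only run the much slower corner detection when it reports that a board may be present. A hedged usage sketch with the legacy C API (the image path and the 9x6 pattern size are illustrative):

    #include "opencv2/calib3d/calib3d.hpp"
    #include "opencv2/highgui/highgui.hpp"

    int main()
    {
        // Assumes a chessboard with 9x6 inner corners; replace the path as needed.
        IplImage* gray = cvLoadImage("frame.png", CV_LOAD_IMAGE_GRAYSCALE);
        if (!gray)
            return -1;

        CvSize patternSize = cvSize(9, 6);
        if (cvCheckChessboard(gray, patternSize) > 0)   // cheap pre-check
        {
            CvPoint2D32f corners[9 * 6];
            int cornerCount = 0;
            cvFindChessboardCorners(gray, patternSize, corners, &cornerCount,
                                    CV_CALIB_CB_ADAPTIVE_THRESH);
        }
        cvReleaseImage(&gray);
        return 0;
    }
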
index 870a657..f729919 100644 (file)
@@ -1223,7 +1223,7 @@ void computePredecessorMatrix(const Mat &dm, int verticesCount, Mat &predecessor
   }
 }
 
-void computeShortestPath(Mat &predecessorMatrix, size_t v1, size_t v2, vector<size_t> &path)
+static void computeShortestPath(Mat &predecessorMatrix, size_t v1, size_t v2, vector<size_t> &path)
 {
   if (predecessorMatrix.at<int> ((int)v1, (int)v2) < 0)
   {
@@ -1403,7 +1403,7 @@ void CirclesGridFinder::getHoles(vector<Point2f> &outHoles) const
   }
 }
 
-bool areIndicesCorrect(Point pos, vector<vector<size_t> > *points)
+static bool areIndicesCorrect(Point pos, vector<vector<size_t> > *points)
 {
   if (pos.y < 0 || pos.x < 0)
     return false;
index 50b4d40..14bcfa3 100644 (file)
 #ifndef __OPENCV_PRECOMP_H__
 #define __OPENCV_PRECOMP_H__
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4251 4710 4711 4514 4996 )
 #endif
 
-#ifdef HAVE_CVCONFIG_H 
+#ifdef HAVE_CVCONFIG_H
 #include "cvconfig.h"
 #endif
 
index 35257fb..2aec9d6 100644 (file)
 #undef max
 
 namespace cv {
-    
-    
-void drawCircles(Mat& img, const vector<Point2f>& corners, const vector<float>& radius)
-{
-    for(size_t i = 0; i < corners.size(); i++)
-    {
-        circle(img, corners[i], cvRound(radius[i]), CV_RGB(255, 0, 0));
-    }
-}
-    
-int histQuantile(const Mat& hist, float quantile)
-{
-    if(hist.dims > 1) return -1; // works for 1D histograms only
-    
-    float cur_sum = 0;
-    float total_sum = (float)sum(hist).val[0];
-    float quantile_sum = total_sum*quantile;
-    for(int j = 0; j < hist.size[0]; j++)
-    {
-        cur_sum += (float)hist.at<float>(j);
-        if(cur_sum > quantile_sum)
-        {
-            return j;
-        }
-    }
-    
-    return hist.size[0] - 1;
-}
-    
-bool is_smaller(const std::pair<int, float>& p1, const std::pair<int, float>& p2)
+
+
+// static void drawCircles(Mat& img, const vector<Point2f>& corners, const vector<float>& radius)
+// {
+//     for(size_t i = 0; i < corners.size(); i++)
+//     {
+//         circle(img, corners[i], cvRound(radius[i]), CV_RGB(255, 0, 0));
+//     }
+// }
+
+// static int histQuantile(const Mat& hist, float quantile)
+// {
+//     if(hist.dims > 1) return -1; // works for 1D histograms only
+
+//     float cur_sum = 0;
+//     float total_sum = (float)sum(hist).val[0];
+//     float quantile_sum = total_sum*quantile;
+//     for(int j = 0; j < hist.size[0]; j++)
+//     {
+//         cur_sum += (float)hist.at<float>(j);
+//         if(cur_sum > quantile_sum)
+//         {
+//             return j;
+//         }
+//     }
+
+//     return hist.size[0] - 1;
+// }
+
+inline bool is_smaller(const std::pair<int, float>& p1, const std::pair<int, float>& p2)
 {
     return p1.second < p2.second;
 }
 
-void orderContours(const vector<vector<Point> >& contours, Point2f point, vector<std::pair<int, float> >& order)
+static void orderContours(const vector<vector<Point> >& contours, Point2f point, vector<std::pair<int, float> >& order)
 {
     order.clear();
     size_t i, j, n = contours.size();
@@ -101,58 +101,58 @@ void orderContours(const vector<vector<Point> >& contours, Point2f point, vector
         }
         order.push_back(std::pair<int, float>((int)i, (float)min_dist));
     }
-    
+
     std::sort(order.begin(), order.end(), is_smaller);
 }
 
 // fit second order curve to a set of 2D points
-void fitCurve2Order(const vector<Point2f>& /*points*/, vector<float>& /*curve*/)
+inline void fitCurve2Order(const vector<Point2f>& /*points*/, vector<float>& /*curve*/)
 {
     // TBD
 }
-    
-void findCurvesCross(const vector<float>& /*curve1*/, const vector<float>& /*curve2*/, Point2f& /*cross_point*/)
+
+inline void findCurvesCross(const vector<float>& /*curve1*/, const vector<float>& /*curve2*/, Point2f& /*cross_point*/)
 {
 }
-    
-void findLinesCrossPoint(Point2f origin1, Point2f dir1, Point2f origin2, Point2f dir2, Point2f& cross_point)
+
+static void findLinesCrossPoint(Point2f origin1, Point2f dir1, Point2f origin2, Point2f dir2, Point2f& cross_point)
 {
     float det = dir2.x*dir1.y - dir2.y*dir1.x;
     Point2f offset = origin2 - origin1;
-    
+
     float alpha = (dir2.x*offset.y - dir2.y*offset.x)/det;
     cross_point = origin1 + dir1*alpha;
 }
-    
-void findCorner(const vector<Point>& contour, Point2f point, Point2f& corner)
-{
-    // find the nearest point
-    double min_dist = std::numeric_limits<double>::max();
-    int min_idx = -1;
-    
-    // find corner idx
-    for(size_t i = 0; i < contour.size(); i++)
-    {
-        double dist = norm(Point2f((float)contour[i].x, (float)contour[i].y) - point);
-        if(dist < min_dist)
-        {
-            min_dist = dist;
-            min_idx = (int)i;
-        }
-    }
-    assert(min_idx >= 0);
-    
-    // temporary solution, have to make something more precise
-    corner = contour[min_idx];
-    return;
-}
 
-void findCorner(const vector<Point2f>& contour, Point2f point, Point2f& corner)
+// static void findCorner(const vector<Point>& contour, Point2f point, Point2f& corner)
+// {
+//     // find the nearest point
+//     double min_dist = std::numeric_limits<double>::max();
+//     int min_idx = -1;
+
+//     // find corner idx
+//     for(size_t i = 0; i < contour.size(); i++)
+//     {
+//         double dist = norm(Point2f((float)contour[i].x, (float)contour[i].y) - point);
+//         if(dist < min_dist)
+//         {
+//             min_dist = dist;
+//             min_idx = (int)i;
+//         }
+//     }
+//     assert(min_idx >= 0);
+
+//     // temporary solution, have to make something more precise
+//     corner = contour[min_idx];
+//     return;
+// }
+
+static void findCorner(const vector<Point2f>& contour, Point2f point, Point2f& corner)
 {
     // find the nearest point
     double min_dist = std::numeric_limits<double>::max();
     int min_idx = -1;
-    
+
     // find corner idx
     for(size_t i = 0; i < contour.size(); i++)
     {
@@ -164,23 +164,23 @@ void findCorner(const vector<Point2f>& contour, Point2f point, Point2f& corner)
         }
     }
     assert(min_idx >= 0);
-    
+
     // temporary solution, have to make something more precise
     corner = contour[min_idx];
     return;
 }
-    
-int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
+
+static int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
 {
     Mat bw;
     //const double max_bell_width = 20; // we expect two bells with width bounded above
     //const double min_bell_width = 5; // and below
-    
+
     double total_sum = sum(hist).val[0];
     //double thresh = total_sum/(2*max_bell_width)*0.25f; // quarter of a bar inside a bell
-    
+
 //    threshold(hist, bw, thresh, 255.0, CV_THRESH_BINARY);
-    
+
     double quantile_sum = 0.0;
     //double min_quantile = 0.2;
     double low_sum = 0;
@@ -193,7 +193,7 @@ int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
     {
         quantile_sum += hist.at<float>(x);
         if(quantile_sum < 0.2*total_sum) continue;
-        
+
         if(quantile_sum - low_sum > out_of_bells_fraction*total_sum)
         {
             if(max_segment_length < x - start_x)
@@ -207,7 +207,7 @@ int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
             start_x = x;
         }
     }
-    
+
     if(start_x == -1)
     {
         return 0;
@@ -219,9 +219,9 @@ int segment_hist_max(const Mat& hist, int& low_thresh, int& high_thresh)
         return 1;
     }
 }
+
 }
-    
+
 bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size region_size)
 {
     Mat img = _img.getMat(), cornersM = _corners.getMat();
@@ -232,22 +232,22 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
     float ranges[] = {0, 256};
     const float* _ranges = ranges;
     Mat hist;
-    
+
 #if defined(_SUBPIX_VERBOSE)
     vector<float> radius;
     radius.assign(corners.size(), 0.0f);
 #endif //_SUBPIX_VERBOSE
-    
-    
+
+
     Mat black_comp, white_comp;
     for(int i = 0; i < ncorners; i++)
-    {        
+    {
         int channels = 0;
         Rect roi(cvRound(corners[i].x - region_size.width), cvRound(corners[i].y - region_size.height),
             region_size.width*2 + 1, region_size.height*2 + 1);
         Mat img_roi = img(roi);
         calcHist(&img_roi, 1, &channels, Mat(), hist, 1, &nbins, &_ranges);
-        
+
 #if 0
         int black_thresh = histQuantile(hist, 0.45f);
         int white_thresh = histQuantile(hist, 0.55f);
@@ -255,10 +255,10 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
         int black_thresh, white_thresh;
         segment_hist_max(hist, black_thresh, white_thresh);
 #endif
-        
+
         threshold(img, black_comp, black_thresh, 255.0, CV_THRESH_BINARY_INV);
         threshold(img, white_comp, white_thresh, 255.0, CV_THRESH_BINARY);
-        
+
         const int erode_count = 1;
         erode(black_comp, black_comp, Mat(), Point(-1, -1), erode_count);
         erode(white_comp, white_comp, Mat(), Point(-1, -1), erode_count);
@@ -275,28 +275,28 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
         imwrite("black.jpg", black_comp);
         imwrite("white.jpg", white_comp);
 #endif
-        
-        
+
+
         vector<vector<Point> > white_contours, black_contours;
         vector<Vec4i> white_hierarchy, black_hierarchy;
         findContours(black_comp, black_contours, black_hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
         findContours(white_comp, white_contours, white_hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
-        
+
         if(black_contours.size() < 5 || white_contours.size() < 5) continue;
-        
+
         // find two white and black blobs that are close to the input point
         vector<std::pair<int, float> > white_order, black_order;
         orderContours(black_contours, corners[i], black_order);
         orderContours(white_contours, corners[i], white_order);
 
         const float max_dist = 10.0f;
-        if(black_order[0].second > max_dist || black_order[1].second > max_dist || 
+        if(black_order[0].second > max_dist || black_order[1].second > max_dist ||
            white_order[0].second > max_dist || white_order[1].second > max_dist)
         {
             continue; // there will be no improvement in this corner position
         }
-        
-        const vector<Point>* quads[4] = {&black_contours[black_order[0].first], &black_contours[black_order[1].first], 
+
+        const vector<Point>* quads[4] = {&black_contours[black_order[0].first], &black_contours[black_order[1].first],
                                          &white_contours[white_order[0].first], &white_contours[white_order[1].first]};
         vector<Point2f> quads_approx[4];
         Point2f quad_corners[4];
@@ -306,14 +306,14 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
             vector<Point2f> temp;
             for(size_t j = 0; j < quads[k]->size(); j++) temp.push_back((*quads[k])[j]);
             approxPolyDP(Mat(temp), quads_approx[k], 0.5, true);
-            
+
             findCorner(quads_approx[k], corners[i], quad_corners[k]);
 #else
             findCorner(*quads[k], corners[i], quad_corners[k]);
 #endif
             quad_corners[k] += Point2f(0.5f, 0.5f);
         }
-        
+
         // cross two lines
         Point2f origin1 = quad_corners[0];
         Point2f dir1 = quad_corners[1] - quad_corners[0];
@@ -321,12 +321,12 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
         Point2f dir2 = quad_corners[3] - quad_corners[2];
         double angle = acos(dir1.dot(dir2)/(norm(dir1)*norm(dir2)));
         if(cvIsNaN(angle) || cvIsInf(angle) || angle < 0.5 || angle > CV_PI - 0.5) continue;
-           
+
         findLinesCrossPoint(origin1, dir1, origin2, dir2, corners[i]);
-      
+
 #if defined(_SUBPIX_VERBOSE)
         radius[i] = norm(corners[i] - ground_truth_corners[ground_truth_idx])*6;
-        
+
 #if 1
         Mat test(img.size(), CV_32FC3);
         cvtColor(img, test, CV_GRAY2RGB);
@@ -349,9 +349,9 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
         waitKey(0);
 #endif
 #endif //_SUBPIX_VERBOSE
-        
+
     }
-    
+
 #if defined(_SUBPIX_VERBOSE)
     Mat test(img.size(), CV_32FC3);
     cvtColor(img, test, CV_GRAY2RGB);
@@ -361,6 +361,6 @@ bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size
     imshow("corners", test);
     waitKey();
 #endif //_SUBPIX_VERBOSE
-    
+
     return true;
 }
index 9d0f621..4465339 100644 (file)
@@ -52,48 +52,48 @@ bool cv::solvePnP( InputArray _opoints, InputArray _ipoints,
 {
     Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
     int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
-    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) ); 
+    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
     _rvec.create(3, 1, CV_64F);
     _tvec.create(3, 1, CV_64F);
     Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
 
     if (flags == CV_EPNP)
     {
-               cv::Mat undistortedPoints;
-               cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
-               epnp PnP(cameraMatrix, opoints, undistortedPoints);
-                               
+        cv::Mat undistortedPoints;
+        cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
+        epnp PnP(cameraMatrix, opoints, undistortedPoints);
+
         cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
         PnP.compute_pose(R, tvec);
         cv::Rodrigues(R, rvec);
-               return true;
-       }
-       else if (flags == CV_P3P) 
-       {
-               CV_Assert( npoints == 4);
-               cv::Mat undistortedPoints;
-               cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
-               p3p P3Psolver(cameraMatrix);
+        return true;
+    }
+    else if (flags == CV_P3P)
+    {
+        CV_Assert( npoints == 4);
+        cv::Mat undistortedPoints;
+        cv::undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
+        p3p P3Psolver(cameraMatrix);
 
         cv::Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
         bool result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
         if (result)
-                       cv::Rodrigues(R, rvec);
-               return result;
-       }
-       else if (flags == CV_ITERATIVE) 
-       {
-               CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
-               CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
-               CvMat c_rvec = _rvec.getMat(), c_tvec = _tvec.getMat();
-               cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
-                                                                        c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
-                                                                        &c_rvec, &c_tvec, useExtrinsicGuess );
-               return true;
-       }
-       else 
+            cv::Rodrigues(R, rvec);
+        return result;
+    }
+    else if (flags == CV_ITERATIVE)
+    {
+        CvMat c_objectPoints = opoints, c_imagePoints = ipoints;
+        CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
+        CvMat c_rvec = _rvec.getMat(), c_tvec = _tvec.getMat();
+        cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
+                                     c_distCoeffs.rows*c_distCoeffs.cols ? &c_distCoeffs : 0,
+                                     &c_rvec, &c_tvec, useExtrinsicGuess );
+        return true;
+    }
+    else
         CV_Error(CV_StsBadArg, "The flags argument must be one of CV_ITERATIVE or CV_EPNP");
-       return false;
+    return false;
 }
 
 namespace cv
@@ -101,8 +101,8 @@ namespace cv
     namespace pnpransac
     {
         const int MIN_POINTS_COUNT = 4;
-        
-        void project3dPoints(const Mat& points, const Mat& rvec, const Mat& tvec, Mat& modif_points)
+
+        static void project3dPoints(const Mat& points, const Mat& rvec, const Mat& tvec, Mat& modif_points)
         {
             modif_points.create(1, points.cols, CV_32FC3);
             Mat R(3, 3, CV_64FC1);
@@ -114,32 +114,32 @@ namespace cv
             tvec.copyTo(t);
             transform(points, modif_points, transformation);
         }
-        
+
         class Mutex
         {
         public:
             Mutex() {
-                       }
+            }
             void lock()
             {
 #ifdef HAVE_TBB
-                               resultsMutex.lock();
+                resultsMutex.lock();
 #endif
             }
-            
+
             void unlock()
             {
 #ifdef HAVE_TBB
                 resultsMutex.unlock();
 #endif
             }
-            
+
         private:
 #ifdef HAVE_TBB
             tbb::mutex resultsMutex;
 #endif
         };
-        
+
         struct CameraParameters
         {
             void init(Mat _intrinsics, Mat _distCoeffs)
@@ -147,22 +147,22 @@ namespace cv
                 _intrinsics.copyTo(intrinsics);
                 _distCoeffs.copyTo(distortion);
             }
-            
+
             Mat intrinsics;
             Mat distortion;
         };
-        
+
         struct Parameters
         {
             int iterationsCount;
             float reprojectionError;
             int minInliersCount;
             bool useExtrinsicGuess;
-                       int flags;
+            int flags;
             CameraParameters camera;
         };
-        
-        void pnpTask(const vector<char>& pointsMask, const Mat& objectPoints, const Mat& imagePoints,
+
+        static void pnpTask(const vector<char>& pointsMask, const Mat& objectPoints, const Mat& imagePoints,
                      const Parameters& params, vector<int>& inliers, Mat& rvec, Mat& tvec,
                      const Mat& rvecInit, const Mat& tvecInit, Mutex& resultsMutex)
         {
@@ -178,7 +178,7 @@ namespace cv
                     colIndex = colIndex+1;
                 }
             }
-            
+
             //filter same 3d points, hang in solvePnP
             double eps = 1e-10;
             int num_same_points = 0;
@@ -190,22 +190,22 @@ namespace cv
                 }
             if (num_same_points > 0)
                 return;
-            
+
             Mat localRvec, localTvec;
             rvecInit.copyTo(localRvec);
             tvecInit.copyTo(localTvec);
-        
-                   solvePnP(modelObjectPoints, modelImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec,
-                                    params.useExtrinsicGuess, params.flags);
-               
-            
+
+            solvePnP(modelObjectPoints, modelImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec,
+                     params.useExtrinsicGuess, params.flags);
+
+
             vector<Point2f> projected_points;
             projected_points.resize(objectPoints.cols);
             projectPoints(objectPoints, localRvec, localTvec, params.camera.intrinsics, params.camera.distortion, projected_points);
-            
+
             Mat rotatedPoints;
             project3dPoints(objectPoints, localRvec, localTvec, rotatedPoints);
-            
+
             vector<int> localInliers;
             for (int i = 0; i < objectPoints.cols; i++)
             {
@@ -216,21 +216,21 @@ namespace cv
                     localInliers.push_back(i);
                 }
             }
-            
+
             if (localInliers.size() > inliers.size())
             {
                 resultsMutex.lock();
-                
+
                 inliers.clear();
                 inliers.resize(localInliers.size());
                 memcpy(&inliers[0], &localInliers[0], sizeof(int) * localInliers.size());
                 localRvec.copyTo(rvec);
                 localTvec.copyTo(tvec);
-                
+
                 resultsMutex.unlock();
             }
         }
-        
+
         class PnPSolver
         {
         public:
@@ -262,18 +262,18 @@ namespace cv
                 tvec.copyTo(initTvec);
             }
         private:
-                       PnPSolver& operator=(const PnPSolver&);
-                       
+            PnPSolver& operator=(const PnPSolver&);
+
             const Mat& objectPoints;
             const Mat& imagePoints;
             const Parameters& parameters;
             Mat &rvec, &tvec;
             vector<int>& inliers;
             Mat initRvec, initTvec;
-            
+
             static RNG generator;
             static Mutex syncMutex;
-            
+
             void generateVar(vector<char>& mask) const
             {
                 int size = (int)mask.size();
@@ -287,10 +287,10 @@ namespace cv
                 }
             }
         };
-        
+
         Mutex PnPSolver::syncMutex;
         RNG PnPSolver::generator;
-        
+
     }
 }
 
@@ -302,21 +302,21 @@ void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
 {
     Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
     Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
-    
+
     CV_Assert(opoints.isContinuous());
     CV_Assert(opoints.depth() == CV_32F);
     CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
     CV_Assert(ipoints.isContinuous());
     CV_Assert(ipoints.depth() == CV_32F);
     CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);
-    
+
     _rvec.create(3, 1, CV_64FC1);
     _tvec.create(3, 1, CV_64FC1);
     Mat rvec = _rvec.getMat();
     Mat tvec = _tvec.getMat();
-    
+
     Mat objectPoints = opoints.reshape(3, 1), imagePoints = ipoints.reshape(2, 1);
-    
+
     if (minInliersCount <= 0)
         minInliersCount = objectPoints.cols;
     cv::pnpransac::Parameters params;
@@ -325,36 +325,36 @@ void cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
     params.reprojectionError = reprojectionError;
     params.useExtrinsicGuess = useExtrinsicGuess;
     params.camera.init(cameraMatrix, distCoeffs);
-       params.flags = flags;
-    
+    params.flags = flags;
+
     vector<int> localInliers;
     Mat localRvec, localTvec;
     rvec.copyTo(localRvec);
     tvec.copyTo(localTvec);
-    
+
     if (objectPoints.cols >= pnpransac::MIN_POINTS_COUNT)
     {
         parallel_for(BlockedRange(0,iterationsCount), cv::pnpransac::PnPSolver(objectPoints, imagePoints, params,
                                                                                localRvec, localTvec, localInliers));
     }
-    
+
     if (localInliers.size() >= (size_t)pnpransac::MIN_POINTS_COUNT)
     {
-               if (flags != CV_P3P)
-               {
-                       int i, pointsCount = (int)localInliers.size();
-                       Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);
-                       for (i = 0; i < pointsCount; i++)
-                       {
-                               int index = localInliers[i];
-                               Mat colInlierImagePoints = inlierImagePoints(Rect(i, 0, 1, 1));
-                               imagePoints.col(index).copyTo(colInlierImagePoints);
-                               Mat colInlierObjectPoints = inlierObjectPoints(Rect(i, 0, 1, 1));
-                               objectPoints.col(index).copyTo(colInlierObjectPoints);
-                       }
-                       solvePnP(inlierObjectPoints, inlierImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec, true, flags);
-               }
-               localRvec.copyTo(rvec);
+        if (flags != CV_P3P)
+        {
+            int i, pointsCount = (int)localInliers.size();
+            Mat inlierObjectPoints(1, pointsCount, CV_32FC3), inlierImagePoints(1, pointsCount, CV_32FC2);
+            for (i = 0; i < pointsCount; i++)
+            {
+                int index = localInliers[i];
+                Mat colInlierImagePoints = inlierImagePoints(Rect(i, 0, 1, 1));
+                imagePoints.col(index).copyTo(colInlierImagePoints);
+                Mat colInlierObjectPoints = inlierObjectPoints(Rect(i, 0, 1, 1));
+                objectPoints.col(index).copyTo(colInlierObjectPoints);
+            }
+            solvePnP(inlierObjectPoints, inlierImagePoints, params.camera.intrinsics, params.camera.distortion, localRvec, localTvec, true, flags);
+        }
+        localRvec.copyTo(rvec);
         localTvec.copyTo(tvec);
         if (_inliers.needed())
             Mat(localInliers).copyTo(_inliers);
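
The edits in this file normalize tab indentation and mark the internal helpers (project3dPoints, pnpTask) static; the three solver branches (CV_ITERATIVE, CV_EPNP, CV_P3P) keep their logic. For orientation, a hedged sketch of calling the C++ entry point reformatted above; all inputs are placeholders and must carry at least four real 3D-2D correspondences plus calibration data:

    #include "opencv2/calib3d/calib3d.hpp"
    #include <vector>

    // Sketch only: the caller must supply real correspondences and intrinsics.
    void poseSketch(const std::vector<cv::Point3f>& objectPoints, // model points
                    const std::vector<cv::Point2f>& imagePoints,  // their projections
                    const cv::Mat& cameraMatrix,                  // 3x3 intrinsics
                    const cv::Mat& distCoeffs)                    // distortion, may be empty
    {
        cv::Mat rvec, tvec;
        // flags picks the branch seen in the diff: CV_ITERATIVE, CV_EPNP or CV_P3P.
        cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                     rvec, tvec, false /* useExtrinsicGuess */, CV_ITERATIVE);
    }
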
index 636ba59..f08bdda 100644 (file)
@@ -55,36 +55,36 @@ LevMarqSparse::LevMarqSparse() {
 
 LevMarqSparse::~LevMarqSparse() {
   clear();
-} 
+}
 
 LevMarqSparse::LevMarqSparse(int npoints, // number of points
-                            int ncameras, // number of cameras
-                            int nPointParams, // number of params per one point  (3 in case of 3D points)
-                            int nCameraParams, // number of parameters per one camera
-                            int nErrParams, // number of parameters in measurement vector
-                            // for 1 point at one camera (2 in case of 2D projections)
-                            Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
-                            // 1 - point is visible for the camera, 0 - invisible
-                            Mat& P0, // starting vector of parameters, first cameras then points
-                            Mat& X_, // measurements, in order of visibility. non visible cases are skipped 
-                            TermCriteria criteria, // termination criteria
-        
-                            // callback for estimation of Jacobian matrices
-                            void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
-                                                   Mat& cam_params, Mat& A, Mat& B, void* data),
-                            // callback for estimation of backprojection errors
-                            void (CV_CDECL * func)(int i, int j, Mat& point_params,
-                                                   Mat& cam_params, Mat& estim, void* data),
-                            void* data, // user-specific data passed to the callbacks
-                            BundleAdjustCallback _cb, void* _user_data
-                            ) {
+           int ncameras, // number of cameras
+           int nPointParams, // number of params per one point  (3 in case of 3D points)
+           int nCameraParams, // number of parameters per one camera
+           int nErrParams, // number of parameters in measurement vector
+           // for 1 point at one camera (2 in case of 2D projections)
+           Mat& visibility, // visibility matrix. rows correspond to points, columns correspond to cameras
+           // 1 - point is visible for the camera, 0 - invisible
+           Mat& P0, // starting vector of parameters, first cameras then points
+           Mat& X_, // measurements, in order of visibility. non visible cases are skipped
+           TermCriteria criteria, // termination criteria
+
+           // callback for estimation of Jacobian matrices
+           void (CV_CDECL * fjac)(int i, int j, Mat& point_params,
+                Mat& cam_params, Mat& A, Mat& B, void* data),
+           // callback for estimation of backprojection errors
+           void (CV_CDECL * func)(int i, int j, Mat& point_params,
+                Mat& cam_params, Mat& estim, void* data),
+           void* data, // user-specific data passed to the callbacks
+           BundleAdjustCallback _cb, void* _user_data
+           ) {
   Vis_index = X = prevP = P = deltaP = err = JtJ_diag = S = hX = NULL;
   U = ea = V = inv_V_star = eb = Yj = NULL;
   A = B = W = NULL;
 
   cb = _cb;
   user_data = _user_data;
-    
+
   run(npoints, ncameras, nPointParams, nCameraParams, nErrParams, visibility,
       P0, X_, criteria, fjac, func, data);
 }
@@ -95,19 +95,19 @@ void LevMarqSparse::clear() {
       //CvMat* tmp = ((CvMat**)(A->data.ptr + i * A->step))[j];
       CvMat* tmp = A[j+i*num_cams];
       if (tmp)
-       cvReleaseMat( &tmp );
+  cvReleaseMat( &tmp );
 
       //tmp = ((CvMat**)(B->data.ptr + i * B->step))[j];
       tmp  = B[j+i*num_cams];
       if (tmp)
-       cvReleaseMat( &tmp );
-                 
+  cvReleaseMat( &tmp );
+
       //tmp = ((CvMat**)(W->data.ptr + j * W->step))[i];
       tmp  = W[j+i*num_cams];
       if (tmp)
-       cvReleaseMat( &tmp ); 
+  cvReleaseMat( &tmp );
     }
-  }   
+  }
   delete A; //cvReleaseMat(&A);
   delete B;//cvReleaseMat(&B);
   delete W;//cvReleaseMat(&W);
@@ -122,7 +122,7 @@ void LevMarqSparse::clear() {
     cvReleaseMat( &ea[j] );
   }
   delete ea;
-     
+
   //allocate V and inv_V_star
   for( int i = 0; i < num_points; i++ ) {
     cvReleaseMat(&V[i]);
@@ -138,16 +138,16 @@ void LevMarqSparse::clear() {
 
   for( int i = 0; i < num_points; i++ ) {
     cvReleaseMat(&Yj[i]);
-  }   
+  }
   delete Yj;
-     
+
   cvReleaseMat(&X);
   cvReleaseMat(&prevP);
   cvReleaseMat(&P);
   cvReleaseMat(&deltaP);
 
-  cvReleaseMat(&err);      
-    
+  cvReleaseMat(&err);
+
   cvReleaseMat(&JtJ_diag);
   cvReleaseMat(&S);
   cvReleaseMat(&hX);
@@ -165,28 +165,28 @@ void LevMarqSparse::clear() {
 //num_errors - number of measurements.
 
 void LevMarqSparse::run( int num_points_, //number of points
-                        int num_cams_, //number of cameras
-                        int num_point_param_, //number of params per one point  (3 in case of 3D points)
-                        int num_cam_param_, //number of parameters per one camera
-                        int num_err_param_, //number of parameters in measurement vector for 1 point at one camera (2 in case of 2D projections)
-                        Mat& visibility,   //visibility matrix . rows correspond to points, columns correspond to cameras
-                        // 0 - point is visible for the camera, 0 - invisible
-                        Mat& P0, //starting vector of parameters, first cameras then points
-                        Mat& X_init, //measurements, in order of visibility. non visible cases are skipped 
-                        TermCriteria criteria_init,
-                        void (*fjac_)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data),
-                        void (*func_)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data),
-                        void* data_
-                        ) { //termination criteria
+       int num_cams_, //number of cameras
+       int num_point_param_, //number of params per one point  (3 in case of 3D points)
+       int num_cam_param_, //number of parameters per one camera
+       int num_err_param_, //number of parameters in measurement vector for 1 point at one camera (2 in case of 2D projections)
+       Mat& visibility,   //visibility matrix. rows correspond to points, columns correspond to cameras
+       // 1 - point is visible for the camera, 0 - invisible
+       Mat& P0, //starting vector of parameters, first cameras then points
+       Mat& X_init, //measurements, in order of visibility. non visible cases are skipped
+       TermCriteria criteria_init,
+       void (*fjac_)(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data),
+       void (*func_)(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data),
+       void* data_
+       ) { //termination criteria
   //clear();
-    
+
   func = func_; //assign evaluation function
   fjac = fjac_; //assign jacobian
   data = data_;
 
   num_cams = num_cams_;
   num_points = num_points_;
-  num_err_param = num_err_param_; 
+  num_err_param = num_err_param_;
   num_cam_param = num_cam_param_;
   num_point_param = num_point_param_;
 
@@ -204,9 +204,9 @@ void LevMarqSparse::run( int num_points_, //number of points
   int Wij_width = Bij_width;
 
   //allocate memory for all Aij, Bij, U, V, W
-    
+
   //allocate num_points*num_cams matrices A
-    
+
   //Allocate matrix A whose elements are pointers to Aij
   //if Aij is zero (point i is not visible in camera j) then A(i,j) contains NULL
   //A = cvCreateMat( num_points, num_cams, CV_32S /*pointer is stored here*/ );
@@ -221,39 +221,39 @@ void LevMarqSparse::run( int num_points_, //number of points
   //cvSetZero( B );
   //cvSetZero( W );
   cvSet( Vis_index, cvScalar(-1) );
-    
+
   //fill matrices A and B based on visibility
   CvMat _vis = visibility;
   int index = 0;
   for (int i = 0; i < num_points; i++ ) {
     for (int j = 0; j < num_cams; j++ ) {
       if (((int*)(_vis.data.ptr+ i * _vis.step))[j] ) {
-       ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j] = index;
-       index += num_err_param;
-    
-       //create matrices Aij, Bij
-       CvMat* tmp = cvCreateMat(Aij_height, Aij_width, CV_64F );
-       //((CvMat**)(A->data.ptr + i * A->step))[j] = tmp;
-       cvSet(tmp,cvScalar(1.0,1.0,1.0,1.0));
-       A[j+i*num_cams] = tmp;
-
-       tmp = cvCreateMat( Bij_height, Bij_width, CV_64F );
-       //((CvMat**)(B->data.ptr + i * B->step))[j] = tmp;
-       cvSet(tmp,cvScalar(1.0,1.0,1.0,1.0));
-       B[j+i*num_cams] = tmp;
-    
-       tmp = cvCreateMat( Wij_height, Wij_width, CV_64F );
-       //((CvMat**)(W->data.ptr + j * W->step))[i] = tmp;  //note indices i and j swapped
-       cvSet(tmp,cvScalar(1.0,1.0,1.0,1.0));
-       W[j+i*num_cams] = tmp;
+  ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j] = index;
+  index += num_err_param;
+
+  //create matrices Aij, Bij
+  CvMat* tmp = cvCreateMat(Aij_height, Aij_width, CV_64F );
+  //((CvMat**)(A->data.ptr + i * A->step))[j] = tmp;
+  cvSet(tmp,cvScalar(1.0,1.0,1.0,1.0));
+  A[j+i*num_cams] = tmp;
+
+  tmp = cvCreateMat( Bij_height, Bij_width, CV_64F );
+  //((CvMat**)(B->data.ptr + i * B->step))[j] = tmp;
+  cvSet(tmp,cvScalar(1.0,1.0,1.0,1.0));
+  B[j+i*num_cams] = tmp;
+
+  tmp = cvCreateMat( Wij_height, Wij_width, CV_64F );
+  //((CvMat**)(W->data.ptr + j * W->step))[i] = tmp;  //note indices i and j swapped
+  cvSet(tmp,cvScalar(1.0,1.0,1.0,1.0));
+  W[j+i*num_cams] = tmp;
       } else{
-       A[j+i*num_cams] = NULL;
-       B[j+i*num_cams] = NULL;
-       W[j+i*num_cams] = NULL;
+  A[j+i*num_cams] = NULL;
+  B[j+i*num_cams] = NULL;
+  W[j+i*num_cams] = NULL;
       }
-    }                
+    }
   }
-    
+
   //allocate U
   U = new CvMat* [num_cams];
   for (int j = 0; j < num_cams; j++ ) {
@@ -267,7 +267,7 @@ void LevMarqSparse::run( int num_points_, //number of points
     ea[j] = cvCreateMat( U_size, 1, CV_64F );
     cvSetZero(ea[j]);
   }
-    
+
   //allocate V and inv_V_star
   V = new CvMat* [num_points];
   inv_V_star = new CvMat* [num_points];
@@ -277,36 +277,36 @@ void LevMarqSparse::run( int num_points_, //number of points
     cvSetZero(V[i]);
     cvSetZero(inv_V_star[i]);
   }
-    
+
   //allocate eb
   eb = new CvMat* [num_points];
   for (int i = 0; i < num_points; i++ ) {
     eb[i] = cvCreateMat( V_size, 1, CV_64F );
     cvSetZero(eb[i]);
-  }   
-    
+  }
+
   //allocate Yj
   Yj = new CvMat* [num_points];
   for (int i = 0; i < num_points; i++ ) {
     Yj[i] = cvCreateMat( Wij_height, Wij_width, CV_64F );  //Yij has the same size as Wij
     cvSetZero(Yj[i]);
-  }        
-    
+  }
+
   //allocate matrix S
   S = cvCreateMat( num_cams * num_cam_param, num_cams * num_cam_param, CV_64F);
   cvSetZero(S);
   JtJ_diag = cvCreateMat( num_cams * num_cam_param + num_points * num_point_param, 1, CV_64F );
   cvSetZero(JtJ_diag);
-    
+
   //set starting parameters
-  CvMat _tmp_ = CvMat(P0); 
-  prevP = cvCloneMat( &_tmp_ );          
+  CvMat _tmp_ = CvMat(P0);
+  prevP = cvCloneMat( &_tmp_ );
   P = cvCloneMat( &_tmp_ );
   deltaP = cvCloneMat( &_tmp_ );
-    
+
   //set measurements
   _tmp_ = CvMat(X_init);
-  X = cvCloneMat( &_tmp_ );  
+  X = cvCloneMat( &_tmp_ );
   //create vector for estimated measurements
   hX = cvCreateMat( X->rows, X->cols, CV_64F );
   cvSetZero(hX);
@@ -334,9 +334,9 @@ void LevMarqSparse::run( int num_points_, //number of points
 
   prevErrNorm = cvNorm( err, 0,  CV_L2 );
   //    std::cerr<<"prevErrNorm = "<<prevErrNorm<<std::endl;
-  iters = 0; 
+  iters = 0;
   criteria = criteria_init;
-    
+
   optimize(_vis);
 
   ask_for_proj(_vis,true);
@@ -363,8 +363,8 @@ void LevMarqSparse::ask_for_proj(CvMat &/*_vis*/,bool once) {
                 func( i, j, _point_mat, _cam_mat, _measur_mat, data);
                 assert( ind*num_err_param == ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j]);
                 ind+=1;
-            }  
-        } 
+            }
+        }
     }
 }
 
@@ -372,20 +372,20 @@ void LevMarqSparse::ask_for_proj(CvMat &/*_vis*/,bool once) {
 void LevMarqSparse::ask_for_projac(CvMat &/*_vis*/)   //should be evaluated at point prevP
 {
     // compute jacobians Aij and Bij
-    for (int i = 0; i < num_points; i++ ) 
+    for (int i = 0; i < num_points; i++ )
     {
         CvMat point_mat;
         cvGetSubRect( prevP, &point_mat, cvRect( 0, num_cams * num_cam_param + num_point_param * i, 1, num_point_param ));
 
         //CvMat** A_line = (CvMat**)(A->data.ptr + A->step * i);
         //CvMat** B_line = (CvMat**)(B->data.ptr + B->step * i);
-        for( int j = 0; j < num_cams; j++ ) 
+        for( int j = 0; j < num_cams; j++ )
         {
             //CvMat* Aij = A_line[j];
             //if( Aij ) //Aij is not zero
             CvMat* Aij = A[j+i*num_cams];
             CvMat* Bij = B[j+i*num_cams];
-            if(Aij) 
+            if(Aij)
             {
                 //CvMat** A_line = (CvMat**)(A->data.ptr + A->step * i);
                 //CvMat** B_line = (CvMat**)(B->data.ptr + B->step * i);
@@ -403,13 +403,13 @@ void LevMarqSparse::ask_for_projac(CvMat &/*_vis*/)   //should be evaluated at p
             }
         }
     }
-}  
+}
 
 void LevMarqSparse::optimize(CvMat &_vis) { //main function that runs minimization
   bool done = false;
-    
-  CvMat* YWt = cvCreateMat( num_cam_param, num_cam_param, CV_64F ); //this matrix used to store Yij*Wik' 
-  CvMat* E = cvCreateMat( S->height, 1 , CV_64F ); //this is right part of system with S       
+
+  CvMat* YWt = cvCreateMat( num_cam_param, num_cam_param, CV_64F ); //this matrix is used to store Yij*Wik'
+  CvMat* E = cvCreateMat( S->height, 1 , CV_64F ); //this is the right-hand side of the system with S
   cvSetZero(YWt);
   cvSetZero(E);
 
@@ -419,26 +419,26 @@ void LevMarqSparse::optimize(CvMat &_vis) { //main function that runs minimizati
     int invisible_count=0;
     //compute U_j  and  ea_j
     for (int j = 0; j < num_cams; j++ ) {
-      cvSetZero(U[j]); 
+      cvSetZero(U[j]);
       cvSetZero(ea[j]);
       //sum over i (number of points)
       for (int i = 0; i < num_points; i++ ) {
-       //get Aij
-       //CvMat* Aij = ((CvMat**)(A->data.ptr + A->step * i))[j];
-       CvMat* Aij = A[j+i*num_cams];
-       if (Aij ) {
-         //Uj+= AijT*Aij
-         cvGEMM( Aij, Aij, 1, U[j], 1, U[j], CV_GEMM_A_T );
-         //ea_j += AijT * e_ij
-         CvMat eij;
-
-         int index = ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j];
-
-         cvGetSubRect( err, &eij, cvRect( 0, index, 1, Aij->height  ) ); //width of transposed Aij
-         cvGEMM( Aij, &eij, 1, ea[j], 1, ea[j], CV_GEMM_A_T );
-       }
-       else
-         invisible_count++;
+  //get Aij
+  //CvMat* Aij = ((CvMat**)(A->data.ptr + A->step * i))[j];
+  CvMat* Aij = A[j+i*num_cams];
+  if (Aij ) {
+    //Uj+= AijT*Aij
+    cvGEMM( Aij, Aij, 1, U[j], 1, U[j], CV_GEMM_A_T );
+    //ea_j += AijT * e_ij
+    CvMat eij;
+
+    int index = ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j];
+
+    cvGetSubRect( err, &eij, cvRect( 0, index, 1, Aij->height  ) ); //width of transposed Aij
+    cvGEMM( Aij, &eij, 1, ea[j], 1, ea[j], CV_GEMM_A_T );
+  }
+  else
+    invisible_count++;
       }
     } //U_j and ea_j computed for all j
 
@@ -450,272 +450,272 @@ void LevMarqSparse::optimize(CvMat &_vis) { //main function that runs minimizati
       cb(iters, prevErrNorm, user_data);
     //compute V_i  and  eb_i
     for (int i = 0; i < num_points; i++ ) {
-      cvSetZero(V[i]); 
+      cvSetZero(V[i]);
       cvSetZero(eb[i]);
-            
+
       //sum over j (number of cameras)
       for( int j = 0; j < num_cams; j++ ) {
-       //get Bij
-       //CvMat* Bij = ((CvMat**)(B->data.ptr + B->step * i))[j];
-       CvMat* Bij = B[j+i*num_cams];
-       if (Bij ) {
-         //Vi+= BijT*Bij
-         cvGEMM( Bij, Bij, 1, V[i], 1, V[i], CV_GEMM_A_T );
-
-         //eb_i += BijT * e_ij
-         int index = ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j];
-
-         CvMat eij;
-         cvGetSubRect( err, &eij, cvRect( 0, index, 1, Bij->height  ) ); //width of transposed Bij
-         cvGEMM( Bij, &eij, 1, eb[i], 1, eb[i], CV_GEMM_A_T );
-       }
+  //get Bij
+  //CvMat* Bij = ((CvMat**)(B->data.ptr + B->step * i))[j];
+  CvMat* Bij = B[j+i*num_cams];
+  if (Bij ) {
+    //Vi+= BijT*Bij
+    cvGEMM( Bij, Bij, 1, V[i], 1, V[i], CV_GEMM_A_T );
+
+    //eb_i += BijT * e_ij
+    int index = ((int*)(Vis_index->data.ptr + i * Vis_index->step))[j];
+
+    CvMat eij;
+    cvGetSubRect( err, &eij, cvRect( 0, index, 1, Bij->height  ) ); //width of transposed Bij
+    cvGEMM( Bij, &eij, 1, eb[i], 1, eb[i], CV_GEMM_A_T );
+  }
       }
     } //V_i and eb_i computed for all i
 
       //compute W_ij
     for( int i = 0; i < num_points; i++ ) {
       for( int j = 0; j < num_cams; j++ ) {
-       //CvMat* Aij = ((CvMat**)(A->data.ptr + A->step * i))[j];
-       CvMat* Aij = A[j+i*num_cams];
-       if( Aij ) { //visible
-         //CvMat* Bij = ((CvMat**)(B->data.ptr + B->step * i))[j];
-         CvMat* Bij = B[j+i*num_cams];
-         //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
-         CvMat* Wij = W[j+i*num_cams];
-
-         //multiply
-         cvGEMM( Aij, Bij, 1, NULL, 0, Wij, CV_GEMM_A_T );                     
-       }
+  //CvMat* Aij = ((CvMat**)(A->data.ptr + A->step * i))[j];
+  CvMat* Aij = A[j+i*num_cams];
+  if( Aij ) { //visible
+    //CvMat* Bij = ((CvMat**)(B->data.ptr + B->step * i))[j];
+    CvMat* Bij = B[j+i*num_cams];
+    //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
+    CvMat* Wij = W[j+i*num_cams];
+
+    //multiply
+    cvGEMM( Aij, Bij, 1, NULL, 0, Wij, CV_GEMM_A_T );
+  }
       }
     } //Wij computed
 
       //backup diagonal of JtJ before we start augmenting it
-    {               
+    {
       CvMat dia;
       CvMat subr;
       for( int j = 0; j < num_cams; j++ ) {
-       cvGetDiag(U[j], &dia);
-       cvGetSubRect(JtJ_diag, &subr, 
-                    cvRect(0, j*num_cam_param, 1, num_cam_param ));
-       cvCopy( &dia, &subr );
-      } 
+  cvGetDiag(U[j], &dia);
+  cvGetSubRect(JtJ_diag, &subr,
+         cvRect(0, j*num_cam_param, 1, num_cam_param ));
+  cvCopy( &dia, &subr );
+      }
       for( int i = 0; i < num_points; i++ ) {
-       cvGetDiag(V[i], &dia);
-       cvGetSubRect(JtJ_diag, &subr, 
-                    cvRect(0, num_cams*num_cam_param + i * num_point_param, 1, num_point_param ));
-       cvCopy( &dia, &subr );
-      }   
-    } 
+  cvGetDiag(V[i], &dia);
+  cvGetSubRect(JtJ_diag, &subr,
+         cvRect(0, num_cams*num_cam_param + i * num_point_param, 1, num_point_param ));
+  cvCopy( &dia, &subr );
+      }
+    }
 
     if( iters == 0 ) {
       //initialize lambda. It is set to 1e-3 * average diagonal element in JtJ
       double average_diag = 0;
       for( int j = 0; j < num_cams; j++ ) {
-       average_diag += cvTrace( U[j] ).val[0];
+  average_diag += cvTrace( U[j] ).val[0];
       }
       for( int i = 0; i < num_points; i++ ) {
-       average_diag += cvTrace( V[i] ).val[0];
+  average_diag += cvTrace( V[i] ).val[0];
       }
       average_diag /= (num_cams*num_cam_param + num_points * num_point_param );
-                        
-      //      lambda = 1e-3 * average_diag;        
-      lambda = 1e-3 * average_diag;        
+
+      //      lambda = 1e-3 * average_diag;
+      lambda = 1e-3 * average_diag;
       lambda = 0.245560;
     }
-       
+
     //now we are going to find good step and make it
     for(;;) {
       //augmentation of diagonal
       for(int j = 0; j < num_cams; j++ ) {
-       CvMat diag;
-       cvGetDiag( U[j], &diag );
+  CvMat diag;
+  cvGetDiag( U[j], &diag );
 #if 1
-       cvAddS( &diag, cvScalar( lambda ), &diag );
+  cvAddS( &diag, cvScalar( lambda ), &diag );
 #else
-       cvScale( &diag, &diag, 1 + lambda );
+  cvScale( &diag, &diag, 1 + lambda );
 #endif
       }
       for(int i = 0; i < num_points; i++ ) {
-       CvMat diag;
-       cvGetDiag( V[i], &diag );
+  CvMat diag;
+  cvGetDiag( V[i], &diag );
 #if 1
-       cvAddS( &diag, cvScalar( lambda ), &diag );
+  cvAddS( &diag, cvScalar( lambda ), &diag );
 #else
-       cvScale( &diag, &diag, 1 + lambda );
+  cvScale( &diag, &diag, 1 + lambda );
 #endif
-      }                              
+      }
       bool error = false;
       //compute inv(V*)
       bool inverted_ok = true;
       for(int i = 0; i < num_points; i++ ) {
-       double det = cvInvert( V[i], inv_V_star[i] );
+  double det = cvInvert( V[i], inv_V_star[i] );
 
-       if( fabs(det) <= FLT_EPSILON )  {
-         inverted_ok = false;
-         std::cerr<<"V["<<i<<"] failed"<<std::endl;
-         break;
-       } //means we did wrong augmentation, try to choose different lambda
+  if( fabs(det) <= FLT_EPSILON )  {
+    inverted_ok = false;
+    std::cerr<<"V["<<i<<"] failed"<<std::endl;
+    break;
+  } //means we did wrong augmentation, try to choose different lambda
       }
 
       if( inverted_ok ) {
-       cvSetZero( E ); 
-       //loop through cameras, compute upper diagonal blocks of matrix S 
-       for( int j = 0; j < num_cams; j++ ) {
-         //compute Yij = Wij (V*_i)^-1  for all i   (if Wij exists/nonzero)
-         for( int i = 0; i < num_points; i++ ) {
-           //
-           //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
-           CvMat* Wij = W[j+i*num_cams];
-           if( Wij ) {
-             cvMatMul( Wij, inv_V_star[i], Yj[i] );
-           }
-         }
-
-         //compute Sjk   for k>=j  (because Sjk = Skj)
-         for( int k = j; k < num_cams; k++ ) {
-           cvSetZero( YWt );
-           for( int i = 0; i < num_points; i++ ) {
-             //check that both Wij and Wik exist
-             // CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
-             CvMat* Wij = W[j+i*num_cams];
-             //CvMat* Wik = ((CvMat**)(W->data.ptr + W->step * k))[i];
-             CvMat* Wik = W[k+i*num_cams];
-
-             if( Wij && Wik ) {
-               //multiply YWt += Yj[i]*Wik'
-               cvGEMM( Yj[i], Wik, 1, YWt, 1, YWt, CV_GEMM_B_T  ); ///*transpose Wik
-             }
-           }
-
-           //copy result to matrix S
-
-           CvMat Sjk;
-           //extract submat
-           cvGetSubRect( S, &Sjk, cvRect( k * num_cam_param, j * num_cam_param, num_cam_param, num_cam_param ));  
-                        
-
-           //if j==k, add diagonal
-           if( j != k ) {
-             //just copy with minus
-             cvScale( YWt, &Sjk, -1 ); //if we set initial S to zero then we can use cvSub( Sjk, YWt, Sjk);
-           } else {
-             //add diagonal value
-
-             //subtract YWt from augmented Uj
-             cvSub( U[j], YWt, &Sjk );
-           }                
-         }
-
-         //compute right part of equation involving matrix S
-         // e_j=ea_j - \sum_i Y_ij eb_i 
-         {
-           CvMat e_j; 
-                    
-           //select submat
-           cvGetSubRect( E, &e_j, cvRect( 0, j * num_cam_param, 1, num_cam_param ) ); 
-                    
-           for( int i = 0; i < num_points; i++ ) {
-             //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
-             CvMat* Wij = W[j+i*num_cams];
-             if( Wij )
-               cvMatMulAdd( Yj[i], eb[i], &e_j, &e_j );
-           }
-
-           cvSub( ea[j], &e_j, &e_j );
-         }
-
-       } 
-       //fill below diagonal elements of matrix S
-       cvCompleteSymm( S,  0  ); ///*from upper to low //operation may be done by nonzero blocks or during upper diagonal computation
-                
-       //Solve linear system  S * deltaP_a = E
-       CvMat dpa;
-       cvGetSubRect( deltaP, &dpa, cvRect(0, 0, 1, S->width ) );
-       int res = cvSolve( S, E, &dpa, CV_CHOLESKY );
-            
-       if( res ) { //system solved ok
-         //compute db_i
-         for( int i = 0; i < num_points; i++ ) {
-           CvMat dbi;
-           cvGetSubRect( deltaP, &dbi, cvRect( 0, dpa.height + i * num_point_param, 1, num_point_param ) );   
-
-           // compute \sum_j W_ij^T da_j
-           for( int j = 0; j < num_cams; j++ ) {
-             //get Wij
-             //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
-             CvMat* Wij = W[j+i*num_cams];
-             if( Wij ) {
-               //get da_j
-               CvMat daj;
-               cvGetSubRect( &dpa, &daj, cvRect( 0, j * num_cam_param, 1, num_cam_param ));  
-               cvGEMM( Wij, &daj, 1, &dbi, 1, &dbi, CV_GEMM_A_T  ); ///* transpose Wij
-             }  
-           }
-           //finalize dbi
-           cvSub( eb[i], &dbi, &dbi );
-           cvMatMul(inv_V_star[i], &dbi, &dbi );  //here we get final dbi  
-         }  //now we computed whole deltaP
-
-         //add deltaP to delta 
-         cvAdd( prevP, deltaP, P );
-                                        
-         //evaluate  function with new parameters
-         ask_for_proj(_vis); // func( P, hX );
-
-         //compute error
-         errNorm = cvNorm( X, hX, CV_L2 );
-                                        
-       } else {
-         error = true;
-       }                
+  cvSetZero( E );
+  //loop through cameras, compute upper diagonal blocks of matrix S
+  for( int j = 0; j < num_cams; j++ ) {
+    //compute Yij = Wij (V*_i)^-1  for all i   (if Wij exists/nonzero)
+    for( int i = 0; i < num_points; i++ ) {
+      //
+      //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
+      CvMat* Wij = W[j+i*num_cams];
+      if( Wij ) {
+        cvMatMul( Wij, inv_V_star[i], Yj[i] );
+      }
+    }
+
+    //compute Sjk   for k>=j  (because Sjk = Skj)
+    for( int k = j; k < num_cams; k++ ) {
+      cvSetZero( YWt );
+      for( int i = 0; i < num_points; i++ ) {
+        //check that both Wij and Wik exist
+        // CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
+        CvMat* Wij = W[j+i*num_cams];
+        //CvMat* Wik = ((CvMat**)(W->data.ptr + W->step * k))[i];
+        CvMat* Wik = W[k+i*num_cams];
+
+        if( Wij && Wik ) {
+    //multiply YWt += Yj[i]*Wik'
+    cvGEMM( Yj[i], Wik, 1, YWt, 1, YWt, CV_GEMM_B_T  ); ///*transpose Wik
+        }
+      }
+
+      //copy result to matrix S
+
+      CvMat Sjk;
+      //extract submat
+      cvGetSubRect( S, &Sjk, cvRect( k * num_cam_param, j * num_cam_param, num_cam_param, num_cam_param ));
+
+
+      //if j==k, add diagonal
+      if( j != k ) {
+        //just copy with minus
+        cvScale( YWt, &Sjk, -1 ); //if we set initial S to zero then we can use cvSub( Sjk, YWt, Sjk);
+      } else {
+        //add diagonal value
+
+        //subtract YWt from augmented Uj
+        cvSub( U[j], YWt, &Sjk );
+      }
+    }
+
+    //compute right part of equation involving matrix S
+    // e_j=ea_j - \sum_i Y_ij eb_i
+    {
+      CvMat e_j;
+
+      //select submat
+      cvGetSubRect( E, &e_j, cvRect( 0, j * num_cam_param, 1, num_cam_param ) );
+
+      for( int i = 0; i < num_points; i++ ) {
+        //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
+        CvMat* Wij = W[j+i*num_cams];
+        if( Wij )
+    cvMatMulAdd( Yj[i], eb[i], &e_j, &e_j );
+      }
+
+      cvSub( ea[j], &e_j, &e_j );
+    }
+
+  }
+  //fill below diagonal elements of matrix S
+  cvCompleteSymm( S,  0  ); ///*from upper to low //operation may be done by nonzero blocks or during upper diagonal computation
+
+  //Solve linear system  S * deltaP_a = E
+  CvMat dpa;
+  cvGetSubRect( deltaP, &dpa, cvRect(0, 0, 1, S->width ) );
+  int res = cvSolve( S, E, &dpa, CV_CHOLESKY );
+
+  if( res ) { //system solved ok
+    //compute db_i
+    for( int i = 0; i < num_points; i++ ) {
+      CvMat dbi;
+      cvGetSubRect( deltaP, &dbi, cvRect( 0, dpa.height + i * num_point_param, 1, num_point_param ) );
+
+      // compute \sum_j W_ij^T da_j
+      for( int j = 0; j < num_cams; j++ ) {
+        //get Wij
+        //CvMat* Wij = ((CvMat**)(W->data.ptr + W->step * j))[i];
+        CvMat* Wij = W[j+i*num_cams];
+        if( Wij ) {
+    //get da_j
+    CvMat daj;
+    cvGetSubRect( &dpa, &daj, cvRect( 0, j * num_cam_param, 1, num_cam_param ));
+    cvGEMM( Wij, &daj, 1, &dbi, 1, &dbi, CV_GEMM_A_T  ); ///* transpose Wij
+        }
+      }
+      //finalize dbi
+      cvSub( eb[i], &dbi, &dbi );
+      cvMatMul(inv_V_star[i], &dbi, &dbi );  //here we get final dbi
+    }  //now we computed whole deltaP
+
+    //add deltaP to delta
+    cvAdd( prevP, deltaP, P );
+
+    //evaluate  function with new parameters
+    ask_for_proj(_vis); // func( P, hX );
+
+    //compute error
+    errNorm = cvNorm( X, hX, CV_L2 );
+
+  } else {
+    error = true;
+  }
       } else {
-       error = true;
+  error = true;
       }
       //check solution
       if( error || ///* singularities somewhere
-         errNorm > prevErrNorm )  { //step was not accepted
-       //increase lambda and reject change 
-       lambda *= 10;
-       int nviz = X->rows / num_err_param;
-       double e2 = errNorm*errNorm, e2_prev = prevErrNorm*prevErrNorm;
-       double e2n = e2/nviz, e2n_prev = e2_prev/nviz;
-       std::cerr<<"move failed: lambda = "<<lambda<<", e2 = "<<e2<<" ("<<e2n<<") > "<<e2_prev<<" ("<<e2n_prev<<")"<<std::endl;
-
-       //restore diagonal from backup
-       {               
-         CvMat dia;
-         CvMat subr;
-         for( int j = 0; j < num_cams; j++ ) {
-           cvGetDiag(U[j], &dia);
-           cvGetSubRect(JtJ_diag, &subr, 
-                        cvRect(0, j*num_cam_param, 1, num_cam_param ));
-           cvCopy( &subr, &dia );
-         } 
-         for( int i = 0; i < num_points; i++ ) {
-           cvGetDiag(V[i], &dia);
-           cvGetSubRect(JtJ_diag, &subr, 
-                        cvRect(0, num_cams*num_cam_param + i * num_point_param, 1, num_point_param ));
-           cvCopy( &subr, &dia );
-         }   
-       }                  
+    errNorm > prevErrNorm )  { //step was not accepted
+  //increase lambda and reject change
+  lambda *= 10;
+  int nviz = X->rows / num_err_param;
+  double e2 = errNorm*errNorm, e2_prev = prevErrNorm*prevErrNorm;
+  double e2n = e2/nviz, e2n_prev = e2_prev/nviz;
+  std::cerr<<"move failed: lambda = "<<lambda<<", e2 = "<<e2<<" ("<<e2n<<") > "<<e2_prev<<" ("<<e2n_prev<<")"<<std::endl;
+
+  //restore diagonal from backup
+  {
+    CvMat dia;
+    CvMat subr;
+    for( int j = 0; j < num_cams; j++ ) {
+      cvGetDiag(U[j], &dia);
+      cvGetSubRect(JtJ_diag, &subr,
+       cvRect(0, j*num_cam_param, 1, num_cam_param ));
+      cvCopy( &subr, &dia );
+    }
+    for( int i = 0; i < num_points; i++ ) {
+      cvGetDiag(V[i], &dia);
+      cvGetSubRect(JtJ_diag, &subr,
+       cvRect(0, num_cams*num_cam_param + i * num_point_param, 1, num_point_param ));
+      cvCopy( &subr, &dia );
+    }
+  }
       } else {  //all is ok
-       //accept change and decrease lambda
-       lambda /= 10;
-       lambda = MAX(lambda, 1e-16);
-       std::cerr<<"decreasing lambda to "<<lambda<<std::endl;
-       prevErrNorm = errNorm;
-
-       //compute new projection error vector
-       cvSub(  X, hX, err );
-       break;
+  //accept change and decrease lambda
+  lambda /= 10;
+  lambda = MAX(lambda, 1e-16);
+  std::cerr<<"decreasing lambda to "<<lambda<<std::endl;
+  prevErrNorm = errNorm;
+
+  //compute new projection error vector
+  cvSub(  X, hX, err );
+  break;
       }
-    }      
+    }
     iters++;
 
     double param_change_norm = cvNorm(P, prevP, CV_RELATIVE_L2);
     //check termination criteria
-    if( (criteria.type&CV_TERMCRIT_ITER && iters > criteria.max_iter ) || 
-       (criteria.type&CV_TERMCRIT_EPS && param_change_norm < criteria.epsilon) ) {
+    if( (criteria.type&CV_TERMCRIT_ITER && iters > criteria.max_iter ) ||
+  (criteria.type&CV_TERMCRIT_EPS && param_change_norm < criteria.epsilon) ) {
       //      std::cerr<<"relative norm change "<<param_change_norm<<" lower than eps "<<criteria.epsilon<<", stopping"<<std::endl;
       done = true;
       break;
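
The inner for(;;) loop above is the usual Levenberg-Marquardt damping schedule: lambda starts at 1e-3 times the average diagonal of JtJ (and is then overridden with the hard-coded 0.245560 visible above), is multiplied by 10 whenever the augmented system is singular or the new error exceeds the previous one, and is divided by 10 (with a 1e-16 floor) once a step is accepted. A self-contained toy sketch of just that control flow, on a scalar least-squares problem instead of the sparse camera/point system:

    #include <algorithm>
    #include <cmath>

    // Toy illustration of the damping schedule only (fit scalar a so that a*x ~= y, x != 0);
    // it mirrors the lambda updates above, not the sparse bundle-adjustment bookkeeping.
    double lmFitScalarSketch(double x, double y, double a, int maxIter = 30)
    {
        double lambda  = 1e-3 * (x * x);                     // ~1e-3 * average diagonal of JtJ
        double prevErr = (a * x - y) * (a * x - y);
        for (int iter = 0; iter < maxIter; ++iter)
        {
            for (;;)
            {
                double J = x, r = y - a * x;
                double delta = (J * r) / (J * J + lambda);   // solve (JtJ + lambda*I) * delta = Jt*r
                double aNew  = a + delta;
                double err   = (aNew * x - y) * (aNew * x - y);
                if (err > prevErr) {
                    lambda *= 10;                            // step rejected: damp harder and retry
                } else {
                    lambda = std::max(lambda / 10.0, 1e-16); // step accepted: relax damping
                    a = aNew;
                    prevErr = err;
                    break;
                }
            }
            if (prevErr < 1e-20) break;                      // simple convergence check
        }
        return a;
    }
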
@@ -723,17 +723,17 @@ void LevMarqSparse::optimize(CvMat &_vis) { //main function that runs minimizati
       //copy new params and continue iterations
       cvCopy( P, prevP );
     }
-  }   
-  cvReleaseMat(&YWt); 
+  }
+  cvReleaseMat(&YWt);
   cvReleaseMat(&E);
-} 
+}
 
 //Utilities
 
-void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* A, CvMat* B, void* /*data*/) {
+static void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* A, CvMat* B, void* /*data*/) {
   //compute jacobian per camera parameters (i.e. Aij)
   //take i-th point 3D current coordinates
-    
+
   CvMat _Mi;
   cvReshape(point_params, &_Mi, 3, 1 );
 
@@ -750,25 +750,25 @@ void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* A
   intr_data[2] = cam_params->data.db[8];
   intr_data[5] = cam_params->data.db[9];
 
-  CvMat _A = cvMat(3,3, CV_64F, intr_data ); 
+  CvMat _A = cvMat(3,3, CV_64F, intr_data );
 
   CvMat _dpdr, _dpdt, _dpdf, _dpdc, _dpdk;
-    
+
   bool have_dk = cam_params->height - 10 ? true : false;
 
   cvGetCols( A, &_dpdr, 0, 3 );
   cvGetCols( A, &_dpdt, 3, 6 );
   cvGetCols( A, &_dpdf, 6, 8 );
   cvGetCols( A, &_dpdc, 8, 10 );
-    
+
   if( have_dk ) {
     cvGetRows( cam_params, &_k, 10, cam_params->height );
     cvGetCols( A, &_dpdk, 10, A->width );
   }
   cvProjectPoints2(&_Mi, &_ri, &_ti, &_A, have_dk ? &_k : NULL, _mp, &_dpdr, &_dpdt,
-                  &_dpdf, &_dpdc, have_dk ? &_dpdk : NULL, 0);   
+       &_dpdf, &_dpdc, have_dk ? &_dpdk : NULL, 0);
 
-  cvReleaseMat( &_mp );                                 
+  cvReleaseMat( &_mp );
 
   //compute jacobian for point params
   //compute dMeasure/dPoint3D
@@ -781,30 +781,30 @@ void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* A
   // y' = y/z
 
   //d(x') = ( dx*z - x*dz)/(z*z)
-  //d(y') = ( dy*z - y*dz)/(z*z) 
+  //d(y') = ( dy*z - y*dz)/(z*z)
 
   //g = 1 + k1*r_2 + k2*r_4 + k3*r_6
   //r_2 = x'*x' + y'*y'
 
   //d(r_2) = 2*x'*dx' + 2*y'*dy'
 
-  //dg = k1* d(r_2) + k2*2*r_2*d(r_2) + k3*3*r_2*r_2*d(r_2) 
+  //dg = k1* d(r_2) + k2*2*r_2*d(r_2) + k3*3*r_2*r_2*d(r_2)
 
   //x" = x'*g + 2*p1*x'*y' + p2(r_2+2*x'_2)
   //y" = y'*g + p1(r_2+2*y'_2) + 2*p2*x'*y'
-               
+
   //d(x") = d(x') * g + x' * d(g) + 2*p1*( d(x')*y' + x'*dy) + p2*(d(r_2) + 2*2*x'* dx')
-  //d(y") = d(y') * g + y' * d(g) + 2*p2*( d(x')*y' + x'*dy) + p1*(d(r_2) + 2*2*y'* dy')  
+  //d(y") = d(y') * g + y' * d(g) + 2*p2*( d(x')*y' + x'*dy) + p1*(d(r_2) + 2*2*y'* dy')
 
   // u = fx*( x") + cx
   // v = fy*( y") + cy
-    
+
   // du = fx * d(x")  = fx * ( dx*z - x*dz)/ (z*z)
   // dv = fy * d(y")  = fy * ( dy*z - y*dz)/ (z*z)
 
-  // dx/dX = r11,  dx/dY = r12, dx/dZ = r13 
+  // dx/dX = r11,  dx/dY = r12, dx/dZ = r13
   // dy/dX = r21,  dy/dY = r22, dy/dZ = r23
-  // dz/dX = r31,  dz/dY = r32, dz/dZ = r33 
+  // dz/dX = r31,  dz/dY = r32, dz/dZ = r33
 
   // du/dX = fx*(r11*z-x*r31)/(z*z)
   // du/dY = fx*(r12*z-x*r32)/(z*z)
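
The comment block above works out the Jacobian of the pinhole projection with respect to the 3D point; in the undistorted case it collapses to the formulas that fjac() writes into B with the cvmSet calls further down (k*(R[0]*z - x*R[6]) and so on). With \((x, y, z)^T = R\,(X, Y, Z)^T + t\), \(u = f_x\,x/z + c_x\) and \(v = f_y\,y/z + c_y\), the last two comment lines read, in full:

\[
\frac{\partial u}{\partial X} = \frac{f_x\,(r_{11} z - x\, r_{31})}{z^2},\qquad
\frac{\partial u}{\partial Y} = \frac{f_x\,(r_{12} z - x\, r_{32})}{z^2},\qquad
\frac{\partial u}{\partial Z} = \frac{f_x\,(r_{13} z - x\, r_{33})}{z^2},
\]
\[
\frac{\partial v}{\partial X} = \frac{f_y\,(r_{21} z - y\, r_{31})}{z^2},
\qquad\text{and analogously for } \partial v/\partial Y \text{ and } \partial v/\partial Z .
\]
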
@@ -833,27 +833,27 @@ void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* A
   double y = R[3] * X + R[4] * Y + R[5] * Z + t[1];
   double z = R[6] * X + R[7] * Y + R[8] * Z + t[2];
 
-#if 1    
+#if 1
   //compute x',y'
   double x_strike = x/z;
-  double y_strike = y/z;   
+  double y_strike = y/z;
   //compute dx',dy'  matrix
   //
-  //    dx'/dX  dx'/dY dx'/dZ    =    
+  //    dx'/dX  dx'/dY dx'/dZ    =
   //    dy'/dX  dy'/dY dy'/dZ
 
   double coeff[6] = { z, 0, -x,
-                     0, z, -y };
+          0, z, -y };
   CvMat coeffmat = cvMat( 2, 3, CV_64F, coeff );
 
   CvMat* dstrike_dbig = cvCreateMat(2,3,CV_64F);
   cvMatMul(&coeffmat, &_R, dstrike_dbig);
-  cvScale(dstrike_dbig, dstrike_dbig, 1/(z*z) );      
-    
+  cvScale(dstrike_dbig, dstrike_dbig, 1/(z*z) );
+
   if( have_dk ) {
     double strike_[2] = {x_strike, y_strike};
-    CvMat strike = cvMat(1, 2, CV_64F, strike_);       
-        
+    CvMat strike = cvMat(1, 2, CV_64F, strike_);
+
     //compute r_2
     double r_2 = x_strike*x_strike + y_strike*y_strike;
     double r_4 = r_2*r_2;
@@ -867,24 +867,24 @@ void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* A
     double& k1 = _k.data.db[0];
     double& k2 = _k.data.db[1];
     double& p1 = _k.data.db[2];
-    double& p2 = _k.data.db[3];          
+    double& p2 = _k.data.db[3];
     double k3 = 0;
 
     if( _k.cols*_k.rows == 5 ) {
       k3 = _k.data.db[4];
-    }    
+    }
     //compute dg/dbig
     double dg_dr2 = k1 + k2*2*r_2 + k3*3*r_4;
     double g = 1+k1*r_2+k2*r_4+k3*r_6;
 
     CvMat* dg_dbig = cvCreateMat(1,3,CV_64F);
-    cvScale( dr2_dbig, dg_dbig, dg_dr2 ); 
+    cvScale( dr2_dbig, dg_dbig, dg_dr2 );
 
     CvMat* tmp = cvCreateMat( 2, 3, CV_64F );
     CvMat* dstrike2_dbig = cvCreateMat( 2, 3, CV_64F );
-                                  
+
     double c[4] = { g+2*p1*y_strike+4*p2*x_strike,       2*p1*x_strike,
-                   2*p2*y_strike,                 g+2*p2*x_strike + 4*p1*y_strike };
+        2*p2*y_strike,                 g+2*p2*x_strike + 4*p1*y_strike };
 
     CvMat coeffmat = cvMat(2,2,CV_64F, c );
 
@@ -897,7 +897,7 @@ void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* A
     CvMat pmat = cvMat(2, 1, CV_64F, p );
 
     cvMatMul( &pmat, dr2_dbig ,tmp);
-    cvAdd( dstrike2_dbig, tmp, dstrike2_dbig );   
+    cvAdd( dstrike2_dbig, tmp, dstrike2_dbig );
 
     cvCopy( dstrike2_dbig, B );
 
@@ -906,15 +906,15 @@ void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* A
 
     cvReleaseMat(&tmp);
     cvReleaseMat(&dstrike2_dbig);
-    cvReleaseMat(&tmp);  
+    cvReleaseMat(&tmp);
   } else {
     cvCopy(dstrike_dbig, B);
   }
   //multiply by fx, fy
   CvMat row;
   cvGetRows( B, &row, 0, 1 );
-  cvScale( &row, &row, fx );    
-    
+  cvScale( &row, &row, fx );
+
   cvGetRows( B, &row, 1, 2 );
   cvScale( &row, &row, fy );
 
@@ -925,17 +925,17 @@ void fjac(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* A
   cvmSet( B, 0, 0, k*(R[0]*z-x*R[6]));
   cvmSet( B, 0, 1, k*(R[1]*z-x*R[7]));
   cvmSet( B, 0, 2, k*(R[2]*z-x*R[8]));
-    
-  k = fy/(z*z);        
-    
+
+  k = fy/(z*z);
+
   cvmSet( B, 1, 0, k*(R[3]*z-y*R[6]));
   cvmSet( B, 1, 1, k*(R[4]*z-y*R[7]));
   cvmSet( B, 1, 2, k*(R[5]*z-y*R[8]));
-    
+
 #endif
-    
+
 };
-void func(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* estim, void* /*data*/) {
+static void func(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* estim, void* /*data*/) {
   //just do projections
   CvMat _Mi;
   cvReshape( point_params, &_Mi, 3, 1 );
@@ -955,19 +955,19 @@ void func(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* e
   intr_data[2] = cam_params->data.db[8];
   intr_data[5] = cam_params->data.db[9];
 
-  CvMat _A = cvMat(3,3, CV_64F, intr_data ); 
+  CvMat _A = cvMat(3,3, CV_64F, intr_data );
 
   //int cn = CV_MAT_CN(_Mi.type);
 
   bool have_dk = cam_params->height - 10 ? true : false;
-           
+
   if( have_dk ) {
-    cvGetRows( cam_params, &_k, 10, cam_params->height );        
-  }  
+    cvGetRows( cam_params, &_k, 10, cam_params->height );
+  }
   cvProjectPoints2( &_Mi, &_ri, &_ti, &_A, have_dk ? &_k : NULL, _mp, NULL, NULL,
-                   NULL, NULL, NULL, 0);   
+        NULL, NULL, NULL, 0);
   //    std::cerr<<"_mp = "<<_mp->data.db[0]<<","<<_mp->data.db[1]<<std::endl;
-  //    
+  //
   _mp2->data.db[0] = _mp->data.db[0];
   _mp2->data.db[1] = _mp->data.db[1];
   cvTranspose( _mp2, estim );
@@ -975,41 +975,41 @@ void func(int /*i*/, int /*j*/, CvMat *point_params, CvMat* cam_params, CvMat* e
   cvReleaseMat( &_mp2 );
 };
 
-void fjac_new(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data) {
+static void fjac_new(int i, int j, Mat& point_params, Mat& cam_params, Mat& A, Mat& B, void* data) {
   CvMat _point_params = point_params, _cam_params = cam_params, _Al = A, _Bl = B;
   fjac(i,j, &_point_params, &_cam_params, &_Al, &_Bl, data);
 };
 
-void func_new(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data)  {
+static void func_new(int i, int j, Mat& point_params, Mat& cam_params, Mat& estim, void* data)  {
   CvMat _point_params = point_params, _cam_params = cam_params, _estim = estim;
   func(i,j,&_point_params,&_cam_params,&_estim,data);
-};                                                 
+};
 
 void LevMarqSparse::bundleAdjust( vector<Point3d>& points, //positions of points in global coordinate system (input and output)
-                                 const vector<vector<Point2d> >& imagePoints, //projections of 3d points for every camera
-                                 const vector<vector<int> >& visibility, //visibility of 3d points for every camera 
-                                 vector<Mat>& cameraMatrix, //intrinsic matrices of all cameras (input and output)
-                                 vector<Mat>& R, //rotation matrices of all cameras (input and output)
-                                 vector<Mat>& T, //translation vector of all cameras (input and output)
-                                 vector<Mat>& distCoeffs, //distortion coefficients of all cameras (input and output)
-                                 const TermCriteria& criteria,
-                                 BundleAdjustCallback cb, void* user_data) {
+          const vector<vector<Point2d> >& imagePoints, //projections of 3d points for every camera
+          const vector<vector<int> >& visibility, //visibility of 3d points for every camera
+          vector<Mat>& cameraMatrix, //intrinsic matrices of all cameras (input and output)
+          vector<Mat>& R, //rotation matrices of all cameras (input and output)
+          vector<Mat>& T, //translation vector of all cameras (input and output)
+          vector<Mat>& distCoeffs, //distortion coefficients of all cameras (input and output)
+          const TermCriteria& criteria,
+          BundleAdjustCallback cb, void* user_data) {
   //,enum{MOTION_AND_STRUCTURE,MOTION,STRUCTURE})
   int num_points = (int)points.size();
   int num_cameras = (int)cameraMatrix.size();
 
-  CV_Assert( imagePoints.size() == (size_t)num_cameras && 
-            visibility.size() == (size_t)num_cameras && 
-            R.size() == (size_t)num_cameras &&
-            T.size() == (size_t)num_cameras &&
-            (distCoeffs.size() == (size_t)num_cameras || distCoeffs.size() == 0) );                
+  CV_Assert( imagePoints.size() == (size_t)num_cameras &&
+       visibility.size() == (size_t)num_cameras &&
+       R.size() == (size_t)num_cameras &&
+       T.size() == (size_t)num_cameras &&
+       (distCoeffs.size() == (size_t)num_cameras || distCoeffs.size() == 0) );
 
   int numdist = distCoeffs.size() ? (distCoeffs[0].rows * distCoeffs[0].cols) : 0;
 
   int num_cam_param = 3 /* rotation vector */ + 3 /* translation vector */
-    + 2 /* fx, fy */ + 2 /* cx, cy */ + numdist; 
+    + 2 /* fx, fy */ + 2 /* cx, cy */ + numdist;
 
-  int num_point_param = 3; 
+  int num_point_param = 3;
 
   //collect camera parameters into vector
   Mat params( num_cameras * num_cam_param + num_points * num_point_param, 1, CV_64F );
@@ -1023,8 +1023,8 @@ void LevMarqSparse::bundleAdjust( vector<Point3d>& points, //positions of points
 
     //translation
     dst = params.rowRange(i*num_cam_param + 3, i*num_cam_param+6);
-    T[i].copyTo(dst); 
-        
+    T[i].copyTo(dst);
+
     //intrinsic camera matrix
     double* intr_data = (double*)cameraMatrix[i].data;
     double* intr = (double*)(params.data + params.step * (i*num_cam_param+6));
@@ -1033,14 +1033,14 @@ void LevMarqSparse::bundleAdjust( vector<Point3d>& points, //positions of points
     intr[1] = intr_data[4];  //fy
     //center of projection
     intr[2] = intr_data[2];  //cx
-    intr[3] = intr_data[5];  //cy  
+    intr[3] = intr_data[5];  //cy
 
     //add distortion if exists
     if( distCoeffs.size() ) {
       dst = params.rowRange(i*num_cam_param + 10, i*num_cam_param+10+numdist);
-      distCoeffs[i].copyTo(dst); 
+      distCoeffs[i].copyTo(dst);
     }
-  }  
+  }
 
   //fill point params
   Mat ptparams(num_points, 1, CV_64FC3, params.data + num_cameras*num_cam_param*params.step);
@@ -1059,26 +1059,26 @@ void LevMarqSparse::bundleAdjust( vector<Point3d>& points, //positions of points
   int num_proj = countNonZero(vismat); //total number of points projections
 
   //collect measurements
-  Mat X(num_proj*2,1,CV_64F); //measurement vector      
-    
+  Mat X(num_proj*2,1,CV_64F); //measurement vector
+
   int counter = 0;
   for(int i = 0; i < num_points; i++ ) {
     for(int j = 0; j < num_cameras; j++ ) {
       //check visibility
       if( visibility[j][i] ) {
-       //extract point and put tu vector
-       Point2d p = imagePoints[j][i];
-       ((double*)(X.data))[counter] = p.x;
-       ((double*)(X.data))[counter+1] = p.y;
-       assert(p.x != -1 || p.y != -1);
-       counter+=2;
-      }             
-    }   
+  //extract point and put it into the vector
+  Point2d p = imagePoints[j][i];
+  ((double*)(X.data))[counter] = p.x;
+  ((double*)(X.data))[counter+1] = p.y;
+  assert(p.x != -1 || p.y != -1);
+  counter+=2;
+      }
+    }
   }
 
   LevMarqSparse levmar( num_points, num_cameras, num_point_param, num_cam_param, 2, vismat, params, X,
-                       TermCriteria(criteria), fjac_new, func_new, NULL,
-                       cb, user_data);
+      TermCriteria(criteria), fjac_new, func_new, NULL,
+      cb, user_data);
   //extract results
   //fill point params
   /*Mat final_points(num_points, 1, CV_64FC3,
@@ -1101,7 +1101,7 @@ void LevMarqSparse::bundleAdjust( vector<Point3d>& points, //positions of points
     Mat rot_vec = Mat(levmar.P).rowRange(i*num_cam_param, i*num_cam_param+3);
     Rodrigues( rot_vec, R[i] );
     //translation
-    T[i] = Mat(levmar.P).rowRange(i*num_cam_param + 3, i*num_cam_param+6);  
+    T[i] = Mat(levmar.P).rowRange(i*num_cam_param + 3, i*num_cam_param+6);
 
     //intrinsic camera matrix
     double* intr_data = (double*)cameraMatrix[i].data;
@@ -1111,11 +1111,11 @@ void LevMarqSparse::bundleAdjust( vector<Point3d>& points, //positions of points
     intr_data[4] = intr[1];  //fy
     //center of projection
     intr_data[2] = intr[2];  //cx
-    intr_data[5] = intr[3];  //cy  
+    intr_data[5] = intr[3];  //cy
 
     //add distortion if exists
     if( distCoeffs.size() ) {
       Mat(levmar.P).rowRange(i*num_cam_param + 10, i*num_cam_param+10+numdist).copyTo(distCoeffs[i]);
     }
-  } 
-}    
+  }
+}
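
The bundleAdjust entry point whose signature is reformatted above takes per-camera lists of image projections and visibility flags plus the initial intrinsics and extrinsics, and refines everything in place. A minimal call-site sketch under that signature; it assumes the method is exposed as a static member of cv::LevMarqSparse and lives in the contrib header (only the definition, not the declaration, is visible in this patch), and the data containers are placeholders that illustrate the expected shapes:

    #include <opencv2/contrib/contrib.hpp>   // assumed header for LevMarqSparse in this era
    #include <vector>

    // Sketch only: assumes points3d, projections, visible, K, R, T and dist are already filled so
    // that projections[cam][pt] and visible[cam][pt] exist for every camera/point pair.
    void refineSceneSketch(std::vector<cv::Point3d>& points3d,
                           const std::vector<std::vector<cv::Point2d> >& projections,
                           const std::vector<std::vector<int> >& visible,
                           std::vector<cv::Mat>& K, std::vector<cv::Mat>& R,
                           std::vector<cv::Mat>& T, std::vector<cv::Mat>& dist)
    {
        cv::TermCriteria crit(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 1e-10);
        // The trailing callback and user-data arguments are the ones reformatted above;
        // passing 0/NULL keeps the run silent.
        cv::LevMarqSparse::bundleAdjust(points3d, projections, visible, K, R, T, dist, crit, 0, 0);
    }
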
index 37c1d10..42fcd29 100644 (file)
@@ -81,6 +81,7 @@ private:
     {
     public:
         virtual ImageIterator* iterator() const = 0;
+        virtual ~ImageRange() {}
     };
 
     // Sliding window
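
The one-line addition above gives the abstract ImageRange interface a virtual destructor. Deleting a derived object through a base-class pointer whose destructor is not virtual is undefined behaviour, and gcc flags such classes under warnings like -Wnon-virtual-dtor. A minimal illustration of the pattern (Base/Derived are illustrative names, not from the patch):

    #include <cstdio>

    struct Base {
        virtual void work() = 0;
        virtual ~Base() {}          // without this, `delete p` below would not run ~Derived()
    };

    struct Derived : Base {
        int* buffer;
        Derived() : buffer(new int[16]) {}
        virtual void work() { std::printf("working\n"); }
        virtual ~Derived() { delete [] buffer; }   // now reliably invoked through Base*
    };

    int main() {
        Base* p = new Derived();
        p->work();
        delete p;                   // calls ~Derived() then ~Base() because ~Base() is virtual
        return 0;
    }
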
index c0081c1..ce5c8f4 100644 (file)
@@ -59,8 +59,8 @@ static Mat sortMatrixRowsByIndices(InputArray src, InputArray indices)
     return dst;
 }
 
-    
-Mat argsort(InputArray _src, bool ascending=true)
+
+static Mat argsort(InputArray _src, bool ascending=true)
 {
     Mat src = _src.getMat();
     if (src.rows != 1 && src.cols != 1)
@@ -70,14 +70,14 @@ Mat argsort(InputArray _src, bool ascending=true)
     sortIdx(src.reshape(1,1),sorted_indices,flags);
     return sorted_indices;
 }
-    
+
 template <typename _Tp> static
 Mat interp1_(const Mat& X_, const Mat& Y_, const Mat& XI)
 {
     int n = XI.rows;
     // sort input table
     vector<int> sort_indices = argsort(X_);
-    
+
     Mat X = sortMatrixRowsByIndices(X_,sort_indices);
     Mat Y = sortMatrixRowsByIndices(Y_,sort_indices);
     // interpolated values
@@ -131,7 +131,7 @@ static Mat interp1(InputArray _x, InputArray _Y, InputArray _xi)
     }
     return Mat();
 }
-    
+
 namespace colormap
 {
 
@@ -531,7 +531,7 @@ namespace colormap
                     n);  // number of sample points
         }
     };
-    
+
     void ColorMap::operator()(InputArray _src, OutputArray _dst) const
     {
         if(_lut.total() != 256)
@@ -550,7 +550,7 @@ namespace colormap
         // Apply the ColorMap.
         LUT(src, _lut, _dst);
     }
-    
+
     Mat ColorMap::linear_colormap(InputArray X,
             InputArray r, InputArray g, InputArray b,
             InputArray xi) {
@@ -581,12 +581,12 @@ namespace colormap
             colormap == COLORMAP_HOT ? (colormap::ColorMap*)(new colormap::Hot) :
             colormap == COLORMAP_MKPJ1 ? (colormap::ColorMap*)(new colormap::MKPJ1) :
             colormap == COLORMAP_MKPJ2 ? (colormap::ColorMap*)(new colormap::MKPJ2) : 0;
-        
+
         if( !cm )
             CV_Error( CV_StsBadArg, "Unknown colormap id; use one of COLORMAP_*");
-        
+
         (*cm)(src, dst);
-        
+
         delete cm;
     }
 }
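
The dispatch above instantiates one of the colormap::ColorMap subclasses by id and applies its 256-entry lookup table to the input image. Assuming the enclosing function is the module's public colormap entry point, cv::applyColorMap (its name falls outside this excerpt, so treat both the function name and the header below as assumptions), a call would look roughly like this:

    #include <opencv2/contrib/contrib.hpp>   // assumed home of applyColorMap and COLORMAP_* in this era
    #include <opencv2/highgui/highgui.hpp>

    int main()
    {
        cv::Mat gray = cv::imread("input.png", 0);            // 8-bit grayscale; 8-bit BGR also works
        if (gray.empty()) return 1;
        cv::Mat colored;
        cv::applyColorMap(gray, colored, cv::COLORMAP_HOT);   // id taken from the dispatch above
        cv::imwrite("colored.png", colored);
        return 0;
    }
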
index bbf27b8..d65e9d9 100644 (file)
@@ -3,7 +3,7 @@
 
 #define DEBUGLOGS 1
 
-#if ANDROID
+#ifdef ANDROID
 #include <android/log.h>
 #define LOG_TAG "OBJECT_DETECTOR"
 #define LOGD0(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
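
The first hunk above changes `#if ANDROID` to `#ifdef ANDROID`. Under the stricter preprocessor warnings this commit enables (presumably -Wundef), `#if` must evaluate the macro, so builds where ANDROID is not defined produce a warning; `#ifdef` merely tests whether the macro is defined. A standalone illustration (PLATFORM_FOO and PLATFORM_NAME are hypothetical names):

    // Compile with: g++ -Wundef -c platform_check.cpp

    // #if PLATFORM_FOO    // would trigger -Wundef when PLATFORM_FOO is not defined,
                           // because #if has to evaluate the (undefined) macro as 0
    #ifdef PLATFORM_FOO    // only asks whether the macro is defined at all -- no warning
    #  define PLATFORM_NAME "foo"
    #else
    #  define PLATFORM_NAME "generic"
    #endif

    #include <cstdio>
    int main() { std::printf("building for %s\n", PLATFORM_NAME); return 0; }
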
@@ -25,7 +25,7 @@
 #define LOGI(_str, ...) LOGI0(_str , ## __VA_ARGS__)
 #define LOGW(_str, ...) LOGW0(_str , ## __VA_ARGS__)
 #define LOGE(_str, ...) LOGE0(_str , ## __VA_ARGS__)
-#else 
+#else
 #define LOGD(...) do{} while(0)
 #define LOGI(...) do{} while(0)
 #define LOGW(...) do{} while(0)
@@ -193,7 +193,7 @@ do {
     } catch(...) {                                                                          \
         LOGE0("\n ERROR: UNKNOWN Exception caught\n\n");                                     \
     }                                                                                       \
-} while(0) 
+} while(0)
 #endif
 
 void* workcycleObjectDetectorFunction(void* p)
@@ -214,7 +214,7 @@ void DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector()
     vector<Rect> objects;
 
     CV_Assert(stateThread==STATE_THREAD_WORKING_SLEEPING);
-    pthread_mutex_lock(&mutex); 
+    pthread_mutex_lock(&mutex);
     {
         pthread_cond_signal(&objectDetectorThreadStartStop);
 
@@ -268,7 +268,7 @@ void DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector()
             LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- imageSeparateDetecting is empty, continue");
             continue;
         }
-        LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- start handling imageSeparateDetecting, img.size=%dx%d, img.data=0x%p", 
+        LOGD("DetectionBasedTracker::SeparateDetectionWork::workcycleObjectDetector() --- start handling imageSeparateDetecting, img.size=%dx%d, img.data=0x%p",
                 imageSeparateDetecting.size().width, imageSeparateDetecting.size().height, (void*)imageSeparateDetecting.data);
 
 
@@ -368,7 +368,7 @@ void DetectionBasedTracker::SeparateDetectionWork::resetTracking()
 
 
     pthread_mutex_unlock(&mutex);
-    
+
 }
 
 bool DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread(const Mat& imageGray, vector<Rect>& rectsWhereRegions)
@@ -398,7 +398,7 @@ bool DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThrea
     if (timeWhenDetectingThreadStartedWork > 0) {
         double time_from_previous_launch_in_ms=1000.0 * (((double)(getTickCount()  - timeWhenDetectingThreadStartedWork )) / freq); //the same formula as for lastBigDetectionDuration
         shouldSendNewDataToWorkThread = (time_from_previous_launch_in_ms >= detectionBasedTracker.parameters.minDetectionPeriod);
-        LOGD("DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread: shouldSendNewDataToWorkThread was 1, now it is %d, since time_from_previous_launch_in_ms=%.2f, minDetectionPeriod=%d", 
+        LOGD("DetectionBasedTracker::SeparateDetectionWork::communicateWithDetectingThread: shouldSendNewDataToWorkThread was 1, now it is %d, since time_from_previous_launch_in_ms=%.2f, minDetectionPeriod=%d",
                 (shouldSendNewDataToWorkThread?1:0), time_from_previous_launch_in_ms, detectionBasedTracker.parameters.minDetectionPeriod);
     }
 
@@ -454,7 +454,7 @@ DetectionBasedTracker::DetectionBasedTracker(const std::string& cascadeFilename,
             && (params.scaleFactor > 1.0)
             && (params.maxTrackLifetime >= 0) );
 
-    if (!cascadeForTracking.load(cascadeFilename)) {        
+    if (!cascadeForTracking.load(cascadeFilename)) {
         CV_Error(CV_StsBadArg, "DetectionBasedTracker::DetectionBasedTracker: Cannot load a cascade from the file '"+cascadeFilename+"'");
     }
 
@@ -495,7 +495,7 @@ void DetectionBasedTracker::process(const Mat& imageGray)
     Mat imageDetect=imageGray;
 
     int D=parameters.minObjectSize;
-    if (D < 1) 
+    if (D < 1)
         D=1;
 
     vector<Rect> rectsWhereRegions;
@@ -633,7 +633,7 @@ void DetectionBasedTracker::updateTrackedObjects(const vector<Rect>& detectedObj
                 LOGD("DetectionBasedTracker::updateTrackedObjects: j=%d is rejected, because it is intersected with another rectangle", j);
                 continue;
             }
-            LOGD("DetectionBasedTracker::updateTrackedObjects: detectedObjects[%d]={%d, %d, %d x %d}", 
+            LOGD("DetectionBasedTracker::updateTrackedObjects: detectedObjects[%d]={%d, %d, %d x %d}",
                     j, detectedObjects[j].x, detectedObjects[j].y, detectedObjects[j].width, detectedObjects[j].height);
 
             Rect r=prevRect & detectedObjects[j];
@@ -691,9 +691,9 @@ void DetectionBasedTracker::updateTrackedObjects(const vector<Rect>& detectedObj
 
     std::vector<TrackedObject>::iterator it=trackedObjects.begin();
     while( it != trackedObjects.end() ) {
-        if ( (it->numFramesNotDetected > parameters.maxTrackLifetime) 
+        if ( (it->numFramesNotDetected > parameters.maxTrackLifetime)
                 ||
-                ( 
+                (
                  (it->numDetectedFrames <= innerParameters.numStepsToWaitBeforeFirstShow)
                  &&
                  (it->numFramesNotDetected > innerParameters.numStepsToTrackWithoutDetectingIfObjectHasNotBeenShown)
@@ -718,7 +718,7 @@ Rect DetectionBasedTracker::calcTrackedObjectPositionToShow(int i) const
         return Rect();
     }
     if (trackedObjects[i].numDetectedFrames <= innerParameters.numStepsToWaitBeforeFirstShow){
-        LOGI("DetectionBasedTracker::calcTrackedObjectPositionToShow: trackedObjects[%d].numDetectedFrames=%d <= numStepsToWaitBeforeFirstShow=%d --- return empty Rect()", 
+        LOGI("DetectionBasedTracker::calcTrackedObjectPositionToShow: trackedObjects[%d].numDetectedFrames=%d <= numStepsToWaitBeforeFirstShow=%d --- return empty Rect()",
                 i, trackedObjects[i].numDetectedFrames, innerParameters.numStepsToWaitBeforeFirstShow);
         return Rect();
     }
index 12ef9d9..42fc411 100644 (file)
@@ -46,7 +46,7 @@
 
 using namespace cv;
 
-void downsamplePoints( const Mat& src, Mat& dst, size_t count )
+static void downsamplePoints( const Mat& src, Mat& dst, size_t count )
 {
     CV_Assert( count >= 2 );
     CV_Assert( src.cols == 1 || src.rows == 1 );
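
This hunk, like several others in this commit (argsort, sortMatrixColumnsByIndices, fjac, func, fjac_new, func_new), marks a file-local helper as static. Under warnings such as -Wmissing-declarations, gcc reports any non-static function defined without a prior prototype; giving such helpers internal linkage silences the warning and also keeps the symbols from clashing across translation units. A minimal illustration (the helper names are made up):

    // helpers.cpp -- compile with: g++ -Wmissing-declarations -c helpers.cpp

    // Without `static`, gcc warns: no previous declaration for 'int twice(int)'.
    // With `static`, the function has internal linkage and needs no external prototype.
    static int twice(int x) { return 2 * x; }

    int computeAnswer()   // a genuinely public entry point like this one would still need
    {                     // a declaration in a header to compile warning-free
        return twice(21);
    }
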
index 5983530..d9c9fb1 100644 (file)
@@ -28,7 +28,7 @@ using std::map;
 using std::set;
 using std::cout;
 using std::endl;
-    
+
 // Removes duplicate elements in a given vector.
 template<typename _Tp>
 inline vector<_Tp> remove_dups(const vector<_Tp>& src) {
@@ -42,7 +42,7 @@ inline vector<_Tp> remove_dups(const vector<_Tp>& src) {
         elems.push_back(*it);
     return elems;
 }
-    
+
 static Mat argsort(InputArray _src, bool ascending=true)
 {
     Mat src = _src.getMat();
@@ -72,8 +72,8 @@ static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double
     }
     return data;
 }
-    
-void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
+
+static void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
     if(_indices.getMat().type() != CV_32SC1)
         CV_Error(CV_StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!");
     Mat src = _src.getMat();
@@ -87,13 +87,13 @@ void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArra
     }
 }
 
-Mat sortMatrixColumnsByIndices(InputArray src, InputArray indices) {
+static Mat sortMatrixColumnsByIndices(InputArray src, InputArray indices) {
     Mat dst;
     sortMatrixColumnsByIndices(src, indices, dst);
     return dst;
 }
-    
-    
+
+
 template<typename _Tp> static bool
 isSymmetric_(InputArray src) {
     Mat _src = src.getMat();
@@ -151,7 +151,7 @@ static bool isSymmetric(InputArray src, double eps=1e-16)
     return false;
 }
 
-    
+
 //------------------------------------------------------------------------------
 // subspace::project
 //------------------------------------------------------------------------------
@@ -198,32 +198,32 @@ Mat subspaceReconstruct(InputArray _W, InputArray _mean, InputArray _src)
     return X;
 }
 
-    
+
 class EigenvalueDecomposition {
 private:
-    
+
     // Holds the data dimension.
     int n;
-    
+
     // Stores real/imag part of a complex division.
     double cdivr, cdivi;
-    
+
     // Pointer to internal memory.
     double *d, *e, *ort;
     double **V, **H;
-    
+
     // Holds the computed eigenvalues.
     Mat _eigenvalues;
-    
+
     // Holds the computed eigenvectors.
     Mat _eigenvectors;
-    
+
     // Allocates memory.
     template<typename _Tp>
     _Tp *alloc_1d(int m) {
         return new _Tp[m];
     }
-    
+
     // Allocates memory.
     template<typename _Tp>
     _Tp *alloc_1d(int m, _Tp val) {
@@ -232,7 +232,7 @@ private:
             arr[i] = val;
         return arr;
     }
-    
+
     // Allocates memory.
     template<typename _Tp>
     _Tp **alloc_2d(int m, int n) {
@@ -241,7 +241,7 @@ private:
             arr[i] = new _Tp[n];
         return arr;
     }
-    
+
     // Allocates memory.
     template<typename _Tp>
     _Tp **alloc_2d(int m, int n, _Tp val) {
@@ -253,7 +253,7 @@ private:
         }
         return arr;
     }
-    
+
     void cdiv(double xr, double xi, double yr, double yi) {
         double r, d;
         if (std::abs(yr) > std::abs(yi)) {
@@ -268,16 +268,16 @@ private:
             cdivi = (r * xi - xr) / d;
         }
     }
-    
+
     // Nonsymmetric reduction from Hessenberg to real Schur form.
-    
+
     void hqr2() {
-        
+
         //  This is derived from the Algol procedure hqr2,
         //  by Martin and Wilkinson, Handbook for Auto. Comp.,
         //  Vol.ii-Linear Algebra, and the corresponding
         //  Fortran subroutine in EISPACK.
-        
+
         // Initialize
         int nn = this->n;
         int n = nn - 1;
@@ -286,9 +286,9 @@ private:
         double eps = pow(2.0, -52.0);
         double exshift = 0.0;
         double p = 0, q = 0, r = 0, s = 0, z = 0, t, w, x, y;
-        
+
         // Store roots isolated by balanc and compute matrix norm
-        
+
         double norm = 0.0;
         for (int i = 0; i < nn; i++) {
             if (i < low || i > high) {
@@ -299,11 +299,11 @@ private:
                 norm = norm + std::abs(H[i][j]);
             }
         }
-        
+
         // Outer loop over eigenvalue index
         int iter = 0;
         while (n >= low) {
-            
+
             // Look for single small sub-diagonal element
             int l = n;
             while (l > low) {
@@ -316,19 +316,19 @@ private:
                 }
                 l--;
             }
-            
+
             // Check for convergence
             // One root found
-            
+
             if (l == n) {
                 H[n][n] = H[n][n] + exshift;
                 d[n] = H[n][n];
                 e[n] = 0.0;
                 n--;
                 iter = 0;
-                
+
                 // Two roots found
-                
+
             } else if (l == n - 1) {
                 w = H[n][n - 1] * H[n - 1][n];
                 p = (H[n - 1][n - 1] - H[n][n]) / 2.0;
@@ -337,9 +337,9 @@ private:
                 H[n][n] = H[n][n] + exshift;
                 H[n - 1][n - 1] = H[n - 1][n - 1] + exshift;
                 x = H[n][n];
-                
+
                 // Real pair
-                
+
                 if (q >= 0) {
                     if (p >= 0) {
                         z = p + z;
@@ -360,33 +360,33 @@ private:
                     r = sqrt(p * p + q * q);
                     p = p / r;
                     q = q / r;
-                    
+
                     // Row modification
-                    
+
                     for (int j = n - 1; j < nn; j++) {
                         z = H[n - 1][j];
                         H[n - 1][j] = q * z + p * H[n][j];
                         H[n][j] = q * H[n][j] - p * z;
                     }
-                    
+
                     // Column modification
-                    
+
                     for (int i = 0; i <= n; i++) {
                         z = H[i][n - 1];
                         H[i][n - 1] = q * z + p * H[i][n];
                         H[i][n] = q * H[i][n] - p * z;
                     }
-                    
+
                     // Accumulate transformations
-                    
+
                     for (int i = low; i <= high; i++) {
                         z = V[i][n - 1];
                         V[i][n - 1] = q * z + p * V[i][n];
                         V[i][n] = q * V[i][n] - p * z;
                     }
-                    
+
                     // Complex pair
-                    
+
                 } else {
                     d[n - 1] = x + p;
                     d[n] = x + p;
@@ -395,13 +395,13 @@ private:
                 }
                 n = n - 2;
                 iter = 0;
-                
+
                 // No convergence yet
-                
+
             } else {
-                
+
                 // Form shift
-                
+
                 x = H[n][n];
                 y = 0.0;
                 w = 0.0;
@@ -409,9 +409,9 @@ private:
                     y = H[n - 1][n - 1];
                     w = H[n][n - 1] * H[n - 1][n];
                 }
-                
+
                 // Wilkinson's original ad hoc shift
-                
+
                 if (iter == 10) {
                     exshift += x;
                     for (int i = low; i <= n; i++) {
@@ -421,9 +421,9 @@ private:
                     x = y = 0.75 * s;
                     w = -0.4375 * s * s;
                 }
-                
+
                 // MATLAB's new ad hoc shift
-                
+
                 if (iter == 30) {
                     s = (y - x) / 2.0;
                     s = s * s + w;
@@ -440,9 +440,9 @@ private:
                         x = y = w = 0.964;
                     }
                 }
-                
+
                 iter = iter + 1; // (Could check iteration count here.)
-                
+
                 // Look for two consecutive small sub-diagonal elements
                 int m = n - 2;
                 while (m >= l) {
@@ -466,16 +466,16 @@ private:
                     }
                     m--;
                 }
-                
+
                 for (int i = m + 2; i <= n; i++) {
                     H[i][i - 2] = 0.0;
                     if (i > m + 2) {
                         H[i][i - 3] = 0.0;
                     }
                 }
-                
+
                 // Double QR step involving rows l:n and columns m:n
-                
+
                 for (int k = m; k <= n - 1; k++) {
                     bool notlast = (k != n - 1);
                     if (k != m) {
@@ -508,9 +508,9 @@ private:
                         z = r / s;
                         q = q / p;
                         r = r / p;
-                        
+
                         // Row modification
-                        
+
                         for (int j = k; j < nn; j++) {
                             p = H[k][j] + q * H[k + 1][j];
                             if (notlast) {
@@ -520,9 +520,9 @@ private:
                             H[k][j] = H[k][j] - p * x;
                             H[k + 1][j] = H[k + 1][j] - p * y;
                         }
-                        
+
                         // Column modification
-                        
+
                         for (int i = 0; i <= min(n, k + 3); i++) {
                             p = x * H[i][k] + y * H[i][k + 1];
                             if (notlast) {
@@ -532,9 +532,9 @@ private:
                             H[i][k] = H[i][k] - p;
                             H[i][k + 1] = H[i][k + 1] - p * q;
                         }
-                        
+
                         // Accumulate transformations
-                        
+
                         for (int i = low; i <= high; i++) {
                             p = x * V[i][k] + y * V[i][k + 1];
                             if (notlast) {
@@ -548,19 +548,19 @@ private:
                 } // k loop
             } // check convergence
         } // while (n >= low)
-        
+
         // Backsubstitute to find vectors of upper triangular form
-        
+
         if (norm == 0.0) {
             return;
         }
-        
+
         for (n = nn - 1; n >= 0; n--) {
             p = d[n];
             q = e[n];
-            
+
             // Real vector
-            
+
             if (q == 0) {
                 int l = n;
                 H[n][n] = 1.0;
@@ -581,9 +581,9 @@ private:
                             } else {
                                 H[i][n] = -r / (eps * norm);
                             }
-                            
+
                             // Solve real equations
-                            
+
                         } else {
                             x = H[i][i + 1];
                             y = H[i + 1][i];
@@ -596,9 +596,9 @@ private:
                                 H[i + 1][n] = (-s - y * t) / z;
                             }
                         }
-                        
+
                         // Overflow control
-                        
+
                         t = std::abs(H[i][n]);
                         if ((eps * t) * t > 1) {
                             for (int j = i; j <= n; j++) {
@@ -607,14 +607,14 @@ private:
                         }
                     }
                 }
-                
+
                 // Complex vector
-                
+
             } else if (q < 0) {
                 int l = n - 1;
-                
+
                 // Last vector component imaginary so matrix is triangular
-                
+
                 if (std::abs(H[n][n - 1]) > std::abs(H[n - 1][n])) {
                     H[n - 1][n - 1] = q / H[n][n - 1];
                     H[n - 1][n] = -(H[n][n] - p) / H[n][n - 1];
@@ -634,7 +634,7 @@ private:
                         sa = sa + H[i][j] * H[j][n];
                     }
                     w = H[i][i] - p;
-                    
+
                     if (e[i] < 0.0) {
                         z = w;
                         r = ra;
@@ -646,9 +646,9 @@ private:
                             H[i][n - 1] = cdivr;
                             H[i][n] = cdivi;
                         } else {
-                            
+
                             // Solve complex equations
-                            
+
                             x = H[i][i + 1];
                             y = H[i + 1][i];
                             vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
@@ -673,9 +673,9 @@ private:
                                 H[i + 1][n] = cdivi;
                             }
                         }
-                        
+
                         // Overflow control
-                        
+
                         t = max(std::abs(H[i][n - 1]), std::abs(H[i][n]));
                         if ((eps * t) * t > 1) {
                             for (int j = i; j <= n; j++) {
@@ -687,9 +687,9 @@ private:
                 }
             }
         }
-        
+
         // Vectors of isolated roots
-        
+
         for (int i = 0; i < nn; i++) {
             if (i < low || i > high) {
                 for (int j = i; j < nn; j++) {
@@ -697,9 +697,9 @@ private:
                 }
             }
         }
-        
+
         // Back transformation to get eigenvectors of original matrix
-        
+
         for (int j = nn - 1; j >= low; j--) {
             for (int i = low; i <= high; i++) {
                 z = 0.0;
@@ -710,7 +710,7 @@ private:
             }
         }
     }
-    
+
     // Nonsymmetric reduction to Hessenberg form.
     void orthes() {
         //  This is derived from the Algol procedures orthes and ortran,
@@ -719,19 +719,19 @@ private:
         //  Fortran subroutines in EISPACK.
         int low = 0;
         int high = n - 1;
-        
+
         for (int m = low + 1; m <= high - 1; m++) {
-            
+
             // Scale column.
-            
+
             double scale = 0.0;
             for (int i = m; i <= high; i++) {
                 scale = scale + std::abs(H[i][m - 1]);
             }
             if (scale != 0.0) {
-                
+
                 // Compute Householder transformation.
-                
+
                 double h = 0.0;
                 for (int i = high; i >= m; i--) {
                     ort[i] = H[i][m - 1] / scale;
@@ -743,10 +743,10 @@ private:
                 }
                 h = h - ort[m] * g;
                 ort[m] = ort[m] - g;
-                
+
                 // Apply Householder similarity transformation
                 // H = (I-u*u'/h)*H*(I-u*u')/h)
-                
+
                 for (int j = m; j < n; j++) {
                     double f = 0.0;
                     for (int i = high; i >= m; i--) {
@@ -757,7 +757,7 @@ private:
                         H[i][j] -= f * ort[i];
                     }
                 }
-                
+
                 for (int i = 0; i <= high; i++) {
                     double f = 0.0;
                     for (int j = high; j >= m; j--) {
@@ -772,15 +772,15 @@ private:
                 H[m][m - 1] = scale * g;
             }
         }
-        
+
         // Accumulate transformations (Algol's ortran).
-        
+
         for (int i = 0; i < n; i++) {
             for (int j = 0; j < n; j++) {
                 V[i][j] = (i == j ? 1.0 : 0.0);
             }
         }
-        
+
         for (int m = high - 1; m >= low + 1; m--) {
             if (H[m][m - 1] != 0.0) {
                 for (int i = m + 1; i <= high; i++) {
@@ -800,7 +800,7 @@ private:
             }
         }
     }
-    
+
     // Releases all internal working memory.
     void release() {
         // releases the working data
@@ -814,7 +814,7 @@ private:
         delete[] H;
         delete[] V;
     }
-    
+
     // Computes the Eigenvalue Decomposition for a matrix given in H.
     void compute() {
         // Allocate memory for the working data.
@@ -839,11 +839,11 @@ private:
         // Deallocate the memory by releasing all internal working data.
         release();
     }
-    
+
 public:
     EigenvalueDecomposition()
     : n(0) { }
-    
+
     // Initializes & computes the Eigenvalue Decomposition for a general matrix
     // given in src. This function is a port of the EigenvalueSolver in JAMA,
     // which has been released to public domain by The MathWorks and the
@@ -851,7 +851,7 @@ public:
     EigenvalueDecomposition(InputArray src) {
         compute(src);
     }
-    
+
     // This function computes the Eigenvalue Decomposition for a general matrix
     // given in src. This function is a port of the EigenvalueSolver in JAMA,
     // which has been released to public domain by The MathWorks and the
@@ -883,9 +883,9 @@ public:
             compute();
         }
     }
-    
+
     ~EigenvalueDecomposition() {}
-    
+
     // Returns the eigenvalues of the Eigenvalue Decomposition.
     Mat eigenvalues() {    return _eigenvalues; }
     // Returns the eigenvectors of the Eigenvalue Decomposition.
@@ -1045,6 +1045,6 @@ Mat LDA::project(InputArray src) {
 Mat LDA::reconstruct(InputArray src) {
    return subspaceReconstruct(_eigenvectors, Mat(), _dataAsRow ? src : src.getMat().t());
 }
-    
+
 }
 
diff --git a/modules/contrib/src/octree.cpp b/modules/contrib/src/octree.cpp
index e62cfb8..cb0f12e 100644
 #include "precomp.hpp"
 #include <limits>
 
-namespace cv
+namespace
 {
+    using namespace cv;
     const size_t MAX_STACK_SIZE = 255;
     const size_t MAX_LEAFS = 8;
-    
+
     bool checkIfNodeOutsideSphere(const Octree::Node& node, const Point3f& c, float r)
     {
         if (node.x_max < (c.x - r) ||  node.y_max < (c.y - r) || node.z_max < (c.z - r))
             return true;
-            
+
         if ((c.x + r) < node.x_min || (c.y + r) < node.y_min || (c.z + r) < node.z_min)
             return true;
-            
+
         return false;
     }
-    
+
     bool checkIfNodeInsideSphere(const Octree::Node& node, const Point3f& c, float r)
     {
         r *= r;
-        
+
         float d2_xmin = (node.x_min - c.x) * (node.x_min - c.x);
         float d2_ymin = (node.y_min - c.y) * (node.y_min - c.y);
         float d2_zmin = (node.z_min - c.z) * (node.z_min - c.z);
-        
+
         if (d2_xmin + d2_ymin + d2_zmin > r)
             return false;
-            
+
         float d2_zmax = (node.z_max - c.z) * (node.z_max - c.z);
-        
+
         if (d2_xmin + d2_ymin + d2_zmax > r)
             return false;
-            
+
         float d2_ymax = (node.y_max - c.y) * (node.y_max - c.y);
-        
+
         if (d2_xmin + d2_ymax + d2_zmin > r)
             return false;
-            
+
         if (d2_xmin + d2_ymax + d2_zmax > r)
             return false;
-            
+
         float d2_xmax = (node.x_max - c.x) * (node.x_max - c.x);
-        
+
         if (d2_xmax + d2_ymin + d2_zmin > r)
             return false;
-            
+
         if (d2_xmax + d2_ymin + d2_zmax > r)
             return false;
-            
+
         if (d2_xmax + d2_ymax + d2_zmin > r)
             return false;
-            
+
         if (d2_xmax + d2_ymax + d2_zmax > r)
             return false;
-            
+
         return true;
     }
-    
+
     void fillMinMax(const vector<Point3f>& points, Octree::Node& node)
     {
         node.x_max = node.y_max = node.z_max = std::numeric_limits<float>::min();
         node.x_min = node.y_min = node.z_min = std::numeric_limits<float>::max();
-        
+
         for (size_t i = 0; i < points.size(); ++i)
         {
             const Point3f& point = points[i];
-            
+
             if (node.x_max < point.x)
                 node.x_max = point.x;
-                
+
             if (node.y_max < point.y)
                 node.y_max = point.y;
-                
+
             if (node.z_max < point.z)
                 node.z_max = point.z;
-                
+
             if (node.x_min > point.x)
                 node.x_min = point.x;
-                
+
             if (node.y_min > point.y)
                 node.y_min = point.y;
-                
+
             if (node.z_min > point.z)
                 node.z_min = point.z;
         }
     }
-    
+
     size_t findSubboxForPoint(const Point3f& point, const Octree::Node& node)
     {
         size_t ind_x = point.x < (node.x_max + node.x_min) / 2 ? 0 : 1;
         size_t ind_y = point.y < (node.y_max + node.y_min) / 2 ? 0 : 1;
         size_t ind_z = point.z < (node.z_max + node.z_min) / 2 ? 0 : 1;
-        
+
         return (ind_x << 2) + (ind_y << 1) + (ind_z << 0);
     }
     void initChildBox(const Octree::Node& parent, size_t boxIndex, Octree::Node& child)
@@ -142,58 +143,61 @@ namespace cv
         child.x_min = child.x_max = (parent.x_max + parent.x_min) / 2;
         child.y_min = child.y_max = (parent.y_max + parent.y_min) / 2;
         child.z_min = child.z_max = (parent.z_max + parent.z_min) / 2;
-        
+
         if ((boxIndex >> 0) & 1)
             child.z_max = parent.z_max;
         else
             child.z_min = parent.z_min;
-            
+
         if ((boxIndex >> 1) & 1)
             child.y_max = parent.y_max;
         else
             child.y_min = parent.y_min;
-            
+
         if ((boxIndex >> 2) & 1)
             child.x_max = parent.x_max;
         else
             child.x_min = parent.x_min;
     }
-    
+
+}//namespace
+
 ////////////////////////////////////////////////////////////////////////////////////////
 ///////////////////////////       Octree       //////////////////////////////////////
 ////////////////////////////////////////////////////////////////////////////////////////
-
+namespace cv
+{
     Octree::Octree()
     {
     }
-    
+
     Octree::Octree(const vector<Point3f>& points3d, int maxLevels, int minPoints)
     {
         buildTree(points3d, maxLevels, minPoints);
     }
-    
+
     Octree::~Octree()
     {
     }
-    
+
     void Octree::getPointsWithinSphere(const Point3f& center, float radius, vector<Point3f>& out) const
     {
         out.clear();
-        
+
         if (nodes.empty())
             return;
-            
+
         int stack[MAX_STACK_SIZE];
         int pos = 0;
         stack[pos] = 0;
-        
+
         while (pos >= 0)
         {
             const Node& cur = nodes[stack[pos--]];
-            
+
             if (checkIfNodeOutsideSphere(cur, center, radius))
                 continue;
-                
+
             if (checkIfNodeInsideSphere(cur, center, radius))
             {
                 size_t sz = out.size();
@@ -202,133 +206,133 @@ namespace cv
                     out[sz++] = points[i];
                 continue;
             }
-            
+
             if (cur.isLeaf)
             {
                 double r2 = radius * radius;
                 size_t sz = out.size();
                 out.resize(sz + (cur.end - cur.begin));
-                
+
                 for (int i = cur.begin; i < cur.end; ++i)
                 {
                     const Point3f& point = points[i];
-                    
+
                     double dx = (point.x - center.x);
                     double dy = (point.y - center.y);
                     double dz = (point.z - center.z);
-                    
+
                     double dist2 = dx * dx + dy * dy + dz * dz;
-                    
+
                     if (dist2 < r2)
                         out[sz++] = point;
                 };
                 out.resize(sz);
                 continue;
             }
-            
+
             if (cur.children[0])
                 stack[++pos] = cur.children[0];
-                
+
             if (cur.children[1])
                 stack[++pos] = cur.children[1];
-                
+
             if (cur.children[2])
                 stack[++pos] = cur.children[2];
-                
+
             if (cur.children[3])
                 stack[++pos] = cur.children[3];
-                
+
             if (cur.children[4])
                 stack[++pos] = cur.children[4];
-                
+
             if (cur.children[5])
                 stack[++pos] = cur.children[5];
-                
+
             if (cur.children[6])
                 stack[++pos] = cur.children[6];
-                
+
             if (cur.children[7])
                 stack[++pos] = cur.children[7];
         }
     }
-    
+
     void Octree::buildTree(const vector<Point3f>& points3d, int maxLevels, int minPoints)
     {
         assert((size_t)maxLevels * 8 < MAX_STACK_SIZE);
         points.resize(points3d.size());
         std::copy(points3d.begin(), points3d.end(), points.begin());
         this->minPoints = minPoints;
-        
+
         nodes.clear();
         nodes.push_back(Node());
         Node& root = nodes[0];
         fillMinMax(points, root);
-        
+
         root.isLeaf = true;
         root.maxLevels = maxLevels;
         root.begin = 0;
         root.end = (int)points.size();
         for (size_t i = 0; i < MAX_LEAFS; i++)
             root.children[i] = 0;
-            
+
         if (maxLevels != 1 && (root.end - root.begin) > minPoints)
         {
             root.isLeaf = false;
             buildNext(0);
         }
     }
-    
+
     void  Octree::buildNext(size_t nodeInd)
     {
         size_t size = nodes[nodeInd].end - nodes[nodeInd].begin;
-        
+
         vector<size_t> boxBorders(MAX_LEAFS+1, 0);
         vector<size_t> boxIndices(size);
         vector<Point3f> tempPoints(size);
-        
+
         for (int i = nodes[nodeInd].begin, j = 0; i < nodes[nodeInd].end; ++i, ++j)
         {
             const Point3f& p = points[i];
-            
+
             size_t subboxInd = findSubboxForPoint(p, nodes[nodeInd]);
-            
+
             boxBorders[subboxInd+1]++;
             boxIndices[j] = subboxInd;
             tempPoints[j] = p;
         }
-        
+
         for (size_t i = 1; i < boxBorders.size(); ++i)
             boxBorders[i] += boxBorders[i-1];
-            
+
         vector<size_t> writeInds(boxBorders.begin(), boxBorders.end());
-        
+
         for (size_t i = 0; i < size; ++i)
         {
             size_t boxIndex = boxIndices[i];
             Point3f& curPoint = tempPoints[i];
-            
+
             size_t copyTo = nodes[nodeInd].begin + writeInds[boxIndex]++;
             points[copyTo] = curPoint;
         }
-        
+
         for (size_t i = 0; i < MAX_LEAFS; ++i)
         {
             if (boxBorders[i] == boxBorders[i+1])
                 continue;
-                
+
             nodes.push_back(Node());
             Node& child = nodes.back();
             initChildBox(nodes[nodeInd], i, child);
-            
+
             child.isLeaf = true;
             child.maxLevels = nodes[nodeInd].maxLevels - 1;
             child.begin = nodes[nodeInd].begin + (int)boxBorders[i+0];
             child.end   = nodes[nodeInd].begin + (int)boxBorders[i+1];
             for (size_t k = 0; k < MAX_LEAFS; k++)
                 child.children[k] = 0;
-                
+
             nodes[nodeInd].children[i] = (int)(nodes.size() - 1);
-            
+
             if (child.maxLevels != 1 && (child.end - child.begin) > minPoints)
             {
                 child.isLeaf = false;
@@ -336,5 +340,5 @@ namespace cv
             }
         }
     }
-    
+
 }
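
The octree change uses the other spelling of the same idea: the implementation helpers (checkIfNodeOutsideSphere, fillMinMax, and friends) move out of namespace cv into an unnamed namespace, which gives them internal linkage without marking each one static, while the public Octree methods stay in namespace cv. A compilable miniature of that layout, with made-up names standing in for the real ones:

    // tree_demo.cpp -- illustrative only
    #include <vector>
    #include <cstddef>

    namespace demo
    {
        struct Node { int value; };
        int countPositive(const std::vector<Node>& nodes);   // public API, normally declared in a header
    }

    namespace   // unnamed namespace: everything here has internal linkage
    {
        using namespace demo;

        bool isPositive(const Node& n)   // file-local helper: no warning, no exported symbol
        {
            return n.value > 0;
        }
    } // namespace

    namespace demo
    {
        int countPositive(const std::vector<Node>& nodes)
        {
            int c = 0;
            for (std::size_t i = 0; i < nodes.size(); ++i)
                if (isPositive(nodes[i]))
                    ++c;
            return c;
        }
    } // namespace demo
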
diff --git a/modules/contrib/src/precomp.hpp b/modules/contrib/src/precomp.hpp
index 1f0ef9b..0a1cb10 100644
 #ifndef __OPENCV_PRECOMP_H__
 #define __OPENCV_PRECOMP_H__
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4251 4710 4711 4514 4996 )
 #endif
 
-#ifdef HAVE_CVCONFIG_H 
+#ifdef HAVE_CVCONFIG_H
 #include "cvconfig.h"
 #endif
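
The _MSC_VER hunk above is another pattern that repeats throughout the patch: a bare #if _MSC_VER >= 1200 evaluates an undefined macro when building with gcc, which -Wundef (presumably part of the stricter warning set) reports, whereas adding the defined-guard short-circuits the test. A small illustration with a hypothetical macro:

    /* version_check.hpp -- hypothetical header; compile with g++ -Wundef */

    /* Unguarded form: when MYLIB_API_VERSION is not defined at all,
       -Wundef warns that the macro is undefined (it still evaluates to 0). */
    #if MYLIB_API_VERSION >= 2
    #  define MYLIB_HAVE_NEW_API 1
    #endif
    #undef MYLIB_HAVE_NEW_API

    /* Guarded form: 'defined' short-circuits, the undefined macro is never
       evaluated, and the warning disappears on every compiler.             */
    #if defined MYLIB_API_VERSION && MYLIB_API_VERSION >= 2
    #  define MYLIB_HAVE_NEW_API 1
    #else
    #  define MYLIB_HAVE_NEW_API 0
    #endif
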
 
diff --git a/modules/core/include/opencv2/core/core.hpp b/modules/core/include/opencv2/core/core.hpp
index 1e005c3..101f7b0 100644
@@ -1299,6 +1299,7 @@ public:
         GPU_MAT           = 9 << KIND_SHIFT
     };
     _InputArray();
+
     _InputArray(const Mat& m);
     _InputArray(const MatExpr& expr);
     template<typename _Tp> _InputArray(const _Tp* vec, int n);
@@ -1328,6 +1329,8 @@ public:
     virtual int channels(int i=-1) const;
     virtual bool empty() const;
 
+    virtual ~_InputArray();
+
     int flags;
     void* obj;
     Size sz;
@@ -1384,6 +1387,8 @@ public:
     virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const;
     virtual void release() const;
     virtual void clear() const;
+
+    virtual ~_OutputArray();
 };
 
 typedef const _InputArray& InputArray;
@@ -3977,7 +3982,7 @@ public:
     CV_WRAP virtual bool isOpened() const;
     //! closes the file and releases all the memory buffers
     CV_WRAP virtual void release();
-    //! closes the file, releases all the memory buffers and returns the text string    
+    //! closes the file, releases all the memory buffers and returns the text string
     CV_WRAP virtual string releaseAndGetString();
 
     //! returns the first element of the top-level mapping
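
The destructors added to _InputArray and _OutputArray follow the usual rule for polymorphic classes: both already have virtual member functions, so deleting a derived proxy through a base pointer is only well-defined once the destructor is virtual, and gcc's -Wnon-virtual-dtor family of warnings points at exactly that situation. A generic sketch of the rule (Shape and Square are invented for the example):

    // vdtor_demo.cpp -- why a polymorphic base wants a virtual destructor
    #include <cstdio>

    class Shape
    {
    public:
        virtual double area() const { return 0.0; }
        virtual ~Shape() {}              // drop 'virtual' here and 'delete s' below is undefined behaviour
    };

    class Square : public Shape
    {
    public:
        explicit Square(double s) : side(s) {}
        virtual double area() const { return side * side; }
        virtual ~Square() { std::printf("~Square\n"); }
    private:
        double side;
    };

    int main()
    {
        Shape* s = new Square(2.0);
        std::printf("area = %g\n", s->area());
        delete s;                        // runs ~Square() only because ~Shape() is virtual
        return 0;
    }
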
diff --git a/modules/core/include/opencv2/core/internal.hpp b/modules/core/include/opencv2/core/internal.hpp
index d0b3cd4..39f8292 100644
 #endif
 
 #if defined WIN32 || defined WINCE
-#ifndef _WIN32_WINNT         // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?)
-#define _WIN32_WINNT 0x0400  // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx
-#endif
-#include <windows.h>
-#undef small
-#undef min
-#undef max
+#  ifndef _WIN32_WINNT         // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?)
+#    define _WIN32_WINNT 0x0400  // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx
+#  endif
+#  include <windows.h>
+#  undef small
+#  undef min
+#  undef max
 #else
-#include <pthread.h>
+#  include <pthread.h>
 #endif
 
 #ifdef __BORLANDC__
-#ifndef WIN32
-    #define     WIN32
-#endif
-#ifndef _WIN32
-    #define     _WIN32
-#endif
-    #define     CV_DLL
-    #undef      _CV_ALWAYS_PROFILE_
-    #define     _CV_ALWAYS_NO_PROFILE_
+#  ifndef WIN32
+#    define WIN32
+#  endif
+#  ifndef _WIN32
+#    define _WIN32
+#  endif
+#  define CV_DLL
+#  undef _CV_ALWAYS_PROFILE_
+#  define _CV_ALWAYS_NO_PROFILE_
 #endif
 
 #ifndef FALSE
-#define FALSE 0
+#  define FALSE 0
 #endif
 #ifndef TRUE
-#define TRUE 1
+#  define TRUE 1
 #endif
 
 #define __BEGIN__ __CV_BEGIN__
@@ -95,7 +95,7 @@
 #define EXIT __CV_EXIT__
 
 #ifdef HAVE_IPP
-#include "ipp.h"
+#  include "ipp.h"
 
 CV_INLINE IppiSize ippiSize(int width, int height)
 {
@@ -104,137 +104,132 @@ CV_INLINE IppiSize ippiSize(int width, int height)
 }
 #endif
 
-#if defined __SSE2__ || _MSC_VER >= 1300
-#include "emmintrin.h"
-#define CV_SSE 1
-#define CV_SSE2 1
-#if defined __SSE3__ || _MSC_VER >= 1500
-#include "pmmintrin.h"
-#define CV_SSE3 1
-#endif
-#if defined __SSSE3__
-#include "tmmintrin.h"
-#define CV_SSSE3 1
-#endif
+#if defined __SSE2__ || (defined _MSC_VER && _MSC_VER >= 1300)
+#  include "emmintrin.h"
+#  define CV_SSE 1
+#  define CV_SSE2 1
+#  if defined __SSE3__ || (defined _MSC_VER && _MSC_VER >= 1500)
+#    include "pmmintrin.h"
+#    define CV_SSE3 1
+#  else
+#    define CV_SSE3 0
+#  endif
+#  if defined __SSSE3__
+#    include "tmmintrin.h"
+#    define CV_SSSE3 1
+#  else
+#    define CV_SSSE3 0
+#  endif
 #else
-#define CV_SSE 0
-#define CV_SSE2 0
-#define CV_SSE3 0
-#define CV_SSSE3 0
+#  define CV_SSE 0
+#  define CV_SSE2 0
+#  define CV_SSE3 0
+#  define CV_SSSE3 0
 #endif
 
-#if defined ANDROID && defined __ARM_NEON__ && defined __GNUC__
-#include "arm_neon.h"
-#define CV_NEON 1
+#if defined ANDROID && defined __ARM_NEON__
+#  include "arm_neon.h"
+#  define CV_NEON 1
 
-#define CPU_HAS_NEON_FEATURE (true)
+#  define CPU_HAS_NEON_FEATURE (true)
 //TODO: make real check using stuff from "cpu-features.h"
 //((bool)android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON)
 #else
-#define CV_NEON 0
-#define CPU_HAS_NEON_FEATURE (false)
-#endif
-
-#ifdef CV_ICC
-#define CV_ENABLE_UNROLLED 0
-#else
-#define CV_ENABLE_UNROLLED 1
+#  define CV_NEON 0
+#  define CPU_HAS_NEON_FEATURE (false)
 #endif
 
 #ifndef IPPI_CALL
-#define IPPI_CALL(func) CV_Assert((func) >= 0)
+#  define IPPI_CALL(func) CV_Assert((func) >= 0)
 #endif
 
 #ifdef HAVE_TBB
-    #include "tbb/tbb_stddef.h"
-    #if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202
-        #include "tbb/tbb.h"
-        #include "tbb/task.h"
-        #undef min
-        #undef max
-    #else
-        #undef HAVE_TBB
-    #endif
+#  include "tbb/tbb_stddef.h"
+#  if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202
+#    include "tbb/tbb.h"
+#    include "tbb/task.h"
+#    undef min
+#    undef max
+#  else
+#    undef HAVE_TBB
+#  endif
 #endif
 
 #ifdef HAVE_EIGEN
-    #include <Eigen/Core>
-    #include "opencv2/core/eigen.hpp"
+#  include <Eigen/Core>
+#  include "opencv2/core/eigen.hpp"
 #endif
 
 #ifdef __cplusplus
 
+namespace cv
+{
 #ifdef HAVE_TBB
-    namespace cv
+
+    typedef tbb::blocked_range<int> BlockedRange;
+
+    template<typename Body> static inline
+    void parallel_for( const BlockedRange& range, const Body& body )
     {
-        typedef tbb::blocked_range<int> BlockedRange;
-        
-        template<typename Body> static inline
-        void parallel_for( const BlockedRange& range, const Body& body )
-        {
-            tbb::parallel_for(range, body);
-        }
-        
-        template<typename Iterator, typename Body> static inline
-        void parallel_do( Iterator first, Iterator last, const Body& body )
-        {
-            tbb::parallel_do(first, last, body);
-        }
-        
-        typedef tbb::split Split;
-        
-        template<typename Body> static inline
-        void parallel_reduce( const BlockedRange& range, Body& body )
-        {
-            tbb::parallel_reduce(range, body);
-        }
-        
-        typedef tbb::concurrent_vector<Rect> ConcurrentRectVector;
-        typedef tbb::concurrent_vector<double> ConcurrentDoubleVector;
+        tbb::parallel_for(range, body);
     }
+
+    template<typename Iterator, typename Body> static inline
+    void parallel_do( Iterator first, Iterator last, const Body& body )
+    {
+        tbb::parallel_do(first, last, body);
+    }
+
+    typedef tbb::split Split;
+
+    template<typename Body> static inline
+    void parallel_reduce( const BlockedRange& range, Body& body )
+    {
+        tbb::parallel_reduce(range, body);
+    }
+
+    typedef tbb::concurrent_vector<Rect> ConcurrentRectVector;
+    typedef tbb::concurrent_vector<double> ConcurrentDoubleVector;
 #else
-    namespace cv
+    class BlockedRange
+    {
+    public:
+        BlockedRange() : _begin(0), _end(0), _grainsize(0) {}
+        BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {}
+        int begin() const { return _begin; }
+        int end() const { return _end; }
+        int grainsize() const { return _grainsize; }
+
+    protected:
+        int _begin, _end, _grainsize;
+    };
+
+    template<typename Body> static inline
+    void parallel_for( const BlockedRange& range, const Body& body )
     {
-        class BlockedRange
-        {
-        public:
-            BlockedRange() : _begin(0), _end(0), _grainsize(0) {}
-            BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {}
-            int begin() const { return _begin; }
-            int end() const { return _end; }
-            int grainsize() const { return _grainsize; }
-            
-        protected:
-            int _begin, _end, _grainsize;
-        };
-
-        template<typename Body> static inline
-        void parallel_for( const BlockedRange& range, const Body& body )
-        {
-            body(range); 
-        }
-        typedef std::vector<Rect> ConcurrentRectVector;
-        typedef std::vector<double> ConcurrentDoubleVector;
-        
-        template<typename Iterator, typename Body> static inline
-        void parallel_do( Iterator first, Iterator last, const Body& body )
-        {
-            for( ; first != last; ++first )
-                body(*first);
-        }
-        
-        class Split {};
-        
-        template<typename Body> static inline
-        void parallel_reduce( const BlockedRange& range, Body& body )
-        {
-            body(range);
-        }
-        
+        body(range);
+    }
+    typedef std::vector<Rect> ConcurrentRectVector;
+    typedef std::vector<double> ConcurrentDoubleVector;
+
+    template<typename Iterator, typename Body> static inline
+    void parallel_do( Iterator first, Iterator last, const Body& body )
+    {
+        for( ; first != last; ++first )
+            body(*first);
+    }
+
+    class Split {};
+
+    template<typename Body> static inline
+    void parallel_reduce( const BlockedRange& range, Body& body )
+    {
+        body(range);
     }
 #endif
+} //namespace cv
 
-    #define CV_INIT_ALGORITHM(classname, algname, memberinit) \
+#define CV_INIT_ALGORITHM(classname, algname, memberinit) \
     static Algorithm* create##classname() \
     { \
         return new classname; \
@@ -261,7 +256,7 @@ CV_INLINE IppiSize ippiSize(int width, int height)
         return &classname##_info(); \
     }
 
-#endif
+#endif //__cplusplus
 
 /* maximal size of vector to run matrix operations on it inline (i.e. w/o ipp calls) */
 #define  CV_MAX_INLINE_MAT_OP_SIZE  10
@@ -305,9 +300,9 @@ CV_INLINE IppiSize ippiSize(int width, int height)
 #define  CV_MAX_STRLEN  1024
 
 #if 0 /*def  CV_CHECK_FOR_NANS*/
-    #define CV_CHECK_NANS( arr ) cvCheckArray((arr))
+#  define CV_CHECK_NANS( arr ) cvCheckArray((arr))
 #else
-    #define CV_CHECK_NANS( arr )
+#  define CV_CHECK_NANS( arr )
 #endif
 
 /****************************************************************************************\
@@ -316,38 +311,38 @@ CV_INLINE IppiSize ippiSize(int width, int height)
 
 /* get alloca declaration */
 #ifdef __GNUC__
-    #undef alloca
-    #define alloca __builtin_alloca
-    #define CV_HAVE_ALLOCA 1
+#  undef alloca
+#  define alloca __builtin_alloca
+#  define CV_HAVE_ALLOCA 1
 #elif defined WIN32 || defined _WIN32 || \
       defined WINCE || defined _MSC_VER || defined __BORLANDC__
-    #include <malloc.h>
-    #define CV_HAVE_ALLOCA 1
+#  include <malloc.h>
+#  define CV_HAVE_ALLOCA 1
 #elif defined HAVE_ALLOCA_H
-    #include <alloca.h>
-    #define CV_HAVE_ALLOCA 1
+#  include <alloca.h>
+#  define CV_HAVE_ALLOCA 1
 #elif defined HAVE_ALLOCA
-    #include <stdlib.h>
-    #define CV_HAVE_ALLOCA 1
+#  include <stdlib.h>
+#  define CV_HAVE_ALLOCA 1
 #else
-    #undef CV_HAVE_ALLOCA
+#  undef CV_HAVE_ALLOCA
 #endif
 
 #ifdef __GNUC__
-#define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
+#  define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
 #elif defined _MSC_VER
-#define CV_DECL_ALIGNED(x) __declspec(align(x))
+#  define CV_DECL_ALIGNED(x) __declspec(align(x))
 #else
-#define CV_DECL_ALIGNED(x)
+#  define CV_DECL_ALIGNED(x)
 #endif
 
 #if CV_HAVE_ALLOCA
 /* ! DO NOT make it an inline function */
-#define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN )
+#  define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN )
 #endif
 
 #ifndef CV_IMPL
-#define CV_IMPL CV_EXTERN_C
+#  define CV_IMPL CV_EXTERN_C
 #endif
 
 #define CV_DBG_BREAK() { volatile int* crashMe = 0; *crashMe = 0; }
@@ -687,25 +682,25 @@ typedef enum CvStatus
     CV_UNSUPPORTED_DEPTH_ERR    = -101,
     CV_UNSUPPORTED_FORMAT_ERR   = -100,
 
-    CV_BADARG_ERR      = -49,  //ipp comp
-    CV_NOTDEFINED_ERR  = -48,  //ipp comp
-
-    CV_BADCHANNELS_ERR = -47,  //ipp comp
-    CV_BADRANGE_ERR    = -44,  //ipp comp
-    CV_BADSTEP_ERR     = -29,  //ipp comp
-
-    CV_BADFLAG_ERR     =  -12,
-    CV_DIV_BY_ZERO_ERR =  -11, //ipp comp
-    CV_BADCOEF_ERR     =  -10,
-
-    CV_BADFACTOR_ERR   =  -7,
-    CV_BADPOINT_ERR    =  -6,
-    CV_BADSCALE_ERR    =  -4,
-    CV_OUTOFMEM_ERR    =  -3,
-    CV_NULLPTR_ERR     =  -2,
-    CV_BADSIZE_ERR     =  -1,
-    CV_NO_ERR          =   0,
-    CV_OK              =   CV_NO_ERR
+    CV_BADARG_ERR               = -49,  //ipp comp
+    CV_NOTDEFINED_ERR           = -48,  //ipp comp
+
+    CV_BADCHANNELS_ERR          = -47,  //ipp comp
+    CV_BADRANGE_ERR             = -44,  //ipp comp
+    CV_BADSTEP_ERR              = -29,  //ipp comp
+
+    CV_BADFLAG_ERR              =  -12,
+    CV_DIV_BY_ZERO_ERR          =  -11, //ipp comp
+    CV_BADCOEF_ERR              =  -10,
+
+    CV_BADFACTOR_ERR            =  -7,
+    CV_BADPOINT_ERR             =  -6,
+    CV_BADSCALE_ERR             =  -4,
+    CV_OUTOFMEM_ERR             =  -3,
+    CV_NULLPTR_ERR              =  -2,
+    CV_BADSIZE_ERR              =  -1,
+    CV_NO_ERR                   =   0,
+    CV_OK                       =   CV_NO_ERR
 }
 CvStatus;
 
@@ -720,8 +715,7 @@ CvFuncTable;
 typedef struct CvBigFuncTable
 {
     void*   fn_2d[CV_DEPTH_MAX*4];
-}
-CvBigFuncTable;
+} CvBigFuncTable;
 
 #define CV_INIT_FUNC_TAB( tab, FUNCNAME, FLAG )         \
     (tab).fn_2d[CV_8U] = (void*)FUNCNAME##_8u##FLAG;    \
@@ -732,13 +726,14 @@ CvBigFuncTable;
     (tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG;  \
     (tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG
 
+#ifdef __cplusplus
 //! OpenGL extension table
 class CV_EXPORTS CvOpenGlFuncTab
 {
 public:
     virtual ~CvOpenGlFuncTab();
 
-    virtual void genBuffers(int n, unsigned int* buffers) const = 0;        
+    virtual void genBuffers(int n, unsigned int* buffers) const = 0;
     virtual void deleteBuffers(int n, const unsigned int* buffers) const = 0;
 
     virtual void bufferData(unsigned int target, ptrdiff_t size, const void* data, unsigned int usage) const = 0;
@@ -764,4 +759,6 @@ CV_EXPORTS bool icvCheckGlError(const char* file, const int line, const char* fu
     #define CV_CheckGlError() CV_DbgAssert( (::icvCheckGlError(__FILE__, __LINE__)) )
 #endif
 
-#endif
+#endif //__cplusplus
+
+#endif // __OPENCV_CORE_INTERNAL_HPP__
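
Beyond the cosmetic re-indentation of the nested preprocessor blocks, two functional details in this header are worth noting: the _MSC_VER tests gain the same defined-guard as above, and CV_SSE3 / CV_SSSE3 are now defined in every configuration (to 1 or 0) rather than only when the instruction set is available, so plain #if CV_SSE3 checks elsewhere in the codebase cannot trip -Wundef. A sketch of that always-define-to-0-or-1 convention, using an invented macro name:

    /* config_simd.hpp -- hypothetical config header */

    #if defined __SSE2__ || (defined _MSC_VER && _MSC_VER >= 1300)
    #  define MYLIB_SSE2 1
    #else
    #  define MYLIB_SSE2 0
    #endif

    /* Every later test can be a bare #if: the macro always exists. */
    #if MYLIB_SSE2
    #  include <emmintrin.h>
    #endif
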
diff --git a/modules/core/include/opencv2/core/operations.hpp b/modules/core/include/opencv2/core/operations.hpp
index 1d8d42f..0420a75 100644
@@ -55,7 +55,7 @@
 #if defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32)   // atomic increment on the linux version of the Intel(tm) compiler
   #define CV_XADD(addr,delta) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(addr)), delta)
 #elif defined __GNUC__
-    
+
   #if __GNUC__*10 + __GNUC_MINOR__ >= 42
 
     #if !defined WIN32 && (defined __i486__ || defined __i586__ || \
@@ -74,7 +74,7 @@
       #define CV_XADD __exchange_and_add
     #endif
   #endif
-    
+
 #elif defined WIN32 || defined _WIN32
   #define WIN32_MEAN_AND_LEAN
   #ifndef _WIN32_WINNT           // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?)
 
 #else
   static inline int CV_XADD(int* addr, int delta)
-  { int tmp = *addr; *addr += delta; return tmp; }    
+  { int tmp = *addr; *addr += delta; return tmp; }
 #endif
 
 #include <limits>
 
 namespace cv
 {
-    
+
 using std::cos;
 using std::sin;
 using std::max;
@@ -105,7 +105,7 @@ using std::log;
 using std::pow;
 using std::sqrt;
 
-    
+
 /////////////// saturate_cast (used in image & signal processing) ///////////////////
 
 template<typename _Tp> static inline _Tp saturate_cast(uchar v) { return _Tp(v); }
@@ -184,7 +184,7 @@ template<> inline int saturate_cast<int>(double v) { return cvRound(v); }
 // we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc.
 template<> inline unsigned saturate_cast<unsigned>(float v){ return cvRound(v); }
 template<> inline unsigned saturate_cast<unsigned>(double v) { return cvRound(v); }
-    
+
 inline int fast_abs(uchar v) { return v; }
 inline int fast_abs(schar v) { return std::abs((int)v); }
 inline int fast_abs(ushort v) { return v; }
@@ -284,7 +284,7 @@ template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1
     for(int i = 10; i < channels; i++) val[i] = _Tp(0);
 }
 
-    
+
 template<typename _Tp, int m, int n>
 inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
                             _Tp v4, _Tp v5, _Tp v6, _Tp v7,
@@ -349,7 +349,7 @@ template<typename _Tp, int m, int n> inline _Tp Matx<_Tp, m, n>::dot(const Matx<
     return s;
 }
 
-    
+
 template<typename _Tp, int m, int n> inline double Matx<_Tp, m, n>::ddot(const Matx<_Tp, m, n>& M) const
 {
     double s = 0;
@@ -376,7 +376,7 @@ Matx<_Tp,m,n> Matx<_Tp,m,n>::randu(_Tp a, _Tp b)
     cv::randu(matM, Scalar(a), Scalar(b));
     return M;
 }
-    
+
 template<typename _Tp, int m, int n> inline
 Matx<_Tp,m,n> Matx<_Tp,m,n>::randn(_Tp a, _Tp b)
 {
@@ -385,7 +385,7 @@ Matx<_Tp,m,n> Matx<_Tp,m,n>::randn(_Tp a, _Tp b)
     cv::randn(matM, Scalar(a), Scalar(b));
     return M;
 }
-    
+
 template<typename _Tp, int m, int n> template<typename T2>
 inline Matx<_Tp, m, n>::operator Matx<T2, m, n>() const
 {
@@ -393,7 +393,7 @@ inline Matx<_Tp, m, n>::operator Matx<T2, m, n>() const
     for( int i = 0; i < m*n; i++ ) M.val[i] = saturate_cast<T2>(val[i]);
     return M;
 }
-    
+
 
 template<typename _Tp, int m, int n> template<int m1, int n1> inline
 Matx<_Tp, m1, n1> Matx<_Tp, m, n>::reshape() const
@@ -423,7 +423,7 @@ Matx<_Tp, 1, n> Matx<_Tp, m, n>::row(int i) const
     return Matx<_Tp, 1, n>(&val[i*n]);
 }
 
-    
+
 template<typename _Tp, int m, int n> inline
 Matx<_Tp, m, 1> Matx<_Tp, m, n>::col(int j) const
 {
@@ -434,7 +434,7 @@ Matx<_Tp, m, 1> Matx<_Tp, m, n>::col(int j) const
     return v;
 }
 
-    
+
 template<typename _Tp, int m, int n> inline
 typename Matx<_Tp, m, n>::diag_type Matx<_Tp, m, n>::diag() const
 {
@@ -444,7 +444,7 @@ typename Matx<_Tp, m, n>::diag_type Matx<_Tp, m, n>::diag() const
     return d;
 }
 
-    
+
 template<typename _Tp, int m, int n> inline
 const _Tp& Matx<_Tp, m, n>::operator ()(int i, int j) const
 {
@@ -452,7 +452,7 @@ const _Tp& Matx<_Tp, m, n>::operator ()(int i, int j) const
     return this->val[i*n + j];
 }
 
-    
+
 template<typename _Tp, int m, int n> inline
 _Tp& Matx<_Tp, m, n>::operator ()(int i, int j)
 {
@@ -476,23 +476,23 @@ _Tp& Matx<_Tp, m, n>::operator ()(int i)
     return val[i];
 }
 
-    
+
 template<typename _Tp1, typename _Tp2, int m, int n> static inline
 Matx<_Tp1, m, n>& operator += (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b)
 {
     for( int i = 0; i < m*n; i++ )
         a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]);
     return a;
-}    
+}
+
 
-    
 template<typename _Tp1, typename _Tp2, int m, int n> static inline
 Matx<_Tp1, m, n>& operator -= (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b)
 {
     for( int i = 0; i < m*n; i++ )
         a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]);
     return a;
-}    
+}
 
 
 template<typename _Tp, int m, int n> inline
@@ -502,31 +502,31 @@ Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_Add
         val[i] = saturate_cast<_Tp>(a.val[i] + b.val[i]);
 }
 
-    
+
 template<typename _Tp, int m, int n> inline
 Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp)
 {
     for( int i = 0; i < m*n; i++ )
         val[i] = saturate_cast<_Tp>(a.val[i] - b.val[i]);
 }
-    
-    
+
+
 template<typename _Tp, int m, int n> template<typename _T2> inline
 Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp)
 {
     for( int i = 0; i < m*n; i++ )
         val[i] = saturate_cast<_Tp>(a.val[i] * alpha);
 }
-    
-    
+
+
 template<typename _Tp, int m, int n> inline
 Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp)
 {
     for( int i = 0; i < m*n; i++ )
         val[i] = saturate_cast<_Tp>(a.val[i] * b.val[i]);
 }
-    
-    
+
+
 template<typename _Tp, int m, int n> template<int l> inline
 Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp)
 {
@@ -539,8 +539,8 @@ Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_Mat
             val[i*n + j] = s;
         }
 }
-    
-    
+
+
 template<typename _Tp, int m, int n> inline
 Matx<_Tp,m,n>::Matx(const Matx<_Tp, n, m>& a, Matx_TOp)
 {
@@ -549,20 +549,20 @@ Matx<_Tp,m,n>::Matx(const Matx<_Tp, n, m>& a, Matx_TOp)
             val[i*n + j] = a(j, i);
 }
 
-    
+
 template<typename _Tp, int m, int n> static inline
 Matx<_Tp, m, n> operator + (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
 {
     return Matx<_Tp, m, n>(a, b, Matx_AddOp());
 }
-    
-    
+
+
 template<typename _Tp, int m, int n> static inline
 Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
 {
     return Matx<_Tp, m, n>(a, b, Matx_SubOp());
-}    
-    
+}
+
 
 template<typename _Tp, int m, int n> static inline
 Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, int alpha)
@@ -570,15 +570,15 @@ Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, int alpha)
     for( int i = 0; i < m*n; i++ )
         a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha);
     return a;
-}        
-    
+}
+
 template<typename _Tp, int m, int n> static inline
 Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, float alpha)
 {
     for( int i = 0; i < m*n; i++ )
         a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha);
     return a;
-}    
+}
 
 template<typename _Tp, int m, int n> static inline
 Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, double alpha)
@@ -586,44 +586,44 @@ Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, double alpha)
     for( int i = 0; i < m*n; i++ )
         a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha);
     return a;
-}        
+}
 
 template<typename _Tp, int m, int n> static inline
 Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, int alpha)
 {
     return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
-}        
+}
 
 template<typename _Tp, int m, int n> static inline
 Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, float alpha)
 {
     return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
-}        
+}
 
 template<typename _Tp, int m, int n> static inline
 Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, double alpha)
 {
     return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
-}            
-    
+}
+
 template<typename _Tp, int m, int n> static inline
 Matx<_Tp, m, n> operator * (int alpha, const Matx<_Tp, m, n>& a)
 {
     return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
-}        
+}
 
 template<typename _Tp, int m, int n> static inline
 Matx<_Tp, m, n> operator * (float alpha, const Matx<_Tp, m, n>& a)
 {
     return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
-}        
+}
 
 template<typename _Tp, int m, int n> static inline
 Matx<_Tp, m, n> operator * (double alpha, const Matx<_Tp, m, n>& a)
 {
     return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp());
-}            
-    
+}
+
 template<typename _Tp, int m, int n> static inline
 Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a)
 {
@@ -637,15 +637,15 @@ Matx<_Tp, m, n> operator * (const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b)
     return Matx<_Tp, m, n>(a, b, Matx_MatMulOp());
 }
 
-    
+
 template<typename _Tp, int m, int n> static inline
 Vec<_Tp, m> operator * (const Matx<_Tp, m, n>& a, const Vec<_Tp, n>& b)
 {
     Matx<_Tp, m, 1> c(a, b, Matx_MatMulOp());
     return reinterpret_cast<const Vec<_Tp, m>&>(c);
 }
-    
-    
+
+
 template<typename _Tp> static inline
 Point_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b)
 {
@@ -653,13 +653,13 @@ Point_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b)
     return Point_<_Tp>(tmp.val[0], tmp.val[1]);
 }
 
-    
+
 template<typename _Tp> static inline
 Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point3_<_Tp>& b)
 {
     Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, b.z);
     return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]);
-}    
+}
 
 
 template<typename _Tp> static inline
@@ -667,14 +667,14 @@ Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point_<_Tp>& b)
 {
     Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, 1);
     return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]);
-}    
+}
+
 
-    
 template<typename _Tp> static inline
 Matx<_Tp, 4, 1> operator * (const Matx<_Tp, 4, 4>& a, const Point3_<_Tp>& b)
 {
     return a*Matx<_Tp, 4, 1>(b.x, b.y, b.z, 1);
-}    
+}
 
 
 template<typename _Tp> static inline
@@ -684,7 +684,7 @@ Scalar operator * (const Matx<_Tp, 4, 4>& a, const Scalar& b)
     return reinterpret_cast<const Scalar&>(c);
 }
 
-    
+
 static inline
 Scalar operator * (const Matx<double, 4, 4>& a, const Scalar& b)
 {
@@ -692,18 +692,18 @@ Scalar operator * (const Matx<double, 4, 4>& a, const Scalar& b)
     return reinterpret_cast<const Scalar&>(c);
 }
 
-    
+
 template<typename _Tp, int m, int n> inline
 Matx<_Tp, m, n> Matx<_Tp, m, n>::mul(const Matx<_Tp, m, n>& a) const
 {
     return Matx<_Tp, m, n>(*this, a, Matx_MulOp());
 }
 
-    
+
 CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n);
 CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n);
 CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n);
-CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n);    
+CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n);
 
 
 template<typename _Tp, int m> struct CV_EXPORTS Matx_DetOp
@@ -719,7 +719,7 @@ template<typename _Tp, int m> struct CV_EXPORTS Matx_DetOp
         return p;
     }
 };
-    
+
 
 template<typename _Tp> struct CV_EXPORTS Matx_DetOp<_Tp, 1>
 {
@@ -748,13 +748,13 @@ template<typename _Tp> struct CV_EXPORTS Matx_DetOp<_Tp, 3>
             a(0,2)*(a(1,0)*a(2,1) - a(2,0)*a(1,1));
     }
 };
-    
+
 template<typename _Tp, int m> static inline
 double determinant(const Matx<_Tp, m, m>& a)
 {
-    return Matx_DetOp<_Tp, m>()(a);   
+    return Matx_DetOp<_Tp, m>()(a);
 }
-        
+
 
 template<typename _Tp, int m, int n> static inline
 double trace(const Matx<_Tp, m, n>& a)
@@ -763,9 +763,9 @@ double trace(const Matx<_Tp, m, n>& a)
     for( int i = 0; i < std::min(m, n); i++ )
         s += a(i,i);
     return s;
-}       
+}
+
 
-    
 template<typename _Tp, int m, int n> inline
 Matx<_Tp, n, m> Matx<_Tp, m, n>::t() const
 {
@@ -778,19 +778,19 @@ template<typename _Tp, int m> struct CV_EXPORTS Matx_FastInvOp
     bool operator()(const Matx<_Tp, m, m>& a, Matx<_Tp, m, m>& b, int method) const
     {
         Matx<_Tp, m, m> temp = a;
-        
+
         // assume that b is all 0's on input => make it a unity matrix
         for( int i = 0; i < m; i++ )
             b(i, i) = (_Tp)1;
-        
+
         if( method == DECOMP_CHOLESKY )
             return Cholesky(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m);
-        
+
         return LU(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m) != 0;
     }
 };
 
-    
+
 template<typename _Tp> struct CV_EXPORTS Matx_FastInvOp<_Tp, 2>
 {
     bool operator()(const Matx<_Tp, 2, 2>& a, Matx<_Tp, 2, 2>& b, int) const
@@ -807,7 +807,7 @@ template<typename _Tp> struct CV_EXPORTS Matx_FastInvOp<_Tp, 2>
     }
 };
 
-    
+
 template<typename _Tp> struct CV_EXPORTS Matx_FastInvOp<_Tp, 3>
 {
     bool operator()(const Matx<_Tp, 3, 3>& a, Matx<_Tp, 3, 3>& b, int) const
@@ -819,11 +819,11 @@ template<typename _Tp> struct CV_EXPORTS Matx_FastInvOp<_Tp, 3>
         b(0,0) = (a(1,1) * a(2,2) - a(1,2) * a(2,1)) * d;
         b(0,1) = (a(0,2) * a(2,1) - a(0,1) * a(2,2)) * d;
         b(0,2) = (a(0,1) * a(1,2) - a(0,2) * a(1,1)) * d;
-                                      
+
         b(1,0) = (a(1,2) * a(2,0) - a(1,0) * a(2,2)) * d;
         b(1,1) = (a(0,0) * a(2,2) - a(0,2) * a(2,0)) * d;
         b(1,2) = (a(0,2) * a(1,0) - a(0,0) * a(1,2)) * d;
-                                                                    
+
         b(2,0) = (a(1,0) * a(2,1) - a(1,1) * a(2,0)) * d;
         b(2,1) = (a(0,1) * a(2,0) - a(0,0) * a(2,1)) * d;
         b(2,2) = (a(0,0) * a(1,1) - a(0,1) * a(1,0)) * d;
@@ -831,7 +831,7 @@ template<typename _Tp> struct CV_EXPORTS Matx_FastInvOp<_Tp, 3>
     }
 };
 
-    
+
 template<typename _Tp, int m, int n> inline
 Matx<_Tp, n, m> Matx<_Tp, m, n>::inv(int method) const
 {
@@ -857,7 +857,7 @@ template<typename _Tp, int m, int n> struct CV_EXPORTS Matx_FastSolveOp
         x = b;
         if( method == DECOMP_CHOLESKY )
             return Cholesky(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n);
-        
+
         return LU(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n) != 0;
     }
 };
@@ -878,7 +878,7 @@ template<typename _Tp> struct CV_EXPORTS Matx_FastSolveOp<_Tp, 2, 1>
     }
 };
 
-    
+
 template<typename _Tp> struct CV_EXPORTS Matx_FastSolveOp<_Tp, 3, 1>
 {
     bool operator()(const Matx<_Tp, 3, 3>& a, const Matx<_Tp, 3, 1>& b,
@@ -891,19 +891,19 @@ template<typename _Tp> struct CV_EXPORTS Matx_FastSolveOp<_Tp, 3, 1>
         x(0) = d*(b(0)*(a(1,1)*a(2,2) - a(1,2)*a(2,1)) -
                 a(0,1)*(b(1)*a(2,2) - a(1,2)*b(2)) +
                 a(0,2)*(b(1)*a(2,1) - a(1,1)*b(2)));
-        
+
         x(1) = d*(a(0,0)*(b(1)*a(2,2) - a(1,2)*b(2)) -
                 b(0)*(a(1,0)*a(2,2) - a(1,2)*a(2,0)) +
                 a(0,2)*(a(1,0)*b(2) - b(1)*a(2,0)));
-        
+
         x(2) = d*(a(0,0)*(a(1,1)*b(2) - b(1)*a(2,1)) -
                 a(0,1)*(a(1,0)*b(2) - b(1)*a(2,0)) +
                 b(0)*(a(1,0)*a(2,1) - a(1,1)*a(2,0)));
         return true;
     }
 };
-                      
-    
+
+
 template<typename _Tp, int m, int n> template<int l> inline
 Matx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) const
 {
@@ -920,13 +920,13 @@ Matx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) c
     return ok ? x : Matx<_Tp, n, l>::zeros();
 }
 
-template<typename _Tp, int m, int n> inline    
+template<typename _Tp, int m, int n> inline
 Vec<_Tp, n> Matx<_Tp, m, n>::solve(const Vec<_Tp, m>& rhs, int method) const
 {
     Matx<_Tp, n, 1> x = solve(reinterpret_cast<const Matx<_Tp, m, 1>&>(rhs), method);
     return reinterpret_cast<Vec<_Tp, n>&>(x);
 }
-    
+
 template<typename _Tp, typename _AccTp> static inline
 _AccTp normL2Sqr(const _Tp* a, int n)
 {
@@ -974,8 +974,8 @@ _AccTp normInf(const _Tp* a, int n)
         s = std::max(s, (_AccTp)fast_abs(a[i]));
     return s;
 }
-    
-    
+
+
 template<typename _Tp, typename _AccTp> static inline
 _AccTp normL2Sqr(const _Tp* a, const _Tp* b, int n)
 {
@@ -1001,7 +1001,7 @@ CV_EXPORTS float normL1_(const float* a, const float* b, int n);
 CV_EXPORTS int normL1_(const uchar* a, const uchar* b, int n);
 CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n);
 CV_EXPORTS int normHamming(const uchar* a, const uchar* b, int n, int cellSize);
-    
+
 template<> inline float normL2Sqr(const float* a, const float* b, int n)
 {
     if( n >= 8 )
@@ -1015,7 +1015,7 @@ template<> inline float normL2Sqr(const float* a, const float* b, int n)
     return s;
 }
 
-    
+
 template<typename _Tp, typename _AccTp> static inline
 _AccTp normL1(const _Tp* a, const _Tp* b, int n)
 {
@@ -1052,7 +1052,7 @@ template<> inline float normL1(const float* a, const float* b, int n)
 template<> inline int normL1(const uchar* a, const uchar* b, int n)
 {
     return normL1_(a, b, n);
-}    
+}
 
 template<typename _Tp, typename _AccTp> static inline
 _AccTp normInf(const _Tp* a, const _Tp* b, int n)
@@ -1065,7 +1065,7 @@ _AccTp normInf(const _Tp* a, const _Tp* b, int n)
     }
     return s;
 }
-    
+
 
 template<typename _Tp, int m, int n> static inline
 double norm(const Matx<_Tp, m, n>& M)
@@ -1073,7 +1073,7 @@ double norm(const Matx<_Tp, m, n>& M)
     return std::sqrt(normL2Sqr<_Tp, double>(M.val, m*n));
 }
 
-    
+
 template<typename _Tp, int m, int n> static inline
 double norm(const Matx<_Tp, m, n>& M, int normType)
 {
@@ -1081,8 +1081,8 @@ double norm(const Matx<_Tp, m, n>& M, int normType)
         normType == NORM_L1 ? (double)normL1<_Tp, DataType<_Tp>::work_type>(M.val, m*n) :
         std::sqrt((double)normL2Sqr<_Tp, DataType<_Tp>::work_type>(M.val, m*n));
 }
-    
-    
+
+
 template<typename _Tp, int m, int n> static inline
 bool operator == (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
 {
@@ -1090,7 +1090,7 @@ bool operator == (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
         if( a.val[i] != b.val[i] ) return false;
     return true;
 }
-    
+
 template<typename _Tp, int m, int n> static inline
 bool operator != (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b)
 {
@@ -1123,7 +1123,7 @@ Matx<_Tp, m, n> MatxCommaInitializer<_Tp, m, n>::operator *() const
 {
     CV_DbgAssert( idx == n*m );
     return *dst;
-}    
+}
 
 /////////////////////////// short vector (Vec) /////////////////////////////
 
@@ -1175,11 +1175,11 @@ template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2,
                                                         _Tp v8, _Tp v9)
     : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9)
 {}
-    
+
 template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(const _Tp* values)
     : Matx<_Tp, cn, 1>(values)
 {}
-        
+
 
 template<typename _Tp, int cn> inline Vec<_Tp, cn>::Vec(const Vec<_Tp, cn>& m)
     : Matx<_Tp, cn, 1>(m.val)
@@ -1198,8 +1198,8 @@ Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_Sub
 template<typename _Tp, int cn> template<typename _T2> inline
 Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp op)
 : Matx<_Tp, cn, 1>(a, alpha, op)
-{}    
-        
+{}
+
 template<typename _Tp, int cn> inline Vec<_Tp, cn> Vec<_Tp, cn>::all(_Tp alpha)
 {
     Vec v;
@@ -1222,8 +1222,8 @@ template<typename _Tp> Vec<_Tp, 2> conjugate(const Vec<_Tp, 2>& v)
 template<typename _Tp> Vec<_Tp, 4> conjugate(const Vec<_Tp, 4>& v)
 {
     return Vec<_Tp, 4>(v[0], -v[1], -v[2], -v[3]);
-}    
-    
+}
+
 template<> inline Vec<float, 2> Vec<float, 2>::conj() const
 {
     return conjugate(*this);
@@ -1243,13 +1243,13 @@ template<> inline Vec<double, 4> Vec<double, 4>::conj() const
 {
     return conjugate(*this);
 }
-        
+
 template<typename _Tp, int cn> inline Vec<_Tp, cn> Vec<_Tp, cn>::cross(const Vec<_Tp, cn>& v) const
 {
     CV_Error(CV_StsError, "for arbitrary-size vector there is no cross-product defined");
     return Vec<_Tp, cn>();
 }
-    
+
 template<typename _Tp, int cn> template<typename T2>
 inline Vec<_Tp, cn>::operator Vec<T2, cn>() const
 {
@@ -1272,7 +1272,7 @@ template<typename _Tp, int cn> inline const _Tp& Vec<_Tp, cn>::operator [](int i
     CV_DbgAssert( (unsigned)i < (unsigned)cn );
     return this->val[i];
 }
-    
+
 template<typename _Tp, int cn> inline _Tp& Vec<_Tp, cn>::operator [](int i)
 {
     CV_DbgAssert( (unsigned)i < (unsigned)cn );
@@ -1289,15 +1289,15 @@ template<typename _Tp, int cn> inline _Tp& Vec<_Tp, cn>::operator ()(int i)
 {
     CV_DbgAssert( (unsigned)i < (unsigned)cn );
     return this->val[i];
-}    
-    
+}
+
 template<typename _Tp1, typename _Tp2, int cn> static inline Vec<_Tp1, cn>&
 operator += (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b)
 {
     for( int i = 0; i < cn; i++ )
         a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]);
     return a;
-}    
+}
 
 template<typename _Tp1, typename _Tp2, int cn> static inline Vec<_Tp1, cn>&
 operator -= (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b)
@@ -1305,8 +1305,8 @@ operator -= (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b)
     for( int i = 0; i < cn; i++ )
         a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]);
     return a;
-}        
-    
+}
+
 template<typename _Tp, int cn> static inline Vec<_Tp, cn>
 operator + (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b)
 {
@@ -1334,7 +1334,7 @@ Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, float alpha)
         a[i] = saturate_cast<_Tp>(a[i]*alpha);
     return a;
 }
-    
+
 template<typename _Tp, int cn> static inline
 Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, double alpha)
 {
@@ -1351,7 +1351,7 @@ Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, int alpha)
         a[i] = saturate_cast<_Tp>(a[i]*ialpha);
     return a;
 }
-    
+
 template<typename _Tp, int cn> static inline
 Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, float alpha)
 {
@@ -1368,8 +1368,8 @@ Vec<_Tp, cn>& operator /= (Vec<_Tp, cn>& a, double alpha)
     for( int i = 0; i < cn; i++ )
         a[i] = saturate_cast<_Tp>(a[i]*ialpha);
     return a;
-}    
-    
+}
+
 template<typename _Tp, int cn> static inline Vec<_Tp, cn>
 operator * (const Vec<_Tp, cn>& a, int alpha)
 {
@@ -1404,7 +1404,7 @@ template<typename _Tp, int cn> static inline Vec<_Tp, cn>
 operator * (double alpha, const Vec<_Tp, cn>& a)
 {
     return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp());
-}    
+}
 
 template<typename _Tp, int cn> static inline Vec<_Tp, cn>
 operator / (const Vec<_Tp, cn>& a, int alpha)
@@ -1416,14 +1416,14 @@ template<typename _Tp, int cn> static inline Vec<_Tp, cn>
 operator / (const Vec<_Tp, cn>& a, float alpha)
 {
     return Vec<_Tp, cn>(a, 1.f/alpha, Matx_ScaleOp());
-}    
+}
 
 template<typename _Tp, int cn> static inline Vec<_Tp, cn>
 operator / (const Vec<_Tp, cn>& a, double alpha)
 {
     return Vec<_Tp, cn>(a, 1./alpha, Matx_ScaleOp());
-}        
-    
+}
+
 template<typename _Tp, int cn> static inline Vec<_Tp, cn>
 operator - (const Vec<_Tp, cn>& a)
 {
@@ -1439,13 +1439,13 @@ template<typename _Tp> inline Vec<_Tp, 4> operator * (const Vec<_Tp, 4>& v1, con
                        saturate_cast<_Tp>(v1[0]*v2[2] - v1[1]*v2[3] + v1[2]*v2[0] + v1[3]*v2[1]),
                        saturate_cast<_Tp>(v1[0]*v2[3] + v1[1]*v2[2] - v1[2]*v2[1] + v1[3]*v2[0]));
 }
-    
+
 template<typename _Tp> inline Vec<_Tp, 4>& operator *= (Vec<_Tp, 4>& v1, const Vec<_Tp, 4>& v2)
 {
     v1 = v1 * v2;
     return v1;
 }
-    
+
 template<> inline Vec<float, 3> Vec<float, 3>::cross(const Vec<float, 3>& v) const
 {
     return Vec<float,3>(val[1]*v.val[2] - val[2]*v.val[1],
@@ -1465,14 +1465,14 @@ template<typename _Tp, int cn> inline Vec<_Tp, cn> normalize(const Vec<_Tp, cn>&
     double nv = norm(v);
     return v * (nv ? 1./nv : 0.);
 }
-    
+
 template<typename _Tp, typename _T2, int cn> static inline
 VecCommaInitializer<_Tp, cn> operator << (const Vec<_Tp, cn>& vec, _T2 val)
 {
     VecCommaInitializer<_Tp, cn> commaInitializer((Vec<_Tp, cn>*)&vec);
     return (commaInitializer, val);
 }
-    
+
 template<typename _Tp, int cn> inline
 VecCommaInitializer<_Tp, cn>::VecCommaInitializer(Vec<_Tp, cn>* _vec)
     : MatxCommaInitializer<_Tp, cn, 1>(_vec)
@@ -1491,7 +1491,7 @@ Vec<_Tp, cn> VecCommaInitializer<_Tp, cn>::operator *() const
 {
     CV_DbgAssert( this->idx == cn );
     return *this->dst;
-}    
+}
 
 //////////////////////////////// Complex //////////////////////////////
 
@@ -1508,8 +1508,8 @@ bool operator == (const Complex<_Tp>& a, const Complex<_Tp>& b)
 
 template<typename _Tp> static inline
 bool operator != (const Complex<_Tp>& a, const Complex<_Tp>& b)
-{ return a.re != b.re || a.im != b.im; }    
-    
+{ return a.re != b.re || a.im != b.im; }
+
 template<typename _Tp> static inline
 Complex<_Tp> operator + (const Complex<_Tp>& a, const Complex<_Tp>& b)
 { return Complex<_Tp>( a.re + b.re, a.im + b.im ); }
@@ -1637,7 +1637,7 @@ template<typename _Tp> inline double Point_<_Tp>::ddot(const Point_& pt) const
 
 template<typename _Tp> inline double Point_<_Tp>::cross(const Point_& pt) const
 { return (double)x*pt.y - (double)y*pt.x; }
-    
+
 template<typename _Tp> static inline Point_<_Tp>&
 operator += (Point_<_Tp>& a, const Point_<_Tp>& b)
 {
@@ -1676,8 +1676,8 @@ operator *= (Point_<_Tp>& a, double b)
     a.x = saturate_cast<_Tp>(a.x*b);
     a.y = saturate_cast<_Tp>(a.y*b);
     return a;
-}    
-    
+}
+
 template<typename _Tp> static inline double norm(const Point_<_Tp>& pt)
 { return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y); }
 
@@ -1701,7 +1701,7 @@ template<typename _Tp> static inline Point_<_Tp> operator * (const Point_<_Tp>&
 
 template<typename _Tp> static inline Point_<_Tp> operator * (int a, const Point_<_Tp>& b)
 { return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); }
-    
+
 template<typename _Tp> static inline Point_<_Tp> operator * (const Point_<_Tp>& a, float b)
 { return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); }
 
@@ -1712,8 +1712,8 @@ template<typename _Tp> static inline Point_<_Tp> operator * (const Point_<_Tp>&
 { return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); }
 
 template<typename _Tp> static inline Point_<_Tp> operator * (double a, const Point_<_Tp>& b)
-{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); }    
-    
+{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); }
+
 //////////////////////////////// 3D Point ////////////////////////////////
 
 template<typename _Tp> inline Point3_<_Tp>::Point3_() : x(0), y(0), z(0) {}
@@ -1740,7 +1740,7 @@ template<typename _Tp> inline _Tp Point3_<_Tp>::dot(const Point3_& pt) const
 { return saturate_cast<_Tp>(x*pt.x + y*pt.y + z*pt.z); }
 template<typename _Tp> inline double Point3_<_Tp>::ddot(const Point3_& pt) const
 { return (double)x*pt.x + (double)y*pt.y + (double)z*pt.z; }
-    
+
 template<typename _Tp> inline Point3_<_Tp> Point3_<_Tp>::cross(const Point3_<_Tp>& pt) const
 {
     return Point3_<_Tp>(y*pt.z - z*pt.y, z*pt.x - x*pt.z, x*pt.y - y*pt.x);
@@ -1754,7 +1754,7 @@ operator += (Point3_<_Tp>& a, const Point3_<_Tp>& b)
     a.z = saturate_cast<_Tp>(a.z + b.z);
     return a;
 }
-    
+
 template<typename _Tp> static inline Point3_<_Tp>&
 operator -= (Point3_<_Tp>& a, const Point3_<_Tp>& b)
 {
@@ -1762,8 +1762,8 @@ operator -= (Point3_<_Tp>& a, const Point3_<_Tp>& b)
     a.y = saturate_cast<_Tp>(a.y - b.y);
     a.z = saturate_cast<_Tp>(a.z - b.z);
     return a;
-}    
-    
+}
+
 template<typename _Tp> static inline Point3_<_Tp>&
 operator *= (Point3_<_Tp>& a, int b)
 {
@@ -1789,8 +1789,8 @@ operator *= (Point3_<_Tp>& a, double b)
     a.y = saturate_cast<_Tp>(a.y*b);
     a.z = saturate_cast<_Tp>(a.z*b);
     return a;
-}    
-    
+}
+
 template<typename _Tp> static inline double norm(const Point3_<_Tp>& pt)
 { return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y + (double)pt.z*pt.z); }
 
@@ -1799,7 +1799,7 @@ template<typename _Tp> static inline bool operator == (const Point3_<_Tp>& a, co
 
 template<typename _Tp> static inline bool operator != (const Point3_<_Tp>& a, const Point3_<_Tp>& b)
 { return a.x != b.x || a.y != b.y || a.z != b.z; }
-    
+
 template<typename _Tp> static inline Point3_<_Tp> operator + (const Point3_<_Tp>& a, const Point3_<_Tp>& b)
 { return Point3_<_Tp>( saturate_cast<_Tp>(a.x + b.x),
                       saturate_cast<_Tp>(a.y + b.y),
@@ -1844,7 +1844,7 @@ template<typename _Tp> static inline Point3_<_Tp> operator * (double a, const Po
 { return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a),
                       saturate_cast<_Tp>(b.y*a),
                       saturate_cast<_Tp>(b.z*a) ); }
-    
+
 //////////////////////////////// Size ////////////////////////////////
 
 template<typename _Tp> inline Size_<_Tp>::Size_()
@@ -1958,8 +1958,8 @@ template<typename _Tp> static inline bool operator == (const Rect_<_Tp>& a, cons
 template<typename _Tp> static inline bool operator != (const Rect_<_Tp>& a, const Rect_<_Tp>& b)
 {
     return a.x != b.x || a.y != b.y || a.width != b.width || a.height != b.height;
-}    
-    
+}
+
 template<typename _Tp> static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Point_<_Tp>& b)
 {
     return Rect_<_Tp>( a.x + b.x, a.y + b.y, a.width, a.height );
@@ -2002,7 +2002,7 @@ inline RotatedRect::operator CvBox2D() const
     CvBox2D box; box.center = center; box.size = size; box.angle = angle;
     return box;
 }
-    
+
 //////////////////////////////// Scalar_ ///////////////////////////////
 
 template<typename _Tp> inline Scalar_<_Tp>::Scalar_()
@@ -2117,7 +2117,7 @@ template<typename _Tp> static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>
                       saturate_cast<_Tp>(-a.val[2]), saturate_cast<_Tp>(-a.val[3]));
 }
 
-    
+
 template<typename _Tp> static inline Scalar_<_Tp>
 operator * (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
 {
@@ -2126,14 +2126,14 @@ operator * (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
                         saturate_cast<_Tp>(a[0]*b[2] - a[1]*b[3] + a[2]*b[0] + a[3]*b[1]),
                         saturate_cast<_Tp>(a[0]*b[3] + a[1]*b[2] - a[2]*b[1] + a[3]*b[0]));
 }
-    
+
 template<typename _Tp> static inline Scalar_<_Tp>&
 operator *= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
 {
     a = a*b;
     return a;
-}    
-    
+}
+
 template<typename _Tp> inline Scalar_<_Tp> Scalar_<_Tp>::conj() const
 {
     return Scalar_<_Tp>(saturate_cast<_Tp>(this->val[0]),
@@ -2146,7 +2146,7 @@ template<typename _Tp> inline bool Scalar_<_Tp>::isReal() const
 {
     return this->val[1] == 0 && this->val[2] == 0 && this->val[3] == 0;
 }
-    
+
 template<typename _Tp> static inline
 Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, _Tp alpha)
 {
@@ -2154,36 +2154,36 @@ Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, _Tp alpha)
                         saturate_cast<_Tp>(a.val[1] / alpha),
                         saturate_cast<_Tp>(a.val[2] / alpha),
                         saturate_cast<_Tp>(a.val[3] / alpha));
-}    
+}
 
 template<typename _Tp> static inline
 Scalar_<float> operator / (const Scalar_<float>& a, float alpha)
 {
     float s = 1/alpha;
     return Scalar_<float>(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s);
-}        
+}
 
 template<typename _Tp> static inline
 Scalar_<double> operator / (const Scalar_<double>& a, double alpha)
 {
     double s = 1/alpha;
     return Scalar_<double>(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s);
-}            
-    
+}
+
 template<typename _Tp> static inline
 Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, _Tp alpha)
 {
     a = a/alpha;
     return a;
 }
-    
+
 template<typename _Tp> static inline
 Scalar_<_Tp> operator / (_Tp a, const Scalar_<_Tp>& b)
 {
     _Tp s = a/(b[0]*b[0] + b[1]*b[1] + b[2]*b[2] + b[3]*b[3]);
     return b.conj()*s;
-}    
-    
+}
+
 template<typename _Tp> static inline
 Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
 {
@@ -2196,7 +2196,7 @@ Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b)
     a = a/b;
     return a;
 }
-    
+
 //////////////////////////////// Range /////////////////////////////////
 
 inline Range::Range() : start(0), end(0) {}
@@ -2251,8 +2251,8 @@ static inline Range operator - (const Range& r1, int delta)
 inline Range::operator CvSlice() const
 { return *this != Range::all() ? cvSlice(start, end) : CV_WHOLE_SEQ; }
 
-    
-    
+
+
 //////////////////////////////// Vector ////////////////////////////////
 
 // template vector class. It is similar to STL's vector,
@@ -2268,7 +2268,7 @@ public:
     typedef const _Tp* const_iterator;
     typedef _Tp& reference;
     typedef const _Tp& const_reference;
-    
+
     struct CV_EXPORTS Hdr
     {
         Hdr() : data(0), datastart(0), refcount(0), size(0), capacity(0) {};
@@ -2278,7 +2278,7 @@ public:
         size_t size;
         size_t capacity;
     };
-    
+
     Vector() {}
     Vector(size_t _size)  { resize(_size); }
     Vector(size_t _size, const _Tp& val)
@@ -2289,15 +2289,15 @@ public:
     }
     Vector(_Tp* _data, size_t _size, bool _copyData=false)
     { set(_data, _size, _copyData); }
-    
+
     template<int n> Vector(const Vec<_Tp, n>& vec)
-    { set((_Tp*)&vec.val[0], n, true); }    
-    
+    { set((_Tp*)&vec.val[0], n, true); }
+
     Vector(const std::vector<_Tp>& vec, bool _copyData=false)
-    { set(!vec.empty() ? (_Tp*)&vec[0] : 0, vec.size(), _copyData); }    
-    
+    { set(!vec.empty() ? (_Tp*)&vec[0] : 0, vec.size(), _copyData); }
+
     Vector(const Vector& d) { *this = d; }
-    
+
     Vector(const Vector& d, const Range& r_)
     {
         Range r = r_ == Range::all() ? Range(0, d.size()) : r_;
@@ -2313,7 +2313,7 @@ public:
             hdr.capacity = hdr.size = r.size();
         }
     }
-    
+
     Vector<_Tp>& operator = (const Vector& d)
     {
         if( this != &d )
@@ -2325,12 +2325,12 @@ public:
         }
         return *this;
     }
-    
+
     ~Vector()  { release(); }
-    
+
     Vector<_Tp> clone() const
     { return hdr.data ? Vector<_Tp>(hdr.data, hdr.size, true) : Vector<_Tp>(); }
-    
+
     void copyTo(Vector<_Tp>& vec) const
     {
         size_t i, sz = size();
@@ -2340,7 +2340,7 @@ public:
         for( i = 0; i < sz; i++ )
             dst[i] = src[i];
     }
-    
+
     void copyTo(std::vector<_Tp>& vec) const
     {
         size_t i, sz = size();
@@ -2350,10 +2350,10 @@ public:
         for( i = 0; i < sz; i++ )
             dst[i] = src[i];
     }
-    
+
     operator CvMat() const
     { return cvMat((int)size(), 1, type(), (void*)hdr.data); }
-    
+
     _Tp& operator [] (size_t i) { CV_DbgAssert( i < size() ); return hdr.data[i]; }
     const _Tp& operator [] (size_t i) const { CV_DbgAssert( i < size() ); return hdr.data[i]; }
     Vector operator() (const Range& r) const { return Vector(*this, r); }
@@ -2361,12 +2361,12 @@ public:
     const _Tp& back() const { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; }
     _Tp& front() { CV_DbgAssert(!empty()); return hdr.data[0]; }
     const _Tp& front() const { CV_DbgAssert(!empty()); return hdr.data[0]; }
-    
+
     _Tp* begin() { return hdr.data; }
     _Tp* end() { return hdr.data + hdr.size; }
     const _Tp* begin() const { return hdr.data; }
     const _Tp* end() const { return hdr.data + hdr.size; }
-    
+
     void addref() { if( hdr.refcount ) CV_XADD(hdr.refcount, 1); }
     void release()
     {
@@ -2377,7 +2377,7 @@ public:
         }
         hdr = Hdr();
     }
-    
+
     void set(_Tp* _data, size_t _size, bool _copyData=false)
     {
         if( !_copyData )
@@ -2395,7 +2395,7 @@ public:
             hdr.size = _size;
         }
     }
-    
+
     void reserve(size_t newCapacity)
     {
         _Tp* newData;
@@ -2414,7 +2414,7 @@ public:
         hdr.size = oldSize;
         hdr.refcount = newRefcount;
     }
-    
+
     void resize(size_t newSize)
     {
         size_t i;
@@ -2427,7 +2427,7 @@ public:
             hdr.data[i] = _Tp();
         hdr.size = newSize;
     }
-    
+
     Vector<_Tp>& push_back(const _Tp& elem)
     {
         if( hdr.size == hdr.capacity )
@@ -2435,25 +2435,25 @@ public:
         hdr.data[hdr.size++] = elem;
         return *this;
     }
-    
+
     Vector<_Tp>& pop_back()
     {
         if( hdr.size > 0 )
             --hdr.size;
         return *this;
     }
-    
+
     size_t size() const { return hdr.size; }
     size_t capacity() const { return hdr.capacity; }
     bool empty() const { return hdr.size == 0; }
     void clear() { resize(0); }
     int type() const { return DataType<_Tp>::type; }
-    
+
 protected:
     Hdr hdr;
-};    
+};
+
 
-    
 template<typename _Tp> inline typename DataType<_Tp>::work_type
 dot(const Vector<_Tp>& v1, const Vector<_Tp>& v2)
 {
@@ -2475,7 +2475,7 @@ dot(const Vector<_Tp>& v1, const Vector<_Tp>& v2)
     }
     return s;
 }
-    
+
 // Multiply-with-Carry RNG
 inline RNG::RNG() { state = 0xffffffff; }
 inline RNG::RNG(uint64 _state) { state = _state ? _state : 0xffffffff; }
@@ -2533,7 +2533,7 @@ inline Point LineIterator::pos() const
     p.x = (int)(((ptr - ptr0) - p.y*step)/elemSize);
     return p;
 }
-    
+
 /////////////////////////////// AutoBuffer ////////////////////////////////////////
 
 template<typename _Tp, size_t fixed_size> inline AutoBuffer<_Tp, fixed_size>::AutoBuffer()
@@ -2653,7 +2653,7 @@ template<typename _Tp> template<typename _Tp2> inline Ptr<_Tp2> Ptr<_Tp>::ptr()
     p.refcount = refcount;
     return p;
 }
-    
+
 template<typename _Tp> template<typename _Tp2> inline const Ptr<_Tp2> Ptr<_Tp>::ptr() const
 {
     Ptr<_Tp2> p;
@@ -2665,7 +2665,7 @@ template<typename _Tp> template<typename _Tp2> inline const Ptr<_Tp2> Ptr<_Tp>::
     p.refcount = refcount;
     return p;
 }
-    
+
 //// specializied implementations of Ptr::delete_obj() for classic OpenCV types
 
 template<> CV_EXPORTS void Ptr<CvMat>::delete_obj();
@@ -2674,7 +2674,7 @@ template<> CV_EXPORTS void Ptr<CvMatND>::delete_obj();
 template<> CV_EXPORTS void Ptr<CvSparseMat>::delete_obj();
 template<> CV_EXPORTS void Ptr<CvMemStorage>::delete_obj();
 template<> CV_EXPORTS void Ptr<CvFileStorage>::delete_obj();
-    
+
 //////////////////////////////////////// XML & YAML I/O ////////////////////////////////////
 
 CV_EXPORTS_W void write( FileStorage& fs, const string& name, int value );
@@ -2870,11 +2870,11 @@ template<typename _Tp> static inline void write( FileStorage& fs, const string&
 {
     WriteStructContext ws(fs, name, CV_NODE_SEQ+(DataType<_Tp>::fmt != 0 ? CV_NODE_FLOW : 0));
     write(fs, vec);
-}    
-    
+}
+
 CV_EXPORTS_W void write( FileStorage& fs, const string& name, const Mat& value );
 CV_EXPORTS void write( FileStorage& fs, const string& name, const SparseMat& value );
-    
+
 template<typename _Tp> static inline FileStorage& operator << (FileStorage& fs, const _Tp& value)
 {
     if( !fs.isOpened() )
@@ -2923,7 +2923,7 @@ static inline void read(const FileNode& node, int& value, int default_value)
     CV_NODE_IS_INT(node.node->tag) ? node.node->data.i :
     CV_NODE_IS_REAL(node.node->tag) ? cvRound(node.node->data.f) : 0x7fffffff;
 }
-    
+
 static inline void read(const FileNode& node, bool& value, bool default_value)
 {
     int temp; read(node, temp, (int)default_value);
@@ -2953,7 +2953,7 @@ static inline void read(const FileNode& node, short& value, short default_value)
     int temp; read(node, temp, (int)default_value);
     value = saturate_cast<short>(temp);
 }
-    
+
 static inline void read(const FileNode& node, float& value, float default_value)
 {
     value = !node.node ? default_value :
@@ -2975,7 +2975,7 @@ static inline void read(const FileNode& node, string& value, const string& defau
 
 CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() );
 CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat=SparseMat() );
-    
+
 inline FileNode::operator int() const
 {
     int value;
@@ -3019,7 +3019,7 @@ public:
     }
     FileNodeIterator* it;
 };
-    
+
 template<typename _Tp> class CV_EXPORTS VecReaderProxy<_Tp,1>
 {
 public:
@@ -3055,7 +3055,7 @@ read( const FileNode& node, vector<_Tp>& vec, const vector<_Tp>& default_value=v
         read( it, vec );
     }
 }
-    
+
 inline FileNodeIterator FileNode::begin() const
 {
     return FileNodeIterator(fs, node);
@@ -3459,7 +3459,7 @@ partition( const vector<_Tp>& _vec, vector<int>& labels,
     return nclasses;
 }
 
-    
+
 //////////////////////////////////////////////////////////////////////////////
 
 // bridge C++ => C Seq API
@@ -3473,7 +3473,7 @@ CV_EXPORTS void  seqRemove( CvSeq* seq, int index );
 CV_EXPORTS void  clearSeq( CvSeq* seq );
 CV_EXPORTS schar*  getSeqElem( const CvSeq* seq, int index );
 CV_EXPORTS void  seqRemoveSlice( CvSeq* seq, CvSlice slice );
-CV_EXPORTS void  seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr );    
+CV_EXPORTS void  seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr );
 
 template<typename _Tp> inline Seq<_Tp>::Seq() : seq(0) {}
 template<typename _Tp> inline Seq<_Tp>::Seq( const CvSeq* _seq ) : seq((CvSeq*)_seq)
@@ -3528,8 +3528,8 @@ template<typename _Tp> inline void Seq<_Tp>::push_back(const _Tp* elem, size_t c
 { cvSeqPushMulti(seq, elem, (int)count, 0); }
 
 template<typename _Tp> inline void Seq<_Tp>::push_front(const _Tp* elem, size_t count)
-{ cvSeqPushMulti(seq, elem, (int)count, 1); }    
-    
+{ cvSeqPushMulti(seq, elem, (int)count, 1); }
+
 template<typename _Tp> inline _Tp& Seq<_Tp>::back()
 { return *(_Tp*)getSeqElem(seq, -1); }
 
@@ -3558,23 +3558,23 @@ template<typename _Tp> inline void Seq<_Tp>::pop_back(_Tp* elem, size_t count)
 { seqPopMulti(seq, elem, (int)count, 0); }
 
 template<typename _Tp> inline void Seq<_Tp>::pop_front(_Tp* elem, size_t count)
-{ seqPopMulti(seq, elem, (int)count, 1); }    
+{ seqPopMulti(seq, elem, (int)count, 1); }
 
 template<typename _Tp> inline void Seq<_Tp>::insert(int idx, const _Tp& elem)
 { seqInsert(seq, idx, &elem); }
-    
+
 template<typename _Tp> inline void Seq<_Tp>::insert(int idx, const _Tp* elems, size_t count)
 {
     CvMat m = cvMat(1, count, DataType<_Tp>::type, elems);
     seqInsertSlice(seq, idx, &m);
 }
-    
+
 template<typename _Tp> inline void Seq<_Tp>::remove(int idx)
 { seqRemove(seq, idx); }
-    
+
 template<typename _Tp> inline void Seq<_Tp>::remove(const Range& r)
 { seqRemoveSlice(seq, r); }
-    
+
 template<typename _Tp> inline void Seq<_Tp>::copyTo(vector<_Tp>& vec, const Range& range) const
 {
     size_t len = !seq ? 0 : range == Range::all() ? seq->total : range.end - range.start;
@@ -3716,7 +3716,7 @@ public:
         delete obj;
         return 0;
     }
-    
+
     static void write(CvFileStorage* _fs, const char* name, const void* ptr, CvAttrList)
     {
         if(ptr && _fs)
@@ -3726,7 +3726,7 @@ public:
             ((const _ClsName*)ptr)->write(fs, string(name));
         }
     }
-    
+
     static void* clone(const void* ptr)
     {
         if(!ptr)
@@ -3735,7 +3735,7 @@ public:
     }
 };
 
-    
+
 class CV_EXPORTS Formatter
 {
 public:
@@ -3759,7 +3759,7 @@ struct CV_EXPORTS Formatted
     vector<int> params;
 };
 
-    
+
 /** Writes a point to an output stream in Matlab notation
  */
 template<typename _Tp> inline std::ostream& operator<<(std::ostream& out, const Point_<_Tp>& p)
@@ -3774,7 +3774,7 @@ template<typename _Tp> inline std::ostream& operator<<(std::ostream& out, const
 {
     out << "[" << p.x << ", " << p.y << ", " << p.z << "]";
     return out;
-}        
+}
 
 static inline Formatted format(const Mat& mtx, const char* fmt,
                                const vector<int>& params=vector<int>())
@@ -3800,7 +3800,7 @@ template<typename _Tp> static inline Formatted format(const vector<Point3_<_Tp>
  Mat my_mat = Mat::eye(3,3,CV_32F);
  std::cout << my_mat;
  @endverbatim
- */    
+ */
 static inline std::ostream& operator << (std::ostream& out, const Mat& mtx)
 {
     Formatter::get()->write(out, mtx);
@@ -3813,7 +3813,7 @@ static inline std::ostream& operator << (std::ostream& out, const Mat& mtx)
  Mat my_mat = Mat::eye(3,3,CV_32F);
  std::cout << my_mat;
  @endverbatim
- */    
+ */
 static inline std::ostream& operator << (std::ostream& out, const Formatted& fmtd)
 {
     fmtd.fmt->write(out, fmtd.mtx);
@@ -3835,13 +3835,13 @@ template<typename _Tp> static inline std::ostream& operator << (std::ostream& ou
     Formatter::get()->write(out, Mat(vec));
     return out;
 }
-    
+
 
 template<typename _Tp> inline Ptr<_Tp> Algorithm::create(const string& name)
 {
     return _create(name).ptr<_Tp>();
 }
-    
+
 template<typename _Tp> inline typename ParamType<_Tp>::member_type Algorithm::get(const string& name) const
 {
     typename ParamType<_Tp>::member_type value;
@@ -3854,8 +3854,8 @@ template<typename _Tp> inline typename ParamType<_Tp>::member_type Algorithm::ge
     typename ParamType<_Tp>::member_type value;
     info()->get(this, name, ParamType<_Tp>::type, &value);
     return value;
-}    
-    
+}
+
 }
 
 #endif // __cplusplus
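
The hunks above only strip trailing whitespace in core's C++ operations header, but the code they touch is the fixed-size Matx/Vec machinery: the Cholesky/LU dispatch in Matx_FastSolveOp, the hand-rolled Cramer's-rule path for 3x3 systems, and the norm and element-wise operators. For orientation, a minimal usage sketch of that API (illustration only, not part of the commit; assumes the OpenCV 2.x core headers):

    #include <opencv2/core/core.hpp>
    #include <iostream>

    int main()
    {
        cv::Matx33f A(4, 1, 0,
                      1, 3, 1,
                      0, 1, 2);
        cv::Vec3f b(1, 2, 3);

        // A Vec right-hand side goes through the 3x1 Matx_FastSolveOp specialization above;
        // DECOMP_CHOLESKY would also work here because A is symmetric positive definite.
        cv::Vec3f x = A.solve(b, cv::DECOMP_LU);
        cv::Matx33f Ai = A.inv();               // Matx_FastInvOp<_Tp, 3> path

        std::cout << cv::Mat(x) << std::endl << cv::Mat(Ai) << std::endl;
        return 0;
    }
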
index e11f096..6c48360 100644 (file)
 #ifndef __OPENCV_CORE_TYPES_H__
 #define __OPENCV_CORE_TYPES_H__
 
-#if !defined _CRT_SECURE_NO_DEPRECATE && _MSC_VER > 1300
-#define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */
+#if !defined _CRT_SECURE_NO_DEPRECATE && defined _MSC_VER
+#  if _MSC_VER > 1300
+#    define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */
+#  endif
 #endif
 
 
 #ifndef SKIP_INCLUDES
-  #include <assert.h>
-  #include <stdlib.h>
-  #include <string.h>
-  #include <float.h>
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <float.h>
 
 #if !defined _MSC_VER && !defined __BORLANDC__
-  #include <stdint.h>
+#  include <stdint.h>
+#endif
+
+#if defined __ICL
+#  define CV_ICC   __ICL
+#elif defined __ICC
+#  define CV_ICC   __ICC
+#elif defined __ECL
+#  define CV_ICC   __ECL
+#elif defined __ECC
+#  define CV_ICC   __ECC
+#elif defined __INTEL_COMPILER
+#  define CV_ICC   __INTEL_COMPILER
+#endif
+
+#if defined CV_ICC && !defined CV_ENABLE_UNROLLED
+#  define CV_ENABLE_UNROLLED 0
+#else
+#  define CV_ENABLE_UNROLLED 1
+#endif
+
+#if (defined _M_X64 && _MSC_VER >= 1400) || (__GNUC__ >= 4 && defined __x86_64__)
+#  if defined WIN32
+#    include <intrin.h>
+#  endif
+#  if __SSE2__ || !defined __GNUC__
+#    include <emmintrin.h>
+#  endif
+#endif
+
+#if defined __BORLANDC__
+#  include <fastmath.h>
+#else
+#  include <math.h>
+#endif
+
+#ifdef HAVE_IPL
+#  ifndef __IPL_H__
+#    if defined WIN32 || defined _WIN32
+#      include <ipl.h>
+#    else
+#      include <ipl/ipl.h>
+#    endif
+#  endif
+#elif defined __IPL_H__
+#  define HAVE_IPL
 #endif
 
-  #if defined __ICL
-    #define CV_ICC   __ICL
-  #elif defined __ICC
-    #define CV_ICC   __ICC
-  #elif defined __ECL
-    #define CV_ICC   __ECL
-  #elif defined __ECC
-    #define CV_ICC   __ECC
-  #elif defined __INTEL_COMPILER
-    #define CV_ICC   __INTEL_COMPILER
-  #endif
-
-  #if (_MSC_VER >= 1400 && defined _M_X64) || (__GNUC__ >= 4 && defined __x86_64__)
-    #if defined WIN32
-      #include <intrin.h>
-    #endif
-    #if __SSE2__ || !defined __GNUC__
-      #include <emmintrin.h>
-    #endif
-  #endif
-
-  #if defined __BORLANDC__
-    #include <fastmath.h>
-  #else
-    #include <math.h>
-  #endif
-
-  #ifdef HAVE_IPL
-      #ifndef __IPL_H__
-          #if defined WIN32 || defined _WIN32
-              #include <ipl.h>
-          #else
-              #include <ipl/ipl.h>
-          #endif
-      #endif
-  #elif defined __IPL_H__
-      #define HAVE_IPL
-  #endif
 #endif // SKIP_INCLUDES
 
 #if defined WIN32 || defined _WIN32
-    #define CV_CDECL __cdecl
-    #define CV_STDCALL __stdcall
+#  define CV_CDECL __cdecl
+#  define CV_STDCALL __stdcall
 #else
-    #define CV_CDECL
-    #define CV_STDCALL
+#  define CV_CDECL
+#  define CV_STDCALL
 #endif
 
 #ifndef CV_EXTERN_C
-    #ifdef __cplusplus
-        #define CV_EXTERN_C extern "C"
-        #define CV_DEFAULT(val) = val
-    #else
-        #define CV_EXTERN_C
-        #define CV_DEFAULT(val)
-    #endif
+#  ifdef __cplusplus
+#    define CV_EXTERN_C extern "C"
+#    define CV_DEFAULT(val) = val
+#  else
+#    define CV_EXTERN_C
+#    define CV_DEFAULT(val)
+#  endif
 #endif
 
 #ifndef CV_EXTERN_C_FUNCPTR
-    #ifdef __cplusplus
-        #define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; }
-    #else
-        #define CV_EXTERN_C_FUNCPTR(x) typedef x
-    #endif
+#  ifdef __cplusplus
+#    define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; }
+#  else
+#    define CV_EXTERN_C_FUNCPTR(x) typedef x
+#  endif
 #endif
 
 #ifndef CV_INLINE
-#if defined __cplusplus
-    #define CV_INLINE inline
-#elif (defined WIN32 || defined _WIN32 || defined WINCE) && !defined __GNUC__
-    #define CV_INLINE __inline
-#else
-    #define CV_INLINE static
-#endif
+#  if defined __cplusplus
+#    define CV_INLINE inline
+#  elif (defined WIN32 || defined _WIN32 || defined WINCE) && !defined __GNUC__
+#    define CV_INLINE __inline
+#  else
+#    define CV_INLINE static
+#  endif
 #endif /* CV_INLINE */
 
 #if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS
-    #define CV_EXPORTS __declspec(dllexport)
+#  define CV_EXPORTS __declspec(dllexport)
 #else
-    #define CV_EXPORTS
+#  define CV_EXPORTS
 #endif
 
 #ifndef CVAPI
-    #define CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL
+#  define CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL
 #endif
 
 #if defined _MSC_VER || defined __BORLANDC__
-typedef __int64 int64;
-typedef unsigned __int64 uint64;
-#define CV_BIG_INT(n)   n##I64
-#define CV_BIG_UINT(n)  n##UI64
+   typedef __int64 int64;
+   typedef unsigned __int64 uint64;
+#  define CV_BIG_INT(n)   n##I64
+#  define CV_BIG_UINT(n)  n##UI64
 #else
-typedef int64_t int64;
-typedef uint64_t uint64;
-#define CV_BIG_INT(n)   n##LL
-#define CV_BIG_UINT(n)  n##ULL
+   typedef int64_t int64;
+   typedef uint64_t uint64;
+#  define CV_BIG_INT(n)   n##LL
+#  define CV_BIG_UINT(n)  n##ULL
 #endif
 
 #ifndef HAVE_IPL
-typedef unsigned char uchar;
-typedef unsigned short ushort;
+   typedef unsigned char uchar;
+   typedef unsigned short ushort;
 #endif
 
 typedef signed char schar;
@@ -203,7 +213,7 @@ Cv64suf;
 
 typedef int CVStatus;
 
-enum {    
+enum {
  CV_StsOk=                       0,  /* everithing is ok                */
  CV_StsBackTrace=               -1,  /* pseudo error for back trace     */
  CV_StsError=                   -2,  /* unknown /unspecified error      */
@@ -241,8 +251,8 @@ enum {
  CV_StsInplaceNotSupported=    -203, /* in-place operation is not supported */
  CV_StsObjectNotFound=         -204, /* request can't be completed */
  CV_StsUnmatchedFormats=       -205, /* formats of input/output arrays differ */
- CV_StsBadFlag=                -206, /* flag is wrong or not supported */  
- CV_StsBadPoint=               -207, /* bad CvPoint */ 
+ CV_StsBadFlag=                -206, /* flag is wrong or not supported */
+ CV_StsBadPoint=               -207, /* bad CvPoint */
  CV_StsBadMask=                -208, /* bad format of mask (neither 8uC1 nor 8sC1)*/
  CV_StsUnmatchedSizes=         -209, /* sizes of input/output structures do not match */
  CV_StsUnsupportedFormat=      -210, /* the data format/type is not supported by the function*/
@@ -250,8 +260,8 @@ enum {
  CV_StsParseError=             -212, /* invalid syntax/structure of the parsed file */
  CV_StsNotImplemented=         -213, /* the requested function/feature is not implemented */
  CV_StsBadMemBlock=            -214, /* an allocated block has been corrupted */
- CV_StsAssert=                 -215, /* assertion failed */    
- CV_GpuNotSupported=           -216,  
+ CV_StsAssert=                 -215, /* assertion failed */
+ CV_GpuNotSupported=           -216,
  CV_GpuApiCallError=           -217,
  CV_OpenGlNotSupported=        -218,
  CV_OpenGlApiCallError=        -219
@@ -262,7 +272,7 @@ enum {
 \****************************************************************************************/
 
 #ifdef HAVE_TEGRA_OPTIMIZATION
-# include "tegra_round.hpp"
+#  include "tegra_round.hpp"
 #endif
 
 #define CV_PI   3.1415926535897932384626433832795
@@ -271,11 +281,11 @@ enum {
 #define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t))
 
 #ifndef MIN
-#define MIN(a,b)  ((a) > (b) ? (b) : (a))
+#  define MIN(a,b)  ((a) > (b) ? (b) : (a))
 #endif
 
 #ifndef MAX
-#define MAX(a,b)  ((a) < (b) ? (b) : (a))
+#  define MAX(a,b)  ((a) < (b) ? (b) : (a))
 #endif
 
 /* min & max without jumps */
@@ -285,9 +295,9 @@ enum {
 
 /* absolute value without jumps */
 #ifndef __cplusplus
-#define  CV_IABS(a)     (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0))
+#  define  CV_IABS(a)     (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0))
 #else
-#define  CV_IABS(a)     abs(a)
+#  define  CV_IABS(a)     abs(a)
 #endif
 #define  CV_CMP(a,b)    (((a) > (b)) - ((a) < (b)))
 #define  CV_SIGN(a)     CV_CMP((a),0)
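
A side note on the CV_IABS macro re-indented above: on two's-complement targets it computes |a| without a data-dependent branch, since the '(a) < 0 ? -1 : 0' test is typically compiled to an arithmetic shift of the sign bit; XOR-ing with the resulting all-ones mask flips every bit (~a), and subtracting -1 adds one, giving ~a + 1 = -a. A tiny worked check (illustration only):

    #include <stdio.h>

    int main(void)
    {
        int a = -5;
        int mask = a < 0 ? -1 : 0;        /* all ones when a is negative           */
        int abs_a = (a ^ mask) - mask;    /* (~a) - (-1) == ~a + 1 == -a == 5      */
        printf("%d\n", abs_a);            /* prints 5; non-negative a is untouched */
        return 0;
    }
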
@@ -306,11 +316,11 @@ CV_INLINE  int  cvRound( double value )
     }
     return t;
 #elif defined HAVE_LRINT || defined CV_ICC || defined __GNUC__
-# ifdef HAVE_TEGRA_OPTIMIZATION
+#  ifdef HAVE_TEGRA_OPTIMIZATION
     TEGRA_ROUND(value);
-# else
+#  else
     return (int)lrint(value);
-# endif
+#  endif
 #else
     // while this is not IEEE754-compliant rounding, it's usually a good enough approximation
     return (int)(value + (value >= 0 ? 0.5 : -0.5));
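
Note that the cvRound() code paths above do not agree on halfway cases: the lrint() and SSE paths follow the current FPU rounding mode (round-to-nearest-even by default), while the portable fallback rounds ties away from zero, exactly as its comment warns. A small illustration (not part of the patch; default floating-point environment assumed):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        printf("%ld %d\n", lrint(2.5),  (int)(2.5 + 0.5));    /*  2   3 */
        printf("%ld %d\n", lrint(-2.5), (int)(-2.5 - 0.5));   /* -2  -3 */
        return 0;
    }
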
@@ -318,7 +328,7 @@ CV_INLINE  int  cvRound( double value )
 }
 
 #if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
-#include "emmintrin.h"
+#  include "emmintrin.h"
 #endif
 
 CV_INLINE  int  cvFloor( double value )
@@ -1886,6 +1896,6 @@ typedef struct CvModuleInfo
 }
 CvModuleInfo;
 
-#endif /*_CXCORE_TYPES_H_*/
+#endif /*__OPENCV_CORE_TYPES_H__*/
 
 /* End of file. */
index 901f889..8e56cbc 100644 (file)
@@ -3,7 +3,7 @@
 
 #include "opencv2/ts/ts.hpp"
 
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
 #endif
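
This change, like the _CRT_SECURE_NO_DEPRECATE/_MSC_VER hunk near the top of the diff, avoids evaluating a possibly-undefined macro inside #if: the preprocessor silently treats an undefined identifier as 0, and gcc's -Wundef (presumably part of the stricter warning set this commit enables) reports exactly that. Testing for definition first keeps the check quiet. A sketch of the two forms, with FOO standing in for any optional macro (hypothetical name):

    /* warns under -Wundef when FOO was never defined: it silently evaluates as 0 */
    #if FOO > 1300
    #  define HAVE_NEW_FOO 1
    #endif

    /* quiet form, as used for _MSC_VER and GTEST_CREATE_SHARED_LIBRARY above */
    #if defined FOO
    #  if FOO > 1300
    #    define HAVE_NEW_FOO 1
    #  endif
    #endif
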
 
index 418cb24..79e849e 100644 (file)
@@ -28,11 +28,11 @@ PERF_TEST_P(Size_MatType, mean, TYPICAL_MATS)
 
     Mat src(sz, type);
     Scalar s;
-    
+
     declare.in(src, WARMUP_RNG).out(s);
-    
+
     TEST_CYCLE() s = mean(src);
-    
+
     SANITY_CHECK(s, 1e-6);
 }
 
@@ -44,11 +44,11 @@ PERF_TEST_P(Size_MatType, mean_mask, TYPICAL_MATS)
     Mat src(sz, type);
     Mat mask = Mat::ones(src.size(), CV_8U);
     Scalar s;
-    
+
     declare.in(src, WARMUP_RNG).in(mask).out(s);
-    
+
     TEST_CYCLE() s = mean(src, mask);
-    
+
     SANITY_CHECK(s, 1e-6);
 }
 
@@ -64,7 +64,7 @@ PERF_TEST_P(Size_MatType, meanStdDev, TYPICAL_MATS)
     declare.in(src, WARMUP_RNG).out(mean, dev);
 
     TEST_CYCLE() meanStdDev(src, mean, dev);
-    
+
     SANITY_CHECK(mean, 1e-6);
     SANITY_CHECK(dev, 1e-6);
 }
@@ -80,9 +80,9 @@ PERF_TEST_P(Size_MatType, meanStdDev_mask, TYPICAL_MATS)
     Scalar dev;
 
     declare.in(src, WARMUP_RNG).in(mask).out(mean, dev);
-    
+
     TEST_CYCLE() meanStdDev(src, mean, dev, mask);
-    
+
     SANITY_CHECK(mean, 1e-6);
     SANITY_CHECK(dev, 1e-6);
 }
@@ -96,8 +96,8 @@ PERF_TEST_P(Size_MatType, countNonZero, testing::Combine( testing::Values( TYPIC
     int cnt = 0;
 
     declare.in(src, WARMUP_RNG);
-    
+
     TEST_CYCLE() cnt = countNonZero(src);
-    
+
     SANITY_CHECK(cnt);
 }
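
The perf-test hunks above are whitespace-only; the structure they touch is the ts module's performance-test macros. As a rough guide (my reading of the framework, not something this commit changes): PERF_TEST_P declares a parameterized test, declare.in/.out lists the arrays to initialize and track, TEST_CYCLE() runs the following statement in a timed loop, and SANITY_CHECK compares the result against stored regression data within the given eps. A minimal skeleton in the same shape (hypothetical test name):

    PERF_TEST_P(Size_MatType, sum_demo, TYPICAL_MATS)
    {
        Size sz = get<0>(GetParam());
        int type = get<1>(GetParam());

        Mat src(sz, type);
        Scalar s;

        declare.in(src, WARMUP_RNG).out(s);   // inputs to randomize, outputs to track
        TEST_CYCLE() s = sum(src);            // the measured statement
        SANITY_CHECK(s, 1e-6);                // regression check within the given eps
    }
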
index dfe7252..1944ed1 100644 (file)
@@ -55,7 +55,9 @@ static void* OutOfMemoryError(size_t size)
 
 #if CV_USE_SYSTEM_MALLOC
 
+#if defined WIN32 || defined _WIN32
 void deleteThreadAllocData() {}
+#endif
 
 void* fastMalloc( size_t size )
 {
@@ -66,14 +68,14 @@ void* fastMalloc( size_t size )
     adata[-1] = udata;
     return adata;
 }
-    
+
 void fastFree(void* ptr)
 {
     if(ptr)
     {
         uchar* udata = ((uchar**)ptr)[-1];
         CV_DbgAssert(udata < (uchar*)ptr &&
-               ((uchar*)ptr - udata) <= (ptrdiff_t)(sizeof(void*)+CV_MALLOC_ALIGN)); 
+               ((uchar*)ptr - udata) <= (ptrdiff_t)(sizeof(void*)+CV_MALLOC_ALIGN));
         free(udata);
     }
 }
@@ -388,7 +390,7 @@ struct ThreadData
 
 #ifdef WIN32
 #ifdef WINCE
-#      define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
+#   define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
 #endif //WINCE
 
     static DWORD tlsKey;
@@ -535,7 +537,7 @@ void* fastMalloc( size_t size )
             freePtr = block;
             if( !data )
             {
-                block = gcPtr; 
+                block = gcPtr;
                 for( int k = 0; k < 2; k++ )
                 {
                     SANITY_CHECK(block);
@@ -620,7 +622,7 @@ void fastFree( void* ptr )
                 Block*& startPtr = tls->bins[idx][START];
                 Block*& freePtr = tls->bins[idx][FREE];
                 Block*& gcPtr = tls->bins[idx][GC];
-                
+
                 if( block == block->next )
                 {
                     CV_DbgAssert( startPtr == block && freePtr == block && gcPtr == block );
index 454c65c..e252a28 100644 (file)
@@ -974,7 +974,7 @@ void convertAndUnrollScalar( const Mat& sc, int buftype, uchar* scbuf, size_t bl
         scbuf[i] = scbuf[i - esz];
 }
 
-void binary_op(InputArray _src1, InputArray _src2, OutputArray _dst,
+static void binary_op(InputArray _src1, InputArray _src2, OutputArray _dst,
                InputArray _mask, const BinaryFunc* tab, bool bitwise)
 {
     int kind1 = _src1.kind(), kind2 = _src2.kind();
@@ -1216,7 +1216,7 @@ void cv::min(const Mat& src1, double src2, Mat& dst)
 namespace cv
 {
 
-void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
+static void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst,
                InputArray _mask, int dtype, BinaryFunc* tab, bool muldiv=false, void* usrdata=0)
 {
     int kind1 = _src1.kind(), kind2 = _src2.kind();
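
Making binary_op() and arithm_op() static gives them internal linkage, the usual fix for gcc's -Wmissing-declarations (likely among the warnings this commit tightens): the helpers are used only inside this translation unit and are declared in no header, so without static gcc reports "no previous declaration". A generic sketch of the pattern (hypothetical names, not OpenCV API):

    // helpers.cpp (illustration only)

    // File-local helper: without 'static' it has external linkage but no prior
    // declaration in any header, which -Wmissing-declarations flags.
    static void scaleRows(float* data, int rows, int cols, float s)
    {
        for (int r = 0; r < rows; ++r)
            for (int c = 0; c < cols; ++c)
                data[r * cols + c] *= s;
    }

    // Public entry point, declared in some header elsewhere.
    float sumScaled(float* data, int rows, int cols, float s)
    {
        scaleRows(data, rows, cols, s);
        float total = 0.f;
        for (int i = 0; i < rows * cols; ++i)
            total += data[i];
        return total;
    }
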
index aee5ff9..b5c838c 100644 (file)
@@ -6,6 +6,7 @@
 using namespace std;
 using namespace cv;
 
+namespace {
 void helpParser()
 {
     printf("\nThe CommandLineParser class is designed for command line arguments parsing\n"
@@ -89,6 +90,8 @@ string del_space(string name)
        return name;
 }
 
+}//namespace
+
 CommandLineParser::CommandLineParser(int argc, const char* const argv[], const char* keys)
 {
        std::string keys_buffer;
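
The unnamed namespace added around helpParser() and the other command-line helpers is the C++ counterpart of the static keyword used in arithm.cpp above: everything inside it gets internal linkage, so the file-local functions need no header declarations and cannot clash with identically named symbols in other translation units. A generic sketch (hypothetical names):

    // cmd_helpers.cpp (illustration only)
    #include <string>

    namespace {                        // internal linkage for everything inside

    std::string trim(const std::string& s)
    {
        std::string::size_type b = s.find_first_not_of(" \t");
        std::string::size_type e = s.find_last_not_of(" \t");
        return b == std::string::npos ? std::string() : s.substr(b, e - b + 1);
    }

    } // anonymous namespace

    // Public function, declared in a header elsewhere.
    std::string normalizeKey(const std::string& raw) { return trim(raw); }
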
index 9a95019..e391b15 100644 (file)
@@ -53,7 +53,7 @@ static const float atan2_p1 = 0.9997878412794807f*(float)(180/CV_PI);
 static const float atan2_p3 = -0.3258083974640975f*(float)(180/CV_PI);
 static const float atan2_p5 = 0.1555786518463281f*(float)(180/CV_PI);
 static const float atan2_p7 = -0.04432655554792128f*(float)(180/CV_PI);
-    
+
 float fastAtan2( float y, float x )
 {
     float ax = std::abs(x), ay = std::abs(y);
@@ -109,18 +109,18 @@ static void FastAtan2_32f(const float *Y, const float *X, float *angle, int len,
             a = _mm_mul_ps(_mm_add_ps(a, p5), c2);
             a = _mm_mul_ps(_mm_add_ps(a, p3), c2);
             a = _mm_mul_ps(_mm_add_ps(a, p1), c);
-            
+
             __m128 b = _mm_sub_ps(_90, a);
             a = _mm_xor_ps(a, _mm_and_ps(_mm_xor_ps(a, b), mask));
-            
+
             b = _mm_sub_ps(_180, a);
             mask = _mm_cmplt_ps(x, z);
             a = _mm_xor_ps(a, _mm_and_ps(_mm_xor_ps(a, b), mask));
-            
+
             b = _mm_sub_ps(_360, a);
             mask = _mm_cmplt_ps(y, z);
             a = _mm_xor_ps(a, _mm_and_ps(_mm_xor_ps(a, b), mask));
-            
+
             a = _mm_mul_ps(a, scale4);
             _mm_storeu_ps(angle + i, a);
         }
@@ -197,7 +197,7 @@ float  cubeRoot( float value )
 static void Magnitude_32f(const float* x, const float* y, float* mag, int len)
 {
     int i = 0;
-    
+
 #if CV_SSE
     if( USE_SSE2 )
     {
@@ -223,8 +223,8 @@ static void Magnitude_32f(const float* x, const float* y, float* mag, int len)
 static void Magnitude_64f(const double* x, const double* y, double* mag, int len)
 {
     int i = 0;
-    
-#if CV_SSE2   
+
+#if CV_SSE2
     if( USE_SSE2 )
     {
         for( ; i <= len - 4; i += 4 )
@@ -238,7 +238,7 @@ static void Magnitude_64f(const double* x, const double* y, double* mag, int len
         }
     }
 #endif
-    
+
     for( ; i < len; i++ )
     {
         double x0 = x[i], y0 = y[i];
@@ -246,14 +246,14 @@ static void Magnitude_64f(const double* x, const double* y, double* mag, int len
     }
 }
 
-    
+
 static void InvSqrt_32f(const float* src, float* dst, int len)
 {
     int i = 0;
-    
-#if CV_SSE   
+
+#if CV_SSE
     if( USE_SSE2 )
-    {    
+    {
         __m128 _0_5 = _mm_set1_ps(0.5f), _1_5 = _mm_set1_ps(1.5f);
         if( (((size_t)src|(size_t)dst) & 15) == 0 )
             for( ; i <= len - 8; i += 8 )
@@ -277,24 +277,24 @@ static void InvSqrt_32f(const float* src, float* dst, int len)
             }
     }
 #endif
-    
+
     for( ; i < len; i++ )
         dst[i] = 1/std::sqrt(src[i]);
 }
 
-    
+
 static void InvSqrt_64f(const double* src, double* dst, int len)
 {
     for( int i = 0; i < len; i++ )
         dst[i] = 1/std::sqrt(src[i]);
-}    
-    
-    
+}
+
+
 static void Sqrt_32f(const float* src, float* dst, int len)
 {
     int i = 0;
-    
-#if CV_SSE    
+
+#if CV_SSE
     if( USE_SSE2 )
     {
         if( (((size_t)src|(size_t)dst) & 15) == 0 )
@@ -312,18 +312,18 @@ static void Sqrt_32f(const float* src, float* dst, int len)
                 _mm_storeu_ps(dst + i, t0); _mm_storeu_ps(dst + i + 4, t1);
             }
     }
-#endif    
-    
+#endif
+
     for( ; i < len; i++ )
         dst[i] = std::sqrt(src[i]);
 }
 
-    
+
 static void Sqrt_64f(const double* src, double* dst, int len)
 {
     int i = 0;
-    
-#if CV_SSE2    
+
+#if CV_SSE2
     if( USE_SSE2 )
     {
         if( (((size_t)src|(size_t)dst) & 15) == 0 )
@@ -342,7 +342,7 @@ static void Sqrt_64f(const double* src, double* dst, int len)
             }
     }
 #endif
-    
+
     for( ; i < len; i++ )
         dst[i] = std::sqrt(src[i]);
 }
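
The math-function hunks here and below are again whitespace cleanup inside the element-wise kernels (FastAtan2_32f, Magnitude_32f/64f, InvSqrt_*, Sqrt_*) and the public functions built on them. For orientation, a minimal sketch of how that public API is used (illustration only, not part of the commit):

    #include <opencv2/core/core.hpp>

    float polarDemo()
    {
        float xs[] = { 1.f, 0.f, -1.f,  0.f, 1.f };
        float ys[] = { 0.f, 1.f,  0.f, -1.f, 1.f };
        cv::Mat x(1, 5, CV_32F, xs), y(1, 5, CV_32F, ys);

        cv::Mat mag, ang;
        cv::magnitude(x, y, mag);               // Magnitude_32f under the hood for float input
        cv::phase(x, y, ang, true);             // FastAtan2_32f, angles in degrees
        cv::cartToPolar(x, y, mag, ang, true);  // both results in one call

        return cv::fastAtan2(1.f, 1.f);         // about 45 degrees
    }
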
@@ -359,12 +359,12 @@ void magnitude( InputArray src1, InputArray src2, OutputArray dst )
        CV_Assert( X.size == Y.size && type == Y.type() && (depth == CV_32F || depth == CV_64F));
     dst.create(X.dims, X.size, X.type());
     Mat Mag = dst.getMat();
-    
+
     const Mat* arrays[] = {&X, &Y, &Mag, 0};
     uchar* ptrs[3];
     NAryMatIterator it(arrays, ptrs);
     int len = (int)it.size*cn;
-        
+
     for( size_t i = 0; i < it.nplanes; i++, ++it )
     {
         if( depth == CV_32F )
@@ -382,7 +382,7 @@ void magnitude( InputArray src1, InputArray src2, OutputArray dst )
     }
 }
 
-    
+
 void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegrees )
 {
     Mat X = src1.getMat(), Y = src2.getMat();
@@ -390,7 +390,7 @@ void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegre
     CV_Assert( X.size == Y.size && type == Y.type() && (depth == CV_32F || depth == CV_64F));
     dst.create( X.dims, X.size, type );
     Mat Angle = dst.getMat();
-    
+
     const Mat* arrays[] = {&X, &Y, &Angle, 0};
     uchar* ptrs[3];
     NAryMatIterator it(arrays, ptrs);
@@ -398,7 +398,7 @@ void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegre
     float* buf[2] = {0, 0};
     int j, k, total = (int)(it.size*cn), blockSize = total;
     size_t esz1 = X.elemSize1();
-    
+
     if( depth == CV_64F )
     {
         blockSize = std::min(blockSize, ((BLOCK_SIZE+cn-1)/cn)*cn);
@@ -406,7 +406,7 @@ void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegre
         buf[0] = _buf;
         buf[1] = buf[0] + blockSize;
     }
-    
+
     for( size_t i = 0; i < it.nplanes; i++, ++it )
     {
         for( j = 0; j < total; j += blockSize )
@@ -427,7 +427,7 @@ void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegre
                     buf[0][k] = (float)x[k];
                     buf[1][k] = (float)y[k];
                 }
-                    
+
                 FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees );
                 for( k = 0; k < len; k++ )
                                        angle[k] = buf[0][k];
@@ -438,8 +438,8 @@ void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegre
         }
     }
 }
-    
+
+
 void cartToPolar( InputArray src1, InputArray src2,
                   OutputArray dst1, OutputArray dst2, bool angleInDegrees )
 {
@@ -449,7 +449,7 @@ void cartToPolar( InputArray src1, InputArray src2,
     dst1.create( X.dims, X.size, type );
     dst2.create( X.dims, X.size, type );
     Mat Mag = dst1.getMat(), Angle = dst2.getMat();
-    
+
     const Mat* arrays[] = {&X, &Y, &Mag, &Angle, 0};
     uchar* ptrs[4];
     NAryMatIterator it(arrays, ptrs);
@@ -457,14 +457,14 @@ void cartToPolar( InputArray src1, InputArray src2,
     float* buf[2] = {0, 0};
     int j, k, total = (int)(it.size*cn), blockSize = std::min(total, ((BLOCK_SIZE+cn-1)/cn)*cn);
     size_t esz1 = X.elemSize1();
-    
+
     if( depth == CV_64F )
     {
         _buf.allocate(blockSize*2);
         buf[0] = _buf;
         buf[1] = buf[0] + blockSize;
     }
-    
+
     for( size_t i = 0; i < it.nplanes; i++, ++it )
     {
         for( j = 0; j < total; j += blockSize )
@@ -481,14 +481,14 @@ void cartToPolar( InputArray src1, InputArray src2,
             {
                 const double *x = (const double*)ptrs[0], *y = (const double*)ptrs[1];
                 double *angle = (double*)ptrs[3];
-                
+
                 Magnitude_64f(x, y, (double*)ptrs[2], len);
                 for( k = 0; k < len; k++ )
                 {
                     buf[0][k] = (float)x[k];
                     buf[1][k] = (float)y[k];
                 }
-                
+
                 FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees );
                 for( k = 0; k < len; k++ )
                                        angle[k] = buf[0][k];
@@ -595,7 +595,7 @@ void polarToCart( InputArray src1, InputArray src2,
     dst1.create( Angle.dims, Angle.size, type );
     dst2.create( Angle.dims, Angle.size, type );
     Mat X = dst1.getMat(), Y = dst2.getMat();
-    
+
     const Mat* arrays[] = {&Mag, &Angle, &X, &Y, 0};
     uchar* ptrs[4];
     NAryMatIterator it(arrays, ptrs);
@@ -603,14 +603,14 @@ void polarToCart( InputArray src1, InputArray src2,
     float* buf[2] = {0, 0};
     int j, k, total = (int)(it.size*cn), blockSize = std::min(total, ((BLOCK_SIZE+cn-1)/cn)*cn);
     size_t esz1 = Angle.elemSize1();
-    
+
     if( depth == CV_64F )
     {
         _buf.allocate(blockSize*2);
         buf[0] = _buf;
         buf[1] = buf[0] + blockSize;
     }
-    
+
     for( size_t i = 0; i < it.nplanes; i++, ++it )
     {
         for( j = 0; j < total; j += blockSize )
@@ -620,7 +620,7 @@ void polarToCart( InputArray src1, InputArray src2,
             {
                 const float *mag = (const float*)ptrs[0], *angle = (const float*)ptrs[1];
                 float *x = (float*)ptrs[2], *y = (float*)ptrs[3];
-                
+
                 SinCos_32f( angle, y, x, len, angleInDegrees );
                 if( mag )
                     for( k = 0; k < len; k++ )
@@ -633,10 +633,10 @@ void polarToCart( InputArray src1, InputArray src2,
             {
                 const double *mag = (const double*)ptrs[0], *angle = (const double*)ptrs[1];
                 double *x = (double*)ptrs[2], *y = (double*)ptrs[3];
-                
+
                 for( k = 0; k < len; k++ )
                     buf[0][k] = (float)angle[k];
-                
+
                 SinCos_32f( buf[0], buf[1], buf[0], len, angleInDegrees );
                 if( mag )
                     for( k = 0; k < len; k++ )
@@ -650,7 +650,7 @@ void polarToCart( InputArray src1, InputArray src2,
                         x[k] = buf[0][k]; y[k] = buf[1][k];
                     }
             }
-            
+
             if( ptrs[0] )
                 ptrs[0] += len*esz1;
             ptrs[1] += len*esz1;
@@ -759,8 +759,8 @@ static const double expTab[] = {
     (!defined __APPLE__ && defined __GNUC__ && __GNUC__*100 + __GNUC_MINOR__ < 402)
 #undef CV_SSE2
 #define CV_SSE2 0
-#endif    
-    
+#endif
+
 static const double exp_prescale = 1.4426950408889634073599246810019 * (1 << EXPTAB_SCALE);
 static const double exp_postscale = 1./(1 << EXPTAB_SCALE);
 static const double exp_max_val = 3000.*(1 << EXPTAB_SCALE); // log10(DBL_MAX) < 3000
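
The three constants above set up the usual table-driven argument reduction for exp() that the following hunks merely reformat. With S = EXPTAB_SCALE, exp_prescale = log2(e)*2^S and exp_postscale = 2^-S, so (my reading of the code, not something the commit changes):

    x*log2(e) = n + j/2^S + r,   n integer, 0 <= j < 2^S, |r| <= 2^-(S+1)
    exp(x)    = 2^n * 2^(j/2^S) * 2^r

In the loops below, 2^n is assembled directly in the float exponent field (the t << 23 stores), 2^(j/2^S) is read from expTab, and 2^r comes from the short EXPPOLY polynomial, with a common normalization constant (EXPPOLY_32F_A0) folded between the table entries and the polynomial coefficients.
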
@@ -772,11 +772,11 @@ static void Exp_32f( const float *_x, float *y, int n )
         A3 = (float)(.6931471805521448196800669615864773144641 / EXPPOLY_32F_A0),
         A2 = (float)(.2402265109513301490103372422686535526573 / EXPPOLY_32F_A0),
         A1 = (float)(.5550339366753125211915322047004666939128e-1 / EXPPOLY_32F_A0);
-    
+
 #undef EXPPOLY
 #define EXPPOLY(x)  \
     (((((x) + A1)*(x) + A2)*(x) + A3)*(x) + A4)
-    
+
     int i = 0;
     const Cv32suf* x = (const Cv32suf*)_x;
     Cv32suf buf[4];
@@ -788,90 +788,90 @@ static void Exp_32f( const float *_x, float *y, int n )
         static const __m128 postscale4 = _mm_set1_ps((float)exp_postscale);
         static const __m128 maxval4 = _mm_set1_ps((float)(exp_max_val/exp_prescale));
         static const __m128 minval4 = _mm_set1_ps((float)(-exp_max_val/exp_prescale));
-        
+
         static const __m128 mA1 = _mm_set1_ps(A1);
         static const __m128 mA2 = _mm_set1_ps(A2);
         static const __m128 mA3 = _mm_set1_ps(A3);
         static const __m128 mA4 = _mm_set1_ps(A4);
         bool y_aligned = (size_t)(void*)y % 16 == 0;
-        
+
         ushort CV_DECL_ALIGNED(16) tab_idx[8];
-        
+
         for( ; i <= n - 8; i += 8 )
         {
             __m128 xf0, xf1;
             xf0 = _mm_loadu_ps(&x[i].f);
             xf1 = _mm_loadu_ps(&x[i+4].f);
             __m128i xi0, xi1, xi2, xi3;
-            
+
             xf0 = _mm_min_ps(_mm_max_ps(xf0, minval4), maxval4);
             xf1 = _mm_min_ps(_mm_max_ps(xf1, minval4), maxval4);
-            
+
             __m128d xd0 = _mm_cvtps_pd(xf0);
             __m128d xd2 = _mm_cvtps_pd(_mm_movehl_ps(xf0, xf0));
             __m128d xd1 = _mm_cvtps_pd(xf1);
             __m128d xd3 = _mm_cvtps_pd(_mm_movehl_ps(xf1, xf1));
-            
+
             xd0 = _mm_mul_pd(xd0, prescale2);
             xd2 = _mm_mul_pd(xd2, prescale2);
             xd1 = _mm_mul_pd(xd1, prescale2);
             xd3 = _mm_mul_pd(xd3, prescale2);
-            
+
             xi0 = _mm_cvtpd_epi32(xd0);
             xi2 = _mm_cvtpd_epi32(xd2);
-            
+
             xi1 = _mm_cvtpd_epi32(xd1);
             xi3 = _mm_cvtpd_epi32(xd3);
-            
+
             xd0 = _mm_sub_pd(xd0, _mm_cvtepi32_pd(xi0));
             xd2 = _mm_sub_pd(xd2, _mm_cvtepi32_pd(xi2));
             xd1 = _mm_sub_pd(xd1, _mm_cvtepi32_pd(xi1));
             xd3 = _mm_sub_pd(xd3, _mm_cvtepi32_pd(xi3));
-            
+
             xf0 = _mm_movelh_ps(_mm_cvtpd_ps(xd0), _mm_cvtpd_ps(xd2));
             xf1 = _mm_movelh_ps(_mm_cvtpd_ps(xd1), _mm_cvtpd_ps(xd3));
-            
+
             xf0 = _mm_mul_ps(xf0, postscale4);
             xf1 = _mm_mul_ps(xf1, postscale4);
 
             xi0 = _mm_unpacklo_epi64(xi0, xi2);
             xi1 = _mm_unpacklo_epi64(xi1, xi3);
             xi0 = _mm_packs_epi32(xi0, xi1);
-            
+
             _mm_store_si128((__m128i*)tab_idx, _mm_and_si128(xi0, _mm_set1_epi16(EXPTAB_MASK)));
-            
+
             xi0 = _mm_add_epi16(_mm_srai_epi16(xi0, EXPTAB_SCALE), _mm_set1_epi16(127));
             xi0 = _mm_max_epi16(xi0, _mm_setzero_si128());
             xi0 = _mm_min_epi16(xi0, _mm_set1_epi16(255));
             xi1 = _mm_unpackhi_epi16(xi0, _mm_setzero_si128());
             xi0 = _mm_unpacklo_epi16(xi0, _mm_setzero_si128());
-            
+
             __m128d yd0 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[0]), _mm_load_sd(expTab + tab_idx[1]));
             __m128d yd1 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[2]), _mm_load_sd(expTab + tab_idx[3]));
             __m128d yd2 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[4]), _mm_load_sd(expTab + tab_idx[5]));
             __m128d yd3 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[6]), _mm_load_sd(expTab + tab_idx[7]));
-            
+
             __m128 yf0 = _mm_movelh_ps(_mm_cvtpd_ps(yd0), _mm_cvtpd_ps(yd1));
             __m128 yf1 = _mm_movelh_ps(_mm_cvtpd_ps(yd2), _mm_cvtpd_ps(yd3));
 
             yf0 = _mm_mul_ps(yf0, _mm_castsi128_ps(_mm_slli_epi32(xi0, 23)));
             yf1 = _mm_mul_ps(yf1, _mm_castsi128_ps(_mm_slli_epi32(xi1, 23)));
-            
+
             __m128 zf0 = _mm_add_ps(xf0, mA1);
             __m128 zf1 = _mm_add_ps(xf1, mA1);
-            
+
             zf0 = _mm_add_ps(_mm_mul_ps(zf0, xf0), mA2);
             zf1 = _mm_add_ps(_mm_mul_ps(zf1, xf1), mA2);
-            
+
             zf0 = _mm_add_ps(_mm_mul_ps(zf0, xf0), mA3);
             zf1 = _mm_add_ps(_mm_mul_ps(zf1, xf1), mA3);
-            
+
             zf0 = _mm_add_ps(_mm_mul_ps(zf0, xf0), mA4);
             zf1 = _mm_add_ps(_mm_mul_ps(zf1, xf1), mA4);
-            
+
             zf0 = _mm_mul_ps(zf0, yf0);
             zf1 = _mm_mul_ps(zf1, yf1);
-            
+
             if( y_aligned )
             {
                 _mm_store_ps(y + i, zf0);
@@ -893,77 +893,77 @@ static void Exp_32f( const float *_x, float *y, int n )
         double x2 = x[i + 2].f * exp_prescale;
         double x3 = x[i + 3].f * exp_prescale;
         int val0, val1, val2, val3, t;
-        
+
         if( ((x[i].i >> 23) & 255) > 127 + 10 )
             x0 = x[i].i < 0 ? -exp_max_val : exp_max_val;
-        
+
         if( ((x[i+1].i >> 23) & 255) > 127 + 10 )
             x1 = x[i+1].i < 0 ? -exp_max_val : exp_max_val;
-        
+
         if( ((x[i+2].i >> 23) & 255) > 127 + 10 )
             x2 = x[i+2].i < 0 ? -exp_max_val : exp_max_val;
-        
+
         if( ((x[i+3].i >> 23) & 255) > 127 + 10 )
             x3 = x[i+3].i < 0 ? -exp_max_val : exp_max_val;
-        
+
         val0 = cvRound(x0);
         val1 = cvRound(x1);
         val2 = cvRound(x2);
         val3 = cvRound(x3);
-        
+
         x0 = (x0 - val0)*exp_postscale;
         x1 = (x1 - val1)*exp_postscale;
         x2 = (x2 - val2)*exp_postscale;
         x3 = (x3 - val3)*exp_postscale;
-        
+
         t = (val0 >> EXPTAB_SCALE) + 127;
         t = !(t & ~255) ? t : t < 0 ? 0 : 255;
         buf[0].i = t << 23;
-        
+
         t = (val1 >> EXPTAB_SCALE) + 127;
         t = !(t & ~255) ? t : t < 0 ? 0 : 255;
         buf[1].i = t << 23;
-        
+
         t = (val2 >> EXPTAB_SCALE) + 127;
         t = !(t & ~255) ? t : t < 0 ? 0 : 255;
         buf[2].i = t << 23;
-        
+
         t = (val3 >> EXPTAB_SCALE) + 127;
         t = !(t & ~255) ? t : t < 0 ? 0 : 255;
         buf[3].i = t << 23;
-        
+
         x0 = buf[0].f * expTab[val0 & EXPTAB_MASK] * EXPPOLY( x0 );
         x1 = buf[1].f * expTab[val1 & EXPTAB_MASK] * EXPPOLY( x1 );
-        
+
         y[i] = (float)x0;
         y[i + 1] = (float)x1;
-        
+
         x2 = buf[2].f * expTab[val2 & EXPTAB_MASK] * EXPPOLY( x2 );
         x3 = buf[3].f * expTab[val3 & EXPTAB_MASK] * EXPPOLY( x3 );
-        
+
         y[i + 2] = (float)x2;
         y[i + 3] = (float)x3;
     }
-    
+
     for( ; i < n; i++ )
     {
         double x0 = x[i].f * exp_prescale;
         int val0, t;
-        
+
         if( ((x[i].i >> 23) & 255) > 127 + 10 )
             x0 = x[i].i < 0 ? -exp_max_val : exp_max_val;
-        
+
         val0 = cvRound(x0);
         t = (val0 >> EXPTAB_SCALE) + 127;
         t = !(t & ~255) ? t : t < 0 ? 0 : 255;
-        
+
         buf[0].i = t << 23;
         x0 = (x0 - val0)*exp_postscale;
-        
+
         y[i] = (float)(buf[0].f * expTab[val0 & EXPTAB_MASK] * EXPPOLY(x0));
     }
 }
-    
+
 
 static void Exp_64f( const double *_x, double *y, int n )
 {
@@ -974,14 +974,14 @@ static void Exp_64f( const double *_x, double *y, int n )
     A2 = .55504108793649567998466049042729e-1 / EXPPOLY_32F_A0,
     A1 = .96180973140732918010002372686186e-2 / EXPPOLY_32F_A0,
     A0 = .13369713757180123244806654839424e-2 / EXPPOLY_32F_A0;
-    
+
 #undef EXPPOLY
 #define EXPPOLY(x)  (((((A0*(x) + A1)*(x) + A2)*(x) + A3)*(x) + A4)*(x) + A5)
-    
+
     int i = 0;
     Cv64suf buf[4];
     const Cv64suf* x = (const Cv64suf*)_x;
-    
+
 #if CV_SSE2
     if( USE_SSE2 )
     {
@@ -989,16 +989,16 @@ static void Exp_64f( const double *_x, double *y, int n )
         static const __m128d postscale2 = _mm_set1_pd(exp_postscale);
         static const __m128d maxval2 = _mm_set1_pd(exp_max_val);
         static const __m128d minval2 = _mm_set1_pd(-exp_max_val);
-        
+
         static const __m128d mA0 = _mm_set1_pd(A0);
         static const __m128d mA1 = _mm_set1_pd(A1);
         static const __m128d mA2 = _mm_set1_pd(A2);
         static const __m128d mA3 = _mm_set1_pd(A3);
         static const __m128d mA4 = _mm_set1_pd(A4);
         static const __m128d mA5 = _mm_set1_pd(A5);
-        
+
         int CV_DECL_ALIGNED(16) tab_idx[4];
-        
+
         for( ; i <= n - 4; i += 4 )
         {
             __m128d xf0 = _mm_loadu_pd(&x[i].f), xf1 = _mm_loadu_pd(&x[i+2].f);
@@ -1007,15 +1007,15 @@ static void Exp_64f( const double *_x, double *y, int n )
             xf1 = _mm_min_pd(_mm_max_pd(xf1, minval2), maxval2);
             xf0 = _mm_mul_pd(xf0, prescale2);
             xf1 = _mm_mul_pd(xf1, prescale2);
-            
+
             xi0 = _mm_cvtpd_epi32(xf0);
             xi1 = _mm_cvtpd_epi32(xf1);
             xf0 = _mm_mul_pd(_mm_sub_pd(xf0, _mm_cvtepi32_pd(xi0)), postscale2);
             xf1 = _mm_mul_pd(_mm_sub_pd(xf1, _mm_cvtepi32_pd(xi1)), postscale2);
-            
+
             xi0 = _mm_unpacklo_epi64(xi0, xi1);
             _mm_store_si128((__m128i*)tab_idx, _mm_and_si128(xi0, _mm_set1_epi32(EXPTAB_MASK)));
-            
+
             xi0 = _mm_add_epi32(_mm_srai_epi32(xi0, EXPTAB_SCALE), _mm_set1_epi32(1023));
             xi0 = _mm_packs_epi32(xi0, xi0);
             xi0 = _mm_max_epi16(xi0, _mm_setzero_si128());
@@ -1023,30 +1023,30 @@ static void Exp_64f( const double *_x, double *y, int n )
             xi0 = _mm_unpacklo_epi16(xi0, _mm_setzero_si128());
             xi1 = _mm_unpackhi_epi32(xi0, _mm_setzero_si128());
             xi0 = _mm_unpacklo_epi32(xi0, _mm_setzero_si128());
-            
+
             __m128d yf0 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[0]), _mm_load_sd(expTab + tab_idx[1]));
             __m128d yf1 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[2]), _mm_load_sd(expTab + tab_idx[3]));
             yf0 = _mm_mul_pd(yf0, _mm_castsi128_pd(_mm_slli_epi64(xi0, 52)));
             yf1 = _mm_mul_pd(yf1, _mm_castsi128_pd(_mm_slli_epi64(xi1, 52)));
-            
+
             __m128d zf0 = _mm_add_pd(_mm_mul_pd(mA0, xf0), mA1);
             __m128d zf1 = _mm_add_pd(_mm_mul_pd(mA0, xf1), mA1);
-            
+
             zf0 = _mm_add_pd(_mm_mul_pd(zf0, xf0), mA2);
             zf1 = _mm_add_pd(_mm_mul_pd(zf1, xf1), mA2);
-            
+
             zf0 = _mm_add_pd(_mm_mul_pd(zf0, xf0), mA3);
             zf1 = _mm_add_pd(_mm_mul_pd(zf1, xf1), mA3);
-            
+
             zf0 = _mm_add_pd(_mm_mul_pd(zf0, xf0), mA4);
             zf1 = _mm_add_pd(_mm_mul_pd(zf1, xf1), mA4);
-            
+
             zf0 = _mm_add_pd(_mm_mul_pd(zf0, xf0), mA5);
             zf1 = _mm_add_pd(_mm_mul_pd(zf1, xf1), mA5);
-            
+
             zf0 = _mm_mul_pd(zf0, yf0);
             zf1 = _mm_mul_pd(zf1, yf1);
-            
+
             _mm_storeu_pd(y + i, zf0);
             _mm_storeu_pd(y + i + 2, zf1);
         }
@@ -1059,81 +1059,81 @@ static void Exp_64f( const double *_x, double *y, int n )
         double x1 = x[i + 1].f * exp_prescale;
         double x2 = x[i + 2].f * exp_prescale;
         double x3 = x[i + 3].f * exp_prescale;
-        
+
         double y0, y1, y2, y3;
         int val0, val1, val2, val3, t;
-        
+
         t = (int)(x[i].i >> 52);
         if( (t & 2047) > 1023 + 10 )
             x0 = t < 0 ? -exp_max_val : exp_max_val;
-        
+
         t = (int)(x[i+1].i >> 52);
         if( (t & 2047) > 1023 + 10 )
             x1 = t < 0 ? -exp_max_val : exp_max_val;
-        
+
         t = (int)(x[i+2].i >> 52);
         if( (t & 2047) > 1023 + 10 )
             x2 = t < 0 ? -exp_max_val : exp_max_val;
-        
+
         t = (int)(x[i+3].i >> 52);
         if( (t & 2047) > 1023 + 10 )
             x3 = t < 0 ? -exp_max_val : exp_max_val;
-        
+
         val0 = cvRound(x0);
         val1 = cvRound(x1);
         val2 = cvRound(x2);
         val3 = cvRound(x3);
-        
+
         x0 = (x0 - val0)*exp_postscale;
         x1 = (x1 - val1)*exp_postscale;
         x2 = (x2 - val2)*exp_postscale;
         x3 = (x3 - val3)*exp_postscale;
-        
+
         t = (val0 >> EXPTAB_SCALE) + 1023;
         t = !(t & ~2047) ? t : t < 0 ? 0 : 2047;
         buf[0].i = (int64)t << 52;
-        
+
         t = (val1 >> EXPTAB_SCALE) + 1023;
         t = !(t & ~2047) ? t : t < 0 ? 0 : 2047;
         buf[1].i = (int64)t << 52;
-        
+
         t = (val2 >> EXPTAB_SCALE) + 1023;
         t = !(t & ~2047) ? t : t < 0 ? 0 : 2047;
         buf[2].i = (int64)t << 52;
-        
+
         t = (val3 >> EXPTAB_SCALE) + 1023;
         t = !(t & ~2047) ? t : t < 0 ? 0 : 2047;
         buf[3].i = (int64)t << 52;
-        
+
         y0 = buf[0].f * expTab[val0 & EXPTAB_MASK] * EXPPOLY( x0 );
         y1 = buf[1].f * expTab[val1 & EXPTAB_MASK] * EXPPOLY( x1 );
-        
+
         y[i] = y0;
         y[i + 1] = y1;
-        
+
         y2 = buf[2].f * expTab[val2 & EXPTAB_MASK] * EXPPOLY( x2 );
         y3 = buf[3].f * expTab[val3 & EXPTAB_MASK] * EXPPOLY( x3 );
-        
+
         y[i + 2] = y2;
         y[i + 3] = y3;
     }
-    
+
     for( ; i < n; i++ )
     {
         double x0 = x[i].f * exp_prescale;
         int val0, t;
-        
+
         t = (int)(x[i].i >> 52);
         if( (t & 2047) > 1023 + 10 )
             x0 = t < 0 ? -exp_max_val : exp_max_val;
-        
+
         val0 = cvRound(x0);
         t = (val0 >> EXPTAB_SCALE) + 1023;
         t = !(t & ~2047) ? t : t < 0 ? 0 : 2047;
-        
+
         buf[0].i = (int64)t << 52;
         x0 = (x0 - val0)*exp_postscale;
-        
+
         y[i] = buf[0].f * expTab[val0 & EXPTAB_MASK] * EXPPOLY( x0 );
     }
 }
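
Note on the hunks above: Exp_32f/Exp_64f evaluate exp(x) as 2^k * expTab[j] * EXPPOLY(r) — x is first scaled by exp_prescale, val = cvRound of the scaled value supplies k = val >> EXPTAB_SCALE and j = val & EXPTAB_MASK, and the remainder times exp_postscale feeds the short EXPPOLY polynomial. A minimal standalone sketch of that decomposition, assuming a hypothetical 32-entry table (the real size is 1 << EXPTAB_SCALE) and a cubic stand-in for EXPPOLY:

    // sketch only, not OpenCV code; assumes arithmetic >> for negative v
    #include <cmath>
    #include <cstdio>

    static double tab[32];                            // tab[j] = 2^(j/32)

    static double exp_sketch(double x)
    {
        const double ln2 = std::log(2.0);
        int v = (int)std::lround(x * 32.0 / ln2);     // cf. cvRound(x0 * exp_prescale)
        int k = v >> 5, j = v & 31;                   // cf. val >> EXPTAB_SCALE, val & EXPTAB_MASK
        double r = x - v * (ln2 / 32.0);              // cf. (x0 - val0) * exp_postscale
        double p = 1.0 + r + r*r/2.0 + r*r*r/6.0;     // stand-in for EXPPOLY(r)
        return std::ldexp(tab[j] * p, k);             // real code builds 2^k by writing t << 52 into buf.i
    }

    int main()
    {
        for (int j = 0; j < 32; j++) tab[j] = std::pow(2.0, j / 32.0);
        std::printf("%.9f vs %.9f\n", exp_sketch(1.0), std::exp(1.0));
        return 0;
    }
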
@@ -1153,17 +1153,17 @@ void exp( InputArray _src, OutputArray _dst )
 {
     Mat src = _src.getMat();
     int type = src.type(), depth = src.depth(), cn = src.channels();
-    
+
     _dst.create( src.dims, src.size, type );
     Mat dst = _dst.getMat();
-    
+
     CV_Assert( depth == CV_32F || depth == CV_64F );
-    
+
     const Mat* arrays[] = {&src, &dst, 0};
     uchar* ptrs[2];
     NAryMatIterator it(arrays, ptrs);
     int len = (int)(it.size*cn);
-    
+
     for( size_t i = 0; i < it.nplanes; i++, ++it )
     {
         if( depth == CV_32F )
@@ -1470,26 +1470,26 @@ static void Log_32f( const float *_x, float *y, int n )
         static const __m128d ln2_2 = _mm_set1_pd(ln_2);
         static const __m128 _1_4 = _mm_set1_ps(1.f);
         static const __m128 shift4 = _mm_set1_ps(-1.f/512);
-        
+
         static const __m128 mA0 = _mm_set1_ps(A0);
         static const __m128 mA1 = _mm_set1_ps(A1);
         static const __m128 mA2 = _mm_set1_ps(A2);
-        
+
         int CV_DECL_ALIGNED(16) idx[4];
-        
+
         for( ; i <= n - 4; i += 4 )
-        {            
+        {
             __m128i h0 = _mm_loadu_si128((const __m128i*)(x + i));
             __m128i yi0 = _mm_sub_epi32(_mm_and_si128(_mm_srli_epi32(h0, 23), _mm_set1_epi32(255)), _mm_set1_epi32(127));
             __m128d yd0 = _mm_mul_pd(_mm_cvtepi32_pd(yi0), ln2_2);
             __m128d yd1 = _mm_mul_pd(_mm_cvtepi32_pd(_mm_unpackhi_epi64(yi0,yi0)), ln2_2);
-            
+
             __m128i xi0 = _mm_or_si128(_mm_and_si128(h0, _mm_set1_epi32(LOGTAB_MASK2_32F)), _mm_set1_epi32(127 << 23));
-            
+
             h0 = _mm_and_si128(_mm_srli_epi32(h0, 23 - LOGTAB_SCALE - 1), _mm_set1_epi32(LOGTAB_MASK*2));
             _mm_store_si128((__m128i*)idx, h0);
             h0 = _mm_cmpeq_epi32(h0, _mm_set1_epi32(510));
-            
+
             __m128d t0, t1, t2, t3, t4;
             t0 = _mm_load_pd(icvLogTab + idx[0]);
             t2 = _mm_load_pd(icvLogTab + idx[1]);
@@ -1499,21 +1499,21 @@ static void Log_32f( const float *_x, float *y, int n )
             t4 = _mm_load_pd(icvLogTab + idx[3]);
             t3 = _mm_unpackhi_pd(t2, t4);
             t2 = _mm_unpacklo_pd(t2, t4);
-            
+
             yd0 = _mm_add_pd(yd0, t0);
             yd1 = _mm_add_pd(yd1, t2);
-            
+
             __m128 yf0 = _mm_movelh_ps(_mm_cvtpd_ps(yd0), _mm_cvtpd_ps(yd1));
-            
+
             __m128 xf0 = _mm_sub_ps(_mm_castsi128_ps(xi0), _1_4);
             xf0 = _mm_mul_ps(xf0, _mm_movelh_ps(_mm_cvtpd_ps(t1), _mm_cvtpd_ps(t3)));
             xf0 = _mm_add_ps(xf0, _mm_and_ps(_mm_castsi128_ps(h0), shift4));
-            
+
             __m128 zf0 = _mm_mul_ps(xf0, mA0);
             zf0 = _mm_mul_ps(_mm_add_ps(zf0, mA1), xf0);
             zf0 = _mm_mul_ps(_mm_add_ps(zf0, mA2), xf0);
             yf0 = _mm_add_ps(yf0, zf0);
-            
+
             _mm_storeu_ps(y + i, yf0);
         }
     }
@@ -1626,10 +1626,10 @@ static void Log_64f( const double *x, double *y, int n )
         static const __m128d ln2_2 = _mm_set1_pd(ln_2);
         static const __m128d _1_2 = _mm_set1_pd(1.);
         static const __m128d shift2 = _mm_set1_pd(-1./512);
-        
+
         static const __m128i log_and_mask2 = _mm_set_epi32(LOGTAB_MASK2, 0xffffffff, LOGTAB_MASK2, 0xffffffff);
         static const __m128i log_or_mask2 = _mm_set_epi32(1023 << 20, 0, 1023 << 20, 0);
-        
+
         static const __m128d mA0 = _mm_set1_pd(A0);
         static const __m128d mA1 = _mm_set1_pd(A1);
         static const __m128d mA2 = _mm_set1_pd(A2);
@@ -1638,28 +1638,28 @@ static void Log_64f( const double *x, double *y, int n )
         static const __m128d mA5 = _mm_set1_pd(A5);
         static const __m128d mA6 = _mm_set1_pd(A6);
         static const __m128d mA7 = _mm_set1_pd(A7);
-        
+
         int CV_DECL_ALIGNED(16) idx[4];
-        
+
         for( ; i <= n - 4; i += 4 )
         {
             __m128i h0 = _mm_loadu_si128((const __m128i*)(x + i));
             __m128i h1 = _mm_loadu_si128((const __m128i*)(x + i + 2));
-            
+
             __m128d xd0 = _mm_castsi128_pd(_mm_or_si128(_mm_and_si128(h0, log_and_mask2), log_or_mask2));
             __m128d xd1 = _mm_castsi128_pd(_mm_or_si128(_mm_and_si128(h1, log_and_mask2), log_or_mask2));
-            
+
             h0 = _mm_unpackhi_epi32(_mm_unpacklo_epi32(h0, h1), _mm_unpackhi_epi32(h0, h1));
-            
+
             __m128i yi0 = _mm_sub_epi32(_mm_and_si128(_mm_srli_epi32(h0, 20),
                                     _mm_set1_epi32(2047)), _mm_set1_epi32(1023));
             __m128d yd0 = _mm_mul_pd(_mm_cvtepi32_pd(yi0), ln2_2);
             __m128d yd1 = _mm_mul_pd(_mm_cvtepi32_pd(_mm_unpackhi_epi64(yi0, yi0)), ln2_2);
-            
+
             h0 = _mm_and_si128(_mm_srli_epi32(h0, 20 - LOGTAB_SCALE - 1), _mm_set1_epi32(LOGTAB_MASK * 2));
             _mm_store_si128((__m128i*)idx, h0);
             h0 = _mm_cmpeq_epi32(h0, _mm_set1_epi32(510));
-            
+
             __m128d t0, t1, t2, t3, t4;
             t0 = _mm_load_pd(icvLogTab + idx[0]);
             t2 = _mm_load_pd(icvLogTab + idx[1]);
@@ -1669,16 +1669,16 @@ static void Log_64f( const double *x, double *y, int n )
             t4 = _mm_load_pd(icvLogTab + idx[3]);
             t3 = _mm_unpackhi_pd(t2, t4);
             t2 = _mm_unpacklo_pd(t2, t4);
-            
+
             yd0 = _mm_add_pd(yd0, t0);
             yd1 = _mm_add_pd(yd1, t2);
-            
+
             xd0 = _mm_mul_pd(_mm_sub_pd(xd0, _1_2), t1);
             xd1 = _mm_mul_pd(_mm_sub_pd(xd1, _1_2), t3);
-            
+
             xd0 = _mm_add_pd(xd0, _mm_and_pd(_mm_castsi128_pd(_mm_unpacklo_epi32(h0, h0)), shift2));
             xd1 = _mm_add_pd(xd1, _mm_and_pd(_mm_castsi128_pd(_mm_unpackhi_epi32(h0, h0)), shift2));
-            
+
             __m128d zd0 = _mm_mul_pd(xd0, mA0);
             __m128d zd1 = _mm_mul_pd(xd1, mA0);
             zd0 = _mm_mul_pd(_mm_add_pd(zd0, mA1), xd0);
@@ -1695,10 +1695,10 @@ static void Log_64f( const double *x, double *y, int n )
             zd1 = _mm_mul_pd(_mm_add_pd(zd1, mA6), xd1);
             zd0 = _mm_mul_pd(_mm_add_pd(zd0, mA7), xd0);
             zd1 = _mm_mul_pd(_mm_add_pd(zd1, mA7), xd1);
-            
+
             yd0 = _mm_add_pd(yd0, zd0);
             yd1 = _mm_add_pd(yd1, zd1);
-            
+
             _mm_storeu_pd(y + i, yd0);
             _mm_storeu_pd(y + i + 2, yd1);
         }
@@ -1769,7 +1769,7 @@ static void Log_64f( const double *x, double *y, int n )
         y[i + 2] = y2;
         y[i + 3] = y3;
     }
-    
+
     for( ; i < n; i++ )
     {
         int h0 = X[i].i.hi;
@@ -1798,17 +1798,17 @@ void log( InputArray _src, OutputArray _dst )
 {
     Mat src = _src.getMat();
     int type = src.type(), depth = src.depth(), cn = src.channels();
-    
+
     _dst.create( src.dims, src.size, type );
     Mat dst = _dst.getMat();
-    
+
     CV_Assert( depth == CV_32F || depth == CV_64F );
-    
+
     const Mat* arrays[] = {&src, &dst, 0};
     uchar* ptrs[2];
     NAryMatIterator it(arrays, ptrs);
     int len = (int)(it.size*cn);
-    
+
     for( size_t i = 0; i < it.nplanes; i++, ++it )
     {
         if( depth == CV_32F )
@@ -1816,7 +1816,7 @@ void log( InputArray _src, OutputArray _dst )
         else
             Log_64f( (const double*)ptrs[0], (double*)ptrs[1], len );
     }
-}    
+}
 
 /****************************************************************************************\
 *                                    P O W E R                                           *
@@ -1844,63 +1844,63 @@ iPow_( const T* src, T* dst, int len, int power )
     }
 }
 
-    
-void iPow8u(const uchar* src, uchar* dst, int len, int power)
+
+static void iPow8u(const uchar* src, uchar* dst, int len, int power)
 {
     iPow_<uchar, int>(src, dst, len, power);
 }
 
-void iPow8s(const schar* src, schar* dst, int len, int power)
+static void iPow8s(const schar* src, schar* dst, int len, int power)
 {
     iPow_<schar, int>(src, dst, len, power);
 }
-    
-void iPow16u(const ushort* src, ushort* dst, int len, int power)
+
+static void iPow16u(const ushort* src, ushort* dst, int len, int power)
 {
     iPow_<ushort, int>(src, dst, len, power);
 }
 
-void iPow16s(const short* src, short* dst, int len, int power)
+static void iPow16s(const short* src, short* dst, int len, int power)
 {
     iPow_<short, int>(src, dst, len, power);
 }
-    
-void iPow32s(const int* src, int* dst, int len, int power)
+
+static void iPow32s(const int* src, int* dst, int len, int power)
 {
     iPow_<int, int>(src, dst, len, power);
 }
 
-void iPow32f(const float* src, float* dst, int len, int power)
+static void iPow32f(const float* src, float* dst, int len, int power)
 {
     iPow_<float, float>(src, dst, len, power);
 }
 
-void iPow64f(const double* src, double* dst, int len, int power)
+static void iPow64f(const double* src, double* dst, int len, int power)
 {
     iPow_<double, double>(src, dst, len, power);
 }
 
-    
+
 typedef void (*IPowFunc)( const uchar* src, uchar* dst, int len, int power );
-    
+
 static IPowFunc ipowTab[] =
 {
     (IPowFunc)iPow8u, (IPowFunc)iPow8s, (IPowFunc)iPow16u, (IPowFunc)iPow16s,
     (IPowFunc)iPow32s, (IPowFunc)iPow32f, (IPowFunc)iPow64f, 0
 };
 
-    
+
 void pow( InputArray _src, double power, OutputArray _dst )
 {
     Mat src = _src.getMat();
     int type = src.type(), depth = src.depth(), cn = src.channels();
-    
+
     _dst.create( src.dims, src.size, type );
     Mat dst = _dst.getMat();
-    
+
     int ipower = cvRound(power);
     bool is_ipower = false;
-    
+
     if( fabs(ipower - power) < DBL_EPSILON )
     {
         if( ipower < 0 )
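
Note on the hunk above: the iPow* helpers gain internal linkage. The diff itself does not state why, but a plausible motive under the stricter gcc settings this commit enables is -Wmissing-declarations, which warns when an extern function is defined without a prior prototype; marking file-local helpers static removes that diagnostic. A hypothetical illustration (not OpenCV code):

    // helpers.cpp -- compile with: g++ -Wmissing-declarations -c helpers.cpp
    int twice(int x) { return 2 * x; }             // warning: no previous declaration for 'int twice(int)'

    static int thrice(int x) { return 3 * x; }     // internal linkage -> no warning

    int combine(int x);                            // a prior prototype silences it as well
    int combine(int x) { return twice(x) + thrice(x); }
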
@@ -1911,7 +1911,7 @@ void pow( InputArray _src, double power, OutputArray _dst )
             ipower = -ipower;
             src = dst;
         }
-        
+
         switch( ipower )
         {
         case 0:
@@ -1929,17 +1929,17 @@ void pow( InputArray _src, double power, OutputArray _dst )
     }
     else
         CV_Assert( depth == CV_32F || depth == CV_64F );
-    
+
     const Mat* arrays[] = {&src, &dst, 0};
     uchar* ptrs[2];
     NAryMatIterator it(arrays, ptrs);
     int len = (int)(it.size*cn);
-    
+
     if( is_ipower )
     {
         IPowFunc func = ipowTab[depth];
         CV_Assert( func != 0 );
-        
+
         for( size_t i = 0; i < it.nplanes; i++, ++it )
             func( ptrs[0], ptrs[1], len, ipower );
     }
@@ -1948,7 +1948,7 @@ void pow( InputArray _src, double power, OutputArray _dst )
         MathFunc func = power < 0 ?
             (depth == CV_32F ? (MathFunc)InvSqrt_32f : (MathFunc)InvSqrt_64f) :
             (depth == CV_32F ? (MathFunc)Sqrt_32f : (MathFunc)Sqrt_64f);
-        
+
         for( size_t i = 0; i < it.nplanes; i++, ++it )
             func( ptrs[0], ptrs[1], len );
     }
@@ -1956,7 +1956,7 @@ void pow( InputArray _src, double power, OutputArray _dst )
     {
         int j, k, blockSize = std::min(len, ((BLOCK_SIZE + cn-1)/cn)*cn);
         size_t esz1 = src.elemSize1();
-        
+
         for( size_t i = 0; i < it.nplanes; i++, ++it )
         {
             for( j = 0; j < len; j += blockSize )
@@ -1966,7 +1966,7 @@ void pow( InputArray _src, double power, OutputArray _dst )
                 {
                     const float* x = (const float*)ptrs[0];
                     float* y = (float*)ptrs[1];
-                    
+
                     Log_32f(x, y, bsz);
                     for( k = 0; k < bsz; k++ )
                         y[k] = (float)(y[k]*power);
@@ -1976,7 +1976,7 @@ void pow( InputArray _src, double power, OutputArray _dst )
                 {
                     const double* x = (const double*)ptrs[0];
                     double* y = (double*)ptrs[1];
-                    
+
                     Log_64f(x, y, bsz);
                     for( k = 0; k < bsz; k++ )
                         y[k] *= power;
@@ -2036,8 +2036,8 @@ template<> struct mat_type_assotiations<CV_32S>
 template<int depth>
 bool checkIntegerRange(cv::Mat src, Point& bad_pt, int minVal, int maxVal, double& bad_value)
 {
-    typedef mat_type_assotiations<depth> type_ass; 
-    
+    typedef mat_type_assotiations<depth> type_ass;
+
     if (minVal < type_ass::min_allowable && maxVal > type_ass::max_allowable)
     {
         return true;
@@ -2051,23 +2051,23 @@ bool checkIntegerRange(cv::Mat src, Point& bad_pt, int minVal, int maxVal, doubl
 
     for (int j = 0; j < as_one_channel.rows; ++j)
         for (int i = 0; i < as_one_channel.cols; ++i)
-        {    
+        {
             if (as_one_channel.at<typename type_ass::type>(j ,i) < minVal || as_one_channel.at<typename type_ass::type>(j ,i) > maxVal)
-            {            
-                bad_pt.y = j ; 
+            {
+                bad_pt.y = j ;
                 bad_pt.x = i % src.channels();
                 bad_value = as_one_channel.at<typename type_ass::type>(j ,i);
                 return false;
             }
         }
     bad_value = 0.0;
-    
+
     return true;
 }
 
-typedef bool (*check_range_function)(cv::Mat src, Point& bad_pt, int minVal, int maxVal, double& bad_value); 
+typedef bool (*check_range_function)(cv::Mat src, Point& bad_pt, int minVal, int maxVal, double& bad_value);
 
-check_range_function check_range_functions[] = 
+check_range_function check_range_functions[] =
 {
     &checkIntegerRange<CV_8U>,
     &checkIntegerRange<CV_8S>,
@@ -2085,7 +2085,7 @@ bool checkRange(InputArray _src, bool quiet, Point* pt, double minVal, double ma
         const Mat* arrays[] = {&src, 0};
         Mat planes[1];
         NAryMatIterator it(arrays, planes);
-        
+
         for ( size_t i = 0; i < it.nplanes; i++, ++it )
         {
             if (!checkRange( it.planes[0], quiet, pt, minVal, maxVal ))
@@ -2096,7 +2096,7 @@ bool checkRange(InputArray _src, bool quiet, Point* pt, double minVal, double ma
         }
         return true;
     }
-    
+
     int depth = src.depth();
     Point badPt(-1, -1);
     double badValue = 0;
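
Note on ipowTab and check_range_functions above: both use the same per-depth dispatch pattern, a function-pointer table indexed by the Mat depth code (CV_8U = 0, CV_8S = 1, ..., CV_64F = 6). A stripped-down sketch with hypothetical names, not the OpenCV API:

    typedef void (*RowFunc)(const void* src, void* dst, int len);

    template<typename T> static void copyRow(const void* src, void* dst, int len)
    {
        const T* s = (const T*)src;
        T* d = (T*)dst;
        for (int i = 0; i < len; i++)
            d[i] = s[i];                    // per-type kernel; iPow_<T> plays this role above
    }

    static RowFunc rowTab[] =               // ordered like ipowTab: 8u, 8s, 16u, 16s, 32s, 32f, 64f
    {
        copyRow<unsigned char>, copyRow<signed char>, copyRow<unsigned short>,
        copyRow<short>, copyRow<int>, copyRow<float>, copyRow<double>, 0
    };

    static void processRow(int depth, const void* src, void* dst, int len)
    {
        RowFunc func = rowTab[depth];       // same lookup as ipowTab[depth] in pow()
        if (func)
            func(src, dst, len);
    }
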
@@ -2185,19 +2185,19 @@ bool checkRange(InputArray _src, bool quiet, Point* pt, double minVal, double ma
     return badPt.x < 0;
 }
 
-    
+
 void patchNaNs( InputOutputArray _a, double _val )
 {
     Mat a = _a.getMat();
     CV_Assert( a.depth() == CV_32F );
-    
+
     const Mat* arrays[] = {&a, 0};
     int* ptrs[1];
     NAryMatIterator it(arrays, (uchar**)ptrs);
     size_t len = it.size*a.channels();
     Cv32suf val;
     val.f = (float)_val;
-    
+
     for( size_t i = 0; i < it.nplanes; i++, ++it )
     {
         int* tptr = ptrs[0];
@@ -2207,22 +2207,22 @@ void patchNaNs( InputOutputArray _a, double _val )
     }
 }
 
-    
+
 void exp(const float* src, float* dst, int n)
 {
     Exp_32f(src, dst, n);
 }
-    
+
 void log(const float* src, float* dst, int n)
 {
     Log_32f(src, dst, n);
 }
-    
+
 void fastAtan2(const float* y, const float* x, float* dst, int n, bool angleInDegrees)
 {
     FastAtan2_32f(y, x, dst, n, angleInDegrees);
 }
-    
+
 void magnitude(const float* x, const float* y, float* dst, int n)
 {
     Magnitude_32f(x, y, dst, n);
@@ -2343,26 +2343,26 @@ int cv::solveCubic( InputArray _coeffs, OutputArray _roots )
     const int n0 = 3;
     Mat coeffs = _coeffs.getMat();
     int ctype = coeffs.type();
-    
+
     CV_Assert( ctype == CV_32F || ctype == CV_64F );
     CV_Assert( (coeffs.size() == Size(n0, 1) ||
                 coeffs.size() == Size(n0+1, 1) ||
                 coeffs.size() == Size(1, n0) ||
                 coeffs.size() == Size(1, n0+1)) );
-    
+
     _roots.create(n0, 1, ctype, -1, true, DEPTH_MASK_FLT);
     Mat roots = _roots.getMat();
-    
+
     int i = -1, n = 0;
     double a0 = 1., a1, a2, a3;
     double x0 = 0., x1 = 0., x2 = 0.;
     int ncoeffs = coeffs.rows + coeffs.cols - 1;
-    
+
     if( ctype == CV_32FC1 )
     {
         if( ncoeffs == 4 )
             a0 = coeffs.at<float>(++i);
-        
+
         a1 = coeffs.at<float>(i+1);
         a2 = coeffs.at<float>(i+2);
         a3 = coeffs.at<float>(i+3);
@@ -2371,12 +2371,12 @@ int cv::solveCubic( InputArray _coeffs, OutputArray _roots )
     {
         if( ncoeffs == 4 )
             a0 = coeffs.at<double>(++i);
-        
+
         a1 = coeffs.at<double>(i+1);
         a2 = coeffs.at<double>(i+2);
         a3 = coeffs.at<double>(i+3);
     }
-    
+
     if( a0 == 0 )
     {
         if( a1 == 0 )
@@ -2419,12 +2419,12 @@ int cv::solveCubic( InputArray _coeffs, OutputArray _roots )
         a1 *= a0;
         a2 *= a0;
         a3 *= a0;
-        
+
         double Q = (a1 * a1 - 3 * a2) * (1./9);
         double R = (2 * a1 * a1 * a1 - 9 * a1 * a2 + 27 * a3) * (1./54);
         double Qcubed = Q * Q * Q;
         double d = Qcubed - R * R;
-        
+
         if( d >= 0 )
         {
             double theta = acos(R / sqrt(Qcubed));
@@ -2448,7 +2448,7 @@ int cv::solveCubic( InputArray _coeffs, OutputArray _roots )
             n = 1;
         }
     }
-    
+
     if( roots.type() == CV_32FC1 )
     {
         roots.at<float>(0) = (float)x0;
@@ -2461,7 +2461,7 @@ int cv::solveCubic( InputArray _coeffs, OutputArray _roots )
         roots.at<double>(1) = x1;
         roots.at<double>(2) = x2;
     }
-    
+
     return n;
 }
 
@@ -2476,15 +2476,15 @@ double cv::solvePoly( InputArray _coeffs0, OutputArray _roots0, int maxIters )
     Mat coeffs0 = _coeffs0.getMat();
     int ctype = _coeffs0.type();
     int cdepth = CV_MAT_DEPTH(ctype);
-    
+
     CV_Assert( CV_MAT_DEPTH(ctype) >= CV_32F && CV_MAT_CN(ctype) <= 2 );
     CV_Assert( coeffs0.rows == 1 || coeffs0.cols == 1 );
-    
+
     int n = coeffs0.cols + coeffs0.rows - 2;
 
-    _roots0.create(n, 1, CV_MAKETYPE(cdepth, 2), -1, true, DEPTH_MASK_FLT);    
+    _roots0.create(n, 1, CV_MAKETYPE(cdepth, 2), -1, true, DEPTH_MASK_FLT);
     Mat roots0 = _roots0.getMat();
-    
+
     AutoBuffer<C> buf(n*2+2);
     C *coeffs = buf, *roots = coeffs + n + 1;
     Mat coeffs1(coeffs0.size(), CV_MAKETYPE(CV_64F, coeffs0.channels()), coeffs0.channels() == 2 ? coeffs : roots);
index 845bd89..5778ee3 100644
@@ -62,18 +62,18 @@ void swap( Mat& a, Mat& b )
     std::swap(a.dataend, b.dataend);
     std::swap(a.datalimit, b.datalimit);
     std::swap(a.allocator, b.allocator);
-    
+
     std::swap(a.size.p, b.size.p);
     std::swap(a.step.p, b.step.p);
     std::swap(a.step.buf[0], b.step.buf[0]);
     std::swap(a.step.buf[1], b.step.buf[1]);
-    
+
     if( a.step.p == b.step.buf )
     {
         a.step.p = a.step.buf;
         a.size.p = &a.rows;
     }
-    
+
     if( b.step.p == a.step.buf )
     {
         b.step.p = b.step.buf;
@@ -102,11 +102,11 @@ static inline void setSize( Mat& m, int _dims, const int* _sz,
             m.rows = m.cols = -1;
         }
     }
-    
+
     m.dims = _dims;
     if( !_sz )
         return;
-    
+
     size_t esz = CV_ELEM_SIZE(m.flags), total = esz;
     int i;
     for( i = _dims-1; i >= 0; i-- )
@@ -114,7 +114,7 @@ static inline void setSize( Mat& m, int _dims, const int* _sz,
         int s = _sz[i];
         CV_Assert( s >= 0 );
         m.size.p[i] = s;
-        
+
         if( _steps )
             m.step.p[i] = i < _dims-1 ? _steps[i] : esz;
         else if( autoSteps )
@@ -126,7 +126,7 @@ static inline void setSize( Mat& m, int _dims, const int* _sz,
             total = (size_t)total1;
         }
     }
-    
+
     if( _dims == 1 )
     {
         m.dims = 2;
@@ -134,7 +134,7 @@ static inline void setSize( Mat& m, int _dims, const int* _sz,
         m.step[1] = esz;
     }
 }
-   
+
 static void updateContinuityFlag(Mat& m)
 {
     int i, j;
@@ -143,20 +143,20 @@ static void updateContinuityFlag(Mat& m)
         if( m.size[i] > 1 )
             break;
     }
-    
+
     for( j = m.dims-1; j > i; j-- )
     {
         if( m.step[j]*m.size[j] < m.step[j-1] )
             break;
     }
-    
+
     int64 t = (int64)m.step[0]*m.size[0];
     if( j <= i && t == (int)t )
         m.flags |= Mat::CONTINUOUS_FLAG;
     else
         m.flags &= ~Mat::CONTINUOUS_FLAG;
 }
-    
+
 static void finalizeHdr(Mat& m)
 {
     updateContinuityFlag(m);
@@ -178,14 +178,14 @@ static void finalizeHdr(Mat& m)
     else
         m.dataend = m.datalimit = 0;
 }
-    
-    
+
+
 void Mat::create(int d, const int* _sizes, int _type)
 {
     int i;
     CV_Assert(0 <= d && _sizes && d <= CV_MAX_DIM && _sizes);
     _type = CV_MAT_TYPE(_type);
-    
+
     if( data && (d == dims || (d == 1 && dims <= 2)) && _type == type() )
     {
         if( d == 2 && rows == _sizes[0] && cols == _sizes[1] )
@@ -196,13 +196,13 @@ void Mat::create(int d, const int* _sizes, int _type)
         if( i == d && (d > 1 || size[1] == 1))
             return;
     }
-    
+
     release();
     if( d == 0 )
         return;
     flags = (_type & CV_MAT_TYPE_MASK) | MAGIC_VAL;
     setSize(*this, d, _sizes, 0, true);
-    
+
     if( total() > 0 )
     {
 #ifdef HAVE_TGPU
@@ -218,7 +218,7 @@ void Mat::create(int d, const int* _sizes, int _type)
         else
         {
 #ifdef HAVE_TGPU
-           try 
+           try
             {
                 allocator->allocate(dims, size, _type, refcount, datastart, data, step.p);
                 CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
@@ -236,7 +236,7 @@ void Mat::create(int d, const int* _sizes, int _type)
 #endif
         }
     }
-    
+
     finalizeHdr(*this);
 }
 
@@ -249,7 +249,7 @@ void Mat::copySize(const Mat& m)
         step[i] = m.step[i];
     }
 }
-    
+
 void Mat::deallocate()
 {
     if( allocator )
@@ -261,7 +261,7 @@ void Mat::deallocate()
     }
 }
 
-    
+
 Mat::Mat(const Mat& m, const Range& rowRange, const Range& colRange) : size(&rows)
 {
     initEmpty();
@@ -276,7 +276,7 @@ Mat::Mat(const Mat& m, const Range& rowRange, const Range& colRange) : size(&row
         *this = m(rs);
         return;
     }
-    
+
     *this = m;
     if( rowRange != Range::all() && rowRange != Range(0,rows) )
     {
@@ -285,7 +285,7 @@ Mat::Mat(const Mat& m, const Range& rowRange, const Range& colRange) : size(&row
         data += step*rowRange.start;
         flags |= SUBMATRIX_FLAG;
     }
-    
+
     if( colRange != Range::all() && colRange != Range(0,cols) )
     {
         CV_Assert( 0 <= colRange.start && colRange.start <= colRange.end && colRange.end <= m.cols );
@@ -294,18 +294,18 @@ Mat::Mat(const Mat& m, const Range& rowRange, const Range& colRange) : size(&row
         flags &= cols < m.cols ? ~CONTINUOUS_FLAG : -1;
         flags |= SUBMATRIX_FLAG;
     }
-    
+
     if( rows == 1 )
         flags |= CONTINUOUS_FLAG;
-    
+
     if( rows <= 0 || cols <= 0 )
     {
         release();
         rows = cols = 0;
     }
 }
-    
+
+
 Mat::Mat(const Mat& m, const Rect& roi)
     : flags(m.flags), dims(2), rows(roi.height), cols(roi.width),
     data(m.data + roi.y*m.step[0]), refcount(m.refcount),
@@ -315,7 +315,7 @@ Mat::Mat(const Mat& m, const Rect& roi)
     CV_Assert( m.dims <= 2 );
     flags &= roi.width < m.cols ? ~CONTINUOUS_FLAG : -1;
     flags |= roi.height == 1 ? CONTINUOUS_FLAG : 0;
-    
+
     size_t esz = CV_ELEM_SIZE(flags);
     data += roi.x*esz;
     CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols &&
@@ -324,9 +324,9 @@ Mat::Mat(const Mat& m, const Rect& roi)
         CV_XADD(refcount, 1);
     if( roi.width < m.cols || roi.height < m.rows )
         flags |= SUBMATRIX_FLAG;
-    
+
     step[0] = m.step[0]; step[1] = esz;
-    
+
     if( rows <= 0 || cols <= 0 )
     {
         release();
@@ -334,7 +334,7 @@ Mat::Mat(const Mat& m, const Rect& roi)
     }
 }
 
-    
+
 Mat::Mat(int _dims, const int* _sizes, int _type, void* _data, const size_t* _steps) : size(&rows)
 {
     initEmpty();
@@ -343,13 +343,13 @@ Mat::Mat(int _dims, const int* _sizes, int _type, void* _data, const size_t* _st
     setSize(*this, _dims, _sizes, _steps, true);
     finalizeHdr(*this);
 }
-    
-    
+
+
 Mat::Mat(const Mat& m, const Range* ranges) : size(&rows)
 {
     initEmpty();
     int i, d = m.dims;
-    
+
     CV_Assert(ranges);
     for( i = 0; i < d; i++ )
     {
@@ -369,8 +369,8 @@ Mat::Mat(const Mat& m, const Range* ranges) : size(&rows)
     }
     updateContinuityFlag(*this);
 }
-    
+
+
 Mat::Mat(const CvMatND* m, bool copyData) : size(&rows)
 {
     initEmpty();
@@ -380,14 +380,14 @@ Mat::Mat(const CvMatND* m, bool copyData) : size(&rows)
     flags |= CV_MAT_TYPE(m->type);
     int _sizes[CV_MAX_DIM];
     size_t _steps[CV_MAX_DIM];
-    
+
     int i, d = m->dims;
     for( i = 0; i < d; i++ )
     {
         _sizes[i] = m->dim[i].size;
         _steps[i] = m->dim[i].step;
     }
-    
+
     setSize(*this, d, _sizes, _steps);
     finalizeHdr(*this);
 
@@ -397,15 +397,15 @@ Mat::Mat(const CvMatND* m, bool copyData) : size(&rows)
         temp.copyTo(*this);
     }
 }
-    
-    
+
+
 Mat Mat::diag(int d) const
 {
     CV_Assert( dims <= 2 );
     Mat m = *this;
     size_t esz = elemSize();
     int len;
-    
+
     if( d >= 0 )
     {
         len = std::min(cols - d, rows);
@@ -417,30 +417,30 @@ Mat Mat::diag(int d) const
         m.data -= step[0]*d;
     }
     CV_DbgAssert( len > 0 );
-    
+
     m.size[0] = m.rows = len;
     m.size[1] = m.cols = 1;
     m.step[0] += (len > 1 ? esz : 0);
-    
+
     if( m.rows > 1 )
         m.flags &= ~CONTINUOUS_FLAG;
     else
         m.flags |= CONTINUOUS_FLAG;
-    
+
     if( size() != Size(1,1) )
         m.flags |= SUBMATRIX_FLAG;
-    
+
     return m;
 }
 
-    
+
 Mat::Mat(const CvMat* m, bool copyData) : size(&rows)
 {
     initEmpty();
-    
+
     if( !m )
         return;
-    
+
     if( !copyData )
     {
         flags = MAGIC_VAL + (m->type & (CV_MAT_TYPE_MASK|CV_MAT_CONT_FLAG));
@@ -462,17 +462,17 @@ Mat::Mat(const CvMat* m, bool copyData) : size(&rows)
     }
 }
 
-    
+
 Mat::Mat(const IplImage* img, bool copyData) : size(&rows)
 {
     initEmpty();
-    
+
     if( !img )
         return;
-    
+
     dims = 2;
     CV_DbgAssert(CV_IS_IMAGE(img) && img->imageData != 0);
-    
+
     int depth = IPL2CV_DEPTH(img->depth);
     size_t esz;
     step[0] = img->widthStep;
@@ -493,8 +493,8 @@ Mat::Mat(const IplImage* img, bool copyData) : size(&rows)
         rows = img->roi->height; cols = img->roi->width;
         esz = CV_ELEM_SIZE(flags);
         data = datastart = (uchar*)img->imageData +
-                       (selectedPlane ? (img->roi->coi - 1)*step*img->height : 0) +
-                       img->roi->yOffset*step[0] + img->roi->xOffset*esz;        
+            (selectedPlane ? (img->roi->coi - 1)*step*img->height : 0) +
+            img->roi->yOffset*step[0] + img->roi->xOffset*esz;
     }
     datalimit = datastart + step.p[0]*rows;
     dataend = datastart + step.p[0]*(rows-1) + esz*cols;
@@ -517,7 +517,7 @@ Mat::Mat(const IplImage* img, bool copyData) : size(&rows)
     }
 }
 
-    
+
 Mat::operator IplImage() const
 {
     CV_Assert( dims <= 2 );
@@ -527,11 +527,11 @@ Mat::operator IplImage() const
     return img;
 }
 
-    
+
 void Mat::pop_back(size_t nelems)
 {
     CV_Assert( nelems <= (size_t)size.p[0] );
-    
+
     if( isSubmatrix() )
         *this = rowRange(0, size.p[0] - (int)nelems);
     else
@@ -547,14 +547,14 @@ void Mat::pop_back(size_t nelems)
         }*/
     }
 }
-    
-    
+
+
 void Mat::push_back_(const void* elem)
 {
     int r = size.p[0];
     if( isSubmatrix() || dataend + step.p[0] > datalimit )
         reserve( std::max(r + 1, (r*3+1)/2) );
-    
+
     size_t esz = elemSize();
     memcpy(data + r*step.p[0], elem, esz);
     size.p[0] = r + 1;
@@ -566,22 +566,22 @@ void Mat::push_back_(const void* elem)
 void Mat::reserve(size_t nelems)
 {
     const size_t MIN_SIZE = 64;
-    
+
     CV_Assert( (int)nelems >= 0 );
     if( !isSubmatrix() && data + step.p[0]*nelems <= datalimit )
         return;
-    
+
     int r = size.p[0];
-    
+
     if( (size_t)r >= nelems )
         return;
-    
+
     size.p[0] = std::max((int)nelems, 1);
     size_t newsize = total()*elemSize();
-    
+
     if( newsize < MIN_SIZE )
         size.p[0] = (int)((MIN_SIZE + newsize - 1)*nelems/newsize);
-    
+
     Mat m(dims, size.p, type());
     size.p[0] = r;
     if( r > 0 )
@@ -589,42 +589,42 @@ void Mat::reserve(size_t nelems)
         Mat mpart = m.rowRange(0, r);
         copyTo(mpart);
     }
-    
+
     *this = m;
     size.p[0] = r;
     dataend = data + step.p[0]*r;
 }
 
-    
+
 void Mat::resize(size_t nelems)
 {
     int saveRows = size.p[0];
     if( saveRows == (int)nelems )
         return;
     CV_Assert( (int)nelems >= 0 );
-    
+
     if( isSubmatrix() || data + step.p[0]*nelems > datalimit )
         reserve(nelems);
-    
+
     size.p[0] = (int)nelems;
     dataend += (size.p[0] - saveRows)*step.p[0];
-    
+
     //updateContinuityFlag(*this);
-}    
+}
+
 
-    
 void Mat::resize(size_t nelems, const Scalar& s)
 {
     int saveRows = size.p[0];
     resize(nelems);
-    
+
     if( size.p[0] > saveRows )
     {
         Mat part = rowRange(saveRows, size.p[0]);
         part = s;
     }
-}    
-    
+}
+
 void Mat::push_back(const Mat& elems)
 {
     int r = size.p[0], delta = elems.size.p[0];
@@ -636,11 +636,11 @@ void Mat::push_back(const Mat& elems)
         push_back(tmp);
         return;
     }
-       if( !data )
-       {
-               *this = elems.clone();
-               return;
-       }
+    if( !data )
+    {
+        *this = elems.clone();
+        return;
+    }
 
     size.p[0] = elems.size.p[0];
     bool eq = size == elems.size;
@@ -649,15 +649,15 @@ void Mat::push_back(const Mat& elems)
         CV_Error(CV_StsUnmatchedSizes, "");
     if( type() != elems.type() )
         CV_Error(CV_StsUnmatchedFormats, "");
-    
+
     if( isSubmatrix() || dataend + step.p[0]*delta > datalimit )
         reserve( std::max(r + delta, (r*3+1)/2) );
-    
+
     size.p[0] += delta;
     dataend += step.p[0]*delta;
-    
+
     //updateContinuityFlag(*this);
-    
+
     if( isContinuous() && elems.isContinuous() )
         memcpy(data + r*step.p[0], elems.data, elems.total()*elems.elemSize());
     else
@@ -667,7 +667,7 @@ void Mat::push_back(const Mat& elems)
     }
 }
 
-    
+
 Mat cvarrToMat(const CvArr* arr, bool copyData,
                bool /*allowND*/, int coiMode)
 {
@@ -703,7 +703,7 @@ void Mat::locateROI( Size& wholeSize, Point& ofs ) const
     CV_Assert( dims <= 2 && step[0] > 0 );
     size_t esz = elemSize(), minstep;
     ptrdiff_t delta1 = data - datastart, delta2 = dataend - datastart;
-    
+
     if( delta1 == 0 )
         ofs.x = ofs.y = 0;
     else
@@ -735,17 +735,17 @@ Mat& Mat::adjustROI( int dtop, int dbottom, int dleft, int dright )
     else
         flags &= ~CONTINUOUS_FLAG;
     return *this;
-}    
+}
 
 }
-    
+
 void cv::extractImageCOI(const CvArr* arr, OutputArray _ch, int coi)
 {
     Mat mat = cvarrToMat(arr, false, true, 1);
     _ch.create(mat.dims, mat.size, mat.depth());
     Mat ch = _ch.getMat();
     if(coi < 0)
-    { 
+    {
         CV_Assert( CV_IS_IMAGE(arr) );
         coi = cvGetImageCOI((const IplImage*)arr)-1;
     }
@@ -753,12 +753,12 @@ void cv::extractImageCOI(const CvArr* arr, OutputArray _ch, int coi)
     int _pairs[] = { coi, 0 };
     mixChannels( &mat, 1, &ch, 1, _pairs, 1 );
 }
-    
+
 void cv::insertImageCOI(InputArray _ch, CvArr* arr, int coi)
 {
     Mat ch = _ch.getMat(), mat = cvarrToMat(arr, false, true, 1);
     if(coi < 0)
-    { 
+    {
         CV_Assert( CV_IS_IMAGE(arr) );
         coi = cvGetImageCOI((const IplImage*)arr)-1;
     }
@@ -766,7 +766,7 @@ void cv::insertImageCOI(InputArray _ch, CvArr* arr, int coi)
     int _pairs[] = { 0, coi };
     mixChannels( &ch, 1, &mat, 1, _pairs, 1 );
 }
-    
+
 namespace cv
 {
 
@@ -774,7 +774,7 @@ Mat Mat::reshape(int new_cn, int new_rows) const
 {
     int cn = channels();
     Mat hdr = *this;
-    
+
     if( dims > 2 && new_rows == 0 && new_cn != 0 && size[dims-1]*cn % new_cn == 0 )
     {
         hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
@@ -782,9 +782,9 @@ Mat Mat::reshape(int new_cn, int new_rows) const
         hdr.size[dims-1] = hdr.size[dims-1]*cn / new_cn;
         return hdr;
     }
-    
+
     CV_Assert( dims <= 2 );
-    
+
     if( new_cn == 0 )
         new_cn = cn;
 
@@ -825,7 +825,7 @@ Mat Mat::reshape(int new_cn, int new_rows) const
     return hdr;
 }
 
-    
+
 int Mat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) const
 {
     return (depth() == _depth || _depth <= 0) &&
@@ -911,12 +911,13 @@ void scalarToRawData(const Scalar& s, void* _buf, int type, int unroll_to)
     }
 }
 
-    
+
 /*************************************************************************************************\
                                         Input/Output Array
 \*************************************************************************************************/
 
 _InputArray::_InputArray() : flags(0), obj(0) {}
+_InputArray::~_InputArray() {}
 _InputArray::_InputArray(const Mat& m) : flags(MAT), obj((void*)&m) {}
 _InputArray::_InputArray(const vector<Mat>& vec) : flags(STD_VECTOR_MAT), obj((void*)&vec) {}
 _InputArray::_InputArray(const double& val) : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&val), sz(Size(1,1)) {}
@@ -924,11 +925,11 @@ _InputArray::_InputArray(const MatExpr& expr) : flags(FIXED_TYPE + FIXED_SIZE +
 _InputArray::_InputArray(const GlBuffer& buf) : flags(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER), obj((void*)&buf) {}
 _InputArray::_InputArray(const GlTexture& tex) : flags(FIXED_TYPE + FIXED_SIZE + OPENGL_TEXTURE), obj((void*)&tex) {}
 _InputArray::_InputArray(const gpu::GpuMat& d_mat) : flags(GPU_MAT), obj((void*)&d_mat) {}
+
 Mat _InputArray::getMat(int i) const
 {
     int k = kind();
-    
+
     if( k == MAT )
     {
         const Mat* m = (const Mat*)obj;
@@ -936,115 +937,115 @@ Mat _InputArray::getMat(int i) const
             return *m;
         return m->row(i);
     }
-    
+
     if( k == EXPR )
     {
         CV_Assert( i < 0 );
         return (Mat)*((const MatExpr*)obj);
     }
-    
+
     if( k == MATX )
     {
         CV_Assert( i < 0 );
         return Mat(sz, flags, obj);
     }
-    
+
     if( k == STD_VECTOR )
     {
         CV_Assert( i < 0 );
         int t = CV_MAT_TYPE(flags);
         const vector<uchar>& v = *(const vector<uchar>*)obj;
-        
+
         return !v.empty() ? Mat(size(), t, (void*)&v[0]) : Mat();
     }
-    
+
     if( k == NONE )
         return Mat();
-    
+
     if( k == STD_VECTOR_VECTOR )
     {
         int t = type(i);
         const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
         CV_Assert( 0 <= i && i < (int)vv.size() );
         const vector<uchar>& v = vv[i];
-        
+
         return !v.empty() ? Mat(size(i), t, (void*)&v[0]) : Mat();
     }
-    
+
     CV_Assert( k == STD_VECTOR_MAT );
     //if( k == STD_VECTOR_MAT )
     {
         const vector<Mat>& v = *(const vector<Mat>*)obj;
         CV_Assert( 0 <= i && i < (int)v.size() );
-        
+
         return v[i];
-    }        
+    }
 }
-    
-    
+
+
 void _InputArray::getMatVector(vector<Mat>& mv) const
 {
     int k = kind();
-    
+
     if( k == MAT )
     {
         const Mat& m = *(const Mat*)obj;
         int i, n = (int)m.size[0];
         mv.resize(n);
-        
+
         for( i = 0; i < n; i++ )
             mv[i] = m.dims == 2 ? Mat(1, m.cols, m.type(), (void*)m.ptr(i)) :
                 Mat(m.dims-1, &m.size[1], m.type(), (void*)m.ptr(i), &m.step[1]);
         return;
     }
-    
+
     if( k == EXPR )
     {
         Mat m = *(const MatExpr*)obj;
         int i, n = m.size[0];
         mv.resize(n);
-        
+
         for( i = 0; i < n; i++ )
             mv[i] = m.row(i);
         return;
     }
-    
+
     if( k == MATX )
     {
         size_t i, n = sz.height, esz = CV_ELEM_SIZE(flags);
         mv.resize(n);
-        
+
         for( i = 0; i < n; i++ )
             mv[i] = Mat(1, sz.width, CV_MAT_TYPE(flags), (uchar*)obj + esz*sz.width*i);
         return;
     }
-    
+
     if( k == STD_VECTOR )
     {
         const vector<uchar>& v = *(const vector<uchar>*)obj;
-        
+
         size_t i, n = v.size(), esz = CV_ELEM_SIZE(flags);
         int t = CV_MAT_DEPTH(flags), cn = CV_MAT_CN(flags);
         mv.resize(n);
-        
+
         for( i = 0; i < n; i++ )
             mv[i] = Mat(1, cn, t, (void*)(&v[0] + esz*i));
         return;
     }
-    
+
     if( k == NONE )
     {
         mv.clear();
         return;
     }
-    
+
     if( k == STD_VECTOR_VECTOR )
     {
         const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
         int i, n = (int)vv.size();
         int t = CV_MAT_TYPE(flags);
         mv.resize(n);
-        
+
         for( i = 0; i < n; i++ )
         {
             const vector<uchar>& v = vv[i];
@@ -1052,7 +1053,7 @@ void _InputArray::getMatVector(vector<Mat>& mv) const
         }
         return;
     }
-    
+
     CV_Assert( k == STD_VECTOR_MAT );
     //if( k == STD_VECTOR_MAT )
     {
@@ -1098,34 +1099,34 @@ gpu::GpuMat _InputArray::getGpuMat() const
         return *d_mat;
     }
 }
-    
+
 int _InputArray::kind() const
 {
     return flags & KIND_MASK;
 }
-    
+
 Size _InputArray::size(int i) const
 {
     int k = kind();
-    
+
     if( k == MAT )
     {
         CV_Assert( i < 0 );
         return ((const Mat*)obj)->size();
     }
-    
+
     if( k == EXPR )
     {
         CV_Assert( i < 0 );
         return ((const MatExpr*)obj)->size();
     }
-    
+
     if( k == MATX )
     {
         CV_Assert( i < 0 );
         return sz;
     }
-    
+
     if( k == STD_VECTOR )
     {
         CV_Assert( i < 0 );
@@ -1134,10 +1135,10 @@ Size _InputArray::size(int i) const
         size_t szb = v.size(), szi = iv.size();
         return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1);
     }
-    
+
     if( k == NONE )
         return Size();
-    
+
     if( k == STD_VECTOR_VECTOR )
     {
         const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
@@ -1145,18 +1146,18 @@ Size _InputArray::size(int i) const
             return vv.empty() ? Size() : Size((int)vv.size(), 1);
         CV_Assert( i < (int)vv.size() );
         const vector<vector<int> >& ivv = *(const vector<vector<int> >*)obj;
-        
+
         size_t szb = vv[i].size(), szi = ivv[i].size();
         return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1);
     }
-    
+
     if( k == STD_VECTOR_MAT )
     {
         const vector<Mat>& vv = *(const vector<Mat>*)obj;
         if( i < 0 )
             return vv.empty() ? Size() : Size((int)vv.size(), 1);
         CV_Assert( i < (int)vv.size() );
-        
+
         return vv[i].size();
     }
 
@@ -1187,106 +1188,107 @@ size_t _InputArray::total(int i) const
 {
     return size(i).area();
 }
-    
+
 int _InputArray::type(int i) const
 {
     int k = kind();
-    
+
     if( k == MAT )
         return ((const Mat*)obj)->type();
-    
+
     if( k == EXPR )
         return ((const MatExpr*)obj)->type();
-    
+
     if( k == MATX || k == STD_VECTOR || k == STD_VECTOR_VECTOR )
         return CV_MAT_TYPE(flags);
-    
+
     if( k == NONE )
         return -1;
-    
+
     if( k == STD_VECTOR_MAT )
     {
         const vector<Mat>& vv = *(const vector<Mat>*)obj;
         CV_Assert( i < (int)vv.size() );
-        
+
         return vv[i >= 0 ? i : 0].type();
     }
-    
+
     if( k == OPENGL_BUFFER )
         return ((const GlBuffer*)obj)->type();
-    
+
     if( k == OPENGL_TEXTURE )
         return ((const GlTexture*)obj)->type();
-    
+
     CV_Assert( k == GPU_MAT );
     //if( k == GPU_MAT )
         return ((const gpu::GpuMat*)obj)->type();
 }
-    
+
 int _InputArray::depth(int i) const
 {
     return CV_MAT_DEPTH(type(i));
 }
-    
+
 int _InputArray::channels(int i) const
 {
     return CV_MAT_CN(type(i));
 }
-    
+
 bool _InputArray::empty() const
 {
     int k = kind();
-    
+
     if( k == MAT )
         return ((const Mat*)obj)->empty();
-    
+
     if( k == EXPR )
         return false;
-    
+
     if( k == MATX )
         return false;
-    
+
     if( k == STD_VECTOR )
     {
         const vector<uchar>& v = *(const vector<uchar>*)obj;
         return v.empty();
     }
-    
+
     if( k == NONE )
         return true;
-    
+
     if( k == STD_VECTOR_VECTOR )
     {
         const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
         return vv.empty();
     }
-    
+
     if( k == STD_VECTOR_MAT )
     {
         const vector<Mat>& vv = *(const vector<Mat>*)obj;
         return vv.empty();
     }
-    
+
     if( k == OPENGL_BUFFER )
         return ((const GlBuffer*)obj)->empty();
-    
+
     if( k == OPENGL_TEXTURE )
         return ((const GlTexture*)obj)->empty();
-    
+
     CV_Assert( k == GPU_MAT );
     //if( k == GPU_MAT )
         return ((const gpu::GpuMat*)obj)->empty();
 }
-    
-    
+
+
 _OutputArray::_OutputArray() {}
+_OutputArray::~_OutputArray() {}
 _OutputArray::_OutputArray(Mat& m) : _InputArray(m) {}
 _OutputArray::_OutputArray(vector<Mat>& vec) : _InputArray(vec) {}
 
 _OutputArray::_OutputArray(const Mat& m) : _InputArray(m) {flags |= FIXED_SIZE|FIXED_TYPE;}
 _OutputArray::_OutputArray(const vector<Mat>& vec) : _InputArray(vec) {flags |= FIXED_SIZE;}
 
-    
+
 bool _OutputArray::fixedSize() const
 {
     return (flags & FIXED_SIZE) == FIXED_SIZE;
@@ -1296,7 +1298,7 @@ bool _OutputArray::fixedType() const
 {
     return (flags & FIXED_TYPE) == FIXED_TYPE;
 }
-    
+
 void _OutputArray::create(Size _sz, int type, int i, bool allowTransposed, int fixedDepthMask) const
 {
     int k = kind();
@@ -1324,12 +1326,12 @@ void _OutputArray::create(int rows, int cols, int type, int i, bool allowTranspo
     int sz[] = {rows, cols};
     create(2, sz, type, i, allowTransposed, fixedDepthMask);
 }
-    
+
 void _OutputArray::create(int dims, const int* size, int type, int i, bool allowTransposed, int fixedDepthMask) const
 {
     int k = kind();
     type = CV_MAT_TYPE(type);
-    
+
     if( k == MAT )
     {
         CV_Assert( i < 0 );
@@ -1341,7 +1343,7 @@ void _OutputArray::create(int dims, const int* size, int type, int i, bool allow
                 CV_Assert(!fixedType() && !fixedSize());
                 m.release();
             }
-            
+
             if( dims == 2 && m.dims == 2 && m.data &&
                 m.type() == type && m.rows == size[1] && m.cols == size[0] )
                 return;
@@ -1363,7 +1365,7 @@ void _OutputArray::create(int dims, const int* size, int type, int i, bool allow
         m.create(dims, size, type);
         return;
     }
-    
+
     if( k == MATX )
     {
         CV_Assert( i < 0 );
@@ -1373,13 +1375,13 @@ void _OutputArray::create(int dims, const int* size, int type, int i, bool allow
                                  (allowTransposed && size[0] == sz.width && size[1] == sz.height)));
         return;
     }
-    
+
     if( k == STD_VECTOR || k == STD_VECTOR_VECTOR )
     {
         CV_Assert( dims == 2 && (size[0] == 1 || size[1] == 1 || size[0]*size[1] == 0) );
         size_t len = size[0]*size[1] > 0 ? size[0] + size[1] - 1 : 0;
         vector<uchar>* v = (vector<uchar>*)obj;
-        
+
         if( k == STD_VECTOR_VECTOR )
         {
             vector<vector<uchar> >& vv = *(vector<vector<uchar> >*)obj;
@@ -1394,10 +1396,10 @@ void _OutputArray::create(int dims, const int* size, int type, int i, bool allow
         }
         else
             CV_Assert( i < 0 );
-        
+
         int type0 = CV_MAT_TYPE(flags);
         CV_Assert( type == type0 || (CV_MAT_CN(type) == CV_MAT_CN(type0) && ((1 << type0) & fixedDepthMask) != 0) );
-        
+
         int esz = CV_ELEM_SIZE(type0);
         CV_Assert(!fixedSize() || len == ((vector<uchar>*)v)->size() / esz);
         switch( esz )
@@ -1455,23 +1457,23 @@ void _OutputArray::create(int dims, const int* size, int type, int i, bool allow
         }
         return;
     }
-    
+
     if( k == NONE )
     {
-        CV_Error(CV_StsNullPtr, "create() called for the missing output array" ); 
+        CV_Error(CV_StsNullPtr, "create() called for the missing output array" );
         return;
     }
-    
+
     CV_Assert( k == STD_VECTOR_MAT );
     //if( k == STD_VECTOR_MAT )
     {
         vector<Mat>& v = *(vector<Mat>*)obj;
-        
+
         if( i < 0 )
         {
             CV_Assert( dims == 2 && (size[0] == 1 || size[1] == 1 || size[0]*size[1] == 0) );
             size_t len = size[0]*size[1] > 0 ? size[0] + size[1] - 1 : 0, len0 = v.size();
-            
+
             CV_Assert(!fixedSize() || len == len0);
             v.resize(len);
             if( fixedType() )
@@ -1487,10 +1489,10 @@ void _OutputArray::create(int dims, const int* size, int type, int i, bool allow
             }
             return;
         }
-        
+
         CV_Assert( i < (int)v.size() );
         Mat& m = v[i];
-        
+
         if( allowTransposed )
         {
             if( !m.isContinuous() )
@@ -1498,7 +1500,7 @@ void _OutputArray::create(int dims, const int* size, int type, int i, bool allow
                 CV_Assert(!fixedType() && !fixedSize());
                 m.release();
             }
-            
+
             if( dims == 2 && m.dims == 2 && m.data &&
                 m.type() == type && m.rows == size[1] && m.cols == size[0] )
                 return;
@@ -1521,55 +1523,55 @@ void _OutputArray::create(int dims, const int* size, int type, int i, bool allow
         m.create(dims, size, type);
     }
 }
-    
+
 void _OutputArray::release() const
 {
     CV_Assert(!fixedSize());
 
     int k = kind();
-    
+
     if( k == MAT )
     {
         ((Mat*)obj)->release();
         return;
     }
-    
+
     if( k == NONE )
         return;
-    
+
     if( k == STD_VECTOR )
     {
         create(Size(), CV_MAT_TYPE(flags));
         return;
     }
-    
+
     if( k == STD_VECTOR_VECTOR )
     {
         ((vector<vector<uchar> >*)obj)->clear();
         return;
     }
-    
+
     CV_Assert( k == STD_VECTOR_MAT );
     //if( k == STD_VECTOR_MAT )
     {
         ((vector<Mat>*)obj)->clear();
-    }    
+    }
 }
 
 void _OutputArray::clear() const
 {
     int k = kind();
-    
+
     if( k == MAT )
     {
         CV_Assert(!fixedSize());
         ((Mat*)obj)->resize(0);
         return;
     }
-    
+
     release();
 }
-    
+
 bool _OutputArray::needed() const
 {
     return kind() != NONE;
@@ -1594,7 +1596,7 @@ Mat& _OutputArray::getMatRef(int i) const
 
 static _OutputArray _none;
 OutputArray noArray() { return _none; }
-    
+
 }
 
 /*************************************************************************************************\
@@ -1608,7 +1610,7 @@ void cv::hconcat(const Mat* src, size_t nsrc, OutputArray _dst)
         _dst.release();
         return;
     }
-    
+
     int totalCols = 0, cols = 0;
     size_t i;
     for( i = 0; i < nsrc; i++ )
@@ -1627,13 +1629,13 @@ void cv::hconcat(const Mat* src, size_t nsrc, OutputArray _dst)
         cols += src[i].cols;
     }
 }
-    
+
 void cv::hconcat(InputArray src1, InputArray src2, OutputArray dst)
 {
     Mat src[] = {src1.getMat(), src2.getMat()};
     hconcat(src, 2, dst);
 }
-    
+
 void cv::hconcat(InputArray _src, OutputArray dst)
 {
     vector<Mat> src;
@@ -1648,7 +1650,7 @@ void cv::vconcat(const Mat* src, size_t nsrc, OutputArray _dst)
         _dst.release();
         return;
     }
-    
+
     int totalRows = 0, rows = 0;
     size_t i;
     for( i = 0; i < nsrc; i++ )
@@ -1667,12 +1669,12 @@ void cv::vconcat(const Mat* src, size_t nsrc, OutputArray _dst)
         rows += src[i].rows;
     }
 }
-    
+
 void cv::vconcat(InputArray src1, InputArray src2, OutputArray dst)
 {
     Mat src[] = {src1.getMat(), src2.getMat()};
     vconcat(src, 2, dst);
-}        
+}
 
 void cv::vconcat(InputArray _src, OutputArray dst)
 {
@@ -1680,14 +1682,14 @@ void cv::vconcat(InputArray _src, OutputArray dst)
     _src.getMatVector(src);
     vconcat(!src.empty() ? &src[0] : 0, src.size(), dst);
 }
-    
+
 //////////////////////////////////////// set identity ////////////////////////////////////////////
 void cv::setIdentity( InputOutputArray _m, const Scalar& s )
 {
     Mat m = _m.getMat();
     CV_Assert( m.dims <= 2 );
     int i, j, rows = m.rows, cols = m.cols, type = m.type();
-    
+
     if( type == CV_32FC1 )
     {
         float* data = (float*)m.data;
@@ -1721,15 +1723,15 @@ void cv::setIdentity( InputOutputArray _m, const Scalar& s )
     }
 }
 
-//////////////////////////////////////////// trace ///////////////////////////////////////////    
-    
+//////////////////////////////////////////// trace ///////////////////////////////////////////
+
 cv::Scalar cv::trace( InputArray _m )
 {
     Mat m = _m.getMat();
     CV_Assert( m.dims <= 2 );
     int i, type = m.type();
     int nm = std::min(m.rows, m.cols);
-    
+
     if( type == CV_32FC1 )
     {
         const float* ptr = (const float*)m.data;
@@ -1739,7 +1741,7 @@ cv::Scalar cv::trace( InputArray _m )
             _s += ptr[i*step];
         return _s;
     }
-    
+
     if( type == CV_64FC1 )
     {
         const double* ptr = (const double*)m.data;
@@ -1749,7 +1751,7 @@ cv::Scalar cv::trace( InputArray _m )
             _s += ptr[i*step];
         return _s;
     }
-    
+
     return cv::sum(m.diag());
 }
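
For reference, the small matrix utilities touched in this file (setIdentity, trace, hconcat/vconcat) are ordinary public API; a minimal usage sketch with made-up values:

    #include <opencv2/core/core.hpp>
    #include <iostream>

    int main()
    {
        cv::Mat m = cv::Mat::zeros(3, 3, CV_32F);
        cv::setIdentity(m, cv::Scalar(2.0));   // 2 on the diagonal, 0 elsewhere
        cv::Scalar t = cv::trace(m);           // t[0] == 6
        cv::Mat wide;
        cv::hconcat(m, m, wide);               // 3x6 matrix
        std::cout << t[0] << " " << wide.cols << std::endl;
        return 0;
    }
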
 
@@ -1763,27 +1765,27 @@ transpose_( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz )
 {
     int i=0, j, m = sz.width, n = sz.height;
 
-       #if CV_ENABLE_UNROLLED
+    #if CV_ENABLE_UNROLLED
     for(; i <= m - 4; i += 4 )
     {
         T* d0 = (T*)(dst + dstep*i);
         T* d1 = (T*)(dst + dstep*(i+1));
         T* d2 = (T*)(dst + dstep*(i+2));
         T* d3 = (T*)(dst + dstep*(i+3));
-        
+
         for( j = 0; j <= n - 4; j += 4 )
         {
             const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j);
             const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1));
             const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2));
             const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3));
-            
+
             d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0];
             d1[j] = s0[1]; d1[j+1] = s1[1]; d1[j+2] = s2[1]; d1[j+3] = s3[1];
             d2[j] = s0[2]; d2[j+1] = s1[2]; d2[j+2] = s2[2]; d2[j+3] = s3[2];
             d3[j] = s0[3]; d3[j+1] = s1[3]; d3[j+2] = s2[3]; d3[j+3] = s3[3];
         }
-        
+
         for( ; j < n; j++ )
         {
             const T* s0 = (const T*)(src + i*sizeof(T) + j*sstep);
@@ -1795,14 +1797,14 @@ transpose_( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz )
     {
         T* d0 = (T*)(dst + dstep*i);
         j = 0;
-               #if CV_ENABLE_UNROLLED
+        #if CV_ENABLE_UNROLLED
         for(; j <= n - 4; j += 4 )
         {
             const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j);
             const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1));
             const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2));
             const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3));
-            
+
             d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0];
         }
         #endif
@@ -1826,10 +1828,10 @@ transposeI_( uchar* data, size_t step, int n )
             std::swap( row[j], *(T*)(data1 + step*j) );
     }
 }
-    
+
 typedef void (*TransposeFunc)( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz );
 typedef void (*TransposeInplaceFunc)( uchar* data, size_t step, int n );
-    
+
 #define DEF_TRANSPOSE_FUNC(suffix, type) \
 static void transpose_##suffix( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz ) \
 { transpose_<type>(src, sstep, dst, dstep, sz); } \
@@ -1863,7 +1865,7 @@ static TransposeInplaceFunc transposeInplaceTab[] =
 };
 
 }
-    
+
 void cv::transpose( InputArray _src, OutputArray _dst )
 {
     Mat src = _src.getMat();
@@ -1872,7 +1874,7 @@ void cv::transpose( InputArray _src, OutputArray _dst )
 
     _dst.create(src.cols, src.rows, src.type());
     Mat dst = _dst.getMat();
-    
+
     if( dst.data == src.data )
     {
         TransposeInplaceFunc func = transposeInplaceTab[esz];
@@ -1892,7 +1894,7 @@ void cv::completeSymm( InputOutputArray _m, bool LtoR )
 {
     Mat m = _m.getMat();
     CV_Assert( m.dims <= 2 );
-    
+
     int i, j, nrows = m.rows, type = m.type();
     int j0 = 0, j1 = nrows;
     CV_Assert( m.rows == m.cols );
@@ -1923,7 +1925,7 @@ void cv::completeSymm( InputOutputArray _m, bool LtoR )
         CV_Error( CV_StsUnsupportedFormat, "" );
 }
 
-    
+
 cv::Mat cv::Mat::cross(InputArray _m) const
 {
     Mat m = _m.getMat();
@@ -1985,7 +1987,7 @@ reduceR_( const Mat& srcmat, Mat& dstmat )
     {
         src += srcstep;
         i = 0;
-               #if CV_ENABLE_UNROLLED
+        #if CV_ENABLE_UNROLLED
         for(; i <= size.width - 4; i += 4 )
         {
             WT s0, s1;
@@ -2044,7 +2046,7 @@ reduceC_( const Mat& srcmat, Mat& dstmat )
               dst[k] = (ST)a0;
             }
         }
-       }
+    }
 }
 
 typedef void (*ReduceFunc)( const Mat& src, Mat& dst );
@@ -2110,7 +2112,7 @@ void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
     _dst.create(dim == 0 ? 1 : src.rows, dim == 0 ? src.cols : 1,
                 CV_MAKETYPE(dtype >= 0 ? dtype : stype, cn));
     Mat dst = _dst.getMat(), temp = dst;
-    
+
     CV_Assert( op == CV_REDUCE_SUM || op == CV_REDUCE_MAX ||
                op == CV_REDUCE_MIN || op == CV_REDUCE_AVG );
     CV_Assert( src.channels() == dst.channels() );
@@ -2240,8 +2242,8 @@ void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
     if( op0 == CV_REDUCE_AVG )
         temp.convertTo(dst, dst.type(), 1./(dim == 0 ? src.rows : src.cols));
 }
-       
-    
+
+
 //////////////////////////////////////// sort ///////////////////////////////////////////
 
 namespace cv
@@ -2255,7 +2257,7 @@ template<typename T> static void sort_( const Mat& src, Mat& dst, int flags )
     bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
     bool inplace = src.data == dst.data;
     bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;
-    
+
     if( sortRows )
         n = src.rows, len = src.cols;
     else
@@ -2306,7 +2308,7 @@ template<typename T> static void sortIdx_( const Mat& src, Mat& dst, int flags )
     bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;
 
     CV_Assert( src.data != dst.data );
-    
+
     if( sortRows )
         n = src.rows, len = src.cols;
     else
@@ -2348,7 +2350,7 @@ template<typename T> static void sortIdx_( const Mat& src, Mat& dst, int flags )
 typedef void (*SortFunc)(const Mat& src, Mat& dst, int flags);
 
 }
-    
+
 void cv::sort( InputArray _src, OutputArray _dst, int flags )
 {
     static SortFunc tab[] =
@@ -2374,7 +2376,7 @@ void cv::sortIdx( InputArray _src, OutputArray _dst, int flags )
     Mat src = _src.getMat();
     SortFunc func = tab[src.depth()];
     CV_Assert( src.dims <= 2 && src.channels() == 1 && func != 0 );
-    
+
     Mat dst = _dst.getMat();
     if( dst.data == src.data )
         _dst.release();
@@ -2382,8 +2384,8 @@ void cv::sortIdx( InputArray _src, OutputArray _dst, int flags )
     dst = _dst.getMat();
     func( src, dst, flags );
 }
-    
-    
+
+
 ////////////////////////////////////////// kmeans ////////////////////////////////////////////
 
 namespace cv
@@ -2421,7 +2423,7 @@ static void generateCentersPP(const Mat& _data, Mat& _out_centers,
         dist[i] = normL2Sqr_(data + step*i, data + step*centers[0], dims);
         sum0 += dist[i];
     }
-    
+
     for( k = 1; k < K; k++ )
     {
         double bestSum = DBL_MAX;
@@ -2439,7 +2441,7 @@ static void generateCentersPP(const Mat& _data, Mat& _out_centers,
                 tdist2[i] = std::min(normL2Sqr_(data + step*i, data + step*ci, dims), dist[i]);
                 s += tdist2[i];
             }
-            
+
             if( s < bestSum )
             {
                 bestSum = s;
@@ -2462,7 +2464,7 @@ static void generateCentersPP(const Mat& _data, Mat& _out_centers,
 }
 
 }
-    
+
 double cv::kmeans( InputArray _data, int K,
                    InputOutputArray _bestLabels,
                    TermCriteria criteria, int attempts,
@@ -2480,7 +2482,7 @@ double cv::kmeans( InputArray _data, int K,
     CV_Assert( N >= K );
 
     _bestLabels.create(N, 1, CV_32S, -1, true);
-    
+
     Mat _labels, best_labels = _bestLabels.getMat();
     if( flags & CV_KMEANS_USE_INITIAL_LABELS )
     {
@@ -2566,7 +2568,7 @@ double cv::kmeans( InputArray _data, int K,
                     for( i = 0; i < N; i++ )
                         CV_Assert( (unsigned)labels[i] < (unsigned)K );
                 }
-            
+
                 // compute centers
                 centers = Scalar(0);
                 for( k = 0; k < K; k++ )
@@ -2577,8 +2579,8 @@ double cv::kmeans( InputArray _data, int K,
                     sample = data.ptr<float>(i);
                     k = labels[i];
                     float* center = centers.ptr<float>(k);
-                                       j=0;
-                                       #if CV_ENABLE_UNROLLED
+                    j=0;
+                    #if CV_ENABLE_UNROLLED
                     for(; j <= dims - 4; j += 4 )
                     {
                         float t0 = center[j] + sample[j];
@@ -2601,7 +2603,7 @@ double cv::kmeans( InputArray _data, int K,
 
                 if( iter > 0 )
                     max_center_shift = 0;
-                
+
                 for( k = 0; k < K; k++ )
                 {
                     if( counters[k] != 0 )
@@ -2617,8 +2619,8 @@ double cv::kmeans( InputArray _data, int K,
                         if( counters[max_k] < counters[k1] )
                             max_k = k1;
                     }
-                    
-                    double max_dist = 0;                        
+
+                    double max_dist = 0;
                     int farthest_i = -1;
                     float* new_center = centers.ptr<float>(k);
                     float* old_center = centers.ptr<float>(max_k);
@@ -2626,26 +2628,26 @@ double cv::kmeans( InputArray _data, int K,
                     float scale = 1.f/counters[max_k];
                     for( j = 0; j < dims; j++ )
                         _old_center[j] = old_center[j]*scale;
-                    
+
                     for( i = 0; i < N; i++ )
                     {
                         if( labels[i] != max_k )
                             continue;
                         sample = data.ptr<float>(i);
                         double dist = normL2Sqr_(sample, _old_center, dims);
-                            
+
                         if( max_dist <= dist )
                         {
                             max_dist = dist;
                             farthest_i = i;
                         }
                     }
-                    
+
                     counters[max_k]--;
                     counters[k]++;
                     labels[farthest_i] = k;
                     sample = data.ptr<float>(farthest_i);
-                    
+
                     for( j = 0; j < dims; j++ )
                     {
                         old_center[j] -= sample[j];
@@ -2661,7 +2663,7 @@ double cv::kmeans( InputArray _data, int K,
                     float scale = 1.f/counters[k];
                     for( j = 0; j < dims; j++ )
                         center[j] *= scale;
-                    
+
                     if( iter > 0 )
                     {
                         double dist = 0;
@@ -2675,7 +2677,7 @@ double cv::kmeans( InputArray _data, int K,
                     }
                 }
             }
-            
+
             if( ++iter == MAX(criteria.maxCount, 2) || max_center_shift <= criteria.epsilon )
                 break;
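
The generateCentersPP seeding and the Lloyd iteration above sit behind the public cv::kmeans call; a minimal invocation with kmeans++ seeding, on made-up data, could look like:

    #include <opencv2/core/core.hpp>

    int main()
    {
        // 100 random 2-D samples, one row per sample, CV_32F as kmeans expects
        cv::Mat points(100, 2, CV_32F);
        cv::randu(points, cv::Scalar::all(0), cv::Scalar::all(1));

        cv::Mat labels, centers;
        double compactness = cv::kmeans(points, 3, labels,
            cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
            3 /* attempts */, cv::KMEANS_PP_CENTERS, centers);
        (void)compactness;   // sum of squared distances to the selected centers
        return 0;
    }
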
 
@@ -2759,7 +2761,7 @@ CV_IMPL void
 cvReduce( const CvArr* srcarr, CvArr* dstarr, int dim, int op )
 {
     cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
-    
+
     if( dim < 0 )
         dim = src.rows > dst.rows ? 0 : src.cols > dst.cols ? 1 : dst.cols == 1;
 
@@ -2769,7 +2771,7 @@ cvReduce( const CvArr* srcarr, CvArr* dstarr, int dim, int op )
     if( (dim == 0 && (dst.cols != src.cols || dst.rows != 1)) ||
         (dim == 1 && (dst.rows != src.rows || dst.cols != 1)) )
         CV_Error( CV_StsBadSize, "The output array size is incorrect" );
-    
+
     if( src.channels() != dst.channels() )
         CV_Error( CV_StsUnmatchedFormats, "Input and output arrays must have the same number of channels" );
 
@@ -2781,14 +2783,14 @@ CV_IMPL CvArr*
 cvRange( CvArr* arr, double start, double end )
 {
     int ok = 0;
-    
+
     CvMat stub, *mat = (CvMat*)arr;
     double delta;
     int type, step;
     double val = start;
     int i, j;
     int rows, cols;
-    
+
     if( !CV_IS_MAT(mat) )
         mat = cvGetMat( mat, &stub);
 
@@ -2844,7 +2846,7 @@ CV_IMPL void
 cvSort( const CvArr* _src, CvArr* _dst, CvArr* _idx, int flags )
 {
     cv::Mat src = cv::cvarrToMat(_src), dst, idx;
-    
+
     if( _idx )
     {
         cv::Mat idx0 = cv::cvarrToMat(_idx), idx = idx0;
@@ -2884,7 +2886,7 @@ cvKMeans2( const CvArr* _samples, int cluster_count, CvArr* _labels,
     CV_Assert( labels.isContinuous() && labels.type() == CV_32S &&
         (labels.cols == 1 || labels.rows == 1) &&
         labels.cols + labels.rows - 1 == data.rows );
-    
+
     double compactness = cv::kmeans(data, cluster_count, labels, termcrit, attempts,
                                     flags, _centers ? cv::_OutputArray(centers) : cv::_OutputArray() );
     if( _compactness )
@@ -2932,26 +2934,26 @@ NAryMatIterator::NAryMatIterator(const Mat** _arrays, Mat* _planes, int _narrays
 : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
 {
     init(_arrays, _planes, 0, _narrays);
-}    
-    
+}
+
 NAryMatIterator::NAryMatIterator(const Mat** _arrays, uchar** _ptrs, int _narrays)
     : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
 {
     init(_arrays, 0, _ptrs, _narrays);
 }
-    
+
 void NAryMatIterator::init(const Mat** _arrays, Mat* _planes, uchar** _ptrs, int _narrays)
 {
     CV_Assert( _arrays && (_ptrs || _planes) );
     int i, j, d1=0, i0 = -1, d = -1;
-    
+
     arrays = _arrays;
     ptrs = _ptrs;
     planes = _planes;
     narrays = _narrays;
     nplanes = 0;
     size = 0;
-    
+
     if( narrays < 0 )
     {
         for( i = 0; _arrays[i] != 0; i++ )
@@ -2968,15 +2970,15 @@ void NAryMatIterator::init(const Mat** _arrays, Mat* _planes, uchar** _ptrs, int
         const Mat& A = *arrays[i];
         if( ptrs )
             ptrs[i] = A.data;
-        
+
         if( !A.data )
             continue;
-        
+
         if( i0 < 0 )
         {
             i0 = i;
             d = A.dims;
-            
+
             // find the first dimensionality which is different from 1;
             // in any of the arrays the first "d1" step do not affect the continuity
             for( d1 = 0; d1 < d; d1++ )
@@ -3010,16 +3012,16 @@ void NAryMatIterator::init(const Mat** _arrays, Mat* _planes, uchar** _ptrs, int
         iterdepth = j;
         if( iterdepth == d1 )
             iterdepth = 0;
-        
+
         nplanes = 1;
         for( j = iterdepth-1; j >= 0; j-- )
             nplanes *= arrays[i0]->size[j];
     }
     else
         iterdepth = 0;
-    
+
     idx = 0;
-    
+
     if( !planes )
         return;
 
@@ -3027,14 +3029,14 @@ void NAryMatIterator::init(const Mat** _arrays, Mat* _planes, uchar** _ptrs, int
     {
         CV_Assert(arrays[i] != 0);
         const Mat& A = *arrays[i];
-        
+
         if( !A.data )
         {
             planes[i] = Mat();
             continue;
         }
-        
-        planes[i] = Mat(1, (int)size, A.type(), A.data); 
+
+        planes[i] = Mat(1, (int)size, A.type(), A.data);
     }
 }
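
NAryMatIterator, whose init() appears above, is how core walks matrices of arbitrary dimensionality plane by plane; the usual pattern (a sketch, names are illustrative):

    // Sum all elements of an n-dimensional CV_32F matrix plane by plane.
    static double sumAll(const cv::Mat& src)
    {
        const cv::Mat* arrays[] = { &src, 0 };   // zero-terminated list of inputs
        cv::Mat planes[1];
        cv::NAryMatIterator it(arrays, planes);
        double s = 0;
        for( size_t p = 0; p < it.nplanes; p++, ++it )
            s += cv::sum(it.planes[0])[0];       // each plane is a continuous slice
        return s;
    }
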
 
@@ -3044,7 +3046,7 @@ NAryMatIterator& NAryMatIterator::operator ++()
     if( idx >= nplanes-1 )
         return *this;
     ++idx;
-    
+
     if( iterdepth == 1 )
     {
         if( ptrs )
@@ -3087,7 +3089,7 @@ NAryMatIterator& NAryMatIterator::operator ++()
                 planes[i].data = data;
         }
     }
-    
+
     return *this;
 }
 
@@ -3107,7 +3109,7 @@ Point MatConstIterator::pos() const
     if( !m )
         return Point();
     CV_DbgAssert(m->dims <= 2);
-    
+
     ptrdiff_t ofs = ptr - m->data;
     int y = (int)(ofs/m->step[0]);
     return Point((int)((ofs - y*m->step[0])/elemSize), y);
@@ -3147,7 +3149,7 @@ ptrdiff_t MatConstIterator::lpos() const
     }
     return result;
 }
-    
+
 void MatConstIterator::seek(ptrdiff_t ofs, bool relative)
 {
     if( m->isContinuous() )
@@ -3159,7 +3161,7 @@ void MatConstIterator::seek(ptrdiff_t ofs, bool relative)
             ptr = sliceEnd;
         return;
     }
-    
+
     int d = m->dims;
     if( d == 2 )
     {
@@ -3178,20 +3180,20 @@ void MatConstIterator::seek(ptrdiff_t ofs, bool relative)
             sliceStart + (ofs - y*m->cols)*elemSize;
         return;
     }
-    
+
     if( relative )
         ofs += lpos();
-    
+
     if( ofs < 0 )
         ofs = 0;
-    
+
     int szi = m->size[d-1];
     ptrdiff_t t = ofs/szi;
     int v = (int)(ofs - t*szi);
     ofs = t;
     ptr = m->data + v*elemSize;
     sliceStart = m->data;
-    
+
     for( int i = d-2; i >= 0; i-- )
     {
         szi = m->size[i];
@@ -3200,14 +3202,14 @@ void MatConstIterator::seek(ptrdiff_t ofs, bool relative)
         ofs = t;
         sliceStart += v*m->step[i];
     }
-    
+
     sliceEnd = sliceStart + m->size[d-1]*elemSize;
     if( ofs > 0 )
         ptr = sliceEnd;
     else
         ptr = sliceStart + (ptr - m->data);
 }
-    
+
 void MatConstIterator::seek(const int* _idx, bool relative)
 {
     int i, d = m->dims;
@@ -3232,8 +3234,8 @@ ptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a)
         return (b.ptr - a.ptr)/b.elemSize;
 
     return b.lpos() - a.lpos();
-}    
-    
+}
+
 //////////////////////////////// SparseMat ////////////////////////////////
 
 template<typename T1, typename T2> void
@@ -3260,7 +3262,7 @@ convertScaleData_(const void* _from, void* _to, int cn, double alpha, double bet
             to[i] = saturate_cast<T2>(from[i]*alpha + beta);
 }
 
-ConvertData getConvertData(int fromType, int toType)
+static ConvertData getConvertData(int fromType, int toType)
 {
     static ConvertData tab[][8] =
     {{ convertData_<uchar, uchar>, convertData_<uchar, schar>,
@@ -3305,7 +3307,7 @@ ConvertData getConvertData(int fromType, int toType)
     return func;
 }
 
-ConvertScaleData getConvertScaleData(int fromType, int toType)
+static ConvertScaleData getConvertScaleData(int fromType, int toType)
 {
     static ConvertScaleData tab[][8] =
     {{ convertScaleData_<uchar, uchar>, convertScaleData_<uchar, schar>,
@@ -3382,7 +3384,7 @@ SparseMat::Hdr::Hdr( int _dims, const int* _sizes, int _type )
         sizeof(int)*std::max(dims - CV_MAX_DIM, 0), CV_ELEM_SIZE1(_type));
     nodeSize = alignSize(valueOffset +
         CV_ELEM_SIZE(_type), (int)sizeof(size_t));
-   
+
     int i;
     for( i = 0; i < dims; i++ )
         size[i] = _sizes[i];
@@ -3420,7 +3422,7 @@ SparseMat::SparseMat(const Mat& m)
             uchar* to = newNode(idx, hash(idx));
             copyElem( ptr, to, esz );
         }
-        
+
         for( i = d - 2; i >= 0; i-- )
         {
             ptr += m.step[i] - m.size[i+1]*m.step[i+1];
@@ -3432,7 +3434,7 @@ SparseMat::SparseMat(const Mat& m)
             break;
     }
 }
-                
+
 SparseMat::SparseMat(const CvSparseMat* m)
 : flags(MAGIC_VAL), hdr(0)
 {
@@ -3525,11 +3527,11 @@ void SparseMat::convertTo( SparseMat& m, int rtype, double alpha ) const
         m = temp;
         return;
     }
-    
+
     CV_Assert(hdr != 0);
     if( hdr != m.hdr )
         m.create( hdr->dims, hdr->size, rtype );
-    
+
     SparseMatConstIterator from = begin();
     size_t i, N = nzcount();
 
@@ -3540,7 +3542,7 @@ void SparseMat::convertTo( SparseMat& m, int rtype, double alpha ) const
         {
             const Node* n = from.node();
             uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval);
-            cvtfunc( from.ptr, to, cn ); 
+            cvtfunc( from.ptr, to, cn );
         }
     }
     else
@@ -3550,7 +3552,7 @@ void SparseMat::convertTo( SparseMat& m, int rtype, double alpha ) const
         {
             const Node* n = from.node();
             uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval);
-            cvtfunc( from.ptr, to, cn, alpha, 0 ); 
+            cvtfunc( from.ptr, to, cn, alpha, 0 );
         }
     }
 }
@@ -3562,7 +3564,7 @@ void SparseMat::convertTo( Mat& m, int rtype, double alpha, double beta ) const
     if( rtype < 0 )
         rtype = type();
     rtype = CV_MAKETYPE(rtype, cn);
-    
+
     CV_Assert( hdr );
     m.create( dims(), hdr->size, rtype );
     m = Scalar(beta);
@@ -3629,7 +3631,7 @@ uchar* SparseMat::ptr(int i0, bool createMissing, size_t* hashval)
             return &value<uchar>(elem);
         nidx = elem->next;
     }
-    
+
     if( createMissing )
     {
         int idx[] = { i0 };
@@ -3637,7 +3639,7 @@ uchar* SparseMat::ptr(int i0, bool createMissing, size_t* hashval)
     }
     return 0;
 }
-    
+
 uchar* SparseMat::ptr(int i0, int i1, bool createMissing, size_t* hashval)
 {
     CV_Assert( hdr && hdr->dims == 2 );
@@ -3810,7 +3812,7 @@ uchar* SparseMat::newNode(const int* idx, size_t hashval)
         resizeHashTab(std::max(hsize*2, (size_t)8));
         hsize = hdr->hashtab.size();
     }
-    
+
     if( !hdr->freeList )
     {
         size_t i, nsz = hdr->nodeSize, psize = hdr->pool.size(),
@@ -3841,7 +3843,7 @@ uchar* SparseMat::newNode(const int* idx, size_t hashval)
         *((double*)p) = 0.;
     else
         memset(p, 0, esz);
-    
+
     return p;
 }
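
SparseMat only materializes the nodes created through newNode() above; element access goes through ref()/value(), for example (an illustrative sketch):

    #include <opencv2/core/core.hpp>

    static void sparseDemo()
    {
        const int sz[] = { 1000, 1000 };
        cv::SparseMat sm(2, sz, CV_32F);
        sm.ref<float>(3, 7) = 1.5f;              // creates the hash node on first access
        float v = sm.value<float>(3, 7);         // reads without creating a node
        double l2 = cv::norm(sm, cv::NORM_L2);   // the SparseMat norm defined below in this file
        (void)v; (void)l2;
    }
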
 
@@ -3913,14 +3915,14 @@ SparseMatConstIterator& SparseMatConstIterator::operator ++()
 double norm( const SparseMat& src, int normType )
 {
     SparseMatConstIterator it = src.begin();
-    
+
     size_t i, N = src.nzcount();
     normType &= NORM_TYPE_MASK;
     int type = src.type();
     double result = 0;
-    
+
     CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
-    
+
     if( type == CV_32F )
     {
         if( normType == NORM_INF )
@@ -3932,7 +3934,7 @@ double norm( const SparseMat& src, int normType )
         else
             for( i = 0; i < N; i++, ++it )
             {
-                double v = *(const float*)it.ptr; 
+                double v = *(const float*)it.ptr;
                 result += v*v;
             }
     }
@@ -3947,25 +3949,25 @@ double norm( const SparseMat& src, int normType )
         else
             for( i = 0; i < N; i++, ++it )
             {
-                double v = *(const double*)it.ptr; 
+                double v = *(const double*)it.ptr;
                 result += v*v;
             }
     }
     else
         CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );
-    
+
     if( normType == NORM_L2 )
         result = std::sqrt(result);
     return result;
 }
-    
+
 void minMaxLoc( const SparseMat& src, double* _minval, double* _maxval, int* _minidx, int* _maxidx )
 {
     SparseMatConstIterator it = src.begin();
     size_t i, N = src.nzcount(), d = src.hdr ? src.hdr->dims : 0;
     int type = src.type();
     const int *minidx = 0, *maxidx = 0;
-    
+
     if( type == CV_32F )
     {
         float minval = FLT_MAX, maxval = -FLT_MAX;
@@ -4012,7 +4014,7 @@ void minMaxLoc( const SparseMat& src, double* _minval, double* _maxval, int* _mi
     }
     else
         CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );
-    
+
     if( _minidx )
         for( i = 0; i < d; i++ )
             _minidx[i] = minidx[i];
@@ -4021,7 +4023,7 @@ void minMaxLoc( const SparseMat& src, double* _minval, double* _maxval, int* _mi
             _maxidx[i] = maxidx[i];
 }
 
-    
+
 void normalize( const SparseMat& src, SparseMat& dst, double a, int norm_type )
 {
     double scale = 1;
@@ -4032,18 +4034,18 @@ void normalize( const SparseMat& src, SparseMat& dst, double a, int norm_type )
     }
     else
         CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );
-    
+
     src.convertTo( dst, -1, scale );
 }
 
 ////////////////////// RotatedRect //////////////////////
-    
+
 void RotatedRect::points(Point2f pt[]) const
 {
     double _angle = angle*CV_PI/180.;
     float b = (float)cos(_angle)*0.5f;
     float a = (float)sin(_angle)*0.5f;
-    
+
     pt[0].x = center.x - a*size.height - b*size.width;
     pt[0].y = center.y + b*size.height - a*size.width;
     pt[1].x = center.x + a*size.height - b*size.width;
@@ -4065,8 +4067,8 @@ Rect RotatedRect::boundingRect() const
     r.width -= r.x - 1;
     r.height -= r.y - 1;
     return r;
-}        
-    
 }
+
+}
+
 /* End of file. */
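
A note on the static qualifiers added to getConvertData and getConvertScaleData above: they are file-local helpers with no declaration in any header, and under a stricter gcc warning set (typically -Wmissing-declarations) a non-static definition with no prior prototype is reported. A minimal illustration with a hypothetical file:

    // g++ -Wmissing-declarations -c example.cpp
    static int localHelper(int x)   // file-local: no warning, no exported symbol
    {
        return x * 2;
    }

    int publicEntry(int x);         // declared up front (normally in a header)
    int publicEntry(int x)
    {
        return localHelper(x) + 1;
    }
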
index 366f5cf..6817fca 100644
@@ -116,13 +116,13 @@ static void writeMat(std::ostream& out, const Mat& m, char rowsep, char elembrac
 {
     CV_Assert(m.dims <= 2);
     int type = m.type();
-    
+
     char crowbrace = getCloseBrace(rowsep);
     char orowbrace = crowbrace ? rowsep : '\0';
-    
+
     if( orowbrace || isspace(rowsep) )
         rowsep = '\0';
-    
+
     for( int i = 0; i < m.rows; i++ )
     {
         if(orowbrace)
@@ -151,7 +151,7 @@ public:
         writeMat(out, m, ';', ' ', m.cols == 1);
         out << "]";
     }
-    
+
     void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
     {
         writeElems(out, data, nelems, type, ' ');
@@ -168,7 +168,7 @@ public:
         writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.cols*m.channels() == 1);
         out << "]";
     }
-    
+
     void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
     {
         writeElems(out, data, nelems, type, '[');
@@ -190,7 +190,7 @@ public:
         writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.cols*m.channels() == 1);
         out << "], type='" << numpyTypes[m.depth()] << "')";
     }
-    
+
     void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
     {
         writeElems(out, data, nelems, type, '[');
@@ -208,7 +208,7 @@ public:
         if(m.rows > 1)
             out << "\n";
     }
-    
+
     void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
     {
         writeElems(out, data, nelems, type, ' ');
@@ -226,7 +226,7 @@ public:
         writeMat(out, m, ',', ' ', m.cols==1);
         out << "}";
     }
-    
+
     void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const
     {
         writeElems(out, data, nelems, type, ' ');
@@ -243,7 +243,7 @@ static CFormatter cFormatter;
 static const Formatter* g_defaultFormatter0 = &matlabFormatter;
 static const Formatter* g_defaultFormatter = &matlabFormatter;
 
-bool my_streq(const char* a, const char* b)
+static bool my_streq(const char* a, const char* b)
 {
     size_t i, alen = strlen(a), blen = strlen(b);
     if( alen != blen )
@@ -280,7 +280,7 @@ const Formatter* Formatter::setDefault(const Formatter* fmt)
     g_defaultFormatter = fmt;
     return prevFmt;
 }
-    
+
 Formatted::Formatted(const Mat& _m, const Formatter* _fmt,
                      const vector<int>& _params)
 {
@@ -288,12 +288,12 @@ Formatted::Formatted(const Mat& _m, const Formatter* _fmt,
     fmt = _fmt ? _fmt : Formatter::get();
     std::copy(_params.begin(), _params.end(), back_inserter(params));
 }
-    
+
 Formatted::Formatted(const Mat& _m, const Formatter* _fmt, const int* _params)
 {
     mtx = _m;
     fmt = _fmt ? _fmt : Formatter::get();
-    
+
     if( _params )
     {
         int i, maxParams = 100;
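
out.cpp supplies the formatters behind Mat's stream output; exercising them looks roughly like this (a sketch assuming the 2.4-era cv::format(const Mat&, const char*) overload):

    #include <opencv2/core/core.hpp>
    #include <iostream>

    static void printDemo()
    {
        cv::Mat m = cv::Mat::eye(2, 2, CV_32F);
        std::cout << m << std::endl;                         // default MATLAB-style formatter
        std::cout << cv::format(m, "python") << std::endl;   // one of the named formatters above
    }
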
index 499c44c..b5b08fb 100644
@@ -54,7 +54,7 @@ template<typename T> static inline Scalar rawToScalar(const T& v)
     for( i = 0; i < n; i++ )
         s.val[i] = ((T1*)&v)[i];
     return s;
-}    
+}
 
 /****************************************************************************************\
 *                                        sum                                             *
@@ -72,7 +72,7 @@ static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
         {
             ST s0 = dst[0];
 
-                       #if CV_ENABLE_UNROLLED
+            #if CV_ENABLE_UNROLLED
             for(; i <= len - 4; i += 4, src += cn*4 )
                 s0 += src[0] + src[cn] + src[cn*2] + src[cn*3];
             #endif
@@ -104,7 +104,7 @@ static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
             dst[1] = s1;
             dst[2] = s2;
         }
-        
+
         for( ; k < cn; k += 4 )
         {
             src = src0 + k;
@@ -121,7 +121,7 @@ static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
         }
         return len;
     }
-    
+
     int i, nzm = 0;
     if( cn == 1 )
     {
@@ -155,7 +155,7 @@ static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn )
             if( mask[i] )
             {
                 int k = 0;
-                               #if CV_ENABLE_UNROLLED
+                #if CV_ENABLE_UNROLLED
                 for( ; k <= cn - 4; k += 4 )
                 {
                     ST s0, s1;
@@ -212,7 +212,7 @@ template<typename T>
 static int countNonZero_(const T* src, int len )
 {
     int i=0, nz = 0;
-       #if CV_ENABLE_UNROLLED
+    #if CV_ENABLE_UNROLLED
     for(; i <= len - 4; i += 4 )
         nz += (src[i] != 0) + (src[i+1] != 0) + (src[i+2] != 0) + (src[i+3] != 0);
     #endif
@@ -251,12 +251,12 @@ template<typename T, typename ST, typename SQT>
 static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn )
 {
     const T* src = src0;
-    
+
     if( !mask )
     {
         int i;
         int k = cn % 4;
-        
+
         if( k == 1 )
         {
             ST s0 = sum[0];
@@ -296,7 +296,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le
             sum[0] = s0; sum[1] = s1; sum[2] = s2;
             sqsum[0] = sq0; sqsum[1] = sq1; sqsum[2] = sq2;
         }
-        
+
         for( ; k < cn; k += 4 )
         {
             src = src0 + k;
@@ -319,7 +319,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le
         }
         return len;
     }
-    
+
     int i, nzm = 0;
 
     if( cn == 1 )
@@ -368,7 +368,7 @@ static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int le
             }
     }
     return nzm;
-}    
+}
 
 
 static int sqsum8u( const uchar* src, const uchar* mask, int* sum, int* sqsum, int len, int cn )
@@ -407,9 +407,9 @@ cv::Scalar cv::sum( InputArray _src )
     Mat src = _src.getMat();
     int k, cn = src.channels(), depth = src.depth();
     SumFunc func = sumTab[depth];
-    
+
     CV_Assert( cn <= 4 && func != 0 );
-    
+
     const Mat* arrays[] = {&src, 0};
     uchar* ptrs[1];
     NAryMatIterator it(arrays, ptrs);
@@ -420,7 +420,7 @@ cv::Scalar cv::sum( InputArray _src )
     int* buf = (int*)&s[0];
     size_t esz = 0;
     bool blockSum = depth < CV_32S;
-    
+
     if( blockSum )
     {
         intSumBlockSize = depth <= CV_8S ? (1 << 23) : (1 << 15);
@@ -459,30 +459,30 @@ int cv::countNonZero( InputArray _src )
 {
     Mat src = _src.getMat();
     CountNonZeroFunc func = countNonZeroTab[src.depth()];
-    
+
     CV_Assert( src.channels() == 1 && func != 0 );
-    
+
     const Mat* arrays[] = {&src, 0};
     uchar* ptrs[1];
     NAryMatIterator it(arrays, ptrs);
     int total = (int)it.size, nz = 0;
-    
+
     for( size_t i = 0; i < it.nplanes; i++, ++it )
         nz += func( ptrs[0], total );
-    
+
     return nz;
-}    
+}
 
 cv::Scalar cv::mean( InputArray _src, InputArray _mask )
 {
     Mat src = _src.getMat(), mask = _mask.getMat();
     CV_Assert( mask.empty() || mask.type() == CV_8U );
-    
+
     int k, cn = src.channels(), depth = src.depth();
     SumFunc func = sumTab[depth];
-    
+
     CV_Assert( cn <= 4 && func != 0 );
-    
+
     const Mat* arrays[] = {&src, &mask, 0};
     uchar* ptrs[2];
     NAryMatIterator it(arrays, ptrs);
@@ -493,19 +493,19 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask )
     int* buf = (int*)&s[0];
     bool blockSum = depth <= CV_16S;
     size_t esz = 0, nz0 = 0;
-    
+
     if( blockSum )
     {
         intSumBlockSize = depth <= CV_8S ? (1 << 23) : (1 << 15);
         blockSize = std::min(blockSize, intSumBlockSize);
         _buf.allocate(cn);
         buf = _buf;
-        
+
         for( k = 0; k < cn; k++ )
             buf[k] = 0;
         esz = src.elemSize();
     }
-    
+
     for( size_t i = 0; i < it.nplanes; i++, ++it )
     {
         for( j = 0; j < total; j += blockSize )
@@ -529,19 +529,19 @@ cv::Scalar cv::mean( InputArray _src, InputArray _mask )
         }
     }
     return s*(nz0 ? 1./nz0 : 0);
-}    
+}
 
 
 void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, InputArray _mask )
 {
     Mat src = _src.getMat(), mask = _mask.getMat();
     CV_Assert( mask.empty() || mask.type() == CV_8U );
-    
+
     int k, cn = src.channels(), depth = src.depth();
     SumSqrFunc func = sumSqrTab[depth];
-    
+
     CV_Assert( func != 0 );
-    
+
     const Mat* arrays[] = {&src, &mask, 0};
     uchar* ptrs[2];
     NAryMatIterator it(arrays, ptrs);
@@ -552,10 +552,10 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input
     int *sbuf = (int*)s, *sqbuf = (int*)sq;
     bool blockSum = depth <= CV_16S, blockSqSum = depth <= CV_8S;
     size_t esz = 0;
-    
+
     for( k = 0; k < cn; k++ )
         s[k] = sq[k] = 0;
-    
+
     if( blockSum )
     {
         intSumBlockSize = 1 << 15;
@@ -567,7 +567,7 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input
             sbuf[k] = sqbuf[k] = 0;
         esz = src.elemSize();
     }
-    
+
     for( size_t i = 0; i < it.nplanes; i++, ++it )
     {
         for( j = 0; j < total; j += blockSize )
@@ -598,14 +598,14 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input
                 ptrs[1] += bsz;
         }
     }
-    
+
     double scale = nz0 ? 1./nz0 : 0.;
     for( k = 0; k < cn; k++ )
     {
         s[k] *= scale;
         sq[k] = std::sqrt(std::max(sq[k]*scale - s[k]*s[k], 0.));
     }
-    
+
     for( j = 0; j < 2; j++ )
     {
         const double* sptr = j == 0 ? s : sq;
@@ -640,7 +640,7 @@ minMaxIdx_( const T* src, const uchar* mask, WT* _minVal, WT* _maxVal,
 {
     WT minVal = *_minVal, maxVal = *_maxVal;
     size_t minIdx = *_minIdx, maxIdx = *_maxIdx;
-    
+
     if( !mask )
     {
         for( int i = 0; i < len; i++ )
@@ -708,7 +708,7 @@ static void minMaxIdx_32f(const float* src, const uchar* mask, float* minval, fl
 
 static void minMaxIdx_64f(const double* src, const uchar* mask, double* minval, double* maxval,
                           size_t* minidx, size_t* maxidx, int len, size_t startidx )
-{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); }    
+{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); }
 
 typedef void (*MinMaxIdxFunc)(const uchar*, const uchar*, int*, int*, size_t*, size_t*, int, size_t);
 
@@ -749,16 +749,16 @@ void cv::minMaxIdx(InputArray _src, double* minVal,
 {
     Mat src = _src.getMat(), mask = _mask.getMat();
     int depth = src.depth(), cn = src.channels();
-    
+
     CV_Assert( (cn == 1 && (mask.empty() || mask.type() == CV_8U)) ||
                (cn >= 1 && mask.empty() && !minIdx && !maxIdx) );
     MinMaxIdxFunc func = minmaxTab[depth];
     CV_Assert( func != 0 );
-    
+
     const Mat* arrays[] = {&src, &mask, 0};
     uchar* ptrs[2];
     NAryMatIterator it(arrays, ptrs);
-    
+
     size_t minidx = 0, maxidx = 0;
     int iminval = INT_MAX, imaxval = INT_MIN;
     float fminval = FLT_MAX, fmaxval = -FLT_MAX;
@@ -766,39 +766,39 @@ void cv::minMaxIdx(InputArray _src, double* minVal,
     size_t startidx = 1;
     int *minval = &iminval, *maxval = &imaxval;
     int planeSize = (int)it.size*cn;
-    
+
     if( depth == CV_32F )
         minval = (int*)&fminval, maxval = (int*)&fmaxval;
     else if( depth == CV_64F )
         minval = (int*)&dminval, maxval = (int*)&dmaxval;
-    
+
     for( size_t i = 0; i < it.nplanes; i++, ++it, startidx += planeSize )
         func( ptrs[0], ptrs[1], minval, maxval, &minidx, &maxidx, planeSize, startidx );
-    
+
     if( minidx == 0 )
         dminval = dmaxval = 0;
     else if( depth == CV_32F )
         dminval = fminval, dmaxval = fmaxval;
     else if( depth <= CV_32S )
         dminval = iminval, dmaxval = imaxval;
-    
+
     if( minVal )
         *minVal = dminval;
     if( maxVal )
         *maxVal = dmaxval;
-    
+
     if( minIdx )
         ofs2idx(src, minidx, minIdx);
     if( maxIdx )
         ofs2idx(src, maxidx, maxIdx);
-}    
+}
 
 void cv::minMaxLoc( InputArray _img, double* minVal, double* maxVal,
                     Point* minLoc, Point* maxLoc, InputArray mask )
 {
     Mat img = _img.getMat();
     CV_Assert(img.dims <= 2);
-    
+
     minMaxIdx(_img, minVal, maxVal, (int*)minLoc, (int*)maxLoc, mask);
     if( minLoc )
         std::swap(minLoc->x, minLoc->y);
@@ -821,7 +821,7 @@ float normL2Sqr_(const float* a, const float* b, int n)
     {
         float CV_DECL_ALIGNED(16) buf[4];
         __m128 d0 = _mm_setzero_ps(), d1 = _mm_setzero_ps();
-        
+
         for( ; j <= n - 8; j += 8 )
         {
             __m128 t0 = _mm_sub_ps(_mm_loadu_ps(a + j), _mm_loadu_ps(b + j));
@@ -834,14 +834,14 @@ float normL2Sqr_(const float* a, const float* b, int n)
     }
     else
 #endif
-       {
+    {
         for( ; j <= n - 4; j += 4 )
         {
             float t0 = a[j] - b[j], t1 = a[j+1] - b[j+1], t2 = a[j+2] - b[j+2], t3 = a[j+3] - b[j+3];
             d += t0*t0 + t1*t1 + t2*t2 + t3*t3;
         }
     }
+
     for( ; j < n; j++ )
     {
         float t = a[j] - b[j];
@@ -861,7 +861,7 @@ float normL1_(const float* a, const float* b, int n)
         static const int CV_DECL_ALIGNED(16) absbuf[4] = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
         __m128 d0 = _mm_setzero_ps(), d1 = _mm_setzero_ps();
         __m128 absmask = _mm_load_ps((const float*)absbuf);
-        
+
         for( ; j <= n - 8; j += 8 )
         {
             __m128 t0 = _mm_sub_ps(_mm_loadu_ps(a + j), _mm_loadu_ps(b + j));
@@ -894,12 +894,12 @@ int normL1_(const uchar* a, const uchar* b, int n)
     if( USE_SSE2 )
     {
         __m128i d0 = _mm_setzero_si128();
-        
+
         for( ; j <= n - 16; j += 16 )
         {
             __m128i t0 = _mm_loadu_si128((const __m128i*)(a + j));
             __m128i t1 = _mm_loadu_si128((const __m128i*)(b + j));
-            
+
             d0 = _mm_add_epi32(d0, _mm_sad_epu8(t0, t1));
         }
 
@@ -907,7 +907,7 @@ int normL1_(const uchar* a, const uchar* b, int n)
         {
             __m128i t0 = _mm_cvtsi32_si128(*(const int*)(a + j));
             __m128i t1 = _mm_cvtsi32_si128(*(const int*)(b + j));
-            
+
             d0 = _mm_add_epi32(d0, _mm_sad_epu8(t0, t1));
         }
         d = _mm_cvtsi128_si32(_mm_add_epi32(d0, _mm_unpackhi_epi64(d0, d0)));
@@ -926,7 +926,7 @@ int normL1_(const uchar* a, const uchar* b, int n)
     return d;
 }
 
-static const uchar popCountTable[] = 
+static const uchar popCountTable[] =
 {
     0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
     1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
@@ -962,7 +962,7 @@ static const uchar popCountTable4[] =
     1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
 };
 
-int normHamming(const uchar* a, int n)
+static int normHamming(const uchar* a, int n)
 {
     int i = 0, result = 0;
 #if CV_NEON
@@ -989,7 +989,7 @@ int normHamming(const uchar* a, int n)
         result += popCountTable[a[i]];
     return result;
 }
-    
+
 int normHamming(const uchar* a, const uchar* b, int n)
 {
     int i = 0, result = 0;
@@ -1020,7 +1020,7 @@ int normHamming(const uchar* a, const uchar* b, int n)
     return result;
 }
 
-int normHamming(const uchar* a, int n, int cellSize)
+static int normHamming(const uchar* a, int n, int cellSize)
 {
     if( cellSize == 1 )
         return normHamming(a, n);
@@ -1039,8 +1039,8 @@ int normHamming(const uchar* a, int n, int cellSize)
     for( ; i < n; i++ )
         result += tab[a[i]];
     return result;
-}    
-    
+}
+
 int normHamming(const uchar* a, const uchar* b, int n, int cellSize)
 {
     if( cellSize == 1 )
@@ -1053,7 +1053,7 @@ int normHamming(const uchar* a, const uchar* b, int n, int cellSize)
     else
         CV_Error( CV_StsBadSize, "bad cell size (not 1, 2 or 4) in normHamming" );
     int i = 0, result = 0;
-       #if CV_ENABLE_UNROLLED
+    #if CV_ENABLE_UNROLLED
     for( ; i <= n - 4; i += 4 )
         result += tab[a[i] ^ b[i]] + tab[a[i+1] ^ b[i+1]] +
                 tab[a[i+2] ^ b[i+2]] + tab[a[i+3] ^ b[i+3]];
@@ -1128,7 +1128,7 @@ normL2_(const T* src, const uchar* mask, ST* _result, int len, int cn)
     }
     *_result = result;
     return 0;
-}    
+}
 
 template<typename T, typename ST> int
 normDiffInf_(const T* src1, const T* src2, const uchar* mask, ST* _result, int len, int cn)
@@ -1194,7 +1194,7 @@ normDiffL2_(const T* src1, const T* src2, const uchar* mask, ST* _result, int le
     }
     *_result = result;
     return 0;
-}    
+}
 
 
 #define CV_DEF_NORM_FUNC(L, suffix, type, ntype) \
@@ -1219,7 +1219,7 @@ CV_DEF_NORM_ALL(64f, double, double, double, double)
 
 
 typedef int (*NormFunc)(const uchar*, const uchar*, uchar*, int, int);
-typedef int (*NormDiffFunc)(const uchar*, const uchar*, const uchar*, uchar*, int, int);    
+typedef int (*NormDiffFunc)(const uchar*, const uchar*, const uchar*, uchar*, int, int);
 
 static NormFunc normTab[3][8] =
 {
@@ -1265,11 +1265,11 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
 {
     Mat src = _src.getMat(), mask = _mask.getMat();
     int depth = src.depth(), cn = src.channels();
-    
+
     normType &= 7;
     CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR ||
                ((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src.type() == CV_8U) );
-    
+
     if( src.isContinuous() && mask.empty() )
     {
         size_t len = src.total()*cn;
@@ -1278,7 +1278,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
             if( depth == CV_32F )
             {
                 const float* data = src.ptr<float>();
-                
+
                 if( normType == NORM_L2 )
                 {
                     double result = 0;
@@ -1307,18 +1307,18 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
             if( depth == CV_8U )
             {
                 const uchar* data = src.ptr<uchar>();
-                
+
                 if( normType == NORM_HAMMING )
                     return normHamming(data, (int)len);
-                
+
                 if( normType == NORM_HAMMING2 )
                     return normHamming(data, (int)len, 2);
             }
         }
     }
-    
+
     CV_Assert( mask.empty() || mask.type() == CV_8U );
-    
+
     if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
     {
         if( !mask.empty() )
@@ -1328,22 +1328,22 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
             return norm(temp, normType);
         }
         int cellSize = normType == NORM_HAMMING ? 1 : 2;
-        
+
         const Mat* arrays[] = {&src, 0};
         uchar* ptrs[1];
         NAryMatIterator it(arrays, ptrs);
         int total = (int)it.size;
         int result = 0;
-        
+
         for( size_t i = 0; i < it.nplanes; i++, ++it )
             result += normHamming(ptrs[0], total, cellSize);
-        
+
         return result;
     }
-    
+
     NormFunc func = normTab[normType >> 1][depth];
     CV_Assert( func != 0 );
-    
+
     const Mat* arrays[] = {&src, &mask, 0};
     uchar* ptrs[2];
     union
@@ -1361,7 +1361,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
     int isum = 0;
     int *ibuf = &result.i;
     size_t esz = 0;
-    
+
     if( blockSum )
     {
         intSumBlockSize = (normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15))/cn;
@@ -1369,7 +1369,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
         ibuf = &isum;
         esz = src.elemSize();
     }
-    
+
     for( size_t i = 0; i < it.nplanes; i++, ++it )
     {
         for( j = 0; j < total; j += blockSize )
@@ -1388,7 +1388,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
                 ptrs[1] += bsz;
         }
     }
-    
+
     if( normType == NORM_INF )
     {
         if( depth == CV_64F )
@@ -1400,7 +1400,7 @@ double cv::norm( InputArray _src, int normType, InputArray _mask )
     }
     else if( normType == NORM_L2 )
         result.d = std::sqrt(result.d);
-    
+
     return result.d;
 }
 
@@ -1409,16 +1409,16 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
 {
     if( normType & CV_RELATIVE )
         return norm(_src1, _src2, normType & ~CV_RELATIVE, _mask)/(norm(_src2, normType, _mask) + DBL_EPSILON);
-    
+
     Mat src1 = _src1.getMat(), src2 = _src2.getMat(), mask = _mask.getMat();
     int depth = src1.depth(), cn = src1.channels();
-    
+
     CV_Assert( src1.size == src2.size && src1.type() == src2.type() );
-    
+
     normType &= 7;
     CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR ||
               ((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src1.type() == CV_8U) );
-    
+
     if( src1.isContinuous() && src2.isContinuous() && mask.empty() )
     {
         size_t len = src1.total()*src1.channels();
@@ -1428,7 +1428,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
             {
                 const float* data1 = src1.ptr<float>();
                 const float* data2 = src2.ptr<float>();
-                
+
                 if( normType == NORM_L2 )
                 {
                     double result = 0;
@@ -1456,9 +1456,9 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
             }
         }
     }
-    
+
     CV_Assert( mask.empty() || mask.type() == CV_8U );
-    
+
     if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
     {
         if( !mask.empty() )
@@ -1469,22 +1469,22 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
             return norm(temp, normType);
         }
         int cellSize = normType == NORM_HAMMING ? 1 : 2;
-        
+
         const Mat* arrays[] = {&src1, &src2, 0};
         uchar* ptrs[2];
         NAryMatIterator it(arrays, ptrs);
         int total = (int)it.size;
         int result = 0;
-        
+
         for( size_t i = 0; i < it.nplanes; i++, ++it )
             result += normHamming(ptrs[0], ptrs[1], total, cellSize);
-        
+
         return result;
     }
-    
+
     NormDiffFunc func = normDiffTab[normType >> 1][depth];
     CV_Assert( func != 0 );
-    
+
     const Mat* arrays[] = {&src1, &src2, &mask, 0};
     uchar* ptrs[3];
     union
@@ -1503,7 +1503,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
     unsigned isum = 0;
     unsigned *ibuf = &result.u;
     size_t esz = 0;
-    
+
     if( blockSum )
     {
         intSumBlockSize = normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15);
@@ -1511,7 +1511,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
         ibuf = &isum;
         esz = src1.elemSize();
     }
-    
+
     for( size_t i = 0; i < it.nplanes; i++, ++it )
     {
         for( j = 0; j < total; j += blockSize )
@@ -1531,7 +1531,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
                 ptrs[2] += bsz;
         }
     }
-    
+
     if( normType == NORM_INF )
     {
         if( depth == CV_64F )
@@ -1543,7 +1543,7 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
     }
     else if( normType == NORM_L2 )
         result.d = std::sqrt(result.d);
-    
+
     return result.d;
 }
 
@@ -1692,7 +1692,7 @@ static void batchDistL2_32f(const float* src1, const float* src2, size_t step2,
 typedef void (*BatchDistFunc)(const uchar* src1, const uchar* src2, size_t step2,
                               int nvecs, int len, uchar* dist, const uchar* mask);
 
-    
+
 struct BatchDistInvoker
 {
     BatchDistInvoker( const Mat& _src1, const Mat& _src2,
@@ -1709,26 +1709,26 @@ struct BatchDistInvoker
         update = _update;
         func = _func;
     }
-    
+
     void operator()(const BlockedRange& range) const
     {
         AutoBuffer<int> buf(src2->rows);
         int* bufptr = buf;
-        
+
         for( int i = range.begin(); i < range.end(); i++ )
         {
             func(src1->ptr(i), src2->ptr(), src2->step, src2->rows, src2->cols,
                  K > 0 ? (uchar*)bufptr : dist->ptr(i), mask->data ? mask->ptr(i) : 0);
-            
+
             if( K > 0 )
             {
                 int* nidxptr = nidx->ptr<int>(i);
                 // since positive float's can be compared just like int's,
                 // we handle both CV_32S and CV_32F cases with a single branch
                 int* distptr = (int*)dist->ptr(i);
-                
+
                 int j, k;
-                
+
                 for( j = 0; j < src2->rows; j++ )
                 {
                     int d = bufptr[j];
@@ -1746,7 +1746,7 @@ struct BatchDistInvoker
             }
         }
     }
-    
+
     const Mat *src1;
     const Mat *src2;
     Mat *dist;
@@ -1756,9 +1756,9 @@ struct BatchDistInvoker
     int update;
     BatchDistFunc func;
 };
-    
+
 }
-    
+
 void cv::batchDistance( InputArray _src1, InputArray _src2,
                         OutputArray _dist, int dtype, OutputArray _nidx,
                         int normType, int K, InputArray _mask,
@@ -1769,7 +1769,7 @@ void cv::batchDistance( InputArray _src1, InputArray _src2,
     CV_Assert( type == src2.type() && src1.cols == src2.cols &&
                (type == CV_32F || type == CV_8U));
     CV_Assert( _nidx.needed() == (K > 0) );
-    
+
     if( dtype == -1 )
     {
         dtype = normType == NORM_HAMMING || normType == NORM_HAMMING2 ? CV_32S : CV_32F;
@@ -1777,7 +1777,7 @@ void cv::batchDistance( InputArray _src1, InputArray _src2,
     CV_Assert( (type == CV_8U && dtype == CV_32S) || dtype == CV_32F);
 
     K = std::min(K, src2.rows);
-    
+
     _dist.create(src1.rows, (K > 0 ? K : src2.rows), dtype);
     Mat dist = _dist.getMat(), nidx;
     if( _nidx.needed() )
@@ -1785,19 +1785,19 @@ void cv::batchDistance( InputArray _src1, InputArray _src2,
         _nidx.create(dist.size(), CV_32S);
         nidx = _nidx.getMat();
     }
-    
+
     if( update == 0 && K > 0 )
     {
         dist = Scalar::all(dtype == CV_32S ? (double)INT_MAX : (double)FLT_MAX);
         nidx = Scalar::all(-1);
     }
-    
+
     if( crosscheck )
     {
         CV_Assert( K == 1 && update == 0 && mask.empty() );
         Mat tdist, tidx;
         batchDistance(src2, src1, tdist, dtype, tidx, normType, K, mask, 0, false);
-        
+
         // if an idx-th element from src1 appeared to be the nearest to i-th element of src2,
         // we update the minimum mutual distance between idx-th element of src1 and the whole src2 set.
         // As a result, if nidx[idx] = i*, it means that idx-th element of src1 is the nearest
@@ -1832,7 +1832,7 @@ void cv::batchDistance( InputArray _src1, InputArray _src2,
         }
         return;
     }
-    
+
     BatchDistFunc func = 0;
     if( type == CV_8U )
     {
@@ -1860,12 +1860,12 @@ void cv::batchDistance( InputArray _src1, InputArray _src2,
         else if( normType == NORM_L2 )
             func = (BatchDistFunc)batchDistL2_32f;
     }
-    
+
     if( func == 0 )
         CV_Error_(CV_StsUnsupportedFormat,
                   ("The combination of type=%d, dtype=%d and normType=%d is not supported",
                    type, dtype, normType));
-    
+
     parallel_for(BlockedRange(0, src1.rows),
                  BatchDistInvoker(src1, src2, dist, nidx, K, mask, update, func));
 }
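
The per-type kernels above feed cv::norm and cv::batchDistance; typical calls on made-up data:

    #include <opencv2/core/core.hpp>

    static void normDemo()
    {
        cv::Mat a = cv::Mat::ones(1, 16, CV_32F);
        cv::Mat b = cv::Mat::zeros(1, 16, CV_32F);
        double l2 = cv::norm(a, b, cv::NORM_L2);   // sqrt(16) == 4
        double l1 = cv::norm(a, cv::NORM_L1);      // 16

        // NORM_HAMMING is only defined for CV_8U input (see the CV_Assert above)
        cv::Mat d1 = (cv::Mat_<uchar>(1, 4) << 0x0F, 0x00, 0xFF, 0x01);
        cv::Mat d2 = cv::Mat::zeros(1, 4, CV_8U);
        double h = cv::norm(d1, d2, cv::NORM_HAMMING);   // popcount of the xor: 13
        (void)l2; (void)l1; (void)h;
    }
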
index b8c46fa..b48467c 100644
@@ -88,7 +88,7 @@
 #if defined __linux__ || defined __APPLE__
 #include <unistd.h>
 #include <stdio.h>
-#include <sys/types.h> 
+#include <sys/types.h>
 #if defined ANDROID
 #include <sys/sysconf.h>
 #else
@@ -111,7 +111,7 @@ Exception::~Exception() throw() {}
 
 /*!
  \return the error description and the context as a text string.
- */ 
+ */
 const char* Exception::what() const throw() { return msg.c_str(); }
 
 void Exception::formatMessage()
@@ -121,7 +121,7 @@ void Exception::formatMessage()
     else
         msg = format("%s:%d: error: (%d) %s\n", file.c_str(), line, code, err.c_str());
 }
-    
+
 struct HWFeatures
 {
     enum { MAX_FEATURE = CV_HARDWARE_MAX_FEATURE };
@@ -374,7 +374,7 @@ int getThreadNum(void)
 #endif
 }
 
-#if ANDROID
+#ifdef ANDROID
 static inline int getNumberOfCPUsImpl()
 {
    FILE* cpuPossible = fopen("/sys/devices/system/cpu/possible", "r");
@@ -408,7 +408,7 @@ static inline int getNumberOfCPUsImpl()
           sscanf(pos, "%d-%d", &rstart, &rend);
           cpusAvailable += rend - rstart + 1;
       }
-      
+
    }
    return cpusAvailable ? cpusAvailable : 1;
 }
@@ -419,9 +419,9 @@ int getNumberOfCPUs(void)
 #if defined WIN32 || defined _WIN32
     SYSTEM_INFO sysinfo;
     GetSystemInfo( &sysinfo );
-    
+
     return (int)sysinfo.dwNumberOfProcessors;
-#elif ANDROID
+#elif defined ANDROID
     static int ncpus = getNumberOfCPUsImpl();
     printf("CPUS= %d\n", ncpus);
     return ncpus;
@@ -430,24 +430,24 @@ int getNumberOfCPUs(void)
 #elif defined __APPLE__
     int numCPU=0;
     int mib[4];
-    size_t len = sizeof(numCPU); 
-    
+    size_t len = sizeof(numCPU);
+
     /* set the mib for hw.ncpu */
     mib[0] = CTL_HW;
     mib[1] = HW_AVAILCPU;  // alternatively, try HW_NCPU;
-    
+
     /* get the number of CPUs from the system */
     sysctl(mib, 2, &numCPU, &len, NULL, 0);
-    
-    if( numCPU < 1 ) 
+
+    if( numCPU < 1 )
     {
         mib[1] = HW_NCPU;
         sysctl( mib, 2, &numCPU, &len, NULL, 0 );
-        
+
         if( numCPU < 1 )
             numCPU = 1;
     }
-    
+
     return (int)numCPU;
 #else
     return 1;
@@ -475,7 +475,7 @@ string tempfile( const char* suffix )
 {
     char buf[L_tmpnam];
     char* name = 0;
-#if ANDROID
+#ifdef ANDROID
     strcpy(buf, "/sdcard/__opencv_temp_XXXXXX");
     name = mktemp(buf);
 #else
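
On the #if ANDROID -> #ifdef ANDROID changes in this file: with a stricter warning set that includes -Wundef, `#if` on a macro that is not defined is reported (it silently evaluates to 0), whereas `#ifdef` only asks whether the macro is defined at all. Illustration with a hypothetical macro:

    // g++ -Wundef -c example.cpp
    // #if MY_PLATFORM       // would warn under -Wundef when MY_PLATFORM is undefined
    #ifdef MY_PLATFORM       // fine: merely tests whether the macro is defined
    static const int onMyPlatform = 1;
    #else
    static const int onMyPlatform = 0;
    #endif
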
index bd12fd5..91c9727 100644
@@ -45,11 +45,11 @@ static void  cvTsClearSimpleSeq( CvTsSimpleSeq* seq )
 static void cvTsSimpleSeqShiftAndCopy( CvTsSimpleSeq* seq, int from_idx, int to_idx, void* elem=0 )
 {
     int elem_size = seq->elem_size;
-    
+
     if( from_idx == to_idx )
         return;
     assert( (from_idx > to_idx && !elem) || (from_idx < to_idx && elem) );
-    
+
     if( from_idx < seq->count )
     {
         memmove( seq->array + to_idx*elem_size, seq->array + from_idx*elem_size,
@@ -64,7 +64,7 @@ static void cvTsSimpleSeqInvert( CvTsSimpleSeq* seq )
 {
     int i, k, len = seq->count, elem_size = seq->elem_size;
     schar *data = seq->array, t;
-    
+
     for( i = 0; i < len/2; i++ )
     {
         schar* a = data + i*elem_size;
@@ -92,7 +92,7 @@ static void  cvTsClearSimpleSet( CvTsSimpleSet* set_header )
 {
     int i;
     int elem_size = set_header->elem_size;
-    
+
     for( i = 0; i < set_header->max_count; i++ )
     {
         set_header->array[i*elem_size] = 0;
@@ -111,7 +111,7 @@ static CvTsSimpleSet*  cvTsCreateSimpleSet( int max_count, int elem_size )
     set_header->max_count = max_count;
     set_header->free_stack = (int*)(set_header + 1);
     set_header->array = (schar*)(set_header->free_stack + max_count);
-    
+
     cvTsClearSimpleSet( set_header );
     return set_header;
 }
@@ -135,7 +135,7 @@ static int  cvTsSimpleSetAdd( CvTsSimpleSet* set_header, void* elem )
 {
     int idx, idx2;
     assert( set_header->free_count > 0 );
-    
+
     idx = set_header->free_stack[--set_header->free_count];
     idx2 = idx * set_header->elem_size;
     assert( set_header->array[idx2] == 0 );
@@ -143,7 +143,7 @@ static int  cvTsSimpleSetAdd( CvTsSimpleSet* set_header, void* elem )
     if( set_header->elem_size > 1 )
         memcpy( set_header->array + idx2 + 1, elem, set_header->elem_size - 1 );
     set_header->count = MAX( set_header->count, idx + 1 );
-    
+
     return idx;
 }
 
@@ -153,7 +153,7 @@ static void  cvTsSimpleSetRemove( CvTsSimpleSet* set_header, int index )
     assert( set_header->free_count < set_header->max_count &&
            0 <= index && index < set_header->max_count );
     assert( set_header->array[index * set_header->elem_size] == 1 );
-    
+
     set_header->free_stack[set_header->free_count++] = index;
     set_header->array[index * set_header->elem_size] = 0;
 }
@@ -184,7 +184,7 @@ static CvTsSimpleGraph*  cvTsCreateSimpleGraph( int max_vtx_count, int vtx_size,
                                                int edge_size, int oriented )
 {
     CvTsSimpleGraph* graph;
-    
+
     assert( max_vtx_count > 1 && vtx_size >= 0 && edge_size >= 0 );
     graph = (CvTsSimpleGraph*)cvAlloc( sizeof(*graph) +
                                       max_vtx_count * max_vtx_count * (edge_size + 1));
@@ -192,7 +192,7 @@ static CvTsSimpleGraph*  cvTsCreateSimpleGraph( int max_vtx_count, int vtx_size,
     graph->edge_size = edge_size + 1;
     graph->matrix = (char*)(graph + 1);
     graph->oriented = oriented;
-    
+
     cvTsClearSimpleGraph( graph );
     return graph;
 }
@@ -219,7 +219,7 @@ static void  cvTsSimpleGraphRemoveVertex( CvTsSimpleGraph* graph, int index )
     int i, max_vtx_count = graph->vtx->max_count;
     int edge_size = graph->edge_size;
     cvTsSimpleSetRemove( graph->vtx, index );
-    
+
     /* remove all the corresponding edges */
     for( i = 0; i < max_vtx_count; i++ )
     {
@@ -232,10 +232,10 @@ static void  cvTsSimpleGraphRemoveVertex( CvTsSimpleGraph* graph, int index )
 static void cvTsSimpleGraphAddEdge( CvTsSimpleGraph* graph, int idx1, int idx2, void* edge )
 {
     int i, t, n = graph->oriented ? 1 : 2;
-    
+
     assert( cvTsSimpleSetFind( graph->vtx, idx1 ) &&
            cvTsSimpleSetFind( graph->vtx, idx2 ));
-    
+
     for( i = 0; i < n; i++ )
     {
         int ofs = (idx1*graph->vtx->max_count + idx2)*graph->edge_size;
@@ -243,7 +243,7 @@ static void cvTsSimpleGraphAddEdge( CvTsSimpleGraph* graph, int idx1, int idx2,
         graph->matrix[ofs] = 1;
         if( graph->edge_size > 1 )
             memcpy( graph->matrix + ofs + 1, edge, graph->edge_size - 1 );
-        
+
         CV_SWAP( idx1, idx2, t );
     }
 }
@@ -252,10 +252,10 @@ static void cvTsSimpleGraphAddEdge( CvTsSimpleGraph* graph, int idx1, int idx2,
 static void  cvTsSimpleGraphRemoveEdge( CvTsSimpleGraph* graph, int idx1, int idx2 )
 {
     int i, t, n = graph->oriented ? 1 : 2;
-    
+
     assert( cvTsSimpleSetFind( graph->vtx, idx1 ) &&
            cvTsSimpleSetFind( graph->vtx, idx2 ));
-    
+
     for( i = 0; i < n; i++ )
     {
         int ofs = (idx1*graph->vtx->max_count + idx2)*graph->edge_size;
@@ -290,13 +290,13 @@ static int  cvTsSimpleGraphVertexDegree( CvTsSimpleGraph* graph, int index )
     int edge_size = graph->edge_size;
     int max_vtx_count = graph->vtx->max_count;
     assert( cvTsSimpleGraphFindVertex( graph, index ) != 0 );
-    
+
     for( i = 0; i < max_vtx_count; i++ )
     {
         count += graph->matrix[(i*max_vtx_count + index)*edge_size] +
         graph->matrix[(index*max_vtx_count + i)*edge_size];
     }
-    
+
     if( !graph->oriented )
     {
         assert( count % 2 == 0 );
@@ -323,7 +323,7 @@ public:
     virtual ~Core_DynStructBaseTest();
     bool can_do_fast_forward();
     void clear();
-    
+
 protected:
     int read_params( CvFileStorage* fs );
     void run_func(void);
@@ -332,7 +332,7 @@ protected:
                            const char* file, int line );
     int test_seq_block_consistence( int _struct_idx, CvSeq* seq, int total );
     void update_progressbar();
-    
+
     int struct_count, max_struct_size, iterations, generations;
     int min_log_storage_block_size, max_log_storage_block_size;
     int min_log_elem_size, max_log_elem_size;
@@ -358,7 +358,7 @@ Core_DynStructBaseTest::Core_DynStructBaseTest()
     iterations = max_struct_size*2;
     gen = struct_idx = iter = -1;
     test_progress = -1;
-    
+
     storage = 0;
 }
 
@@ -391,33 +391,33 @@ int Core_DynStructBaseTest::read_params( CvFileStorage* fs )
     double sqrt_scale = sqrt(ts->get_test_case_count_scale());
     if( code < 0 )
         return code;
-    
+
     struct_count = cvReadInt( find_param( fs, "struct_count" ), struct_count );
     max_struct_size = cvReadInt( find_param( fs, "max_struct_size" ), max_struct_size );
     generations = cvReadInt( find_param( fs, "generations" ), generations );
     iterations = cvReadInt( find_param( fs, "iterations" ), iterations );
     generations = cvRound(generations*sqrt_scale);
     iterations = cvRound(iterations*sqrt_scale);
-    
+
     min_log_storage_block_size = cvReadInt( find_param( fs, "min_log_storage_block_size" ),
                                            min_log_storage_block_size );
     max_log_storage_block_size = cvReadInt( find_param( fs, "max_log_storage_block_size" ),
                                            max_log_storage_block_size );
     min_log_elem_size = cvReadInt( find_param( fs, "min_log_elem_size" ), min_log_elem_size );
     max_log_elem_size = cvReadInt( find_param( fs, "max_log_elem_size" ), max_log_elem_size );
-    
+
     struct_count = cvtest::clipInt( struct_count, 1, 100 );
     max_struct_size = cvtest::clipInt( max_struct_size, 1, 1<<20 );
     generations = cvtest::clipInt( generations, 1, 100 );
     iterations = cvtest::clipInt( iterations, 100, 1<<20 );
-    
+
     min_log_storage_block_size = cvtest::clipInt( min_log_storage_block_size, 7, 20 );
     max_log_storage_block_size = cvtest::clipInt( max_log_storage_block_size,
                                              min_log_storage_block_size, 20 );
-    
+
     min_log_elem_size = cvtest::clipInt( min_log_elem_size, 0, 8 );
     max_log_elem_size = cvtest::clipInt( max_log_elem_size, min_log_elem_size, 10 );
-    
+
     return 0;
 }
 
@@ -425,14 +425,14 @@ int Core_DynStructBaseTest::read_params( CvFileStorage* fs )
 void Core_DynStructBaseTest::update_progressbar()
 {
     int64 t;
-    
+
     if( test_progress < 0 )
     {
         test_progress = 0;
         cpu_freq = cv::getTickFrequency();
         start_time = cv::getTickCount();
     }
-    
+
     t = cv::getTickCount();
     test_progress = update_progress( test_progress, 0, 0, (double)(t - start_time)/cpu_freq );
 }
@@ -453,16 +453,16 @@ int Core_DynStructBaseTest::test_seq_block_consistence( int _struct_idx, CvSeq*
 {
     int sum = 0;
     struct_idx = _struct_idx;
-    
+
     CV_TS_SEQ_CHECK_CONDITION( seq != 0, "Null sequence pointer" );
-    
+
     if( seq->first )
     {
         CvSeqBlock* block = seq->first;
         CvSeqBlock* prev_block = block->prev;
-        
+
         int delta_idx = seq->first->start_index;
-        
+
         for( ;; )
         {
             CV_TS_SEQ_CHECK_CONDITION( sum == block->start_index - delta_idx &&
@@ -474,15 +474,15 @@ int Core_DynStructBaseTest::test_seq_block_consistence( int _struct_idx, CvSeq*
             block = block->next;
             if( block == seq->first ) break;
         }
-        
+
         CV_TS_SEQ_CHECK_CONDITION( block->prev->count * seq->elem_size +
                                   block->prev->data <= seq->block_max,
                                   "block->data or block_max pointer are incorrect" );
     }
-    
+
     CV_TS_SEQ_CHECK_CONDITION( seq->total == sum && sum == total,
                               "total number of elements is incorrect" );
-    
+
     return 0;
 }
 
@@ -495,7 +495,7 @@ public:
     Core_SeqBaseTest();
     void clear();
     void run( int );
-    
+
 protected:
     int test_multi_create();
     int test_get_seq_elem( int _struct_idx, int iters );
@@ -524,20 +524,20 @@ int Core_SeqBaseTest::test_multi_create()
     vector<int> index(struct_count);
     int  cur_count, elem_size;
     RNG& rng = ts->get_rng();
-    
+
     for( int i = 0; i < struct_count; i++ )
     {
         double t;
         CvTsSimpleSeq* sseq;
-        
+
         pos[i] = -1;
         index[i] = i;
-        
+
         t = cvtest::randReal(rng)*(max_log_elem_size - min_log_elem_size) + min_log_elem_size;
         elem_size = cvRound( exp(t * CV_LOG2) );
         elem_size = MIN( elem_size, (int)(storage->block_size - sizeof(void*) -
                                           sizeof(CvSeqBlock) - sizeof(CvMemBlock)) );
-        
+
         cvTsReleaseSimpleSeq( (CvTsSimpleSeq**)&simple_struct[i] );
         simple_struct[i] = sseq = cvTsCreateSimpleSeq( max_struct_size, elem_size );
         cxcore_struct[i] = 0;
@@ -545,7 +545,7 @@ int Core_SeqBaseTest::test_multi_create()
         Mat m( 1, MAX(sseq->count,1)*elem_size, CV_8UC1, sseq->array );
         cvtest::randUni( rng, m, Scalar::all(0), Scalar::all(256) );
     }
-    
+
     for( cur_count = struct_count; cur_count > 0; cur_count-- )
     {
         for(;;)
@@ -553,13 +553,13 @@ int Core_SeqBaseTest::test_multi_create()
             int k = cvtest::randInt( rng ) % cur_count;
             struct_idx = index[k];
             CvTsSimpleSeq* sseq = (CvTsSimpleSeq*)simple_struct[struct_idx];
-            
+
             if( pos[struct_idx] < 0 )
             {
                 int hdr_size = (cvtest::randInt(rng) % 10)*4 + sizeof(CvSeq);
                 hdr_size = MIN( hdr_size, (int)(storage->block_size - sizeof(CvMemBlock)) );
                 elem_size = sseq->elem_size;
-                
+
                 if( cvtest::randInt(rng) % 2 )
                 {
                     cvStartWriteSeq( 0, hdr_size, elem_size, storage, &writer[struct_idx] );
@@ -570,11 +570,11 @@ int Core_SeqBaseTest::test_multi_create()
                     s = cvCreateSeq( 0, hdr_size, elem_size, storage );
                     cvStartAppendToSeq( s, &writer[struct_idx] );
                 }
-                
+
                 cvSetSeqBlockSize( writer[struct_idx].seq, cvtest::randInt( rng ) % 10000 );
                 pos[struct_idx] = 0;
             }
-            
+
             update_progressbar();
             if( pos[struct_idx] == sseq->count )
             {
@@ -584,7 +584,7 @@ int Core_SeqBaseTest::test_multi_create()
                     index[k] = index[k+1];
                 break;
             }
-            
+
             {
                 schar* el = cvTsSimpleSeqElem( sseq, pos[struct_idx] );
                 CV_WRITE_SEQ_ELEM_VAR( el, writer[struct_idx] );
@@ -592,7 +592,7 @@ int Core_SeqBaseTest::test_multi_create()
             pos[struct_idx]++;
         }
     }
-    
+
     return 0;
 }
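
For reference, a minimal sketch of the writer pattern that test_multi_create() exercises above: open a CvSeqWriter on a storage, append elements with CV_WRITE_SEQ_ELEM, and finalize with cvEndWriteSeq. The int payload and element count are illustrative:

#include <opencv2/core/core_c.h>

static CvSeq* build_int_seq( CvMemStorage* storage )
{
    CvSeqWriter writer;
    cvStartWriteSeq( 0, sizeof(CvSeq), sizeof(int), storage, &writer );

    for( int i = 0; i < 10; i++ )
    {
        int value = i * i;                  /* illustrative payload */
        CV_WRITE_SEQ_ELEM( value, writer ); /* append one element */
    }
    return cvEndWriteSeq( &writer );        /* flush blocks, return the CvSeq* */
}

The storage would typically come from cvCreateMemStorage(0) and be released with cvReleaseMemStorage(&storage) once the sequence is no longer needed.
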
 
@@ -600,16 +600,16 @@ int Core_SeqBaseTest::test_multi_create()
 int  Core_SeqBaseTest::test_get_seq_elem( int _struct_idx, int iters )
 {
     RNG& rng = ts->get_rng();
-    
+
     CvSeq* seq = (CvSeq*)cxcore_struct[_struct_idx];
     CvTsSimpleSeq* sseq = (CvTsSimpleSeq*)simple_struct[_struct_idx];
     struct_idx = _struct_idx;
-    
+
     assert( seq->total == sseq->count );
-    
+
     if( sseq->count == 0 )
         return 0;
-    
+
     for( int i = 0; i < iters; i++ )
     {
         int idx = cvtest::randInt(rng) % (sseq->count*3) - sseq->count*3/2;
@@ -618,7 +618,7 @@ int  Core_SeqBaseTest::test_get_seq_elem( int _struct_idx, int iters )
         int bad_range = (unsigned)idx0 >= (unsigned)(sseq->count);
         schar* elem;
          elem = cvGetSeqElem( seq, idx );
-        
+
         if( bad_range )
         {
             CV_TS_SEQ_CHECK_CONDITION( elem == 0,
@@ -630,13 +630,13 @@ int  Core_SeqBaseTest::test_get_seq_elem( int _struct_idx, int iters )
             CV_TS_SEQ_CHECK_CONDITION( elem != 0 &&
                                       !memcmp( elem, cvTsSimpleSeqElem(sseq, idx0), sseq->elem_size ),
                                       "cvGetSeqElem returns wrong element" );
-            
+
              idx = cvSeqElemIdx(seq, elem );
             CV_TS_SEQ_CHECK_CONDITION( idx >= 0 && idx == idx0,
                                       "cvSeqElemIdx is incorrect" );
         }
     }
-    
+
     return 0;
 }
 
@@ -651,43 +651,43 @@ int  Core_SeqBaseTest::test_get_seq_reading( int _struct_idx, int iters )
     CvSeqReader reader;
     vector<schar> _elem(sseq->elem_size);
     schar* elem = &_elem[0];
-    
+
     assert( total == sseq->count );
     this->struct_idx = _struct_idx;
-    
+
     int pos = cvtest::randInt(rng) % 2;
     cvStartReadSeq( seq, &reader, pos );
-    
+
     if( total == 0 )
     {
         CV_TS_SEQ_CHECK_CONDITION( reader.ptr == 0, "Empty sequence reader pointer is not NULL" );
         return 0;
     }
-    
+
     pos = pos ? seq->total - 1 : 0;
-    
+
     CV_TS_SEQ_CHECK_CONDITION( pos == cvGetSeqReaderPos(&reader),
                               "initial reader position is wrong" );
-    
+
     for( iter = 0; iter < iters; iter++ )
     {
         int op = cvtest::randInt(rng) % max_val;
-        
+
         if( op >= max_val - 2 )
         {
             int new_pos, new_pos0;
             int bad_range;
             int is_relative = op == max_val - 1;
-            
+
             new_pos = cvtest::randInt(rng) % (total*2) - total;
             new_pos0 = new_pos + (is_relative ? pos : 0 );
-            
+
             if( new_pos0 < 0 ) new_pos0 += total;
             if( new_pos0 >= total ) new_pos0 -= total;
-            
+
             bad_range = (unsigned)new_pos0 >= (unsigned)total;
              cvSetSeqReaderPos( &reader, new_pos, is_relative );
-            
+
             if( !bad_range )
             {
                 CV_TS_SEQ_CHECK_CONDITION( new_pos0 == cvGetSeqReaderPos( &reader ),
@@ -704,7 +704,7 @@ int  Core_SeqBaseTest::test_get_seq_reading( int _struct_idx, int iters )
         {
             int direction = (op % 3) - 1;
             memcpy( elem, reader.ptr, sseq->elem_size );
-            
+
             if( direction > 0 )
             {
                 CV_NEXT_SEQ_ELEM( sseq->elem_size, reader );
@@ -713,18 +713,18 @@ int  Core_SeqBaseTest::test_get_seq_reading( int _struct_idx, int iters )
             {
                 CV_PREV_SEQ_ELEM( sseq->elem_size, reader );
             }
-            
+
             CV_TS_SEQ_CHECK_CONDITION( memcmp(elem, cvTsSimpleSeqElem(sseq, pos),
                                               sseq->elem_size) == 0, "reading is incorrect" );
             pos += direction;
             if( -pos > 0 ) pos += total;
             if( pos >= total ) pos -= total;
-            
+
             CV_TS_SEQ_CHECK_CONDITION( pos == cvGetSeqReaderPos( &reader ),
                                       "reader doesn't move correctly after reading" );
         }
     }
-    
+
     return 0;
 }
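
Similarly, a minimal sketch of the reader pattern checked by test_get_seq_reading(): start a CvSeqReader, copy each element out, and advance with CV_NEXT_SEQ_ELEM (the test above additionally repositions with cvSetSeqReaderPos and queries cvGetSeqReaderPos). A sequence of ints is assumed:

#include <opencv2/core/core_c.h>
#include <string.h>

static int sum_int_seq( const CvSeq* seq )
{
    CvSeqReader reader;
    int sum = 0;
    cvStartReadSeq( seq, &reader, 0 );           /* 0 = start at the first element */

    for( int i = 0; i < seq->total; i++ )
    {
        int value;
        memcpy( &value, reader.ptr, sizeof(int) );
        sum += value;
        CV_NEXT_SEQ_ELEM( sizeof(int), reader ); /* advance, hopping over block ends */
    }
    return sum;
}
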
 
@@ -735,14 +735,14 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
     int max_elem_size = 0;
     schar* elem2 = 0;
     RNG& rng = ts->get_rng();
-    
+
     for( int i = 0; i < struct_count; i++ )
         max_elem_size = MAX( max_elem_size, ((CvSeq*)cxcore_struct[i])->elem_size );
-    
+
     vector<schar> elem_buf(max_struct_size*max_elem_size);
     schar* elem = (schar*)&elem_buf[0];
     Mat elem_mat;
-    
+
     for( iter = 0; iter < iters; iter++ )
     {
         struct_idx = cvtest::randInt(rng) % struct_count;
@@ -751,7 +751,7 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
         CvTsSimpleSeq* sseq = (CvTsSimpleSeq*)simple_struct[struct_idx];
         int elem_size = sseq->elem_size;
         int whence = 0, pos = 0, count = 0;
-        
+
         switch( op )
         {
             case 0:
@@ -759,10 +759,10 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
             case 2:  // push/pushfront/insert
                 if( sseq->count == sseq->max_count )
                     break;
-                
+
                 elem_mat = Mat(1, elem_size, CV_8U, elem);
                 cvtest::randUni( rng, elem_mat, cvScalarAll(0), cvScalarAll(255) );
-                
+
                 whence = op - 1;
                 if( whence < 0 )
                 {
@@ -779,7 +779,7 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
                     pos = cvtest::randInt(rng) % (sseq->count + 1);
                     cvSeqInsert( seq, pos, elem );
                 }
-                
+
                 cvTsSimpleSeqShiftAndCopy( sseq, pos, pos + 1, elem );
                 elem2 = cvGetSeqElem( seq, pos );
                 CV_TS_SEQ_CHECK_CONDITION( elem2 != 0, "The inserted element could not be retrieved" );
@@ -787,13 +787,13 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
                                           memcmp(elem2, cvTsSimpleSeqElem(sseq,pos), elem_size) == 0,
                                           "The inserted sequence element is wrong" );
                 break;
-                
+
             case 3:
             case 4:
             case 5: // pop/popfront/remove
                 if( sseq->count == 0 )
                     break;
-                
+
                 whence = op - 4;
                 if( whence < 0 )
                 {
@@ -810,19 +810,19 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
                     pos = cvtest::randInt(rng) % sseq->count;
                      cvSeqRemove( seq, pos );
                 }
-                
+
                 if( whence != 0 )
                     CV_TS_SEQ_CHECK_CONDITION( seq->total == sseq->count - 1 &&
                                               memcmp( elem, cvTsSimpleSeqElem(sseq,pos), elem_size) == 0,
                                               "The popped sequence element isn't correct" );
-                
+
                 cvTsSimpleSeqShiftAndCopy( sseq, pos + 1, pos );
-                
+
                 if( sseq->count > 0 )
                 {
                      elem2 = cvGetSeqElem( seq, pos < sseq->count ? pos : -1 );
                     CV_TS_SEQ_CHECK_CONDITION( elem2 != 0, "GetSeqElem fails after removing the element" );
-                    
+
                     CV_TS_SEQ_CHECK_CONDITION( memcmp( elem2,
                                                       cvTsSimpleSeqElem(sseq, pos - (pos == sseq->count)), elem_size) == 0,
                                               "The first shifted element is not correct after removing another element" );
@@ -833,17 +833,17 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
                                               "The sequence doesn't become empty after the final remove" );
                 }
                 break;
-                
+
             case 6:
             case 7:
             case 8: // push [front] multi/insert slice
                 if( sseq->count == sseq->max_count )
                     break;
-                
+
                 count = cvtest::randInt( rng ) % (sseq->max_count - sseq->count + 1);
                 elem_mat = Mat(1, MAX(count,1) * elem_size, CV_8U, elem);
                 cvtest::randUni( rng, elem_mat, cvScalarAll(0), cvScalarAll(255) );
-                
+
                 whence = op - 7;
                 pos = whence < 0 ? 0 : whence > 0 ? sseq->count : cvtest::randInt(rng) % (sseq->count+1);
                 if( whence != 0 )
@@ -858,11 +858,11 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
                                                      sseq->elem_size,
                                                      elem, count,
                                                      &header, &block );
-                    
+
                     cvSeqInsertSlice( seq, pos, &header );
                 }
                 cvTsSimpleSeqShiftAndCopy( sseq, pos, pos + count, elem );
-                
+
                 if( sseq->count > 0 )
                 {
                     // choose the random element among the added
@@ -879,22 +879,22 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
                                               "Adding no elements to empty sequence fails" );
                 }
                 break;
-                
+
             case 9:
             case 10:
             case 11: // pop [front] multi
                 if( sseq->count == 0 )
                     break;
-                
+
                 count = cvtest::randInt(rng) % (sseq->count+1);
                 whence = op - 10;
                 pos = whence < 0 ? 0 : whence > 0 ? sseq->count - count :
                 cvtest::randInt(rng) % (sseq->count - count + 1);
-                
+
                 if( whence != 0 )
                 {
                      cvSeqPopMulti( seq, elem, count, whence < 0 );
-                    
+
                     if( count > 0 )
                     {
                         CV_TS_SEQ_CHECK_CONDITION( memcmp(elem,
@@ -906,10 +906,10 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
                 {
                      cvSeqRemoveSlice( seq, cvSlice(pos, pos + count) );
                 }
-                
+
                 CV_TS_SEQ_CHECK_CONDITION( seq->total == sseq->count - count,
                                           "The popmulti left a wrong number of elements in the sequence" );
-                
+
                 cvTsSimpleSeqShiftAndCopy( sseq, pos + count, pos, 0 );
                 if( sseq->count > 0 )
                 {
@@ -929,15 +929,15 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
             {
                 CvMemStoragePos storage_pos;
                 cvSaveMemStoragePos( storage, &storage_pos );
-                
+
                 int copy_data = cvtest::randInt(rng) % 2;
                 count = cvtest::randInt(rng) % (seq->total + 1);
                 pos = cvtest::randInt(rng) % (seq->total - count + 1);
                 CvSeq* seq_slice = cvSeqSlice( seq, cvSlice(pos, pos + count), storage, copy_data );
-                
+
                 CV_TS_SEQ_CHECK_CONDITION( seq_slice && seq_slice->total == count,
                                           "cvSeqSlice returned incorrect slice" );
-                
+
                 if( count > 0 )
                 {
                     int test_idx = cvtest::randInt(rng) % count;
@@ -949,7 +949,7 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
                     CV_TS_SEQ_CHECK_CONDITION( (elem2 == elem3) ^ copy_data,
                                               "copy_data flag is handled incorrectly" );
                 }
-                
+
                 cvRestoreMemStoragePos( storage, &storage_pos );
             }
                 break;
@@ -963,16 +963,16 @@ int  Core_SeqBaseTest::test_seq_ops( int iters )
                 assert(0);
                 return -1;
         }
-        
+
         if( test_seq_block_consistence(struct_idx, seq, sseq->count) < 0 )
             return -1;
-        
+
         if( test_get_seq_elem(struct_idx, 7) < 0 )
             return -1;
-        
+
         update_progressbar();
     }
-    
+
     return 0;
 }
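
For reference, the single-element operations that test_seq_ops() drives above, collected into a minimal sketch over a sequence of ints (the values and the trailing comments are illustrative):

#include <opencv2/core/core_c.h>

static void seq_ops_demo( CvMemStorage* storage )
{
    CvSeq* seq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(int), storage );

    int a = 1, b = 2, c = 3, popped = 0;
    cvSeqPush( seq, &a );        /* append at the tail:  {1}       */
    cvSeqPushFront( seq, &b );   /* prepend at the head: {2, 1}    */
    cvSeqInsert( seq, 1, &c );   /* insert at index 1:   {2, 3, 1} */

    int* first = (int*)cvGetSeqElem( seq, 0 );  /* points at 2 */
    (void)first;

    cvSeqPop( seq, &popped );    /* removes the tail element (1)   */
    cvSeqRemove( seq, 0 );       /* removes the head element (2)   */
    /* seq now holds just {3}; cvClearSeq(seq) would empty it      */
}
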
 
@@ -984,44 +984,44 @@ void Core_SeqBaseTest::run( int )
         RNG& rng = ts->get_rng();
         int i;
         double t;
-        
+
         clear();
         test_progress = -1;
-        
+
         simple_struct.resize(struct_count, 0);
         cxcore_struct.resize(struct_count, 0);
-        
+
         for( gen = 0; gen < generations; gen++ )
         {
             struct_idx = iter = -1;
-            
+
             if( !storage )
             {
                 t = cvtest::randReal(rng)*(max_log_storage_block_size - min_log_storage_block_size)
                 + min_log_storage_block_size;
                 storage = cvCreateMemStorage( cvRound( exp(t * CV_LOG2) ) );
             }
-            
+
             iter = struct_idx = -1;
             test_multi_create();
-            
+
             for( i = 0; i < struct_count; i++ )
             {
                 if( test_seq_block_consistence(i, (CvSeq*)cxcore_struct[i],
                                                ((CvTsSimpleSeq*)simple_struct[i])->count) < 0 )
                     return;
-                
+
                 if( test_get_seq_elem( i, MAX(iterations/3,7) ) < 0 )
                     return;
-                
+
                 if( test_get_seq_reading( i, MAX(iterations/3,7) ) < 0 )
                     return;
                 update_progressbar();
             }
-            
+
             if( test_seq_ops( iterations ) < 0 )
                 return;
-            
+
             if( cvtest::randInt(rng) % 2 )
                 storage.release();
             else
@@ -1041,7 +1041,7 @@ class Core_SeqSortInvTest : public Core_SeqBaseTest
 public:
     Core_SeqSortInvTest();
     void run( int );
-    
+
 protected:
 };
 
@@ -1072,73 +1072,73 @@ void Core_SeqSortInvTest::run( int )
         double t;
         schar *elem0, *elem, *elem2;
         vector<uchar> buffer;
-        
+
         clear();
         test_progress = -1;
-        
+
         simple_struct.resize(struct_count, 0);
         cxcore_struct.resize(struct_count, 0);
-        
+
         for( gen = 0; gen < generations; gen++ )
         {
             struct_idx = iter = -1;
-            
+
             if( storage.empty() )
             {
                 t = cvtest::randReal(rng)*(max_log_storage_block_size - min_log_storage_block_size)
                 + min_log_storage_block_size;
                 storage = cvCreateMemStorage( cvRound( exp(t * CV_LOG2) ) );
             }
-            
+
             for( iter = 0; iter < iterations/10; iter++ )
             {
                 int max_size = 0;
                 test_multi_create();
-                
+
                 for( i = 0; i < struct_count; i++ )
                 {
                     CvTsSimpleSeq* sseq = (CvTsSimpleSeq*)simple_struct[i];
                     max_size = MAX( max_size, sseq->count*sseq->elem_size );
                 }
-                
+
                 buffer.resize(max_size);
-                
+
                 for( i = 0; i < struct_count; i++ )
                 {
                     CvSeq* seq = (CvSeq*)cxcore_struct[i];
                     CvTsSimpleSeq* sseq = (CvTsSimpleSeq*)simple_struct[i];
                     CvSlice slice = CV_WHOLE_SEQ;
-                    
+
                     //printf("%d. %d. %d-th size = %d\n", gen, iter, i, sseq->count );
-                    
+
                     cvSeqInvert( seq );
                     cvTsSimpleSeqInvert( sseq );
-                    
+
                     if( test_seq_block_consistence( i, seq, sseq->count ) < 0 )
                         return;
-                    
+
                     if( sseq->count > 0 && cvtest::randInt(rng) % 2 == 0 )
                     {
                         slice.end_index = cvtest::randInt(rng) % sseq->count + 1;
                         slice.start_index = cvtest::randInt(rng) % (sseq->count - slice.end_index + 1);
                         slice.end_index += slice.start_index;
                     }
-                    
+
                     cvCvtSeqToArray( seq, &buffer[0], slice );
-                    
+
                     slice.end_index = MIN( slice.end_index, sseq->count );
                     CV_TS_SEQ_CHECK_CONDITION( sseq->count == 0 || memcmp( &buffer[0],
                                                                           sseq->array + slice.start_index*sseq->elem_size,
                                                                           (slice.end_index - slice.start_index)*sseq->elem_size ) == 0,
                                               "cvSeqInvert returned wrong result" );
-                    
+
                     for( k = 0; k < (sseq->count > 0 ? 10 : 0); k++ )
                     {
                         int idx0 = cvtest::randInt(rng) % sseq->count, idx = 0;
                         elem0 = cvTsSimpleSeqElem( sseq, idx0 );
                         elem = cvGetSeqElem( seq, idx0 );
                         elem2 = cvSeqSearch( seq, elem0, k % 2 ? icvCmpSeqElems : 0, 0, &idx, seq );
-                        
+
                         CV_TS_SEQ_CHECK_CONDITION( elem != 0 &&
                                                   memcmp( elem0, elem, seq->elem_size ) == 0,
                                                   "cvSeqInvert gives incorrect result" );
@@ -1147,18 +1147,18 @@ void Core_SeqSortInvTest::run( int )
                                                   elem2 == cvGetSeqElem( seq, idx ),
                                                   "cvSeqSearch failed (linear search)" );
                     }
-                    
+
                     cvSeqSort( seq, icvCmpSeqElems, seq );
-                    
+
                     if( test_seq_block_consistence( i, seq, sseq->count ) < 0 )
                         return;
-                    
+
                     if( sseq->count > 0 )
                     {
                         // !!! This is not thread-safe !!!
                         icvCmpSeqElems2_elem_size = sseq->elem_size;
                         qsort( sseq->array, sseq->count, sseq->elem_size, icvCmpSeqElems2 );
-                        
+
                         if( cvtest::randInt(rng) % 2 == 0 )
                         {
                             slice.end_index = cvtest::randInt(rng) % sseq->count + 1;
@@ -1166,20 +1166,20 @@ void Core_SeqSortInvTest::run( int )
                             slice.end_index += slice.start_index;
                         }
                     }
-                    
+
                     cvCvtSeqToArray( seq, &buffer[0], slice );
                     CV_TS_SEQ_CHECK_CONDITION( sseq->count == 0 || memcmp( &buffer[0],
                                                                           sseq->array + slice.start_index*sseq->elem_size,
                                                                           (slice.end_index - slice.start_index)*sseq->elem_size ) == 0,
                                               "cvSeqSort returned wrong result" );
-                    
+
                     for( k = 0; k < (sseq->count > 0 ? 10 : 0); k++ )
                     {
                         int idx0 = cvtest::randInt(rng) % sseq->count, idx = 0;
                         elem0 = cvTsSimpleSeqElem( sseq, idx0 );
                         elem = cvGetSeqElem( seq, idx0 );
                         elem2 = cvSeqSearch( seq, elem0, icvCmpSeqElems, 1, &idx, seq );
-                        
+
                         CV_TS_SEQ_CHECK_CONDITION( elem != 0 &&
                                                   memcmp( elem0, elem, seq->elem_size ) == 0,
                                                   "cvSeqSort gives incorrect result" );
@@ -1189,10 +1189,10 @@ void Core_SeqSortInvTest::run( int )
                                                   "cvSeqSearch failed (binary search)" );
                     }
                 }
-                
+
                 cvClearMemStorage( storage );
             }
-            
+
             storage.release();
         }
     }
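
A minimal sketch of the sort/search pair exercised above: cvSeqSort() orders the sequence in place with a user comparator, and cvSeqSearch() then locates an element by linear or binary search. cmp_ints stands in for icvCmpSeqElems, and a sequence of ints is assumed:

#include <opencv2/core/core_c.h>
#include <stdio.h>

static int cmp_ints( const void* a, const void* b, void* userdata )
{
    int ia = *(const int*)a, ib = *(const int*)b;
    (void)userdata;                 /* unused here */
    return ia < ib ? -1 : ia > ib ? 1 : 0;
}

static void sort_and_search( CvSeq* seq, int key )
{
    cvSeqSort( seq, cmp_ints, 0 );  /* in-place sort */

    int idx = -1;
    int* found = (int*)cvSeqSearch( seq, &key, cmp_ints,
                                    1 /* is_sorted: binary search */, &idx, 0 );
    if( found )
        printf( "found %d at index %d\n", *found, idx );
}
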
@@ -1210,7 +1210,7 @@ public:
     Core_SetTest();
     void clear();
     void run( int );
-    
+
 protected:
     //int test_seq_block_consistence( int struct_idx );
     int test_set_ops( int iters );
@@ -1239,17 +1239,17 @@ int  Core_SetTest::test_set_ops( int iters )
     schar* elem_data = 0;
     RNG& rng = ts->get_rng();
     //int max_active_count = 0, mean_active_count = 0;
-    
+
     for( int i = 0; i < struct_count; i++ )
         max_elem_size = MAX( max_elem_size, ((CvSeq*)cxcore_struct[i])->elem_size );
-    
+
     vector<schar> elem_buf(max_elem_size);
     Mat elem_mat;
-    
+
     for( iter = 0; iter < iters; iter++ )
     {
         struct_idx = cvtest::randInt(rng) % struct_count;
-        
+
         CvSet* cvset = (CvSet*)cxcore_struct[struct_idx];
         CvTsSimpleSet* sset = (CvTsSimpleSet*)simple_struct[struct_idx];
         int pure_elem_size = sset->elem_size - 1;
@@ -1259,13 +1259,13 @@ int  Core_SetTest::test_set_ops( int iters )
         CvSetElem* first_free = cvset->free_elems;
         CvSetElem* next_free = first_free ? first_free->next_free : 0;
         int pass_data = 0;
-        
+
         if( iter > iters/10 && cvtest::randInt(rng)%200 == 0 ) // clear set
         {
             int prev_count = cvset->total;
             cvClearSet( cvset );
             cvTsClearSimpleSet( sset );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( cvset->active_count == 0 && cvset->total == 0 &&
                                       cvset->first == 0 && cvset->free_elems == 0 &&
                                       (cvset->free_blocks != 0 || prev_count == 0),
@@ -1276,11 +1276,11 @@ int  Core_SetTest::test_set_ops( int iters )
         {
             if( sset->free_count == 0 )
                 continue;
-            
+
             elem_mat = Mat(1, cvset->elem_size, CV_8U, &elem_buf[0]);
             cvtest::randUni( rng, elem_mat, cvScalarAll(0), cvScalarAll(255) );
             elem = (CvSetElem*)&elem_buf[0];
-            
+
             if( by_ptr )
             {
                 elem2 = cvSetNew( cvset );
@@ -1293,21 +1293,21 @@ int  Core_SetTest::test_set_ops( int iters )
                 CV_TS_SEQ_CHECK_CONDITION( elem2 != 0 && elem2->flags == idx,
                                           "cvSetAdd returned NULL pointer or a wrong index" );
             }
-            
+
             elem_data = (schar*)elem + sizeof(int);
-            
+
             if( !pass_data )
                 memcpy( (schar*)elem2 + sizeof(int), elem_data, pure_elem_size );
-            
+
             idx = elem2->flags;
             idx0 = cvTsSimpleSetAdd( sset, elem_data );
             elem3 = cvGetSetElem( cvset, idx );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( CV_IS_SET_ELEM(elem3) &&
                                       idx == idx0 && elem3 == elem2 && (!pass_data ||
                                                                         memcmp( (char*)elem3 + sizeof(int), elem_data, pure_elem_size) == 0),
                                       "The added element is not correct" );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( (!first_free || elem3 == first_free) &&
                                       (!next_free || cvset->free_elems == next_free) &&
                                       cvset->active_count == prev_count + 1,
@@ -1316,19 +1316,19 @@ int  Core_SetTest::test_set_ops( int iters )
         else if( op == 2 || op == 3 ) // remove element
         {
             idx = cvtest::randInt(rng) % sset->max_count;
-            
+
             if( sset->free_count == sset->max_count || idx >= sset->count )
                 continue;
-            
+
             elem_data = cvTsSimpleSetFind(sset, idx);
             if( elem_data == 0 )
                 continue;
-            
+
             elem = cvGetSetElem( cvset, idx );
             CV_TS_SEQ_CHECK_CONDITION( CV_IS_SET_ELEM(elem) && elem->flags == idx &&
                                       memcmp((char*)elem + sizeof(int), elem_data, pure_elem_size) == 0,
                                       "cvGetSetElem returned wrong element" );
-            
+
             if( by_ptr )
             {
                  cvSetRemoveByPtr( cvset, elem );
@@ -1337,32 +1337,32 @@ int  Core_SetTest::test_set_ops( int iters )
             {
                  cvSetRemove( cvset, idx );
             }
-            
+
             cvTsSimpleSetRemove( sset, idx );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( !CV_IS_SET_ELEM(elem) && !cvGetSetElem(cvset, idx) &&
                                       (elem->flags & CV_SET_ELEM_IDX_MASK) == idx,
                                       "cvSetRemove[ByPtr] didn't release the element properly" );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( elem->next_free == first_free &&
                                       cvset->free_elems == elem &&
                                       cvset->active_count == prev_count - 1,
                                       "The free node list has not been updated properly" );
         }
-        
+
         //max_active_count = MAX( max_active_count, cvset->active_count );
         //mean_active_count += cvset->active_count;
         CV_TS_SEQ_CHECK_CONDITION( cvset->active_count == sset->max_count - sset->free_count &&
                                   cvset->total >= cvset->active_count &&
                                   (cvset->total == 0 || cvset->total >= prev_total),
                                   "The total number of cvset elements is not correct" );
-        
+
         // CvSet and simple set do not necessarily have the same "total" (active & free) number,
         // so pass "set->total" to skip that check
         test_seq_block_consistence( struct_idx, (CvSeq*)cvset, cvset->total );
         update_progressbar();
     }
-    
+
     return 0;
 }
 
@@ -1373,19 +1373,19 @@ void Core_SetTest::run( int )
     {
         RNG& rng = ts->get_rng();
         double t;
-        
+
         clear();
         test_progress = -1;
-        
+
         simple_struct.resize(struct_count, 0);
         cxcore_struct.resize(struct_count, 0);
-        
+
         for( gen = 0; gen < generations; gen++ )
         {
             struct_idx = iter = -1;
             t = cvtest::randReal(rng)*(max_log_storage_block_size - min_log_storage_block_size) + min_log_storage_block_size;
             storage = cvCreateMemStorage( cvRound( exp(t * CV_LOG2) ) );
-            
+
             for( int i = 0; i < struct_count; i++ )
             {
                 t = cvtest::randReal(rng)*(max_log_elem_size - min_log_elem_size) + min_log_elem_size;
@@ -1395,15 +1395,15 @@ void Core_SetTest::run( int )
                 elem_size = MAX( elem_size, (int)sizeof(CvSetElem) );
                 elem_size = MIN( elem_size, (int)(storage->block_size - sizeof(void*) - sizeof(CvMemBlock) - sizeof(CvSeqBlock)) );
                 pure_elem_size = MIN( pure_elem_size, elem_size-(int)sizeof(CvSetElem) );
-                
+
                 cvTsReleaseSimpleSet( (CvTsSimpleSet**)&simple_struct[i] );
                 simple_struct[i] = cvTsCreateSimpleSet( max_struct_size, pure_elem_size );
                  cxcore_struct[i] = cvCreateSet( 0, sizeof(CvSet), elem_size, storage );
             }
-            
+
             if( test_set_ops( iterations*100 ) < 0 )
                 return;
-            
+
             storage.release();
         }
     }
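
For reference, a minimal sketch of the CvSet operations exercised by test_set_ops() above. PointElem is an illustrative element type: a CvSet element must begin with an integer field that the set reuses for its index/free-list flags, and its size must be at least sizeof(CvSetElem), which is why the test clamps elem_size the same way:

#include <opencv2/core/core_c.h>

typedef struct PointElem
{
    int    flags;   /* reserved for CvSet bookkeeping             */
    int    id;      /* user payload (illustrative)                */
    double x, y;    /* user payload (illustrative)                */
} PointElem;        /* 24 bytes: comfortably >= sizeof(CvSetElem) */

static void set_demo( CvMemStorage* storage )
{
    CvSet* cvset = cvCreateSet( 0, sizeof(CvSet), sizeof(PointElem), storage );

    PointElem tmp = { 0, 7, 1.0, 2.0 };
    CvSetElem* inserted = 0;
    int idx = cvSetAdd( cvset, (CvSetElem*)&tmp, &inserted );  /* copies tmp in */

    CvSetElem* elem = cvGetSetElem( cvset, idx );   /* look it up by index */
    if( elem && CV_IS_SET_ELEM(elem) )
        cvSetRemove( cvset, idx );                  /* back onto the free list */
}
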
@@ -1421,7 +1421,7 @@ public:
     Core_GraphTest();
     void clear();
     void run( int );
-    
+
 protected:
     //int test_seq_block_consistence( int struct_idx );
     int test_graph_ops( int iters );
@@ -1451,17 +1451,17 @@ int  Core_GraphTest::test_graph_ops( int iters )
     CvGraphEdge* edge = 0, *edge2 = 0;
     RNG& rng = ts->get_rng();
     //int max_active_count = 0, mean_active_count = 0;
-    
+
     for( i = 0; i < struct_count; i++ )
     {
         CvGraph* graph = (CvGraph*)cxcore_struct[i];
         max_elem_size = MAX( max_elem_size, graph->elem_size );
         max_elem_size = MAX( max_elem_size, graph->edges->elem_size );
     }
-    
+
     vector<schar> elem_buf(max_elem_size);
     Mat elem_mat;
-    
+
     for( iter = 0; iter < iters; iter++ )
     {
         struct_idx = cvtest::randInt(rng) % struct_count;
@@ -1479,19 +1479,19 @@ int  Core_GraphTest::test_graph_ops( int iters )
         int op = cvtest::randInt(rng) % max_op;
         int pass_data = 0, vtx_degree0 = 0, vtx_degree = 0;
         CvSetElem *first_free, *next_free;
-        
+
         if( cvtest::randInt(rng) % 200 == 0 ) // clear graph
         {
             int prev_vtx_count = graph->total, prev_edge_count = graph->edges->total;
-            
+
             cvClearGraph( graph );
             cvTsClearSimpleGraph( sgraph );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( graph->active_count == 0 && graph->total == 0 &&
                                       graph->first == 0 && graph->free_elems == 0 &&
                                       (graph->free_blocks != 0 || prev_vtx_count == 0),
                                       "The graph is not empty after clearing" );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( edges->active_count == 0 && edges->total == 0 &&
                                       edges->first == 0 && edges->free_elems == 0 &&
                                       (edges->free_blocks != 0 || prev_edge_count == 0),
@@ -1501,33 +1501,33 @@ int  Core_GraphTest::test_graph_ops( int iters )
         {
             if( sgraph->vtx->free_count == 0 )
                 continue;
-            
+
             first_free = graph->free_elems;
             next_free = first_free ? first_free->next_free : 0;
-            
+
             if( pure_vtx_size )
             {
                 elem_mat = Mat(1, graph->elem_size, CV_8U, &elem_buf[0]);
                 cvtest::randUni( rng, elem_mat, cvScalarAll(0), cvScalarAll(255) );
             }
-            
+
             vtx = (CvGraphVtx*)&elem_buf[0];
             idx0 = cvTsSimpleGraphAddVertex( sgraph, vtx + 1 );
-            
+
             pass_data = cvtest::randInt(rng) % 2;
             idx = cvGraphAddVtx( graph, pass_data ? vtx : 0, &vtx2 );
-            
+
             if( !pass_data && pure_vtx_size > 0 )
                 memcpy( vtx2 + 1, vtx + 1, pure_vtx_size );
-            
+
             vtx3 = cvGetGraphVtx( graph, idx );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( (CV_IS_SET_ELEM(vtx3) && vtx3->flags == idx &&
                                         vtx3->first == 0) || (idx == idx0 && vtx3 == vtx2 &&
                                                               (!pass_data || pure_vtx_size == 0 ||
                                                                memcmp(vtx3 + 1, vtx + 1, pure_vtx_size) == 0)),
                                       "The added element is not correct" );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( (!first_free || first_free == (CvSetElem*)vtx3) &&
                                       (!next_free || graph->free_elems == next_free) &&
                                       graph->active_count == prev_vtx_count + 1,
@@ -1538,19 +1538,19 @@ int  Core_GraphTest::test_graph_ops( int iters )
             idx = cvtest::randInt(rng) % sgraph->vtx->max_count;
             if( sgraph->vtx->free_count == sgraph->vtx->max_count || idx >= sgraph->vtx->count )
                 continue;
-            
+
             vtx_data = cvTsSimpleGraphFindVertex(sgraph, idx);
             if( vtx_data == 0 )
                 continue;
-            
+
             vtx_degree0 = cvTsSimpleGraphVertexDegree( sgraph, idx );
             first_free = graph->free_elems;
-            
+
             vtx = cvGetGraphVtx( graph, idx );
             CV_TS_SEQ_CHECK_CONDITION( CV_IS_SET_ELEM(vtx) && vtx->flags == idx &&
                                       (pure_vtx_size == 0 || memcmp( vtx + 1, vtx_data, pure_vtx_size) == 0),
                                       "cvGetGraphVtx returned wrong element" );
-            
+
             if( cvtest::randInt(rng) % 2 )
             {
                  vtx_degree = cvGraphVtxDegreeByPtr( graph, vtx );
@@ -1561,20 +1561,20 @@ int  Core_GraphTest::test_graph_ops( int iters )
                  vtx_degree = cvGraphVtxDegree( graph, idx );
                  cvGraphRemoveVtx( graph, idx );
             }
-            
+
             cvTsSimpleGraphRemoveVertex( sgraph, idx );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( vtx_degree == vtx_degree0,
                                       "Number of incident edges is different in two graph representations" );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( !CV_IS_SET_ELEM(vtx) && !cvGetGraphVtx(graph, idx) &&
                                       (vtx->flags & CV_SET_ELEM_IDX_MASK) == idx,
                                       "cvGraphRemoveVtx[ByPtr] didn't release the vertex properly" );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( graph->edges->active_count == prev_edge_count - vtx_degree,
                                       "cvGraphRemoveVtx[ByPtr] didn't remove all the incident edges "
                                       "(or removed some extra)" );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( ((CvSetElem*)vtx)->next_free == first_free &&
                                       graph->free_elems == (CvSetElem*)vtx &&
                                       graph->active_count == prev_vtx_count - 1,
@@ -1584,10 +1584,10 @@ int  Core_GraphTest::test_graph_ops( int iters )
         {
             int v_idx[2] = {0,0}, res = 0;
             int v_prev_degree[2] = {0,0}, v_degree[2] = {0,0};
-            
+
             if( sgraph->vtx->free_count >= sgraph->vtx->max_count-1 )
                 continue;
-            
+
             for( i = 0, k = 0; i < 10; i++ )
             {
                 int j = cvtest::randInt(rng) % sgraph->vtx->count;
@@ -1605,34 +1605,34 @@ int  Core_GraphTest::test_graph_ops( int iters )
                     }
                 }
             }
-            
+
             if( k < 2 )
                 continue;
-            
+
             first_free = graph->edges->free_elems;
             next_free = first_free ? first_free->next_free : 0;
-            
+
             edge = cvFindGraphEdge( graph, v_idx[0], v_idx[1] );
             CV_TS_SEQ_CHECK_CONDITION( edge == 0, "Extra edge appeared in the graph" );
-            
+
             if( pure_edge_size > 0 )
             {
                 elem_mat = Mat(1, graph->edges->elem_size, CV_8U, &elem_buf[0]);
                 cvtest::randUni( rng, elem_mat, cvScalarAll(0), cvScalarAll(255) );
             }
             edge = (CvGraphEdge*)&elem_buf[0];
-            
+
             // assign some default weight that is easy to check for
             // consistency, because an edge weight is not stored
             // in the simple graph
             edge->weight = (float)(v_idx[0] + v_idx[1]);
             pass_data = cvtest::randInt(rng) % 2;
-            
+
             vtx = cvGetGraphVtx( graph, v_idx[0] );
             vtx2 = cvGetGraphVtx( graph, v_idx[1] );
             CV_TS_SEQ_CHECK_CONDITION( vtx != 0 && vtx2 != 0 && vtx->flags == v_idx[0] &&
                                       vtx2->flags == v_idx[1], "Some of the vertices are missing" );
-            
+
             if( cvtest::randInt(rng) % 2 )
             {
                  v_prev_degree[0] = cvGraphVtxDegreeByPtr( graph, vtx );
@@ -1649,27 +1649,27 @@ int  Core_GraphTest::test_graph_ops( int iters )
                  v_degree[0] = cvGraphVtxDegree( graph, v_idx[0] );
                  v_degree[1] = cvGraphVtxDegree( graph, v_idx[1] );
             }
-            
+
             //edge3 = (CvGraphEdge*)cvGetSetElem( graph->edges, idx );
             CV_TS_SEQ_CHECK_CONDITION( res == 1 && edge2 != 0 && CV_IS_SET_ELEM(edge2) &&
                                       ((edge2->vtx[0] == vtx && edge2->vtx[1] == vtx2) ||
                                        (!CV_IS_GRAPH_ORIENTED(graph) && edge2->vtx[0] == vtx2 && edge2->vtx[1] == vtx)) &&
                                       (!pass_data || pure_edge_size == 0 || memcmp( edge2 + 1, edge + 1, pure_edge_size ) == 0),
                                       "The edge has been added incorrectly" );
-            
+
             if( !pass_data )
             {
                 if( pure_edge_size > 0 )
                     memcpy( edge2 + 1, edge + 1, pure_edge_size );
                 edge2->weight = edge->weight;
             }
-            
+
             CV_TS_SEQ_CHECK_CONDITION( v_degree[0] == v_prev_degree[0] + 1 &&
                                       v_degree[1] == v_prev_degree[1] + 1,
                                       "The vertices lists have not been updated properly" );
-            
+
             cvTsSimpleGraphAddEdge( sgraph, v_idx[0], v_idx[1], edge + 1 );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( (!first_free || first_free == (CvSetElem*)edge2) &&
                                       (!next_free || graph->edges->free_elems == next_free) &&
                                       graph->edges->active_count == prev_edge_count + 1,
@@ -1679,10 +1679,10 @@ int  Core_GraphTest::test_graph_ops( int iters )
         {
             int v_idx[2] = {0,0}, by_ptr;
             int v_prev_degree[2] = {0,0}, v_degree[2] = {0,0};
-            
+
             if( sgraph->vtx->free_count >= sgraph->vtx->max_count-1 )
                 continue;
-            
+
             edge_data = 0;
             for( i = 0, k = 0; i < 10; i++ )
             {
@@ -1704,18 +1704,18 @@ int  Core_GraphTest::test_graph_ops( int iters )
                     }
                 }
             }
-            
+
             if( k < 2 )
                 continue;
-            
+
             by_ptr = cvtest::randInt(rng) % 2;
             first_free = graph->edges->free_elems;
-            
+
             vtx = cvGetGraphVtx( graph, v_idx[0] );
             vtx2 = cvGetGraphVtx( graph, v_idx[1] );
             CV_TS_SEQ_CHECK_CONDITION( vtx != 0 && vtx2 != 0 && vtx->flags == v_idx[0] &&
                                       vtx2->flags == v_idx[1], "Some of the vertices are missing" );
-            
+
             if( by_ptr )
             {
                  edge = cvFindGraphEdgeByPtr( graph, vtx, vtx2 );
@@ -1728,15 +1728,15 @@ int  Core_GraphTest::test_graph_ops( int iters )
                  v_prev_degree[0] = cvGraphVtxDegree( graph, v_idx[0] );
                  v_prev_degree[1] = cvGraphVtxDegree( graph, v_idx[1] );
             }
-            
+
             idx = edge->flags;
-            
+
             CV_TS_SEQ_CHECK_CONDITION( edge != 0 && edge->weight == v_idx[0] + v_idx[1] &&
                                       ((edge->vtx[0] == vtx && edge->vtx[1] == vtx2) ||
                                        (!CV_IS_GRAPH_ORIENTED(graph) && edge->vtx[1] == vtx && edge->vtx[0] == vtx2)) &&
                                       (pure_edge_size == 0 || memcmp(edge + 1, edge_data, pure_edge_size) == 0),
                                       "An edge is missing or incorrect" );
-            
+
             if( by_ptr )
             {
                  cvGraphRemoveEdgeByPtr( graph, vtx, vtx2 );
@@ -1751,41 +1751,41 @@ int  Core_GraphTest::test_graph_ops( int iters )
                  v_degree[0] = cvGraphVtxDegree( graph, v_idx[0] );
                  v_degree[1] = cvGraphVtxDegree( graph, v_idx[1] );
             }
-            
+
             CV_TS_SEQ_CHECK_CONDITION( !edge2 && !CV_IS_SET_ELEM(edge),
                                       "The edge has not been removed from the edge set" );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( v_degree[0] == v_prev_degree[0] - 1 &&
                                       v_degree[1] == v_prev_degree[1] - 1,
                                       "The vertices lists have not been updated properly" );
-            
+
             cvTsSimpleGraphRemoveEdge( sgraph, v_idx[0], v_idx[1] );
-            
+
             CV_TS_SEQ_CHECK_CONDITION( graph->edges->free_elems == (CvSetElem*)edge &&
                                       graph->edges->free_elems->next_free == first_free &&
                                       graph->edges->active_count == prev_edge_count - 1,
                                       "The free edge list has not been modified properly" );
         }
-        
+
         //max_active_count = MAX( max_active_count, graph->active_count );
         //mean_active_count += graph->active_count;
-        
+
         CV_TS_SEQ_CHECK_CONDITION( graph->active_count == sgraph->vtx->max_count - sgraph->vtx->free_count &&
                                   graph->total >= graph->active_count &&
                                   (graph->total == 0 || graph->total >= prev_vtx_total),
                                   "The total number of graph vertices is not correct" );
-        
+
         CV_TS_SEQ_CHECK_CONDITION( graph->edges->total >= graph->edges->active_count &&
                                   (graph->edges->total == 0 || graph->edges->total >= prev_edge_total),
                                   "The total number of graph edges is not correct" );
-        
+
         // CvGraph and simple graph do not necessarily have the same "total" (active & free) number,
         // so pass "graph->total" (or "graph->edges->total") to skip that check
         test_seq_block_consistence( struct_idx, (CvSeq*)graph, graph->total );
         test_seq_block_consistence( struct_idx, (CvSeq*)graph->edges, graph->edges->total );
         update_progressbar();
     }
-    
+
     return 0;
 }
 
@@ -1797,22 +1797,22 @@ void Core_GraphTest::run( int )
         RNG& rng = ts->get_rng();
         int i, k;
         double t;
-        
+
         clear();
         test_progress = -1;
-        
+
         simple_struct.resize(struct_count, 0);
         cxcore_struct.resize(struct_count, 0);
-        
+
         for( gen = 0; gen < generations; gen++ )
         {
             struct_idx = iter = -1;
             t = cvtest::randReal(rng)*(max_log_storage_block_size - min_log_storage_block_size) + min_log_storage_block_size;
             int block_size = cvRound( exp(t * CV_LOG2) );
             block_size = MAX(block_size, (int)(sizeof(CvGraph) + sizeof(CvMemBlock) + sizeof(CvSeqBlock)));
-            
+
             storage = cvCreateMemStorage(block_size);
-            
+
             for( i = 0; i < struct_count; i++ )
             {
                 int pure_elem_size[2], elem_size[2];
@@ -1830,7 +1830,7 @@ void Core_GraphTest::run( int )
                     pure_elem_size[k] = pe;
                     elem_size[k] = e;
                 }
-                
+
                 cvTsReleaseSimpleGraph( (CvTsSimpleGraph**)&simple_struct[i] );
                 simple_struct[i] = cvTsCreateSimpleGraph( max_struct_size/4, pure_elem_size[0],
                                                          pure_elem_size[1], is_oriented );
@@ -1838,10 +1838,10 @@ void Core_GraphTest::run( int )
                                                           sizeof(CvGraph), elem_size[0], elem_size[1],
                                                           storage );
             }
-            
+
             if( test_graph_ops( iterations*10 ) < 0 )
                 return;
-            
+
             storage.release();
         }
     }
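
For reference, a minimal sketch of the CvGraph API driven by the tests above: build a small oriented graph, query a vertex degree, then walk every vertex and edge with a graph scanner, as Core_GraphScanTest does below. The vertex/edge counts and the printed output are illustrative:

#include <opencv2/core/core_c.h>
#include <stdio.h>

static void graph_demo(void)
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvGraph* graph = cvCreateGraph( CV_ORIENTED_GRAPH, sizeof(CvGraph),
                                    sizeof(CvGraphVtx), sizeof(CvGraphEdge),
                                    storage );

    for( int i = 0; i < 4; i++ )
        cvGraphAddVtx( graph );                    /* vertices 0..3 */

    cvGraphAddEdge( graph, 0, 1, 0, 0 );           /* 0 -> 1 */
    cvGraphAddEdge( graph, 1, 2, 0, 0 );           /* 1 -> 2 */
    cvGraphAddEdge( graph, 2, 0, 0, 0 );           /* 2 -> 0, closes a cycle */

    printf( "degree of vertex 0: %d\n", cvGraphVtxDegree( graph, 0 ) );

    CvGraphScanner* scanner = cvCreateGraphScanner( graph, 0, CV_GRAPH_ALL_ITEMS );
    for(;;)
    {
        int code = cvNextGraphItem( scanner );
        if( code < 0 )
            break;                                 /* CV_GRAPH_OVER: traversal done */
        if( code == CV_GRAPH_VERTEX )
            printf( "vertex %d\n", cvGraphVtxIdx( graph, scanner->vtx ) );
        else if( code & CV_GRAPH_ANY_EDGE )
            printf( "edge %d -> %d\n",
                    cvGraphVtxIdx( graph, scanner->edge->vtx[0] ),
                    cvGraphVtxIdx( graph, scanner->edge->vtx[1] ) );
    }
    cvReleaseGraphScanner( &scanner );
    cvReleaseMemStorage( &storage );
}
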
@@ -1858,7 +1858,7 @@ class Core_GraphScanTest : public Core_DynStructBaseTest
 public:
     Core_GraphScanTest();
     void run( int );
-    
+
 protected:
     //int test_seq_block_consistence( int struct_idx );
     int create_random_graph( int );
@@ -1879,29 +1879,29 @@ int Core_GraphScanTest::create_random_graph( int _struct_idx )
     int i, vtx_count = cvtest::randInt(rng) % max_struct_size;
     int edge_count = cvtest::randInt(rng) % MAX(vtx_count*20, 1);
     CvGraph* graph;
-    
+
     struct_idx = _struct_idx;
     cxcore_struct[_struct_idx] = graph =
         cvCreateGraph(is_oriented ? CV_ORIENTED_GRAPH : CV_GRAPH,
                       sizeof(CvGraph), sizeof(CvGraphVtx),
                       sizeof(CvGraphEdge), storage );
-    
+
     for( i = 0; i < vtx_count; i++ )
          cvGraphAddVtx( graph );
-    
+
     assert( graph->active_count == vtx_count );
-    
+
     for( i = 0; i < edge_count; i++ )
     {
         int j = cvtest::randInt(rng) % vtx_count;
         int k = cvtest::randInt(rng) % vtx_count;
-        
+
         if( j != k )
              cvGraphAddEdge( graph, j, k );
     }
-    
+
     assert( graph->active_count == vtx_count && graph->edges->active_count <= edge_count );
-    
+
     return 0;
 }
 
@@ -1915,12 +1915,12 @@ void Core_GraphScanTest::run( int )
         vector<uchar> vtx_mask, edge_mask;
         double t;
         int i;
-        
+
         clear();
         test_progress = -1;
-        
+
         cxcore_struct.resize(struct_count, 0);
-        
+
         for( gen = 0; gen < generations; gen++ )
         {
             struct_idx = iter = -1;
@@ -1930,49 +1930,49 @@ void Core_GraphScanTest::run( int )
             storage_blocksize = MAX(storage_blocksize, (int)(sizeof(CvGraphEdge) + sizeof(CvMemBlock) + sizeof(CvSeqBlock)));
             storage_blocksize = MAX(storage_blocksize, (int)(sizeof(CvGraphVtx) + sizeof(CvMemBlock) + sizeof(CvSeqBlock)));
             storage = cvCreateMemStorage(storage_blocksize);
-            
+
             if( gen == 0 )
             {
                 // special regression test for one sample graph.
                 // !!! ATTENTION !!! The test relies on the particular order of the inserted edges
                 // (LIFO: the edge inserted last goes first in the list of incident edges).
                 // if it is changed, the test will have to be modified.
-                
+
                 int vtx_count = -1, edge_count = 0, edges[][3] =
                 {
                     {0,4,'f'}, {0,1,'t'}, {1,4,'t'}, {1,2,'t'}, {2,3,'t'}, {4,3,'c'}, {3,1,'b'},
                     {5,7,'t'}, {7,5,'b'}, {5,6,'t'}, {6,0,'c'}, {7,6,'c'}, {6,4,'c'}, {-1,-1,0}
                 };
-                
+
                 CvGraph* graph = cvCreateGraph( CV_ORIENTED_GRAPH, sizeof(CvGraph),
                                                sizeof(CvGraphVtx), sizeof(CvGraphEdge), storage );
-                
+
                 for( i = 0; edges[i][0] >= 0; i++ )
                 {
                     vtx_count = MAX( vtx_count, edges[i][0] );
                     vtx_count = MAX( vtx_count, edges[i][1] );
                 }
                 vtx_count++;
-                
+
                 for( i = 0; i < vtx_count; i++ )
                      cvGraphAddVtx( graph );
-                
+
                 for( i = 0; edges[i][0] >= 0; i++ )
                 {
                     CvGraphEdge* edge;
                      cvGraphAddEdge( graph, edges[i][0], edges[i][1], 0, &edge );
                     edge->weight = (float)edges[i][2];
                 }
-                
+
                 edge_count = i;
                 scanner = cvCreateGraphScanner( graph, 0, CV_GRAPH_ALL_ITEMS );
-                
+
                 for(;;)
                 {
                     int code, a = -1, b = -1;
                     const char* event = "";
                      code = cvNextGraphItem( scanner );
-                    
+
                     switch( code )
                     {
                         case CV_GRAPH_VERTEX:
@@ -2023,16 +2023,16 @@ void Core_GraphScanTest::run( int )
                             event = "End of procedure";
                             break;
                         default:
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
     #pragma warning( push )
-    #pragma warning( disable : 4127 )    
+    #pragma warning( disable : 4127 )
 #endif
                             CV_TS_SEQ_CHECK_CONDITION( 0, "Invalid code appeared during graph scan" );
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
     #pragma warning( pop )
 #endif
                     }
-                    
+
                     ts->printf( cvtest::TS::LOG, "%s", event );
                     if( a >= 0 )
                     {
@@ -2041,48 +2041,48 @@ void Core_GraphScanTest::run( int )
                         else
                             ts->printf( cvtest::TS::LOG, ": %d", a );
                     }
-                    
+
                     ts->printf( cvtest::TS::LOG, "\n" );
-                    
+
                     if( code < 0 )
                         break;
                 }
-                
+
                 CV_TS_SEQ_CHECK_CONDITION( vtx_count == 0 && edge_count == 0,
                                           "Not every vertex/edge has been visited" );
                 update_progressbar();
             }
-            
+
             // for a random graph the test just checks that every graph vertex and
             // every edge is visited during the scan
             for( iter = 0; iter < iterations; iter++ )
             {
                 create_random_graph(0);
                 CvGraph* graph = (CvGraph*)cxcore_struct[0];
-                
+
                 // iterate twice to check that scanner doesn't damage the graph
                 for( i = 0; i < 2; i++ )
                 {
                     CvGraphVtx* start_vtx = cvtest::randInt(rng) % 2 || graph->active_count == 0 ? 0 :
                     cvGetGraphVtx( graph, cvtest::randInt(rng) % graph->active_count );
-                    
+
                     scanner = cvCreateGraphScanner( graph, start_vtx, CV_GRAPH_ALL_ITEMS );
-                    
+
                     vtx_mask.resize(0);
                     vtx_mask.resize(graph->active_count, 0);
                     edge_mask.resize(0);
                     edge_mask.resize(graph->edges->active_count, 0);
-                    
+
                     for(;;)
                     {
                         int code = cvNextGraphItem( scanner );
-                        
+
                         if( code == CV_GRAPH_OVER )
                             break;
                         else if( code & CV_GRAPH_ANY_EDGE )
                         {
                             int edge_idx = scanner->edge->flags & CV_SET_ELEM_IDX_MASK;
-                            
+
                             CV_TS_SEQ_CHECK_CONDITION( edge_idx < graph->edges->active_count &&
                                                       edge_mask[edge_idx] == 0,
                                                       "The edge is not found or visited for the second time" );
@@ -2091,16 +2091,16 @@ void Core_GraphScanTest::run( int )
                         else if( code & CV_GRAPH_VERTEX )
                         {
                             int vtx_idx = scanner->vtx->flags & CV_SET_ELEM_IDX_MASK;
-                            
+
                             CV_TS_SEQ_CHECK_CONDITION( vtx_idx < graph->active_count &&
                                                       vtx_mask[vtx_idx] == 0,
                                                       "The vtx is not found or visited for the second time" );
                             vtx_mask[vtx_idx] = 1;
                         }
                     }
-                    
+
                     cvReleaseGraphScanner( &scanner );
-                    
+
                     CV_TS_SEQ_CHECK_CONDITION( cvtest::norm(Mat(vtx_mask),CV_L1) == graph->active_count &&
                                               cvtest::norm(Mat(edge_mask),CV_L1) == graph->edges->active_count,
                                               "Some vertices or edges have not been visited" );
@@ -2108,14 +2108,14 @@ void Core_GraphScanTest::run( int )
                 }
                 cvClearMemStorage( storage );
             }
-            
+
             storage.release();
         }
     }
     catch(int)
     {
     }
-    
+
     cvReleaseGraphScanner( &scanner );
 }
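The `#if _MSC_VER >= 1200` guards rewritten above as `#if defined _MSC_VER && _MSC_VER >= 1200` (and again in several precomp.hpp headers later in this diff) read as a fix for gcc's -Wundef: testing an undefined macro in #if is exactly what that warning reports, while the `defined` guard short-circuits so the comparison is never evaluated on non-MSVC compilers. A minimal stand-alone sketch, not taken from the patch:

    /* hypothetical example; compile with: gcc -Wundef -c example.c */
    #if _MSC_VER >= 1200                       /* -Wundef: "_MSC_VER" is not defined    */
    #endif

    #if defined _MSC_VER && _MSC_VER >= 1200   /* quiet: right operand never evaluated  */
        #pragma warning( disable : 4127 )      /* MSVC-only branch, skipped by gcc      */
    #endif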
 
index c986237..eaf9d77 100644 (file)
@@ -5,7 +5,7 @@
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/features2d/features2d.hpp"
 
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
 #endif
 
index 7247b92..98759be 100644 (file)
@@ -61,7 +61,7 @@ inline int smoothedSum(const Mat& sum, const KeyPoint& pt, int y, int x)
            + sum.at<int>(img_y - HALF_KERNEL, img_x - HALF_KERNEL);
 }
 
-void pixelTests16(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
+static void pixelTests16(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
 {
     for (int i = 0; i < (int)keypoints.size(); ++i)
     {
@@ -71,7 +71,7 @@ void pixelTests16(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& d
     }
 }
 
-void pixelTests32(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
+static void pixelTests32(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
 {
     for (int i = 0; i < (int)keypoints.size(); ++i)
     {
@@ -82,7 +82,7 @@ void pixelTests32(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& d
     }
 }
 
-void pixelTests64(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
+static void pixelTests64(const Mat& sum, const std::vector<KeyPoint>& keypoints, Mat& descriptors)
 {
     for (int i = 0; i < (int)keypoints.size(); ++i)
     {
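The `static` added to the pixelTests helpers above (and to similar file-local functions later in this diff: convertBGRImageToOpponentColorSpace, keepStrongest, convertMatches, findDecoder, and others) is the usual way to satisfy gcc's -Wmissing-declarations: a non-static function defined without a prior prototype is flagged, whereas a helper with internal linkage needs no declaration and stops exporting a symbol it never meant to. A rough sketch with made-up names, not OpenCV code:

    // illustrative only; compile with: g++ -Wmissing-declarations -c example.cpp
    int publicEntry(int a, int b);         // real code would put this prototype in a header

    static int localHelper(int a, int b)   // internal linkage: no separate declaration needed
    {
        return a + b;
    }

    int publicEntry(int a, int b)
    {
        return localHelper(a, b);
    }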
index 7d8d9fc..e90bf9d 100644 (file)
@@ -56,7 +56,7 @@ DescriptorExtractor::~DescriptorExtractor()
 {}
 
 void DescriptorExtractor::compute( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const
-{    
+{
     if( image.empty() || keypoints.empty() )
     {
         descriptors.release();
@@ -102,7 +102,7 @@ Ptr<DescriptorExtractor> DescriptorExtractor::create(const string& descriptorExt
         string type = descriptorExtractorType.substr(pos);
         return new OpponentColorDescriptorExtractor(DescriptorExtractor::create(type));
     }
-    
+
     return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
 }
 
@@ -117,7 +117,7 @@ OpponentColorDescriptorExtractor::OpponentColorDescriptorExtractor( const Ptr<De
     CV_Assert( !descriptorExtractor.empty() );
 }
 
-void convertBGRImageToOpponentColorSpace( const Mat& bgrImage, vector<Mat>& opponentChannels )
+static void convertBGRImageToOpponentColorSpace( const Mat& bgrImage, vector<Mat>& opponentChannels )
 {
     if( bgrImage.type() != CV_8UC3 )
         CV_Error( CV_StsBadArg, "input image must be a BGR image of type CV_8UC3" );
@@ -227,7 +227,7 @@ void OpponentColorDescriptorExtractor::computeImpl( const Mat& bgrImage, vector<
     Mat mergedDescriptors( maxKeypointsCount, 3*descriptorSize, descriptorExtractor->descriptorType() );
     int mergedCount = 0;
     // cp - current channel position
-    size_t cp[] = {0, 0, 0}; 
+    size_t cp[] = {0, 0, 0};
     while( cp[0] < channelKeypoints[0].size() &&
            cp[1] < channelKeypoints[1].size() &&
            cp[2] < channelKeypoints[2].size() )
index 8b23242..70e795a 100644 (file)
@@ -45,7 +45,7 @@ using namespace std;
 
 namespace cv
 {
-    
+
 /*
  *  FeatureDetector
  */
@@ -95,19 +95,19 @@ Ptr<FeatureDetector> FeatureDetector::create( const string& detectorType )
         return new GridAdaptedFeatureDetector(FeatureDetector::create(
                                 detectorType.substr(strlen("Grid"))));
     }
-    
+
     if( detectorType.find("Pyramid") == 0 )
     {
         return new PyramidAdaptedFeatureDetector(FeatureDetector::create(
                                 detectorType.substr(strlen("Pyramid"))));
     }
-    
+
     if( detectorType.find("Dynamic") == 0 )
     {
         return new DynamicAdaptedFeatureDetector(AdjusterAdapter::create(
                                 detectorType.substr(strlen("Dynamic"))));
     }
-    
+
     if( detectorType.compare( "HARRIS" ) == 0 )
     {
         Ptr<FeatureDetector> fd = FeatureDetector::create("GFTT");
@@ -149,13 +149,13 @@ void GFTTDetector::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, co
 /*
  *  DenseFeatureDetector
  */
-DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featureScaleLevels, 
-                                                                         float _featureScaleMul, int _initXyStep, 
-                                                                         int _initImgBound, bool _varyXyStepWithScale, 
-                                                                         bool _varyImgBoundWithScale ) :
-       initFeatureScale(_initFeatureScale), featureScaleLevels(_featureScaleLevels),
-       featureScaleMul(_featureScaleMul), initXyStep(_initXyStep), initImgBound(_initImgBound),
-       varyXyStepWithScale(_varyXyStepWithScale), varyImgBoundWithScale(_varyImgBoundWithScale)
+DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featureScaleLevels,
+                                      float _featureScaleMul, int _initXyStep,
+                                      int _initImgBound, bool _varyXyStepWithScale,
+                                      bool _varyImgBoundWithScale ) :
+    initFeatureScale(_initFeatureScale), featureScaleLevels(_featureScaleLevels),
+    featureScaleMul(_featureScaleMul), initXyStep(_initXyStep), initImgBound(_initImgBound),
+    varyXyStepWithScale(_varyXyStepWithScale), varyImgBoundWithScale(_varyImgBoundWithScale)
 {}
 
 
@@ -203,7 +203,7 @@ struct ResponseComparator
     }
 };
 
-void keepStrongest( int N, vector<KeyPoint>& keypoints )
+static void keepStrongest( int N, vector<KeyPoint>& keypoints )
 {
     if( (int)keypoints.size() > N )
     {
index 6f9e07c..508e034 100644 (file)
@@ -42,8 +42,7 @@
 
 #include "precomp.hpp"
 
-namespace cv
-{
+using namespace cv;
 
 /////////////////////// AlgorithmInfo for various detector & descriptors ////////////////////////////
 
@@ -54,7 +53,7 @@ namespace cv
 
 CV_INIT_ALGORITHM(BriefDescriptorExtractor, "Feature2D.BRIEF",
                   obj.info()->addParam(obj, "bytes", obj.bytes_));
-    
+
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
 
 CV_INIT_ALGORITHM(FastFeatureDetector, "Feature2D.FAST",
@@ -69,7 +68,7 @@ CV_INIT_ALGORITHM(StarDetector, "Feature2D.STAR",
                   obj.info()->addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
                   obj.info()->addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
                   obj.info()->addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize));
-    
+
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
 
 CV_INIT_ALGORITHM(MSER, "Feature2D.MSER",
@@ -81,8 +80,8 @@ CV_INIT_ALGORITHM(MSER, "Feature2D.MSER",
                   obj.info()->addParam(obj, "maxEvolution", obj.maxEvolution);
                   obj.info()->addParam(obj, "areaThreshold", obj.areaThreshold);
                   obj.info()->addParam(obj, "minMargin", obj.minMargin);
-                  obj.info()->addParam(obj, "edgeBlurSize", obj.edgeBlurSize));    
-    
+                  obj.info()->addParam(obj, "edgeBlurSize", obj.edgeBlurSize));
+
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
 
 CV_INIT_ALGORITHM(ORB, "Feature2D.ORB",
@@ -96,7 +95,7 @@ CV_INIT_ALGORITHM(ORB, "Feature2D.ORB",
                   obj.info()->addParam(obj, "scoreType", obj.scoreType));
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
-    
+
 CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT",
                   obj.info()->addParam(obj, "nfeatures", obj.nfeatures);
                   obj.info()->addParam(obj, "qualityLevel", obj.qualityLevel);
@@ -105,7 +104,7 @@ CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT",
                   obj.info()->addParam(obj, "k", obj.k));
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
-    
+
 class CV_EXPORTS HarrisDetector : public GFTTDetector
 {
 public:
@@ -113,7 +112,7 @@ public:
                     int blockSize=3, bool useHarrisDetector=true, double k=0.04 )
     : GFTTDetector( maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector, k ) {}
     AlgorithmInfo* info() const;
-};    
+};
 
 CV_INIT_ALGORITHM(HarrisDetector, "Feature2D.HARRIS",
                   obj.info()->addParam(obj, "nfeatures", obj.nfeatures);
@@ -122,7 +121,7 @@ CV_INIT_ALGORITHM(HarrisDetector, "Feature2D.HARRIS",
                   obj.info()->addParam(obj, "useHarrisDetector", obj.useHarrisDetector);
                   obj.info()->addParam(obj, "k", obj.k));
 
-////////////////////////////////////////////////////////////////////////////////////////////////////////////    
+////////////////////////////////////////////////////////////////////////////////////////////////////////////
 
 CV_INIT_ALGORITHM(DenseFeatureDetector, "Feature2D.Dense",
                   obj.info()->addParam(obj, "initFeatureScale", obj.initFeatureScale);
@@ -134,22 +133,23 @@ CV_INIT_ALGORITHM(DenseFeatureDetector, "Feature2D.Dense",
                   obj.info()->addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale));
 
 CV_INIT_ALGORITHM(GridAdaptedFeatureDetector, "Feature2D.Grid",
-                  obj.info()->addParam(obj, "detector", (Ptr<Algorithm>&)obj.detector);
+                  //obj.info()->addParam(obj, "detector", (Ptr<Algorithm>&)obj.detector);
                   obj.info()->addParam(obj, "maxTotalKeypoints", obj.maxTotalKeypoints);
                   obj.info()->addParam(obj, "gridRows", obj.gridRows);
                   obj.info()->addParam(obj, "gridCols", obj.gridCols));
 
-bool initModule_features2d(void)
+bool cv::initModule_features2d(void)
 {
-    Ptr<Algorithm> brief = createBriefDescriptorExtractor(), orb = createORB(),
-        star = createStarDetector(), fastd = createFastFeatureDetector(), mser = createMSER(),
-        dense = createDenseFeatureDetector(), gftt = createGFTTDetector(),
-        harris = createHarrisDetector(), grid = createGridAdaptedFeatureDetector();
-        
-    return brief->info() != 0 && orb->info() != 0 && star->info() != 0 &&
-        fastd->info() != 0 && mser->info() != 0 && dense->info() != 0 &&
-        gftt->info() != 0 && harris->info() != 0 && grid->info() != 0;
-}
-
+    bool all = true;
+    all &= !BriefDescriptorExtractor_info_auto.name().empty();
+    all &= !FastFeatureDetector_info_auto.name().empty();
+    all &= !StarDetector_info_auto.name().empty();
+    all &= !MSER_info_auto.name().empty();
+    all &= !ORB_info_auto.name().empty();
+    all &= !GFTTDetector_info_auto.name().empty();
+    all &= !HarrisDetector_info_auto.name().empty();
+    all &= !DenseFeatureDetector_info_auto.name().empty();
+    all &= !GridAdaptedFeatureDetector_info_auto.name().empty();
+
+    return all;
 }
-
index 61a6ced..74ab4b8 100755 (executable)
@@ -174,7 +174,7 @@ int DescriptorMatcher::DescriptorCollection::size() const
 /*
  * DescriptorMatcher
  */
-void convertMatches( const vector<vector<DMatch> >& knnMatches, vector<DMatch>& matches )
+static void convertMatches( const vector<vector<DMatch> >& knnMatches, vector<DMatch>& matches )
 {
     matches.clear();
     matches.reserve( knnMatches.size() );
index 1678c69..4bf511c 100644 (file)
@@ -43,7 +43,7 @@
 #ifndef __OPENCV_PRECOMP_H__
 #define __OPENCV_PRECOMP_H__
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4251 4512 4710 4711 4514 4996 )
 #endif
 
index 272acca..958d891 100644 (file)
@@ -493,23 +493,6 @@ private:
     CV_DescriptorExtractorTest& operator=(const CV_DescriptorExtractorTest&) { return *this; }
 };
 
-/*template<typename T, typename Distance>
-class CV_CalonderDescriptorExtractorTest : public CV_DescriptorExtractorTest<Distance>
-{
-public:
-    CV_CalonderDescriptorExtractorTest( const char* testName, float _normDif, float _prevTime ) :
-            CV_DescriptorExtractorTest<Distance>( testName, _normDif, Ptr<DescriptorExtractor>(), _prevTime )
-    {}
-
-protected:
-    virtual void createDescriptorExtractor()
-    {
-        CV_DescriptorExtractorTest<Distance>::dextractor =
-                new CalonderDescriptorExtractor<T>( string(CV_DescriptorExtractorTest<Distance>::ts->get_data_path()) +
-                                                    FEATURES2D_DIR + "/calonder_classifier.rtc");
-    }
-};*/
-
 /****************************************************************************************\
 *                       Algorithmic tests for descriptor matchers                        *
 \****************************************************************************************/
@@ -1059,24 +1042,6 @@ TEST( Features2d_DescriptorExtractor_BRIEF, regression )
     test.safe_run();
 }
 
-#if CV_SSE2
-TEST( Features2d_DescriptorExtractor_Calonder_uchar, regression )
-{
-    CV_CalonderDescriptorExtractorTest<uchar, L2<uchar> > test( "descriptor-calonder-uchar",
-                                                                std::numeric_limits<float>::epsilon() + 1,
-                                                                0.0132175f );
-    test.safe_run();
-}
-
-TEST( Features2d_DescriptorExtractor_Calonder_float, regression )
-{
-    CV_CalonderDescriptorExtractorTest<float, L2<float> > test( "descriptor-calonder-float",
-                                                                std::numeric_limits<float>::epsilon(),
-                                                                0.0221308f );
-    test.safe_run();
-}
-#endif // CV_SSE2
-
 /*
  * Matchers
  */
index 34a7120..e1c78b6 100644 (file)
@@ -46,6 +46,7 @@ struct base_any_policy
     virtual ::size_t get_size() = 0;
     virtual const std::type_info& type() = 0;
     virtual void print(std::ostream& out, void* const* src) = 0;
+    virtual ~base_any_policy() {}
 };
 
 template<typename T>
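The empty virtual destructor added to base_any_policy above is the conventional fix when an object with virtual members may be deleted through a base-class pointer: without it the delete is undefined behaviour, and gcc's non-virtual-destructor warnings flag the class. A compressed, purely hypothetical illustration:

    // illustrative hierarchy only; the real one lives in opencv2/flann/any.h
    struct BasePolicy
    {
        virtual void print() = 0;
        virtual ~BasePolicy() {}      // without this, the delete below is undefined behaviour
    };

    struct IntPolicy : BasePolicy
    {
        void print() {}
    };

    void destroy(BasePolicy* p)
    {
        delete p;                     // safe only because ~BasePolicy() is virtual
    }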
index 064ec39..b94a089 100644 (file)
@@ -35,6 +35,9 @@
 #ifndef OPENCV_FLANN_DYNAMIC_BITSET_H_
 #define OPENCV_FLANN_DYNAMIC_BITSET_H_
 
+#ifndef FLANN_USE_BOOST
+#  define FLANN_USE_BOOST 0
+#endif
 //#define FLANN_USE_BOOST 1
 #if FLANN_USE_BOOST
 #include <boost/dynamic_bitset.hpp>
index 4948d05..cb9e05f 100644 (file)
 #include <iomanip>
 #include <limits.h>
 // TODO as soon as we use C++0x, use the code in USE_UNORDERED_MAP
+#ifdef __GXX_EXPERIMENTAL_CXX0X__
+#  define USE_UNORDERED_MAP 1
+#else
+#  define USE_UNORDERED_MAP 0
+#endif
 #if USE_UNORDERED_MAP
 #include <unordered_map>
 #else
index 21532ab..472f750 100644 (file)
@@ -16,7 +16,7 @@
 \r
 #include "perf_utility.hpp"\r
 \r
-#if GTEST_CREATE_SHARED_LIBRARY\r
+#ifdef GTEST_CREATE_SHARED_LIBRARY\r
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined\r
 #endif\r
 \r
index 6c3fbd1..f57b08c 100644 (file)
@@ -20,7 +20,7 @@
 \r
 #include "perf_utility.hpp"\r
 \r
-#if GTEST_CREATE_SHARED_LIBRARY\r
+#ifdef GTEST_CREATE_SHARED_LIBRARY\r
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined\r
 #endif\r
 \r
index 287d6bc..1cf467c 100644 (file)
@@ -43,7 +43,7 @@
 #ifndef __OPENCV_PRECOMP_H__\r
 #define __OPENCV_PRECOMP_H__\r
 \r
-#if _MSC_VER >= 1200\r
+#if defined _MSC_VER && _MSC_VER >= 1200\r
     #pragma warning( disable: 4251 4710 4711 4514 4996 )\r
 #endif\r
 \r
index ecd0276..ecce478 100644 (file)
@@ -81,6 +81,10 @@ if(HAVE_QT)
 
   list(APPEND HIGHGUI_LIBRARIES ${QT_LIBRARIES} ${QT_QTTEST_LIBRARY})
   list(APPEND highgui_srcs src/window_QT.cpp ${_MOC_OUTFILES} ${_RCC_OUTFILES} )
+
+  if(CMAKE_COMPILER_IS_GNUCXX)
+    set_source_files_properties(${_RCC_OUTFILES} PROPERTIES COMPILE_FLAGS "-Wno-missing-declarations")
+  endif()
 elseif(WIN32)
   list(APPEND highgui_srcs src/window_w32.cpp)
 elseif(HAVE_GTK)
@@ -131,6 +135,10 @@ if(HAVE_OPENNI)
   list(APPEND highgui_srcs src/cap_openni.cpp)
   ocv_include_directories(${OPENNI_INCLUDE_DIR})
   list(APPEND HIGHGUI_LIBRARIES ${OPENNI_LIBRARY})
+
+  if(CMAKE_COMPILER_IS_GNUCXX)
+    set_source_files_properties(src/cap_openni.cpp PROPERTIES COMPILE_FLAGS "-Wno-unknown-pragmas -Wno-uninitialized -Wno-reorder -Wno-strict-aliasing")
+  endif()
 endif(HAVE_OPENNI)
 
 if(HAVE_opencv_androidcamera)
index 7937df4..ddd3003 100644 (file)
@@ -79,7 +79,7 @@ CVAPI(void) cvDisplayStatusBar(const char* name, const char* text, int delayms C
 CVAPI(void) cvSaveWindowParameters(const char* name);
 CVAPI(void) cvLoadWindowParameters(const char* name);
 CVAPI(int) cvStartLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]);
-CVAPI(void) cvStopLoop();
+CVAPI(void) cvStopLoop( void );
 
 typedef void (CV_CDECL *CvButtonCallback)(int state, void* userdata);
 enum {CV_PUSH_BUTTON = 0, CV_CHECKBOX = 1, CV_RADIOBOX = 2};
@@ -90,7 +90,7 @@ CVAPI(int) cvCreateButton( const char* button_name CV_DEFAULT(NULL),CvButtonCall
 /* this function is used to set some external parameters in case of X Window */
 CVAPI(int) cvInitSystem( int argc, char** argv );
 
-CVAPI(int) cvStartWindowThread();
+CVAPI(int) cvStartWindowThread( void );
 
 // ---------  YV ---------
 enum
@@ -100,16 +100,16 @@ enum
     CV_WND_PROP_AUTOSIZE   = 1, //to change/get window's autosize property
     CV_WND_PROP_ASPECTRATIO= 2, //to change/get window's aspectratio property
     CV_WND_PROP_OPENGL     = 3, //to change/get window's opengl support
-    
+
     //These 2 flags are used by cvNamedWindow and cvSet/GetWindowProperty
     CV_WINDOW_NORMAL       = 0x00000000, //the user can resize the window (no constraint)  / also used to switch a fullscreen window to a normal size
     CV_WINDOW_AUTOSIZE     = 0x00000001, //the user cannot resize the window, the size is constrained by the image displayed
     CV_WINDOW_OPENGL       = 0x00001000, //window with opengl support
-    
+
     //Those flags are only for Qt
     CV_GUI_EXPANDED         = 0x00000000, //status bar and tool bar
     CV_GUI_NORMAL           = 0x00000010, //old-fashioned way
-    
+
     //These 3 flags are used by cvNamedWindow and cvSet/GetWindowProperty
     CV_WINDOW_FULLSCREEN   = 1,//change the window to fullscreen
     CV_WINDOW_FREERATIO    = 0x00000100,//the image expands as much as it can (no ratio constraint)
@@ -303,10 +303,10 @@ enum
     CV_CAP_OPENNI_ASUS =910,   // OpenNI (for Asus Xtion)
 
     CV_CAP_ANDROID  =1000,  // Android
-    
+
     CV_CAP_XIAPI    =1100,   // XIMEA Camera API
-    
-    CV_CAP_AVFOUNDATION = 1200 // AVFoundation framework for iOS (OS X Lion will have the same API)
+
+    CV_CAP_AVFOUNDATION = 1200  // AVFoundation framework for iOS (OS X Lion will have the same API)
 };
 
 /* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */
@@ -367,15 +367,15 @@ enum
     CV_CAP_PROP_TRIGGER_DELAY =25,
     CV_CAP_PROP_WHITE_BALANCE_RED_V =26,
     CV_CAP_PROP_ZOOM          =27,
-    CV_CAP_PROP_FOCUS         =28,     
-    CV_CAP_PROP_GUID          =29,     
-    CV_CAP_PROP_ISO_SPEED     =30,     
+    CV_CAP_PROP_FOCUS         =28,
+    CV_CAP_PROP_GUID          =29,
+    CV_CAP_PROP_ISO_SPEED     =30,
     CV_CAP_PROP_MAX_DC1394    =31,
-       CV_CAP_PROP_BACKLIGHT     =32,
-       CV_CAP_PROP_PAN           =33,
-       CV_CAP_PROP_TILT          =34,
-       CV_CAP_PROP_ROLL          =35,
-       CV_CAP_PROP_IRIS          =36,
+    CV_CAP_PROP_BACKLIGHT     =32,
+    CV_CAP_PROP_PAN           =33,
+    CV_CAP_PROP_TILT          =34,
+    CV_CAP_PROP_ROLL          =35,
+    CV_CAP_PROP_IRIS          =36,
     CV_CAP_PROP_SETTINGS      =37,
 
     CV_CAP_PROP_AUTOGRAB      =1024, // property for highgui class CvCapture_Android only
@@ -409,24 +409,24 @@ enum
     CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH    = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_FOCAL_LENGTH,
     CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION    = CV_CAP_OPENNI_DEPTH_GENERATOR + CV_CAP_PROP_OPENNI_REGISTRATION,
     CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CV_CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION,
-    
+
     // Properties of cameras available through GStreamer interface
     CV_CAP_GSTREAMER_QUEUE_LENGTH   = 200, // default is 1
     CV_CAP_PROP_PVAPI_MULTICASTIP   = 300, // IP to enable multicast master mode, 0 to disable multicast
-    
+
     // Properties of cameras available through XIMEA SDK interface
-    CV_CAP_PROP_XI_DOWNSAMPLING  = 400,      // Change image resolution by binning or skipping.  
+    CV_CAP_PROP_XI_DOWNSAMPLING  = 400,      // Change image resolution by binning or skipping.
     CV_CAP_PROP_XI_DATA_FORMAT   = 401,       // Output data format.
     CV_CAP_PROP_XI_OFFSET_X      = 402,      // Horizontal offset from the origin to the area of interest (in pixels).
     CV_CAP_PROP_XI_OFFSET_Y      = 403,      // Vertical offset from the origin to the area of interest (in pixels).
     CV_CAP_PROP_XI_TRG_SOURCE    = 404,      // Defines source of trigger.
     CV_CAP_PROP_XI_TRG_SOFTWARE  = 405,      // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
-    CV_CAP_PROP_XI_GPI_SELECTOR  = 406,      // Selects general purpose input 
+    CV_CAP_PROP_XI_GPI_SELECTOR  = 406,      // Selects general purpose input
     CV_CAP_PROP_XI_GPI_MODE      = 407,      // Set general purpose input mode
     CV_CAP_PROP_XI_GPI_LEVEL     = 408,      // Get general purpose level
-    CV_CAP_PROP_XI_GPO_SELECTOR  = 409,      // Selects general purpose output 
+    CV_CAP_PROP_XI_GPO_SELECTOR  = 409,      // Selects general purpose output
     CV_CAP_PROP_XI_GPO_MODE      = 410,      // Set general purpose output mode
-    CV_CAP_PROP_XI_LED_SELECTOR  = 411,      // Selects camera signalling LED 
+    CV_CAP_PROP_XI_LED_SELECTOR  = 411,      // Selects camera signalling LED
     CV_CAP_PROP_XI_LED_MODE      = 412,      // Define camera signalling LED functionality
     CV_CAP_PROP_XI_MANUAL_WB     = 413,      // Calculates White Balance(must be called during acquisition)
     CV_CAP_PROP_XI_AUTO_WB       = 414,      // Automatic white balance
@@ -436,7 +436,7 @@ enum
     CV_CAP_PROP_XI_AG_MAX_LIMIT  = 418,      // Maximum limit of gain in AEAG procedure
     CV_CAP_PROP_XI_AEAG_LEVEL    = 419,       // Average intensity of output signal AEAG should achieve(in %)
     CV_CAP_PROP_XI_TIMEOUT       = 420,       // Image capture timeout in milliseconds
-    
+
     // Properties for Android cameras
     CV_CAP_PROP_ANDROID_FLASH_MODE = 8001,
     CV_CAP_PROP_ANDROID_FOCUS_MODE = 8002,
@@ -532,7 +532,7 @@ CVAPI(double) cvGetCaptureProperty( CvCapture* capture, int property_id );
 CVAPI(int)    cvSetCaptureProperty( CvCapture* capture, int property_id, double value );
 
 // Return the type of the capturer (eg, CV_CAP_V4W, CV_CAP_UNICAP), which is unknown if created with CV_CAP_ANY
-CVAPI(int)    cvGetCaptureDomain( CvCapture* capture);  
+CVAPI(int)    cvGetCaptureDomain( CvCapture* capture);
 
 /* "black box" video file writer structure */
 typedef struct CvVideoWriter CvVideoWriter;
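The `( void )` added to cvStopLoop and cvStartWindowThread above only matters when this C header is consumed by a C compiler: in C an empty parameter list is an old-style declaration with unspecified arguments, which gcc's -Wstrict-prototypes reports, while `(void)` is a real prototype; in C++ the two spellings are already equivalent, so existing callers are unaffected. A two-line illustration with hypothetical names:

    /* compile as C with: gcc -Wstrict-prototypes -c example.c */
    int stopLoopOld();        /* non-prototype declaration: "function declaration isn't a prototype" */
    int stopLoopNew(void);    /* proper prototype: accepted cleanly */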
index 2ffa9ee..17a338e 100644 (file)
@@ -4,7 +4,7 @@
 #include "opencv2/ts/ts.hpp"
 #include "opencv2/highgui/highgui.hpp"
 
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
 #endif
 
index 8dc25f3..15ac626 100644 (file)
@@ -41,7 +41,7 @@
 
 #include "precomp.hpp"
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4711 )
 #endif
 
@@ -282,7 +282,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
                 return capture;
         break;
 #endif
-        
+
 #ifdef HAVE_PVAPI
         case CV_CAP_PVAPI:
             capture = cvCreateCameraCapture_PvAPI (index);
@@ -306,7 +306,7 @@ CV_IMPL CvCapture * cvCreateCameraCapture (int index)
                 return capture;
         break;
 #endif
-        
+
 #ifdef HAVE_XIMEA
         case CV_CAP_XIAPI:
             capture = cvCreateCameraCapture_XIMEA (index);
@@ -354,7 +354,7 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename)
     if (! result)
         result = cvCreateFileCapture_QT (filename);
 #endif
-    
+
 #ifdef HAVE_AVFOUNDATION
     if (! result)
         result = cvCreateFileCapture_AVFoundation (filename);
@@ -364,7 +364,7 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename)
     if (! result)
         result = cvCreateFileCapture_OpenNI (filename);
 #endif
-    
+
     if (! result)
         result = cvCreateFileCapture_Images (filename);
 
@@ -378,29 +378,29 @@ CV_IMPL CvCapture * cvCreateFileCapture (const char * filename)
 CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc,
                                             double fps, CvSize frameSize, int is_color )
 {
-       //CV_FUNCNAME( "cvCreateVideoWriter" );
+    //CV_FUNCNAME( "cvCreateVideoWriter" );
 
-       CvVideoWriter *result = 0;
+    CvVideoWriter *result = 0;
 
-       if(!fourcc || !fps)
-               result = cvCreateVideoWriter_Images(filename);
+    if(!fourcc || !fps)
+        result = cvCreateVideoWriter_Images(filename);
 
-       if(!result)
-               result = cvCreateVideoWriter_FFMPEG_proxy (filename, fourcc, fps, frameSize, is_color);
+    if(!result)
+        result = cvCreateVideoWriter_FFMPEG_proxy (filename, fourcc, fps, frameSize, is_color);
 
-/*     #ifdef HAVE_XINE
-       if(!result)
-               result = cvCreateVideoWriter_XINE(filename, fourcc, fps, frameSize, is_color);
-       #endif
+/*  #ifdef HAVE_XINE
+    if(!result)
+        result = cvCreateVideoWriter_XINE(filename, fourcc, fps, frameSize, is_color);
+    #endif
 */
-#ifdef HAVE_AVFOUNDATION 
+#ifdef HAVE_AVFOUNDATION
     if (! result)
         result = cvCreateVideoWriter_AVFoundation(filename, fourcc, fps, frameSize, is_color);
 #endif
 
 #ifdef HAVE_QUICKTIME
-       if(!result)
-               result = cvCreateVideoWriter_QT(filename, fourcc, fps, frameSize, is_color);
+    if(!result)
+        result = cvCreateVideoWriter_QT(filename, fourcc, fps, frameSize, is_color);
 #endif
 
 #ifdef HAVE_GSTREAMER
@@ -408,10 +408,10 @@ CV_IMPL CvVideoWriter* cvCreateVideoWriter( const char* filename, int fourcc,
         result = cvCreateVideoWriter_GStreamer(filename, fourcc, fps, frameSize, is_color);
 #endif
 
-       if(!result)
-               result = cvCreateVideoWriter_Images(filename);
+    if(!result)
+        result = cvCreateVideoWriter_Images(filename);
 
-       return result;
+    return result;
 }
 
 CV_IMPL int cvWriteFrame( CvVideoWriter* writer, const IplImage* image )
@@ -434,12 +434,12 @@ namespace cv
 
 VideoCapture::VideoCapture()
 {}
-        
+
 VideoCapture::VideoCapture(const string& filename)
 {
     open(filename);
 }
-    
+
 VideoCapture::VideoCapture(int device)
 {
     open(device);
@@ -449,21 +449,21 @@ VideoCapture::~VideoCapture()
 {
     cap.release();
 }
-    
+
 bool VideoCapture::open(const string& filename)
 {
     cap = cvCreateFileCapture(filename.c_str());
     return isOpened();
 }
-    
+
 bool VideoCapture::open(int device)
 {
     cap = cvCreateCameraCapture(device);
     return isOpened();
 }
-    
+
 bool VideoCapture::isOpened() const { return !cap.empty(); }
-    
+
 void VideoCapture::release()
 {
     cap.release();
@@ -473,7 +473,7 @@ bool VideoCapture::grab()
 {
     return cvGrabFrame(cap) != 0;
 }
-    
+
 bool VideoCapture::retrieve(Mat& image, int channel)
 {
     IplImage* _img = cvRetrieveFrame(cap, channel);
@@ -500,18 +500,18 @@ bool VideoCapture::read(Mat& image)
         image.release();
     return !image.empty();
 }
-    
+
 VideoCapture& VideoCapture::operator >> (Mat& image)
 {
     read(image);
     return *this;
 }
-    
+
 bool VideoCapture::set(int propId, double value)
 {
     return cvSetCaptureProperty(cap, propId, value) != 0;
 }
-    
+
 double VideoCapture::get(int propId)
 {
     return cvGetCaptureProperty(cap, propId);
@@ -519,7 +519,7 @@ double VideoCapture::get(int propId)
 
 VideoWriter::VideoWriter()
 {}
-    
+
 VideoWriter::VideoWriter(const string& filename, int fourcc, double fps, Size frameSize, bool isColor)
 {
     open(filename, fourcc, fps, frameSize, isColor);
@@ -528,13 +528,13 @@ VideoWriter::VideoWriter(const string& filename, int fourcc, double fps, Size fr
 void VideoWriter::release()
 {
     writer.release();
-}    
-    
+}
+
 VideoWriter::~VideoWriter()
 {
     release();
 }
-    
+
 bool VideoWriter::open(const string& filename, int fourcc, double fps, Size frameSize, bool isColor)
 {
     writer = cvCreateVideoWriter(filename.c_str(), fourcc, fps, frameSize, isColor);
@@ -544,18 +544,18 @@ bool VideoWriter::open(const string& filename, int fourcc, double fps, Size fram
 bool VideoWriter::isOpened() const
 {
     return !writer.empty();
-}    
+}
 
 void VideoWriter::write(const Mat& image)
 {
     IplImage _img = image;
     cvWriteFrame(writer, &_img);
 }
-    
+
 VideoWriter& VideoWriter::operator << (const Mat& image)
 {
     write(image);
-    return *this;    
+    return *this;
 }
 
 }
index be89b9c..0cc60e3 100644 (file)
@@ -123,7 +123,7 @@ icvInitFFMPEG(void)
         icvReleaseVideoWriter_FFMPEG_p = (CvReleaseVideoWriter_Plugin)cvReleaseVideoWriter_FFMPEG;
         icvWriteFrame_FFMPEG_p = (CvWriteFrame_Plugin)cvWriteFrame_FFMPEG;
     #endif
-        
+
         ffmpegInitialized = 1;
     }
 }
@@ -151,7 +151,7 @@ public:
     {
         unsigned char* data = 0;
         int step=0, width=0, height=0, cn=0;
-        
+
         if(!ffmpegCapture ||
            !icvRetrieveFrame_FFMPEG_p(ffmpegCapture,&data,&step,&width,&height,&cn))
            return 0;
@@ -193,7 +193,7 @@ CvCapture* cvCreateFileCapture_FFMPEG_proxy(const char * filename)
     return cvCreateFileCapture_VFW(filename);
 #else
     return 0;
-#endif    
+#endif
 }
 
 
@@ -247,5 +247,5 @@ CvVideoWriter* cvCreateVideoWriter_FFMPEG_proxy( const char* filename, int fourc
     return cvCreateVideoWriter_VFW(filename, fourcc, fps, frameSize, isColor);
 #else
     return 0;
-#endif    
+#endif
 }
index 025cb89..fc9ace5 100644 (file)
@@ -66,7 +66,7 @@ extern "C" {
 #ifndef HAVE_FFMPEG_SWSCALE
     #error "libswscale is necessary to build the newer OpenCV ffmpeg wrapper"
 #endif
-    
+
 // if the header path is not specified explicitly, let's deduce it
 #if !defined HAVE_FFMPEG_AVCODEC_H && !defined HAVE_LIBAVCODEC_AVCODEC_H
 
@@ -140,7 +140,7 @@ extern "C" {
 #define AV_NOPTS_VALUE_ ((int64_t)AV_NOPTS_VALUE)
 #endif
 
-int get_number_of_cpus(void)
+static int get_number_of_cpus(void)
 {
 #if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(52, 111, 0)
     return 1;
@@ -210,7 +210,7 @@ struct CvCapture_FFMPEG
 
     void    seek(int64_t frame_number);
     void    seek(double sec);
-    bool       slowSeek( int framenumber );
+    bool    slowSeek( int framenumber );
 
     int64_t get_total_frames();
     double  get_duration_sec();
@@ -225,8 +225,8 @@ struct CvCapture_FFMPEG
     AVCodec         * avcodec;
     int               video_stream;
     AVStream        * video_st;
-    AVFrame         * picture;        
-    AVFrame           rgb_picture;    
+    AVFrame         * picture;
+    AVFrame           rgb_picture;
     int64_t           picture_pts;
 
     AVPacket          packet;
@@ -274,7 +274,7 @@ void CvCapture_FFMPEG::close()
         sws_freeContext(img_convert_ctx);
         img_convert_ctx = 0;
     }
-    
+
     if( picture )
         av_free(picture);
 
@@ -293,9 +293,9 @@ void CvCapture_FFMPEG::close()
     if( ic )
     {
 #if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 24, 2)
-               av_close_input_file(ic);
+        av_close_input_file(ic);
 #else
-               avformat_close_input(&ic);
+        avformat_close_input(&ic);
 #endif
 
         ic = NULL;
@@ -337,7 +337,7 @@ static void icvInitFFMPEG_internal()
         av_register_all();
 
         av_log_set_level(AV_LOG_ERROR);
-        
+
         initialized = true;
     }
 }
@@ -345,18 +345,18 @@ static void icvInitFFMPEG_internal()
 bool CvCapture_FFMPEG::open( const char* _filename )
 {
     icvInitFFMPEG_internal();
-    
+
     unsigned i;
     bool valid = false;
 
     close();
-    
+
 #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)
     int err = avformat_open_input(&ic, _filename, NULL, NULL);
 #else
     int err = av_open_input_file(&ic, _filename, NULL, 0, NULL);
-#endif    
-    
+#endif
+
     if (err < 0) {
         CV_WARN("Error opening file");
         goto exit_func;
@@ -438,13 +438,13 @@ bool CvCapture_FFMPEG::grabFrame()
     const int max_number_of_attempts = 1 << 16;
 
     if( !ic || !video_st )  return false;
-    
+
     if( ic->streams[video_stream]->nb_frames > 0 &&
         frame_number > ic->streams[video_stream]->nb_frames )
         return false;
 
     av_free_packet (&packet);
-    
+
     picture_pts = AV_NOPTS_VALUE_;
 
     // get the next frame
@@ -463,7 +463,7 @@ bool CvCapture_FFMPEG::grabFrame()
                 break;
             continue;
         }
-        
+
         // Decode video frame
         #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
             avcodec_decode_video2(video_st->codec, picture, &got_picture, &packet);
@@ -498,7 +498,7 @@ bool CvCapture_FFMPEG::grabFrame()
 
     if( valid && first_frame_number < 0 )
         first_frame_number = dts_to_frame_number(picture_pts);
-    
+
     // return if we have a new picture or not
     return valid;
 }
@@ -518,7 +518,7 @@ bool CvCapture_FFMPEG::retrieveFrame(int, unsigned char** data, int* step, int*
     {
         if( img_convert_ctx )
             sws_freeContext(img_convert_ctx);
-        
+
         frame.width = video_st->codec->width;
         frame.height = video_st->codec->height;
 
@@ -629,7 +629,7 @@ double CvCapture_FFMPEG::get_fps()
     {
         fps = r2d(ic->streams[video_stream]->avg_frame_rate);
     }
-#endif    
+#endif
 
     if (fps < eps_zero)
     {
@@ -666,12 +666,12 @@ void CvCapture_FFMPEG::seek(int64_t _frame_number)
 {
     _frame_number = std::min(_frame_number, get_total_frames());
     int delta = 16;
-    
+
     // if we have not grabbed a single frame before first seek, let's read the first frame
     // and get some valuable information during the process
     if( first_frame_number < 0 && get_total_frames() > 1 )
         grabFrame();
-    
+
     for(;;)
     {
         int64_t _frame_number_temp = std::max(_frame_number-delta, (int64_t)0);
@@ -684,13 +684,13 @@ void CvCapture_FFMPEG::seek(int64_t _frame_number)
         if( _frame_number > 0 )
         {
             grabFrame();
-            
+
             if( _frame_number > 1 )
             {
                 frame_number = dts_to_frame_number(picture_pts) - first_frame_number;
                 //printf("_frame_number = %d, frame_number = %d, delta = %d\n",
                 //       (int)_frame_number, (int)frame_number, delta);
-                
+
                 if( frame_number < 0 || frame_number > _frame_number-1 )
                 {
                     if( _frame_number_temp == 0 || delta >= INT_MAX/4 )
@@ -771,7 +771,7 @@ struct CvVideoWriter_FFMPEG
 
     void init();
 
-    AVOutputFormat     * fmt;
+    AVOutputFormat  * fmt;
     AVFormatContext * oc;
     uint8_t         * outbuf;
     uint32_t          outbuf_size;
@@ -1010,7 +1010,7 @@ static AVStream *icv_add_video_stream_FFMPEG(AVFormatContext *oc,
 
 static const int OPENCV_NO_FRAMES_WRITTEN_CODE = 1000;
 
-int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_t * outbuf, uint32_t outbuf_size, AVFrame * picture )
+static int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_t * outbuf, uint32_t outbuf_size, AVFrame * picture )
 {
 #if LIBAVFORMAT_BUILD > 4628
     AVCodecContext * c = video_st->codec;
@@ -1046,7 +1046,7 @@ int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_
 
 #if LIBAVFORMAT_BUILD > 4752
             if(c->coded_frame->pts != (int64_t)AV_NOPTS_VALUE)
-                               pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
+                pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
 #else
             pkt.pts = c->coded_frame->pts;
 #endif
@@ -1069,7 +1069,7 @@ int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_
 bool CvVideoWriter_FFMPEG::writeFrame( const unsigned char* data, int step, int width, int height, int cn, int origin )
 {
     bool ret = false;
-    
+
     if( (width & -2) != frame_width || (height & -2) != frame_height || !data )
         return false;
     width = frame_width;
@@ -1180,7 +1180,7 @@ void CvVideoWriter_FFMPEG::close()
     // nothing to do if already released
     if ( !picture )
         return;
-    
+
     /* no more frame to compress. The codec has a latency of a few
        frames if using B frames, so we get the last frames by
        passing the same picture again */
@@ -1200,7 +1200,7 @@ void CvVideoWriter_FFMPEG::close()
         }
         av_write_trailer(oc);
     }
-    
+
     if( img_convert_ctx )
     {
         sws_freeContext(img_convert_ctx);
@@ -1272,7 +1272,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
                                  double fps, int width, int height, bool is_color )
 {
     icvInitFFMPEG_internal();
-    
+
     CodecID codec_id = CODEC_ID_NONE;
     int err, codec_pix_fmt;
     double bitrate_scale = 1;
@@ -1284,7 +1284,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
         return false;
     if(fps <= 0)
         return false;
-    
+
     // we allow frames of odd width or height, but in this case we truncate
     // the rightmost column/the bottom row. Probably, this should be handled more elegantly,
     // but some internal functions inside FFMPEG swscale require even width/height.
@@ -1363,7 +1363,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
         codec_pix_fmt = PIX_FMT_YUV420P;
         break;
     }
-    
+
     double bitrate = MIN(bitrate_scale*fps*width*height, (double)INT_MAX/2);
 
     // TODO -- safe to ignore output audio stream?
@@ -1480,8 +1480,8 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
     err=avformat_write_header(oc, NULL);
 #else
     err=av_write_header( oc );
-#endif    
-    
+#endif
+
     if(err < 0)
     {
         close();
@@ -1579,7 +1579,7 @@ struct OutputMediaStream_FFMPEG
 {
     bool open(const char* fileName, int width, int height, double fps);
     void close();
-    
+
     void write(unsigned char* data, int size, int keyFrame);
 
     // add a video output stream to the container
@@ -1692,7 +1692,7 @@ AVStream* OutputMediaStream_FFMPEG::addVideoStream(AVFormatContext *oc, CodecID
             {
                 AVRational error = av_sub_q(req, *p);
 
-                if (error.num < 0) 
+                if (error.num < 0)
                     error.num *= -1;
 
                 if (av_cmp_q(error, best_error) < 0)
@@ -1825,7 +1825,7 @@ bool OutputMediaStream_FFMPEG::open(const char* fileName, int width, int height,
 void OutputMediaStream_FFMPEG::write(unsigned char* data, int size, int keyFrame)
 {
     // if zero size, it means the image was buffered
-    if (size > 0) 
+    if (size > 0)
     {
         AVPacket pkt;
         av_init_packet(&pkt);
@@ -1851,7 +1851,7 @@ struct OutputMediaStream_FFMPEG* create_OutputMediaStream_FFMPEG(const char* fil
 
     stream->close();
     free(stream);
-    
+
     return 0;
 }
 
index 739adf8..7ec7cf8 100644 (file)
 
 #include <iostream>
 #include <queue>
+
+#ifndef i386
+#  define i386 0
+#endif
+#ifndef __arm__
+#  define __arm__ 0
+#endif
+#ifndef _ARC
+#  define _ARC 0
+#endif
+#ifndef __APPLE__
+#  define __APPLE__ 0
+#endif
+
 #include "XnCppWrapper.h"
 
 const std::string XMLConfig =
@@ -169,6 +183,8 @@ private:
             approxSyncGrabber(approxSyncGrabber), isDepthFilled(false), isImageFilled(false)
         {}
 
+        virtual ~ApproximateSynchronizerBase() {}
+
         virtual bool isSpinContinue() const = 0;
         virtual void pushDepthMetaData( xn::DepthMetaData& depthMetaData ) = 0;
         virtual void pushImageMetaData( xn::ImageMetaData& imageMetaData ) = 0;
@@ -410,7 +426,7 @@ class CvCapture_OpenNI : public CvCapture
 {
 public:
     enum { DEVICE_DEFAULT=0, DEVICE_MS_KINECT=0, DEVICE_ASUS_XTION=1, DEVICE_MAX=1 };
-    
+
     static const int INVALID_PIXEL_VAL = 0;
     static const int INVALID_COORDINATE_VAL = 0;
 
@@ -508,26 +524,26 @@ bool CvCapture_OpenNI::isOpened() const
     return isContextOpened;
 }
 
-XnMapOutputMode defaultMapOutputMode()
-{
-    XnMapOutputMode mode;
-    mode.nXRes = XN_VGA_X_RES;
-    mode.nYRes = XN_VGA_Y_RES;
-    mode.nFPS  = 30;
-    return mode;
-}
+// static XnMapOutputMode defaultMapOutputMode()
+// {
+//     XnMapOutputMode mode;
+//     mode.nXRes = XN_VGA_X_RES;
+//     mode.nYRes = XN_VGA_Y_RES;
+//     mode.nFPS  = 30;
+//     return mode;
+// }
 
 
 CvCapture_OpenNI::CvCapture_OpenNI( int index )
 {
     int deviceType = DEVICE_DEFAULT;
     XnStatus status;
-    
+
     isContextOpened = false;
     maxBufferSize = DEFAULT_MAX_BUFFER_SIZE;
     isCircleBuffer = DEFAULT_IS_CIRCLE_BUFFER;
     maxTimeDuration = DEFAULT_MAX_TIME_DURATION;
-    
+
     if( index >= 10 )
     {
         deviceType = index / 10;
@@ -1201,7 +1217,7 @@ IplImage* CvCapture_OpenNI::retrievePointCloudMap()
     return outputMaps[CV_CAP_OPENNI_POINT_CLOUD_MAP].getIplImagePtr();
 }
 
-void computeDisparity_32F( const xn::DepthMetaData& depthMetaData, cv::Mat& disp, XnDouble baseline, XnUInt64 F,
+static void computeDisparity_32F( const xn::DepthMetaData& depthMetaData, cv::Mat& disp, XnDouble baseline, XnUInt64 F,
                            XnUInt64 noSampleValue, XnUInt64 shadowValue )
 {
     cv::Mat depth;
index 183a38e..ec45c84 100644 (file)
@@ -126,8 +126,7 @@ skip_input_data(j_decompress_ptr cinfo, long num_bytes)
 }
 
 
-GLOBAL(void)
-jpeg_buffer_src(j_decompress_ptr cinfo, JpegSource* source)
+static void jpeg_buffer_src(j_decompress_ptr cinfo, JpegSource* source)
 {
     cinfo->src = &source->pub;
 
@@ -498,8 +497,7 @@ empty_output_buffer (j_compress_ptr cinfo)
     return TRUE;
 }
 
-GLOBAL(void)
-jpeg_buffer_dest(j_compress_ptr cinfo, JpegDestination* destination)
+static void jpeg_buffer_dest(j_compress_ptr cinfo, JpegDestination* destination)
 {
     cinfo->dest = &destination->pub;
 
index b903b03..9250fff 100644 (file)
@@ -57,7 +57,7 @@ namespace cv
 static vector<ImageDecoder> decoders;
 static vector<ImageEncoder> encoders;
 
-ImageDecoder findDecoder( const string& filename )
+static ImageDecoder findDecoder( const string& filename )
 {
     size_t i, maxlen = 0;
     for( i = 0; i < decoders.size(); i++ )
@@ -83,7 +83,7 @@ ImageDecoder findDecoder( const string& filename )
     return ImageDecoder();
 }
 
-ImageDecoder findDecoder( const Mat& buf )
+static ImageDecoder findDecoder( const Mat& buf )
 {
     size_t i, maxlen = 0;
 
@@ -110,7 +110,7 @@ ImageDecoder findDecoder( const Mat& buf )
     return ImageDecoder();
 }
 
-ImageEncoder findEncoder( const string& _ext )
+static ImageEncoder findEncoder( const string& _ext )
 {
     if( _ext.size() <= 1 )
         return ImageEncoder();
@@ -395,7 +395,7 @@ Mat imdecode( InputArray _buf, int flags )
     imdecode_( buf, flags, LOAD_MAT, &img );
     return img;
 }
-    
+
 bool imencode( const string& ext, InputArray _image,
                vector<uchar>& buf, const vector<int>& params )
 {
index 415f5e8..5664dd7 100644 (file)
@@ -42,7 +42,7 @@
 #ifndef __HIGHGUI_H_
 #define __HIGHGUI_H_
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4251 )
 #endif
 
index 30bf973..0924277 100755 (executable)
@@ -1,4 +1,4 @@
-//IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. \r
+//IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
 \r
 // By downloading, copying, installing or using the software you agree to this license.\r
 // If you do not agree to this license, do not download, install,\r
@@ -78,181 +78,181 @@ static CvWinProperties* global_control_panel = NULL;
 \r
 CV_IMPL CvFont cvFontQt(const char* nameFont, int pointSize,CvScalar color,int weight,int style, int spacing)\r
 {\r
-       /*\r
-       //nameFont   <- only Qt\r
-       //CvScalar color   <- only Qt (blue_component, green_component, red\_component[, alpha_component])\r
-       int         font_face;//<- style in Qt\r
-       const int*  ascii;\r
-       const int*  greek;\r
-       const int*  cyrillic;\r
-       float       hscale, vscale;\r
-       float       shear;\r
-       int         thickness;//<- weight in Qt\r
-       float       dx;//spacing letter in Qt (0 default) in pixel\r
-       int         line_type;//<- pointSize in Qt\r
-       */\r
-       CvFont f = {nameFont,color,style,NULL,NULL,NULL,0,0,0,weight,spacing,pointSize};\r
-       return f;\r
+    /*\r
+    //nameFont   <- only Qt\r
+    //CvScalar color   <- only Qt (blue_component, green_component, red\_component[, alpha_component])\r
+    int         font_face;//<- style in Qt\r
+    const int*  ascii;\r
+    const int*  greek;\r
+    const int*  cyrillic;\r
+    float       hscale, vscale;\r
+    float       shear;\r
+    int         thickness;//<- weight in Qt\r
+    float       dx;//spacing letter in Qt (0 default) in pixel\r
+    int         line_type;//<- pointSize in Qt\r
+    */\r
+    CvFont f = {nameFont,color,style,NULL,NULL,NULL,0,0,0,weight,spacing,pointSize};\r
+    return f;\r
 }\r
 \r
 \r
 CV_IMPL void cvAddText(const CvArr* img, const char* text, CvPoint org, CvFont* font)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "putText",\r
-               Qt::AutoConnection,\r
-               Q_ARG(void*, (void*) img),\r
-               Q_ARG(QString,QString(text)),\r
-               Q_ARG(QPoint, QPoint(org.x,org.y)),\r
-               Q_ARG(void*,(void*) font));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "putText",\r
+        Qt::AutoConnection,\r
+        Q_ARG(void*, (void*) img),\r
+        Q_ARG(QString,QString(text)),\r
+        Q_ARG(QPoint, QPoint(org.x,org.y)),\r
+        Q_ARG(void*,(void*) font));\r
 }\r
 \r
 \r
 double cvGetRatioWindow_QT(const char* name)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       double result = -1;\r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "getRatioWindow",\r
-               //Qt::DirectConnection,\r
-               Qt::AutoConnection,\r
-               Q_RETURN_ARG(double, result),\r
-               Q_ARG(QString, QString(name)));\r
+    double result = -1;\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "getRatioWindow",\r
+        //Qt::DirectConnection,\r
+        Qt::AutoConnection,\r
+        Q_RETURN_ARG(double, result),\r
+        Q_ARG(QString, QString(name)));\r
 \r
-       return result;\r
+    return result;\r
 }\r
 \r
 \r
 void cvSetRatioWindow_QT(const char* name,double prop_value)\r
 {\r
 \r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "setRatioWindow",\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(name)),\r
-               Q_ARG(double, prop_value));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "setRatioWindow",\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(name)),\r
+        Q_ARG(double, prop_value));\r
 }\r
 \r
 \r
 double cvGetPropWindow_QT(const char* name)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       double result = -1;\r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "getPropWindow",\r
-               //Qt::DirectConnection,\r
-               Qt::AutoConnection,\r
-               Q_RETURN_ARG(double, result),\r
-               Q_ARG(QString, QString(name)));\r
+    double result = -1;\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "getPropWindow",\r
+        //Qt::DirectConnection,\r
+        Qt::AutoConnection,\r
+        Q_RETURN_ARG(double, result),\r
+        Q_ARG(QString, QString(name)));\r
 \r
-       return result;\r
+    return result;\r
 }\r
 \r
 \r
 void cvSetPropWindow_QT(const char* name,double prop_value)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "setPropWindow",\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(name)),\r
-               Q_ARG(double, prop_value));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "setPropWindow",\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(name)),\r
+        Q_ARG(double, prop_value));\r
 }\r
 \r
 \r
 void cvSetModeWindow_QT(const char* name, double prop_value)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "toggleFullScreen",\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(name)),\r
-               Q_ARG(double, prop_value));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "toggleFullScreen",\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(name)),\r
+        Q_ARG(double, prop_value));\r
 }\r
 \r
 \r
 double cvGetModeWindow_QT(const char* name)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       double result = -1;\r
+    double result = -1;\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "isFullScreen",\r
-               Qt::AutoConnection,\r
-               Q_RETURN_ARG(double, result),\r
-               Q_ARG(QString, QString(name)));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "isFullScreen",\r
+        Qt::AutoConnection,\r
+        Q_RETURN_ARG(double, result),\r
+        Q_ARG(QString, QString(name)));\r
 \r
-       return result;\r
+    return result;\r
 }\r
 \r
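For reference, the fullscreen getter/setter pair reformatted above is normally reached through the generic window-property calls of the public C API. A minimal, hedged usage sketch (window title is arbitrary; assumes a Qt build of highgui, where these calls should route into the ..._QT helpers above):

    #include <opencv2/highgui/highgui.hpp>

    int main()
    {
        cvNamedWindow("demo", CV_WINDOW_NORMAL);                                   // resizable window
        cvSetWindowProperty("demo", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN); // request fullscreen
        double mode = cvGetWindowProperty("demo", CV_WND_PROP_FULLSCREEN);         // query current mode
        (void)mode;  // CV_WINDOW_FULLSCREEN while fullscreen, CV_WINDOW_NORMAL otherwise
        cvWaitKey(0);
        cvDestroyWindow("demo");
        return 0;
    }
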
 \r
 CV_IMPL void cvDisplayOverlay(const char* name, const char* text, int delayms)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "displayInfo",\r
-               Qt::AutoConnection,\r
-               //Qt::DirectConnection,\r
-               Q_ARG(QString, QString(name)),\r
-               Q_ARG(QString, QString(text)),\r
-               Q_ARG(int, delayms));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "displayInfo",\r
+        Qt::AutoConnection,\r
+        //Qt::DirectConnection,\r
+        Q_ARG(QString, QString(name)),\r
+        Q_ARG(QString, QString(text)),\r
+        Q_ARG(int, delayms));\r
 }\r
 \r
 \r
 CV_IMPL void cvSaveWindowParameters(const char* name)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "saveWindowParameters",\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(name)));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "saveWindowParameters",\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(name)));\r
 }\r
 \r
 \r
 CV_IMPL void cvLoadWindowParameters(const char* name)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "loadWindowParameters",\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(name)));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "loadWindowParameters",\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(name)));\r
 }\r
 \r
 \r
 CV_IMPL void cvDisplayStatusBar(const char* name, const char* text, int delayms)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "displayStatusBar",\r
-               Qt::AutoConnection,\r
-               //Qt::DirectConnection,\r
-               Q_ARG(QString, QString(name)),\r
-               Q_ARG(QString, QString(text)),\r
-               Q_ARG(int, delayms));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "displayStatusBar",\r
+        Qt::AutoConnection,\r
+        //Qt::DirectConnection,\r
+        Q_ARG(QString, QString(name)),\r
+        Q_ARG(QString, QString(text)),\r
+        Q_ARG(int, delayms));\r
 }\r
 \r
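For context, a small hedged example of the overlay and status-bar entry points whose Qt plumbing is reindented above (text and delays are arbitrary; both calls require the Qt backend):

    #include <opencv2/highgui/highgui.hpp>

    int main()
    {
        cvNamedWindow("demo");                                        // the Qt window provides the status bar
        cvDisplayOverlay("demo", "overlay for 2 seconds", 2000);      // drawn on top of the image
        cvDisplayStatusBar("demo", "status bar for 2 seconds", 2000); // shown below the image
        cvWaitKey(0);
        cvDestroyWindow("demo");
        return 0;
    }
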
 \r
@@ -337,19 +337,19 @@ CV_IMPL int cvWaitKey(int delay)
 //We recommend not using this function for now\r
 CV_IMPL int cvStartLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[])\r
 {\r
-       multiThreads = true;\r
-       QFuture<int> future = QtConcurrent::run(pt2Func, argc, argv);\r
-       return guiMainThread->start();\r
+    multiThreads = true;\r
+    QFuture<int> future = QtConcurrent::run(pt2Func, argc, argv);\r
+    return guiMainThread->start();\r
 }\r
 \r
 \r
 CV_IMPL void cvStopLoop()\r
 {\r
-       qApp->exit();\r
+    qApp->exit();\r
 }\r
 \r
 \r
-CvWindow* icvFindWindowByName(QString name)\r
+static CvWindow* icvFindWindowByName(QString name)\r
 {\r
     CvWindow* window = 0;\r
 \r
@@ -357,100 +357,100 @@ CvWindow* icvFindWindowByName(QString name)
     //that can be grabbed here and crash the code at 'w->param_name==name'.\r
     foreach (QWidget* widget, QApplication::topLevelWidgets())\r
     {\r
-           if (widget->isWindow() && !widget->parentWidget())//is a window without parent\r
-           {\r
-                   CvWinModel* temp = (CvWinModel*) widget;\r
+        if (widget->isWindow() && !widget->parentWidget())//is a window without parent\r
+        {\r
+            CvWinModel* temp = (CvWinModel*) widget;\r
 \r
-                   if (temp->type == type_CvWindow)\r
-                   {\r
-                       CvWindow* w = (CvWindow*) temp;\r
+            if (temp->type == type_CvWindow)\r
+            {\r
+                CvWindow* w = (CvWindow*) temp;\r
                 if (w->windowTitle() == name)\r
-                       {\r
-                               window = w;\r
-                               break;\r
-                       }\r
-                   }\r
-           }\r
-    }  \r
+                {\r
+                    window = w;\r
+                    break;\r
+                }\r
+            }\r
+        }\r
+    }\r
 \r
     return window;\r
 }\r
 \r
 \r
-CvBar* icvFindBarByName(QBoxLayout* layout, QString name_bar, typeBar type)\r
+static CvBar* icvFindBarByName(QBoxLayout* layout, QString name_bar, typeBar type)\r
 {\r
     if (!layout)\r
-           return NULL;\r
+        return NULL;\r
 \r
     int stop_index = layout->layout()->count();\r
 \r
     for (int i = 0; i < stop_index; ++i)\r
     {\r
-           CvBar* t = (CvBar*) layout->layout()->itemAt(i);\r
+        CvBar* t = (CvBar*) layout->layout()->itemAt(i);\r
 \r
-           if (t->type == type && t->name_bar == name_bar)\r
-                   return t;\r
+        if (t->type == type && t->name_bar == name_bar)\r
+            return t;\r
     }\r
 \r
     return NULL;\r
 }\r
 \r
 \r
-CvTrackbar* icvFindTrackBarByName(const char* name_trackbar, const char* name_window, QBoxLayout* layout = NULL)\r
+static CvTrackbar* icvFindTrackBarByName(const char* name_trackbar, const char* name_window, QBoxLayout* layout = NULL)\r
 {\r
     QString nameQt(name_trackbar);\r
 \r
     if (!name_window && global_control_panel) //window name is null and we have a control panel\r
-           layout = global_control_panel->myLayout;\r
+        layout = global_control_panel->myLayout;\r
 \r
     if (!layout)\r
     {\r
-           QPointer<CvWindow> w = icvFindWindowByName(QLatin1String(name_window));\r
+        QPointer<CvWindow> w = icvFindWindowByName(QLatin1String(name_window));\r
 \r
-           if (!w)\r
-                   CV_Error(CV_StsNullPtr, "NULL window handler");\r
+        if (!w)\r
+            CV_Error(CV_StsNullPtr, "NULL window handler");\r
 \r
-           if (w->param_gui_mode == CV_GUI_NORMAL)\r
-                   return (CvTrackbar*) icvFindBarByName(w->myBarLayout, nameQt, type_CvTrackbar);\r
+        if (w->param_gui_mode == CV_GUI_NORMAL)\r
+            return (CvTrackbar*) icvFindBarByName(w->myBarLayout, nameQt, type_CvTrackbar);\r
 \r
-           if (w->param_gui_mode == CV_GUI_EXPANDED)\r
-           {\r
-                   CvBar* result = icvFindBarByName(w->myBarLayout, nameQt, type_CvTrackbar);\r
+        if (w->param_gui_mode == CV_GUI_EXPANDED)\r
+        {\r
+            CvBar* result = icvFindBarByName(w->myBarLayout, nameQt, type_CvTrackbar);\r
 \r
-                   if (result)\r
-                           return (CvTrackbar*) result;\r
+            if (result)\r
+                return (CvTrackbar*) result;\r
 \r
-                   return (CvTrackbar*) icvFindBarByName(global_control_panel->myLayout, nameQt, type_CvTrackbar);\r
-           }\r
+            return (CvTrackbar*) icvFindBarByName(global_control_panel->myLayout, nameQt, type_CvTrackbar);\r
+        }\r
 \r
-           return NULL;\r
+        return NULL;\r
     }\r
     else\r
     {\r
-           //layout was specified\r
-           return (CvTrackbar*) icvFindBarByName(layout, nameQt, type_CvTrackbar);\r
+        //layout was specified\r
+        return (CvTrackbar*) icvFindBarByName(layout, nameQt, type_CvTrackbar);\r
     }\r
 }\r
 \r
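The lookups made static above share one pattern: scan Qt's parentless top-level widgets and match on a stored name. A stripped-down, hedged sketch of that pattern (the CvWinModel type-tag check of the real code is elided, and the helper name is invented):

    #include <QApplication>
    #include <QWidget>

    static QWidget* findTopLevelByTitle(const QString& title)
    {
        foreach (QWidget* widget, QApplication::topLevelWidgets())
        {
            // a window without a parent whose title matches wins
            if (widget->isWindow() && !widget->parentWidget() && widget->windowTitle() == title)
                return widget;
        }
        return 0;
    }
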
-\r
-CvButtonbar* icvFindButtonBarByName(const char* button_name, QBoxLayout* layout)\r
+/*\r
+static CvButtonbar* icvFindButtonBarByName(const char* button_name, QBoxLayout* layout)\r
 {\r
     QString nameQt(button_name);\r
     return (CvButtonbar*) icvFindBarByName(layout, nameQt, type_CvButtonbar);\r
 }\r
+*/\r
 \r
-\r
-int icvInitSystem(int* c, char** v)\r
+static int icvInitSystem(int* c, char** v)\r
 {\r
     //"For any GUI application using Qt, there is precisely one QApplication object"\r
     if (!QApplication::instance())\r
     {\r
-           new QApplication(*c, v);\r
+        new QApplication(*c, v);\r
 \r
-           qDebug() << "init done";\r
+        qDebug() << "init done";\r
 \r
 #ifdef HAVE_QT_OPENGL\r
-           qDebug() << "opengl support available";\r
+        qDebug() << "opengl support available";\r
 #endif\r
     }\r
 \r
@@ -460,212 +460,212 @@ int icvInitSystem(int* c, char** v)
 \r
 CV_IMPL int cvInitSystem(int, char**)\r
 {\r
-       icvInitSystem(&parameterSystemC, parameterSystemV);\r
-       return 0;\r
+    icvInitSystem(&parameterSystemC, parameterSystemV);\r
+    return 0;\r
 }\r
 \r
 \r
 CV_IMPL int cvNamedWindow(const char* name, int flags)\r
 {\r
-       if (!guiMainThread)\r
-               guiMainThread = new GuiReceiver;\r
+    if (!guiMainThread)\r
+        guiMainThread = new GuiReceiver;\r
 \r
-       if (multiThreads)\r
-               QMetaObject::invokeMethod(guiMainThread,\r
-               "createWindow",\r
-               Qt::BlockingQueuedConnection,\r
-               Q_ARG(QString, QString(name)),\r
-               Q_ARG(int, flags));\r
-       else\r
-               guiMainThread->createWindow(QString(name), flags);\r
+    if (multiThreads)\r
+        QMetaObject::invokeMethod(guiMainThread,\r
+        "createWindow",\r
+        Qt::BlockingQueuedConnection,\r
+        Q_ARG(QString, QString(name)),\r
+        Q_ARG(int, flags));\r
+    else\r
+        guiMainThread->createWindow(QString(name), flags);\r
 \r
-       return 1; //Dummy value\r
+    return 1; //Dummy value\r
 }\r
 \r
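Most wrappers in this file follow the dispatch idiom that cvNamedWindow shows: a named slot on the GUI-thread receiver is invoked through Qt's meta-object system, with Qt::BlockingQueuedConnection when the caller runs in a worker thread and a plain direct call otherwise. A hedged, generic sketch (the receiver object and function name stand in for GuiReceiver and its createWindow slot):

    #include <QObject>
    #include <QString>

    // 'receiver' must live in the GUI thread; do not use BlockingQueuedConnection
    // from the GUI thread itself, or the call deadlocks (hence the multiThreads check above).
    static void requestWindow(QObject* receiver, const char* name, int flags)
    {
        QMetaObject::invokeMethod(receiver, "createWindow",
            Qt::BlockingQueuedConnection,
            Q_ARG(QString, QString(name)),
            Q_ARG(int, flags));
    }
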
 \r
 CV_IMPL void cvDestroyWindow(const char* name)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "destroyWindow",\r
-               //Qt::BlockingQueuedConnection,\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(name)));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "destroyWindow",\r
+        //Qt::BlockingQueuedConnection,\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(name)));\r
 }\r
 \r
 \r
 CV_IMPL void cvDestroyAllWindows()\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "destroyAllWindow",\r
-               //Qt::BlockingQueuedConnection,\r
-               Qt::AutoConnection);\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "destroyAllWindow",\r
+        //Qt::BlockingQueuedConnection,\r
+        Qt::AutoConnection);\r
 }\r
 \r
 \r
 CV_IMPL void* cvGetWindowHandle(const char* name)\r
 {\r
-       if (!name)\r
-               CV_Error( CV_StsNullPtr, "NULL name string" );\r
+    if (!name)\r
+        CV_Error( CV_StsNullPtr, "NULL name string" );\r
 \r
-       return (void*) icvFindWindowByName(QLatin1String(name));\r
+    return (void*) icvFindWindowByName(QLatin1String(name));\r
 }\r
 \r
 \r
 CV_IMPL const char* cvGetWindowName(void* window_handle)\r
 {\r
-       if( !window_handle )\r
-               CV_Error( CV_StsNullPtr, "NULL window handler" );\r
+    if( !window_handle )\r
+        CV_Error( CV_StsNullPtr, "NULL window handler" );\r
 \r
-       return ((CvWindow*)window_handle)->windowTitle().toLatin1().data();\r
+    return ((CvWindow*)window_handle)->windowTitle().toLatin1().data();\r
 }\r
 \r
 \r
 CV_IMPL void cvMoveWindow(const char* name, int x, int y)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "moveWindow",\r
-               //Qt::BlockingQueuedConnection,\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(name)),\r
-               Q_ARG(int, x),\r
-               Q_ARG(int, y));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "moveWindow",\r
+        //Qt::BlockingQueuedConnection,\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(name)),\r
+        Q_ARG(int, x),\r
+        Q_ARG(int, y));\r
 }\r
 \r
 \r
 CV_IMPL void cvResizeWindow(const char* name, int width, int height)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "resizeWindow",\r
-               //Qt::BlockingQueuedConnection,\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(name)),\r
-               Q_ARG(int, width),\r
-               Q_ARG(int, height));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "resizeWindow",\r
+        //Qt::BlockingQueuedConnection,\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(name)),\r
+        Q_ARG(int, width),\r
+        Q_ARG(int, height));\r
 }\r
 \r
 \r
 CV_IMPL int cvCreateTrackbar2(const char* name_bar, const char* window_name, int* val, int count, CvTrackbarCallback2 on_notify, void* userdata)\r
 {\r
-       if (!guiMainThread) \r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" ); \r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread, \r
-               "addSlider2", \r
-               Qt::AutoConnection, \r
-               Q_ARG(QString, QString(name_bar)), \r
-               Q_ARG(QString, QString(window_name)), \r
-               Q_ARG(void*, (void*)val), \r
-               Q_ARG(int, count), \r
-               Q_ARG(void*, (void*)on_notify), \r
-               Q_ARG(void*, (void*)userdata)); \r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "addSlider2",\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(name_bar)),\r
+        Q_ARG(QString, QString(window_name)),\r
+        Q_ARG(void*, (void*)val),\r
+        Q_ARG(int, count),\r
+        Q_ARG(void*, (void*)on_notify),\r
+        Q_ARG(void*, (void*)userdata));\r
 \r
-       return 1; //dummy value \r
+    return 1; //dummy value\r
 }\r
 \r
 \r
 CV_IMPL int cvStartWindowThread()\r
 {\r
-       return 0;\r
+    return 0;\r
 }\r
 \r
 \r
 CV_IMPL int cvCreateTrackbar(const char* name_bar, const char* window_name, int* value, int count, CvTrackbarCallback on_change)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "addSlider",\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(name_bar)),\r
-               Q_ARG(QString, QString(window_name)),\r
-               Q_ARG(void*, (void*)value),\r
-               Q_ARG(int, count),\r
-               Q_ARG(void*, (void*)on_change));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "addSlider",\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(name_bar)),\r
+        Q_ARG(QString, QString(window_name)),\r
+        Q_ARG(void*, (void*)value),\r
+        Q_ARG(int, count),\r
+        Q_ARG(void*, (void*)on_change));\r
 \r
-       return 1; //dummy value\r
+    return 1; //dummy value\r
 }\r
 \r
 \r
 CV_IMPL int cvCreateButton(const char* button_name, CvButtonCallback on_change, void* userdata, int button_type, int initial_button_state)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       if (initial_button_state < 0 || initial_button_state > 1)\r
-               return 0;\r
+    if (initial_button_state < 0 || initial_button_state > 1)\r
+        return 0;\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "addButton",\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(button_name)),\r
-               Q_ARG(int,  button_type),\r
-               Q_ARG(int, initial_button_state),\r
-               Q_ARG(void*, (void*)on_change),\r
-               Q_ARG(void*, userdata));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "addButton",\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(button_name)),\r
+        Q_ARG(int,  button_type),\r
+        Q_ARG(int, initial_button_state),\r
+        Q_ARG(void*, (void*)on_change),\r
+        Q_ARG(void*, userdata));\r
 \r
-       return 1;//dummy value\r
+    return 1;//dummy value\r
 }\r
 \r
 \r
 CV_IMPL int cvGetTrackbarPos(const char* name_bar, const char* window_name)\r
 {\r
-       int result = -1;\r
+    int result = -1;\r
 \r
-       QPointer<CvTrackbar> t = icvFindTrackBarByName(name_bar, window_name);\r
+    QPointer<CvTrackbar> t = icvFindTrackBarByName(name_bar, window_name);\r
 \r
-       if (t)\r
-               result = t->slider->value();\r
+    if (t)\r
+        result = t->slider->value();\r
 \r
-       return result;\r
+    return result;\r
 }\r
 \r
 \r
 CV_IMPL void cvSetTrackbarPos(const char* name_bar, const char* window_name, int pos)\r
 {\r
-       QPointer<CvTrackbar> t = icvFindTrackBarByName(name_bar, window_name);\r
+    QPointer<CvTrackbar> t = icvFindTrackBarByName(name_bar, window_name);\r
 \r
-       if (t)\r
-               t->slider->setValue(pos);\r
+    if (t)\r
+        t->slider->setValue(pos);\r
 }\r
 \r
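Taken together, the trackbar and button entry points above can be exercised with a few lines of client code. A hedged sketch (callback names, window title and ranges are invented; cvCreateButton is Qt-only and places the button on the global control panel):

    #include <opencv2/highgui/highgui.hpp>
    #include <cstdio>

    static int g_threshold = 50;

    static void onThreshold(int pos, void*) { std::printf("threshold = %d\n", pos); }
    static void onEnable(int state, void*)  { std::printf("enabled = %d\n", state); }

    int main()
    {
        cvNamedWindow("controls");
        cvCreateTrackbar2("threshold", "controls", &g_threshold, 255, onThreshold, 0);
        cvCreateButton("enable", onEnable, 0, CV_CHECKBOX, 1);
        cvSetTrackbarPos("threshold", "controls", 128);
        std::printf("pos = %d\n", cvGetTrackbarPos("threshold", "controls"));
        cvWaitKey(0);
        cvDestroyAllWindows();
        return 0;
    }
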
 \r
 /* assign callback for mouse events */\r
 CV_IMPL void cvSetMouseCallback(const char* window_name, CvMouseCallback on_mouse, void* param)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(QLatin1String(window_name));\r
+    QPointer<CvWindow> w = icvFindWindowByName(QLatin1String(window_name));\r
 \r
-       if (!w)\r
-               CV_Error(CV_StsNullPtr, "NULL window handler");\r
+    if (!w)\r
+        CV_Error(CV_StsNullPtr, "NULL window handler");\r
 \r
-       w->setMouseCallBack(on_mouse, param);\r
+    w->setMouseCallBack(on_mouse, param);\r
 \r
 }\r
 \r
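A minimal hedged example of the mouse hookup performed by cvSetMouseCallback above (handler name and window title are arbitrary):

    #include <opencv2/highgui/highgui.hpp>
    #include <cstdio>

    static void onMouse(int event, int x, int y, int /*flags*/, void* /*param*/)
    {
        if (event == CV_EVENT_LBUTTONDOWN)
            std::printf("left click at (%d, %d)\n", x, y);
    }

    int main()
    {
        cvNamedWindow("demo");
        cvSetMouseCallback("demo", onMouse, 0);
        cvWaitKey(0);
        cvDestroyWindow("demo");
        return 0;
    }
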
 \r
 CV_IMPL void cvShowImage(const char* name, const CvArr* arr)\r
 {\r
-       if (!guiMainThread)\r
-               guiMainThread = new GuiReceiver;\r
+    if (!guiMainThread)\r
+        guiMainThread = new GuiReceiver;\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "showImage",\r
-               //Qt::BlockingQueuedConnection,\r
-               Qt::DirectConnection,\r
-               Q_ARG(QString, QString(name)),\r
-               Q_ARG(void*, (void*)arr));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "showImage",\r
+        //Qt::BlockingQueuedConnection,\r
+        Qt::DirectConnection,\r
+        Q_ARG(QString, QString(name)),\r
+        Q_ARG(void*, (void*)arr));\r
 }\r
 \r
 \r
@@ -673,53 +673,53 @@ CV_IMPL void cvShowImage(const char* name, const CvArr* arr)
 \r
 CV_IMPL void cvSetOpenGlDrawCallback(const char* window_name, CvOpenGlDrawCallback callback, void* userdata)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "setOpenGlDrawCallback",\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(window_name)),\r
-               Q_ARG(void*, (void*)callback),\r
-               Q_ARG(void*, userdata));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "setOpenGlDrawCallback",\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(window_name)),\r
+        Q_ARG(void*, (void*)callback),\r
+        Q_ARG(void*, userdata));\r
 }\r
 \r
 \r
 void icvSetOpenGlCleanCallback(const char* window_name, CvOpenGlCleanCallback callback, void* userdata)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "setOpenGlCleanCallback",\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(window_name)),\r
-               Q_ARG(void*, (void*)callback),\r
-               Q_ARG(void*, userdata));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "setOpenGlCleanCallback",\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(window_name)),\r
+        Q_ARG(void*, (void*)callback),\r
+        Q_ARG(void*, userdata));\r
 }\r
 \r
 \r
 CV_IMPL void cvSetOpenGlContext(const char* window_name)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "setOpenGlContext",\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(window_name)));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "setOpenGlContext",\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(window_name)));\r
 }\r
 \r
 \r
 CV_IMPL void cvUpdateWindow(const char* window_name)\r
 {\r
-       if (!guiMainThread)\r
-               CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
+    if (!guiMainThread)\r
+        CV_Error( CV_StsNullPtr, "NULL guiReceiver (please create a window)" );\r
 \r
-       QMetaObject::invokeMethod(guiMainThread,\r
-               "updateWindow",\r
-               Qt::AutoConnection,\r
-               Q_ARG(QString, QString(window_name)));\r
+    QMetaObject::invokeMethod(guiMainThread,\r
+        "updateWindow",\r
+        Qt::AutoConnection,\r
+        Q_ARG(QString, QString(window_name)));\r
 }\r
 \r
 #endif\r
@@ -727,18 +727,18 @@ CV_IMPL void cvUpdateWindow(const char* window_name)
 \r
 double cvGetOpenGlProp_QT(const char* name)\r
 {\r
-       double result = -1;\r
+    double result = -1;\r
 \r
-       if (guiMainThread)\r
+    if (guiMainThread)\r
     {\r
-           QMetaObject::invokeMethod(guiMainThread,\r
-                   "isOpenGl",\r
-                   Qt::AutoConnection,\r
-                   Q_RETURN_ARG(double, result),\r
-                   Q_ARG(QString, QString(name)));\r
+        QMetaObject::invokeMethod(guiMainThread,\r
+            "isOpenGl",\r
+            Qt::AutoConnection,\r
+            Q_RETURN_ARG(double, result),\r
+            Q_ARG(QString, QString(name)));\r
     }\r
 \r
-       return result;\r
+    return result;\r
 }\r
 \r
 \r
@@ -748,108 +748,108 @@ double cvGetOpenGlProp_QT(const char* name)
 \r
 GuiReceiver::GuiReceiver() : bTimeOut(false), nb_windows(0)\r
 {\r
-       doesExternalQAppExist = (QApplication::instance() != 0);\r
-       icvInitSystem(&parameterSystemC, parameterSystemV);\r
+    doesExternalQAppExist = (QApplication::instance() != 0);\r
+    icvInitSystem(&parameterSystemC, parameterSystemV);\r
 \r
-       timer = new QTimer(this);\r
-       QObject::connect(timer, SIGNAL(timeout()), this, SLOT(timeOut()));\r
-       timer->setSingleShot(true);\r
+    timer = new QTimer(this);\r
+    QObject::connect(timer, SIGNAL(timeout()), this, SLOT(timeOut()));\r
+    timer->setSingleShot(true);\r
 }\r
 \r
 \r
 void GuiReceiver::isLastWindow()\r
 {\r
-       if (--nb_windows <= 0)\r
-       {\r
-               delete guiMainThread;//delete global_control_panel too\r
-               guiMainThread = NULL;\r
+    if (--nb_windows <= 0)\r
+    {\r
+        delete guiMainThread;//delete global_control_panel too\r
+        guiMainThread = NULL;\r
 \r
-               if (!doesExternalQAppExist)\r
-               {\r
-                       qApp->quit();\r
-               }\r
-       }\r
+        if (!doesExternalQAppExist)\r
+        {\r
+            qApp->quit();\r
+        }\r
+    }\r
 }\r
 \r
 \r
 GuiReceiver::~GuiReceiver()\r
-{      \r
-       if (global_control_panel)\r
-       {\r
-               delete global_control_panel;\r
-               global_control_panel = NULL;\r
-       }\r
+{\r
+    if (global_control_panel)\r
+    {\r
+        delete global_control_panel;\r
+        global_control_panel = NULL;\r
+    }\r
 }\r
 \r
 \r
 void GuiReceiver::putText(void* arr, QString text, QPoint org, void* arg2)\r
 {\r
-       CV_Assert(arr);\r
+    CV_Assert(arr);\r
 \r
-       CvMat* mat, stub;\r
-       mat = cvGetMat(arr, &stub);\r
+    CvMat* mat, stub;\r
+    mat = cvGetMat(arr, &stub);\r
 \r
-       int nbChannelOriginImage = cvGetElemType(mat);\r
-       if (nbChannelOriginImage != CV_8UC3) return; //for now, font works only with 8UC3\r
+    int nbChannelOriginImage = cvGetElemType(mat);\r
+    if (nbChannelOriginImage != CV_8UC3) return; //for now, font works only with 8UC3\r
 \r
-       QImage qimg(mat->data.ptr, mat->cols, mat->rows, mat->step, QImage::Format_RGB888);\r
+    QImage qimg(mat->data.ptr, mat->cols, mat->rows, mat->step, QImage::Format_RGB888);\r
 \r
-       CvFont* font = (CvFont*)arg2;\r
+    CvFont* font = (CvFont*)arg2;\r
 \r
-       QPainter qp(&qimg);\r
-       if (font)\r
-       {\r
-               QFont f(font->nameFont, font->line_type/*PointSize*/, font->thickness/*weight*/);\r
-               f.setStyle((QFont::Style) font->font_face/*style*/);\r
-               f.setLetterSpacing(QFont::AbsoluteSpacing, font->dx/*spacing*/);\r
-               //cvScalar(blue_component, green_component, red_component[, alpha_component])\r
-               //Qt map non-transparent to 0xFF and transparent to 0\r
-               //OpenCV scalar is the reverse, so 255-font->color.val[3]\r
-               qp.setPen(QColor(font->color.val[2], font->color.val[1], font->color.val[0], 255 - font->color.val[3]));\r
-               qp.setFont(f);\r
-       }\r
-       qp.drawText(org, text);\r
-       qp.end();\r
+    QPainter qp(&qimg);\r
+    if (font)\r
+    {\r
+        QFont f(font->nameFont, font->line_type/*PointSize*/, font->thickness/*weight*/);\r
+        f.setStyle((QFont::Style) font->font_face/*style*/);\r
+        f.setLetterSpacing(QFont::AbsoluteSpacing, font->dx/*spacing*/);\r
+        //cvScalar(blue_component, green_component, red_component[, alpha_component])\r
+        //Qt maps opaque to 0xFF and transparent to 0;\r
+        //the OpenCV scalar is the reverse, hence 255 - font->color.val[3]\r
+        qp.setPen(QColor(font->color.val[2], font->color.val[1], font->color.val[0], 255 - font->color.val[3]));\r
+        qp.setFont(f);\r
+    }\r
+    qp.drawText(org, text);\r
+    qp.end();\r
 }\r
 \r
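GuiReceiver::putText above is the Qt-side worker behind the Qt-only text API (cvFontQt/cvAddText). A hedged sketch of how it is typically reached; note the restriction in the code that only CV_8UC3 images are handled for now:

    #include <opencv2/highgui/highgui.hpp>

    int main()
    {
        IplImage* img = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 3);  // must be 8UC3
        cvZero(img);
        CvFont font = cvFontQt("Times", 20, cvScalar(255, 255, 255));      // white; alpha 0 -> fully opaque in Qt
        cvAddText(img, "Hello Qt", cvPoint(20, 120), &font);
        cvNamedWindow("text");
        cvShowImage("text", img);
        cvWaitKey(0);
        cvReleaseImage(&img);
        cvDestroyWindow("text");
        return 0;
    }
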
 \r
 void GuiReceiver::saveWindowParameters(QString name)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (w)\r
-               w->writeSettings();\r
+    if (w)\r
+        w->writeSettings();\r
 }\r
 \r
 \r
 void GuiReceiver::loadWindowParameters(QString name)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (w)\r
-               w->readSettings();\r
+    if (w)\r
+        w->readSettings();\r
 }\r
 \r
 \r
 double GuiReceiver::getRatioWindow(QString name)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (!w)\r
-               return -1;\r
+    if (!w)\r
+        return -1;\r
 \r
-       return w->getRatio();\r
+    return w->getRatio();\r
 }\r
 \r
 \r
 void GuiReceiver::setRatioWindow(QString name, double arg2)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName( name.toLatin1().data() );\r
+    QPointer<CvWindow> w = icvFindWindowByName( name.toLatin1().data() );\r
+\r
+    if (!w)\r
+        return;\r
 \r
-       if (!w)\r
-               return;\r
-    \r
-       int flags = (int) arg2;\r
+    int flags = (int) arg2;\r
 \r
     w->setRatio(flags);\r
 }\r
@@ -857,23 +857,23 @@ void GuiReceiver::setRatioWindow(QString name, double arg2)
 \r
 double GuiReceiver::getPropWindow(QString name)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (!w)\r
-               return -1;\r
+    if (!w)\r
+        return -1;\r
 \r
-       return (double) w->getPropWindow();\r
+    return (double) w->getPropWindow();\r
 }\r
 \r
 \r
 void GuiReceiver::setPropWindow(QString name, double arg2)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (!w)\r
-               return;\r
+    if (!w)\r
+        return;\r
 \r
-       int flags = (int) arg2;\r
+    int flags = (int) arg2;\r
 \r
     w->setPropWindow(flags);\r
 }\r
@@ -881,10 +881,10 @@ void GuiReceiver::setPropWindow(QString name, double arg2)
 \r
 double GuiReceiver::isFullScreen(QString name)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (!w)\r
-               return -1;\r
+    if (!w)\r
+        return -1;\r
 \r
     return w->isFullScreen() ? CV_WINDOW_FULLSCREEN : CV_WINDOW_NORMAL;\r
 }\r
@@ -892,12 +892,12 @@ double GuiReceiver::isFullScreen(QString name)
 \r
 void GuiReceiver::toggleFullScreen(QString name, double arg2)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (!w)\r
-               return;\r
+    if (!w)\r
+        return;\r
 \r
-       int flags = (int) arg2;\r
+    int flags = (int) arg2;\r
 \r
     w->toggleFullScreen(flags);\r
 }\r
@@ -905,53 +905,53 @@ void GuiReceiver::toggleFullScreen(QString name, double arg2)
 \r
 void GuiReceiver::createWindow(QString name, int flags)\r
 {\r
-       if (!qApp)\r
-               CV_Error(CV_StsNullPtr, "NULL session handler" );\r
+    if (!qApp)\r
+        CV_Error(CV_StsNullPtr, "NULL session handler" );\r
 \r
-       // Check the name in the storage\r
-       if (icvFindWindowByName(name.toLatin1().data()))\r
-       {\r
-               return;\r
-       }\r
+    // Check the name in the storage\r
+    if (icvFindWindowByName(name.toLatin1().data()))\r
+    {\r
+        return;\r
+    }\r
 \r
-       nb_windows++;\r
-       new CvWindow(name, flags);\r
+    nb_windows++;\r
+    new CvWindow(name, flags);\r
 }\r
 \r
 \r
 void GuiReceiver::timeOut()\r
 {\r
-       bTimeOut = true;\r
+    bTimeOut = true;\r
 }\r
 \r
 \r
 void GuiReceiver::displayInfo(QString name, QString text, int delayms)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (w)\r
-               w->displayInfo(text, delayms);\r
+    if (w)\r
+        w->displayInfo(text, delayms);\r
 }\r
 \r
 \r
 void GuiReceiver::displayStatusBar(QString name, QString text, int delayms)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (w)\r
-               w->displayStatusBar(text, delayms);\r
+    if (w)\r
+        w->displayStatusBar(text, delayms);\r
 }\r
 \r
 \r
 void GuiReceiver::showImage(QString name, void* arr)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (!w) //as observed in the previous implementation (W32, GTK or Carbon), create a new window is the pointer returned is null\r
-       {\r
-               cvNamedWindow(name.toLatin1().data());\r
-               w = icvFindWindowByName(name);\r
-       }\r
+    if (!w) //as observed in the previous implementations (W32, GTK or Carbon), create a new window if the pointer returned is null\r
+    {\r
+        cvNamedWindow(name.toLatin1().data());\r
+        w = icvFindWindowByName(name);\r
+    }\r
 \r
     if (!w || !arr)\r
         return; // keep silence here.\r
@@ -960,253 +960,253 @@ void GuiReceiver::showImage(QString name, void* arr)
     {\r
         CvMat* mat, stub;\r
 \r
-           mat = cvGetMat(arr, &stub);\r
+        mat = cvGetMat(arr, &stub);\r
 \r
         cv::Mat im(mat);\r
         cv::imshow(name.toStdString(), im);\r
     }\r
     else\r
     {\r
-               w->updateImage(arr);\r
+        w->updateImage(arr);\r
     }\r
 \r
-       if (w->isHidden())\r
-               w->show();\r
+    if (w->isHidden())\r
+        w->show();\r
 }\r
 \r
 \r
 void GuiReceiver::destroyWindow(QString name)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (w)\r
-       {\r
-               w->close();\r
+    if (w)\r
+    {\r
+        w->close();\r
 \r
-               //in not-multiThreads mode, looks like the window is hidden but not deleted\r
-               //so I do it manually\r
-               //otherwise QApplication do it for me if the exec command was executed (in multiThread mode)\r
-               if (!multiThreads)\r
-                       delete w;\r
-       }\r
+        //in single-threaded mode the window appears to be hidden but not deleted,\r
+        //so delete it manually here;\r
+        //otherwise QApplication does it once exec() has been called (multi-threaded mode)\r
+        if (!multiThreads)\r
+            delete w;\r
+    }\r
 }\r
 \r
 \r
 void GuiReceiver::destroyAllWindow()\r
 {\r
-       if (!qApp)\r
-               CV_Error(CV_StsNullPtr, "NULL session handler" );\r
-\r
-       if (multiThreads)\r
-       {\r
-               // WARNING: this could even close windows from an external parent app\r
-               //#TODO check externalQAppExists and in case it does, close windows carefully,\r
-               //      i.e. apply the className-check from below...\r
-               qApp->closeAllWindows();\r
-       }\r
-       else\r
-       {\r
-               bool isWidgetDeleted = true;\r
-               while(isWidgetDeleted)\r
-               {\r
-                       isWidgetDeleted = false;\r
-                       QWidgetList list = QApplication::topLevelWidgets();\r
-                       for (int i = 0; i < list.count(); i++)\r
-                       {\r
-                               QObject *obj = list.at(i);\r
-                               if (obj->metaObject()->className() == QString("CvWindow"))\r
-                               {\r
-                                       delete obj;\r
-                                       isWidgetDeleted = true;\r
-                                       break;\r
-                               }\r
-                       }\r
-               }\r
-       }\r
+    if (!qApp)\r
+        CV_Error(CV_StsNullPtr, "NULL session handler" );\r
+\r
+    if (multiThreads)\r
+    {\r
+        // WARNING: this could even close windows from an external parent app\r
+        //#TODO check externalQAppExists and in case it does, close windows carefully,\r
+        //      i.e. apply the className-check from below...\r
+        qApp->closeAllWindows();\r
+    }\r
+    else\r
+    {\r
+        bool isWidgetDeleted = true;\r
+        while(isWidgetDeleted)\r
+        {\r
+            isWidgetDeleted = false;\r
+            QWidgetList list = QApplication::topLevelWidgets();\r
+            for (int i = 0; i < list.count(); i++)\r
+            {\r
+                QObject *obj = list.at(i);\r
+                if (obj->metaObject()->className() == QString("CvWindow"))\r
+                {\r
+                    delete obj;\r
+                    isWidgetDeleted = true;\r
+                    break;\r
+                }\r
+            }\r
+        }\r
+    }\r
 }\r
 \r
 \r
 void GuiReceiver::moveWindow(QString name, int x, int y)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (w)\r
-               w->move(x, y);\r
+    if (w)\r
+        w->move(x, y);\r
 }\r
 \r
 \r
 void GuiReceiver::resizeWindow(QString name, int width, int height)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (w)\r
-       {\r
-               w->showNormal();\r
+    if (w)\r
+    {\r
+        w->showNormal();\r
         w->setViewportSize(QSize(width, height));\r
-       }\r
+    }\r
 }\r
 \r
 \r
 void GuiReceiver::enablePropertiesButtonEachWindow()\r
 {\r
-       //For each window, enable window property button\r
-       foreach (QWidget* widget, QApplication::topLevelWidgets())\r
-       {\r
-               if (widget->isWindow() && !widget->parentWidget()) //is a window without parent\r
-               {\r
-                       CvWinModel* temp = (CvWinModel*) widget;\r
-                       if (temp->type == type_CvWindow)\r
-                       {\r
-                               CvWindow* w = (CvWindow*) widget;\r
+    //For each window, enable window property button\r
+    foreach (QWidget* widget, QApplication::topLevelWidgets())\r
+    {\r
+        if (widget->isWindow() && !widget->parentWidget()) //is a window without parent\r
+        {\r
+            CvWinModel* temp = (CvWinModel*) widget;\r
+            if (temp->type == type_CvWindow)\r
+            {\r
+                CvWindow* w = (CvWindow*) widget;\r
 \r
-                               //active window properties button\r
-                               w->enablePropertiesButton();\r
-                       }\r
-               }\r
-       }\r
+                //active window properties button\r
+                w->enablePropertiesButton();\r
+            }\r
+        }\r
+    }\r
 }\r
 \r
 \r
 void GuiReceiver::addButton(QString button_name, int button_type, int initial_button_state, void* on_change, void* userdata)\r
 {\r
-       if (!global_control_panel)\r
-               return;\r
+    if (!global_control_panel)\r
+        return;\r
 \r
-       QPointer<CvButtonbar> b;\r
+    QPointer<CvButtonbar> b;\r
 \r
-       if (global_control_panel->myLayout->count() == 0) //if that is the first button attach to the control panel, create a new button bar\r
-       {\r
-               b = CvWindow::createButtonBar(button_name); //the bar has the name of the first button attached to it\r
-               enablePropertiesButtonEachWindow();\r
+    if (global_control_panel->myLayout->count() == 0) //if that is the first button attached to the control panel, create a new button bar\r
+    {\r
+        b = CvWindow::createButtonBar(button_name); //the bar has the name of the first button attached to it\r
+        enablePropertiesButtonEachWindow();\r
 \r
-       }\r
+    }\r
     else\r
     {\r
-               CvBar* lastbar = (CvBar*) global_control_panel->myLayout->itemAt(global_control_panel->myLayout->count() - 1);\r
+        CvBar* lastbar = (CvBar*) global_control_panel->myLayout->itemAt(global_control_panel->myLayout->count() - 1);\r
 \r
-               if (lastbar->type == type_CvTrackbar) //if last bar is a trackbar, create a new buttonbar, else, attach to the current bar\r
-                       b = CvWindow::createButtonBar(button_name); //the bar has the name of the first button attached to it\r
-               else\r
-                       b = (CvButtonbar*) lastbar;\r
+        if (lastbar->type == type_CvTrackbar) //if last bar is a trackbar, create a new buttonbar, else, attach to the current bar\r
+            b = CvWindow::createButtonBar(button_name); //the bar has the name of the first button attached to it\r
+        else\r
+            b = (CvButtonbar*) lastbar;\r
 \r
-       }\r
+    }\r
 \r
-       b->addButton(button_name, (CvButtonCallback) on_change, userdata, button_type, initial_button_state);\r
+    b->addButton(button_name, (CvButtonCallback) on_change, userdata, button_type, initial_button_state);\r
 }\r
 \r
 \r
 void GuiReceiver::addSlider2(QString bar_name, QString window_name, void* value, int count, void* on_change, void *userdata)\r
 {\r
-       QBoxLayout *layout = NULL;\r
-       QPointer<CvWindow> w;\r
+    QBoxLayout *layout = NULL;\r
+    QPointer<CvWindow> w;\r
 \r
     if (!window_name.isEmpty())\r
-       {\r
-               w = icvFindWindowByName(window_name);\r
+    {\r
+        w = icvFindWindowByName(window_name);\r
 \r
-               if (!w)\r
-                       return;\r
-       }\r
+        if (!w)\r
+            return;\r
+    }\r
     else\r
     {\r
-               if (global_control_panel)\r
-                       layout = global_control_panel->myLayout;\r
-       }\r
+        if (global_control_panel)\r
+            layout = global_control_panel->myLayout;\r
+    }\r
 \r
-       QPointer<CvTrackbar> t = icvFindTrackBarByName(bar_name.toLatin1().data(), window_name.toLatin1().data(), layout);\r
+    QPointer<CvTrackbar> t = icvFindTrackBarByName(bar_name.toLatin1().data(), window_name.toLatin1().data(), layout);\r
 \r
-       if (t) //trackbar exists\r
-               return;\r
+    if (t) //trackbar exists\r
+        return;\r
 \r
-       if (!value)\r
-               CV_Error(CV_StsNullPtr, "NULL value pointer" );\r
+    if (!value)\r
+        CV_Error(CV_StsNullPtr, "NULL value pointer" );\r
 \r
-       if (count <= 0) //count is the max value of the slider, so must be bigger than 0\r
-               CV_Error(CV_StsNullPtr, "Max value of the slider must be bigger than 0" );\r
+    if (count <= 0) //count is the max value of the slider, so must be bigger than 0\r
+        CV_Error(CV_StsNullPtr, "Max value of the slider must be bigger than 0" );\r
 \r
-       CvWindow::addSlider2(w, bar_name, (int*)value, count, (CvTrackbarCallback2) on_change, userdata);\r
+    CvWindow::addSlider2(w, bar_name, (int*)value, count, (CvTrackbarCallback2) on_change, userdata);\r
 }\r
 \r
 \r
 void GuiReceiver::addSlider(QString bar_name, QString window_name, void* value, int count, void* on_change)\r
 {\r
-       QBoxLayout *layout = NULL;\r
-       QPointer<CvWindow> w;\r
+    QBoxLayout *layout = NULL;\r
+    QPointer<CvWindow> w;\r
 \r
-       if (!window_name.isEmpty())\r
-       {\r
-               w = icvFindWindowByName(window_name);\r
+    if (!window_name.isEmpty())\r
+    {\r
+        w = icvFindWindowByName(window_name);\r
 \r
-               if (!w)\r
-                       return;\r
-       }\r
+        if (!w)\r
+            return;\r
+    }\r
     else\r
     {\r
-               if (global_control_panel)\r
-                       layout = global_control_panel->myLayout;\r
-       }\r
+        if (global_control_panel)\r
+            layout = global_control_panel->myLayout;\r
+    }\r
 \r
-       QPointer<CvTrackbar> t = icvFindTrackBarByName(bar_name.toLatin1().data(), window_name.toLatin1().data(), layout);\r
+    QPointer<CvTrackbar> t = icvFindTrackBarByName(bar_name.toLatin1().data(), window_name.toLatin1().data(), layout);\r
 \r
-       if (t) //trackbar exists\r
-               return;\r
+    if (t) //trackbar exists\r
+        return;\r
 \r
-       if (!value)\r
-               CV_Error(CV_StsNullPtr, "NULL value pointer" );\r
+    if (!value)\r
+        CV_Error(CV_StsNullPtr, "NULL value pointer" );\r
 \r
-       if (count <= 0) //count is the max value of the slider, so must be bigger than 0\r
-               CV_Error(CV_StsNullPtr, "Max value of the slider must be bigger than 0" );\r
+    if (count <= 0) //count is the max value of the slider, so must be bigger than 0\r
+        CV_Error(CV_StsNullPtr, "Max value of the slider must be bigger than 0" );\r
 \r
-       CvWindow::addSlider(w, bar_name, (int*)value, count, (CvTrackbarCallback) on_change);\r
+    CvWindow::addSlider(w, bar_name, (int*)value, count, (CvTrackbarCallback) on_change);\r
 }\r
 \r
 \r
 int GuiReceiver::start()\r
 {\r
-       return qApp->exec();\r
+    return qApp->exec();\r
 }\r
 \r
 \r
 void GuiReceiver::setOpenGlDrawCallback(QString name, void* callback, void* userdata)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (w)\r
-               w->setOpenGlDrawCallback((CvOpenGlDrawCallback) callback, userdata);\r
+    if (w)\r
+        w->setOpenGlDrawCallback((CvOpenGlDrawCallback) callback, userdata);\r
 }\r
 \r
 void GuiReceiver::setOpenGlCleanCallback(QString name, void* callback, void* userdata)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (w)\r
-               w->setOpenGlCleanCallback((CvOpenGlCleanCallback) callback, userdata);\r
+    if (w)\r
+        w->setOpenGlCleanCallback((CvOpenGlCleanCallback) callback, userdata);\r
 }\r
 \r
 void GuiReceiver::setOpenGlContext(QString name)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (w)\r
-               w->makeCurrentOpenGlContext();\r
+    if (w)\r
+        w->makeCurrentOpenGlContext();\r
 }\r
 \r
 void GuiReceiver::updateWindow(QString name)\r
 {\r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (w)\r
-               w->updateGl();\r
+    if (w)\r
+        w->updateGl();\r
 }\r
 \r
 double GuiReceiver::isOpenGl(QString name)\r
 {\r
     double result = -1;\r
 \r
-       QPointer<CvWindow> w = icvFindWindowByName(name);\r
+    QPointer<CvWindow> w = icvFindWindowByName(name);\r
 \r
-       if (w)\r
-               result = (double) w->isOpenGl();\r
+    if (w)\r
+        result = (double) w->isOpenGl();\r
 \r
     return result;\r
 }\r
@@ -1218,125 +1218,125 @@ double GuiReceiver::isOpenGl(QString name)
 \r
 CvTrackbar::CvTrackbar(CvWindow* arg, QString name, int* value, int count, CvTrackbarCallback2 on_change, void* data)\r
 {\r
-       callback = NULL;\r
-       callback2 = on_change;\r
-       userdata = data;\r
+    callback = NULL;\r
+    callback2 = on_change;\r
+    userdata = data;\r
 \r
-       create(arg, name, value, count);\r
+    create(arg, name, value, count);\r
 }\r
 \r
 \r
 CvTrackbar::CvTrackbar(CvWindow* arg, QString name, int* value, int count, CvTrackbarCallback on_change)\r
 {\r
-       callback = on_change;\r
-       callback2 = NULL;\r
-       userdata = NULL;\r
+    callback = on_change;\r
+    callback2 = NULL;\r
+    userdata = NULL;\r
 \r
-       create(arg, name, value, count);\r
+    create(arg, name, value, count);\r
 }\r
 \r
 \r
 void CvTrackbar::create(CvWindow* arg, QString name, int* value, int count)\r
 {\r
-       type = type_CvTrackbar;\r
-       myparent = arg;\r
-       name_bar = name;\r
-       setObjectName(name_bar);\r
-       dataSlider = value;\r
+    type = type_CvTrackbar;\r
+    myparent = arg;\r
+    name_bar = name;\r
+    setObjectName(name_bar);\r
+    dataSlider = value;\r
 \r
-       slider = new QSlider(Qt::Horizontal);\r
-       slider->setFocusPolicy(Qt::StrongFocus);\r
-       slider->setMinimum(0);\r
-       slider->setMaximum(count);\r
-       slider->setPageStep(5);\r
-       slider->setValue(*value);\r
-       slider->setTickPosition(QSlider::TicksBelow);\r
+    slider = new QSlider(Qt::Horizontal);\r
+    slider->setFocusPolicy(Qt::StrongFocus);\r
+    slider->setMinimum(0);\r
+    slider->setMaximum(count);\r
+    slider->setPageStep(5);\r
+    slider->setValue(*value);\r
+    slider->setTickPosition(QSlider::TicksBelow);\r
 \r
 \r
-       //Change style of the Slider\r
-       //slider->setStyleSheet(str_Trackbar_css);\r
+    //Change style of the Slider\r
+    //slider->setStyleSheet(str_Trackbar_css);\r
 \r
-       QFile qss(":/stylesheet-trackbar");\r
-       if (qss.open(QFile::ReadOnly))\r
-       {\r
-               slider->setStyleSheet(QLatin1String(qss.readAll()));\r
-               qss.close();\r
-       }\r
+    QFile qss(":/stylesheet-trackbar");\r
+    if (qss.open(QFile::ReadOnly))\r
+    {\r
+        slider->setStyleSheet(QLatin1String(qss.readAll()));\r
+        qss.close();\r
+    }\r
 \r
 \r
-       //this next line does not work if we change the style with a stylesheet, why ? (bug in QT ?)\r
-       //slider->setTickPosition(QSlider::TicksBelow);\r
-       label = new QPushButton;\r
-       label->setFlat(true);\r
-       setLabel(slider->value());\r
+    //this next line does not work if we change the style with a stylesheet, why ? (bug in QT ?)\r
+    //slider->setTickPosition(QSlider::TicksBelow);\r
+    label = new QPushButton;\r
+    label->setFlat(true);\r
+    setLabel(slider->value());\r
 \r
 \r
-       QObject::connect(slider, SIGNAL(valueChanged(int)), this, SLOT(update(int)));\r
+    QObject::connect(slider, SIGNAL(valueChanged(int)), this, SLOT(update(int)));\r
 \r
-       QObject::connect(label, SIGNAL(clicked()), this, SLOT(createDialog()));\r
+    QObject::connect(label, SIGNAL(clicked()), this, SLOT(createDialog()));\r
 \r
-       //label->setStyleSheet("QPushButton:disabled {color: black}");\r
+    //label->setStyleSheet("QPushButton:disabled {color: black}");\r
 \r
-       addWidget(label, Qt::AlignLeft);//name + value\r
-       addWidget(slider, Qt::AlignCenter);//slider\r
+    addWidget(label, Qt::AlignLeft);//name + value\r
+    addWidget(slider, Qt::AlignCenter);//slider\r
 }\r
 \r
 \r
 void CvTrackbar::createDialog()\r
 {\r
-       bool ok = false;\r
+    bool ok = false;\r
 \r
-       //crash if I access the values directly and give them to QInputDialog, so do a copy first.\r
-       int value = slider->value();\r
-       int step = slider->singleStep();\r
-       int min = slider->minimum();\r
-       int max = slider->maximum();\r
+    //crash if I access the values directly and give them to QInputDialog, so do a copy first.\r
+    int value = slider->value();\r
+    int step = slider->singleStep();\r
+    int min = slider->minimum();\r
+    int max = slider->maximum();\r
 \r
-       int i =\r
+    int i =\r
 #if QT_VERSION >= 0x040500\r
-               QInputDialog::getInt\r
+        QInputDialog::getInt\r
 #else\r
-               QInputDialog::getInteger\r
+        QInputDialog::getInteger\r
 #endif\r
-               (this->parentWidget(),\r
-               tr("Slider %1").arg(name_bar),\r
-               tr("New value:"),\r
-               value,\r
-               min,\r
-               max,\r
-               step,\r
-               &ok);\r
+        (this->parentWidget(),\r
+        tr("Slider %1").arg(name_bar),\r
+        tr("New value:"),\r
+        value,\r
+        min,\r
+        max,\r
+        step,\r
+        &ok);\r
 \r
-       if (ok)\r
-               slider->setValue(i);\r
+    if (ok)\r
+        slider->setValue(i);\r
 }\r
 \r
 \r
 void CvTrackbar::update(int myvalue)\r
 {\r
-       setLabel(myvalue);\r
+    setLabel(myvalue);\r
 \r
-       *dataSlider = myvalue;\r
-       if (callback)\r
-       {\r
-               callback(myvalue);\r
-               return;\r
-       }\r
+    *dataSlider = myvalue;\r
+    if (callback)\r
+    {\r
+        callback(myvalue);\r
+        return;\r
+    }\r
 \r
-       if (callback2)\r
-       {\r
-               callback2(myvalue, userdata);\r
-               return;\r
-       }\r
+    if (callback2)\r
+    {\r
+        callback2(myvalue, userdata);\r
+        return;\r
+    }\r
 }\r
 \r
 \r
 void CvTrackbar::setLabel(int myvalue)\r
 {\r
-       QString nameNormalized = name_bar.leftJustified( 10, ' ', true );\r
-       QString valueMaximum = QString("%1").arg(slider->maximum());\r
-       QString str = QString("%1 (%2/%3)").arg(nameNormalized).arg(myvalue,valueMaximum.length(),10,QChar('0')).arg(valueMaximum);\r
-       label->setText(str);\r
+    QString nameNormalized = name_bar.leftJustified( 10, ' ', true );\r
+    QString valueMaximum = QString("%1").arg(slider->maximum());\r
+    QString str = QString("%1 (%2/%3)").arg(nameNormalized).arg(myvalue,valueMaximum.length(),10,QChar('0')).arg(valueMaximum);\r
+    label->setText(str);\r
 }\r
 \r
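A self-contained, hedged reproduction of the label string built in CvTrackbar::setLabel above, for readers who want to see the padding behaviour in isolation (function name and sample values are invented):

    #include <QChar>
    #include <QString>

    static QString trackbarLabel(const QString& name, int value, int maximum)
    {
        QString nameNormalized = name.leftJustified(10, ' ', true);  // pad/truncate the name to 10 chars
        QString valueMaximum = QString("%1").arg(maximum);
        return QString("%1 (%2/%3)")
            .arg(nameNormalized)
            .arg(value, valueMaximum.length(), 10, QChar('0'))       // zero-pad to the width of the maximum
            .arg(valueMaximum);
    }

    // e.g. trackbarLabel("thresh", 7, 255) yields "thresh     (007/255)"
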
 \r
@@ -1347,52 +1347,52 @@ void CvTrackbar::setLabel(int myvalue)
 //here CvButtonbar class\r
 CvButtonbar::CvButtonbar(QWidget* arg,  QString arg2)\r
 {\r
-       type = type_CvButtonbar;\r
-       myparent = arg;\r
-       name_bar = arg2;\r
-       setObjectName(name_bar);\r
+    type = type_CvButtonbar;\r
+    myparent = arg;\r
+    name_bar = arg2;\r
+    setObjectName(name_bar);\r
 \r
-       group_button = new QButtonGroup(this);\r
+    group_button = new QButtonGroup(this);\r
 }\r
 \r
 \r
 void CvButtonbar::setLabel()\r
 {\r
-       QString nameNormalized = name_bar.leftJustified(10, ' ', true);\r
-       label->setText(nameNormalized);\r
+    QString nameNormalized = name_bar.leftJustified(10, ' ', true);\r
+    label->setText(nameNormalized);\r
 }\r
 \r
 \r
 void CvButtonbar::addButton(QString name, CvButtonCallback call, void* userdata,  int button_type, int initial_button_state)\r
 {\r
-       QString button_name = name;\r
+    QString button_name = name;\r
 \r
-       if (button_name == "")\r
-               button_name = tr("button %1").arg(this->count());\r
+    if (button_name == "")\r
+        button_name = tr("button %1").arg(this->count());\r
 \r
-       QPointer<QAbstractButton> button;\r
+    QPointer<QAbstractButton> button;\r
 \r
-       if (button_type == CV_PUSH_BUTTON)\r
-               button = (QAbstractButton*) new CvPushButton(this, button_name,call, userdata);\r
+    if (button_type == CV_PUSH_BUTTON)\r
+        button = (QAbstractButton*) new CvPushButton(this, button_name,call, userdata);\r
 \r
-       if (button_type == CV_CHECKBOX)\r
-               button = (QAbstractButton*) new CvCheckBox(this, button_name,call, userdata, initial_button_state);\r
+    if (button_type == CV_CHECKBOX)\r
+        button = (QAbstractButton*) new CvCheckBox(this, button_name,call, userdata, initial_button_state);\r
 \r
-       if (button_type == CV_RADIOBOX)\r
-       {\r
-               button = (QAbstractButton*) new CvRadioButton(this, button_name,call, userdata, initial_button_state);\r
-               group_button->addButton(button);\r
-       }\r
+    if (button_type == CV_RADIOBOX)\r
+    {\r
+        button = (QAbstractButton*) new CvRadioButton(this, button_name,call, userdata, initial_button_state);\r
+        group_button->addButton(button);\r
+    }\r
 \r
-       if (button)\r
-       {\r
-               if (button_type == CV_PUSH_BUTTON)\r
-                       QObject::connect(button, SIGNAL(clicked(bool)), button, SLOT(callCallBack(bool)));\r
-               else \r
-                       QObject::connect(button, SIGNAL(toggled(bool)), button, SLOT(callCallBack(bool)));\r
+    if (button)\r
+    {\r
+        if (button_type == CV_PUSH_BUTTON)\r
+            QObject::connect(button, SIGNAL(clicked(bool)), button, SLOT(callCallBack(bool)));\r
+        else\r
+            QObject::connect(button, SIGNAL(toggled(bool)), button, SLOT(callCallBack(bool)));\r
 \r
-               addWidget(button, Qt::AlignCenter);\r
-       }\r
+        addWidget(button, Qt::AlignCenter);\r
+    }\r
 }\r
 \r
 \r
@@ -1403,68 +1403,68 @@ void CvButtonbar::addButton(QString name, CvButtonCallback call, void* userdata,
 //buttons here\r
 CvPushButton::CvPushButton(CvButtonbar* arg1, QString arg2, CvButtonCallback arg3, void* arg4)\r
 {\r
-       myparent = arg1;\r
-       button_name = arg2;\r
-       callback = arg3;\r
-       userdata = arg4;\r
+    myparent = arg1;\r
+    button_name = arg2;\r
+    callback = arg3;\r
+    userdata = arg4;\r
 \r
-       setObjectName(button_name);\r
-       setText(button_name);\r
+    setObjectName(button_name);\r
+    setText(button_name);\r
 \r
-       if (isChecked())\r
-               callCallBack(true);\r
+    if (isChecked())\r
+        callCallBack(true);\r
 }\r
 \r
 \r
 void CvPushButton::callCallBack(bool checked)\r
 {\r
-       if (callback)\r
-               callback(checked, userdata);\r
+    if (callback)\r
+        callback(checked, userdata);\r
 }\r
 \r
 \r
 CvCheckBox::CvCheckBox(CvButtonbar* arg1, QString arg2, CvButtonCallback arg3, void* arg4, int initial_button_state)\r
 {\r
-       myparent = arg1;\r
-       button_name = arg2;\r
-       callback = arg3;\r
-       userdata = arg4;\r
+    myparent = arg1;\r
+    button_name = arg2;\r
+    callback = arg3;\r
+    userdata = arg4;\r
 \r
-       setObjectName(button_name);\r
-       setCheckState((initial_button_state == 1 ? Qt::Checked : Qt::Unchecked));\r
-       setText(button_name);\r
+    setObjectName(button_name);\r
+    setCheckState((initial_button_state == 1 ? Qt::Checked : Qt::Unchecked));\r
+    setText(button_name);\r
 \r
-       if (isChecked())\r
-               callCallBack(true);\r
+    if (isChecked())\r
+        callCallBack(true);\r
 }\r
 \r
 \r
 void CvCheckBox::callCallBack(bool checked)\r
 {\r
-       if (callback)\r
-               callback(checked, userdata);\r
+    if (callback)\r
+        callback(checked, userdata);\r
 }\r
 \r
 \r
 CvRadioButton::CvRadioButton(CvButtonbar* arg1, QString arg2, CvButtonCallback arg3, void* arg4, int initial_button_state)\r
 {\r
-       myparent = arg1;\r
-       button_name = arg2;\r
-       callback = arg3;\r
-       userdata = arg4;\r
+    myparent = arg1;\r
+    button_name = arg2;\r
+    callback = arg3;\r
+    userdata = arg4;\r
 \r
-       setObjectName(button_name);\r
-       setChecked(initial_button_state);\r
-       setText(button_name);\r
+    setObjectName(button_name);\r
+    setChecked(initial_button_state);\r
+    setText(button_name);\r
 \r
-       if (isChecked())\r
-               callCallBack(true);\r
+    if (isChecked())\r
+        callCallBack(true);\r
 }\r
 \r
 void CvRadioButton::callCallBack(bool checked)\r
 {\r
-       if (callback)\r
-               callback(checked, userdata);\r
+    if (callback)\r
+        callback(checked, userdata);\r
 }\r
 \r
 \r
@@ -1475,67 +1475,67 @@ void CvRadioButton::callCallBack(bool checked)
 //here CvWinProperties class\r
 CvWinProperties::CvWinProperties(QString name_paraWindow, QObject* parent)\r
 {\r
-       //setParent(parent);\r
-       type = type_CvWinProperties;\r
-       setWindowFlags(Qt::Tool);\r
-       setContentsMargins(0, 0, 0, 0);\r
-       setWindowTitle(name_paraWindow);\r
-       setObjectName(name_paraWindow);\r
-       resize(100, 50);\r
+    //setParent(parent);\r
+    type = type_CvWinProperties;\r
+    setWindowFlags(Qt::Tool);\r
+    setContentsMargins(0, 0, 0, 0);\r
+    setWindowTitle(name_paraWindow);\r
+    setObjectName(name_paraWindow);\r
+    resize(100, 50);\r
 \r
-       myLayout = new QBoxLayout(QBoxLayout::TopToBottom);\r
-       myLayout->setObjectName(QString::fromUtf8("boxLayout"));\r
-       myLayout->setContentsMargins(0, 0, 0, 0);\r
-       myLayout->setSpacing(0);\r
-       myLayout->setMargin(0);\r
-       myLayout->setSizeConstraint(QLayout::SetFixedSize);\r
-       setLayout(myLayout);\r
+    myLayout = new QBoxLayout(QBoxLayout::TopToBottom);\r
+    myLayout->setObjectName(QString::fromUtf8("boxLayout"));\r
+    myLayout->setContentsMargins(0, 0, 0, 0);\r
+    myLayout->setSpacing(0);\r
+    myLayout->setMargin(0);\r
+    myLayout->setSizeConstraint(QLayout::SetFixedSize);\r
+    setLayout(myLayout);\r
 \r
-       hide();\r
+    hide();\r
 }\r
 \r
 \r
 void CvWinProperties::closeEvent(QCloseEvent* e)\r
 {\r
-       e->accept(); //intersept the close event (not sure I really need it)\r
-       //an hide event is also sent. I will intercept it and do some processing\r
+    e->accept(); //intercept the close event (not sure I really need it)\r
+    //a hide event is also sent; I will intercept it and do some processing\r
 }\r
 \r
 \r
 void CvWinProperties::showEvent(QShowEvent* event)\r
 {\r
-       //why -1,-1 ?: do this trick because the first time the code is run,\r
-       //no value pos was saved so we let Qt move the window in the middle of its parent (event ignored).\r
-       //then hide will save the last position and thus, we want to retreive it (event accepted).\r
-       QPoint mypos(-1, -1);\r
-       QSettings settings("OpenCV2", windowTitle());\r
-       mypos = settings.value("pos", mypos).toPoint();\r
+    //why -1,-1 ?: do this trick because the first time the code is run,\r
+    //no value pos was saved so we let Qt move the window in the middle of its parent (event ignored).\r
+    //then hide will save the last position and thus, we want to retrieve it (event accepted).\r
+    QPoint mypos(-1, -1);\r
+    QSettings settings("OpenCV2", windowTitle());\r
+    mypos = settings.value("pos", mypos).toPoint();\r
 \r
-       if (mypos.x() >= 0)\r
-       {\r
-               move(mypos);\r
-               event->accept();\r
-       }\r
-       else\r
+    if (mypos.x() >= 0)\r
+    {\r
+        move(mypos);\r
+        event->accept();\r
+    }\r
+    else\r
     {\r
-               event->ignore();\r
-       }\r
+        event->ignore();\r
+    }\r
 }\r
 \r
 \r
 void CvWinProperties::hideEvent(QHideEvent* event)\r
 {\r
-       QSettings settings("OpenCV2", windowTitle());\r
-       settings.setValue("pos", pos()); //there is an offset of 6 pixels (so the window's position is wrong -- why ?)\r
-       event->accept();\r
+    QSettings settings("OpenCV2", windowTitle());\r
+    settings.setValue("pos", pos()); //there is an offset of 6 pixels (so the window's position is wrong -- why ?)\r
+    event->accept();\r
 }\r
 \r
 \r
 CvWinProperties::~CvWinProperties()\r
 {\r
-       //clear the setting pos\r
-       QSettings settings("OpenCV2", windowTitle());\r
-       settings.remove("pos");\r
+    //clear the setting pos\r
+    QSettings settings("OpenCV2", windowTitle());\r
+    settings.remove("pos");\r
 }\r
 \r
 \r
@@ -1545,102 +1545,102 @@ CvWinProperties::~CvWinProperties()
 \r
 CvWindow::CvWindow(QString name, int arg2)\r
 {\r
-       type = type_CvWindow;\r
-       moveToThread(qApp->instance()->thread());\r
+    type = type_CvWindow;\r
+    moveToThread(qApp->instance()->thread());\r
 \r
-       param_flags = arg2 & 0x0000000F;\r
-       param_gui_mode = arg2 & 0x000000F0;\r
-       param_ratio_mode =  arg2 & 0x00000F00;\r
+    param_flags = arg2 & 0x0000000F;\r
+    param_gui_mode = arg2 & 0x000000F0;\r
+    param_ratio_mode =  arg2 & 0x00000F00;\r
 \r
-       //setAttribute(Qt::WA_DeleteOnClose); //in other case, does not release memory\r
-       setContentsMargins(0, 0, 0, 0);\r
-       setWindowTitle(name);\r
+    //setAttribute(Qt::WA_DeleteOnClose); //in other case, does not release memory\r
+    setContentsMargins(0, 0, 0, 0);\r
+    setWindowTitle(name);\r
         setObjectName(name);\r
 \r
         setFocus( Qt::PopupFocusReason ); //#1695 arrow keys are not recieved without the explicit focus\r
 \r
-       resize(400, 300);\r
-       setMinimumSize(1, 1);\r
+    resize(400, 300);\r
+    setMinimumSize(1, 1);\r
 \r
-       //1: create control panel\r
-       if (!global_control_panel)\r
-               global_control_panel = createParameterWindow();\r
+    //1: create control panel\r
+    if (!global_control_panel)\r
+        global_control_panel = createParameterWindow();\r
 \r
-       //2: Layouts\r
-       createBarLayout();\r
-       createGlobalLayout();\r
+    //2: Layouts\r
+    createBarLayout();\r
+    createGlobalLayout();\r
 \r
-       //3: my view\r
+    //3: my view\r
 #ifndef HAVE_QT_OPENGL\r
     if (arg2 & CV_WINDOW_OPENGL)\r
         CV_Error( CV_OpenGlNotSupported, "Library was built without OpenGL support" );\r
-       mode_display = CV_MODE_NORMAL;\r
+    mode_display = CV_MODE_NORMAL;\r
 #else\r
     mode_display = arg2 & CV_WINDOW_OPENGL ? CV_MODE_OPENGL : CV_MODE_NORMAL;\r
     if (mode_display == CV_MODE_OPENGL)\r
         param_gui_mode = CV_GUI_NORMAL;\r
 #endif\r
-       createView();\r
+    createView();\r
 \r
-       //4: shortcuts and actions\r
-       //5: toolBar and statusbar\r
-       if (param_gui_mode == CV_GUI_EXPANDED)\r
-       {\r
+    //4: shortcuts and actions\r
+    //5: toolBar and statusbar\r
+    if (param_gui_mode == CV_GUI_EXPANDED)\r
+    {\r
         createActions();\r
         createShortcuts();\r
 \r
-               createToolBar();\r
-               createStatusBar();\r
-       }\r
+        createToolBar();\r
+        createStatusBar();\r
+    }\r
 \r
-       //Now attach everything\r
-       if (myToolBar)\r
-               myGlobalLayout->addWidget(myToolBar, Qt::AlignCenter);\r
+    //Now attach everything\r
+    if (myToolBar)\r
+        myGlobalLayout->addWidget(myToolBar, Qt::AlignCenter);\r
 \r
-       myGlobalLayout->addWidget(myView->getWidget(), Qt::AlignCenter);\r
+    myGlobalLayout->addWidget(myView->getWidget(), Qt::AlignCenter);\r
 \r
-       myGlobalLayout->addLayout(myBarLayout, Qt::AlignCenter);\r
+    myGlobalLayout->addLayout(myBarLayout, Qt::AlignCenter);\r
 \r
-       if (myStatusBar)\r
-               myGlobalLayout->addWidget(myStatusBar, Qt::AlignCenter);\r
+    if (myStatusBar)\r
+        myGlobalLayout->addWidget(myStatusBar, Qt::AlignCenter);\r
 \r
-       setLayout(myGlobalLayout);\r
-       show();\r
+    setLayout(myGlobalLayout);\r
+    show();\r
 }\r
 \r
 \r
 CvWindow::~CvWindow()\r
 {\r
-       if (guiMainThread)\r
-               guiMainThread->isLastWindow();\r
+    if (guiMainThread)\r
+        guiMainThread->isLastWindow();\r
 }\r
 \r
 \r
 void CvWindow::setMouseCallBack(CvMouseCallback callback, void* param)\r
 {\r
-       myView->setMouseCallBack(callback, param);\r
+    myView->setMouseCallBack(callback, param);\r
 }\r
 \r
 \r
 void CvWindow::writeSettings()\r
 {\r
-       //organisation and application's name\r
-       QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName());\r
+    //organisation and application's name\r
+    QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName());\r
 \r
-       settings.setValue("pos", pos());\r
-       settings.setValue("size", size());\r
-       settings.setValue("mode_resize" ,param_flags);\r
-       settings.setValue("mode_gui", param_gui_mode);\r
+    settings.setValue("pos", pos());\r
+    settings.setValue("size", size());\r
+    settings.setValue("mode_resize" ,param_flags);\r
+    settings.setValue("mode_gui", param_gui_mode);\r
 \r
     myView->writeSettings(settings);\r
 \r
-       icvSaveTrackbars(&settings);\r
+    icvSaveTrackbars(&settings);\r
 \r
-       if (global_control_panel)\r
-       {\r
-               icvSaveControlPanel();\r
-               settings.setValue("posPanel", global_control_panel->pos());\r
-       }\r
+    if (global_control_panel)\r
+    {\r
+        icvSaveControlPanel();\r
+        settings.setValue("posPanel", global_control_panel->pos());\r
+    }\r
 }\r
 \r
 \r
@@ -1648,30 +1648,30 @@ void CvWindow::writeSettings()
 //TODO: load CV_GUI flag (done) and act accordingly (create win property if needed and attach trackbars)\r
 void CvWindow::readSettings()\r
 {\r
-       //organisation and application's name\r
-       QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName());\r
+    //organisation and application's name\r
+    QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName());\r
 \r
-       QPoint pos = settings.value("pos", QPoint(200, 200)).toPoint();\r
-       QSize size = settings.value("size", QSize(400, 400)).toSize();\r
+    QPoint pos = settings.value("pos", QPoint(200, 200)).toPoint();\r
+    QSize size = settings.value("size", QSize(400, 400)).toSize();\r
 \r
-       param_flags = settings.value("mode_resize", param_flags).toInt();\r
-       param_gui_mode = settings.value("mode_gui", param_gui_mode).toInt();\r
+    param_flags = settings.value("mode_resize", param_flags).toInt();\r
+    param_gui_mode = settings.value("mode_gui", param_gui_mode).toInt();\r
 \r
-       param_flags = settings.value("mode_resize", param_flags).toInt();\r
+    param_flags = settings.value("mode_resize", param_flags).toInt();\r
 \r
-       myView->readSettings(settings);\r
+    myView->readSettings(settings);\r
 \r
-       //trackbar here\r
-       icvLoadTrackbars(&settings);\r
+    //trackbar here\r
+    icvLoadTrackbars(&settings);\r
 \r
-       resize(size);\r
-       move(pos);\r
+    resize(size);\r
+    move(pos);\r
 \r
-       if (global_control_panel)\r
-       {\r
-               icvLoadControlPanel();\r
-               global_control_panel->move(settings.value("posPanel", global_control_panel->pos()).toPoint());\r
-       }\r
+    if (global_control_panel)\r
+    {\r
+        icvLoadControlPanel();\r
+        global_control_panel->move(settings.value("posPanel", global_control_panel->pos()).toPoint());\r
+    }\r
 }\r
 \r
 \r
@@ -1687,30 +1687,30 @@ void CvWindow::setRatio(int flags)
 }\r
 \r
 \r
-int CvWindow::getPropWindow() \r
-{ \r
-    return param_flags; \r
+int CvWindow::getPropWindow()\r
+{\r
+    return param_flags;\r
 }\r
 \r
 \r
 void CvWindow::setPropWindow(int flags)\r
 {\r
     if (param_flags == flags) //nothing to do\r
-           return;\r
+        return;\r
 \r
     switch(flags)\r
     {\r
     case CV_WINDOW_NORMAL:\r
-           myGlobalLayout->setSizeConstraint(QLayout::SetMinAndMaxSize);\r
-           param_flags = flags;\r
+        myGlobalLayout->setSizeConstraint(QLayout::SetMinAndMaxSize);\r
+        param_flags = flags;\r
 \r
-           break;\r
+        break;\r
 \r
     case CV_WINDOW_AUTOSIZE:\r
-           myGlobalLayout->setSizeConstraint(QLayout::SetFixedSize);\r
-           param_flags = flags;\r
+        myGlobalLayout->setSizeConstraint(QLayout::SetFixedSize);\r
+        param_flags = flags;\r
 \r
-           break;\r
+        break;\r
 \r
     default:\r
         ;\r
@@ -1722,36 +1722,36 @@ void CvWindow::toggleFullScreen(int flags)
 {\r
     if (isFullScreen() && flags == CV_WINDOW_NORMAL)\r
     {\r
-           showTools();\r
-           showNormal();\r
-           return;\r
+        showTools();\r
+        showNormal();\r
+        return;\r
     }\r
 \r
     if (!isFullScreen() && flags == CV_WINDOW_FULLSCREEN)\r
     {\r
-           hideTools();\r
-           showFullScreen();\r
-           return;\r
+        hideTools();\r
+        showFullScreen();\r
+        return;\r
     }\r
 }\r
 \r
 \r
 void CvWindow::updateImage(void* arr)\r
 {\r
-       myView->updateImage(arr);\r
+    myView->updateImage(arr);\r
 }\r
 \r
 \r
 void CvWindow::displayInfo(QString text, int delayms)\r
 {\r
-       myView->startDisplayInfo(text, delayms);\r
+    myView->startDisplayInfo(text, delayms);\r
 }\r
 \r
 \r
 void CvWindow::displayStatusBar(QString text, int delayms)\r
 {\r
     if (myStatusBar)\r
-           myStatusBar->showMessage(text, delayms);\r
+        myStatusBar->showMessage(text, delayms);\r
 }\r
 \r
 \r
@@ -1763,74 +1763,74 @@ void CvWindow::enablePropertiesButton()
 \r
 CvButtonbar* CvWindow::createButtonBar(QString name_bar)\r
 {\r
-       QPointer<CvButtonbar> t = new CvButtonbar(global_control_panel, name_bar);\r
-       t->setAlignment(Qt::AlignHCenter);\r
+    QPointer<CvButtonbar> t = new CvButtonbar(global_control_panel, name_bar);\r
+    t->setAlignment(Qt::AlignHCenter);\r
 \r
-       QPointer<QBoxLayout> myLayout = global_control_panel->myLayout;\r
+    QPointer<QBoxLayout> myLayout = global_control_panel->myLayout;\r
 \r
-       myLayout->insertLayout(myLayout->count(), t);\r
+    myLayout->insertLayout(myLayout->count(), t);\r
 \r
-       return t;\r
+    return t;\r
 }\r
 \r
 \r
 void CvWindow::addSlider(CvWindow* w, QString name, int* value, int count, CvTrackbarCallback on_change)\r
 {\r
-       QPointer<CvTrackbar> t = new CvTrackbar(w, name, value, count, on_change);\r
-       t->setAlignment(Qt::AlignHCenter);\r
+    QPointer<CvTrackbar> t = new CvTrackbar(w, name, value, count, on_change);\r
+    t->setAlignment(Qt::AlignHCenter);\r
 \r
-       QPointer<QBoxLayout> myLayout;\r
+    QPointer<QBoxLayout> myLayout;\r
 \r
-       if (w)\r
-       {\r
-               myLayout = w->myBarLayout;\r
-       }\r
-       else\r
-       {\r
-               myLayout = global_control_panel->myLayout;\r
+    if (w)\r
+    {\r
+        myLayout = w->myBarLayout;\r
+    }\r
+    else\r
+    {\r
+        myLayout = global_control_panel->myLayout;\r
 \r
-               //if first one, enable control panel\r
-               if (myLayout->count() == 0)\r
-                       guiMainThread->enablePropertiesButtonEachWindow();\r
-       }\r
+        //if first one, enable control panel\r
+        if (myLayout->count() == 0)\r
+            guiMainThread->enablePropertiesButtonEachWindow();\r
+    }\r
 \r
-       myLayout->insertLayout(myLayout->count(), t);\r
+    myLayout->insertLayout(myLayout->count(), t);\r
 }\r
 \r
 \r
 void CvWindow::addSlider2(CvWindow* w, QString name, int* value, int count, CvTrackbarCallback2 on_change, void* userdata)\r
 {\r
-       QPointer<CvTrackbar> t = new CvTrackbar(w, name, value, count, on_change, userdata);\r
-       t->setAlignment(Qt::AlignHCenter);\r
+    QPointer<CvTrackbar> t = new CvTrackbar(w, name, value, count, on_change, userdata);\r
+    t->setAlignment(Qt::AlignHCenter);\r
 \r
-       QPointer<QBoxLayout> myLayout;\r
+    QPointer<QBoxLayout> myLayout;\r
 \r
-       if (w)\r
-       {\r
-               myLayout = w->myBarLayout;\r
-       }\r
-       else\r
-       {\r
-               myLayout = global_control_panel->myLayout;\r
+    if (w)\r
+    {\r
+        myLayout = w->myBarLayout;\r
+    }\r
+    else\r
+    {\r
+        myLayout = global_control_panel->myLayout;\r
 \r
-               //if first one, enable control panel\r
-               if (myLayout->count() == 0)\r
-                       guiMainThread->enablePropertiesButtonEachWindow();\r
-       }\r
+        //if first one, enable control panel\r
+        if (myLayout->count() == 0)\r
+            guiMainThread->enablePropertiesButtonEachWindow();\r
+    }\r
 \r
-       myLayout->insertLayout(myLayout->count(), t);\r
+    myLayout->insertLayout(myLayout->count(), t);\r
 }\r
 \r
 \r
 void CvWindow::setOpenGlDrawCallback(CvOpenGlDrawCallback callback, void* userdata)\r
 {\r
-       myView->setOpenGlDrawCallback(callback, userdata);\r
+    myView->setOpenGlDrawCallback(callback, userdata);\r
 }\r
 \r
 \r
 void CvWindow::setOpenGlCleanCallback(CvOpenGlCleanCallback callback, void* userdata)\r
 {\r
-       myView->setOpenGlCleanCallback(callback, userdata);\r
+    myView->setOpenGlCleanCallback(callback, userdata);\r
 }\r
 \r
 \r
@@ -1861,27 +1861,27 @@ void CvWindow::setViewportSize(QSize size)
 \r
 void CvWindow::createBarLayout()\r
 {\r
-       myBarLayout = new QBoxLayout(QBoxLayout::TopToBottom);\r
-       myBarLayout->setObjectName(QString::fromUtf8("barLayout"));\r
-       myBarLayout->setContentsMargins(0, 0, 0, 0);\r
-       myBarLayout->setSpacing(0);\r
-       myBarLayout->setMargin(0);\r
+    myBarLayout = new QBoxLayout(QBoxLayout::TopToBottom);\r
+    myBarLayout->setObjectName(QString::fromUtf8("barLayout"));\r
+    myBarLayout->setContentsMargins(0, 0, 0, 0);\r
+    myBarLayout->setSpacing(0);\r
+    myBarLayout->setMargin(0);\r
 }\r
 \r
 \r
 void CvWindow::createGlobalLayout()\r
 {\r
-       myGlobalLayout = new QBoxLayout(QBoxLayout::TopToBottom);\r
-       myGlobalLayout->setObjectName(QString::fromUtf8("boxLayout"));\r
-       myGlobalLayout->setContentsMargins(0, 0, 0, 0);\r
-       myGlobalLayout->setSpacing(0);\r
-       myGlobalLayout->setMargin(0);\r
-       setMinimumSize(1, 1);\r
+    myGlobalLayout = new QBoxLayout(QBoxLayout::TopToBottom);\r
+    myGlobalLayout->setObjectName(QString::fromUtf8("boxLayout"));\r
+    myGlobalLayout->setContentsMargins(0, 0, 0, 0);\r
+    myGlobalLayout->setSpacing(0);\r
+    myGlobalLayout->setMargin(0);\r
+    setMinimumSize(1, 1);\r
 \r
-       if (param_flags == CV_WINDOW_AUTOSIZE)\r
-               myGlobalLayout->setSizeConstraint(QLayout::SetFixedSize);\r
-       else if (param_flags == CV_WINDOW_NORMAL)\r
-               myGlobalLayout->setSizeConstraint(QLayout::SetMinAndMaxSize);\r
+    if (param_flags == CV_WINDOW_AUTOSIZE)\r
+        myGlobalLayout->setSizeConstraint(QLayout::SetFixedSize);\r
+    else if (param_flags == CV_WINDOW_NORMAL)\r
+        myGlobalLayout->setSizeConstraint(QLayout::SetMinAndMaxSize);\r
 }\r
 \r
 \r
@@ -1889,180 +1889,180 @@ void CvWindow::createView()
 {\r
 #ifdef HAVE_QT_OPENGL\r
     if (isOpenGl())\r
-           myView = new OpenGlViewPort(this);\r
+        myView = new OpenGlViewPort(this);\r
     else\r
 #endif\r
-           myView = new DefaultViewPort(this, param_ratio_mode);\r
+        myView = new DefaultViewPort(this, param_ratio_mode);\r
 }\r
 \r
 \r
 void CvWindow::createActions()\r
 {\r
-       vect_QActions.resize(10);\r
+    vect_QActions.resize(10);\r
 \r
     QWidget* view = myView->getWidget();\r
 \r
-       //if the shortcuts are changed in window_QT.h, we need to update the tooltip manually\r
-       vect_QActions[0] = new QAction(QIcon(":/left-icon"), "Panning left (CTRL+arrowLEFT)", this);\r
-       vect_QActions[0]->setIconVisibleInMenu(true);\r
-       QObject::connect(vect_QActions[0], SIGNAL(triggered()), view, SLOT(siftWindowOnLeft()));\r
+    //if the shortcuts are changed in window_QT.h, we need to update the tooltip manually\r
+    vect_QActions[0] = new QAction(QIcon(":/left-icon"), "Panning left (CTRL+arrowLEFT)", this);\r
+    vect_QActions[0]->setIconVisibleInMenu(true);\r
+    QObject::connect(vect_QActions[0], SIGNAL(triggered()), view, SLOT(siftWindowOnLeft()));\r
 \r
-       vect_QActions[1] = new QAction(QIcon(":/right-icon"), "Panning right (CTRL+arrowRIGHT)", this);\r
-       vect_QActions[1]->setIconVisibleInMenu(true);\r
-       QObject::connect(vect_QActions[1], SIGNAL(triggered()), view, SLOT(siftWindowOnRight()));\r
+    vect_QActions[1] = new QAction(QIcon(":/right-icon"), "Panning right (CTRL+arrowRIGHT)", this);\r
+    vect_QActions[1]->setIconVisibleInMenu(true);\r
+    QObject::connect(vect_QActions[1], SIGNAL(triggered()), view, SLOT(siftWindowOnRight()));\r
 \r
-       vect_QActions[2] = new QAction(QIcon(":/up-icon"), "Panning up (CTRL+arrowUP)", this);\r
-       vect_QActions[2]->setIconVisibleInMenu(true);\r
-       QObject::connect(vect_QActions[2], SIGNAL(triggered()), view, SLOT(siftWindowOnUp()));\r
+    vect_QActions[2] = new QAction(QIcon(":/up-icon"), "Panning up (CTRL+arrowUP)", this);\r
+    vect_QActions[2]->setIconVisibleInMenu(true);\r
+    QObject::connect(vect_QActions[2], SIGNAL(triggered()), view, SLOT(siftWindowOnUp()));\r
 \r
-       vect_QActions[3] = new QAction(QIcon(":/down-icon"), "Panning down (CTRL+arrowDOWN)", this);\r
-       vect_QActions[3]->setIconVisibleInMenu(true);\r
-       QObject::connect(vect_QActions[3], SIGNAL(triggered()), view, SLOT(siftWindowOnDown()) );\r
+    vect_QActions[3] = new QAction(QIcon(":/down-icon"), "Panning down (CTRL+arrowDOWN)", this);\r
+    vect_QActions[3]->setIconVisibleInMenu(true);\r
+    QObject::connect(vect_QActions[3], SIGNAL(triggered()), view, SLOT(siftWindowOnDown()) );\r
 \r
-       vect_QActions[4] = new QAction(QIcon(":/zoom_x1-icon"), "Zoom x1 (CTRL+P)", this);\r
-       vect_QActions[4]->setIconVisibleInMenu(true);\r
-       QObject::connect(vect_QActions[4], SIGNAL(triggered()), view, SLOT(resetZoom()));\r
+    vect_QActions[4] = new QAction(QIcon(":/zoom_x1-icon"), "Zoom x1 (CTRL+P)", this);\r
+    vect_QActions[4]->setIconVisibleInMenu(true);\r
+    QObject::connect(vect_QActions[4], SIGNAL(triggered()), view, SLOT(resetZoom()));\r
 \r
-       vect_QActions[5] = new QAction(QIcon(":/imgRegion-icon"), tr("Zoom x%1 (see label) (CTRL+X)").arg(threshold_zoom_img_region), this);\r
-       vect_QActions[5]->setIconVisibleInMenu(true);\r
-       QObject::connect(vect_QActions[5], SIGNAL(triggered()), view, SLOT(imgRegion()));\r
+    vect_QActions[5] = new QAction(QIcon(":/imgRegion-icon"), tr("Zoom x%1 (see label) (CTRL+X)").arg(threshold_zoom_img_region), this);\r
+    vect_QActions[5]->setIconVisibleInMenu(true);\r
+    QObject::connect(vect_QActions[5], SIGNAL(triggered()), view, SLOT(imgRegion()));\r
 \r
-       vect_QActions[6] = new QAction(QIcon(":/zoom_in-icon"), "Zoom in (CTRL++)", this);\r
-       vect_QActions[6]->setIconVisibleInMenu(true);\r
-       QObject::connect(vect_QActions[6], SIGNAL(triggered()), view, SLOT(ZoomIn()));\r
+    vect_QActions[6] = new QAction(QIcon(":/zoom_in-icon"), "Zoom in (CTRL++)", this);\r
+    vect_QActions[6]->setIconVisibleInMenu(true);\r
+    QObject::connect(vect_QActions[6], SIGNAL(triggered()), view, SLOT(ZoomIn()));\r
 \r
-       vect_QActions[7] = new QAction(QIcon(":/zoom_out-icon"), "Zoom out (CTRL+-)", this);\r
-       vect_QActions[7]->setIconVisibleInMenu(true);\r
-       QObject::connect(vect_QActions[7], SIGNAL(triggered()), view, SLOT(ZoomOut()));\r
+    vect_QActions[7] = new QAction(QIcon(":/zoom_out-icon"), "Zoom out (CTRL+-)", this);\r
+    vect_QActions[7]->setIconVisibleInMenu(true);\r
+    QObject::connect(vect_QActions[7], SIGNAL(triggered()), view, SLOT(ZoomOut()));\r
 \r
-       vect_QActions[8] = new QAction(QIcon(":/save-icon"), "Save current image (CTRL+S)", this);\r
-       vect_QActions[8]->setIconVisibleInMenu(true);\r
-       QObject::connect(vect_QActions[8], SIGNAL(triggered()), view, SLOT(saveView()));\r
+    vect_QActions[8] = new QAction(QIcon(":/save-icon"), "Save current image (CTRL+S)", this);\r
+    vect_QActions[8]->setIconVisibleInMenu(true);\r
+    QObject::connect(vect_QActions[8], SIGNAL(triggered()), view, SLOT(saveView()));\r
 \r
-       vect_QActions[9] = new QAction(QIcon(":/properties-icon"), "Display properties window (CTRL+P)", this);\r
-       vect_QActions[9]->setIconVisibleInMenu(true);\r
-       QObject::connect(vect_QActions[9], SIGNAL(triggered()), this, SLOT(displayPropertiesWin()));\r
+    vect_QActions[9] = new QAction(QIcon(":/properties-icon"), "Display properties window (CTRL+P)", this);\r
+    vect_QActions[9]->setIconVisibleInMenu(true);\r
+    QObject::connect(vect_QActions[9], SIGNAL(triggered()), this, SLOT(displayPropertiesWin()));\r
 \r
-       if (global_control_panel->myLayout->count() == 0)\r
-               vect_QActions[9]->setDisabled(true);\r
+    if (global_control_panel->myLayout->count() == 0)\r
+        vect_QActions[9]->setDisabled(true);\r
 }\r
 \r
 \r
 void CvWindow::createShortcuts()\r
 {\r
-       vect_QShortcuts.resize(10);\r
+    vect_QShortcuts.resize(10);\r
 \r
     QWidget* view = myView->getWidget();\r
 \r
-       vect_QShortcuts[0] = new QShortcut(shortcut_panning_left, this);\r
-       QObject::connect(vect_QShortcuts[0], SIGNAL(activated()), view, SLOT(siftWindowOnLeft()));\r
+    vect_QShortcuts[0] = new QShortcut(shortcut_panning_left, this);\r
+    QObject::connect(vect_QShortcuts[0], SIGNAL(activated()), view, SLOT(siftWindowOnLeft()));\r
 \r
-       vect_QShortcuts[1] = new QShortcut(shortcut_panning_right, this);\r
-       QObject::connect(vect_QShortcuts[1], SIGNAL(activated()), view, SLOT(siftWindowOnRight()));\r
+    vect_QShortcuts[1] = new QShortcut(shortcut_panning_right, this);\r
+    QObject::connect(vect_QShortcuts[1], SIGNAL(activated()), view, SLOT(siftWindowOnRight()));\r
 \r
-       vect_QShortcuts[2] = new QShortcut(shortcut_panning_up, this);\r
-       QObject::connect(vect_QShortcuts[2], SIGNAL(activated()), view, SLOT(siftWindowOnUp()));\r
+    vect_QShortcuts[2] = new QShortcut(shortcut_panning_up, this);\r
+    QObject::connect(vect_QShortcuts[2], SIGNAL(activated()), view, SLOT(siftWindowOnUp()));\r
 \r
-       vect_QShortcuts[3] = new QShortcut(shortcut_panning_down, this);\r
-       QObject::connect(vect_QShortcuts[3], SIGNAL(activated()), view, SLOT(siftWindowOnDown()));\r
+    vect_QShortcuts[3] = new QShortcut(shortcut_panning_down, this);\r
+    QObject::connect(vect_QShortcuts[3], SIGNAL(activated()), view, SLOT(siftWindowOnDown()));\r
 \r
-       vect_QShortcuts[4] = new QShortcut(shortcut_zoom_normal, this);\r
-       QObject::connect(vect_QShortcuts[4], SIGNAL(activated()), view, SLOT(resetZoom()));\r
+    vect_QShortcuts[4] = new QShortcut(shortcut_zoom_normal, this);\r
+    QObject::connect(vect_QShortcuts[4], SIGNAL(activated()), view, SLOT(resetZoom()));\r
 \r
-       vect_QShortcuts[5] = new QShortcut(shortcut_zoom_imgRegion, this);\r
-       QObject::connect(vect_QShortcuts[5], SIGNAL(activated()), view, SLOT(imgRegion()));\r
+    vect_QShortcuts[5] = new QShortcut(shortcut_zoom_imgRegion, this);\r
+    QObject::connect(vect_QShortcuts[5], SIGNAL(activated()), view, SLOT(imgRegion()));\r
 \r
-       vect_QShortcuts[6] = new QShortcut(shortcut_zoom_in, this);\r
-       QObject::connect(vect_QShortcuts[6], SIGNAL(activated()), view, SLOT(ZoomIn()));\r
+    vect_QShortcuts[6] = new QShortcut(shortcut_zoom_in, this);\r
+    QObject::connect(vect_QShortcuts[6], SIGNAL(activated()), view, SLOT(ZoomIn()));\r
 \r
-       vect_QShortcuts[7] = new QShortcut(shortcut_zoom_out, this);\r
-       QObject::connect(vect_QShortcuts[7], SIGNAL(activated()), view, SLOT(ZoomOut()));\r
+    vect_QShortcuts[7] = new QShortcut(shortcut_zoom_out, this);\r
+    QObject::connect(vect_QShortcuts[7], SIGNAL(activated()), view, SLOT(ZoomOut()));\r
 \r
-       vect_QShortcuts[8] = new QShortcut(shortcut_save_img, this);\r
-       QObject::connect(vect_QShortcuts[8], SIGNAL(activated()), view, SLOT(saveView()));\r
+    vect_QShortcuts[8] = new QShortcut(shortcut_save_img, this);\r
+    QObject::connect(vect_QShortcuts[8], SIGNAL(activated()), view, SLOT(saveView()));\r
 \r
-       vect_QShortcuts[9] = new QShortcut(shortcut_properties_win, this);\r
-       QObject::connect(vect_QShortcuts[9], SIGNAL(activated()), this, SLOT(displayPropertiesWin()));\r
+    vect_QShortcuts[9] = new QShortcut(shortcut_properties_win, this);\r
+    QObject::connect(vect_QShortcuts[9], SIGNAL(activated()), this, SLOT(displayPropertiesWin()));\r
 }\r
 \r
 \r
 void CvWindow::createToolBar()\r
 {\r
-       myToolBar = new QToolBar(this);\r
-       myToolBar->setFloatable(false); //is not a window\r
-       myToolBar->setFixedHeight(28);\r
-       myToolBar->setMinimumWidth(1);\r
+    myToolBar = new QToolBar(this);\r
+    myToolBar->setFloatable(false); //is not a window\r
+    myToolBar->setFixedHeight(28);\r
+    myToolBar->setMinimumWidth(1);\r
 \r
-       foreach (QAction *a, vect_QActions)\r
-               myToolBar->addAction(a);\r
+    foreach (QAction *a, vect_QActions)\r
+        myToolBar->addAction(a);\r
 }\r
 \r
 \r
 void CvWindow::createStatusBar()\r
 {\r
-       myStatusBar = new QStatusBar(this);\r
-       myStatusBar->setSizeGripEnabled(false);\r
-       myStatusBar->setFixedHeight(20);\r
-       myStatusBar->setMinimumWidth(1);\r
-       myStatusBar_msg = new QLabel;\r
+    myStatusBar = new QStatusBar(this);\r
+    myStatusBar->setSizeGripEnabled(false);\r
+    myStatusBar->setFixedHeight(20);\r
+    myStatusBar->setMinimumWidth(1);\r
+    myStatusBar_msg = new QLabel;\r
 \r
-       //I comment this because if we change the style, myview (the picture)\r
-       //will not be the correct size anymore (will lost 2 pixel because of the borders)\r
+    //This is commented out because if we change the style, myview (the picture)\r
+    //will no longer be the correct size (it will lose 2 pixels because of the borders)\r
 \r
-       //myStatusBar_msg->setFrameStyle(QFrame::Raised);\r
+    //myStatusBar_msg->setFrameStyle(QFrame::Raised);\r
 \r
-       myStatusBar_msg->setAlignment(Qt::AlignHCenter);\r
-       myStatusBar->addWidget(myStatusBar_msg);\r
+    myStatusBar_msg->setAlignment(Qt::AlignHCenter);\r
+    myStatusBar->addWidget(myStatusBar_msg);\r
 }\r
 \r
 \r
 void CvWindow::hideTools()\r
 {\r
-       if (myToolBar)\r
-               myToolBar->hide();\r
+    if (myToolBar)\r
+        myToolBar->hide();\r
 \r
-       if (myStatusBar)\r
-               myStatusBar->hide();\r
+    if (myStatusBar)\r
+        myStatusBar->hide();\r
 \r
-       if (global_control_panel)\r
-               global_control_panel->hide();\r
+    if (global_control_panel)\r
+        global_control_panel->hide();\r
 }\r
 \r
 \r
 void CvWindow::showTools()\r
 {\r
-       if (myToolBar)\r
-               myToolBar->show();\r
+    if (myToolBar)\r
+        myToolBar->show();\r
 \r
-       if (myStatusBar)\r
-               myStatusBar->show();\r
+    if (myStatusBar)\r
+        myStatusBar->show();\r
 }\r
 \r
 \r
 CvWinProperties* CvWindow::createParameterWindow()\r
 {\r
-       QString name_paraWindow = QFileInfo(QApplication::applicationFilePath()).fileName() + " settings";\r
+    QString name_paraWindow = QFileInfo(QApplication::applicationFilePath()).fileName() + " settings";\r
 \r
-       CvWinProperties* result = new CvWinProperties(name_paraWindow, guiMainThread);\r
+    CvWinProperties* result = new CvWinProperties(name_paraWindow, guiMainThread);\r
 \r
-       return result;\r
+    return result;\r
 }\r
 \r
 \r
 void CvWindow::displayPropertiesWin()\r
 {\r
-       if (global_control_panel->isHidden())\r
-               global_control_panel->show();\r
-       else\r
-               global_control_panel->hide();\r
+    if (global_control_panel->isHidden())\r
+        global_control_panel->show();\r
+    else\r
+        global_control_panel->hide();\r
 }\r
 \r
 \r
 //Need more test here !\r
 void CvWindow::keyPressEvent(QKeyEvent *event)\r
 {\r
-       //see http://doc.trolltech.com/4.6/qt.html#Key-enum\r
-       int key = event->key();\r
+    //see http://doc.trolltech.com/4.6/qt.html#Key-enum\r
+    int key = event->key();\r
 \r
         Qt::Key qtkey = static_cast<Qt::Key>(key);\r
         char asciiCode = QTest::keyToAscii(qtkey);\r
@@ -2071,187 +2071,187 @@ void CvWindow::keyPressEvent(QKeyEvent *event)
         else\r
             key = event->nativeVirtualKey(); //same codes as returned by GTK-based backend\r
 \r
-       //control plus (Z, +, -, up, down, left, right) are used for zoom/panning functions\r
+    //control plus (Z, +, -, up, down, left, right) are used for zoom/panning functions\r
         if (event->modifiers() != Qt::ControlModifier)\r
         {\r
-               mutexKey.lock();\r
-               last_key = key;\r
-               mutexKey.unlock();\r
-               key_pressed.wakeAll();\r
-               //event->accept();\r
-       }\r
+        mutexKey.lock();\r
+        last_key = key;\r
+        mutexKey.unlock();\r
+        key_pressed.wakeAll();\r
+        //event->accept();\r
+    }\r
 \r
-       QWidget::keyPressEvent(event);\r
+    QWidget::keyPressEvent(event);\r
 }\r
 \r
 \r
 void CvWindow::icvLoadControlPanel()\r
 {\r
-       QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName() + " control panel");\r
-       \r
+    QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName() + " control panel");\r
+\r
     int size = settings.beginReadArray("bars");\r
 \r
-       if (size == global_control_panel->myLayout->layout()->count())\r
+    if (size == global_control_panel->myLayout->layout()->count())\r
     {\r
-               for (int i = 0; i < size; ++i) \r
+        for (int i = 0; i < size; ++i)\r
         {\r
-                       CvBar* t = (CvBar*) global_control_panel->myLayout->layout()->itemAt(i);\r
-                       settings.setArrayIndex(i);\r
-                       if (t->type == type_CvTrackbar)\r
-                       {\r
-                               if (t->name_bar == settings.value("namebar").toString())\r
-                               {\r
-                                       ((CvTrackbar*)t)->slider->setValue(settings.value("valuebar").toInt());\r
-                               }\r
-                       }\r
-                       if (t->type == type_CvButtonbar)\r
-                       {\r
-                               int subsize = settings.beginReadArray(QString("buttonbar")+i);\r
+            CvBar* t = (CvBar*) global_control_panel->myLayout->layout()->itemAt(i);\r
+            settings.setArrayIndex(i);\r
+            if (t->type == type_CvTrackbar)\r
+            {\r
+                if (t->name_bar == settings.value("namebar").toString())\r
+                {\r
+                    ((CvTrackbar*)t)->slider->setValue(settings.value("valuebar").toInt());\r
+                }\r
+            }\r
+            if (t->type == type_CvButtonbar)\r
+            {\r
+                int subsize = settings.beginReadArray(QString("buttonbar")+i);\r
+\r
+                if ( subsize == ((CvButtonbar*)t)->layout()->count() )\r
+                    icvLoadButtonbar((CvButtonbar*)t,&settings);\r
 \r
-                               if ( subsize == ((CvButtonbar*)t)->layout()->count() )\r
-                                       icvLoadButtonbar((CvButtonbar*)t,&settings);\r
-                               \r
-                               settings.endArray();\r
-                       }\r
-               }\r
+                settings.endArray();\r
+            }\r
+        }\r
     }\r
 \r
-       settings.endArray();\r
+    settings.endArray();\r
 }\r
 \r
 \r
 void CvWindow::icvSaveControlPanel()\r
 {\r
-       QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName()+" control panel");\r
+    QSettings settings("OpenCV2", QFileInfo(QApplication::applicationFilePath()).fileName()+" control panel");\r
 \r
-       settings.beginWriteArray("bars");\r
+    settings.beginWriteArray("bars");\r
 \r
-       for (int i = 0; i < global_control_panel->myLayout->layout()->count(); ++i) \r
+    for (int i = 0; i < global_control_panel->myLayout->layout()->count(); ++i)\r
     {\r
-               CvBar* t = (CvBar*) global_control_panel->myLayout->layout()->itemAt(i);\r
-               settings.setArrayIndex(i);\r
-               if (t->type == type_CvTrackbar)\r
-               {\r
-                       settings.setValue("namebar", QString(t->name_bar));\r
-                       settings.setValue("valuebar",((CvTrackbar*)t)->slider->value());\r
-               }\r
-               if (t->type == type_CvButtonbar)\r
-               {\r
-                       settings.beginWriteArray(QString("buttonbar")+i);\r
-                       icvSaveButtonbar((CvButtonbar*)t,&settings);\r
-                       settings.endArray();\r
-               }\r
-       }\r
+        CvBar* t = (CvBar*) global_control_panel->myLayout->layout()->itemAt(i);\r
+        settings.setArrayIndex(i);\r
+        if (t->type == type_CvTrackbar)\r
+        {\r
+            settings.setValue("namebar", QString(t->name_bar));\r
+            settings.setValue("valuebar",((CvTrackbar*)t)->slider->value());\r
+        }\r
+        if (t->type == type_CvButtonbar)\r
+        {\r
+            settings.beginWriteArray(QString("buttonbar")+i);\r
+            icvSaveButtonbar((CvButtonbar*)t,&settings);\r
+            settings.endArray();\r
+        }\r
+    }\r
 \r
-       settings.endArray();\r
+    settings.endArray();\r
 }\r
 \r
 \r
 void CvWindow::icvSaveButtonbar(CvButtonbar* b, QSettings* settings)\r
 {\r
-       for (int i = 0, count = b->layout()->count(); i < count; ++i) \r
+    for (int i = 0, count = b->layout()->count(); i < count; ++i)\r
     {\r
-               settings->setArrayIndex(i);\r
+        settings->setArrayIndex(i);\r
 \r
-               QWidget* temp = (QWidget*) b->layout()->itemAt(i)->widget();\r
+        QWidget* temp = (QWidget*) b->layout()->itemAt(i)->widget();\r
         QString myclass(QLatin1String(temp->metaObject()->className()));\r
 \r
-               if (myclass == "CvPushButton")\r
-               {\r
-                       CvPushButton* button = (CvPushButton*) temp;\r
-                       settings->setValue("namebutton", button->text());\r
-                       settings->setValue("valuebutton", int(button->isChecked()));\r
-               }\r
-               else if (myclass == "CvCheckBox")\r
-               {\r
-                       CvCheckBox* button = (CvCheckBox*) temp;\r
-                       settings->setValue("namebutton", button->text());\r
-                       settings->setValue("valuebutton", int(button->isChecked()));\r
-               }\r
-               else if (myclass == "CvRadioButton")\r
-               {\r
-                       CvRadioButton* button = (CvRadioButton*) temp;\r
-                       settings->setValue("namebutton", button->text());\r
-                       settings->setValue("valuebutton", int(button->isChecked()));\r
-               }\r
-       }\r
+        if (myclass == "CvPushButton")\r
+        {\r
+            CvPushButton* button = (CvPushButton*) temp;\r
+            settings->setValue("namebutton", button->text());\r
+            settings->setValue("valuebutton", int(button->isChecked()));\r
+        }\r
+        else if (myclass == "CvCheckBox")\r
+        {\r
+            CvCheckBox* button = (CvCheckBox*) temp;\r
+            settings->setValue("namebutton", button->text());\r
+            settings->setValue("valuebutton", int(button->isChecked()));\r
+        }\r
+        else if (myclass == "CvRadioButton")\r
+        {\r
+            CvRadioButton* button = (CvRadioButton*) temp;\r
+            settings->setValue("namebutton", button->text());\r
+            settings->setValue("valuebutton", int(button->isChecked()));\r
+        }\r
+    }\r
 }\r
 \r
 \r
 void CvWindow::icvLoadButtonbar(CvButtonbar* b, QSettings* settings)\r
 {\r
-       for (int i = 0, count = b->layout()->count(); i < count; ++i)\r
-       {\r
-               settings->setArrayIndex(i);\r
+    for (int i = 0, count = b->layout()->count(); i < count; ++i)\r
+    {\r
+        settings->setArrayIndex(i);\r
 \r
-               QWidget* temp = (QWidget*) b->layout()->itemAt(i)->widget();\r
-               QString myclass(QLatin1String(temp->metaObject()->className()));\r
+        QWidget* temp = (QWidget*) b->layout()->itemAt(i)->widget();\r
+        QString myclass(QLatin1String(temp->metaObject()->className()));\r
 \r
-               if (myclass == "CvPushButton")\r
-               {\r
-                       CvPushButton* button = (CvPushButton*) temp;\r
+        if (myclass == "CvPushButton")\r
+        {\r
+            CvPushButton* button = (CvPushButton*) temp;\r
 \r
-                       if (button->text() == settings->value("namebutton").toString())\r
-                               button->setChecked(settings->value("valuebutton").toInt());\r
-               }\r
-               else if (myclass == "CvCheckBox")\r
-               {\r
-                       CvCheckBox* button = (CvCheckBox*) temp;\r
+            if (button->text() == settings->value("namebutton").toString())\r
+                button->setChecked(settings->value("valuebutton").toInt());\r
+        }\r
+        else if (myclass == "CvCheckBox")\r
+        {\r
+            CvCheckBox* button = (CvCheckBox*) temp;\r
 \r
-                       if (button->text() == settings->value("namebutton").toString())\r
-                               button->setChecked(settings->value("valuebutton").toInt());\r
-               }\r
-               else if (myclass == "CvRadioButton")\r
-               {\r
-                       CvRadioButton* button = (CvRadioButton*) temp;\r
+            if (button->text() == settings->value("namebutton").toString())\r
+                button->setChecked(settings->value("valuebutton").toInt());\r
+        }\r
+        else if (myclass == "CvRadioButton")\r
+        {\r
+            CvRadioButton* button = (CvRadioButton*) temp;\r
 \r
-                       if (button->text() == settings->value("namebutton").toString())\r
-                               button->setChecked(settings->value("valuebutton").toInt());\r
-               }\r
+            if (button->text() == settings->value("namebutton").toString())\r
+                button->setChecked(settings->value("valuebutton").toInt());\r
+        }\r
 \r
-       }\r
+    }\r
 }\r
 \r
 \r
 void CvWindow::icvLoadTrackbars(QSettings* settings)\r
 {\r
-       int size = settings->beginReadArray("trackbars");\r
+    int size = settings->beginReadArray("trackbars");\r
 \r
-       //trackbar are saved in the same order, so no need to use icvFindTrackbarByName\r
+    //trackbars are saved in the same order, so there is no need to use icvFindTrackbarByName\r
 \r
-       if (myBarLayout->layout()->count() == size) //if not the same number, the window saved and loaded is not the same (nb trackbar not equal)\r
+    if (myBarLayout->layout()->count() == size) //if the counts differ, the saved and loaded windows do not match (different number of trackbars)\r
     {\r
-               for (int i = 0; i < size; ++i)\r
-               {\r
-                       settings->setArrayIndex(i);\r
+        for (int i = 0; i < size; ++i)\r
+        {\r
+            settings->setArrayIndex(i);\r
 \r
-                       CvTrackbar* t = (CvTrackbar*) myBarLayout->layout()->itemAt(i);\r
+            CvTrackbar* t = (CvTrackbar*) myBarLayout->layout()->itemAt(i);\r
 \r
-                       if (t->name_bar == settings->value("name").toString())\r
-                               t->slider->setValue(settings->value("value").toInt());\r
+            if (t->name_bar == settings->value("name").toString())\r
+                t->slider->setValue(settings->value("value").toInt());\r
 \r
-               }\r
+        }\r
     }\r
 \r
-       settings->endArray();\r
+    settings->endArray();\r
 }\r
 \r
 \r
 void CvWindow::icvSaveTrackbars(QSettings* settings)\r
 {\r
-       settings->beginWriteArray("trackbars");\r
+    settings->beginWriteArray("trackbars");\r
 \r
-       for (int i = 0; i < myBarLayout->layout()->count(); ++i) \r
+    for (int i = 0; i < myBarLayout->layout()->count(); ++i)\r
     {\r
-               settings->setArrayIndex(i);\r
+        settings->setArrayIndex(i);\r
 \r
-               CvTrackbar* t = (CvTrackbar*) myBarLayout->layout()->itemAt(i);\r
+        CvTrackbar* t = (CvTrackbar*) myBarLayout->layout()->itemAt(i);\r
 \r
-               settings->setValue("name", t->name_bar);\r
-               settings->setValue("value", t->slider->value());\r
-       }\r
+        settings->setValue("name", t->name_bar);\r
+        settings->setValue("value", t->slider->value());\r
+    }\r
 \r
-       settings->endArray();\r
+    settings->endArray();\r
 }\r
 \r
 \r
@@ -2261,44 +2261,44 @@ void CvWindow::icvSaveTrackbars(QSettings* settings)
 \r
 DefaultViewPort::DefaultViewPort(CvWindow* arg, int arg2) : QGraphicsView(arg), image2Draw_mat(0)\r
 {\r
-       centralWidget = arg;\r
+    centralWidget = arg;\r
     param_keepRatio = arg2;\r
 \r
-       setContentsMargins(0, 0, 0, 0);\r
-       setMinimumSize(1, 1);\r
+    setContentsMargins(0, 0, 0, 0);\r
+    setMinimumSize(1, 1);\r
     setAlignment(Qt::AlignHCenter);\r
 \r
-       setObjectName(QString::fromUtf8("graphicsView"));\r
+    setObjectName(QString::fromUtf8("graphicsView"));\r
 \r
-       timerDisplay = new QTimer(this);\r
-       timerDisplay->setSingleShot(true);\r
-       connect(timerDisplay, SIGNAL(timeout()), this, SLOT(stopDisplayInfo()));\r
+    timerDisplay = new QTimer(this);\r
+    timerDisplay->setSingleShot(true);\r
+    connect(timerDisplay, SIGNAL(timeout()), this, SLOT(stopDisplayInfo()));\r
 \r
-       drawInfo = false;\r
-       positionGrabbing = QPointF(0, 0);\r
-       positionCorners = QRect(0, 0, size().width(), size().height());\r
+    drawInfo = false;\r
+    positionGrabbing = QPointF(0, 0);\r
+    positionCorners = QRect(0, 0, size().width(), size().height());\r
 \r
-       on_mouse = 0;\r
+    on_mouse = 0;\r
     on_mouse_param = 0;\r
-       mouseCoordinate = QPoint(-1, -1);\r
+    mouseCoordinate = QPoint(-1, -1);\r
 \r
-       //no border\r
-       setStyleSheet( "QGraphicsView { border-style: none; }" ); \r
+    //no border\r
+    setStyleSheet( "QGraphicsView { border-style: none; }" );\r
 \r
     image2Draw_mat = cvCreateMat(viewport()->height(), viewport()->width(), CV_8UC3);\r
     cvZero(image2Draw_mat);\r
 \r
     nbChannelOriginImage = 0;\r
 \r
-       setInteractive(false);\r
-       setMouseTracking(true); //receive mouse event everytime\r
+    setInteractive(false);\r
+    setMouseTracking(true); //receive mouse events all the time\r
 }\r
 \r
 \r
 DefaultViewPort::~DefaultViewPort()\r
 {\r
-       if (image2Draw_mat)             \r
-               cvReleaseMat(&image2Draw_mat);\r
+    if (image2Draw_mat)\r
+        cvReleaseMat(&image2Draw_mat);\r
 }\r
 \r
 \r
@@ -2310,9 +2310,9 @@ QWidget* DefaultViewPort::getWidget()
 \r
 void DefaultViewPort::setMouseCallBack(CvMouseCallback m, void* param)\r
 {\r
-       on_mouse = m;\r
+    on_mouse = m;\r
 \r
-       on_mouse_param = param;\r
+    on_mouse_param = param;\r
 }\r
 \r
 void DefaultViewPort::writeSettings(QSettings& settings)\r
@@ -2354,63 +2354,63 @@ double DefaultViewPort::getRatio()
 void DefaultViewPort::setRatio(int flags)\r
 {\r
     if (getRatio() == flags) //nothing to do\r
-               return;\r
+        return;\r
 \r
-       //if valid flags\r
-       if (flags == CV_WINDOW_FREERATIO || flags == CV_WINDOW_KEEPRATIO)\r
+    //if valid flags\r
+    if (flags == CV_WINDOW_FREERATIO || flags == CV_WINDOW_KEEPRATIO)\r
     {\r
         centralWidget->param_ratio_mode = flags;\r
-           param_keepRatio = flags;\r
-           updateGeometry();\r
-           viewport()->update();\r
+        param_keepRatio = flags;\r
+        updateGeometry();\r
+        viewport()->update();\r
     }\r
 }\r
 \r
 \r
 void DefaultViewPort::updateImage(const CvArr* arr)\r
 {\r
-       CV_Assert(arr);\r
+    CV_Assert(arr);\r
 \r
-       CvMat* mat, stub;\r
-       int origin = 0;\r
+    CvMat* mat, stub;\r
+    int origin = 0;\r
 \r
-       if (CV_IS_IMAGE_HDR(arr))\r
-               origin = ((IplImage*)arr)->origin;\r
+    if (CV_IS_IMAGE_HDR(arr))\r
+        origin = ((IplImage*)arr)->origin;\r
 \r
-       mat = cvGetMat(arr, &stub);\r
+    mat = cvGetMat(arr, &stub);\r
 \r
-       if (!image2Draw_mat || !CV_ARE_SIZES_EQ(image2Draw_mat, mat))\r
-       {\r
+    if (!image2Draw_mat || !CV_ARE_SIZES_EQ(image2Draw_mat, mat))\r
+    {\r
         if (image2Draw_mat)\r
-                   cvReleaseMat(&image2Draw_mat);\r
+            cvReleaseMat(&image2Draw_mat);\r
 \r
-               //the image in ipl (to do a deep copy with cvCvtColor)\r
-               image2Draw_mat = cvCreateMat(mat->rows, mat->cols, CV_8UC3);\r
-               image2Draw_qt = QImage(image2Draw_mat->data.ptr, image2Draw_mat->cols, image2Draw_mat->rows, image2Draw_mat->step, QImage::Format_RGB888);\r
+        //the image in ipl (to do a deep copy with cvCvtColor)\r
+        image2Draw_mat = cvCreateMat(mat->rows, mat->cols, CV_8UC3);\r
+        image2Draw_qt = QImage(image2Draw_mat->data.ptr, image2Draw_mat->cols, image2Draw_mat->rows, image2Draw_mat->step, QImage::Format_RGB888);\r
 \r
-               //use to compute mouse coordinate, I need to update the ratio here and in resizeEvent\r
-               ratioX = width() / float(image2Draw_mat->cols);\r
-               ratioY = height() / float(image2Draw_mat->rows);\r
+        //used to compute the mouse coordinate; the ratio must be updated here and in resizeEvent\r
+        ratioX = width() / float(image2Draw_mat->cols);\r
+        ratioY = height() / float(image2Draw_mat->rows);\r
 \r
-               updateGeometry();\r
-       }\r
+        updateGeometry();\r
+    }\r
 \r
-       nbChannelOriginImage = cvGetElemType(mat);\r
+    nbChannelOriginImage = cvGetElemType(mat);\r
 \r
-       cvConvertImage(mat, image2Draw_mat, (origin != 0 ? CV_CVTIMG_FLIP : 0) + CV_CVTIMG_SWAP_RB);\r
+    cvConvertImage(mat, image2Draw_mat, (origin != 0 ? CV_CVTIMG_FLIP : 0) + CV_CVTIMG_SWAP_RB);\r
 \r
-       viewport()->update();\r
+    viewport()->update();\r
 }\r
 \r
 \r
 void DefaultViewPort::startDisplayInfo(QString text, int delayms)\r
 {\r
-       if (timerDisplay->isActive())\r
-               stopDisplayInfo();\r
+    if (timerDisplay->isActive())\r
+        stopDisplayInfo();\r
 \r
-       infoText = text;\r
-       if (delayms > 0) timerDisplay->start(delayms);\r
-       drawInfo = true;\r
+    infoText = text;\r
+    if (delayms > 0) timerDisplay->start(delayms);\r
+    drawInfo = true;\r
 }\r
 \r
 \r
@@ -2441,120 +2441,120 @@ void DefaultViewPort::updateGl()
 //Note: move 2 percent of the window\r
 void DefaultViewPort::siftWindowOnLeft()\r
 {\r
-       float delta = 2 * width() / (100.0 * param_matrixWorld.m11());\r
-       moveView(QPointF(delta, 0));\r
+    float delta = 2 * width() / (100.0 * param_matrixWorld.m11());\r
+    moveView(QPointF(delta, 0));\r
 }\r
 \r
 \r
 //Note: move 2 percent of the window\r
 void DefaultViewPort::siftWindowOnRight()\r
 {\r
-       float delta = -2 * width() / (100.0 * param_matrixWorld.m11());\r
-       moveView(QPointF(delta, 0));\r
+    float delta = -2 * width() / (100.0 * param_matrixWorld.m11());\r
+    moveView(QPointF(delta, 0));\r
 }\r
 \r
 \r
 //Note: move 2 percent of the window\r
 void DefaultViewPort::siftWindowOnUp()\r
 {\r
-       float delta = 2 * height() / (100.0 * param_matrixWorld.m11());\r
-       moveView(QPointF(0, delta));\r
+    float delta = 2 * height() / (100.0 * param_matrixWorld.m11());\r
+    moveView(QPointF(0, delta));\r
 }\r
 \r
 \r
 //Note: move 2 percent of the window\r
 void DefaultViewPort::siftWindowOnDown()\r
 {\r
-       float delta = -2 * height() / (100.0 * param_matrixWorld.m11());\r
-       moveView(QPointF(0, delta));\r
+    float delta = -2 * height() / (100.0 * param_matrixWorld.m11());\r
+    moveView(QPointF(0, delta));\r
 }\r
 \r
 \r
 void DefaultViewPort::imgRegion()\r
 {\r
-       scaleView((threshold_zoom_img_region / param_matrixWorld.m11() - 1) * 5, QPointF(size().width() / 2, size().height() / 2));\r
+    scaleView((threshold_zoom_img_region / param_matrixWorld.m11() - 1) * 5, QPointF(size().width() / 2, size().height() / 2));\r
 }\r
 \r
 \r
 void DefaultViewPort::resetZoom()\r
 {\r
-       param_matrixWorld.reset();\r
-       controlImagePosition();\r
+    param_matrixWorld.reset();\r
+    controlImagePosition();\r
 }\r
 \r
 \r
 void DefaultViewPort::ZoomIn()\r
 {\r
-       scaleView(0.5, QPointF(size().width() / 2, size().height() / 2));\r
+    scaleView(0.5, QPointF(size().width() / 2, size().height() / 2));\r
 }\r
 \r
 \r
 void DefaultViewPort::ZoomOut()\r
 {\r
-       scaleView(-0.5, QPointF(size().width() / 2, size().height() / 2));\r
+    scaleView(-0.5, QPointF(size().width() / 2, size().height() / 2));\r
 }\r
 \r
 \r
 //can save as JPG, JPEG, BMP, PNG\r
 void DefaultViewPort::saveView()\r
 {\r
-       QDate date_d = QDate::currentDate();\r
-       QString date_s = date_d.toString("dd.MM.yyyy");\r
+    QDate date_d = QDate::currentDate();\r
+    QString date_s = date_d.toString("dd.MM.yyyy");\r
     QString name_s = centralWidget->windowTitle() + "_screenshot_" + date_s;\r
 \r
-       QString fileName = QFileDialog::getSaveFileName(this, tr("Save File %1").arg(name_s), name_s + ".png", tr("Images (*.png *.jpg *.bmp *.jpeg)"));\r
+    QString fileName = QFileDialog::getSaveFileName(this, tr("Save File %1").arg(name_s), name_s + ".png", tr("Images (*.png *.jpg *.bmp *.jpeg)"));\r
+\r
+    if (!fileName.isEmpty()) //save the picture\r
+    {\r
+        QString extension = fileName.right(3);\r
 \r
-       if (!fileName.isEmpty()) //save the picture\r
-       {\r
-               QString extension = fileName.right(3);\r
+        //   (no longer needed) create the image resized to receive the 'screenshot'\r
+        //    image2Draw_qt_resized = QImage(viewport()->width(), viewport()->height(),QImage::Format_RGB888);\r
 \r
-           //   (no need anymore) create the image resized to receive the 'screenshot'\r
-           //    image2Draw_qt_resized = QImage(viewport()->width(), viewport()->height(),QImage::Format_RGB888);\r
-       \r
-           QPainter saveimage(&image2Draw_qt_resized);\r
-           this->render(&saveimage);\r
+        QPainter saveimage(&image2Draw_qt_resized);\r
+        this->render(&saveimage);\r
 \r
-               // Save it..\r
-               if (QString::compare(extension, "png", Qt::CaseInsensitive) == 0)\r
-               {\r
-                       image2Draw_qt_resized.save(fileName, "PNG");\r
-                       return;\r
-               }\r
+        // Save it..\r
+        if (QString::compare(extension, "png", Qt::CaseInsensitive) == 0)\r
+        {\r
+            image2Draw_qt_resized.save(fileName, "PNG");\r
+            return;\r
+        }\r
 \r
-               if (QString::compare(extension, "jpg", Qt::CaseInsensitive) == 0)\r
-               {\r
-                       image2Draw_qt_resized.save(fileName, "JPG");\r
-                       return;\r
-               }\r
+        if (QString::compare(extension, "jpg", Qt::CaseInsensitive) == 0)\r
+        {\r
+            image2Draw_qt_resized.save(fileName, "JPG");\r
+            return;\r
+        }\r
 \r
-               if (QString::compare(extension, "bmp", Qt::CaseInsensitive) == 0)\r
-               {\r
-                       image2Draw_qt_resized.save(fileName, "BMP");\r
-                       return;\r
-               }\r
+        if (QString::compare(extension, "bmp", Qt::CaseInsensitive) == 0)\r
+        {\r
+            image2Draw_qt_resized.save(fileName, "BMP");\r
+            return;\r
+        }\r
 \r
-               if (QString::compare(extension, "jpeg", Qt::CaseInsensitive) == 0)\r
-               {\r
-                       image2Draw_qt_resized.save(fileName, "JPEG");\r
-                       return;\r
-               }\r
+        if (QString::compare(extension, "jpeg", Qt::CaseInsensitive) == 0)\r
+        {\r
+            image2Draw_qt_resized.save(fileName, "JPEG");\r
+            return;\r
+        }\r
 \r
-               CV_Error(CV_StsNullPtr, "file extension not recognized, please choose between JPG, JPEG, BMP or PNG");\r
-       }\r
+        CV_Error(CV_StsNullPtr, "file extension not recognized, please choose between JPG, JPEG, BMP or PNG");\r
+    }\r
 }\r
 \r
 \r
 void DefaultViewPort::contextMenuEvent(QContextMenuEvent* event)\r
 {\r
-       if (centralWidget->vect_QActions.size() > 0)\r
-       {\r
-               QMenu menu(this);\r
+    if (centralWidget->vect_QActions.size() > 0)\r
+    {\r
+        QMenu menu(this);\r
 \r
-               foreach (QAction *a, centralWidget->vect_QActions)\r
-                       menu.addAction(a);\r
+        foreach (QAction *a, centralWidget->vect_QActions)\r
+            menu.addAction(a);\r
 \r
-               menu.exec(event->globalPos());\r
-       }\r
+        menu.exec(event->globalPos());\r
+    }\r
 }\r
 \r
 \r
@@ -2565,248 +2565,248 @@ void DefaultViewPort::resizeEvent(QResizeEvent* event)
     //use to compute mouse coordinate, I need to update the ratio here and in resizeEvent\r
     ratioX = width() / float(image2Draw_mat->cols);\r
     ratioY = height() / float(image2Draw_mat->rows);\r
-       \r
+\r
     if (param_keepRatio == CV_WINDOW_KEEPRATIO)//to keep the same aspect ratio\r
     {\r
-           QSize newSize = QSize(image2Draw_mat->cols, image2Draw_mat->rows);\r
-           newSize.scale(event->size(), Qt::KeepAspectRatio);\r
+        QSize newSize = QSize(image2Draw_mat->cols, image2Draw_mat->rows);\r
+        newSize.scale(event->size(), Qt::KeepAspectRatio);\r
 \r
-           //imageWidth/imageHeight = newWidth/newHeight +/- epsilon\r
-           //ratioX = ratioY +/- epsilon\r
-           //||ratioX - ratioY|| = epsilon\r
-           if (fabs(ratioX - ratioY) * 100 > ratioX) //avoid infinity loop / epsilon = 1% of ratioX\r
-           {\r
-                   resize(newSize);\r
+        //imageWidth/imageHeight = newWidth/newHeight +/- epsilon\r
+        //ratioX = ratioY +/- epsilon\r
+        //||ratioX - ratioY|| = epsilon\r
+        if (fabs(ratioX - ratioY) * 100 > ratioX) //avoid infinite loop / epsilon = 1% of ratioX\r
+        {\r
+            resize(newSize);\r
 \r
-                   //move to the middle\r
-                   //newSize get the delta offset to place the picture in the middle of its parent\r
-                   newSize = (event->size() - newSize) / 2;\r
+            //move to the middle\r
+            //newSize gets the delta offset to place the picture in the middle of its parent\r
+            newSize = (event->size() - newSize) / 2;\r
 \r
-                   //if the toolbar is displayed, avoid drawing myview on top of it\r
-                   if (centralWidget->myToolBar)\r
-                           if(!centralWidget->myToolBar->isHidden())\r
-                                   newSize += QSize(0, centralWidget->myToolBar->height());\r
+            //if the toolbar is displayed, avoid drawing myview on top of it\r
+            if (centralWidget->myToolBar)\r
+                if(!centralWidget->myToolBar->isHidden())\r
+                    newSize += QSize(0, centralWidget->myToolBar->height());\r
 \r
-                   move(newSize.width(), newSize.height());\r
-           }\r
+            move(newSize.width(), newSize.height());\r
+        }\r
     }\r
 \r
-       return QGraphicsView::resizeEvent(event);\r
+    return QGraphicsView::resizeEvent(event);\r
 }\r
 \r
 \r
 void DefaultViewPort::wheelEvent(QWheelEvent* event)\r
 {\r
-       scaleView(event->delta() / 240.0, event->pos());\r
-       viewport()->update();\r
+    scaleView(event->delta() / 240.0, event->pos());\r
+    viewport()->update();\r
 }\r
 \r
 \r
 void DefaultViewPort::mousePressEvent(QMouseEvent* event)\r
 {\r
-       int cv_event = -1, flags = 0;\r
-       QPoint pt = event->pos();\r
+    int cv_event = -1, flags = 0;\r
+    QPoint pt = event->pos();\r
 \r
-       //icvmouseHandler: pass parameters for cv_event, flags\r
-       icvmouseHandler(event, mouse_down, cv_event, flags);\r
-       icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+    //icvmouseHandler: pass parameters for cv_event, flags\r
+    icvmouseHandler(event, mouse_down, cv_event, flags);\r
+    icvmouseProcessing(QPointF(pt), cv_event, flags);\r
 \r
-       if (param_matrixWorld.m11()>1)\r
-       {\r
-               setCursor(Qt::ClosedHandCursor);\r
-               positionGrabbing = event->pos();\r
-       }\r
+    if (param_matrixWorld.m11()>1)\r
+    {\r
+        setCursor(Qt::ClosedHandCursor);\r
+        positionGrabbing = event->pos();\r
+    }\r
 \r
-       QWidget::mousePressEvent(event);\r
+    QWidget::mousePressEvent(event);\r
 }\r
 \r
 \r
 void DefaultViewPort::mouseReleaseEvent(QMouseEvent* event)\r
 {\r
-       int cv_event = -1, flags = 0;\r
-       QPoint pt = event->pos();\r
+    int cv_event = -1, flags = 0;\r
+    QPoint pt = event->pos();\r
 \r
-       //icvmouseHandler: pass parameters for cv_event, flags\r
-       icvmouseHandler(event, mouse_up, cv_event, flags);\r
-       icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+    //icvmouseHandler: pass parameters for cv_event, flags\r
+    icvmouseHandler(event, mouse_up, cv_event, flags);\r
+    icvmouseProcessing(QPointF(pt), cv_event, flags);\r
 \r
-       if (param_matrixWorld.m11()>1)\r
-               setCursor(Qt::OpenHandCursor);\r
+    if (param_matrixWorld.m11()>1)\r
+        setCursor(Qt::OpenHandCursor);\r
 \r
-       QWidget::mouseReleaseEvent(event);\r
+    QWidget::mouseReleaseEvent(event);\r
 }\r
 \r
 \r
 void DefaultViewPort::mouseDoubleClickEvent(QMouseEvent* event)\r
 {\r
-       int cv_event = -1, flags = 0;\r
-       QPoint pt = event->pos();\r
+    int cv_event = -1, flags = 0;\r
+    QPoint pt = event->pos();\r
 \r
-       //icvmouseHandler: pass parameters for cv_event, flags\r
-       icvmouseHandler(event, mouse_dbclick, cv_event, flags);\r
-       icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+    //icvmouseHandler: pass parameters for cv_event, flags\r
+    icvmouseHandler(event, mouse_dbclick, cv_event, flags);\r
+    icvmouseProcessing(QPointF(pt), cv_event, flags);\r
 \r
-       QWidget::mouseDoubleClickEvent(event);\r
+    QWidget::mouseDoubleClickEvent(event);\r
 }\r
 \r
 \r
 void DefaultViewPort::mouseMoveEvent(QMouseEvent* event)\r
 {\r
-       int cv_event = CV_EVENT_MOUSEMOVE, flags = 0;\r
-       QPoint pt = event->pos();\r
+    int cv_event = CV_EVENT_MOUSEMOVE, flags = 0;\r
+    QPoint pt = event->pos();\r
 \r
-       //icvmouseHandler: pass parameters for cv_event, flags\r
-       icvmouseHandler(event, mouse_move, cv_event, flags);\r
-       icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+    //icvmouseHandler: pass parameters for cv_event, flags\r
+    icvmouseHandler(event, mouse_move, cv_event, flags);\r
+    icvmouseProcessing(QPointF(pt), cv_event, flags);\r
 \r
-       if (param_matrixWorld.m11() > 1 && event->buttons() == Qt::LeftButton)\r
-       {\r
-               QPointF dxy = (pt - positionGrabbing)/param_matrixWorld.m11();\r
-               positionGrabbing = event->pos();\r
-               moveView(dxy);\r
-       }\r
+    if (param_matrixWorld.m11() > 1 && event->buttons() == Qt::LeftButton)\r
+    {\r
+        QPointF dxy = (pt - positionGrabbing)/param_matrixWorld.m11();\r
+        positionGrabbing = event->pos();\r
+        moveView(dxy);\r
+    }\r
 \r
-       //I update the statusbar here because if the user does a cvWaitkey(0) (like with inpaint.cpp)\r
-       //the status bar will only be repaint when a click occurs.\r
-       if (centralWidget->myStatusBar)\r
-               viewport()->update();\r
+    //I update the statusbar here because if the user does a cvWaitKey(0) (like with inpaint.cpp)\r
+    //the status bar will only be repainted when a click occurs.\r
+    if (centralWidget->myStatusBar)\r
+        viewport()->update();\r
 \r
-       QWidget::mouseMoveEvent(event);\r
+    QWidget::mouseMoveEvent(event);\r
 }\r
 \r
 \r
 void DefaultViewPort::paintEvent(QPaintEvent* event)\r
 {\r
-       QPainter myPainter(viewport());\r
-       myPainter.setWorldTransform(param_matrixWorld);\r
+    QPainter myPainter(viewport());\r
+    myPainter.setWorldTransform(param_matrixWorld);\r
 \r
-       draw2D(&myPainter);\r
+    draw2D(&myPainter);\r
 \r
-       //Now disable matrixWorld for overlay display\r
-       myPainter.setWorldMatrixEnabled(false);\r
+    //Now disable matrixWorld for overlay display\r
+    myPainter.setWorldMatrixEnabled(false);\r
 \r
-       //in mode zoom/panning\r
-       if (param_matrixWorld.m11() > 1)\r
-       {               \r
-               if (param_matrixWorld.m11() >= threshold_zoom_img_region)\r
-               {\r
-                       if (centralWidget->param_flags == CV_WINDOW_NORMAL)\r
-                               startDisplayInfo("WARNING: The values displayed are the resized image's values. If you want the original image's values, use CV_WINDOW_AUTOSIZE", 1000);\r
+    //in mode zoom/panning\r
+    if (param_matrixWorld.m11() > 1)\r
+    {\r
+        if (param_matrixWorld.m11() >= threshold_zoom_img_region)\r
+        {\r
+            if (centralWidget->param_flags == CV_WINDOW_NORMAL)\r
+                startDisplayInfo("WARNING: The values displayed are the resized image's values. If you want the original image's values, use CV_WINDOW_AUTOSIZE", 1000);\r
 \r
-                       drawImgRegion(&myPainter);\r
-               }\r
+            drawImgRegion(&myPainter);\r
+        }\r
 \r
-               drawViewOverview(&myPainter);\r
-       }\r
+        drawViewOverview(&myPainter);\r
+    }\r
 \r
-       //for information overlay\r
-       if (drawInfo)\r
-               drawInstructions(&myPainter);\r
+    //for information overlay\r
+    if (drawInfo)\r
+        drawInstructions(&myPainter);\r
 \r
-       //for statusbar\r
-       if (centralWidget->myStatusBar)\r
-               drawStatusBar();\r
+    //for statusbar\r
+    if (centralWidget->myStatusBar)\r
+        drawStatusBar();\r
 \r
-       QGraphicsView::paintEvent(event);\r
+    QGraphicsView::paintEvent(event);\r
 }\r
 \r
 \r
 void DefaultViewPort::stopDisplayInfo()\r
 {\r
-       timerDisplay->stop();\r
-       drawInfo = false;\r
+    timerDisplay->stop();\r
+    drawInfo = false;\r
 }\r
 \r
 \r
 inline bool DefaultViewPort::isSameSize(IplImage* img1, IplImage* img2)\r
 {\r
-       return img1->width == img2->width && img1->height == img2->height;\r
+    return img1->width == img2->width && img1->height == img2->height;\r
 }\r
 \r
 \r
 void DefaultViewPort::controlImagePosition()\r
 {\r
-       qreal left, top, right, bottom;\r
-\r
-       //after check top-left, bottom right corner to avoid getting "out" during zoom/panning\r
-       param_matrixWorld.map(0,0,&left,&top);\r
-\r
-       if (left > 0)\r
-       {\r
-               param_matrixWorld.translate(-left,0);\r
-               left = 0;\r
-       }\r
-       if (top > 0)\r
-       {\r
-               param_matrixWorld.translate(0,-top);\r
-               top = 0;\r
-       }\r
-       //-------\r
-\r
-       QSize sizeImage = size();\r
-       param_matrixWorld.map(sizeImage.width(),sizeImage.height(),&right,&bottom);\r
-       if (right < sizeImage.width())\r
-       {\r
-               param_matrixWorld.translate(sizeImage.width()-right,0);\r
-               right = sizeImage.width();\r
-       }\r
-       if (bottom < sizeImage.height())\r
-       {\r
-               param_matrixWorld.translate(0,sizeImage.height()-bottom);\r
-               bottom = sizeImage.height();\r
-       }\r
-\r
-       //save corner position\r
-       positionCorners.setTopLeft(QPoint(left,top));\r
-       positionCorners.setBottomRight(QPoint(right,bottom));\r
-       //save also the inv matrix\r
-       matrixWorld_inv = param_matrixWorld.inverted();\r
-\r
-       //viewport()->update();\r
+    qreal left, top, right, bottom;\r
+\r
+    //check the top-left and bottom-right corners to avoid getting "out" of the image during zoom/panning\r
+    param_matrixWorld.map(0,0,&left,&top);\r
+\r
+    if (left > 0)\r
+    {\r
+        param_matrixWorld.translate(-left,0);\r
+        left = 0;\r
+    }\r
+    if (top > 0)\r
+    {\r
+        param_matrixWorld.translate(0,-top);\r
+        top = 0;\r
+    }\r
+    //-------\r
+\r
+    QSize sizeImage = size();\r
+    param_matrixWorld.map(sizeImage.width(),sizeImage.height(),&right,&bottom);\r
+    if (right < sizeImage.width())\r
+    {\r
+        param_matrixWorld.translate(sizeImage.width()-right,0);\r
+        right = sizeImage.width();\r
+    }\r
+    if (bottom < sizeImage.height())\r
+    {\r
+        param_matrixWorld.translate(0,sizeImage.height()-bottom);\r
+        bottom = sizeImage.height();\r
+    }\r
+\r
+    //save corner position\r
+    positionCorners.setTopLeft(QPoint(left,top));\r
+    positionCorners.setBottomRight(QPoint(right,bottom));\r
+    //save also the inv matrix\r
+    matrixWorld_inv = param_matrixWorld.inverted();\r
+\r
+    //viewport()->update();\r
 }\r
 \r
 void DefaultViewPort::moveView(QPointF delta)\r
 {\r
-       param_matrixWorld.translate(delta.x(),delta.y());\r
-       controlImagePosition();\r
-       viewport()->update();\r
+    param_matrixWorld.translate(delta.x(),delta.y());\r
+    controlImagePosition();\r
+    viewport()->update();\r
 }\r
 \r
 //factor is -0.5 (zoom out) or 0.5 (zoom in)\r
 void DefaultViewPort::scaleView(qreal factor,QPointF center)\r
 {\r
-       factor/=5;//-0.1 <-> 0.1\r
-       factor+=1;//0.9 <-> 1.1\r
+    factor/=5;//-0.1 <-> 0.1\r
+    factor+=1;//0.9 <-> 1.1\r
 \r
-       //limit zoom out ---\r
-       if (param_matrixWorld.m11()==1 && factor < 1)\r
-               return;\r
+    //limit zoom out ---\r
+    if (param_matrixWorld.m11()==1 && factor < 1)\r
+        return;\r
 \r
-       if (param_matrixWorld.m11()*factor<1)\r
-               factor = 1/param_matrixWorld.m11();\r
+    if (param_matrixWorld.m11()*factor<1)\r
+        factor = 1/param_matrixWorld.m11();\r
 \r
 \r
-       //limit zoom int ---\r
-       if (param_matrixWorld.m11()>100 && factor > 1)\r
-               return;\r
+    //limit zoom in ---\r
+    if (param_matrixWorld.m11()>100 && factor > 1)\r
+        return;\r
 \r
-       //inverse the transform\r
-       int a, b;\r
-       matrixWorld_inv.map(center.x(),center.y(),&a,&b);\r
+    //inverse the transform\r
+    int a, b;\r
+    matrixWorld_inv.map(center.x(),center.y(),&a,&b);\r
 \r
-       param_matrixWorld.translate(a-factor*a,b-factor*b);\r
-       param_matrixWorld.scale(factor,factor);\r
+    param_matrixWorld.translate(a-factor*a,b-factor*b);\r
+    param_matrixWorld.scale(factor,factor);\r
 \r
-       controlImagePosition();\r
+    controlImagePosition();\r
 \r
-       //display new zoom\r
-       if (centralWidget->myStatusBar)\r
-               centralWidget->displayStatusBar(tr("Zoom: %1%").arg(param_matrixWorld.m11()*100),1000);\r
+    //display new zoom\r
+    if (centralWidget->myStatusBar)\r
+        centralWidget->displayStatusBar(tr("Zoom: %1%").arg(param_matrixWorld.m11()*100),1000);\r
 \r
-       if (param_matrixWorld.m11()>1)\r
-               setCursor(Qt::OpenHandCursor);\r
-       else\r
-               unsetCursor();\r
+    if (param_matrixWorld.m11()>1)\r
+        setCursor(Qt::OpenHandCursor);\r
+    else\r
+        unsetCursor();\r
 }\r
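Note on the zoom math just above: scaleView() remaps the wheel factor into roughly the 0.9..1.1 range, keeps m11 from dropping below 1 and stops zooming in once it passes about 100, and then applies translate(a - factor*a, b - factor*b) followed by scale(factor, factor), so the anchor point (a, b) is mapped onto itself by the scale-plus-translation pair. A minimal arithmetic sketch of that fixed-point identity (standalone C++, not OpenCV/Qt code; the factor and anchor values are made up):

    #include <cassert>
    #include <cmath>

    int main()
    {
        const double f = 1.1;                // zoom factor after the /5, +1 remapping
        const double a = 320.0, b = 240.0;   // anchor point, e.g. the cursor position
        // scale by f, then translate by (a - f*a, b - f*b): the anchor maps onto itself
        const double x = f * a + (a - f * a);
        const double y = f * b + (b - f * b);
        assert(std::fabs(x - a) < 1e-9 && std::fabs(y - b) < 1e-9);
        return 0;
    }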
 \r
 \r
@@ -2815,7 +2815,7 @@ void DefaultViewPort::icvmouseHandler(QMouseEvent *event, type_mouse_event categ
 {\r
     Qt::KeyboardModifiers modifiers = event->modifiers();\r
     Qt::MouseButtons buttons = event->buttons();\r
-    \r
+\r
     flags = 0;\r
     if(modifiers & Qt::ShiftModifier)\r
         flags |= CV_EVENT_FLAG_SHIFTKEY;\r
@@ -2853,208 +2853,208 @@ void DefaultViewPort::icvmouseHandler(QMouseEvent *event, type_mouse_event categ
 \r
 void DefaultViewPort::icvmouseProcessing(QPointF pt, int cv_event, int flags)\r
 {\r
-       //to convert mouse coordinate\r
-       qreal pfx, pfy;\r
-       matrixWorld_inv.map(pt.x(),pt.y(),&pfx,&pfy);\r
-       \r
-       mouseCoordinate.rx()=floor(pfx/ratioX);\r
-       mouseCoordinate.ry()=floor(pfy/ratioY);\r
+    //to convert mouse coordinate\r
+    qreal pfx, pfy;\r
+    matrixWorld_inv.map(pt.x(),pt.y(),&pfx,&pfy);\r
+\r
+    mouseCoordinate.rx()=floor(pfx/ratioX);\r
+    mouseCoordinate.ry()=floor(pfy/ratioY);\r
 \r
-       if (on_mouse)\r
-               on_mouse( cv_event, mouseCoordinate.x(),\r
+    if (on_mouse)\r
+        on_mouse( cv_event, mouseCoordinate.x(),\r
             mouseCoordinate.y(), flags, on_mouse_param );\r
 }\r
 \r
 \r
 QSize DefaultViewPort::sizeHint() const\r
 {\r
-       if(image2Draw_mat)\r
-               return QSize(image2Draw_mat->cols, image2Draw_mat->rows);\r
-       else\r
-               return QGraphicsView::sizeHint();\r
+    if(image2Draw_mat)\r
+        return QSize(image2Draw_mat->cols, image2Draw_mat->rows);\r
+    else\r
+        return QGraphicsView::sizeHint();\r
 }\r
 \r
 \r
 void DefaultViewPort::draw2D(QPainter *painter)\r
 {\r
-       image2Draw_qt = QImage(image2Draw_mat->data.ptr, image2Draw_mat->cols, image2Draw_mat->rows,image2Draw_mat->step,QImage::Format_RGB888);\r
-       image2Draw_qt_resized = image2Draw_qt.scaled(viewport()->width(),viewport()->height(),Qt::IgnoreAspectRatio,Qt::FastTransformation);//Qt::SmoothTransformation);\r
-       painter->drawImage(0,0,image2Draw_qt_resized);\r
+    image2Draw_qt = QImage(image2Draw_mat->data.ptr, image2Draw_mat->cols, image2Draw_mat->rows,image2Draw_mat->step,QImage::Format_RGB888);\r
+    image2Draw_qt_resized = image2Draw_qt.scaled(viewport()->width(),viewport()->height(),Qt::IgnoreAspectRatio,Qt::FastTransformation);//Qt::SmoothTransformation);\r
+    painter->drawImage(0,0,image2Draw_qt_resized);\r
 }\r
 \r
 //only if CV_8UC1 or CV_8UC3\r
 void DefaultViewPort::drawStatusBar()\r
 {\r
-       if (nbChannelOriginImage!=CV_8UC1 && nbChannelOriginImage!=CV_8UC3)\r
-               return;\r
-\r
-       if (mouseCoordinate.x()>=0 &&\r
-               mouseCoordinate.y()>=0 &&\r
-               mouseCoordinate.x()<image2Draw_qt.width() &&\r
-               mouseCoordinate.y()<image2Draw_qt.height())\r
-//     if (mouseCoordinate.x()>=0 && mouseCoordinate.y()>=0)\r
-       {\r
-               QRgb rgbValue = image2Draw_qt.pixel(mouseCoordinate);\r
-\r
-               if (nbChannelOriginImage==CV_8UC3 )\r
-               {\r
-                       centralWidget->myStatusBar_msg->setText(tr("<font color='black'>(x=%1, y=%2) ~ </font>")\r
-                               .arg(mouseCoordinate.x())\r
-                               .arg(mouseCoordinate.y())+\r
-                               tr("<font color='red'>R:%3 </font>").arg(qRed(rgbValue))+//.arg(value.val[0])+\r
-                               tr("<font color='green'>G:%4 </font>").arg(qGreen(rgbValue))+//.arg(value.val[1])+\r
-                               tr("<font color='blue'>B:%5</font>").arg(qBlue(rgbValue))//.arg(value.val[2])\r
-                               );\r
-               }\r
-\r
-               if (nbChannelOriginImage==CV_8UC1)\r
-               {\r
-                       //all the channel have the same value (because of cvconvertimage), so only the r channel is dsplayed\r
-                       centralWidget->myStatusBar_msg->setText(tr("<font color='black'>(x=%1, y=%2) ~ </font>")\r
-                               .arg(mouseCoordinate.x())\r
-                               .arg(mouseCoordinate.y())+\r
-                               tr("<font color='grey'>L:%3 </font>").arg(qRed(rgbValue))\r
-                               );\r
-               }\r
-       }\r
+    if (nbChannelOriginImage!=CV_8UC1 && nbChannelOriginImage!=CV_8UC3)\r
+        return;\r
+\r
+    if (mouseCoordinate.x()>=0 &&\r
+        mouseCoordinate.y()>=0 &&\r
+        mouseCoordinate.x()<image2Draw_qt.width() &&\r
+        mouseCoordinate.y()<image2Draw_qt.height())\r
+//  if (mouseCoordinate.x()>=0 && mouseCoordinate.y()>=0)\r
+    {\r
+        QRgb rgbValue = image2Draw_qt.pixel(mouseCoordinate);\r
+\r
+        if (nbChannelOriginImage==CV_8UC3 )\r
+        {\r
+            centralWidget->myStatusBar_msg->setText(tr("<font color='black'>(x=%1, y=%2) ~ </font>")\r
+                .arg(mouseCoordinate.x())\r
+                .arg(mouseCoordinate.y())+\r
+                tr("<font color='red'>R:%3 </font>").arg(qRed(rgbValue))+//.arg(value.val[0])+\r
+                tr("<font color='green'>G:%4 </font>").arg(qGreen(rgbValue))+//.arg(value.val[1])+\r
+                tr("<font color='blue'>B:%5</font>").arg(qBlue(rgbValue))//.arg(value.val[2])\r
+                );\r
+        }\r
+\r
+        if (nbChannelOriginImage==CV_8UC1)\r
+        {\r
+            //all the channels have the same value (because of cvConvertImage), so only the R channel is displayed\r
+            centralWidget->myStatusBar_msg->setText(tr("<font color='black'>(x=%1, y=%2) ~ </font>")\r
+                .arg(mouseCoordinate.x())\r
+                .arg(mouseCoordinate.y())+\r
+                tr("<font color='grey'>L:%3 </font>").arg(qRed(rgbValue))\r
+                );\r
+        }\r
+    }\r
 }\r
 \r
 //accept only CV_8UC1 and CV_8UC8 image for now\r
 void DefaultViewPort::drawImgRegion(QPainter *painter)\r
 {\r
 \r
-       if (nbChannelOriginImage!=CV_8UC1 && nbChannelOriginImage!=CV_8UC3)\r
-               return;\r
-\r
-       qreal offsetX = param_matrixWorld.dx()/param_matrixWorld.m11();\r
-       offsetX = offsetX - floor(offsetX);\r
-       qreal offsetY = param_matrixWorld.dy()/param_matrixWorld.m11();\r
-       offsetY = offsetY - floor(offsetY);\r
-\r
-       QSize view = size();\r
-       QVarLengthArray<QLineF, 30> linesX;\r
-       for (qreal x = offsetX*param_matrixWorld.m11(); x < view.width(); x += param_matrixWorld.m11() )\r
-               linesX.append(QLineF(x, 0, x, view.height()));\r
+    if (nbChannelOriginImage!=CV_8UC1 && nbChannelOriginImage!=CV_8UC3)\r
+        return;\r
 \r
-       QVarLengthArray<QLineF, 30> linesY;\r
-       for (qreal y = offsetY*param_matrixWorld.m11(); y < view.height(); y += param_matrixWorld.m11() )\r
-               linesY.append(QLineF(0, y, view.width(), y));\r
+    qreal offsetX = param_matrixWorld.dx()/param_matrixWorld.m11();\r
+    offsetX = offsetX - floor(offsetX);\r
+    qreal offsetY = param_matrixWorld.dy()/param_matrixWorld.m11();\r
+    offsetY = offsetY - floor(offsetY);\r
 \r
+    QSize view = size();\r
+    QVarLengthArray<QLineF, 30> linesX;\r
+    for (qreal x = offsetX*param_matrixWorld.m11(); x < view.width(); x += param_matrixWorld.m11() )\r
+        linesX.append(QLineF(x, 0, x, view.height()));\r
 \r
-       QFont f = painter->font();\r
-       int original_font_size = f.pointSize();\r
-       //change font size\r
-       //f.setPointSize(4+(param_matrixWorld.m11()-threshold_zoom_img_region)/5);\r
-       f.setPixelSize(10+(param_matrixWorld.m11()-threshold_zoom_img_region)/5);\r
-       painter->setFont(f);\r
-       QString val;\r
-       QRgb rgbValue;\r
+    QVarLengthArray<QLineF, 30> linesY;\r
+    for (qreal y = offsetY*param_matrixWorld.m11(); y < view.height(); y += param_matrixWorld.m11() )\r
+        linesY.append(QLineF(0, y, view.width(), y));\r
 \r
-       QPointF point1;//sorry, I do not know how to name it\r
-       QPointF point2;//idem\r
 \r
-       for (int j=-1;j<height()/param_matrixWorld.m11();j++)//-1 because display the pixels top rows left colums\r
-               for (int i=-1;i<width()/param_matrixWorld.m11();i++)//-1\r
-               {\r
-                       point1.setX((i+offsetX)*param_matrixWorld.m11());\r
-                       point1.setY((j+offsetY)*param_matrixWorld.m11());\r
+    QFont f = painter->font();\r
+    int original_font_size = f.pointSize();\r
+    //change font size\r
+    //f.setPointSize(4+(param_matrixWorld.m11()-threshold_zoom_img_region)/5);\r
+    f.setPixelSize(10+(param_matrixWorld.m11()-threshold_zoom_img_region)/5);\r
+    painter->setFont(f);\r
+    QString val;\r
+    QRgb rgbValue;\r
 \r
-                       matrixWorld_inv.map(point1.x(),point1.y(),&point2.rx(),&point2.ry());\r
+    QPointF point1;//grid cell position in view coordinates\r
+    QPointF point2;//the same point mapped back to (resized) image coordinates\r
 \r
-                       point2.rx()= (long) (point2.x() + 0.5);\r
-                       point2.ry()= (long) (point2.y() + 0.5);\r
-\r
-                       if (point2.x() >= 0 && point2.y() >= 0)\r
-                               rgbValue = image2Draw_qt_resized.pixel(QPoint(point2.x(),point2.y()));\r
-                       else\r
-                               rgbValue = qRgb(0,0,0);\r
+    for (int j=-1;j<height()/param_matrixWorld.m11();j++)//-1 so the top row and left column of pixels are also displayed\r
+        for (int i=-1;i<width()/param_matrixWorld.m11();i++)//-1\r
+        {\r
+            point1.setX((i+offsetX)*param_matrixWorld.m11());\r
+            point1.setY((j+offsetY)*param_matrixWorld.m11());\r
 \r
-                       if (nbChannelOriginImage==CV_8UC3)\r
-                       {\r
-                               //for debug\r
-                               /*\r
-                               val = tr("%1 %2").arg(point2.x()).arg(point2.y());\r
-                               painter->setPen(QPen(Qt::black, 1));\r
-                               painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()/2),\r
-                                       Qt::AlignCenter, val);\r
-                               */\r
+            matrixWorld_inv.map(point1.x(),point1.y(),&point2.rx(),&point2.ry());\r
 \r
-                               val = tr("%1").arg(qRed(rgbValue));\r
-                               painter->setPen(QPen(Qt::red, 1));\r
-                               painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()/3),\r
-                                       Qt::AlignCenter, val);\r
+            point2.rx()= (long) (point2.x() + 0.5);\r
+            point2.ry()= (long) (point2.y() + 0.5);\r
 \r
-                               val = tr("%1").arg(qGreen(rgbValue));\r
-                               painter->setPen(QPen(Qt::green, 1));\r
-                               painter->drawText(QRect(point1.x(),point1.y()+param_matrixWorld.m11()/3,param_matrixWorld.m11(),param_matrixWorld.m11()/3),\r
-                                       Qt::AlignCenter, val);\r
+            if (point2.x() >= 0 && point2.y() >= 0)\r
+                rgbValue = image2Draw_qt_resized.pixel(QPoint(point2.x(),point2.y()));\r
+            else\r
+                rgbValue = qRgb(0,0,0);\r
 \r
-                               val = tr("%1").arg(qBlue(rgbValue));\r
-                               painter->setPen(QPen(Qt::blue, 1));\r
-                               painter->drawText(QRect(point1.x(),point1.y()+2*param_matrixWorld.m11()/3,param_matrixWorld.m11(),param_matrixWorld.m11()/3),\r
-                                       Qt::AlignCenter, val);\r
+            if (nbChannelOriginImage==CV_8UC3)\r
+            {\r
+                //for debug\r
+                /*\r
+                val = tr("%1 %2").arg(point2.x()).arg(point2.y());\r
+                painter->setPen(QPen(Qt::black, 1));\r
+                painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()/2),\r
+                    Qt::AlignCenter, val);\r
+                */\r
+\r
+                val = tr("%1").arg(qRed(rgbValue));\r
+                painter->setPen(QPen(Qt::red, 1));\r
+                painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()/3),\r
+                    Qt::AlignCenter, val);\r
+\r
+                val = tr("%1").arg(qGreen(rgbValue));\r
+                painter->setPen(QPen(Qt::green, 1));\r
+                painter->drawText(QRect(point1.x(),point1.y()+param_matrixWorld.m11()/3,param_matrixWorld.m11(),param_matrixWorld.m11()/3),\r
+                    Qt::AlignCenter, val);\r
+\r
+                val = tr("%1").arg(qBlue(rgbValue));\r
+                painter->setPen(QPen(Qt::blue, 1));\r
+                painter->drawText(QRect(point1.x(),point1.y()+2*param_matrixWorld.m11()/3,param_matrixWorld.m11(),param_matrixWorld.m11()/3),\r
+                    Qt::AlignCenter, val);\r
 \r
-                       }\r
+            }\r
 \r
-                       if (nbChannelOriginImage==CV_8UC1)\r
-                       {\r
+            if (nbChannelOriginImage==CV_8UC1)\r
+            {\r
 \r
-                               val = tr("%1").arg(qRed(rgbValue));\r
-                               painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()),\r
-                                       Qt::AlignCenter, val);\r
-                       }\r
-               }\r
+                val = tr("%1").arg(qRed(rgbValue));\r
+                painter->drawText(QRect(point1.x(),point1.y(),param_matrixWorld.m11(),param_matrixWorld.m11()),\r
+                    Qt::AlignCenter, val);\r
+            }\r
+        }\r
 \r
-               painter->setPen(QPen(Qt::black, 1));\r
-               painter->drawLines(linesX.data(), linesX.size());\r
-               painter->drawLines(linesY.data(), linesY.size());\r
+        painter->setPen(QPen(Qt::black, 1));\r
+        painter->drawLines(linesX.data(), linesX.size());\r
+        painter->drawLines(linesY.data(), linesY.size());\r
 \r
-               //restore font size\r
-               f.setPointSize(original_font_size);\r
-               painter->setFont(f);\r
+        //restore font size\r
+        f.setPointSize(original_font_size);\r
+        painter->setFont(f);\r
 \r
 }\r
 \r
 void DefaultViewPort::drawViewOverview(QPainter *painter)\r
 {\r
-       QSize viewSize = size();\r
-       viewSize.scale ( 100, 100,Qt::KeepAspectRatio );\r
+    QSize viewSize = size();\r
+    viewSize.scale ( 100, 100,Qt::KeepAspectRatio );\r
 \r
-       const int margin = 5;\r
+    const int margin = 5;\r
 \r
-       //draw the image's location\r
-       painter->setBrush(QColor(0, 0, 0, 127));\r
-       painter->setPen(Qt::darkGreen);\r
-       painter->drawRect(QRect(width()-viewSize.width()-margin, 0,viewSize.width(),viewSize.height()));\r
+    //draw the image's location\r
+    painter->setBrush(QColor(0, 0, 0, 127));\r
+    painter->setPen(Qt::darkGreen);\r
+    painter->drawRect(QRect(width()-viewSize.width()-margin, 0,viewSize.width(),viewSize.height()));\r
 \r
-       //daw the view's location inside the image\r
-       qreal ratioSize = 1/param_matrixWorld.m11();\r
-       qreal ratioWindow = (qreal)(viewSize.height())/(qreal)(size().height());\r
-       painter->setPen(Qt::darkBlue);\r
-       painter->drawRect(QRectF(width()-viewSize.width()-positionCorners.left()*ratioSize*ratioWindow-margin,\r
-               -positionCorners.top()*ratioSize*ratioWindow,\r
-               (viewSize.width()-1)*ratioSize,\r
-               (viewSize.height()-1)*ratioSize)\r
-               );\r
+    //draw the view's location inside the image\r
+    qreal ratioSize = 1/param_matrixWorld.m11();\r
+    qreal ratioWindow = (qreal)(viewSize.height())/(qreal)(size().height());\r
+    painter->setPen(Qt::darkBlue);\r
+    painter->drawRect(QRectF(width()-viewSize.width()-positionCorners.left()*ratioSize*ratioWindow-margin,\r
+        -positionCorners.top()*ratioSize*ratioWindow,\r
+        (viewSize.width()-1)*ratioSize,\r
+        (viewSize.height()-1)*ratioSize)\r
+        );\r
 }\r
 \r
 void DefaultViewPort::drawInstructions(QPainter *painter)\r
 {\r
-       QFontMetrics metrics = QFontMetrics(font());\r
-       int border = qMax(4, metrics.leading());\r
+    QFontMetrics metrics = QFontMetrics(font());\r
+    int border = qMax(4, metrics.leading());\r
 \r
-       QRect rect = metrics.boundingRect(0, 0, width() - 2*border, int(height()*0.125),\r
-               Qt::AlignCenter | Qt::TextWordWrap, infoText);\r
-       painter->setRenderHint(QPainter::TextAntialiasing);\r
-       painter->fillRect(QRect(0, 0, width(), rect.height() + 2*border),\r
-               QColor(0, 0, 0, 127));\r
-       painter->setPen(Qt::white);\r
-       painter->fillRect(QRect(0, 0, width(), rect.height() + 2*border),\r
-               QColor(0, 0, 0, 127));\r
+    QRect rect = metrics.boundingRect(0, 0, width() - 2*border, int(height()*0.125),\r
+        Qt::AlignCenter | Qt::TextWordWrap, infoText);\r
+    painter->setRenderHint(QPainter::TextAntialiasing);\r
+    painter->fillRect(QRect(0, 0, width(), rect.height() + 2*border),\r
+        QColor(0, 0, 0, 127));\r
+    painter->setPen(Qt::white);\r
+    painter->fillRect(QRect(0, 0, width(), rect.height() + 2*border),\r
+        QColor(0, 0, 0, 127));\r
 \r
-       painter->drawText((width() - rect.width())/2, border,\r
-               rect.width(), rect.height(),\r
-               Qt::AlignCenter | Qt::TextWordWrap, infoText);\r
+    painter->drawText((width() - rect.width())/2, border,\r
+        rect.width(), rect.height(),\r
+        Qt::AlignCenter | Qt::TextWordWrap, infoText);\r
 }\r
 \r
 \r
@@ -3202,7 +3202,7 @@ public:
     void generateBitmapFont(const std::string& family, int height, int weight, bool italic, bool underline, int start, int count, int base) const;\r
 \r
     bool isGlContextInitialized() const;\r
-        \r
+\r
     PFNGLGENBUFFERSPROC    glGenBuffersExt;\r
     PFNGLDELETEBUFFERSPROC glDeleteBuffersExt;\r
 \r
@@ -3422,100 +3422,100 @@ void OpenGlViewPort::paintGL()
 \r
 void OpenGlViewPort::mousePressEvent(QMouseEvent* event)\r
 {\r
-       int cv_event = -1, flags = 0;\r
-       QPoint pt = event->pos();\r
+    int cv_event = -1, flags = 0;\r
+    QPoint pt = event->pos();\r
 \r
-       icvmouseHandler(event, mouse_down, cv_event, flags);\r
-       icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+    icvmouseHandler(event, mouse_down, cv_event, flags);\r
+    icvmouseProcessing(QPointF(pt), cv_event, flags);\r
 \r
-       QGLWidget::mousePressEvent(event);\r
+    QGLWidget::mousePressEvent(event);\r
 }\r
 \r
 \r
 void OpenGlViewPort::mouseReleaseEvent(QMouseEvent* event)\r
 {\r
-       int cv_event = -1, flags = 0;\r
-       QPoint pt = event->pos();\r
+    int cv_event = -1, flags = 0;\r
+    QPoint pt = event->pos();\r
 \r
-       icvmouseHandler(event, mouse_up, cv_event, flags);\r
-       icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+    icvmouseHandler(event, mouse_up, cv_event, flags);\r
+    icvmouseProcessing(QPointF(pt), cv_event, flags);\r
 \r
-       QGLWidget::mouseReleaseEvent(event);\r
+    QGLWidget::mouseReleaseEvent(event);\r
 }\r
 \r
 \r
 void OpenGlViewPort::mouseDoubleClickEvent(QMouseEvent* event)\r
 {\r
-       int cv_event = -1, flags = 0;\r
-       QPoint pt = event->pos();\r
+    int cv_event = -1, flags = 0;\r
+    QPoint pt = event->pos();\r
 \r
-       icvmouseHandler(event, mouse_dbclick, cv_event, flags);\r
-       icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+    icvmouseHandler(event, mouse_dbclick, cv_event, flags);\r
+    icvmouseProcessing(QPointF(pt), cv_event, flags);\r
 \r
-       QGLWidget::mouseDoubleClickEvent(event);\r
+    QGLWidget::mouseDoubleClickEvent(event);\r
 }\r
 \r
 \r
 void OpenGlViewPort::mouseMoveEvent(QMouseEvent* event)\r
 {\r
-       int cv_event = CV_EVENT_MOUSEMOVE, flags = 0;\r
-       QPoint pt = event->pos();\r
+    int cv_event = CV_EVENT_MOUSEMOVE, flags = 0;\r
+    QPoint pt = event->pos();\r
 \r
-       //icvmouseHandler: pass parameters for cv_event, flags\r
-       icvmouseHandler(event, mouse_move, cv_event, flags);\r
-       icvmouseProcessing(QPointF(pt), cv_event, flags);\r
+    //icvmouseHandler: pass parameters for cv_event, flags\r
+    icvmouseHandler(event, mouse_move, cv_event, flags);\r
+    icvmouseProcessing(QPointF(pt), cv_event, flags);\r
 \r
-       QGLWidget::mouseMoveEvent(event);\r
+    QGLWidget::mouseMoveEvent(event);\r
 }\r
 \r
 void OpenGlViewPort::icvmouseHandler(QMouseEvent* event, type_mouse_event category, int& cv_event, int& flags)\r
 {\r
-       Qt::KeyboardModifiers modifiers = event->modifiers();\r
+    Qt::KeyboardModifiers modifiers = event->modifiers();\r
     Qt::MouseButtons buttons = event->buttons();\r
-    \r
+\r
     flags = 0;\r
     if (modifiers & Qt::ShiftModifier)\r
-               flags |= CV_EVENT_FLAG_SHIFTKEY;\r
-       if (modifiers & Qt::ControlModifier)\r
-               flags |= CV_EVENT_FLAG_CTRLKEY;\r
-       if (modifiers & Qt::AltModifier)\r
-               flags |= CV_EVENT_FLAG_ALTKEY;\r
+        flags |= CV_EVENT_FLAG_SHIFTKEY;\r
+    if (modifiers & Qt::ControlModifier)\r
+        flags |= CV_EVENT_FLAG_CTRLKEY;\r
+    if (modifiers & Qt::AltModifier)\r
+        flags |= CV_EVENT_FLAG_ALTKEY;\r
 \r
     if (buttons & Qt::LeftButton)\r
-               flags |= CV_EVENT_FLAG_LBUTTON;\r
-       if (buttons & Qt::RightButton)\r
-               flags |= CV_EVENT_FLAG_RBUTTON;\r
+        flags |= CV_EVENT_FLAG_LBUTTON;\r
+    if (buttons & Qt::RightButton)\r
+        flags |= CV_EVENT_FLAG_RBUTTON;\r
     if (buttons & Qt::MidButton)\r
-               flags |= CV_EVENT_FLAG_MBUTTON;\r
+        flags |= CV_EVENT_FLAG_MBUTTON;\r
 \r
     cv_event = CV_EVENT_MOUSEMOVE;\r
-       switch (event->button())\r
-       {\r
-       case Qt::LeftButton:\r
-               cv_event = tableMouseButtons[category][0];\r
-               flags |= CV_EVENT_FLAG_LBUTTON;\r
-               break;\r
-\r
-       case Qt::RightButton:\r
-               cv_event = tableMouseButtons[category][1];\r
-               flags |= CV_EVENT_FLAG_RBUTTON;\r
-               break;\r
-\r
-       case Qt::MidButton:\r
-               cv_event = tableMouseButtons[category][2];\r
-               flags |= CV_EVENT_FLAG_MBUTTON;\r
-               break;\r
-\r
-       default:\r
+    switch (event->button())\r
+    {\r
+    case Qt::LeftButton:\r
+        cv_event = tableMouseButtons[category][0];\r
+        flags |= CV_EVENT_FLAG_LBUTTON;\r
+        break;\r
+\r
+    case Qt::RightButton:\r
+        cv_event = tableMouseButtons[category][1];\r
+        flags |= CV_EVENT_FLAG_RBUTTON;\r
+        break;\r
+\r
+    case Qt::MidButton:\r
+        cv_event = tableMouseButtons[category][2];\r
+        flags |= CV_EVENT_FLAG_MBUTTON;\r
+        break;\r
+\r
+    default:\r
         ;\r
-       }\r
+    }\r
 }\r
 \r
 \r
 void OpenGlViewPort::icvmouseProcessing(QPointF pt, int cv_event, int flags)\r
 {\r
-       if (mouseCallback)\r
-               mouseCallback(cv_event, pt.x(), pt.y(), flags, mouseData);\r
+    if (mouseCallback)\r
+        mouseCallback(cv_event, pt.x(), pt.y(), flags, mouseData);\r
 }\r
 \r
 \r
index 33cc72a..3f92562 100644 (file)
@@ -1372,17 +1372,17 @@ cvDestroyAllWindows( void )
     CV_UNLOCK_MUTEX();
 }
 
-CvSize icvCalcOptimalWindowSize( CvWindow * window, CvSize new_image_size){
-    CvSize window_size;
-    GtkWidget * toplevel = gtk_widget_get_toplevel( window->frame );
-    gdk_drawable_get_size( GDK_DRAWABLE(toplevel->window),
-            &window_size.width, &window_size.height );
+// CvSize icvCalcOptimalWindowSize( CvWindow * window, CvSize new_image_size){
+//     CvSize window_size;
+//     GtkWidget * toplevel = gtk_widget_get_toplevel( window->frame );
+//     gdk_drawable_get_size( GDK_DRAWABLE(toplevel->window),
+//             &window_size.width, &window_size.height );
 
-    window_size.width = window_size.width + new_image_size.width - window->widget->allocation.width;
-    window_size.height = window_size.height + new_image_size.height - window->widget->allocation.height;
+//     window_size.width = window_size.width + new_image_size.width - window->widget->allocation.width;
+//     window_size.height = window_size.height + new_image_size.height - window->widget->allocation.height;
 
-    return window_size;
-}
+//     return window_size;
+// }
 
 CV_IMPL void
 cvShowImage( const char* name, const CvArr* arr )
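In the window_gtk.cpp hunk above, the unused icvCalcOptimalWindowSize() helper is commented out rather than deleted: it has no callers and no prior declaration, so under the stricter gcc warning set this commit enables it would presumably trip -Wmissing-declarations (or -Wunused-function, had it been static). A minimal sketch of that failure mode and the usual alternatives (standalone C++ with made-up names, not OpenCV code):

    // g++ -Wall -Wmissing-declarations -c demo.cpp

    int orphan_helper(int x)           // warns: no previous declaration for 'int orphan_helper(int)'
    {
        return x + 1;
    }

    static int local_helper(int x)     // alternative 1: give the helper internal linkage
    {
        return x - 1;
    }

    int exported(int x);               // alternative 2: declare it (normally in a header) first
    int exported(int x) { return local_helper(x); }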
index bc6d2be..3653f1f 100644 (file)
@@ -5,7 +5,7 @@
 #include "opencv2/imgproc/imgproc.hpp"
 #include "opencv2/highgui/highgui.hpp"
 
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
 #endif
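The perf_precomp.hpp change above swaps #if for #ifdef: GTEST_CREATE_SHARED_LIBRARY is normally not defined at all, and evaluating an undefined macro inside #if triggers gcc's -Wundef, presumably part of the stricter warning set this commit turns on. A minimal sketch (standalone C++, hypothetical macro name):

    // g++ -Wall -Wundef -c demo.cpp

    #if SOME_OPTIONAL_MACRO        // warns: "SOME_OPTIONAL_MACRO" is not defined
    #endif

    #ifdef SOME_OPTIONAL_MACRO     // silent: #ifdef only asks whether the macro exists
    #endif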
 
index a19f7e2..29acdb4 100644 (file)
@@ -98,6 +98,7 @@ typedef struct _list _CVLIST;
     _LIST_INLINE CVPOS prefix##get_tail_pos_##type(_CVLIST*);\
     _LIST_INLINE type* prefix##get_next_##type(CVPOS*);\
     _LIST_INLINE type* prefix##get_prev_##type(CVPOS*);\
+    _LIST_INLINE int prefix##is_pos_##type(CVPOS pos);\
     /* Modification functions*/\
     _LIST_INLINE void prefix##clear_list_##type(_CVLIST*);\
     _LIST_INLINE CVPOS prefix##add_head_##type(_CVLIST*, type*);\
@@ -151,8 +152,8 @@ typedef struct _list _CVLIST;
     }\
     element->m_next = ((element_type*)l->m_head_free.m_pos);\
     l->m_head_free.m_pos = element;
-    
-    
+
+
 /*#define GET_FIRST_FREE(l) ((ELEMENT_##type*)(l->m_head_free.m_pos))*/
 
 #define IMPLEMENT_LIST(type, prefix)\
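The _list.h hunk above adds a forward declaration for the generated prefix##is_pos_##type function to the declaration macro, presumably so that the definition later produced by IMPLEMENT_LIST has a matching prior declaration under the stricter warnings. A minimal sketch of the token-pasting pattern (standalone C++ with hypothetical macro and function names, not the real _list.h macros):

    // g++ -Wall -Wmissing-declarations -c demo.cpp

    #define DECLARE_QUEUE(type, prefix) \
        int prefix##is_pos_##type(int pos);

    #define IMPLEMENT_QUEUE(type, prefix) \
        int prefix##is_pos_##type(int pos) { return pos != 0; }

    DECLARE_QUEUE(index, h_)      // declares h_is_pos_index(int)
    IMPLEMENT_QUEUE(index, h_)    // defines it; the declaration above keeps gcc quiet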
index c77640b..e970a31 100644 (file)
@@ -233,7 +233,7 @@ typedef DiffC3<cv::Vec3i> Diff32sC3;
 typedef DiffC1<float> Diff32fC1;
 typedef DiffC3<cv::Vec3f> Diff32fC3;
 
-cv::Vec3i& operator += (cv::Vec3i& a, const cv::Vec3b& b)
+static cv::Vec3i& operator += (cv::Vec3i& a, const cv::Vec3b& b)
 {
     a[0] += b[0];
     a[1] += b[1];
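Making the file-local Vec3i += operator static above (and similarly for the helper functions in the grabcut.cpp and phasecorr.cpp hunks further down) gives it internal linkage, so gcc no longer expects a prior declaration when -Wmissing-declarations is in effect. A minimal sketch (standalone C++ with made-up types, not OpenCV code):

    // g++ -Wall -Wmissing-declarations -c demo.cpp

    struct Vec3 { int v[3]; };

    int sum_first(Vec3& a, const Vec3& b);              // exported function: declared up front

    static Vec3& operator += (Vec3& a, const Vec3& b)   // file-local operator: static, so no
    {                                                   // separate declaration is required
        for (int i = 0; i < 3; ++i)
            a.v[i] += b.v[i];
        return a;
    }

    int sum_first(Vec3& a, const Vec3& b) { a += b; return a.v[0]; }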
@@ -440,7 +440,7 @@ cvFloodFill( CvArr* arr, CvPoint seed_point,
 {
     cv::Ptr<CvMat> tempMask;
     cv::AutoBuffer<CvFFillSegment> buffer;
-    
+
     if( comp )
         memset( comp, 0, sizeof(*comp) );
 
@@ -491,16 +491,16 @@ cvFloodFill( CvArr* arr, CvPoint seed_point,
     {
         /*int elem_size = CV_ELEM_SIZE(type);
         const uchar* seed_ptr = img->data.ptr + img->step*seed_point.y + elem_size*seed_point.x;
-        
+
         // check if the new value is different from the current value at the seed point.
         // if they are exactly the same, use the generic version with mask to avoid infinite loops.
         for( i = 0; i < elem_size; i++ )
             if( seed_ptr[i] != ((uchar*)nv_buf)[i] )
                 break;
-        
+
         if( i == elem_size )
             return;*/
-        
+
         if( type == CV_8UC1 )
             icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, nv_buf.b[0],
                               comp, flags, buffer, buffer_size);
@@ -632,7 +632,7 @@ int cv::floodFill( InputOutputArray _image, Point seedPoint,
 }
 
 int cv::floodFill( InputOutputArray _image, InputOutputArray _mask,
-                   Point seedPoint, Scalar newVal, Rect* rect, 
+                   Point seedPoint, Scalar newVal, Rect* rect,
                    Scalar loDiff, Scalar upDiff, int flags )
 {
     CvConnectedComp ccomp;
index 27a535c..98dbf74 100644 (file)
@@ -230,7 +230,7 @@ void GMM::calcInverseCovAndDeterm( int ci )
   Calculate beta - parameter of GrabCut algorithm.
   beta = 1/(2*avg(sqr(||color[i] - color[j]||)))
 */
-double calcBeta( const Mat& img )
+static double calcBeta( const Mat& img )
 {
     double beta = 0;
     for( int y = 0; y < img.rows; y++ )
@@ -272,7 +272,7 @@ double calcBeta( const Mat& img )
   Calculate weights of noterminal vertices of graph.
   beta and gamma - parameters of GrabCut algorithm.
  */
-void calcNWeights( const Mat& img, Mat& leftW, Mat& upleftW, Mat& upW, Mat& uprightW, double beta, double gamma )
+static void calcNWeights( const Mat& img, Mat& leftW, Mat& upleftW, Mat& upW, Mat& uprightW, double beta, double gamma )
 {
     const double gammaDivSqrt2 = gamma / std::sqrt(2.0f);
     leftW.create( img.rows, img.cols, CV_64FC1 );
@@ -319,7 +319,7 @@ void calcNWeights( const Mat& img, Mat& leftW, Mat& upleftW, Mat& upW, Mat& upri
 /*
   Check size, type and element values of mask matrix.
  */
-void checkMask( const Mat& img, const Mat& mask )
+static void checkMask( const Mat& img, const Mat& mask )
 {
     if( mask.empty() )
         CV_Error( CV_StsBadArg, "mask is empty" );
@@ -342,7 +342,7 @@ void checkMask( const Mat& img, const Mat& mask )
 /*
   Initialize mask using rectangular.
 */
-void initMaskWithRect( Mat& mask, Size imgSize, Rect rect )
+static void initMaskWithRect( Mat& mask, Size imgSize, Rect rect )
 {
     mask.create( imgSize, CV_8UC1 );
     mask.setTo( GC_BGD );
@@ -358,7 +358,7 @@ void initMaskWithRect( Mat& mask, Size imgSize, Rect rect )
 /*
   Initialize GMM background and foreground models using kmeans algorithm.
 */
-void initGMMs( const Mat& img, const Mat& mask, GMM& bgdGMM, GMM& fgdGMM )
+static void initGMMs( const Mat& img, const Mat& mask, GMM& bgdGMM, GMM& fgdGMM )
 {
     const int kMeansItCount = 10;
     const int kMeansType = KMEANS_PP_CENTERS;
@@ -398,7 +398,7 @@ void initGMMs( const Mat& img, const Mat& mask, GMM& bgdGMM, GMM& fgdGMM )
 /*
   Assign GMMs components for each pixel.
 */
-void assignGMMsComponents( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, Mat& compIdxs )
+static void assignGMMsComponents( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, Mat& compIdxs )
 {
     Point p;
     for( p.y = 0; p.y < img.rows; p.y++ )
@@ -415,7 +415,7 @@ void assignGMMsComponents( const Mat& img, const Mat& mask, const GMM& bgdGMM, c
 /*
   Learn GMMs parameters.
 */
-void learnGMMs( const Mat& img, const Mat& mask, const Mat& compIdxs, GMM& bgdGMM, GMM& fgdGMM )
+static void learnGMMs( const Mat& img, const Mat& mask, const Mat& compIdxs, GMM& bgdGMM, GMM& fgdGMM )
 {
     bgdGMM.initLearning();
     fgdGMM.initLearning();
@@ -443,7 +443,7 @@ void learnGMMs( const Mat& img, const Mat& mask, const Mat& compIdxs, GMM& bgdGM
 /*
   Construct GCGraph
 */
-void constructGCGraph( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, double lambda,
+static void constructGCGraph( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, double lambda,
                        const Mat& leftW, const Mat& upleftW, const Mat& upW, const Mat& uprightW,
                        GCGraph<double>& graph )
 {
@@ -506,7 +506,7 @@ void constructGCGraph( const Mat& img, const Mat& mask, const GMM& bgdGMM, const
 /*
   Estimate segmentation using MaxFlow algorithm
 */
-void estimateSegmentation( GCGraph<double>& graph, Mat& mask )
+static void estimateSegmentation( GCGraph<double>& graph, Mat& mask )
 {
     graph.maxFlow();
     Point p;
@@ -533,7 +533,7 @@ void cv::grabCut( InputArray _img, InputOutputArray _mask, Rect rect,
     Mat& mask = _mask.getMatRef();
     Mat& bgdModel = _bgdModel.getMatRef();
     Mat& fgdModel = _fgdModel.getMatRef();
-    
+
     if( img.empty() )
         CV_Error( CV_StsBadArg, "image is empty" );
     if( img.type() != CV_8UC3 )
index a2ada39..462f712 100644 (file)
@@ -114,7 +114,7 @@ icvHoughLinesStandard( const CvMat* img, float rho, float theta,
     _tabCos.allocate(numangle);
     int *accum = _accum, *sort_buf = _sort_buf;
     float *tabSin = _tabSin, *tabCos = _tabCos;
-    
+
     memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );
 
     for( ang = 0, n = 0; n < numangle; ang += theta, n++ )
@@ -249,7 +249,7 @@ icvHoughLinesSDiv( const CvMat* img,
     /* Precalculating sin */
     _sinTable.resize( 5 * tn * stn );
     sinTable = &_sinTable[0];
-    
+
     for( index = 0; index < 5 * tn * stn; index++ )
         sinTable[index] = (float)cos( stheta * index * 0.2f );
 
@@ -449,7 +449,7 @@ icvHoughLinesSDiv( const CvMat* img,
             h_get_next__index( &pos );
         }
     }
-    
+
     h_destroy_list__index(list);
 }
 
@@ -756,7 +756,7 @@ cvHoughLines2( CvArr* src_image, void* lineStorage, int method,
     }
     else
         CV_Error( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" );
-    
+
     iparam1 = cvRound(param1);
     iparam2 = cvRound(param2);
 
@@ -842,7 +842,7 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
     acols = accum->cols - 2;
     adata = accum->data.i;
     astep = accum->step/sizeof(adata[0]);
-       // Accumulate circle evidence for each edge pixel
+    // Accumulate circle evidence for each edge pixel
     for( y = 0; y < rows; y++ )
     {
         const uchar* edges_row = edges->data.ptr + y*edges->step;
@@ -868,7 +868,7 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
 
             x0 = cvRound((x*idp)*ONE);
             y0 = cvRound((y*idp)*ONE);
-                       // Step from min_radius to max_radius in both directions of the gradient
+            // Step from min_radius to max_radius in both directions of the gradient
             for( k = 0; k < 2; k++ )
             {
                 x1 = x0 + min_radius * sx;
@@ -894,7 +894,7 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
     nz_count = nz->total;
     if( !nz_count )
         return;
-       //Find possible circle centers
+    //Find possible circle centers
     for( y = 1; y < arows - 1; y++ )
     {
         for( x = 1; x < acols - 1; x++ )
@@ -924,19 +924,19 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
     dr = dp;
     min_dist = MAX( min_dist, dp );
     min_dist *= min_dist;
-       // For each found possible center
-       // Estimate radius and check support
+    // For each found possible center
+    // Estimate radius and check support
     for( i = 0; i < centers->total; i++ )
     {
         int ofs = *(int*)cvGetSeqElem( centers, i );
         y = ofs/(acols+2);
         x = ofs - (y)*(acols+2);
-               //Calculate circle's center in pixels
+        //Calculate circle's center in pixels
         float cx = (float)((x + 0.5f)*dp), cy = (float)(( y + 0.5f )*dp);
         float start_dist, dist_sum;
         float r_best = 0, c[3];
         int max_count = 0;
-               // Check distance with previously detected circles
+        // Check distance with previously detected circles
         for( j = 0; j < circles->total; j++ )
         {
             float* c = (float*)cvGetSeqElem( circles, j );
@@ -946,7 +946,7 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
 
         if( j < circles->total )
             continue;
-               // Estimate best radius
+        // Estimate best radius
         cvStartReadSeq( nz, &reader );
         for( j = k = 0; j < nz_count; j++ )
         {
@@ -982,7 +982,7 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
             {
                 float r_cur = ddata[sort_buf[(j + start_idx)/2]];
                 if( (start_idx - j)*r_best >= max_count*r_cur ||
-                    (r_best < FLT_EPSILON && start_idx - j >= max_count) ) 
+                    (r_best < FLT_EPSILON && start_idx - j >= max_count) )
                 {
                     r_best = r_cur;
                     max_count = start_idx - j;
@@ -993,7 +993,7 @@ icvHoughCirclesGradient( CvMat* img, float dp, float min_dist,
             }
             dist_sum += d;
         }
-               // Check if the circle has enough support
+        // Check if the circle has enough support
         if( max_count > acc_threshold )
         {
             c[0] = cx;
@@ -1103,9 +1103,9 @@ static void seqToMat(const CvSeq* seq, OutputArray _arr)
     else
         _arr.release();
 }
-    
+
 }
-    
+
 void cv::HoughLines( InputArray _image, OutputArray _lines,
                      double rho, double theta, int threshold,
                      double srn, double stn )
index f78af82..71582cb 100644 (file)
@@ -406,42 +406,42 @@ static void fftShift(InputOutputArray _out)
     merge(planes, out);
 }
 
-Point2d weightedCentroid(InputArray _src, cv::Point peakLocation, cv::Size weightBoxSize)
+static Point2d weightedCentroid(InputArray _src, cv::Point peakLocation, cv::Size weightBoxSize)
 {
     Mat src = _src.getMat();
-    
+
     int type = src.type();
     CV_Assert( type == CV_32FC1 || type == CV_64FC1 );
-    
+
     int minr = peakLocation.y - (weightBoxSize.height >> 1);
     int maxr = peakLocation.y + (weightBoxSize.height >> 1);
     int minc = peakLocation.x - (weightBoxSize.width  >> 1);
     int maxc = peakLocation.x + (weightBoxSize.width  >> 1);
-    
+
     Point2d centroid;
     double sumIntensity = 0.0;
-    
+
     // clamp the values to min and max if needed.
     if(minr < 0)
     {
         minr = 0;
     }
-    
+
     if(minc < 0)
     {
         minc = 0;
     }
-    
+
     if(maxr > src.rows - 1)
     {
         maxr = src.rows - 1;
     }
-    
+
     if(maxc > src.cols - 1)
     {
         maxc = src.cols - 1;
     }
-    
+
     if(type == CV_32FC1)
     {
         const float* dataIn = (const float*)src.data;
@@ -454,7 +454,7 @@ Point2d weightedCentroid(InputArray _src, cv::Point peakLocation, cv::Size weigh
                 centroid.y   += (double)y*dataIn[x];
                 sumIntensity += (double)dataIn[x];
             }
-            
+
             dataIn += src.cols;
         }
     }
@@ -470,19 +470,19 @@ Point2d weightedCentroid(InputArray _src, cv::Point peakLocation, cv::Size weigh
                 centroid.y   += (double)y*dataIn[x];
                 sumIntensity += dataIn[x];
             }
-            
+
             dataIn += src.cols;
         }
     }
-    
+
     sumIntensity += DBL_EPSILON; // prevent div0 problems...
-    
+
     centroid.x /= sumIntensity;
     centroid.y /= sumIntensity;
-    
+
     return centroid;
 }
-    
+
 }
 
 cv::Point2d cv::phaseCorrelate(InputArray _src1, InputArray _src2, InputArray _window)
index eda4c85..cfc171a 100644 (file)
@@ -73,13 +73,13 @@ template<typename T, typename ST> struct RowSum : public BaseRowFilter
         ksize = _ksize;
         anchor = _anchor;
     }
-    
+
     void operator()(const uchar* src, uchar* dst, int width, int cn)
     {
         const T* S = (const T*)src;
         ST* D = (ST*)dst;
         int i = 0, k, ksz_cn = ksize*cn;
-        
+
         width = (width - 1)*cn;
         for( k = 0; k < cn; k++, S++, D++ )
         {
@@ -108,7 +108,7 @@ template<typename ST, typename T> struct ColumnSum : public BaseColumnFilter
     }
 
     void reset() { sumCount = 0; }
-    
+
     void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
     {
         int i;
@@ -198,7 +198,7 @@ template<typename ST, typename T> struct ColumnSum : public BaseColumnFilter
 
 
 }
-    
+
 cv::Ptr<cv::BaseRowFilter> cv::getRowSumFilter(int srcType, int sumType, int ksize, int anchor)
 {
     int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
@@ -325,7 +325,7 @@ void cv::blur( InputArray src, OutputArray dst,
            Size ksize, Point anchor, int borderType )
 {
     boxFilter( src, dst, -1, ksize, anchor, true, borderType );
-}    
+}
 
 /****************************************************************************************\
                                      Gaussian Blur
@@ -422,7 +422,7 @@ void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize,
     Mat src = _src.getMat();
     _dst.create( src.size(), src.type() );
     Mat dst = _dst.getMat();
-    
+
     if( borderType != BORDER_CONSTANT )
     {
         if( src.rows == 1 )
@@ -454,7 +454,7 @@ void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize,
 namespace cv
 {
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4244 )
 #endif
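This smooth.cpp hunk (and its counterpart further down that restores warning 4244) guards the _MSC_VER comparison with defined(...): on gcc the macro does not exist, so a bare #if _MSC_VER >= 1200 both evaluates it as 0 and triggers -Wundef. It is the same issue as the GTEST_CREATE_SHARED_LIBRARY change earlier, except that the macro's value matters here, so #ifdef alone would not be enough. A minimal sketch (standalone C++, not OpenCV code):

    // g++ -Wall -Wundef -c demo.cpp   (i.e. a compiler that does not define _MSC_VER)

    #if _MSC_VER >= 1200                        // warns on gcc: "_MSC_VER" is not defined
    #endif

    #if defined _MSC_VER && _MSC_VER >= 1200    // silent on gcc; still true on MSVC 6.0 and later
    #endif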
 
@@ -479,7 +479,7 @@ typedef struct
 
 #if CV_SSE2
 #define MEDIAN_HAVE_SIMD 1
-    
+
 static inline void histogram_add_simd( const HT x[16], HT y[16] )
 {
     const __m128i* rx = (const __m128i*)x;
@@ -499,12 +499,12 @@ static inline void histogram_sub_simd( const HT x[16], HT y[16] )
     _mm_store_si128(ry+0, r0);
     _mm_store_si128(ry+1, r1);
 }
-    
+
 #else
 #define MEDIAN_HAVE_SIMD 0
 #endif
 
-    
+
 static inline void histogram_add( const HT x[16], HT y[16] )
 {
     int i;
@@ -667,14 +667,14 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
                 {
                     for( j = 0; j < 2*r; ++j )
                         histogram_add( &h_coarse[16*(n*c+j)], H[c].coarse );
-                    
+
                     for( j = r; j < n-r; j++ )
                     {
                         int t = 2*r*r + 2*r, b, sum = 0;
                         HT* segment;
-                        
+
                         histogram_add( &h_coarse[16*(n*c + std::min(j+r,n-1))], H[c].coarse );
-                        
+
                         // Find median at coarse level
                         for ( k = 0; k < 16 ; ++k )
                         {
@@ -686,14 +686,14 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
                             }
                         }
                         assert( k < 16 );
-                        
+
                         /* Update corresponding histogram segment */
                         if ( luc[c][k] <= j-r )
                         {
                             memset( &H[c].fine[k], 0, 16 * sizeof(HT) );
                             for ( luc[c][k] = j-r; luc[c][k] < MIN(j+r+1,n); ++luc[c][k] )
                                 histogram_add( &h_fine[16*(n*(16*c+k)+luc[c][k])], H[c].fine[k] );
-                            
+
                             if ( luc[c][k] < j+r+1 )
                             {
                                 histogram_muladd( j+r+1 - n, &h_fine[16*(n*(16*c+k)+(n-1))], &H[c].fine[k][0] );
@@ -708,9 +708,9 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
                                 histogram_add( &h_fine[16*(n*(16*c+k)+MIN(luc[c][k],n-1))], H[c].fine[k] );
                             }
                         }
-                        
+
                         histogram_sub( &h_coarse[16*(n*c+MAX(j-r,0))], H[c].coarse );
-                        
+
                         /* Find median in segment */
                         segment = H[c].fine[k];
                         for ( b = 0; b < 16 ; b++ )
@@ -734,7 +734,7 @@ medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
 }
 
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( default: 4244 )
 #endif
 
@@ -910,7 +910,7 @@ struct MinMax16u
         b = std::max(b, t);
     }
 };
-    
+
 struct MinMax16s
 {
     typedef short value_type;
@@ -974,7 +974,7 @@ struct MinMaxVec16u
     }
 };
 
-    
+
 struct MinMaxVec16s
 {
     typedef short value_type;
@@ -988,9 +988,9 @@ struct MinMaxVec16s
         a = _mm_min_epi16(a, b);
         b = _mm_max_epi16(b, t);
     }
-};    
+};
+
 
-    
 struct MinMaxVec32f
 {
     typedef float value_type;
@@ -1033,7 +1033,7 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
     Op op;
     VecOp vop;
     volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
-    
+
     if( m == 3 )
     {
         if( size.width == 1 || size.height == 1 )
@@ -1055,7 +1055,7 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
                 }
             return;
         }
-        
+
         size.width *= cn;
         for( i = 0; i < size.height; i++, dst += dstep )
         {
@@ -1155,7 +1155,7 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
                         p[k*5+2] = rowk[j]; p[k*5+3] = rowk[j3];
                         p[k*5+4] = rowk[j4];
                     }
-                    
+
                     op(p[1], p[2]); op(p[0], p[1]); op(p[1], p[2]); op(p[4], p[5]); op(p[3], p[4]);
                     op(p[4], p[5]); op(p[0], p[3]); op(p[2], p[5]); op(p[2], p[3]); op(p[1], p[4]);
                     op(p[1], p[2]); op(p[3], p[4]); op(p[7], p[8]); op(p[6], p[7]); op(p[7], p[8]);
@@ -1195,7 +1195,7 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
                         p[k*5+2] = vop.load(rowk+j); p[k*5+3] = vop.load(rowk+j+cn);
                         p[k*5+4] = vop.load(rowk+j+cn*2);
                     }
-                    
+
                     vop(p[1], p[2]); vop(p[0], p[1]); vop(p[1], p[2]); vop(p[4], p[5]); vop(p[3], p[4]);
                     vop(p[4], p[5]); vop(p[0], p[3]); vop(p[2], p[5]); vop(p[2], p[3]); vop(p[1], p[4]);
                     vop(p[1], p[2]); vop(p[3], p[4]); vop(p[7], p[8]); vop(p[6], p[7]); vop(p[7], p[8]);
@@ -1229,13 +1229,13 @@ medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
 }
 
 }
-    
+
 void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
 {
     Mat src0 = _src0.getMat();
     _dst.create( src0.size(), src0.type() );
     Mat dst = _dst.getMat();
-    
+
     if( ksize <= 1 )
     {
         src0.copyTo(dst);
@@ -1248,13 +1248,13 @@ void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
     if (tegra::medianBlur(src0, dst, ksize))
         return;
 #endif
-    
+
     bool useSortNet = ksize == 3 || (ksize == 5
 #if !CV_SSE2
             && src0.depth() > CV_8U
 #endif
         );
-    
+
     Mat src;
     if( useSortNet )
     {
@@ -1315,7 +1315,7 @@ bilateralFilter_8u( const Mat& src, Mat& dst, int d,
         sigma_color = 1;
     if( sigma_space <= 0 )
         sigma_space = 1;
-    
+
     double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
     double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
 
@@ -1422,7 +1422,7 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
         sigma_color = 1;
     if( sigma_space <= 0 )
         sigma_space = 1;
-    
+
     double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
     double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
 
@@ -1433,9 +1433,9 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
     radius = MAX(radius, 1);
     d = radius*2 + 1;
     // compute the min/max range for the input image (even if multichannel)
-    
+
     minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc );
-    
+
     // temporary copy of the image with borders for easy processing
     Mat temp;
     copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
@@ -1454,7 +1454,7 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
     float* expLUT = &_expLUT[0];
 
     scale_index = kExpNumBins/len;
-    
+
     // initialize the exp LUT
     for( i = 0; i < kExpNumBins+2; i++ )
     {
@@ -1467,7 +1467,7 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
         else
             expLUT[i] = 0.f;
     }
-    
+
     // initialize space-related bilateral filter coefficients
     for( i = -radius, maxk = 0; i <= radius; i++ )
         for( j = -radius; j <= radius; j++ )
@@ -1481,7 +1481,7 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
 
     for( i = 0; i < size.height; i++ )
     {
-           const float* sptr = (const float*)(temp.data + (i+radius)*temp.step) + radius*cn;
+        const float* sptr = (const float*)(temp.data + (i+radius)*temp.step) + radius*cn;
         float* dptr = (float*)(dst.data + i*dst.step);
 
         if( cn == 1 )
@@ -1493,11 +1493,11 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
                 for( k = 0; k < maxk; k++ )
                 {
                     float val = sptr[j + space_ofs[k]];
-                                       float alpha = (float)(std::abs(val - val0)*scale_index);
+                    float alpha = (float)(std::abs(val - val0)*scale_index);
                     int idx = cvFloor(alpha);
                     alpha -= idx;
                     float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx]));
-                       sum += val*w;
+                    sum += val*w;
                     wsum += w;
                 }
                 dptr[j] = (float)(sum/wsum);
@@ -1514,7 +1514,7 @@ bilateralFilter_32f( const Mat& src, Mat& dst, int d,
                 {
                     const float* sptr_k = sptr + j + space_ofs[k];
                     float b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
-                                       float alpha = (float)((std::abs(b - b0) +
+                    float alpha = (float)((std::abs(b - b0) +
                         std::abs(g - g0) + std::abs(r - r0))*scale_index);
                     int idx = cvFloor(alpha);
                     alpha -= idx;
@@ -1541,7 +1541,7 @@ void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d,
     Mat src = _src.getMat();
     _dst.create( src.size(), src.type() );
     Mat dst = _dst.getMat();
-    
+
     if( src.depth() == CV_8U )
         bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType );
     else if( src.depth() == CV_32F )
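
Besides the MSVC guard and whitespace fixes, the bilateralFilter_32f hunks above show its range-weight lookup table in use: the color distance is scaled into LUT units and the weight is linearly interpolated between neighbouring entries. A small self-contained sketch of that pattern, assuming a precomputed exp() table and distances in [0, maxDist] (constants and names are illustrative):

    #include <cmath>
    #include <vector>

    // exp(-x) over [0, maxDist], sampled into `bins` entries plus one extra for interpolation
    static std::vector<float> buildExpLUT(int bins, float maxDist)
    {
        std::vector<float> lut(bins + 2);
        for (int i = 0; i < bins + 2; ++i)
            lut[i] = std::exp(-maxDist * i / bins);
        return lut;
    }

    // interpolated lookup, mirroring expLUT[idx] + alpha*(expLUT[idx+1]-expLUT[idx]) above
    static float lutWeight(const std::vector<float>& lut, float dist, float scaleIndex)
    {
        float a = dist * scaleIndex;   // position in LUT units (dist assumed >= 0)
        int idx = (int)a;              // integer bin
        a -= idx;                      // fractional part
        return lut[idx] + a * (lut[idx + 1] - lut[idx]);
    }
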
index 3a57a78..b441970 100644 (file)
@@ -134,7 +134,7 @@ void integral_( const T* src, size_t _srcstep, ST* sum, size_t _sumstep,
 
             if( size.width == cn )
                 buf[cn] = 0;
-            
+
             if( sqsum )
             {
                 sqsum[-cn] = 0;
@@ -148,7 +148,7 @@ void integral_( const T* src, size_t _srcstep, ST* sum, size_t _sumstep,
             sum += sumstep - cn;
             tilted += tiltedstep - cn;
             buf += -cn;
-            
+
             if( sqsum )
                 sqsum += sqsumstep - cn;
 
@@ -197,7 +197,7 @@ void integral_( const T* src, size_t _srcstep, ST* sum, size_t _sumstep,
                     tilted[x] = t0 + t1 + tilted[x - tiltedstep - cn];
                     buf[x] = t0;
                 }
-                
+
                 if( sqsum )
                     sqsum++;
             }
@@ -205,10 +205,10 @@ void integral_( const T* src, size_t _srcstep, ST* sum, size_t _sumstep,
     }
 }
 
-    
+
 #define DEF_INTEGRAL_FUNC(suffix, T, ST, QT) \
-void integral_##suffix( T* src, size_t srcstep, ST* sum, size_t sumstep, QT* sqsum, size_t sqsumstep, \
-                        ST* tilted, size_t tiltedstep, Size size, int cn ) \
+static void integral_##suffix( T* src, size_t srcstep, ST* sum, size_t sumstep, QT* sqsum, size_t sqsumstep, \
+                              ST* tilted, size_t tiltedstep, Size size, int cn ) \
 { integral_(src, srcstep, sum, sumstep, sqsum, sqsumstep, tilted, tiltedstep, size, cn); }
 
 DEF_INTEGRAL_FUNC(8u32s, uchar, int, double)
@@ -217,7 +217,7 @@ DEF_INTEGRAL_FUNC(8u64f, uchar, double, double)
 DEF_INTEGRAL_FUNC(32f, float, float, double)
 DEF_INTEGRAL_FUNC(32f64f, float, double, double)
 DEF_INTEGRAL_FUNC(64f, double, double, double)
-    
+
 typedef void (*IntegralFunc)(const uchar* src, size_t srcstep, uchar* sum, size_t sumstep,
                              uchar* sqsum, size_t sqsumstep, uchar* tilted, size_t tstep,
                              Size size, int cn );
@@ -236,19 +236,19 @@ void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, Output
     sdepth = CV_MAT_DEPTH(sdepth);
     _sum.create( isize, CV_MAKETYPE(sdepth, cn) );
     sum = _sum.getMat();
-    
+
     if( _tilted.needed() )
     {
         _tilted.create( isize, CV_MAKETYPE(sdepth, cn) );
         tilted = _tilted.getMat();
     }
-    
+
     if( _sqsum.needed() )
     {
         _sqsum.create( isize, CV_MAKETYPE(CV_64F, cn) );
         sqsum = _sqsum.getMat();
     }
-    
+
     IntegralFunc func = 0;
 
     if( depth == CV_8U && sdepth == CV_32S )
@@ -269,7 +269,7 @@ void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, Output
     func( src.data, src.step, sum.data, sum.step, sqsum.data, sqsum.step,
           tilted.data, tilted.step, src.size(), cn );
 }
-    
+
 void cv::integral( InputArray src, OutputArray sum, int sdepth )
 {
     integral( src, sum, noArray(), noArray(), sdepth );
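
In the hunks above the only functional change is making the DEF_INTEGRAL_FUNC instantiations static, presumably so gcc's stricter warnings about definitions with no prior declaration stop flagging these file-local helpers. The underlying algorithm is the usual integral-image recurrence, where every output cell reuses three already-computed neighbours; a compact sketch for a single-channel image (illustrative, not the templated OpenCV kernel):

    #include <vector>

    // sum(y, x) = sum of src[0..y-1][0..x-1]; the table is (rows+1) x (cols+1)
    static std::vector<double> integralSketch(const unsigned char* src, int rows, int cols)
    {
        std::vector<double> sum((rows + 1) * (cols + 1), 0.0);
        for (int y = 1; y <= rows; ++y)
            for (int x = 1; x <= cols; ++x)
                sum[y*(cols+1) + x] = src[(y-1)*cols + (x-1)]
                                    + sum[(y-1)*(cols+1) + x]      // above
                                    + sum[y*(cols+1) + (x-1)]      // left
                                    - sum[(y-1)*(cols+1) + (x-1)]; // above-left, counted twice
        return sum;
    }
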
index 3afce31..7a2695a 100644 (file)
@@ -49,7 +49,7 @@
 #include "opencv2/core/core_c.h"
 #include <stdio.h>
 
-#if _MSC_VER >= 1200 || defined __BORLANDC__
+#if (defined _MSC_VER && _MSC_VER >= 1200) || defined __BORLANDC__
 #define cv_stricmp stricmp
 #define cv_strnicmp strnicmp
 #if defined WINCE
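
The rewrite of `#if _MSC_VER >= 1200` into `#if (defined _MSC_VER && _MSC_VER >= 1200)` above recurs throughout this commit. The likely reason: with the stricter gcc flags, evaluating an undefined macro inside #if (where it is silently treated as 0) is reported by -Wundef, while checking defined first keeps the comparison quiet on every compiler. A tiny illustration with a made-up macro:

    /* FOO_VER is never defined in this translation unit */
    #if FOO_VER >= 2                      /* gcc -Wundef: "FOO_VER" is not defined */
    #endif

    #if defined FOO_VER && FOO_VER >= 2   /* no warning: the comparison is guarded */
    #endif
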
index 6d2ca32..3911ebd 100644 (file)
@@ -3478,7 +3478,7 @@ typedef struct CvBGCodeBookModel
     CvBGCodeBookElem* freeList;
 } CvBGCodeBookModel;
 
-CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel();
+CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel( void );
 CVAPI(void) cvReleaseBGCodeBookModel( CvBGCodeBookModel** model );
 
 CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image,
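
Adding the explicit void parameter list above matters because this header is also consumed as C: `T f();` in C declares a function with an unspecified argument list, whereas `T f(void)` is a real prototype taking no arguments, and the stricter gcc settings (presumably -Wstrict-prototypes) flag the former. In C++ the two forms already mean the same thing. A two-line illustration with a generic opaque type:

    /* compiled as C */
    struct model;                   /* some opaque type */
    struct model *make_old();       /* not a prototype: argument list unspecified; -Wstrict-prototypes warns */
    struct model *make_new(void);   /* prototype: the function explicitly takes no arguments */
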
index e7dd622..ae4b9c1 100644 (file)
@@ -41,7 +41,7 @@
 
 #include "precomp.hpp"
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning(disable:4786) // Disable MSVC warnings in the standard library.
 #pragma warning(disable:4100)
 #pragma warning(disable:4512)
@@ -49,7 +49,7 @@
 #include <stdio.h>
 #include <map>
 #include <algorithm>
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning(default:4100)
 #pragma warning(default:4512)
 #endif
@@ -148,7 +148,7 @@ CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
                     cvReleaseImage(&gray_img);
                 CV_CALL(gray_img = cvCreateImage(image_size, IPL_DEPTH_8U, 1));
             }
-            
+
             CV_CALL(cvCvtColor(samples[c], gray_img, CV_BGR2GRAY));
 
             img = gray_img;
@@ -172,7 +172,7 @@ CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
                                                    etalon_size, points, &count) != 0;
         if (count == 0)
             continue;
-        
+
         // If found is true, it means all the points were found (count = num_points).
         // If found is false but count is non-zero, it means that not all points were found.
 
@@ -258,7 +258,7 @@ CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
                              { 0.f, 1.f, 0.f, 0.f },
                              { 0.f, 0.f, 1.f, 0.f },
                              { transVect[0], transVect[1], transVect[2], 1.f } };
-        
+
         float rmat[4][4] = { { rotMatr[0], rotMatr[1], rotMatr[2], 0.f },
                              { rotMatr[3], rotMatr[4], rotMatr[5], 0.f },
                              { rotMatr[6], rotMatr[7], rotMatr[8], 0.f },
@@ -267,7 +267,7 @@ CV_IMPL CvBool cv3dTrackerCalibrateCameras(int num_cameras,
 
         MultMatrix(camera_info[c].mat, tmat, rmat);
 
-        // change the transformation of the cameras to put them in the world coordinate 
+        // change the transformation of the cameras to put them in the world coordinate
         // system we want to work with.
 
         // Start with an identity matrix; then fill in the values to accomplish
index b46c995..635b862 100644 (file)
 #include "assert.h"
 #include "math.h"
 
-#if _MSC_VER >= 1400
+#if defined _MSC_VER && _MSC_VER >= 1400
 #pragma warning(disable: 4512) // suppress "assignment operator could not be generated"
 #endif
 
-// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search 
-// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog., 
-// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html 
+// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search
+// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog.,
+// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html
 #undef __deref
 #undef __valuetype
 
@@ -72,23 +72,23 @@ public:
 
 private:
   struct node {
-    int dim;                   // split dimension; >=0 for nodes, -1 for leaves
-    __valuetype value;         // if leaf, value of leaf
-    int left, right;           // node indices of left and right branches
-    scalar_type boundary;      // left if deref(value,dim)<=boundary, otherwise right
+    int dim;      // split dimension; >=0 for nodes, -1 for leaves
+    __valuetype value;    // if leaf, value of leaf
+    int left, right;    // node indices of left and right branches
+    scalar_type boundary; // left if deref(value,dim)<=boundary, otherwise right
   };
   typedef std::vector < node > node_array;
 
-  __deref deref;               // requires operator() (__valuetype lhs,int dim)
+  __deref deref;    // requires operator() (__valuetype lhs,int dim)
 
-  node_array nodes;            // node storage
-  int point_dim;               // dimension of points (the k in kd-tree)
-  int root_node;               // index of root node, -1 if empty tree
+  node_array nodes;   // node storage
+  int point_dim;    // dimension of points (the k in kd-tree)
+  int root_node;    // index of root node, -1 if empty tree
 
   // for given set of point indices, compute dimension of highest variance
   template < class __instype, class __valuector >
   int dimension_of_highest_variance(__instype * first, __instype * last,
-                                   __valuector ctor) {
+            __valuector ctor) {
     assert(last - first > 0);
 
     accum_type maxvar = -std::numeric_limits < accum_type >::max();
@@ -96,32 +96,32 @@ private:
     for (int j = 0; j < point_dim; ++j) {
       accum_type mean = 0;
       for (__instype * k = first; k < last; ++k)
-       mean += deref(ctor(*k), j);
+  mean += deref(ctor(*k), j);
       mean /= last - first;
       accum_type var = 0;
       for (__instype * k = first; k < last; ++k) {
-       accum_type diff = accum_type(deref(ctor(*k), j)) - mean;
-       var += diff * diff;
+  accum_type diff = accum_type(deref(ctor(*k), j)) - mean;
+  var += diff * diff;
       }
       var /= last - first;
 
       assert(maxj != -1 || var >= maxvar);
 
       if (var >= maxvar) {
-       maxvar = var;
-       maxj = j;
+  maxvar = var;
+  maxj = j;
       }
     }
 
     return maxj;
   }
 
-  // given point indices and dimension, find index of median; (almost) modifies [first,last) 
+  // given point indices and dimension, find index of median; (almost) modifies [first,last)
   // such that points_in[first,median]<=point[median], points_in(median,last)>point[median].
   // implemented as partial quicksort; expected linear perf.
   template < class __instype, class __valuector >
   __instype * median_partition(__instype * first, __instype * last,
-                              int dim, __valuector ctor) {
+             int dim, __valuector ctor) {
     assert(last - first > 0);
     __instype *k = first + (last - first) / 2;
     median_partition(first, last, k, dim, ctor);
@@ -143,14 +143,14 @@ private:
   };
 
   template < class __instype, class __valuector >
-  void median_partition(__instype * first, __instype * last, 
-                       __instype * k, int dim, __valuector ctor) {
+  void median_partition(__instype * first, __instype * last,
+      __instype * k, int dim, __valuector ctor) {
     int pivot = (int)((last - first) / 2);
 
     std::swap(first[pivot], last[-1]);
     __instype *middle = std::partition(first, last - 1,
-                                      median_pr < __instype, __valuector > 
-                                      (last[-1], dim, deref, ctor));
+               median_pr < __instype, __valuector >
+               (last[-1], dim, deref, ctor));
     std::swap(*middle, last[-1]);
 
     if (middle < k)
@@ -170,36 +170,36 @@ private:
       __instype *median = median_partition(first, last, dim, ctor);
 
       __instype *split = median;
-      for (; split != last && deref(ctor(*split), dim) == 
-            deref(ctor(*median), dim); ++split);
+      for (; split != last && deref(ctor(*split), dim) ==
+       deref(ctor(*median), dim); ++split);
 
       if (split == last) { // leaf
-       int nexti = -1;
-       for (--split; split >= first; --split) {
-         int i = (int)nodes.size();
-         node & n = *nodes.insert(nodes.end(), node());
-         n.dim = -1;
-         n.value = ctor(*split);
-         n.left = -1;
-         n.right = nexti;
-         nexti = i;
-       }
-
-       return nexti;
+  int nexti = -1;
+  for (--split; split >= first; --split) {
+    int i = (int)nodes.size();
+    node & n = *nodes.insert(nodes.end(), node());
+    n.dim = -1;
+    n.value = ctor(*split);
+    n.left = -1;
+    n.right = nexti;
+    nexti = i;
+  }
+
+  return nexti;
       } else { // node
-       int i = (int)nodes.size();
-       // note that recursive insert may invalidate this ref
-       node & n = *nodes.insert(nodes.end(), node());
+  int i = (int)nodes.size();
+  // note that recursive insert may invalidate this ref
+  node & n = *nodes.insert(nodes.end(), node());
 
-       n.dim = dim;
-       n.boundary = deref(ctor(*median), dim);
+  n.dim = dim;
+  n.boundary = deref(ctor(*median), dim);
 
-       int left = insert(first, split, ctor);
-       nodes[i].left = left;
-       int right = insert(split, last, ctor);
-       nodes[i].right = right;
+  int left = insert(first, split, ctor);
+  nodes[i].left = left;
+  int right = insert(split, last, ctor);
+  nodes[i].right = right;
 
-       return i;
+  return i;
       }
     }
   }
@@ -214,21 +214,21 @@ private:
 
     if (n.dim >= 0) { // node
       if (deref(p, n.dim) <= n.boundary) // left
-       r = remove(&n.left, p);
+  r = remove(&n.left, p);
       else // right
-       r = remove(&n.right, p);
+  r = remove(&n.right, p);
 
       // if terminal, remove this node
       if (n.left == -1 && n.right == -1)
-       *i = -1;
+  *i = -1;
 
       return r;
     } else { // leaf
       if (n.value == p) {
-       *i = n.right;
-       return true;
+  *i = n.right;
+  return true;
       } else
-       return remove(&n.right, p);
+  return remove(&n.right, p);
     }
   }
 
@@ -245,14 +245,14 @@ public:
   }
   // given points, initialize a balanced tree
   CvKDTree(__valuetype * first, __valuetype * last, int _point_dim,
-          __deref _deref = __deref())
+     __deref _deref = __deref())
     : deref(_deref) {
     set_data(first, last, _point_dim, identity_ctor());
   }
   // given points, initialize a balanced tree
   template < class __instype, class __valuector >
   CvKDTree(__instype * first, __instype * last, int _point_dim,
-          __valuector ctor, __deref _deref = __deref())
+     __valuector ctor, __deref _deref = __deref())
     : deref(_deref) {
     set_data(first, last, _point_dim, ctor);
   }
@@ -266,7 +266,7 @@ public:
   }
   template < class __instype, class __valuector >
   void set_data(__instype * first, __instype * last, int _point_dim,
-               __valuector ctor) {
+    __valuector ctor) {
     point_dim = _point_dim;
     nodes.clear();
     nodes.reserve(last - first);
@@ -292,9 +292,9 @@ public:
       std::cout << " ";
     const node & n = nodes[i];
     if (n.dim >= 0) {
-      std::cout << "node " << i << ", left " << nodes[i].left << ", right " << 
-       nodes[i].right << ", dim " << nodes[i].dim << ", boundary " << 
-       nodes[i].boundary << std::endl;
+      std::cout << "node " << i << ", left " << nodes[i].left << ", right " <<
+  nodes[i].right << ", dim " << nodes[i].dim << ", boundary " <<
+  nodes[i].boundary << std::endl;
       print(n.left, indent + 3);
       print(n.right, indent + 3);
     } else
@@ -304,9 +304,9 @@ public:
   ////////////////////////////////////////////////////////////////////////////////////////
   // bbf search
 public:
-  struct bbf_nn {              // info on found neighbors (approx k nearest)
-    const __valuetype *p;      // nearest neighbor
-    accum_type dist;           // distance from d to query point
+  struct bbf_nn {   // info on found neighbors (approx k nearest)
+    const __valuetype *p; // nearest neighbor
+    accum_type dist;    // distance from d to query point
     bbf_nn(const __valuetype & _p, accum_type _dist)
       : p(&_p), dist(_dist) {
     }
@@ -316,9 +316,9 @@ public:
   };
   typedef std::vector < bbf_nn > bbf_nn_pqueue;
 private:
-  struct bbf_node {            // info on branches not taken
-    int node;                  // corresponding node
-    accum_type dist;           // minimum distance from bounds to query point
+  struct bbf_node {   // info on branches not taken
+    int node;     // corresponding node
+    accum_type dist;    // minimum distance from bounds to query point
     bbf_node(int _node, accum_type _dist)
       : node(_node), dist(_dist) {
     }
@@ -346,10 +346,10 @@ private:
   int bbf_branch(int i, const __desctype * d, bbf_pqueue & pq) const {
     const node & n = nodes[i];
     // push bbf_node with bounds of alternate branch, then branch
-    if (d[n.dim] <= n.boundary) {      // left
+    if (d[n.dim] <= n.boundary) { // left
       pq_alternate(n.right, pq, n.boundary - d[n.dim]);
       return n.left;
-    } else {                   // right
+    } else {      // right
       pq_alternate(n.left, pq, d[n.dim] - n.boundary);
       return n.right;
     }
@@ -366,11 +366,11 @@ private:
   }
 
   // called per candidate nearest neighbor; constructs new bbf_nn for
-  // candidate and adds it to priority queue of all candidates; if 
+  // candidate and adds it to priority queue of all candidates; if
   // queue len exceeds k, drops the point furthest from query point d.
   template < class __desctype >
-  void bbf_new_nn(bbf_nn_pqueue & nn_pq, int k, 
-                 const __desctype * d, const __valuetype & p) const {
+  void bbf_new_nn(bbf_nn_pqueue & nn_pq, int k,
+      const __desctype * d, const __valuetype & p) const {
     bbf_nn nn(p, distance(d, p));
     if ((int) nn_pq.size() < k) {
       nn_pq.push_back(nn);
@@ -384,14 +384,14 @@ private:
   }
 
 public:
-  // finds (with high probability) the k nearest neighbors of d, 
+  // finds (with high probability) the k nearest neighbors of d,
   // searching at most emax leaves/bins.
-  // ret_nn_pq is an array containing the (at most) k nearest neighbors 
+  // ret_nn_pq is an array containing the (at most) k nearest neighbors
   // (see bbf_nn structure def above).
   template < class __desctype >
-  int find_nn_bbf(const __desctype * d, 
-                 int k, int emax, 
-                 bbf_nn_pqueue & ret_nn_pq) const {
+  int find_nn_bbf(const __desctype * d,
+      int k, int emax,
+      bbf_nn_pqueue & ret_nn_pq) const {
     assert(k > 0);
     ret_nn_pq.clear();
 
@@ -411,17 +411,17 @@ public:
 
       int i;
       for (i = bbf.node;
-          i != -1 && nodes[i].dim >= 0; 
-          i = bbf_branch(i, d, tmp_pq));
+     i != -1 && nodes[i].dim >= 0;
+     i = bbf_branch(i, d, tmp_pq));
 
       if (i != -1) {
 
-       // add points in leaf/bin to ret_nn_pq
-       do {
-         bbf_new_nn(ret_nn_pq, k, d, nodes[i].value);
-       } while (-1 != (i = nodes[i].right));
+  // add points in leaf/bin to ret_nn_pq
+  do {
+    bbf_new_nn(ret_nn_pq, k, d, nodes[i].value);
+  } while (-1 != (i = nodes[i].right));
 
-       --emax;
+  --emax;
       }
     }
 
@@ -433,27 +433,27 @@ public:
   // orthogonal range search
 private:
   void find_ortho_range(int i, scalar_type * bounds_min,
-                       scalar_type * bounds_max,
-                       std::vector < __valuetype > &inbounds) const {
+      scalar_type * bounds_max,
+      std::vector < __valuetype > &inbounds) const {
     if (i == -1)
       return;
     const node & n = nodes[i];
     if (n.dim >= 0) { // node
       if (bounds_min[n.dim] <= n.boundary)
-       find_ortho_range(n.left, bounds_min, bounds_max, inbounds);
+  find_ortho_range(n.left, bounds_min, bounds_max, inbounds);
       if (bounds_max[n.dim] > n.boundary)
-       find_ortho_range(n.right, bounds_min, bounds_max, inbounds);
+  find_ortho_range(n.right, bounds_min, bounds_max, inbounds);
     } else { // leaf
       do {
-       inbounds.push_back(nodes[i].value);
+  inbounds.push_back(nodes[i].value);
       } while (-1 != (i = nodes[i].right));
     }
   }
 public:
   // return all points that lie within the given bounds; inbounds is cleared
   int find_ortho_range(scalar_type * bounds_min,
-                      scalar_type * bounds_max,
-                      std::vector < __valuetype > &inbounds) const {
+           scalar_type * bounds_max,
+           std::vector < __valuetype > &inbounds) const {
     inbounds.clear();
     find_ortho_range(root_node, bounds_min, bounds_max, inbounds);
     return (int)inbounds.size();
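
The kd-tree hunks above are indentation and whitespace only, but the comments describe the best-bin-first (BBF) search: at each internal node the query descends into the child on its side of the splitting plane, and the other child is queued together with its distance to the boundary so the most promising unexplored branches are visited first. A minimal sketch of that branching step with simplified types (not the CvKDTree interface):

    #include <queue>
    #include <vector>

    struct Pending { int node; double boundDist; };            // the branch not taken
    struct ByDist  { bool operator()(const Pending& a, const Pending& b) const
                     { return a.boundDist > b.boundDist; } };  // min-heap on boundDist

    // choose the child to follow and remember the alternative with its lower bound
    static int bbfBranch(int left, int right, int splitDim, double boundary,
                         const std::vector<double>& query,
                         std::priority_queue<Pending, std::vector<Pending>, ByDist>& pq)
    {
        if (query[splitDim] <= boundary) {
            pq.push(Pending{ right, boundary - query[splitDim] });
            return left;
        }
        pq.push(Pending{ left, query[splitDim] - boundary });
        return right;
    }
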
index bc52422..e3aef57 100644 (file)
@@ -237,9 +237,9 @@ public:
        virtual float*  GetFVVar(){return m_FVVar;}; /* returned pointer to array of maximal values of FV, if return 0 then FVrange is not exist */
 };/* CvBlobTrackFVGenN */
 
-CvBlobTrackFVGen* cvCreateFVGenP(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(2);}
-CvBlobTrackFVGen* cvCreateFVGenPV(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(4);}
-CvBlobTrackFVGen* cvCreateFVGenPVS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(5);}
+inline CvBlobTrackFVGen* cvCreateFVGenP(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(2);}
+inline CvBlobTrackFVGen* cvCreateFVGenPV(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(4);}
+inline CvBlobTrackFVGen* cvCreateFVGenPVS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenN(5);}
 #undef MAX_FV_SIZE
 
 #define MAX_FV_SIZE 4
@@ -408,7 +408,7 @@ public:
        virtual float*  GetFVVar(){return m_FVVar;}; /* returned pointer to array of maximal values of FV, if return 0 then FVrange is not exist */
 };/* CvBlobTrackFVGenSS */
 
-CvBlobTrackFVGen* cvCreateFVGenSS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenSS;}
+inline CvBlobTrackFVGen* cvCreateFVGenSS(){return (CvBlobTrackFVGen*)new CvBlobTrackFVGenSS;}
 
 /*======================= TRAJECTORY ANALYZER MODULES =====================*/
 /* Trajectory Analyser module */
@@ -1510,7 +1510,7 @@ public:
 
 }; /* CvBlobTrackAnalysisSVM. */
 
-
+#if 0
 CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMP()
 {return (CvBlobTrackAnalysis*) new CvBlobTrackAnalysisSVM(cvCreateFVGenP);}
 
@@ -1522,3 +1522,4 @@ CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMPVS()
 
 CvBlobTrackAnalysis* cvCreateModuleBlobTrackAnalysisSVMSS()
 {return (CvBlobTrackAnalysis*) new CvBlobTrackAnalysisSVM(cvCreateFVGenSS);}
+#endif
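
The pattern above — factory helpers turned inline and the unused SVM-based variants compiled out with #if 0 — repeats in the blob-tracking files that follow, where other helpers gain static or a declaration immediately before their definition instead. All of these are presumably aimed at gcc's missing-declaration and unused-function warnings; a condensed sketch of the options, with hypothetical names:

    struct Gen { };

    static Gen* makeA() { return new Gen; }   // internal linkage; warns if it ends up unused
    inline Gen* makeB() { return new Gen; }   // in C++, exempt from -Wmissing-declarations
                                              // and not reported when unused
    Gen* makeC();                             // prior declaration...
    Gen* makeC() { return new Gen; }          // ...so the external definition is not flagged

    #if 0                                     // or simply compile the dead code out
    Gen* makeD() { return new Gen; }
    #endif
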
index 2e41ea1..3a0f03a 100644 (file)
@@ -162,12 +162,15 @@ public:
     }
 };  /* class CvBlobTrackerOneKalman */
 
+#if 0
 static CvBlobTrackerOne* cvCreateModuleBlobTrackerOneKalman()
 {
     return (CvBlobTrackerOne*) new CvBlobTrackerOneKalman;
 }
 
+
 CvBlobTracker* cvCreateBlobTrackerKalman()
 {
     return cvCreateBlobTrackerList(cvCreateModuleBlobTrackerOneKalman);
 }
+#endif
index 2d1dafb..8444964 100644 (file)
@@ -716,7 +716,7 @@ void CvBlobTrackerOneMSFG::CollectHist(IplImage* pImg, IplImage* pMask, CvBlob*
 };  /* CollectHist */
 #endif
 
-CvBlobTrackerOne* cvCreateBlobTrackerOneMSFG()
+static CvBlobTrackerOne* cvCreateBlobTrackerOneMSFG()
 {
     return (CvBlobTrackerOne*) new CvBlobTrackerOneMSFG;
 }
@@ -739,7 +739,7 @@ public:
     };
 };
 
-CvBlobTrackerOne* cvCreateBlobTrackerOneMS()
+static CvBlobTrackerOne* cvCreateBlobTrackerOneMS()
 {
     return (CvBlobTrackerOne*) new CvBlobTrackerOneMS;
 }
@@ -1169,6 +1169,7 @@ public:
 
 };  /* CvBlobTrackerOneMSPF */
 
+CvBlobTrackerOne* cvCreateBlobTrackerOneMSPF();
 CvBlobTrackerOne* cvCreateBlobTrackerOneMSPF()
 {
     return (CvBlobTrackerOne*) new CvBlobTrackerOneMSPF;
index 9adf56a..65de3ef 100644 (file)
@@ -47,7 +47,7 @@ typedef float DefHistType;
 #define DefHistTypeMat CV_32F
 #define HIST_INDEX(_pData) (((_pData)[0]>>m_ByteShift) + (((_pData)[1]>>(m_ByteShift))<<m_BinBit)+((pImgData[2]>>m_ByteShift)<<(m_BinBit*2)))
 
-void calcKernelEpanechnikov(CvMat* pK)
+static void calcKernelEpanechnikov(CvMat* pK)
 {    /* Allocate kernel for histogramm creation: */
     int     x,y;
     int     w = pK->width;
@@ -445,7 +445,7 @@ public:
     virtual void Release(){delete this;};
 }; /*CvBlobTrackerOneMSFGS*/
 
-CvBlobTrackerOne* cvCreateBlobTrackerOneMSFGS()
+static CvBlobTrackerOne* cvCreateBlobTrackerOneMSFGS()
 {
     return (CvBlobTrackerOne*) new CvBlobTrackerOneMSFGS;
 }
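
calcKernelEpanechnikov, now file-local above, fills the weighting kernel used when building the colour histogram for mean-shift tracking. The Epanechnikov profile is simply 1 − r² inside the unit ball and 0 outside; a sketch of filling such a 2-D kernel, with normalization and scaling chosen for illustration rather than taken from the patched function:

    #include <vector>

    // w x h kernel: weight 1 - r^2 inside the ellipse inscribed in the kernel, 0 outside
    static std::vector<float> epanechnikovKernel(int w, int h)
    {
        std::vector<float> k(w * h);
        const float cx = 0.5f * (w - 1), cy = 0.5f * (h - 1);
        for (int y = 0; y < h; ++y)
            for (int x = 0; x < w; ++x)
            {
                float dx = (x - cx) / (0.5f * w);
                float dy = (y - cy) / (0.5f * h);
                float r2 = dx * dx + dy * dy;
                k[y * w + x] = r2 < 1.f ? 1.f - r2 : 0.f;
            }
        return k;
    }
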
index 8eea2c6..debe89d 100644 (file)
@@ -188,7 +188,7 @@ void CvBlobTrackPostProcKalman::Release()
     delete this;
 }
 
-CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcKalmanOne()
+static CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcKalmanOne()
 {
     return (CvBlobTrackPostProcOne*) new CvBlobTrackPostProcKalman;
 }
index df50e0f..c18737c 100644 (file)
@@ -106,12 +106,12 @@ public:
     }
 };  /* class CvBlobTrackPostProcTimeAver */
 
-CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverRectOne()
+static CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverRectOne()
 {
     return (CvBlobTrackPostProcOne*) new CvBlobTrackPostProcTimeAver(0);
 }
 
-CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverExpOne()
+static CvBlobTrackPostProcOne* cvCreateModuleBlobTrackPostProcTimeAverExpOne()
 {
     return (CvBlobTrackPostProcOne*) new CvBlobTrackPostProcTimeAver(1);
 }
index 4d608bb..0462590 100644 (file)
@@ -44,7 +44,7 @@
 
 #undef quad
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4701 )
 #endif
 
@@ -99,18 +99,18 @@ bool CvCalibFilter::SetEtalon( CvCalibEtalonType type, double* params,
 
     Stop();
 
-       if (latestPoints != NULL)
-       {
-               for( i = 0; i < MAX_CAMERAS; i++ )
-                       cvFree( latestPoints + i );
-       }
+    if (latestPoints != NULL)
+    {
+        for( i = 0; i < MAX_CAMERAS; i++ )
+            cvFree( latestPoints + i );
+    }
 
     if( type == CV_CALIB_ETALON_USER || type != etalonType )
     {
-               if (etalonParams != NULL)
-               {
-                       cvFree( &etalonParams );
-               }
+        if (etalonParams != NULL)
+        {
+            cvFree( &etalonParams );
+        }
     }
 
     etalonType = type;
@@ -154,10 +154,10 @@ bool CvCalibFilter::SetEtalon( CvCalibEtalonType type, double* params,
 
     if( etalonPointCount != pointCount )
     {
-               if (etalonPoints != NULL)
-               {
-                       cvFree( &etalonPoints );
-               }
+        if (etalonPoints != NULL)
+        {
+            cvFree( &etalonPoints );
+        }
         etalonPointCount = pointCount;
         etalonPoints = (CvPoint2D32f*)cvAlloc( arrSize );
     }
@@ -184,15 +184,15 @@ bool CvCalibFilter::SetEtalon( CvCalibEtalonType type, double* params,
         break;
 
     case CV_CALIB_ETALON_USER:
-               if (params != NULL)
-               {
-                       memcpy( etalonParams, params, arrSize );
-               }
-               if (points != NULL)
-               {
-                       memcpy( etalonPoints, points, arrSize );
-               }
-               break;
+        if (params != NULL)
+        {
+            memcpy( etalonParams, params, arrSize );
+        }
+        if (points != NULL)
+        {
+            memcpy( etalonPoints, points, arrSize );
+        }
+        break;
 
     default:
         assert(0);
@@ -226,7 +226,7 @@ CvCalibFilter::GetEtalon( int* paramCount, const double** params,
 void CvCalibFilter::SetCameraCount( int count )
 {
     Stop();
-    
+
     if( count != cameraCount )
     {
         for( int i = 0; i < cameraCount; i++ )
@@ -245,7 +245,7 @@ void CvCalibFilter::SetCameraCount( int count )
     }
 }
 
-   
+
 bool CvCalibFilter::SetFrames( int frames )
 {
     if( frames < 5 )
@@ -253,7 +253,7 @@ bool CvCalibFilter::SetFrames( int frames )
         assert(0);
         return false;
     }
-    
+
     framesTotal = frames;
     return true;
 }
@@ -304,7 +304,7 @@ void CvCalibFilter::Stop( bool calibrate )
 
             cameraParams[i].imgSize[0] = (float)imgSize.width;
             cameraParams[i].imgSize[1] = (float)imgSize.height;
-            
+
 //            cameraParams[i].focalLength[0] = cameraParams[i].matrix[0];
 //            cameraParams[i].focalLength[1] = cameraParams[i].matrix[4];
 
@@ -315,7 +315,7 @@ void CvCalibFilter::Stop( bool calibrate )
             memcpy( cameraParams[i].transVect, transVect, 3 * sizeof(transVect[0]));
 
             mat.data.ptr = (uchar*)(cameraParams + i);
-            
+
             /* check resultant camera parameters: if there are some INF's or NAN's,
                stop and reset results */
             if( !cvCheckArr( &mat, CV_CHECK_RANGE | CV_CHECK_QUIET, -10000, 10000 ))
@@ -342,7 +342,7 @@ void CvCalibFilter::Stop( bool calibrate )
                 {
                     stereo.fundMatr[i] = stereo.fundMatr[i];
                 }
-                
+
             }
 
         }
@@ -499,16 +499,16 @@ bool CvCalibFilter::GetLatestPoints( int idx, CvPoint2D32f** pts,
                                      int* count, bool* found )
 {
     int n;
-    
+
     if( (unsigned)idx >= (unsigned)cameraCount ||
         !pts || !count || !found )
     {
         assert(0);
         return false;
     }
-    
+
     n = latestCounts[idx];
-    
+
     *found = n > 0;
     *count = abs(n);
     *pts = latestPoints[idx];
@@ -616,7 +616,7 @@ const CvCamera* CvCalibFilter::GetCameraParams( int idx ) const
         assert(0);
         return 0;
     }
-    
+
     return isCalibrated ? cameraParams + idx : 0;
 }
 
@@ -630,7 +630,7 @@ const CvStereoCamera* CvCalibFilter::GetStereoParams() const
         assert(0);
         return 0;
     }
-    
+
     return &stereo;
 }
 
@@ -640,9 +640,9 @@ bool CvCalibFilter::SetCameraParams( CvCamera* params )
 {
     CvMat mat;
     int arrSize;
-    
+
     Stop();
-    
+
     if( !params )
     {
         assert(0);
@@ -667,7 +667,7 @@ bool CvCalibFilter::SaveCameraParams( const char* filename )
     if( isCalibrated )
     {
         int i, j;
-        
+
         FILE* f = fopen( filename, "w" );
 
         if( !f ) return false;
@@ -729,7 +729,7 @@ bool CvCalibFilter::LoadCameraParams( const char* filename )
         return false;
 
     SetCameraCount( d );
-    
+
     for( i = 0; i < cameraCount; i++ )
     {
         for( j = 0; j < (int)(sizeof(cameraParams[i])/sizeof(float)); j++ )
@@ -763,16 +763,16 @@ bool CvCalibFilter::LoadCameraParams( const char* filename )
             CV_Assert(values_read == 1);
         }
     }
-    
-    
-    
-    
+
+
+
+
     fclose(f);
 
     stereo.warpSize = cvSize( cvRound(cameraParams[0].imgSize[0]), cvRound(cameraParams[0].imgSize[1]));
 
     isCalibrated = true;
-    
+
     return true;
 }
 
@@ -924,4 +924,4 @@ bool CvCalibFilter::Undistort( CvMat** srcarr, CvMat** dstarr )
     return true;
 }
 
-                
+
index c271664..027dc6e 100644 (file)
@@ -45,7 +45,7 @@
 //#include <limits.h>
 //#include "cv.h"
 //#include "highgui.h"
-
+#if 0
 #include <stdio.h>
 
 /* Valery Mosyagin */
@@ -53,7 +53,7 @@
 /* ===== Function for find corresponding between images ===== */
 
 /* Create feature points on image and return number of them. Array points fills by found points */
-int icvCreateFeaturePoints(IplImage *image, CvMat *points, CvMat *status)
+static int icvCreateFeaturePoints(IplImage *image, CvMat *points, CvMat *status)
 {
     int foundFeaturePoints = 0;
     IplImage *grayImage = 0;
@@ -175,9 +175,9 @@ int icvCreateFeaturePoints(IplImage *image, CvMat *points, CvMat *status)
 
 /* For given points1 (with pntStatus) on image1 finds corresponding points2 on image2 and set pntStatus2 for them */
 /* Returns number of corresponding points */
-int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
+static int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
                                 IplImage *image2,/* Image 2 */
-                                CvMat *points1, 
+                                CvMat *points1,
                                 CvMat *pntStatus1,
                                 CvMat *points2,
                                 CvMat *pntStatus2,
@@ -203,7 +203,7 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
     /* Test input data for errors */
 
     /* Test for null pointers */
-    if( image1     == 0 || image2     == 0 || 
+    if( image1     == 0 || image2     == 0 ||
         points1    == 0 || points2    == 0 ||
         pntStatus1 == 0 || pntStatus2 == 0)
     {
@@ -226,7 +226,7 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
     }
 
     /* Test for matrices */
-    if( !CV_IS_MAT(points1)    || !CV_IS_MAT(points2) || 
+    if( !CV_IS_MAT(points1)    || !CV_IS_MAT(points2) ||
         !CV_IS_MAT(pntStatus1) || !CV_IS_MAT(pntStatus2) )
     {
         CV_ERROR( CV_StsUnsupportedFormat, "Input parameters (points and status) must be a matrices" );
@@ -333,11 +333,11 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
                                 pyrImage1, pyrImage2,
                                 cornerPoints1, cornerPoints2,
                                 numVisPoints, cvSize(10,10), 3,
-                                status, errors, 
+                                status, errors,
                                 cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03),
                                 0/*CV_LKFLOW_PYR_A_READY*/ );
 
-        
+
         memset(stat2,0,sizeof(uchar)*numPoints);
 
         int currVis = 0;
@@ -393,7 +393,7 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
                 CvMat fundMatr;
                 double fundMatr_dat[9];
                 fundMatr = cvMat(3,3,CV_64F,fundMatr_dat);
-        
+
                 CV_CALL( pStatus = cvCreateMat(1,totalCorns,CV_32F) );
 
                 int num = cvFindFundamentalMat(tmpPoints1,tmpPoints2,&fundMatr,CV_FM_RANSAC,threshold,0.99,pStatus);
@@ -435,8 +435,9 @@ int icvFindCorrForGivenPoints( IplImage *image1,/* Image 1 */
 
     return resNumCorrPoints;
 }
+
 /*-------------------------------------------------------------------------------------*/
-int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,CvMat *addStatus,int addCreateNum)
+static int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,CvMat *addStatus,int addCreateNum)
 {
     /* Add to existing points and status arrays new points or just grow */
     CvMat *newOldPoint  = 0;
@@ -445,7 +446,7 @@ int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,
 
     CV_FUNCNAME( "icvGrowPointsAndStatus" );
     __BEGIN__;
-    
+
     /* Test for errors */
     if( oldPoints == 0 || oldStatus == 0 )
     {
@@ -546,8 +547,9 @@ int icvGrowPointsAndStatus(CvMat **oldPoints,CvMat **oldStatus,CvMat *addPoints,
 
     return newTotalNumber;
 }
+
 /*-------------------------------------------------------------------------------------*/
-int icvRemoveDoublePoins(   CvMat *oldPoints,/* Points on prev image */
+static int icvRemoveDoublePoins(   CvMat *oldPoints,/* Points on prev image */
                             CvMat *newPoints,/* New points */
                             CvMat *oldStatus,/* Status for old points */
                             CvMat *newStatus,
@@ -560,7 +562,7 @@ int icvRemoveDoublePoins(   CvMat *oldPoints,/* Points on prev image */
     CvSeq* seq = 0;
 
     int originalPoints = 0;
-    
+
     CV_FUNCNAME( "icvRemoveDoublePoins" );
     __BEGIN__;
 
@@ -624,7 +626,7 @@ int icvRemoveDoublePoins(   CvMat *oldPoints,/* Points on prev image */
     {
         CV_ERROR( CV_StsOutOfRange, "Statuses must have 1 row" );
     }
-    
+
     /* we have points on image and wants add new points */
     /* use subdivision for find nearest points */
 
@@ -731,7 +733,7 @@ int icvRemoveDoublePoins(   CvMat *oldPoints,/* Points on prev image */
                     /* Point is double. Turn it off */
                     /* Set status */
                     //newStatus->data.ptr[i] = 0;
-                    
+
                     /* No this is a double point */
                     //originalPoints--;
                     flag = 0;
@@ -745,7 +747,7 @@ int icvRemoveDoublePoins(   CvMat *oldPoints,/* Points on prev image */
     __END__;
 
     cvReleaseMemStorage( &storage );
-    
+
 
     return originalPoints;
 
@@ -755,11 +757,11 @@ int icvRemoveDoublePoins(   CvMat *oldPoints,/* Points on prev image */
 void icvComputeProjectMatrix(CvMat* objPoints,CvMat* projPoints,CvMat* projMatr);
 
 /*-------------------------------------------------------------------------------------*/
-void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *status, CvMat *projMatr)
+static void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *status, CvMat *projMatr)
 {
     /* Compute number of good points */
     int num = cvCountNonZero(status);
-    
+
     /* Create arrays */
     CvMat *objPoints = 0;
     objPoints = cvCreateMat(4,num,CV_64F);
@@ -802,7 +804,7 @@ void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *stat
 
             currVis++;
         }
-        
+
         fprintf(file,"\n");
     }
 
@@ -820,17 +822,16 @@ void icvComputeProjectMatrixStatus(CvMat *objPoints4D,CvMat *points2,CvMat *stat
 
 
 /*-------------------------------------------------------------------------------------*/
-/* For given N images 
+/* For given N images
  we have corresponding points on N images
  computed projection matrices
  reconstructed 4D points
 
-  we must to compute 
-  
+  we must to compute
 
-*/
 
-void icvAddNewImageToPrevious____(
+*/
+static void icvAddNewImageToPrevious____(
                                     IplImage *newImage,//Image to add
                                     IplImage *oldImage,//Previous image
                                     CvMat *oldPoints,// previous 2D points on prev image (some points may be not visible)
@@ -868,7 +869,7 @@ void icvAddNewImageToPrevious____(
     int corrNum;
     corrNum = icvFindCorrForGivenPoints(    oldImage,/* Image 1 */
                                             newImage,/* Image 2 */
-                                            oldPoints, 
+                                            oldPoints,
                                             oldPntStatus,
                                             points2,
                                             status,
@@ -887,10 +888,10 @@ void icvAddNewImageToPrevious____(
 //        icvComputeProjectMatrix(objPoints4D,points2,&projMatr);
         icvComputeProjectMatrixStatus(objPoints4D,points2,status,&projMatr);
         cvCopy(&projMatr,newProjMatr);
-        
+
         /* Create new points and find correspondence */
         icvCreateFeaturePoints(newImage, newFPoints2D2,newFPointsStatus);
-        
+
         /* Good if we test new points before find corr points */
 
         /* Find correspondence for new found points */
@@ -947,7 +948,7 @@ void icvAddNewImageToPrevious____(
 //CreateGood
 
 /*-------------------------------------------------------------------------------------*/
-int icvDeleteSparsInPoints(  int numImages,
+static int icvDeleteSparsInPoints(  int numImages,
                              CvMat **points,
                              CvMat **status,
                              CvMat *wasStatus)/* status of previous configuration */
@@ -979,7 +980,7 @@ int icvDeleteSparsInPoints(  int numImages,
 
     int numCoord;
     numCoord = points[0]->rows;// !!! may be number of coordinates is not correct !!!
-    
+
     int i;
     int currExistPoint;
     currExistPoint = 0;
@@ -1041,7 +1042,7 @@ int icvDeleteSparsInPoints(  int numImages,
     return comNumber;
 }
 
-#if 0
+
 /*-------------------------------------------------------------------------------------*/
 void icvGrowPointsArray(CvMat **points)
 {
@@ -1089,7 +1090,7 @@ int AddImageToStruct(  IplImage *newImage,//Image to add
     cvConvert(pntStatus,status);
 
     int corrNum = FindCorrForGivenPoints(oldImage,newImage,oldPoints,newPoints,status);
-    
+
     /* Status has new status of points */
 
     CvMat projMatr;
index 93e735f..798bd99 100644 (file)
@@ -48,7 +48,7 @@
     Stan Birchfield and Carlo Tomasi
     International Journal of Computer Vision,
     35(3): 269-293, December 1999.
-    
+
     This implementation uses different cost function that results in
     O(pixPerRow*maxDisparity) complexity of dynamic programming stage versus
     O(pixPerRow*log(pixPerRow)*maxDisparity) in the above paper.
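
That complexity claim follows from the table the function fills: one cell per (column, disparity) pair, each updated from a constant number of predecessors (the left/up/diagonal steps further down), so the DP stage costs pixPerRow·maxDisparity per scanline. A stripped-down sketch of such a fill, with illustrative costs and transitions rather than the Birchfield–Tomasi ones:

    #include <algorithm>
    #include <vector>

    // cost[x*D + d] = data term for matching column x at disparity d, D = maxDisp+1
    static int dpScanlineSketch(const std::vector<int>& cost, int width, int maxDisp)
    {
        const int D = maxDisp + 1, step = 3;   // `step`: illustrative transition penalty
        std::vector<int> prev(D, 0), cur(D, 0);
        for (int x = 1; x < width; ++x)
        {
            for (int d = 0; d < D; ++d)
            {
                int best = prev[d];                                       // keep disparity
                if (d > 0)     best = std::min(best, cur[d - 1] + step);  // one step up
                if (d + 1 < D) best = std::min(best, prev[d + 1] + step); // one step down
                cur[d] = best + cost[x * D + d];  // O(1) per cell -> O(width*maxDisparity)
            }
            prev.swap(cur);
        }
        // a real implementation would also record the chosen steps for backtracking
        return *std::min_element(prev.begin(), prev.end());
    }
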
@@ -68,7 +68,7 @@
 typedef struct _CvDPCell
 {
     uchar  step; //local-optimal step
-    int    sum;  //current sum  
+    int    sum;  //current sum
 }_CvDPCell;
 
 typedef struct _CvRightImData
@@ -79,17 +79,17 @@ typedef struct _CvRightImData
 #define CV_IMAX3(a,b,c) ((temp3 = (a) >= (b) ? (a) : (b)),(temp3 >= (c) ? temp3 : (c)))
 #define CV_IMIN3(a,b,c) ((temp3 = (a) <= (b) ? (a) : (b)),(temp3 <= (c) ? temp3 : (c)))
 
-void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
+static void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
                                                 uchar* disparities,
                                                 CvSize size, int widthStep,
-                                                int    maxDisparity, 
-                                                float  _param1, float _param2, 
+                                                int    maxDisparity,
+                                                float  _param1, float _param2,
                                                 float  _param3, float _param4,
                                                 float  _param5 )
 {
     int     x, y, i, j, temp3;
     int     d, s;
-    int     dispH =  maxDisparity + 3; 
+    int     dispH =  maxDisparity + 3;
     uchar  *dispdata;
     int     imgW = size.width;
     int     imgH = size.height;
@@ -103,22 +103,22 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
     int param5 = cvRound(_param5);
 
     #define CELL(d,x)   cells[(d)+(x)*dispH]
-    
+
     uchar*              dsi = (uchar*)cvAlloc(sizeof(uchar)*imgW*dispH);
     uchar*              edges = (uchar*)cvAlloc(sizeof(uchar)*imgW*imgH);
     _CvDPCell*          cells = (_CvDPCell*)cvAlloc(sizeof(_CvDPCell)*imgW*MAX(dispH,(imgH+1)/2));
     _CvRightImData*     rData = (_CvRightImData*)cvAlloc(sizeof(_CvRightImData)*imgW);
     int*                reliabilities = (int*)cells;
-    
-    for( y = 0; y < imgH; y++ ) 
-    { 
+
+    for( y = 0; y < imgH; y++ )
+    {
         uchar* srcdata1 = src1 + widthStep * y;
-        uchar* srcdata2 = src2 + widthStep * y;        
+        uchar* srcdata2 = src2 + widthStep * y;
 
         //init rData
         prevval = prev = srcdata2[0];
         for( j = 1; j < imgW; j++ )
-        {             
+        {
             curr = srcdata2[j];
             val = (uchar)((curr + prev)>>1);
             rData[j-1].max_val = (uchar)CV_IMAX3( val, prevval, prev );
@@ -130,12 +130,12 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
 
         // fill dissimularity space image
         for( i = 1; i <= maxDisparity + 1; i++ )
-        {               
+        {
             dsi += imgW;
             rData--;
             for( j = i - 1; j < imgW - 1; j++ )
-            {                
-                int t; 
+            {
+                int t;
                 if( (t = srcdata1[j] - rData[j+1].max_val) >= 0 )
                 {
                     dsi[j] = (uchar)t;
@@ -160,36 +160,36 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
         for( j = 3; j < imgW-4; j++ )
         {
             edges[y*imgW+j] = 0;
-            
-            if( ( CV_IMAX3( srcdata1[j-3], srcdata1[j-2], srcdata1[j-1] ) - 
+
+            if( ( CV_IMAX3( srcdata1[j-3], srcdata1[j-2], srcdata1[j-1] ) -
                   CV_IMIN3( srcdata1[j-3], srcdata1[j-2], srcdata1[j-1] ) ) >= ICV_BIRCH_DIFF_LUM )
             {
                 edges[y*imgW+j] |= 1;
             }
-            if( ( CV_IMAX3( srcdata2[j+3], srcdata2[j+2], srcdata2[j+1] ) - 
+            if( ( CV_IMAX3( srcdata2[j+3], srcdata2[j+2], srcdata2[j+1] ) -
                   CV_IMIN3( srcdata2[j+3], srcdata2[j+2], srcdata2[j+1] ) ) >= ICV_BIRCH_DIFF_LUM )
             {
                 edges[y*imgW+j] |= 2;
-            }            
-        }        
+            }
+        }
 
         //find correspondence using dynamical programming
         //init DP table
-        for( x = 0; x < imgW; x++ ) 
+        for( x = 0; x < imgW; x++ )
         {
             CELL(0,x).sum = CELL(dispH-1,x).sum = ICV_MAX_DP_SUM_VAL;
             CELL(0,x).step = CELL(dispH-1,x).step = ICV_DP_STEP_LEFT;
         }
-        for( d = 2; d < dispH; d++ ) 
+        for( d = 2; d < dispH; d++ )
         {
             CELL(d,d-2).sum = ICV_MAX_DP_SUM_VAL;
             CELL(d,d-2).step = ICV_DP_STEP_UP;
-        }    
+        }
         CELL(1,0).sum  = 0;
         CELL(1,0).step = ICV_DP_STEP_LEFT;
 
         for( x = 1; x < imgW; x++ )
-        {        
+        {
             int d = MIN( x + 1, maxDisparity + 1);
             uchar* _edges = edges + y*imgW + x;
             int e0 = _edges[0] & 1;
@@ -201,17 +201,17 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
                 int sum[3];
 
                 //check left step
-                sum[0] = _cell[d-dispH].sum - param2;                
+                sum[0] = _cell[d-dispH].sum - param2;
 
                 //check up step
                 if( _cell[d+1].step != ICV_DP_STEP_DIAG && e0 )
                 {
                     sum[1] = _cell[d+1].sum + param1;
 
-                    if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) ) 
+                    if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) )
                     {
                         int t;
-                        
+
                         sum[2] = _cell[d-1-dispH].sum + param1;
 
                         t = sum[1] < sum[0];
@@ -223,7 +223,7 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
                             _cell[d].sum = sum[t] + s;
                         }
                         else
-                        {                
+                        {
                             _cell[d].step = ICV_DP_STEP_DIAG;
                             _cell[d].sum = sum[2] + s;
                         }
@@ -242,7 +242,7 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
                         }
                     }
                 }
-                else if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) ) 
+                else if( _cell[d-1-dispH].step != ICV_DP_STEP_UP && (_edges[1-d] & 2) )
                 {
                     sum[2] = _cell[d-1-dispH].sum + param1;
                     if( sum[0] <= sum[2] )
@@ -278,25 +278,25 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
                 min_val = CELL(i,imgW-1).sum;
             }
         }
-        
+
         //track optimal pass
         for( x = imgW - 1; x > 0; x-- )
-        {        
+        {
             dispdata[x] = (uchar)(d - 1);
             while( CELL(d,x).step == ICV_DP_STEP_UP ) d++;
             if ( CELL(d,x).step == ICV_DP_STEP_DIAG )
             {
                 s = x;
-                while( CELL(d,x).step == ICV_DP_STEP_DIAG ) 
+                while( CELL(d,x).step == ICV_DP_STEP_DIAG )
                 {
-                    d--; 
-                    x--;                    
+                    d--;
+                    x--;
                 }
                 for( i = x; i < s; i++ )
                 {
                     dispdata[i] = (uchar)(d-1);
-                }            
-            }        
+                }
+            }
         }//for x
     }// for y
 
@@ -319,9 +319,9 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
     {
         for( y = 1; y < imgH - 1; y++ )
         {
-            if( ( CV_IMAX3( src1[(y-1)*widthStep+x], src1[y*widthStep+x], 
-                        src1[(y+1)*widthStep+x] ) - 
-                  CV_IMIN3( src1[(y-1)*widthStep+x], src1[y*widthStep+x], 
+            if( ( CV_IMAX3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
+                        src1[(y+1)*widthStep+x] ) -
+                  CV_IMIN3( src1[(y-1)*widthStep+x], src1[y*widthStep+x],
                         src1[(y+1)*widthStep+x] ) ) >= ICV_BIRCH_DIFF_LUM )
             {
                 edges[y*imgW+x] |= 4;
@@ -332,14 +332,14 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
         }
     }
 
-    //remove along any particular row, every gradient 
+    //remove along any particular row, every gradient
     //for which two adjacent columns do not agree.
     for( y = 0; y < imgH; y++ )
     {
         prev = edges[y*imgW];
         for( x = 1; x < imgW - 1; x++ )
         {
-            curr = edges[y*imgW+x];            
+            curr = edges[y*imgW+x];
             if( (curr & 4) &&
                 ( !( prev & 4 ) ||
                   !( edges[y*imgW+x+1] & 4 ) ) )
@@ -360,41 +360,41 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
                 ;
             s = y - i;
             for( ; i < y; i++ )
-            {                
+            {
                 reliabilities[i*imgW+x] = s;
-            }            
+            }
         }
-    }   
-    
-    //Y - propagate reliable regions 
+    }
+
+    //Y - propagate reliable regions
     for( x = 0; x < imgW; x++ )
-    {        
+    {
         for( y = 0; y < imgH; y++ )
-        {   
+        {
             d = dest[y*widthStep+x];
             if( reliabilities[y*imgW+x] >= param4 && !(edges[y*imgW+x] & 4) &&
                 d > 0 )//highly || moderately
-            {   
+            {
                 disparities[y*widthStep+x] = (uchar)d;
                 //up propagation
                 for( i = y - 1; i >= 0; i-- )
                 {
                     if(  ( edges[i*imgW+x] & 4 ) ||
-                         ( dest[i*widthStep+x] < d && 
+                         ( dest[i*widthStep+x] < d &&
                            reliabilities[i*imgW+x] >= param3 ) ||
-                         ( reliabilities[y*imgW+x] < param5 && 
+                         ( reliabilities[y*imgW+x] < param5 &&
                            dest[i*widthStep+x] - 1 == d ) ) break;
 
-                    disparities[i*widthStep+x] = (uchar)d;                    
-                }                     
-                                
+                    disparities[i*widthStep+x] = (uchar)d;
+                }
+
                 //down propagation
                 for( i = y + 1; i < imgH; i++ )
                 {
                     if(  ( edges[i*imgW+x] & 4 ) ||
-                         ( dest[i*widthStep+x] < d && 
+                         ( dest[i*widthStep+x] < d &&
                            reliabilities[i*imgW+x] >= param3 ) ||
-                         ( reliabilities[y*imgW+x] < param5 && 
+                         ( reliabilities[y*imgW+x] < param5 &&
                            dest[i*widthStep+x] - 1 == d ) ) break;
 
                     disparities[i*widthStep+x] = (uchar)d;
@@ -417,41 +417,41 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
             for( ; x < imgW && dest[y*widthStep+x] == dest[y*widthStep+x-1]; x++ );
             s = x - i;
             for( ; i < x; i++ )
-            {                
+            {
                 reliabilities[y*imgW+i] = s;
-            }            
+            }
         }
-    }   
-    
-    //X - propagate reliable regions 
-    for( y = 0; y < imgH; y++ )    
-    {        
+    }
+
+    //X - propagate reliable regions
+    for( y = 0; y < imgH; y++ )
+    {
         for( x = 0; x < imgW; x++ )
-        {   
+        {
             d = dest[y*widthStep+x];
             if( reliabilities[y*imgW+x] >= param4 && !(edges[y*imgW+x] & 1) &&
                 d > 0 )//highly || moderately
-            {   
+            {
                 disparities[y*widthStep+x] = (uchar)d;
                 //up propagation
                 for( i = x - 1; i >= 0; i-- )
                 {
                     if(  (edges[y*imgW+i] & 1) ||
-                         ( dest[y*widthStep+i] < d && 
+                         ( dest[y*widthStep+i] < d &&
                            reliabilities[y*imgW+i] >= param3 ) ||
-                         ( reliabilities[y*imgW+x] < param5 && 
+                         ( reliabilities[y*imgW+x] < param5 &&
                            dest[y*widthStep+i] - 1 == d ) ) break;
 
                     disparities[y*widthStep+i] = (uchar)d;
-                }                     
-                                
+                }
+
                 //down propagation
                 for( i = x + 1; i < imgW; i++ )
                 {
                     if(  (edges[y*imgW+i] & 1) ||
-                         ( dest[y*widthStep+i] < d && 
+                         ( dest[y*widthStep+i] < d &&
                            reliabilities[y*imgW+i] >= param3 ) ||
-                         ( reliabilities[y*imgW+x] < param5 && 
+                         ( reliabilities[y*imgW+x] < param5 &&
                            dest[y*widthStep+i] - 1 == d ) ) break;
 
                     disparities[y*widthStep+i] = (uchar)d;
@@ -466,10 +466,10 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
     }
 
     //release resources
-    cvFree( &dsi );    
-    cvFree( &edges );    
-    cvFree( &cells );        
-    cvFree( &rData );        
+    cvFree( &dsi );
+    cvFree( &edges );
+    cvFree( &cells );
+    cvFree( &rData );
 }
 
 
@@ -483,7 +483,7 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
 //      rightImage - right image of stereo-pair (format 8uC1).
 //      mode - mode of correspondence retrieval (now CV_DISPARITY_BIRCHFIELD only)
 //      dispImage - destination disparity image
-//      maxDisparity - maximal disparity 
+//      maxDisparity - maximal disparity
 //      param1, param2, param3, param4, param5 - parameters of algorithm
 //    Returns:
 //    Notes:
@@ -491,43 +491,43 @@ void icvFindStereoCorrespondenceByBirchfieldDP( uchar* src1, uchar* src2,
 //      All images must have format 8uC1.
 //F*/
 CV_IMPL void
-cvFindStereoCorrespondence( 
+cvFindStereoCorrespondence(
                    const  CvArr* leftImage, const  CvArr* rightImage,
                    int     mode,
                    CvArr*  depthImage,
-                   int     maxDisparity,                                
-                   double  param1, double  param2, double  param3, 
+                   int     maxDisparity,
+                   double  param1, double  param2, double  param3,
                    double  param4, double  param5  )
-{       
+{
     CV_FUNCNAME( "cvFindStereoCorrespondence" );
 
     __BEGIN__;
 
-    CvMat  *src1, *src2;    
+    CvMat  *src1, *src2;
     CvMat  *dst;
     CvMat  src1_stub, src2_stub, dst_stub;
-    int    coi;    
+    int    coi;
 
     CV_CALL( src1 = cvGetMat( leftImage, &src1_stub, &coi ));
     if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
     CV_CALL( src2 = cvGetMat( rightImage, &src2_stub, &coi ));
-    if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );    
+    if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
     CV_CALL( dst = cvGetMat( depthImage, &dst_stub, &coi ));
     if( coi ) CV_ERROR( CV_BadCOI, "COI is not supported by the function" );
 
-    // check args 
-    if( CV_MAT_TYPE( src1->type ) != CV_8UC1 || 
-        CV_MAT_TYPE( src2->type ) != CV_8UC1 ||        
+    // check args
+    if( CV_MAT_TYPE( src1->type ) != CV_8UC1 ||
+        CV_MAT_TYPE( src2->type ) != CV_8UC1 ||
         CV_MAT_TYPE( dst->type ) != CV_8UC1) CV_ERROR(CV_StsUnsupportedFormat,
-                        "All images must be single-channel and have 8u" );    
+                        "All images must be single-channel and have 8u" );
 
     if( !CV_ARE_SIZES_EQ( src1, src2 ) || !CV_ARE_SIZES_EQ( src1, dst ) )
             CV_ERROR( CV_StsUnmatchedSizes, "" );
-    
+
     if( maxDisparity <= 0 || maxDisparity >= src1->width || maxDisparity > 255 )
-        CV_ERROR(CV_StsOutOfRange, 
+        CV_ERROR(CV_StsOutOfRange,
                  "parameter /maxDisparity/ is out of range");
-    
+
     if( mode == CV_DISPARITY_BIRCHFIELD )
     {
         if( param1 == CV_UNDEF_SC_PARAM ) param1 = CV_IDP_BIRCHFIELD_PARAM1;
@@ -536,10 +536,10 @@ cvFindStereoCorrespondence(
         if( param4 == CV_UNDEF_SC_PARAM ) param4 = CV_IDP_BIRCHFIELD_PARAM4;
         if( param5 == CV_UNDEF_SC_PARAM ) param5 = CV_IDP_BIRCHFIELD_PARAM5;
 
-        CV_CALL( icvFindStereoCorrespondenceByBirchfieldDP( src1->data.ptr, 
-            src2->data.ptr, dst->data.ptr, 
+        CV_CALL( icvFindStereoCorrespondenceByBirchfieldDP( src1->data.ptr,
+            src2->data.ptr, dst->data.ptr,
             cvGetMatSize( src1 ), src1->step,
-            maxDisparity, (float)param1, (float)param2, (float)param3, 
+            maxDisparity, (float)param1, (float)param2, (float)param3,
             (float)param4, (float)param5 ) );
     }
     else
@@ -547,7 +547,7 @@ cvFindStereoCorrespondence(
         CV_ERROR( CV_StsBadArg, "Unsupported mode of function" );
     }
 
-    __END__; 
+    __END__;
 }
 
 /* End of file. */
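
A usage note for the entry point documented above: per the doc comment and the argument checks, the two input images and the output disparity image must all be single-channel 8-bit arrays of the same size, maxDisparity must satisfy 0 < maxDisparity < image width and maxDisparity <= 255, and any of param1..param5 passed as CV_UNDEF_SC_PARAM falls back to the CV_IDP_BIRCHFIELD_PARAM* defaults. A minimal call could look like the sketch below; the include paths are the usual installed headers and the file names are purely hypothetical, none of this is part of the patch itself.

#include <opencv2/legacy/legacy.hpp>
#include <opencv2/highgui/highgui_c.h>

static void example_birchfield_disparity(void)        /* illustrative only, not in the patch */
{
    IplImage* left  = cvLoadImage("left.png",  CV_LOAD_IMAGE_GRAYSCALE);   /* hypothetical input files */
    IplImage* right = cvLoadImage("right.png", CV_LOAD_IMAGE_GRAYSCALE);
    if( !left || !right )
    {
        cvReleaseImage(&left);
        cvReleaseImage(&right);
        return;
    }

    IplImage* disp = cvCreateImage(cvGetSize(left), IPL_DEPTH_8U, 1);       /* 8uC1, same size as inputs */

    cvFindStereoCorrespondence(left, right, CV_DISPARITY_BIRCHFIELD, disp,
                               64,                                          /* 0 < maxDisparity < width, <= 255 */
                               CV_UNDEF_SC_PARAM, CV_UNDEF_SC_PARAM, CV_UNDEF_SC_PARAM,
                               CV_UNDEF_SC_PARAM, CV_UNDEF_SC_PARAM);       /* take the Birchfield defaults */

    cvReleaseImage(&disp);
    cvReleaseImage(&right);
    cvReleaseImage(&left);
}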
index 1276627..9025119 100644 (file)
@@ -41,7 +41,7 @@
 
 #include "precomp.hpp"
 
-CvStatus CV_STDCALL
+static CvStatus
 icvJacobiEigens_32f(float *A, float *V, float *E, int n, float eps)
 {
     int i, j, k, ind;
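
The change above is the pattern repeated through most of the hunks that follow (cvFindBlobsByCCClasters, icvComputeCoeffForStereoV3, icvGetQuadsTransformNew, the icvComputeDerivate* helpers, and so on): a function that is only called from inside its own translation unit is given internal linkage, so warnings of the -Wmissing-declarations / -Wmissing-prototypes kind (the exact flag names are an assumption here) stop firing and the symbol no longer leaks out of the library. Functions that genuinely need external linkage get an explicit prototype instead, as with cvOptimizeLevenbergMarquardtBundle further down. A minimal sketch of the idiom, with hypothetical names:

/* file-local helper: internal linkage keeps the missing-declaration warning quiet */
static float icv_scale_sum( const float* v, int n, float s )
{
    float acc = 0.f;
    for( int i = 0; i < n; i++ )
        acc += v[i];
    return acc * s;
}

/* exported function: keeps external linkage, but a declaration must exist in some
 * header (or directly above the definition, as done for cvOptimizeLevenbergMarquardtBundle) */
float cv_example_entry( const float* v, int n )
{
    return icv_scale_sum( v, n, 0.5f );
}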
index f39485f..24bc30d 100644 (file)
@@ -83,7 +83,7 @@ static int CompareContour(const void* a, const void* b, void* )
     return (dx < wt && dy < ht);
 }
 
-void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
+static void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
 {   /* Create contours: */
     IplImage*       pIB = NULL;
     CvSeq*          cnt = NULL;
index c8a55f6..b0ec792 100644 (file)
@@ -160,9 +160,5 @@ public:
 };
 
 /* Blob detector constructor: */
-CvBlobDetector* cvCreateBlobDetectorReal(CvTestSeq* pTestSeq){return new CvBlobDetectorReal(pTestSeq);}
-
-
-
-
+//CvBlobDetector* cvCreateBlobDetectorReal(CvTestSeq* pTestSeq){return new CvBlobDetectorReal(pTestSeq);}
 
index 0f5b24e..fa6e7ce 100644 (file)
@@ -106,7 +106,7 @@ int icvCompute3DPoint( double alpha,double betta,
     double invPartAll;
 
     double alphabetta = alpha*betta;
-    
+
     partAll = alpha - betta;
     if( fabs(partAll) > 0.00001  ) /* alpha must be > betta */
     {
@@ -116,7 +116,7 @@ int icvCompute3DPoint( double alpha,double betta,
 
         partY   = coeffs->Ycoef        + coeffs->YcoefA *alpha +
                   coeffs->YcoefB*betta + coeffs->YcoefAB*alphabetta;
-        
+
         partZ   = coeffs->Zcoef        + coeffs->ZcoefA *alpha +
                   coeffs->ZcoefB*betta + coeffs->ZcoefAB*alphabetta;
 
@@ -159,12 +159,12 @@ int icvCreateConvertMatrVect( CvMatr64d     rotMatr1,
     icvMulMatrix_64d(   convRotMatr,
                         3,3,
                         transVect2,
-                        1,3, 
+                        1,3,
                         tmpVect);
-    
+
     icvSubVector_64d(transVect1,tmpVect,convTransVect,3);
 
-    
+
     return CV_NO_ERR;
 }
 
@@ -182,15 +182,15 @@ int icvConvertPointSystem(CvPoint3D64d  M2,
     icvMulMatrix_64d(   rotMatr,
                         3,3,
                         (double*)&M2,
-                        1,3, 
+                        1,3,
                         tmpVect);
 
     icvAddVector_64d(tmpVect,transVect,(double*)M1,3);
-    
+
     return CV_NO_ERR;
 }
 /*--------------------------------------------------------------------------------------*/
-int icvComputeCoeffForStereoV3( double quad1[4][2],
+static int icvComputeCoeffForStereoV3( double quad1[4][2],
                                 double quad2[4][2],
                                 int    numScanlines,
                                 CvMatr64d    camMatr1,
@@ -222,7 +222,7 @@ int icvComputeCoeffForStereoV3( double quad1[4][2],
 
         point2.x = (1.0 - alpha) * quad1[1][0] + alpha * quad1[2][0];
         point2.y = (1.0 - alpha) * quad1[1][1] + alpha * quad1[2][1];
-        
+
         point3.x = (1.0 - alpha) * quad2[0][0] + alpha * quad2[3][0];
         point3.y = (1.0 - alpha) * quad2[0][1] + alpha * quad2[3][1];
 
@@ -243,10 +243,10 @@ int icvComputeCoeffForStereoV3( double quad1[4][2],
                             &startCoeffs[currLine],
                             needSwapCamera);
     }
-    return CV_NO_ERR;    
+    return CV_NO_ERR;
 }
 /*--------------------------------------------------------------------------------------*/
-int icvComputeCoeffForStereoNew(   double quad1[4][2],
+static int icvComputeCoeffForStereoNew(   double quad1[4][2],
                                         double quad2[4][2],
                                         int    numScanlines,
                                         CvMatr32f    camMatr1,
@@ -260,10 +260,10 @@ int icvComputeCoeffForStereoNew(   double quad1[4][2],
 
     double camMatr1_64d[9];
     double camMatr2_64d[9];
-    
+
     double rotMatr1_64d[9];
     double transVect1_64d[3];
-    
+
     double rotMatr2_64d[9];
     double transVect2_64d[3];
 
@@ -348,21 +348,21 @@ int icvComCoeffForLine(   CvPoint2D64d point1,
 {
     /* Get direction for all points */
     /* Direction for camera 1 */
-    
+
     CvPoint3D64f direct1;
     CvPoint3D64f direct2;
     CvPoint3D64f camPoint1;
-    
+
     CvPoint3D64f directS3;
     CvPoint3D64f directS4;
     CvPoint3D64f direct3;
     CvPoint3D64f direct4;
     CvPoint3D64f camPoint2;
-    
+
     icvGetDirectionForPoint(   point1,
                             camMatr1,
                             &direct1);
-    
+
     icvGetDirectionForPoint(   point2,
                             camMatr1,
                             &direct2);
@@ -372,13 +372,13 @@ int icvComCoeffForLine(   CvPoint2D64d point1,
     icvGetDirectionForPoint(   point3,
                             camMatr2,
                             &directS3);
-    
+
     icvGetDirectionForPoint(   point4,
                             camMatr2,
                             &directS4);
 
     /* Create conversion for camera 2: two directions and camera point */
-    
+
     double convRotMatr[9];
     double convTransVect[3];
 
@@ -392,15 +392,15 @@ int icvComCoeffForLine(   CvPoint2D64d point1,
     CvPoint3D64f zeroVect;
     zeroVect.x = zeroVect.y = zeroVect.z = 0.0;
     camPoint1.x = camPoint1.y = camPoint1.z = 0.0;
-    
+
     icvConvertPointSystem(directS3,&direct3,convRotMatr,convTransVect);
     icvConvertPointSystem(directS4,&direct4,convRotMatr,convTransVect);
     icvConvertPointSystem(zeroVect,&camPoint2,convRotMatr,convTransVect);
 
     CvPoint3D64f pointB;
-        
+
     int postype = 0;
-    
+
     /* Changed order */
     /* Compute point B: xB,yB,zB */
     icvGetCrossLines(camPoint1,direct2,
@@ -449,7 +449,7 @@ int icvComCoeffForLine(   CvPoint2D64d point1,
 
 
     double gamma;
-    
+
     double xA,yA,zA;
     double xB,yB,zB;
     double xC,yC,zC;
@@ -476,7 +476,7 @@ int icvComCoeffForLine(   CvPoint2D64d point1,
                                 camPoint1,
                                 gamma,
                                 coeffs);
-    
+
     return CV_NO_ERR;
 }
 
@@ -489,7 +489,7 @@ int icvGetDirectionForPoint(  CvPoint2D64d point,
 {
     /*  */
     double invMatr[9];
-    
+
     /* Invert matrix */
 
     icvInvertMatrix_64d(camMatr,3,invMatr);
@@ -504,10 +504,10 @@ int icvGetDirectionForPoint(  CvPoint2D64d point,
     icvMulMatrix_64d(   invMatr,
                         3,3,
                         vect,
-                        1,3, 
+                        1,3,
                         (double*)direct);
 
-    return CV_NO_ERR;    
+    return CV_NO_ERR;
 }
 
 /*--------------------------------------------------------------------------------------*/
@@ -556,7 +556,7 @@ int icvGetCrossLines(CvPoint3D64d point11,CvPoint3D64d point12,
     double alpha,betta;
 
     delta  = a11*a22-a12*a21;
-    
+
     if( fabs(delta) < EPS64D )
     {
         /*return ERROR;*/
@@ -662,7 +662,7 @@ int icvGetAngleLine( CvPoint2D64d startPoint, CvSize imageSize,CvPoint2D64d *poi
     /* Find four lines */
 
     CvPoint2D64d pa,pb,pc,pd;
-    
+
     pa.x = 0;
     pa.y = 0;
 
@@ -674,10 +674,10 @@ int icvGetAngleLine( CvPoint2D64d startPoint, CvSize imageSize,CvPoint2D64d *poi
 
     pc.x = 0;
     pc.y = imageSize.height-1;
-    
+
     /* We can compute points for angle */
     /* Test for place section */
-    
+
     if( startPoint.x < 0 )
     {/* 1,4,7 */
         if( startPoint.y < 0)
@@ -782,7 +782,7 @@ void icvGetCoefForPiece(   CvPoint2D64d p_start,CvPoint2D64d p_end,
 /*---------------------------------------------------------------------------------------*/
 
 /* Get common area of rectifying */
-void icvGetCommonArea( CvSize imageSize,
+static void icvGetCommonArea( CvSize imageSize,
                     CvPoint3D64d epipole1,CvPoint3D64d epipole2,
                     CvMatr64d fundMatr,
                     CvVect64d coeff11,CvVect64d coeff12,
@@ -808,10 +808,10 @@ void icvGetCommonArea( CvSize imageSize,
     double transFundMatr[3*3];
     /* Compute transpose of fundamental matrix */
     icvTransposeMatrix_64d( fundMatr, 3, 3, transFundMatr );
-    
+
     CvPoint2D64d epipole1_2d;
     CvPoint2D64d epipole2_2d;
-    
+
     if( fabs(epipole1.z) < 1e-8 )
     {/* epipole1 in infinity */
         *result = 0;
@@ -853,7 +853,7 @@ void icvGetCommonArea( CvSize imageSize,
     pointW11[2] = 1.0;
 
     icvTransformVector_64d( transFundMatr, /* !!! Modified from not transposed */
-                            pointW11, 
+                            pointW11,
                             corr21,
                             3,3);
 
@@ -864,7 +864,7 @@ void icvGetCommonArea( CvSize imageSize,
                         corr21[0],corr21[1],corr21[2],
                         &start,&end,
                         &res);
-    
+
     if( res == 0 )
     {/* We have no cross */
         /* We must define new angle */
@@ -879,7 +879,7 @@ void icvGetCommonArea( CvSize imageSize,
         /* corr11 = Fund * p21 */
 
         icvTransformVector_64d( fundMatr, /* !!! Modified */
-                                pointW21, 
+                                pointW21,
                                 corr11,
                                 3,3);
 
@@ -889,7 +889,7 @@ void icvGetCommonArea( CvSize imageSize,
         coeff11[0] = corr11[0];
         coeff11[1] = corr11[1];
         coeff11[2] = corr11[2];
-        
+
         /* Set coefs for line 1 image 2 */
         icvGetCoefForPiece(    epipole2_2d,point21,
                             &coeff21[0],&coeff21[1],&coeff21[2],
@@ -911,12 +911,12 @@ void icvGetCommonArea( CvSize imageSize,
             *result = 0;
             return;/* Error */
         }
-        
+
         /* Set coefs for line 1 image 2 */
         coeff21[0] = corr21[0];
         coeff21[1] = corr21[1];
         coeff21[2] = corr21[2];
-        
+
     }
 
     /* ============= Computation for line 2 ================ */
@@ -928,7 +928,7 @@ void icvGetCommonArea( CvSize imageSize,
     pointW12[2] = 1.0;
 
     icvTransformVector_64d( transFundMatr,
-                            pointW12, 
+                            pointW12,
                             corr22,
                             3,3);
 
@@ -937,7 +937,7 @@ void icvGetCommonArea( CvSize imageSize,
                         corr22[0],corr22[1],corr22[2],
                         &start,&end,
                         &res);
-    
+
     if( res == 0 )
     {/* We have no cross */
         /* We must define new angle */
@@ -952,18 +952,18 @@ void icvGetCommonArea( CvSize imageSize,
         /* corr2 = Fund' * p1 */
 
         icvTransformVector_64d( fundMatr,
-                                pointW22, 
+                                pointW22,
                                 corr12,
                                 3,3);
 
-        
+
         /* We have a cross, and it is the resulting cross for the lower line. Set result coefs */
 
         /* Set coefs for line 2 image 1 */
         coeff12[0] = corr12[0];
         coeff12[1] = corr12[1];
         coeff12[2] = corr12[2];
-        
+
         /* Set coefs for line 1 image 2 */
         icvGetCoefForPiece(    epipole2_2d,point22,
                             &coeff22[0],&coeff22[1],&coeff22[2],
@@ -985,12 +985,12 @@ void icvGetCommonArea( CvSize imageSize,
             *result = 0;
             return;/* Error */
         }
-        
+
         /* Set coefs for line 1 image 2 */
         coeff22[0] = corr22[0];
         coeff22[1] = corr22[1];
         coeff22[2] = corr22[2];
-        
+
     }
 
     /* Now we know common area */
@@ -1050,9 +1050,9 @@ void icvGetCrossPieceDirect(   CvPoint2D64d p_start,CvPoint2D64d p_end,
     {/* Have cross */
         double det;
         double detxc,detyc;
-        
+
         det = a * (p_end.x - p_start.x) + b * (p_end.y - p_start.y);
-        
+
         if( fabs(det) < EPS64D )
         {/* lines are parallel and may be equal or line is point */
             if(  fabs(a*p_start.x + b*p_start.y + c) < EPS64D )
@@ -1062,7 +1062,7 @@ void icvGetCrossPieceDirect(   CvPoint2D64d p_start,CvPoint2D64d p_end,
             }
             else
             {
-                *result = 2;                
+                *result = 2;
             }
             return;
         }
@@ -1131,7 +1131,7 @@ void icvGetCrossPiecePiece( CvPoint2D64d p1_start,CvPoint2D64d p1_end,
 
     cross->x = delX / del;
     cross->y = delY / del;
-    
+
     *result = 1;
     return;
 }
@@ -1171,7 +1171,7 @@ void icvGetCrossRectDirect(    CvSize imageSize,
     CvPoint2D64d frameEnd;
     CvPoint2D64d cross[4];
     int     haveCross[4];
-    
+
     haveCross[0] = 0;
     haveCross[1] = 0;
     haveCross[2] = 0;
@@ -1182,25 +1182,25 @@ void icvGetCrossRectDirect(    CvSize imageSize,
     frameEnd.x = imageSize.width;
     frameEnd.y = 0;
 
-    icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[0],&haveCross[0]);    
-    
+    icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[0],&haveCross[0]);
+
     frameBeg.x = imageSize.width;
     frameBeg.y = 0;
     frameEnd.x = imageSize.width;
     frameEnd.y = imageSize.height;
-    icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[1],&haveCross[1]);    
+    icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[1],&haveCross[1]);
 
     frameBeg.x = imageSize.width;
     frameBeg.y = imageSize.height;
     frameEnd.x = 0;
     frameEnd.y = imageSize.height;
-    icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[2],&haveCross[2]);    
+    icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[2],&haveCross[2]);
 
     frameBeg.x = 0;
     frameBeg.y = imageSize.height;
     frameEnd.x = 0;
     frameEnd.y = 0;
-    icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[3],&haveCross[3]);    
+    icvGetCrossPieceDirect(frameBeg,frameEnd,a,b,c,&cross[3],&haveCross[3]);
 
     double maxDist;
 
@@ -1210,7 +1210,7 @@ void icvGetCrossRectDirect(    CvSize imageSize,
     int i,j;
 
     maxDist = -1.0;
-    
+
     double distance;
 
     for( i = 0; i < 3; i++ )
@@ -1259,7 +1259,7 @@ void icvProjectPointToImage(   CvPoint3D64d point,
 
     double tmpVect1[3];
     double tmpVect2[3];
-    
+
     icvMulMatrix_64d (  rotMatr,
                         3,3,
                         (double*)&point,
@@ -1276,13 +1276,13 @@ void icvProjectPointToImage(   CvPoint3D64d point,
 
     projPoint->x = tmpVect1[0] / tmpVect1[2];
     projPoint->y = tmpVect1[1] / tmpVect1[2];
-   
+
     return;
 }
 
 /*---------------------------------------------------------------------------------------*/
 /* Get quads for transform images */
-void icvGetQuadsTransform( 
+void icvGetQuadsTransform(
                           CvSize        imageSize,
                         CvMatr64d     camMatr1,
                         CvMatr64d     rotMatr1,
@@ -1338,10 +1338,10 @@ void icvGetQuadsTransform(
                                         fundMatr_32f,
                                         camMatr1_32f,
                                         camMatr2_32f);
-        
+
         CvPoint3D32f epipole1_32f;
         CvPoint3D32f epipole2_32f;
-        
+
         cvComputeEpipolesFromFundMatrix( fundMatr_32f,
                                          &epipole1_32f,
                                          &epipole2_32f);
@@ -1353,7 +1353,7 @@ void icvGetQuadsTransform(
         epipole2->x = epipole2_32f.x;
         epipole2->y = epipole2_32f.y;
         epipole2->z = epipole2_32f.z;
-        
+
         /* Convert fundamental matrix */
         icvCvt_32f_64d(fundMatr_32f,fundMatr,9);
     }
@@ -1466,7 +1466,7 @@ void icvGetQuadsTransform(
         /* -------------Compute for first image-------------- */
         CvPoint2D32f pointb1;
         CvPoint2D32f pointe1;
-        
+
         CvPoint2D32f pointb2;
         CvPoint2D32f pointe2;
 
@@ -1494,11 +1494,11 @@ void icvGetQuadsTransform(
         double dxOld,dyOld;
         double dxNew,dyNew;
         double distOld,distNew;
-        
+
         dxOld = quad2[1][0] - quad2[0][0];
         dyOld = quad2[1][1] - quad2[0][1];
         distOld = dxOld*dxOld + dyOld*dyOld;
-        
+
         dxNew = quad2[1][0] - pointb2.x;
         dyNew = quad2[1][1] - pointb2.y;
         distNew = dxNew*dxNew + dyNew*dyNew;
@@ -1542,7 +1542,7 @@ void icvGetQuadsTransform(
             newQuad2[0][1] = quad2[0][1];
             newQuad2[3][0] = quad2[3][0];
             newQuad2[3][1] = quad2[3][1];
-            
+
             newQuad1[0][0] = pointb1.x;
             newQuad1[0][1] = pointb1.y;
             newQuad1[3][0] = pointe1.x;
@@ -1569,11 +1569,11 @@ void icvGetQuadsTransform(
                                     &pointe2);
 
         /* Compute distances */
-        
+
         dxOld = quad2[0][0] - quad2[1][0];
         dyOld = quad2[0][1] - quad2[1][1];
         distOld = dxOld*dxOld + dyOld*dyOld;
-        
+
         dxNew = quad2[0][0] - pointb2.x;
         dyNew = quad2[0][1] - pointb2.y;
         distNew = dxNew*dxNew + dyNew*dyNew;
@@ -1614,7 +1614,7 @@ void icvGetQuadsTransform(
             newQuad2[1][1] = quad2[1][1];
             newQuad2[2][0] = quad2[2][0];
             newQuad2[2][1] = quad2[2][1];
-            
+
             newQuad1[1][0] = pointb1.x;
             newQuad1[1][1] = pointb1.y;
             newQuad1[2][0] = pointe1.x;
@@ -1660,7 +1660,7 @@ void icvGetQuadsTransform(
 
 /*---------------------------------------------------------------------------------------*/
 
-void icvGetQuadsTransformNew(  CvSize        imageSize,
+static void icvGetQuadsTransformNew(  CvSize        imageSize,
                             CvMatr32f     camMatr1,
                             CvMatr32f     camMatr2,
                             CvMatr32f     rotMatr1,
@@ -1732,7 +1732,7 @@ void icvGetQuadsTransformNew(  CvSize        imageSize,
 
     /* Convert fundamental matrix */
     icvCvt_64d_32f(fundMatr_64d,fundMatr,9);
-    
+
     return;
 }
 
@@ -1771,7 +1771,7 @@ void icvGetQuadsTransformStruct(  CvStereoCamera* stereoCamera)
 /*---------------------------------------------------------------------------------------*/
 void icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera)
 {
-    /* For given intrinsic and extrinsic parameters computes rest parameters 
+    /* For given intrinsic and extrinsic parameters computes rest parameters
     **   such as fundamental matrix, warping coeffs, epipoles, ...
     */
 
@@ -1792,14 +1792,14 @@ void icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera)
 
     icvCvt_32f_64d(stereoCamera->camera[0]->transVect,transVect1,3);
     icvCvt_32f_64d(stereoCamera->camera[1]->transVect,transVect2,3);
-        
+
     icvCreateConvertMatrVect(   rotMatr1,
                                 transVect1,
                                 rotMatr2,
                                 transVect2,
                                 convRotMatr,
                                 convTransVect);
-    
+
     /* copy to stereo camera params */
     icvCvt_64d_32f(convRotMatr,stereoCamera->rotMatrix,9);
     icvCvt_64d_32f(convTransVect,stereoCamera->transVector,3);
@@ -1837,7 +1837,7 @@ void icvGetCutPiece(   CvVect64d areaLineCoef1,CvVect64d areaLineCoef2,
     /* Find middle line of sector */
     double midLine[3]={0,0,0};
 
-    
+
     /* Different way  */
     CvPoint2D64d pointOnLine1;  pointOnLine1.x = pointOnLine1.y = 0;
     CvPoint2D64d pointOnLine2;  pointOnLine2.x = pointOnLine2.y = 0;
@@ -1885,7 +1885,7 @@ void icvGetCutPiece(   CvVect64d areaLineCoef1,CvVect64d areaLineCoef2,
         candPoints[numPoints] = cornerPoint;
         numPoints++;
     }
-    
+
     cornerPoint.x = imageSize.width;
     cornerPoint.y = imageSize.height;
     icvTestPoint( cornerPoint, areaLineCoef1, areaLineCoef2, epipole, &res);
@@ -1919,7 +1919,7 @@ void icvGetCutPiece(   CvVect64d areaLineCoef1,CvVect64d areaLineCoef2,
                         areaLineCoef2[0],areaLineCoef2[1],areaLineCoef2[2],
                         &tmpPoints[0], &tmpPoints[1],
                         &res);
-    
+
     for( i = 0; i < res; i++ )
     {
         candPoints[numPoints++] = tmpPoints[i];
@@ -1941,7 +1941,7 @@ void icvGetCutPiece(   CvVect64d areaLineCoef1,CvVect64d areaLineCoef2,
     double maxDist = 0;
     double minDist = 10000000;
 
-    
+
     for( i = 0; i < numPoints; i++ )
     {
         icvProjectPointToDirect(candPoints[i], midLine, &projPoint);
@@ -1960,7 +1960,7 @@ void icvGetCutPiece(   CvVect64d areaLineCoef1,CvVect64d areaLineCoef2,
     }
 
     /* We know maximum and minimum points. Now we can compute cut lines */
-    
+
     icvGetNormalDirect(midLine,minPoint,cutLine1);
     icvGetNormalDirect(midLine,maxPoint,cutLine2);
 
@@ -1993,7 +1993,7 @@ void icvGetMiddleAnglePoint(   CvPoint2D64d basePoint,
                             CvPoint2D64d point1,CvPoint2D64d point2,
                             CvPoint2D64d* midPoint)
 {/* !!! Maybe this needs to return an error */
-    
+
     double dist1;
     double dist2;
     icvGetPieceLength(basePoint,point1,&dist1);
@@ -2020,7 +2020,7 @@ void icvGetNormalDirect(CvVect64d direct,CvPoint2D64d point,CvVect64d normDirect
 {
     normDirect[0] =   direct[1];
     normDirect[1] = - direct[0];
-    normDirect[2] = -(normDirect[0]*point.x + normDirect[1]*point.y);  
+    normDirect[2] = -(normDirect[0]*point.x + normDirect[1]*point.y);
     return;
 }
 
@@ -2063,7 +2063,7 @@ void icvTestPoint( CvPoint2D64d testPoint,
     {
         *result = 0;
     }
-    
+
     return;
 }
 
@@ -2074,7 +2074,7 @@ void icvProjectPointToDirect(  CvPoint2D64d point,CvVect64d lineCoeff,
 {
     double a = lineCoeff[0];
     double b = lineCoeff[1];
-    
+
     double det =  1.0 / ( a*a + b*b );
     double delta =  a*point.y - b*point.x;
 
@@ -2103,7 +2103,7 @@ CV_IMPL IplImage* icvCreateIsometricImage( IplImage* src, IplImage* dst,
     CvSize src_size ;
     src_size.width = src->width;
     src_size.height = src->height;
-    
+
     CvSize dst_size = src_size;
 
     if( dst )
@@ -2127,7 +2127,7 @@ CV_IMPL IplImage* icvCreateIsometricImage( IplImage* src, IplImage* dst,
     return dst;
 }
 
-int
+static int
 icvCvt_32f_64d( float *src, double *dst, int size )
 {
     int t;
@@ -2147,7 +2147,7 @@ icvCvt_32f_64d( float *src, double *dst, int size )
 
 /*======================================================================================*/
 /* Type conversion double -> float */
-int
+static int
 icvCvt_64d_32f( double *src, float *dst, int size )
 {
     int t;
@@ -2167,9 +2167,9 @@ icvCvt_64d_32f( double *src, float *dst, int size )
 
 /*----------------------------------------------------------------------------------*/
 
-
+#if 0
 /* Find line which cross frame by line(a,b,c) */
-void FindLineForEpiline(    CvSize imageSize,
+static void FindLineForEpiline(    CvSize imageSize,
                             float a,float b,float c,
                             CvPoint2D32f *start,CvPoint2D32f *end,
                             int*)
@@ -2191,7 +2191,7 @@ void FindLineForEpiline(    CvSize imageSize,
     frameEnd.x = (float)(imageSize.width);
     frameEnd.y = 0;
     haveCross[0] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[0]);
-    
+
     frameBeg.x = (float)(imageSize.width);
     frameBeg.y = 0;
     frameEnd.x = (float)(imageSize.width);
@@ -2203,7 +2203,7 @@ void FindLineForEpiline(    CvSize imageSize,
     frameEnd.x = 0;
     frameEnd.y = (float)(imageSize.height);
     haveCross[2] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[2]);
-    
+
     frameBeg.x = 0;
     frameBeg.y = (float)(imageSize.height);
     frameEnd.x = 0;
@@ -2255,13 +2255,12 @@ void FindLineForEpiline(    CvSize imageSize,
     }
 
     return;
-    
+
 }
 
 
 /*----------------------------------------------------------------------------------*/
-
-int GetAngleLinee( CvPoint2D32f epipole, CvSize imageSize,CvPoint2D32f point1,CvPoint2D32f point2)
+static int GetAngleLinee( CvPoint2D32f epipole, CvSize imageSize,CvPoint2D32f point1,CvPoint2D32f point2)
 {
     float width  = (float)(imageSize.width);
     float height = (float)(imageSize.height);
@@ -2271,7 +2270,7 @@ int GetAngleLinee( CvPoint2D32f epipole, CvSize imageSize,CvPoint2D32f point1,Cv
     /* Find four lines */
 
     CvPoint2D32f pa,pb,pc,pd;
-    
+
     pa.x = 0;
     pa.y = 0;
 
@@ -2290,7 +2289,7 @@ int GetAngleLinee( CvPoint2D32f epipole, CvSize imageSize,CvPoint2D32f point1,Cv
     float x,y;
     x = epipole.x;
     y = epipole.y;
-    
+
     if( x < 0 )
     {/* 1,4,7 */
         if( y < 0)
@@ -2344,15 +2343,15 @@ int GetAngleLinee( CvPoint2D32f epipole, CvSize imageSize,CvPoint2D32f point1,Cv
             return 2;
         }
 
-        
+
     }
-    
+
 
     return 0;
 }
 
 /*--------------------------------------------------------------------------------------*/
-void icvComputePerspectiveCoeffs(const CvPoint2D32f srcQuad[4],const CvPoint2D32f dstQuad[4],double coeffs[3][3])
+static void icvComputePerspectiveCoeffs(const CvPoint2D32f srcQuad[4],const CvPoint2D32f dstQuad[4],double coeffs[3][3])
 {/* Computes perspective coeffs for transformation from src to dst quad */
 
 
@@ -2385,7 +2384,7 @@ void icvComputePerspectiveCoeffs(const CvPoint2D32f srcQuad[4],const CvPoint2D32
         double Y = dstQuad[i].y;
 #endif
         double* a = A + i*16;
-        
+
         a[0] = x;
         a[1] = y;
         a[2] = 1;
@@ -2420,7 +2419,7 @@ void icvComputePerspectiveCoeffs(const CvPoint2D32f srcQuad[4],const CvPoint2D32
     CV_CALL( cvPseudoInverse( &matA, &matInvA ));
     CV_CALL( cvMatMulAdd( &matInvA, &matB, 0, &matX ));
     }
-    
+
     coeffs[0][0] = c[0];
     coeffs[0][1] = c[1];
     coeffs[0][2] = c[2];
@@ -2435,6 +2434,7 @@ void icvComputePerspectiveCoeffs(const CvPoint2D32f srcQuad[4],const CvPoint2D32
 
     return;
 }
+#endif
 
 /*--------------------------------------------------------------------------------------*/
 
@@ -2457,7 +2457,7 @@ CV_IMPL void cvComputePerspectiveMap(const double c[3][3], CvArr* rectMapX, CvAr
 
     size = cvGetMatSize(mapx);
     assert( fabs(c[2][2] - 1.) < FLT_EPSILON );
-    
+
     for( i = 0; i < size.height; i++ )
     {
         float* mx = (float*)(mapx->data.ptr + mapx->step*i);
@@ -2525,7 +2525,7 @@ CV_IMPL void cvInitPerspectiveTransform( CvSize size, const CvPoint2D32f quad[4]
         double Y = quad[i].y;
 #endif
         double* a = A + i*16;
-        
+
         a[0] = x;
         a[1] = y;
         a[2] = 1;
@@ -2560,7 +2560,7 @@ CV_IMPL void cvInitPerspectiveTransform( CvSize size, const CvPoint2D32f quad[4]
     CV_CALL( cvPseudoInverse( &matA, &matInvA ));
     CV_CALL( cvMatMulAdd( &matInvA, &matB, 0, &matX ));
     }
-    
+
     matrix[0][0] = c[0];
     matrix[0][1] = c[1];
     matrix[0][2] = c[2];
@@ -2613,7 +2613,7 @@ void icvComputeeInfiniteProject1(   CvMatr64d     rotMatr,
     icvMulMatrix_64d(   invMatr1,
                         3,3,
                         p1,
-                        1,3, 
+                        1,3,
                         P1);
 
     double invR[9];
@@ -2624,7 +2624,7 @@ void icvComputeeInfiniteProject1(   CvMatr64d     rotMatr,
     icvMulMatrix_64d(   invR,
                         3,3,
                         P1,
-                        1,3, 
+                        1,3,
                         P2);
 
     /* Now we can project this point to image 2 */
@@ -2633,7 +2633,7 @@ void icvComputeeInfiniteProject1(   CvMatr64d     rotMatr,
     icvMulMatrix_64d(   camMatr2,
                         3,3,
                         P2,
-                        1,3, 
+                        1,3,
                         projP);
 
     point2->x = (float)(projP[0] / projP[2]);
@@ -2661,7 +2661,7 @@ void icvComputeeInfiniteProject2(   CvMatr64d     rotMatr,
     icvMulMatrix_64d(   invMatr2,
                         3,3,
                         p2,
-                        1,3, 
+                        1,3,
                         P2);
 
     /* Change system 1 to system 2 */
@@ -2670,7 +2670,7 @@ void icvComputeeInfiniteProject2(   CvMatr64d     rotMatr,
     icvMulMatrix_64d(   rotMatr,
                         3,3,
                         P2,
-                        1,3, 
+                        1,3,
                         P1);
 
     /* Now we can project this point to image 2 */
@@ -2679,7 +2679,7 @@ void icvComputeeInfiniteProject2(   CvMatr64d     rotMatr,
     icvMulMatrix_64d(   camMatr1,
                         3,3,
                         P1,
-                        1,3, 
+                        1,3,
                         projP);
 
     point1->x = (float)(projP[0] / projP[2]);
@@ -2690,7 +2690,7 @@ void icvComputeeInfiniteProject2(   CvMatr64d     rotMatr,
 
 /* Select best R and t for given cameras, points, ... */
 /* For both cameras */
-int icvSelectBestRt(           int           numImages,
+static int icvSelectBestRt(         int           numImages,
                                     int*          numPoints,
                                     CvPoint2D32f* imagePoints1,
                                     CvPoint2D32f* imagePoints2,
@@ -2713,7 +2713,7 @@ int icvSelectBestRt(           int           numImages,
 
     /* Need to convert input data 32 -> 64 */
     CvPoint3D64d* objectPoints_64d;
-    
+
     double* rotMatrs1_64d;
     double* rotMatrs2_64d;
 
@@ -2736,7 +2736,7 @@ int icvSelectBestRt(           int           numImages,
     }
 
     objectPoints_64d = (CvPoint3D64d*)calloc(totalNum,sizeof(CvPoint3D64d));
-    
+
     rotMatrs1_64d    = (double*)calloc(numImages,sizeof(double)*9);
     rotMatrs2_64d    = (double*)calloc(numImages,sizeof(double)*9);
 
@@ -2744,7 +2744,7 @@ int icvSelectBestRt(           int           numImages,
     transVects2_64d  = (double*)calloc(numImages,sizeof(double)*3);
 
     /* Convert input data to 64d */
-    
+
     icvCvt_32f_64d((float*)objectPoints, (double*)objectPoints_64d,  totalNum*3);
 
     icvCvt_32f_64d(rotMatrs1, rotMatrs1_64d,  numImages*9);
@@ -2774,14 +2774,14 @@ int icvSelectBestRt(           int           numImages,
     int currRt;
     for( currRt = 0; currRt < numImages; currRt++ )
     {
-        int begPoint = 0; 
+        int begPoint = 0;
         for(currImagePair = 0; currImagePair < numImages; currImagePair++ )
         {
             /* For current R,t R,t compute relative position of cameras */
 
             double convRotMatr[9];
             double convTransVect[3];
-            
+
             icvCreateConvertMatrVect( rotMatrs1_64d + currRt*9,
                                       transVects1_64d + currRt*3,
                                       rotMatrs2_64d + currRt*9,
@@ -2836,7 +2836,7 @@ int icvSelectBestRt(           int           numImages,
                 tmpPoint.x = (double)(objectPoints[i].x);
                 tmpPoint.y = (double)(objectPoints[i].y);
                 tmpPoint.z = (double)(objectPoints[i].z);
-                
+
                 icvConvertPointSystem(  tmpPoint,
                                         points2+i,
                                         rotMatrs2_64d + currImagePair*9,
@@ -2862,7 +2862,7 @@ int icvSelectBestRt(           int           numImages,
 
 
             }
-            
+
 #if 0
             cvProjectPointsSimple(  numPoints[currImagePair],
                                     objectPoints_64d + begPoint,
@@ -2901,7 +2901,7 @@ int icvSelectBestRt(           int           numImages,
                                         cameraMatrix2_64d,
                                         nodist,
                                         projImagePoints2);
-                
+
             }
 #endif
 
@@ -2929,7 +2929,7 @@ int icvSelectBestRt(           int           numImages,
             double err;
             for( currPoint = 0; currPoint < numberPnt; currPoint++ )
             {
-                double len1,len2; 
+                double len1,len2;
                 double dx1,dy1;
                 dx1 = imagePoints1[begPoint+currPoint].x - projImagePoints1[currPoint].x;
                 dy1 = imagePoints1[begPoint+currPoint].y - projImagePoints1[currPoint].y;
@@ -3030,12 +3030,12 @@ int icvConvertWarpCoordinates(double coeffs[3][3],
                                 int direction)
 {
     double x,y;
-    double det; 
+    double det;
     if( direction == CV_WARP_TO_CAMERA )
     {/* convert from camera image to warped image coordinates */
         x = warpPoint->x;
         y = warpPoint->y;
-        
+
         det = (coeffs[2][0] * x + coeffs[2][1] * y + coeffs[2][2]);
         if( fabs(det) > 1e-8 )
         {
@@ -3058,7 +3058,7 @@ int icvConvertWarpCoordinates(double coeffs[3][3],
             return CV_OK;
         }
     }
-    
+
     return CV_BADFACTOR_ERR;
 }
 
@@ -3233,8 +3233,9 @@ int icvStereoCalibration( int numImages,
     return CV_NO_ERR;
 }
 
+#if 0
 /* Find line from epipole */
-void FindLine(CvPoint2D32f epipole,CvSize imageSize,CvPoint2D32f point,CvPoint2D32f *start,CvPoint2D32f *end)
+static void FindLine(CvPoint2D32f epipole,CvSize imageSize,CvPoint2D32f point,CvPoint2D32f *start,CvPoint2D32f *end)
 {
     CvPoint2D32f frameBeg;
     CvPoint2D32f frameEnd;
@@ -3252,7 +3253,7 @@ void FindLine(CvPoint2D32f epipole,CvSize imageSize,CvPoint2D32f point,CvPoint2D
     frameEnd.x = (float)(imageSize.width);
     frameEnd.y = 0;
     haveCross[0] = icvGetCrossPieceVector(frameBeg,frameEnd,epipole,point,&cross[0]);
-    
+
     frameBeg.x = (float)(imageSize.width);
     frameBeg.y = 0;
     frameEnd.x = (float)(imageSize.width);
@@ -3264,7 +3265,7 @@ void FindLine(CvPoint2D32f epipole,CvSize imageSize,CvPoint2D32f point,CvPoint2D
     frameEnd.x = 0;
     frameEnd.y = (float)(imageSize.height);
     haveCross[2] = icvGetCrossPieceVector(frameBeg,frameEnd,epipole,point,&cross[2]);
-    
+
     frameBeg.x = 0;
     frameBeg.y = (float)(imageSize.height);
     frameEnd.x = 0;
@@ -3277,7 +3278,7 @@ void FindLine(CvPoint2D32f epipole,CvSize imageSize,CvPoint2D32f point,CvPoint2D
 
     int maxN = -1;
     int minN = -1;
-    
+
     for( n = 0; n < 4; n++ )
     {
         if( haveCross[n] > 0 )
@@ -3315,9 +3316,8 @@ void FindLine(CvPoint2D32f epipole,CvSize imageSize,CvPoint2D32f point,CvPoint2D
     return;
 }
 
-
 /* Find line which cross frame by line(a,b,c) */
-void FindLineForEpiline(CvSize imageSize,float a,float b,float c,CvPoint2D32f *start,CvPoint2D32f *end)
+static void FindLineForEpiline(CvSize imageSize,float a,float b,float c,CvPoint2D32f *start,CvPoint2D32f *end)
 {
     CvPoint2D32f frameBeg;
     CvPoint2D32f frameEnd;
@@ -3335,7 +3335,7 @@ void FindLineForEpiline(CvSize imageSize,float a,float b,float c,CvPoint2D32f *s
     frameEnd.x = (float)(imageSize.width);
     frameEnd.y = 0;
     haveCross[0] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[0]);
-    
+
     frameBeg.x = (float)(imageSize.width);
     frameBeg.y = 0;
     frameEnd.x = (float)(imageSize.width);
@@ -3347,7 +3347,7 @@ void FindLineForEpiline(CvSize imageSize,float a,float b,float c,CvPoint2D32f *s
     frameEnd.x = 0;
     frameEnd.y = (float)(imageSize.height);
     haveCross[2] = icvGetCrossLineDirect(frameBeg,frameEnd,a,b,c,&cross[2]);
-    
+
     frameBeg.x = 0;
     frameBeg.y = (float)(imageSize.height);
     frameEnd.x = 0;
@@ -3399,11 +3399,11 @@ void FindLineForEpiline(CvSize imageSize,float a,float b,float c,CvPoint2D32f *s
     }
 
     return;
-    
+
 }
 
 /* Cross lines */
-int GetCrossLines(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f p2_start,CvPoint2D32f p2_end,CvPoint2D32f *cross)
+static int GetCrossLines(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f p2_start,CvPoint2D32f p2_end,CvPoint2D32f *cross)
 {
     double ex1,ey1,ex2,ey2;
     double px1,py1,px2,py2;
@@ -3448,7 +3448,7 @@ int GetCrossLines(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f p2_star
     cross->y = (float)(-delY / del);
     return 1;
 }
-
+#endif
 
 int icvGetCrossPieceVector(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f v2_start,CvPoint2D32f v2_end,CvPoint2D32f *cross)
 {
@@ -3527,11 +3527,12 @@ int icvGetCrossLineDirect(CvPoint2D32f p1,CvPoint2D32f p2,float a,float b,float
 
     cross->x = (float)X;
     cross->y = (float)Y;
-    
+
     return 1;
 }
 
-int cvComputeEpipoles( CvMatr32f camMatr1,  CvMatr32f camMatr2,
+#if 0
+static int cvComputeEpipoles( CvMatr32f camMatr1,  CvMatr32f camMatr2,
                             CvMatr32f rotMatr1,  CvMatr32f rotMatr2,
                             CvVect32f transVect1,CvVect32f transVect2,
                             CvVect32f epipole1,
@@ -3571,7 +3572,7 @@ int cvComputeEpipoles( CvMatr32f camMatr1,  CvMatr32f camMatr2,
     cvmMul( &ccamMatr1, &crotMatr1, &cmatrP1);
     cvmInvert( &cmatrP1,&cinvP1 );
     cvmMul( &ccamMatr1, &ctransVect1, &cvectp1 );
-    
+
     /* Compute second */
     cvmMul( &ccamMatr2, &crotMatr2, &cmatrP2 );
     cvmInvert( &cmatrP2,&cinvP2 );
@@ -3610,7 +3611,7 @@ int cvComputeEpipoles( CvMatr32f camMatr1,  CvMatr32f camMatr2,
 
     return CV_NO_ERR;
 }/* cvComputeEpipoles */
-
+#endif
 
 /* Compute epipoles for fundamental matrix */
 int cvComputeEpipolesFromFundMatrix(CvMatr32f fundMatr,
@@ -3632,7 +3633,7 @@ int cvComputeEpipolesFromFundMatrix(CvMatr32f fundMatr,
     epipole1->x = matrU->data.fl[6];
     epipole1->y = matrU->data.fl[7];
     epipole1->z = matrU->data.fl[8];
-    
+
     /* Get last row from V' and compute epipole2 */
     epipole2->x = matrV->data.fl[6];
     epipole2->y = matrV->data.fl[7];
@@ -3640,7 +3641,7 @@ int cvComputeEpipolesFromFundMatrix(CvMatr32f fundMatr,
 
     cvReleaseMat(&matrW);
     cvReleaseMat(&matrU);
-    cvReleaseMat(&matrV);    
+    cvReleaseMat(&matrV);
     return CV_OK;
 }
 
@@ -3660,7 +3661,7 @@ int cvConvertEssential2Fundamental( CvMatr32f essMatr,
     CvMat* invCM1T = cvCreateMat(3,3,CV_MAT32F);
 
     cvTranspose(&cameraMatr1C,tmpMatr);
-    cvInvert(tmpMatr,invCM1T);       
+    cvInvert(tmpMatr,invCM1T);
     cvmMul(invCM1T,&essMatrC,tmpMatr);
     cvInvert(&cameraMatr2C,invCM2);
     cvmMul(tmpMatr,invCM2,&fundMatrC);
@@ -3673,7 +3674,7 @@ int cvConvertEssential2Fundamental( CvMatr32f essMatr,
     cvReleaseMat(&invCM2);
     cvReleaseMat(&tmpMatr);
     cvReleaseMat(&invCM1T);
-    
+
     return CV_OK;
 }
 
@@ -3689,11 +3690,11 @@ int cvComputeEssentialMatrix(  CvMatr32f rotMatr,
     transMatr[0] =   0;
     transMatr[1] = - transVect[2];
     transMatr[2] =   transVect[1];
-    
+
     transMatr[3] =   transVect[2];
     transMatr[4] =   0;
     transMatr[5] = - transVect[0];
-    
+
     transMatr[6] = - transVect[1];
     transMatr[7] =   transVect[0];
     transMatr[8] =   0;
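
For orientation, the nine assignments in the hunk just above fill transMatr (row-major) with the skew-symmetric cross-product matrix of the translation vector t = (t1, t2, t3):

    [t]x = |  0   -t3   t2 |
           |  t3   0   -t1 |
           | -t2   t1   0  |

which is the standard ingredient of the essential matrix for a calibrated pair, E = [t]x * R. The multiplication by rotMatr itself happens in the remainder of cvComputeEssentialMatrix, outside the lines shown here, so take that last step as an inference from the function name rather than from the visible diff.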
index 2cfed90..1e9abf7 100644 (file)
@@ -48,7 +48,7 @@
 #include "_kdtree.hpp"
 #include "_featuretree.h"
 
-#if _MSC_VER >= 1400
+#if defined _MSC_VER && _MSC_VER >= 1400
 #pragma warning(disable:4996) // suppress "function call with parameters may be unsafe" in std::copy
 #endif
 
@@ -95,7 +95,7 @@ class CvKDTreeWrap : public CvFeatureTree {
 
     for (int j = 0; j < d->rows; ++j) {
       const typename __treetype::scalar_type* dj =
-       (const typename __treetype::scalar_type*) dptr;
+  (const typename __treetype::scalar_type*) dptr;
 
       int* resultsj = (int*) resultsptr;
       double* distj = (double*) distptr;
@@ -103,8 +103,8 @@ class CvKDTreeWrap : public CvFeatureTree {
 
       assert((int)nn.size() <= k);
       for (unsigned int j = 0; j < nn.size(); ++j) {
-       *resultsj++ = *nn[j].p;
-       *distj++ = nn[j].dist;
+  *resultsj++ = *nn[j].p;
+  *distj++ = nn[j].dist;
       }
       std::fill(resultsj, resultsj + k - nn.size(), -1);
       std::fill(distj, distj + k - nn.size(), 0);
@@ -117,16 +117,16 @@ class CvKDTreeWrap : public CvFeatureTree {
 
   template <class __treetype>
   int find_ortho_range(CvMat* bounds_min, CvMat* bounds_max,
-                      CvMat* results) {
+           CvMat* results) {
     int rn = results->rows * results->cols;
     std::vector<int> inbounds;
     dispatch_cvtype(mat, ((__treetype*)data)->
-                   find_ortho_range((typename __treetype::scalar_type*)bounds_min->data.ptr, 
-                                    (typename __treetype::scalar_type*)bounds_max->data.ptr, 
-                                    inbounds));
+        find_ortho_range((typename __treetype::scalar_type*)bounds_min->data.ptr,
+             (typename __treetype::scalar_type*)bounds_max->data.ptr,
+             inbounds));
     std::copy(inbounds.begin(),
-             inbounds.begin() + std::min((int)inbounds.size(), rn),
-             (int*) results->data.ptr);
+        inbounds.begin() + std::min((int)inbounds.size(), rn),
+        (int*) results->data.ptr);
     return (int)inbounds.size();
   }
 
@@ -135,7 +135,7 @@ class CvKDTreeWrap : public CvFeatureTree {
 public:
   CvKDTreeWrap(CvMat* _mat) : mat(_mat) {
     // * a flag parameter should tell us whether
-    // * (a) user ensures *mat outlives *this and is unchanged, 
+    // * (a) user ensures *mat outlives *this and is unchanged,
     // * (b) we take reference and user ensures mat is unchanged,
     // * (c) we copy data, (d) we own and release data.
 
@@ -144,8 +144,8 @@ public:
       tmp[j] = j;
 
     dispatch_cvtype(mat, data = new tree_type
-                   (&tmp[0], &tmp[0] + tmp.size(), mat->cols,
-                    tree_type::deref_type(mat)));
+        (&tmp[0], &tmp[0] + tmp.size(), mat->cols,
+         tree_type::deref_type(mat)));
   }
   ~CvKDTreeWrap() {
     dispatch_cvtype(mat, delete (tree_type*) data);
@@ -185,15 +185,15 @@ public:
     assert(CV_MAT_TYPE(results->type) == CV_32SC1);
 
     dispatch_cvtype(mat, find_nn<tree_type>
-                   (desc, k, emax, results, dist));
+        (desc, k, emax, results, dist));
   }
   int FindOrthoRange(CvMat* bounds_min, CvMat* bounds_max,
-                    CvMat* results) {
+         CvMat* results) {
     bool free_bounds = false;
     int count = -1;
 
     if (bounds_min->cols * bounds_min->rows != dims() ||
-       bounds_max->cols * bounds_max->rows != dims())
+  bounds_max->cols * bounds_max->rows != dims())
       CV_Error(CV_StsUnmatchedSizes, "bounds_{min,max} must 1 x dims or dims x 1");
     if (CV_MAT_TYPE(bounds_min->type) != CV_MAT_TYPE(bounds_max->type))
       CV_Error(CV_StsUnmatchedFormats, "bounds_{min,max} must have same type");
@@ -218,7 +218,7 @@ public:
     assert(bounds_max->rows * bounds_max->cols == dims());
 
     dispatch_cvtype(mat, count = find_ortho_range<tree_type>
-                   (bounds_min, bounds_max,results));
+        (bounds_min, bounds_max,results));
 
     if (free_bounds) {
       cvReleaseMat(&bounds_min);
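
A note on the first hunk of this file: with an undefined-macro check enabled (something like -Wundef; the exact option name is an assumption), a bare test such as #if _MSC_VER >= 1400 warns on non-MSVC builds because the undefined macro is silently treated as 0. Gating the comparison on defined keeps the MSVC-only pragma intact while staying quiet elsewhere:

/* sketch of the guard idiom: harmless on compilers where the macro does not exist
 * (an equivalent form is #ifdef _MSC_VER with a nested #if on the version) */
#if defined _MSC_VER && _MSC_VER >= 1400
#  pragma warning(disable:4996)   /* "function call with parameters may be unsafe" in std::copy */
#endif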
index 518ec77..9a20fa5 100644 (file)
@@ -1247,7 +1247,7 @@ int _cvSolveEqu1th(T c1, T c0, T* X);
         vertices_number: in, number of vertices in polygon
      Return     :
     --------------------------------------------------------------------------*/
-void _cvSetSeqBlockSize(CvVoronoiDiagramInt* pVoronoiDiagramInt,int vertices_number)
+static void _cvSetSeqBlockSize(CvVoronoiDiagramInt* pVoronoiDiagramInt,int vertices_number)
 {
     int N = 2*vertices_number;
     cvSetSeqBlockSize(pVoronoiDiagramInt->SiteSeq,N*pVoronoiDiagramInt->SiteSeq->elem_size);
index 976f6f4..aa2514e 100644 (file)
@@ -50,6 +50,7 @@
 typedef void (*pointer_LMJac)( const CvMat* src, CvMat* dst );
 typedef void (*pointer_LMFunc)( const CvMat* src, CvMat* dst );
 
+#if 0
 /* Optimization using Levenberg-Marquardt */
 void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
                                     pointer_LMFunc function,
@@ -75,7 +76,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
     CvMat *matrJtJN = 0;
     CvMat *matrJt = 0;
     CvMat *vectB = 0;
-   
+
     CV_FUNCNAME( "cvLevenbegrMarquardtOptimization" );
     __BEGIN__;
 
@@ -104,7 +105,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
     {
         CV_ERROR( CV_StsUnmatchedSizes, "Number of colomn of vector X0 must be 1" );
     }
-    
+
     if( observRes->cols != 1 )
     {
         CV_ERROR( CV_StsUnmatchedSizes, "Number of colomn of vector observed rusult must be 1" );
@@ -157,8 +158,8 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
         /* Print result of function to file */
 
         /* Compute error */
-        cvSub(observRes,resFunc,error);        
-        
+        cvSub(observRes,resFunc,error);
+
         //valError = error_function(observRes,resFunc);
         /* Need to use new version of computing error (norm) */
         valError = cvNorm(observRes,resFunc);
@@ -169,7 +170,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
         /* Define optimal delta for J'*J*delta=J'*error */
         /* compute J'J */
         cvMulTransposed(Jac,matrJtJ,1);
-        
+
         cvCopy(matrJtJ,matrJtJN);
 
         /* compute J'*error */
@@ -244,6 +245,7 @@ void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
 
     return;
 }
+#endif
 
 /*------------------------------------------------------------------------------*/
 #if 0
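
For readers of the routine that this file now compiles out: the comments inside it describe one iteration of a damped Gauss-Newton (Levenberg-Marquardt) update. With J the Jacobian produced by JacobianFunction at the current estimate X, and error = observRes - resFunc (the cvSub call above), the step referred to as delta solves the normal equations

    (J' * J) * delta = J' * error

where cvMulTransposed(Jac, matrJtJ, 1) forms J'J. The copy into matrJtJN looks like the place where the Levenberg-Marquardt damping term is added to the diagonal before solving, but that part of the loop lies outside the lines shown here, so treat it as an assumption based on the variable names.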
index 1581896..467c5d2 100644 (file)
@@ -65,9 +65,13 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
 */
 #define TRACK_BUNDLE_FILE            "d:\\test\\bundle.txt"
 
+void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPoints,
+                                       CvMat** pointsPres, int numImages,
+                                       CvMat** resultProjMatrs, CvMat* resultPoints4D,int maxIter,double epsilon );
+
 
 /* ============== Bundle adjustment optimization ================= */
-void icvComputeDerivateProj(CvMat *points4D,CvMat *projMatr, CvMat *status, CvMat *derivProj)
+static void icvComputeDerivateProj(CvMat *points4D,CvMat *projMatr, CvMat *status, CvMat *derivProj)
 {
     /* Compute derivate for given projection matrix points and status of points */
 
@@ -201,7 +205,7 @@ void icvComputeDerivateProj(CvMat *points4D,CvMat *projMatr, CvMat *status, CvMa
 }
 /*======================================================================================*/
 
-void icvComputeDerivateProjAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **projDerives)
+static void icvComputeDerivateProjAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **projDerives)
 {
     CV_FUNCNAME( "icvComputeDerivateProjAll" );
     __BEGIN__;
@@ -228,7 +232,7 @@ void icvComputeDerivateProjAll(CvMat *points4D, CvMat **projMatrs, CvMat **point
 }
 /*======================================================================================*/
 
-void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints, CvMat *derivPoint)
+static void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints, CvMat *derivPoint)
 {
 
     CV_FUNCNAME( "icvComputeDerivatePoints" );
@@ -267,7 +271,7 @@ void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints
     {
         CV_ERROR( CV_StsOutOfRange, "Size of projection matrix (projMatr) must be 3x4" );
     }
-    
+
     if( !CV_IS_MAT(presPoints) )
     {
         CV_ERROR( CV_StsUnsupportedFormat, "Status must be a matrix 1xN" );
@@ -282,10 +286,10 @@ void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints
     {
         CV_ERROR( CV_StsUnsupportedFormat, "derivPoint must be a matrix 2 x 4VisNum" );
     }
-    /* ----- End test ----- */    
-    
+    /* ----- End test ----- */
+
     /* Compute derivates by points */
-        
+
     double p[12];
     int i;
     for( i = 0; i < 12; i++ )
@@ -311,16 +315,16 @@ void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints
             piX[0] = X[0]*p[0] + X[1]*p[1] + X[2]*p[2]  + X[3]*p[3];
             piX[1] = X[0]*p[4] + X[1]*p[5] + X[2]*p[6]  + X[3]*p[7];
             piX[2] = X[0]*p[8] + X[1]*p[9] + X[2]*p[10] + X[3]*p[11];
-            
+
             int i,j;
 
             double tmp3 = 1/(piX[2]*piX[2]);
-                      
+
             for( j = 0; j < 2; j++ )//for x and y
             {
                 for( i = 0; i < 4; i++ )// for X,Y,Z,W
                 {
-                    cvmSet( derivPoint, 
+                    cvmSet( derivPoint,
                             j, currVisPoint*4+i,
                             (p[j*4+i]*piX[2]-p[8+i]*piX[j]) * tmp3  );
                 }
@@ -337,8 +341,9 @@ void icvComputeDerivatePoints(CvMat *points4D,CvMat *projMatr, CvMat *presPoints
     __END__;
     return;
 }
+
 /*======================================================================================*/
-void icvComputeDerivatePointsAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **pointDerives)
+static void icvComputeDerivatePointsAll(CvMat *points4D, CvMat **projMatrs, CvMat **pointPres, int numImages,CvMat **pointDerives)
 {
     CV_FUNCNAME( "icvComputeDerivatePointsAll" );
     __BEGIN__;
@@ -364,7 +369,7 @@ void icvComputeDerivatePointsAll(CvMat *points4D, CvMat **projMatrs, CvMat **poi
     return;
 }
 /*======================================================================================*/
-void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, CvMat **matrV)
+static void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, CvMat **matrV)
 {
     int *shifts = 0;
 
@@ -404,10 +409,10 @@ void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, C
                 {
                     if( cvmGet(presPoints[currImage],0,currPoint) > 0 )
                     {
-                        sum += cvmGet(pointDeriv[currImage],0,shifts[currImage]*4+i) * 
+                        sum += cvmGet(pointDeriv[currImage],0,shifts[currImage]*4+i) *
                                cvmGet(pointDeriv[currImage],0,shifts[currImage]*4+j);
 
-                        sum += cvmGet(pointDeriv[currImage],1,shifts[currImage]*4+i) * 
+                        sum += cvmGet(pointDeriv[currImage],1,shifts[currImage]*4+i) *
                                cvmGet(pointDeriv[currImage],1,shifts[currImage]*4+j);
                     }
                 }
@@ -429,11 +434,11 @@ void icvComputeMatrixVAll(int numImages,CvMat **pointDeriv,CvMat **presPoints, C
 
     __END__;
     cvFree( &shifts);
-    
+
     return;
 }
 /*======================================================================================*/
-void icvComputeMatrixUAll(int numImages,CvMat **projDeriv,CvMat** matrU)
+static void icvComputeMatrixUAll(int numImages,CvMat **projDeriv,CvMat** matrU)
 {
     CV_FUNCNAME( "icvComputeMatrixVAll" );
     __BEGIN__;
@@ -460,7 +465,7 @@ void icvComputeMatrixUAll(int numImages,CvMat **projDeriv,CvMat** matrU)
     return;
 }
 /*======================================================================================*/
-void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvMat **presPoints, CvMat *matrW)
+static void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvMat **presPoints, CvMat *matrW)
 {
     CV_FUNCNAME( "icvComputeMatrixW" );
     __BEGIN__;
@@ -509,10 +514,10 @@ void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvM
                     for( int currCol = 0; currCol < 4; currCol++ )
                     {
                         double sum;
-                        sum = cvmGet(projDeriv[currImage],currVis*2+0,currLine) * 
+                        sum = cvmGet(projDeriv[currImage],currVis*2+0,currLine) *
                               cvmGet(pointDeriv[currImage],0,currVis*4+currCol);
 
-                        sum += cvmGet(projDeriv[currImage],currVis*2+1,currLine) * 
+                        sum += cvmGet(projDeriv[currImage],currVis*2+1,currLine) *
                               cvmGet(pointDeriv[currImage],1,currVis*4+currCol);
 
                         cvmSet(matrW,currImage*12+currLine,currPoint*4+currCol,sum);
@@ -529,7 +534,7 @@ void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvM
             }
         }
     }
-    
+
 #ifdef TRACK_BUNDLE
     {
         FILE *file;
@@ -560,9 +565,10 @@ void icvComputeMatrixW(int numImages, CvMat **projDeriv, CvMat **pointDeriv, CvM
     __END__;
     return;
 }
+
 /*======================================================================================*/
 /* Compute jacobian mult projection matrices error */
-void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,CvMat *jacProjErr )
+static void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,CvMat *jacProjErr )
 {
     CV_FUNCNAME( "icvComputeJacErrorProj" );
     __BEGIN__;
@@ -596,7 +602,7 @@ void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,C
             double sum = 0;
             for( int i = 0; i < num; i++ )
             {
-                sum += cvmGet(projDeriv[currImage],i,currCol) * 
+                sum += cvmGet(projDeriv[currImage],i,currCol) *
                        cvmGet(projErrors[currImage],i%2,i/2);
             }
             cvmSet(jacProjErr,currImage*12+currCol,0,sum);
@@ -627,9 +633,10 @@ void icvComputeJacErrorProj(int numImages,CvMat **projDeriv,CvMat **projErrors,C
     __END__;
     return;
 }
+
 /*======================================================================================*/
 /* Compute jacobian mult points error */
-void icvComputeJacErrorPoint(int numImages,CvMat **pointDeriv,CvMat **projErrors, CvMat **presPoints,CvMat *jacPointErr )
+static void icvComputeJacErrorPoint(int numImages,CvMat **pointDeriv,CvMat **projErrors, CvMat **presPoints,CvMat *jacPointErr )
 {
     int *shifts = 0;
 
@@ -734,6 +741,7 @@ void icvComputeJacErrorPoint(int numImages,CvMat **pointDeriv,CvMat **projErrors
 }
 /*======================================================================================*/
 
+
 /* Reconstruct 4D points using status */
 void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat** presPoints,
                                   CvMat *points4D,int numImages,CvMat **projError)
@@ -797,7 +805,7 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
                 numVisProj++;
             }
         }
-        
+
         if( numVisProj < 2 )
         {
             /* This point can't be reconstructed */
@@ -821,7 +829,7 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
                 y = cvmGet(projPoints[currImage],1,currPoint);
                 for( int k = 0; k < 4; k++ )
                 {
-                    matrA_dat[currVisProj*12   + k] = 
+                    matrA_dat[currVisProj*12   + k] =
                            x * cvmGet(projMatrs[currImage],2,k) -     cvmGet(projMatrs[currImage],0,k);
 
                     matrA_dat[currVisProj*12+4 + k] =
@@ -854,7 +862,7 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
             CvMat point3D;
             double point3D_dat[3];
             point3D = cvMat(3,1,CV_64F,point3D_dat);
-            
+
             int currPoint;
             int numVis = 0;
             double totalError = 0;
@@ -897,7 +905,7 @@ void icvReconstructPoints4DStatus(CvMat** projPoints, CvMat **projMatrs, CvMat**
 
 /*======================================================================================*/
 
-void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs, CvMat **pointsPres, CvMat **projPoints)
+static void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs, CvMat **pointsPres, CvMat **projPoints)
 {
     CV_FUNCNAME( "icvProjPointsStatusFunc" );
     __BEGIN__;
@@ -943,7 +951,7 @@ void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs,
             fclose(file);
         }
 #endif
-    
+
     int currImage;
     for( currImage = 0; currImage < numImages; currImage++ )
     {
@@ -969,7 +977,7 @@ void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs,
                     fclose(file);
                 }
 #endif
-                
+
                 cvmMul(projMatrs[currImage],&point4D,&point3D);
                 double w = point3D_dat[2];
                 cvmSet(projPoints[currImage],0,currVisPoint,point3D_dat[0]/w);
@@ -998,11 +1006,11 @@ void icvProjPointsStatusFunc( int numImages, CvMat *points4D, CvMat **projMatrs,
 }
 
 /*======================================================================================*/
-void icvFreeMatrixArray(CvMat ***matrArray,int numMatr)
+static void icvFreeMatrixArray(CvMat ***matrArray,int numMatr)
 {
     /* Free each matrix */
     int currMatr;
-    
+
     if( *matrArray != 0 )
     {/* Need delete */
         for( currMatr = 0; currMatr < numMatr; currMatr++ )
@@ -1015,7 +1023,7 @@ void icvFreeMatrixArray(CvMat ***matrArray,int numMatr)
 }
 
 /*======================================================================================*/
-void *icvClearAlloc(int size)
+static void *icvClearAlloc(int size)
 {
     void *ptr = 0;
 
@@ -1047,6 +1055,7 @@ int icvDeleteSparsInPoints(  int numImages,
 
 }
 #endif
+
 /*======================================================================================*/
 /* !!! may be useful to return norm of error */
 /* !!! may be does not work correct with not all visible 4D points */
@@ -1054,15 +1063,15 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
                                        CvMat** pointsPres, int numImages,
                                        CvMat** resultProjMatrs, CvMat* resultPoints4D,int maxIter,double epsilon )
 {
-    
+
     CvMat  *vectorX_points4D = 0;
-    CvMat **vectorX_projMatrs = 0;    
+    CvMat **vectorX_projMatrs = 0;
 
     CvMat  *newVectorX_points4D = 0;
     CvMat **newVectorX_projMatrs = 0;
 
     CvMat  *changeVectorX_points4D = 0;
-    CvMat  *changeVectorX_projMatrs = 0;  
+    CvMat  *changeVectorX_projMatrs = 0;
 
     CvMat **observVisPoints = 0;
     CvMat **projVisPoints = 0;
@@ -1097,17 +1106,17 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
     {
         CV_ERROR( CV_StsOutOfRange, "Number of images must be more than zero" );
     }
-    
+
     if( maxIter < 1 || maxIter > 2000 )
     {
         CV_ERROR( CV_StsOutOfRange, "Maximum number of iteration must be in [1..1000]" );
     }
-    
+
     if( epsilon < 0  )
     {
         CV_ERROR( CV_StsOutOfRange, "Epsilon parameter must be >= 0" );
     }
-    
+
     if( !CV_IS_MAT(resultPoints4D) )
     {
         CV_ERROR( CV_StsUnsupportedFormat, "resultPoints4D must be a matrix 4 x NumPnt" );
@@ -1139,7 +1148,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
     CV_CALL( changeVectorX_projMatrs = cvCreateMat(3,4,CV_64F));
 
     int currImage;
-    
+
     /* ----- Test input params ----- */
     for( currImage = 0; currImage < numImages; currImage++ )
     {
@@ -1355,7 +1364,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
                 double norm = cvNorm(vectorX_projMatrs[i]);
                 fprintf(file,"        test 6.01 prev normProj=%lf\n",norm);
             }
-            
+
             fclose(file);
         }
 #endif
@@ -1384,7 +1393,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
                 double norm = cvNorm(matrsUk[i]);
                 fprintf(file,"        test 6.01 prev matrsUk=%lf\n",norm);
             }
-            
+
             for( i = 0; i < numPoints; i++ )
             {
                 double norm = cvNorm(matrsVi[i]);
@@ -1427,7 +1436,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
                 double norm = cvNorm(matrsUk[i]);
                 fprintf(file,"        test 6.01 post1 matrsUk=%lf\n",norm);
             }
-            
+
             for( i = 0; i < numPoints; i++ )
             {
                 double norm = cvNorm(matrsVi[i]);
@@ -1612,7 +1621,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
                 newError += currNorm * currNorm;
             }
             newError = sqrt(newError);
-            
+
             currIter++;
 
 
@@ -1732,7 +1741,7 @@ void cvOptimizeLevenbergMarquardtBundle( CvMat** projMatrs, CvMat** observProjPo
 
 
     } while( change > epsilon && currIter < maxIter );
-     
+
     /*--------------------------------------------*/
     /* Optimization complete copy computed params */
     /* Copy projection matrices */
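
Note on the pattern above: apart from whitespace cleanup, these hunks mark the file-local icv* helpers (icvComputeDerivateProj, icvComputeMatrixW, icvProjPointsStatusFunc, ...) as static and add a forward declaration of cvOptimizeLevenbergMarquardtBundle ahead of them. A minimal sketch of that pattern follows, with hypothetical names; the exact gcc flags enabled by the stricter rules are an assumption here (something along the lines of -Wmissing-declarations, which warns when a non-static function is defined without a prior declaration).

    // sketch.cpp -- hypothetical names, not part of the patch

    // Prior declaration of the externally visible function, so its later
    // definition is not the first declaration the compiler sees.
    int publicEntryPoint(int x);

    // File-local helper: internal linkage, so no prototype is expected in a header.
    static int internalHelper(int x)
    {
        return 2 * x;
    }

    int publicEntryPoint(int x)
    {
        return internalHelper(x) + 1;
    }

Giving the helpers internal linkage also documents that they are private to this translation unit.
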
index e8a7ad5..e86b52f 100644
@@ -46,6 +46,8 @@
 
 /* Valery Mosyagin */
 
+#if 0
+
 typedef void (*pointer_LMJac)( const CvMat* src, CvMat* dst );
 typedef void (*pointer_LMFunc)( const CvMat* src, CvMat* dst );
 
@@ -61,7 +63,7 @@ void icvReconstructPointsFor3View( CvMat* projMatr1,CvMat* projMatr2,CvMat* proj
 
 
 /* Jacobian computation for trifocal case */
-void icvJacobianFunction_ProjTrifocal(const CvMat *vectX,CvMat *Jacobian)
+static void icvJacobianFunction_ProjTrifocal(const CvMat *vectX,CvMat *Jacobian)
 {
     CV_FUNCNAME( "icvJacobianFunction_ProjTrifocal" );
     __BEGIN__;
@@ -101,7 +103,7 @@ void icvJacobianFunction_ProjTrifocal(const CvMat *vectX,CvMat *Jacobian)
     /* Fill Jacobian matrix */
     int currProjPoint;
     int currMatr;
-    
+
     cvZero(Jacobian);
     for( currMatr = 0; currMatr < 3; currMatr++ )
     {
@@ -137,7 +139,7 @@ void icvJacobianFunction_ProjTrifocal(const CvMat *vectX,CvMat *Jacobian)
             {
                 for( i = 0; i < 4; i++ )// for X,Y,Z,W
                 {
-                    cvmSet( Jacobian, 
+                    cvmSet( Jacobian,
                             currMatr*numPoints*2+currProjPoint*2+j, 36+currProjPoint*4+i,
                             (p[j*4+i]*piX[2]-p[8+i]*piX[j]) * tmp3  );
                 }
@@ -161,7 +163,7 @@ void icvJacobianFunction_ProjTrifocal(const CvMat *vectX,CvMat *Jacobian)
     return;
 }
 
-void icvFunc_ProjTrifocal(const CvMat *vectX, CvMat *resFunc)
+static void icvFunc_ProjTrifocal(const CvMat *vectX, CvMat *resFunc)
 {
     /* Computes function in a given point */
     /* Computers project points using 3 projection matrices and points 3D */
@@ -264,7 +266,7 @@ void icvFunc_ProjTrifocal(const CvMat *vectX, CvMat *resFunc)
 
 /*----------------------------------------------------------------------------------------*/
 
-void icvOptimizeProjectionTrifocal(CvMat **projMatrs,CvMat **projPoints,
+static void icvOptimizeProjectionTrifocal(CvMat **projMatrs,CvMat **projPoints,
                                 CvMat **resultProjMatrs, CvMat *resultPoints4D)
 {
 
@@ -312,7 +314,7 @@ void icvOptimizeProjectionTrifocal(CvMat **projMatrs,CvMat **projPoints,
         {
             CV_ERROR( CV_StsNullPtr, "Some of projPoints is a NULL pointer" );
         }
-    
+
         if( resultProjMatrs[i] == 0 )
         {
             CV_ERROR( CV_StsNullPtr, "Some of resultProjMatrs is a NULL pointer" );
@@ -402,7 +404,7 @@ void icvOptimizeProjectionTrifocal(CvMat **projMatrs,CvMat **projPoints,
         cvmSet(vectorX0,36 + currPoint*4 + 3,0,cvmGet(points4D,3,currPoint));
     }
 
-    
+
     /* Allocate memory for result */
     cvLevenbergMarquardtOptimization( icvJacobianFunction_ProjTrifocal, icvFunc_ProjTrifocal,
                                       vectorX0,observRes,optimX,100,1e-6);
@@ -441,7 +443,7 @@ void icvOptimizeProjectionTrifocal(CvMat **projMatrs,CvMat **projPoints,
 
 /*------------------------------------------------------------------------------*/
 /* Create good points using status information */
-void icvCreateGoodPoints(CvMat *points,CvMat **goodPoints, CvMat *status)
+static void icvCreateGoodPoints(CvMat *points,CvMat **goodPoints, CvMat *status)
 {
     *goodPoints = 0;
 
@@ -493,3 +495,4 @@ void icvCreateGoodPoints(CvMat *points,CvMat **goodPoints, CvMat *status)
     return;
 }
 
+#endif
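
Note on the pattern above: this file's changes go further than adding static. The whole body of the translation unit is wrapped in #if 0 ... #endif, presumably so that code nothing currently calls stays in the tree but is no longer compiled (and can no longer trip unused-function or missing-declaration diagnostics). A minimal sketch of that idiom, with hypothetical names:

    // sketch.cpp -- hypothetical names, not part of the patch

    #if 0   // dead code kept for reference; the preprocessor removes it entirely
    static int legacyExperimentalPath(int x)
    {
        return x + 1;
    }
    #endif

    int main()
    {
        return 0;   // nothing references legacyExperimentalPath(); no warning can fire
    }
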
index fbc5696..851b00b 100644
@@ -87,7 +87,7 @@ double _cvStretchingWork(CvPoint2D32f* P1,
 
     L1 = sqrt( (double)P1->x*P1->x + P1->y*P1->y);
     L2 = sqrt( (double)P2->x*P2->x + P2->y*P2->y);
-    
+
     L_min = MIN(L1, L2);
     dL = fabs( L1 - L2 );
 
@@ -96,15 +96,15 @@ double _cvStretchingWork(CvPoint2D32f* P1,
 
 
 ////////////////////////////////////////////////////////////////////////////////////
+CvPoint2D32f Q( CvPoint2D32f q0, CvPoint2D32f q1, CvPoint2D32f q2, double t );
+double angle( CvPoint2D32f A, CvPoint2D32f B );
+
 double _cvBendingWork(  CvPoint2D32f* B0,
                         CvPoint2D32f* F0,
                         CvPoint2D32f* B1,
                         CvPoint2D32f* F1/*,
                         CvPoint* K*/)
 {
-    CvPoint2D32f Q( CvPoint2D32f q0, CvPoint2D32f q1, CvPoint2D32f q2, double t );
-    double angle( CvPoint2D32f A, CvPoint2D32f B );
-
     CvPoint2D32f Q0, Q1, Q2;
     CvPoint2D32f Q1_nm = { 0, 0 }, Q2_nm = { 0, 0 };
     double d0, d1, d2, des, t_zero;
@@ -140,7 +140,7 @@ double _cvBendingWork(  CvPoint2D32f* B0,
         d_angle = d_angle - CV_PI*0.5;
         d_angle = fabs(d_angle);
 
-        
+
         K->x = -K->x;
         K->y = -K->y;
         B1->x = -B1->x;
@@ -427,7 +427,7 @@ void _cvWorkSouthEast(int i, int j, _CvWork** W, CvPoint2D32f* edges1, CvPoint2D
     small_edge.y = NULL_EDGE*edges1[i-2].y;
 
     w1 = W[i-1][j-1].w_east + _cvBendingWork(&edges1[i-2],
-                                            &edges1[i-1],                                           
+                                            &edges1[i-1],
                                             /*&null_edge*/&small_edge,
                                             &edges2[j-1]/*,
                                             &edges2[j-2]*/);
@@ -442,7 +442,7 @@ void _cvWorkSouthEast(int i, int j, _CvWork** W, CvPoint2D32f* edges1, CvPoint2D
     small_edge.y = NULL_EDGE*edges2[j-2].y;
 
     w3 = W[i-1][j-1].w_south + _cvBendingWork(  /*&null_edge*/&small_edge,
-                                                &edges1[i-1],                                           
+                                                &edges1[i-1],
                                                 &edges2[j-2],
                                                 &edges2[j-1]/*,
                                                 &edges1[i-2]*/);
@@ -511,6 +511,7 @@ void _cvWorkSouth(int i, int j, _CvWork** W, CvPoint2D32f* edges1, CvPoint2D32f*
     }
 }
 
+
 //===================================================
 CvPoint2D32f Q(CvPoint2D32f q0,CvPoint2D32f q1,CvPoint2D32f q2,double t)
 {
@@ -519,14 +520,14 @@ CvPoint2D32f Q(CvPoint2D32f q0,CvPoint2D32f q1,CvPoint2D32f q2,double t)
     q.x = (float)(q0.x*(1-t)*(1-t) + 2*q1.x*t*(1-t) + q2.x*t*t);
     q.y = (float)(q0.y*(1-t)*(1-t) + 2*q1.y*t*(1-t) + q2.y*t*t);
 
-    return q;       
+    return q;
 }
 
 double angle(CvPoint2D32f A, CvPoint2D32f B)
 {
     return acos( (A.x*B.x + A.y*B.y)/sqrt( (double)(A.x*A.x + A.y*A.y)*(B.x*B.x + B.y*B.y) ) );
 }
-
+#if 0
 /***************************************************************************************\
 *
 *   This function compute intermediate polygon between contour1 and contour2
@@ -536,14 +537,14 @@ double angle(CvPoint2D32f A, CvPoint2D32f B)
 *   param = [0,1];  0 correspondence to contour1, 1 - contour2
 *
 \***************************************************************************************/
-CvSeq* icvBlendContours(CvSeq* contour1, 
+static CvSeq* icvBlendContours(CvSeq* contour1,
                         CvSeq* contour2,
                         CvSeq* corr,
                         double param,
                         CvMemStorage* storage)
 {
     int j;
-    
+
     CvSeqWriter writer01;
     CvSeqReader reader01;
 
@@ -558,7 +559,7 @@ CvSeq* icvBlendContours(CvSeq* contour1,
     int corr_point;
 
     // Create output sequence.
-    CvSeq* output = cvCreateSeq(0,                      
+    CvSeq* output = cvCreateSeq(0,
                                 sizeof(CvSeq),
                                 sizeof(CvPoint),
                                 storage );
@@ -570,7 +571,7 @@ CvSeq* icvBlendContours(CvSeq* contour1,
     point1 = (CvPoint* )malloc( Ni*sizeof(CvPoint) );
     point2 = (CvPoint* )malloc( Nj*sizeof(CvPoint) );
 
-    // Initialize arrays of point 
+    // Initialize arrays of point
     cvCvtSeqToArray( contour1, point1, CV_WHOLE_SEQ );
     cvCvtSeqToArray( contour2, point2, CV_WHOLE_SEQ );
 
@@ -583,7 +584,7 @@ CvSeq* icvBlendContours(CvSeq* contour1,
 
     i = Ni-1; //correspondence to points of contour1
     for( ; corr; corr = corr->h_next )
-    {       
+    {
         //Initializes process of sequential reading from sequence
         cvStartReadSeq( corr, &reader01, 0 );
 
@@ -595,7 +596,7 @@ CvSeq* icvBlendContours(CvSeq* contour1,
             // Compute point of intermediate polygon.
             point_output.x = cvRound(point1[i].x + param*( point2[corr_point].x - point1[i].x ));
             point_output.y = cvRound(point1[i].y + param*( point2[corr_point].y - point1[i].y ));
-            
+
             // Write element to sequence.
             CV_WRITE_SEQ_ELEM( point_output, writer01 );
         }
@@ -603,7 +604,7 @@ CvSeq* icvBlendContours(CvSeq* contour1,
     }
     // Updates sequence header.
     cvFlushSeqWriter( &writer01 );
-    
+
     return output;
 }
 
@@ -621,9 +622,9 @@ CvSeq* icvBlendContours(CvSeq* contour1,
 **************************************************************************************************/
 
 
-void icvCalcContoursCorrespondence(CvSeq* contour1, 
-                                   CvSeq* contour2, 
-                                   CvSeq** corr, 
+static void icvCalcContoursCorrespondence(CvSeq* contour1,
+                                   CvSeq* contour2,
+                                   CvSeq** corr,
                                    CvMemStorage* storage)
 {
     int i,j;                    // counter of cycles
@@ -660,7 +661,7 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
     edges1 = (CvPoint2D32f* )malloc( (Ni-1)*sizeof(CvPoint2D32f) );
     edges2 = (CvPoint2D32f* )malloc( (Nj-1)*sizeof(CvPoint2D32f) );
 
-    // Initialize arrays of point 
+    // Initialize arrays of point
     cvCvtSeqToArray( contour1, point1, CV_WHOLE_SEQ );
     cvCvtSeqToArray( contour2, point2, CV_WHOLE_SEQ );
 
@@ -679,7 +680,7 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
         edges2[i].y = (float)( point2[i+1].y - point2[i].y );
     };
 
-    // Find infinity constant 
+    // Find infinity constant
     //inf=1;
 /////////////
 
@@ -716,11 +717,11 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
     {
         j=0;/////////
         W[i][j].w_east = W[i-1][j].w_east;
-        W[i][j].w_east = W[i][j].w_east /*+ 
+        W[i][j].w_east = W[i][j].w_east /*+
             _cvBendingWork( &edges1[i-2], &edges1[i-1], &null_edge, &null_edge, NULL )*/;
         W[i][j].w_east = W[i][j].w_east + _cvStretchingWork( &edges2[i-1], &null_edge );
         W[i][j].path_e = PATH_TO_E;
-        
+
         j=1;//////////
         W[i][j].w_south = inf;
 
@@ -732,18 +733,18 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
         small_edge.x = NULL_EDGE*edges1[i-2].x;
         small_edge.y = NULL_EDGE*edges1[i-2].y;
 
-        W[i][j].w_southeast = W[i][j].w_southeast + 
+        W[i][j].w_southeast = W[i][j].w_southeast +
             _cvBendingWork( &edges1[i-2], &edges1[i-1], /*&null_edge*/&small_edge, &edges2[j-1]/*, &edges2[Nj-2]*/);
 
         W[i][j].path_se = PATH_TO_E;
     }
 
     for(j=2; j<Nj; j++)
-    {       
+    {
         i=0;//////////
         W[i][j].w_south = W[i][j-1].w_south;
         W[i][j].w_south = W[i][j].w_south + _cvStretchingWork( &null_edge, &edges2[j-1] );
-        W[i][j].w_south = W[i][j].w_south /*+ 
+        W[i][j].w_south = W[i][j].w_south /*+
             _cvBendingWork( &null_edge, &null_edge, &edges2[j-2], &edges2[j-1], NULL )*/;
         W[i][j].path_s = 3;
 
@@ -758,7 +759,7 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
         small_edge.x = NULL_EDGE*edges2[j-2].x;
         small_edge.y = NULL_EDGE*edges2[j-2].y;
 
-        W[i][j].w_southeast = W[i][j].w_southeast + 
+        W[i][j].w_southeast = W[i][j].w_southeast +
             _cvBendingWork( /*&null_edge*/&small_edge, &edges1[i-1], &edges2[j-2], &edges2[j-1]/*, &edges1[Ni-2]*/);
         W[i][j].path_se = 3;
     }
@@ -773,8 +774,8 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
 
     i=Ni-1;j=Nj-1;
 
-    *corr = cvCreateSeq(0,                    
-                        sizeof(CvSeq),        
+    *corr = cvCreateSeq(0,
+                        sizeof(CvSeq),
                         sizeof(int),
                         storage );
 
@@ -806,26 +807,26 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
     {
         CV_WRITE_SEQ_ELEM( j, writer );
 
-        switch( path ) 
+        switch( path )
         {
         case PATH_TO_E:
             path = W[i][j].path_e;
             i--;
             cvFlushSeqWriter( &writer );
-            corr01->h_next = cvCreateSeq(   0,                    
-                                            sizeof(CvSeq),        
+            corr01->h_next = cvCreateSeq(   0,
+                                            sizeof(CvSeq),
                                             sizeof(int),
                                             storage );
             corr01 = corr01->h_next;
             cvStartAppendToSeq( corr01, &writer );
             break;
-        
+
         case PATH_TO_SE:
             path = W[i][j].path_se;
             j--; i--;
             cvFlushSeqWriter( &writer );
-            corr01->h_next = cvCreateSeq(   0,                    
-                                            sizeof(CvSeq),        
+            corr01->h_next = cvCreateSeq(   0,
+                                            sizeof(CvSeq),
                                             sizeof(int),
                                             storage );
             corr01 = corr01->h_next;
@@ -852,4 +853,4 @@ void icvCalcContoursCorrespondence(CvSeq* contour1,
     free(edges1);
     free(edges2);
 }
-
+#endif
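
Note on the pattern above: here the prototypes of Q() and angle() are hoisted out of _cvBendingWork's body to file scope, and the unused icvBlendContours / icvCalcContoursCorrespondence pair is fenced off with #if 0. Declaring a function inside another function's body is legal, but it leaves the later definition without a prior declaration visible at file scope, which stricter gcc settings typically flag. A minimal sketch of the hoisted-prototype arrangement, with hypothetical names standing in for Q() and angle():

    // sketch.cpp -- hypothetical names, not part of the patch

    // Prototype at file scope, ahead of both the caller and the definition.
    double polygonAngle(double ax, double ay, double bx, double by);

    double bendingCost(double ax, double ay, double bx, double by)
    {
        return 0.5 * polygonAngle(ax, ay, bx, by);
    }

    double polygonAngle(double ax, double ay, double bx, double by)
    {
        return ax * bx + ay * by;   // simplified stand-in for the real acos() formula
    }
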
index 737cecf..8bdafbb 100644
@@ -12,7 +12,7 @@
 #include <stdio.h>
 
 namespace cv{
-    
+
     inline int round(float value)
     {
         if(value > 0)
@@ -24,15 +24,15 @@ namespace cv{
             return int(value - 0.5f);
         }
     }
-    
+
     inline CvRect resize_rect(CvRect rect, float alpha)
     {
         return cvRect(rect.x + round((float)(0.5*(1 - alpha)*rect.width)), rect.y + round((float)(0.5*(1 - alpha)*rect.height)),
                       round(rect.width*alpha), round(rect.height*alpha));
     }
-    
+
     CvMat* ConvertImageToMatrix(IplImage* patch);
-    
+
     class CvCameraPose
         {
         public:
@@ -41,104 +41,104 @@ namespace cv{
                 m_rotation = cvCreateMat(1, 3, CV_32FC1);
                 m_translation = cvCreateMat(1, 3, CV_32FC1);
             };
-            
+
             ~CvCameraPose()
             {
                 cvReleaseMat(&m_rotation);
                 cvReleaseMat(&m_translation);
             };
-            
+
             void SetPose(CvMat* rotation, CvMat* translation)
             {
                 cvCopy(rotation, m_rotation);
                 cvCopy(translation, m_translation);
             };
-            
+
             CvMat* GetRotation() {return m_rotation;};
             CvMat* GetTranslation() {return m_translation;};
-            
+
         protected:
             CvMat* m_rotation;
             CvMat* m_translation;
         };
-    
+
     // AffineTransformPatch: generates an affine transformed image patch.
     // - src: source image (roi is supported)
     // - dst: output image. ROI of dst image should be 2 times smaller than ROI of src.
     // - pose: parameters of an affine transformation
     void AffineTransformPatch(IplImage* src, IplImage* dst, CvAffinePose pose);
-    
+
     // GenerateAffineTransformFromPose: generates an affine transformation matrix from CvAffinePose instance
     // - size: the size of image patch
     // - pose: affine transformation
     // - transform: 2x3 transformation matrix
     void GenerateAffineTransformFromPose(CvSize size, CvAffinePose pose, CvMat* transform);
-    
+
     // Generates a random affine pose
     CvAffinePose GenRandomAffinePose();
-    
-    
+
+
     const static int num_mean_components = 500;
     const static float noise_intensity = 0.15f;
-    
-    
+
+
     static inline CvPoint rect_center(CvRect rect)
     {
         return cvPoint(rect.x + rect.width/2, rect.y + rect.height/2);
     }
-    
-    void homography_transform(IplImage* frontal, IplImage* result, CvMat* homography)
-    {
-        cvWarpPerspective(frontal, result, homography);
-    }
-    
-    CvAffinePose perturbate_pose(CvAffinePose pose, float noise)
+
+    // static void homography_transform(IplImage* frontal, IplImage* result, CvMat* homography)
+    // {
+    //     cvWarpPerspective(frontal, result, homography);
+    // }
+
+    static CvAffinePose perturbate_pose(CvAffinePose pose, float noise)
     {
         // perturbate the matrix
         float noise_mult_factor = 1 + (0.5f - float(rand())/RAND_MAX)*noise;
         float noise_add_factor = noise_mult_factor - 1;
-        
+
         CvAffinePose pose_pert = pose;
         pose_pert.phi += noise_add_factor;
         pose_pert.theta += noise_mult_factor;
         pose_pert.lambda1 *= noise_mult_factor;
         pose_pert.lambda2 *= noise_mult_factor;
-        
+
         return pose_pert;
     }
-    
-    void generate_mean_patch(IplImage* frontal, IplImage* result, CvAffinePose pose, int pose_count, float noise)
+
+    static void generate_mean_patch(IplImage* frontal, IplImage* result, CvAffinePose pose, int pose_count, float noise)
     {
         IplImage* sum = cvCreateImage(cvSize(result->width, result->height), IPL_DEPTH_32F, 1);
         IplImage* workspace = cvCloneImage(result);
         IplImage* workspace_float = cvCloneImage(sum);
-        
+
         cvSetZero(sum);
         for(int i = 0; i < pose_count; i++)
         {
             CvAffinePose pose_pert = perturbate_pose(pose, noise);
-            
+
             AffineTransformPatch(frontal, workspace, pose_pert);
             cvConvertScale(workspace, workspace_float);
             cvAdd(sum, workspace_float, sum);
         }
-        
+
         cvConvertScale(sum, result, 1.0f/pose_count);
-        
+
         cvReleaseImage(&workspace);
         cvReleaseImage(&sum);
         cvReleaseImage(&workspace_float);
     }
-    
-    void generate_mean_patch_fast(IplImage* /*frontal*/, IplImage* /*result*/, CvAffinePose /*pose*/,
-                                  CvMat* /*pca_hr_avg*/, CvMat* /*pca_hr_eigenvectors*/, const OneWayDescriptor* /*pca_descriptors*/)
-    {
-        /*for(int i = 0; i < pca_hr_eigenvectors->cols; i++)
-        {
-            
-        }*/
-    }
-    
+
+    // static void generate_mean_patch_fast(IplImage* /*frontal*/, IplImage* /*result*/, CvAffinePose /*pose*/,
+    //                               CvMat* /*pca_hr_avg*/, CvMat* /*pca_hr_eigenvectors*/, const OneWayDescriptor* /*pca_descriptors*/)
+    // {
+    //     /*for(int i = 0; i < pca_hr_eigenvectors->cols; i++)
+    //     {
+
+    //     }*/
+    // }
+
     void readPCAFeatures(const char *filename, CvMat** avg, CvMat** eigenvectors, const char *postfix = "");
     void readPCAFeatures(const FileNode &fn, CvMat** avg, CvMat** eigenvectors, const char* postfix = "");
     void savePCAFeatures(FileStorage &fs, const char* postfix, CvMat* avg, CvMat* eigenvectors);
@@ -147,35 +147,35 @@ namespace cv{
     void loadPCAFeatures(const char* path, const char* images_list, vector<IplImage*>& patches, CvSize patch_size);
     void generatePCAFeatures(const char* path, const char* img_filename, FileStorage& fs, const char* postfix,
                              CvSize patch_size, CvMat** avg, CvMat** eigenvectors);
-    
+
     void eigenvector2image(CvMat* eigenvector, IplImage* img);
 
     void FindOneWayDescriptor(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch, int& desc_idx, int& pose_idx, float& distance,
                               CvMat* avg = 0, CvMat* eigenvalues = 0);
-    
+
     void FindOneWayDescriptor(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch, int n,
                               std::vector<int>& desc_idxs, std::vector<int>&  pose_idxs, std::vector<float>& distances,
                               CvMat* avg = 0, CvMat* eigenvalues = 0);
-    
+
     void FindOneWayDescriptor(cv::flann::Index* m_pca_descriptors_tree, CvSize patch_size, int m_pca_dim_low, int m_pose_count, IplImage* patch, int& desc_idx, int& pose_idx, float& distance,
                               CvMat* avg = 0, CvMat* eigenvalues = 0);
-    
+
     void FindOneWayDescriptorEx(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch,
                                 float scale_min, float scale_max, float scale_step,
                                 int& desc_idx, int& pose_idx, float& distance, float& scale,
                                 CvMat* avg, CvMat* eigenvectors);
-    
+
     void FindOneWayDescriptorEx(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch,
                                 float scale_min, float scale_max, float scale_step,
                                 int n, std::vector<int>& desc_idxs, std::vector<int>& pose_idxs,
                                 std::vector<float>& distances, std::vector<float>& scales,
                                 CvMat* avg, CvMat* eigenvectors);
-    
+
     void FindOneWayDescriptorEx(cv::flann::Index* m_pca_descriptors_tree, CvSize patch_size, int m_pca_dim_low, int m_pose_count, IplImage* patch,
                                 float scale_min, float scale_max, float scale_step,
                                 int& desc_idx, int& pose_idx, float& distance, float& scale,
                                 CvMat* avg, CvMat* eigenvectors);
-    
+
     inline CvRect fit_rect_roi_fixedsize(CvRect rect, CvRect roi)
     {
         CvRect fit = rect;
@@ -185,13 +185,13 @@ namespace cv{
         fit.y = MIN(fit.y, roi.y + roi.height - fit.height - 1);
         return(fit);
     }
-    
+
     inline CvRect fit_rect_fixedsize(CvRect rect, IplImage* img)
     {
         CvRect roi = cvGetImageROI(img);
         return fit_rect_roi_fixedsize(rect, roi);
     }
-    
+
     OneWayDescriptor::OneWayDescriptor()
     {
         m_pose_count = 0;
@@ -204,7 +204,7 @@ namespace cv{
         m_pca_dim_low = 100;
         m_pca_dim_high = 100;
     }
-    
+
     OneWayDescriptor::~OneWayDescriptor()
     {
         if(m_pose_count)
@@ -218,50 +218,50 @@ namespace cv{
             cvReleaseImage(&m_train_patch);
             delete []m_samples;
             delete []m_pca_coeffs;
-            
+
             if(!m_transforms)
             {
                 delete []m_affine_poses;
             }
         }
     }
-    
+
     void OneWayDescriptor::Allocate(int pose_count, CvSize size, int nChannels)
     {
         m_pose_count = pose_count;
         m_samples = new IplImage* [m_pose_count];
         m_pca_coeffs = new CvMat* [m_pose_count];
         m_patch_size = cvSize(size.width/2, size.height/2);
-        
+
         if(!m_transforms)
         {
             m_affine_poses = new CvAffinePose[m_pose_count];
         }
-        
+
         int length = m_pca_dim_low;//roi.width*roi.height;
         for(int i = 0; i < m_pose_count; i++)
         {
             m_samples[i] = cvCreateImage(cvSize(size.width/2, size.height/2), IPL_DEPTH_32F, nChannels);
             m_pca_coeffs[i] = cvCreateMat(1, length, CV_32FC1);
         }
-        
+
         m_input_patch = cvCreateImage(GetPatchSize(), IPL_DEPTH_8U, 1);
         m_train_patch = cvCreateImage(GetInputPatchSize(), IPL_DEPTH_8U, 1);
     }
-    
-    void cvmSet2DPoint(CvMat* matrix, int row, int col, CvPoint2D32f point)
-    {
-        cvmSet(matrix, row, col, point.x);
-        cvmSet(matrix, row, col + 1, point.y);
-    }
-    
-    void cvmSet3DPoint(CvMat* matrix, int row, int col, CvPoint3D32f point)
-    {
-        cvmSet(matrix, row, col, point.x);
-        cvmSet(matrix, row, col + 1, point.y);
-        cvmSet(matrix, row, col + 2, point.z);
-    }
-    
+
+    // static void cvmSet2DPoint(CvMat* matrix, int row, int col, CvPoint2D32f point)
+    // {
+    //     cvmSet(matrix, row, col, point.x);
+    //     cvmSet(matrix, row, col + 1, point.y);
+    // }
+
+    // static void cvmSet3DPoint(CvMat* matrix, int row, int col, CvPoint3D32f point)
+    // {
+    //     cvmSet(matrix, row, col, point.x);
+    //     cvmSet(matrix, row, col + 1, point.y);
+    //     cvmSet(matrix, row, col + 2, point.z);
+    // }
+
     CvAffinePose GenRandomAffinePose()
     {
         const float scale_min = 0.8f;
@@ -271,10 +271,10 @@ namespace cv{
         pose.phi = float(rand())/RAND_MAX*360;
         pose.lambda1 = scale_min + float(rand())/RAND_MAX*(scale_max - scale_min);
         pose.lambda2 = scale_min + float(rand())/RAND_MAX*(scale_max - scale_min);
-        
+
         return pose;
     }
-    
+
     void GenerateAffineTransformFromPose(CvSize size, CvAffinePose pose, CvMat* transform)
     {
         CvMat* temp = cvCreateMat(3, 3, CV_32FC1);
@@ -282,13 +282,13 @@ namespace cv{
         cvmSet(temp, 2, 0, 0.0f);
         cvmSet(temp, 2, 1, 0.0f);
         cvmSet(temp, 2, 2, 1.0f);
-        
+
         CvMat rotation;
         cvGetSubRect(temp, &rotation, cvRect(0, 0, 3, 2));
-        
+
         cv2DRotationMatrix(cvPoint2D32f(size.width/2, size.height/2), pose.phi, 1.0, &rotation);
         cvCopy(temp, final);
-        
+
         cvmSet(temp, 0, 0, pose.lambda1);
         cvmSet(temp, 0, 1, 0.0f);
         cvmSet(temp, 1, 0, 0.0f);
@@ -296,53 +296,53 @@ namespace cv{
         cvmSet(temp, 0, 2, size.width/2*(1 - pose.lambda1));
         cvmSet(temp, 1, 2, size.height/2*(1 - pose.lambda2));
         cvMatMul(temp, final, final);
-        
+
         cv2DRotationMatrix(cvPoint2D32f(size.width/2, size.height/2), pose.theta - pose.phi, 1.0, &rotation);
         cvMatMul(temp, final, final);
-        
+
         cvGetSubRect(final, &rotation, cvRect(0, 0, 3, 2));
         cvCopy(&rotation, transform);
-        
+
         cvReleaseMat(&temp);
         cvReleaseMat(&final);
     }
-    
+
     void AffineTransformPatch(IplImage* src, IplImage* dst, CvAffinePose pose)
     {
         CvRect src_large_roi = cvGetImageROI(src);
-        
+
         IplImage* temp = cvCreateImage(cvSize(src_large_roi.width, src_large_roi.height), IPL_DEPTH_32F, src->nChannels);
         cvSetZero(temp);
         IplImage* temp2 = cvCloneImage(temp);
         CvMat* rotation_phi = cvCreateMat(2, 3, CV_32FC1);
-        
+
         CvSize new_size = cvSize(cvRound(temp->width*pose.lambda1), cvRound(temp->height*pose.lambda2));
         IplImage* temp3 = cvCreateImage(new_size, IPL_DEPTH_32F, src->nChannels);
-        
+
         cvConvertScale(src, temp);
         cvResetImageROI(temp);
-        
-        
+
+
         cv2DRotationMatrix(cvPoint2D32f(temp->width/2, temp->height/2), pose.phi, 1.0, rotation_phi);
         cvWarpAffine(temp, temp2, rotation_phi);
-        
+
         cvSetZero(temp);
-        
+
         cvResize(temp2, temp3);
-        
+
         cv2DRotationMatrix(cvPoint2D32f(temp3->width/2, temp3->height/2), pose.theta - pose.phi, 1.0, rotation_phi);
         cvWarpAffine(temp3, temp, rotation_phi);
-        
+
         cvSetImageROI(temp, cvRect(temp->width/2 - src_large_roi.width/4, temp->height/2 - src_large_roi.height/4,
                                    src_large_roi.width/2, src_large_roi.height/2));
         cvConvertScale(temp, dst);
         cvReleaseMat(&rotation_phi);
-        
+
         cvReleaseImage(&temp3);
         cvReleaseImage(&temp2);
         cvReleaseImage(&temp);
     }
-    
+
     void OneWayDescriptor::GenerateSamples(int pose_count, IplImage* frontal, int norm)
     {
         /*    if(m_transforms)
@@ -361,7 +361,7 @@ namespace cv{
             }
             //AffineTransformPatch(frontal, patch_8u, m_affine_poses[i]);
             generate_mean_patch(frontal, patch_8u, m_affine_poses[i], num_mean_components, noise_intensity);
-            
+
             double scale = 1.0f;
             if(norm)
             {
@@ -369,7 +369,7 @@ namespace cv{
                 scale = 1/sum;
             }
             cvConvertScale(patch_8u, m_samples[i], scale);
-            
+
 #if 0
             double maxval;
             cvMinMaxLoc(m_samples[i], 0, &maxval);
@@ -382,7 +382,7 @@ namespace cv{
         }
         cvReleaseImage(&patch_8u);
     }
-    
+
     void OneWayDescriptor::GenerateSamplesFast(IplImage* frontal, CvMat* pca_hr_avg,
                                                CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors)
     {
@@ -392,12 +392,12 @@ namespace cv{
             cvResize(frontal, m_train_patch);
             frontal = m_train_patch;
         }
-        
+
         CvMat* pca_coeffs = cvCreateMat(1, pca_hr_eigenvectors->cols, CV_32FC1);
         double maxval;
         cvMinMaxLoc(frontal, 0, &maxval);
         CvMat* frontal_data = ConvertImageToMatrix(frontal);
-        
+
         double sum = cvSum(frontal_data).val[0];
         cvConvertScale(frontal_data, frontal_data, 1.0f/sum);
         cvProjectPCA(frontal_data, pca_hr_avg, pca_hr_eigenvectors, pca_coeffs);
@@ -409,7 +409,7 @@ namespace cv{
                 double coeff = cvmGet(pca_coeffs, 0, j);
                 IplImage* patch = pca_descriptors[j + 1].GetPatch(i);
                 cvAddWeighted(m_samples[i], 1.0, patch, coeff, 0, m_samples[i]);
-                
+
 #if 0
                 printf("coeff%d = %f\n", j, coeff);
                 IplImage* test = cvCreateImage(cvSize(12, 12), IPL_DEPTH_8U, 1);
@@ -421,11 +421,11 @@ namespace cv{
                 cvWaitKey(0);
 #endif
             }
-            
+
             cvAdd(pca_descriptors[0].GetPatch(i), m_samples[i], m_samples[i]);
             double sum = cvSum(m_samples[i]).val[0];
             cvConvertScale(m_samples[i], m_samples[i], 1.0/sum);
-            
+
 #if 0
             IplImage* test = cvCreateImage(cvSize(12, 12), IPL_DEPTH_8U, 1);
             /*        IplImage* temp1 = cvCreateImage(cvSize(12, 12), IPL_DEPTH_32F, 1);
@@ -436,7 +436,7 @@ namespace cv{
              cvConvertScale(temp1, test, 255.0/maxval);*/
             cvMinMaxLoc(m_samples[i], 0, &maxval);
             cvConvertScale(m_samples[i], test, 255.0/maxval);
-            
+
             cvNamedWindow("1", 1);
             cvShowImage("1", frontal);
             cvNamedWindow("2", 1);
@@ -444,33 +444,33 @@ namespace cv{
             cvWaitKey(0);
 #endif
         }
-        
+
         cvReleaseMat(&pca_coeffs);
         cvReleaseMat(&frontal_data);
     }
-    
+
     void OneWayDescriptor::SetTransforms(CvAffinePose* poses, CvMat** transforms)
     {
         if(m_affine_poses)
         {
             delete []m_affine_poses;
         }
-        
+
         m_affine_poses = poses;
         m_transforms = transforms;
     }
-    
+
     void OneWayDescriptor::Initialize(int pose_count, IplImage* frontal, const char* feature_name, int norm)
     {
         m_feature_name = std::string(feature_name);
         CvRect roi = cvGetImageROI(frontal);
         m_center = rect_center(roi);
-        
+
         Allocate(pose_count, cvSize(roi.width, roi.height), frontal->nChannels);
-        
+
         GenerateSamples(pose_count, frontal, norm);
     }
-    
+
     void OneWayDescriptor::InitializeFast(int pose_count, IplImage* frontal, const char* feature_name,
                                           CvMat* pca_hr_avg, CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors)
     {
@@ -482,12 +482,12 @@ namespace cv{
         m_feature_name = std::string(feature_name);
         CvRect roi = cvGetImageROI(frontal);
         m_center = rect_center(roi);
-        
+
         Allocate(pose_count, cvSize(roi.width, roi.height), frontal->nChannels);
-        
+
         GenerateSamplesFast(frontal, pca_hr_avg, pca_hr_eigenvectors, pca_descriptors);
     }
-    
+
     void OneWayDescriptor::InitializePCACoeffs(CvMat* avg, CvMat* eigenvectors)
     {
         for(int i = 0; i < m_pose_count; i++)
@@ -495,7 +495,7 @@ namespace cv{
             ProjectPCASample(m_samples[i], avg, eigenvectors, m_pca_coeffs[i]);
         }
     }
-    
+
     void OneWayDescriptor::ProjectPCASample(IplImage* patch, CvMat* avg, CvMat* eigenvectors, CvMat* pca_coeffs) const
     {
         CvMat* patch_mat = ConvertImageToMatrix(patch);
@@ -506,11 +506,11 @@ namespace cv{
         CvMat temp1;
         cvGetSubRect(temp, &temp1, cvRect(0, 0, pca_coeffs->cols, 1));
         cvCopy(&temp1, pca_coeffs);
-        
+
         cvReleaseMat(&temp);
         cvReleaseMat(&patch_mat);
     }
-    
+
     void OneWayDescriptor::EstimatePosePCA(CvArr* patch, int& pose_idx, float& distance, CvMat* avg, CvMat* eigenvectors) const
     {
         if(avg == 0)
@@ -522,7 +522,7 @@ namespace cv{
             }
             else
             {
-                
+
             }
             return;
         }
@@ -537,9 +537,9 @@ namespace cv{
                 roi = cvGetImageROI((IplImage*)patch);
             }
         }
-        
+
         CvMat* pca_coeffs = cvCreateMat(1, m_pca_dim_low, CV_32FC1);
-        
+
         if (CV_IS_MAT(patch))
         {
             cvCopy((CvMat*)patch, pca_coeffs);
@@ -552,41 +552,41 @@ namespace cv{
             ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
             cvReleaseImage(&patch_32f);
         }
-        
-        
+
+
         distance = 1e10;
         pose_idx = -1;
-        
+
         for(int i = 0; i < m_pose_count; i++)
         {
             double dist = cvNorm(m_pca_coeffs[i], pca_coeffs);
-            //         float dist = 0;
-            //         float data1, data2;
-            //         //CvMat* pose_pca_coeffs = m_pca_coeffs[i];
-            //         for (int x=0; x < pca_coeffs->width; x++)
-            //                 for (int y =0 ; y < pca_coeffs->height; y++)
-            //                 {
-            //                         data1 = ((float*)(pca_coeffs->data.ptr + pca_coeffs->step*x))[y];
-            //                         data2 = ((float*)(m_pca_coeffs[i]->data.ptr + m_pca_coeffs[i]->step*x))[y];
-            //                         dist+=(data1-data2)*(data1-data2);
-            //                 }
+            //      float dist = 0;
+            //      float data1, data2;
+            //      //CvMat* pose_pca_coeffs = m_pca_coeffs[i];
+            //      for (int x=0; x < pca_coeffs->width; x++)
+            //          for (int y =0 ; y < pca_coeffs->height; y++)
+            //          {
+            //              data1 = ((float*)(pca_coeffs->data.ptr + pca_coeffs->step*x))[y];
+            //              data2 = ((float*)(m_pca_coeffs[i]->data.ptr + m_pca_coeffs[i]->step*x))[y];
+            //              dist+=(data1-data2)*(data1-data2);
+            //          }
             ////#if 1
-            //         for (int j = 0; j < m_pca_dim_low; j++)
-            //         {
-            //                 dist += (pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j])*(pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j]);
-            //         }
+            //      for (int j = 0; j < m_pca_dim_low; j++)
+            //      {
+            //          dist += (pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j])*(pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j]);
+            //      }
             //#else
-            //         for (int j = 0; j <= m_pca_dim_low - 4; j += 4)
-            //         {
-            //                 dist += (pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j])*
-            //                         (pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j]);
-            //                 dist += (pose_pca_coeffs->data.fl[j+1]- pca_coeffs->data.fl[j+1])*
-            //                         (pose_pca_coeffs->data.fl[j+1]- pca_coeffs->data.fl[j+1]);
-            //                 dist += (pose_pca_coeffs->data.fl[j+2]- pca_coeffs->data.fl[j+2])*
-            //                         (pose_pca_coeffs->data.fl[j+2]- pca_coeffs->data.fl[j+2]);
-            //                 dist += (pose_pca_coeffs->data.fl[j+3]- pca_coeffs->data.fl[j+3])*
-            //                         (pose_pca_coeffs->data.fl[j+3]- pca_coeffs->data.fl[j+3]);
-            //         }
+            //      for (int j = 0; j <= m_pca_dim_low - 4; j += 4)
+            //      {
+            //          dist += (pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j])*
+            //              (pose_pca_coeffs->data.fl[j]- pca_coeffs->data.fl[j]);
+            //          dist += (pose_pca_coeffs->data.fl[j+1]- pca_coeffs->data.fl[j+1])*
+            //              (pose_pca_coeffs->data.fl[j+1]- pca_coeffs->data.fl[j+1]);
+            //          dist += (pose_pca_coeffs->data.fl[j+2]- pca_coeffs->data.fl[j+2])*
+            //              (pose_pca_coeffs->data.fl[j+2]- pca_coeffs->data.fl[j+2]);
+            //          dist += (pose_pca_coeffs->data.fl[j+3]- pca_coeffs->data.fl[j+3])*
+            //              (pose_pca_coeffs->data.fl[j+3]- pca_coeffs->data.fl[j+3]);
+            //      }
             //#endif
             if(dist < distance)
             {
@@ -594,20 +594,20 @@ namespace cv{
                 pose_idx = i;
             }
         }
-        
+
         cvReleaseMat(&pca_coeffs);
     }
-    
+
     void OneWayDescriptor::EstimatePose(IplImage* patch, int& pose_idx, float& distance) const
     {
         distance = 1e10;
         pose_idx = -1;
-        
+
         CvRect roi = cvGetImageROI(patch);
         IplImage* patch_32f = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_32F, patch->nChannels);
         double sum = cvSum(patch).val[0];
         cvConvertScale(patch, patch_32f, 1/sum);
-        
+
         for(int i = 0; i < m_pose_count; i++)
         {
             if(m_samples[i]->width != patch_32f->width || m_samples[i]->height != patch_32f->height)
@@ -617,21 +617,21 @@ namespace cv{
             double dist = cvNorm(m_samples[i], patch_32f);
             //float dist = 0.0f;
             //float i1,i2;
-            
+
             //for (int y = 0; y<patch_32f->height; y++)
-            // for (int x = 0; x< patch_32f->width; x++)
-            // {
-            //         i1 = ((float*)(m_samples[i]->imageData + m_samples[i]->widthStep*y))[x];
-            //         i2 = ((float*)(patch_32f->imageData + patch_32f->widthStep*y))[x];
-            //         dist+= (i1-i2)*(i1-i2);
-            // }
-            
+            //  for (int x = 0; x< patch_32f->width; x++)
+            //  {
+            //      i1 = ((float*)(m_samples[i]->imageData + m_samples[i]->widthStep*y))[x];
+            //      i2 = ((float*)(patch_32f->imageData + patch_32f->widthStep*y))[x];
+            //      dist+= (i1-i2)*(i1-i2);
+            //  }
+
             if(dist < distance)
             {
                 distance = (float)dist;
                 pose_idx = i;
             }
-            
+
 #if 0
             IplImage* img1 = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_8U, 1);
             IplImage* img2 = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_8U, 1);
@@ -640,7 +640,7 @@ namespace cv{
             cvConvertScale(m_samples[i], img1, 255.0/maxval);
             cvMinMaxLoc(patch_32f, 0, &maxval);
             cvConvertScale(patch_32f, img2, 255.0/maxval);
-            
+
             cvNamedWindow("1", 1);
             cvShowImage("1", img1);
             cvNamedWindow("2", 1);
@@ -649,10 +649,10 @@ namespace cv{
             cvWaitKey(0);
 #endif
         }
-        
+
         cvReleaseImage(&patch_32f);
     }
-    
+
     void OneWayDescriptor::Save(const char* path)
     {
         for(int i = 0; i < m_pose_count; i++)
@@ -660,21 +660,21 @@ namespace cv{
             char buf[1024];
             sprintf(buf, "%s/patch_%04d.jpg", path, i);
             IplImage* patch = cvCreateImage(cvSize(m_samples[i]->width, m_samples[i]->height), IPL_DEPTH_8U, m_samples[i]->nChannels);
-            
+
             double maxval;
             cvMinMaxLoc(m_samples[i], 0, &maxval);
             cvConvertScale(m_samples[i], patch, 255/maxval);
-            
+
             cvSaveImage(buf, patch);
-            
+
             cvReleaseImage(&patch);
         }
     }
-    
+
     void OneWayDescriptor::Write(CvFileStorage* fs, const char* name)
     {
         CvMat* mat = cvCreateMat(m_pose_count, m_samples[0]->width*m_samples[0]->height, CV_32FC1);
-        
+
         // prepare data to write as a single matrix
         for(int i = 0; i < m_pose_count; i++)
         {
@@ -687,12 +687,12 @@ namespace cv{
                 }
             }
         }
-        
+
         cvWrite(fs, name, mat);
-        
+
         cvReleaseMat(&mat);
     }
-    
+
     int OneWayDescriptor::ReadByName(const FileNode &parent, const char* name)
     {
         CvMat* mat = reinterpret_cast<CvMat*> (parent[name].readObj ());
@@ -700,8 +700,8 @@ namespace cv{
         {
             return 0;
         }
-        
-        
+
+
         for(int i = 0; i < m_pose_count; i++)
         {
             for(int y = 0; y < m_samples[i]->height; y++)
@@ -713,7 +713,7 @@ namespace cv{
                 }
             }
         }
-        
+
         cvReleaseMat(&mat);
         return 1;
     }
@@ -722,17 +722,17 @@ namespace cv{
     {
         return ReadByName (FileNode (fs, parent), name);
     }
-    
+
     IplImage* OneWayDescriptor::GetPatch(int index)
     {
         return m_samples[index];
     }
-    
+
     CvAffinePose OneWayDescriptor::GetPose(int index) const
     {
         return m_affine_poses[index];
     }
-    
+
     void FindOneWayDescriptor(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch, int& desc_idx, int& pose_idx, float& distance,
                               CvMat* avg, CvMat* eigenvectors)
     {
@@ -751,7 +751,7 @@ namespace cv{
             IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
             if(_roi.width != patch_width|| _roi.height != patch_height)
             {
-                
+
                 cvResize(patch, test_img);
                 _roi = cvGetImageROI(test_img);
             }
@@ -762,7 +762,7 @@ namespace cv{
             IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
             double sum = cvSum(test_img).val[0];
             cvConvertScale(test_img, patch_32f, 1.0f/sum);
-            
+
             //ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
             //Projecting PCA
             CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
@@ -774,20 +774,20 @@ namespace cv{
             cvReleaseMat(&temp);
             cvReleaseMat(&patch_mat);
             //End of projecting
-            
+
             cvReleaseImage(&patch_32f);
             cvReleaseImage(&test_img);
         }
-        
+
         //--------
-        
-        
-        
+
+
+
         for(int i = 0; i < desc_count; i++)
         {
             int _pose_idx = -1;
             float _distance = 0;
-            
+
 #if 0
             descriptors[i].EstimatePose(patch, _pose_idx, _distance);
 #else
@@ -800,7 +800,7 @@ namespace cv{
                 descriptors[i].EstimatePosePCA(pca_coeffs, _pose_idx, _distance, avg, eigenvectors);
             }
 #endif
-            
+
             if(_distance < distance)
             {
                 desc_idx = i;
@@ -810,9 +810,9 @@ namespace cv{
         }
         cvReleaseMat(&pca_coeffs);
     }
-    
+
 #if defined(_KDTREE)
-    
+
     void FindOneWayDescriptor(cv::flann::Index* m_pca_descriptors_tree, CvSize patch_size, int m_pca_dim_low, int m_pose_count, IplImage* patch, int& desc_idx, int& pose_idx, float& distance,
                               CvMat* avg, CvMat* eigenvectors)
     {
@@ -826,77 +826,77 @@ namespace cv{
         int patch_height = patch_size.height;
         //if (avg)
         //{
-               CvRect _roi = cvGetImageROI((IplImage*)patch);
-               IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
-               if(_roi.width != patch_width|| _roi.height != patch_height)
-               {
-            
-                       cvResize(patch, test_img);
-                       _roi = cvGetImageROI(test_img);
-               }
-               else
-               {
-                       cvCopy(patch,test_img);
-               }
-               IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
-               float sum = cvSum(test_img).val[0];
-               cvConvertScale(test_img, patch_32f, 1.0f/sum);
-        
-               //ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
-               //Projecting PCA
-               CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
-               CvMat* temp = cvCreateMat(1, eigenvectors->cols, CV_32FC1);
-               cvProjectPCA(patch_mat, avg, eigenvectors, temp);
-               CvMat temp1;
-               cvGetSubRect(temp, &temp1, cvRect(0, 0, pca_coeffs->cols, 1));
-               cvCopy(&temp1, pca_coeffs);
-               cvReleaseMat(&temp);
-               cvReleaseMat(&patch_mat);
-               //End of projecting
-        
-               cvReleaseImage(&patch_32f);
-               cvReleaseImage(&test_img);
-        //     }
-        
+        CvRect _roi = cvGetImageROI((IplImage*)patch);
+        IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
+        if(_roi.width != patch_width|| _roi.height != patch_height)
+        {
+
+            cvResize(patch, test_img);
+            _roi = cvGetImageROI(test_img);
+        }
+        else
+        {
+            cvCopy(patch,test_img);
+        }
+        IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
+        float sum = cvSum(test_img).val[0];
+        cvConvertScale(test_img, patch_32f, 1.0f/sum);
+
+        //ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
+        //Projecting PCA
+        CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
+        CvMat* temp = cvCreateMat(1, eigenvectors->cols, CV_32FC1);
+        cvProjectPCA(patch_mat, avg, eigenvectors, temp);
+        CvMat temp1;
+        cvGetSubRect(temp, &temp1, cvRect(0, 0, pca_coeffs->cols, 1));
+        cvCopy(&temp1, pca_coeffs);
+        cvReleaseMat(&temp);
+        cvReleaseMat(&patch_mat);
+        //End of projecting
+
+        cvReleaseImage(&patch_32f);
+        cvReleaseImage(&test_img);
+        //  }
+
         //--------
-        
-               //float* target = new float[m_pca_dim_low];
-               //::cvflann::KNNResultSet res(1,pca_coeffs->data.fl,m_pca_dim_low);
-               //::cvflann::SearchParams params;
-               //params.checks = -1;
-        
-               //int maxDepth = 1000000;
-               //int neighbors_count = 1;
-               //int* neighborsIdx = new int[neighbors_count];
-               //float* distances = new float[neighbors_count];
-               //if (m_pca_descriptors_tree->findNearest(pca_coeffs->data.fl,neighbors_count,maxDepth,neighborsIdx,0,distances) > 0)
-               //{
-               //      desc_idx = neighborsIdx[0] / m_pose_count;
-               //      pose_idx = neighborsIdx[0] % m_pose_count;
-               //      distance = distances[0];
-               //}
-               //delete[] neighborsIdx;
-               //delete[] distances;
-        
-               cv::Mat m_object(1, m_pca_dim_low, CV_32F);
-               cv::Mat m_indices(1, 1, CV_32S);
-               cv::Mat m_dists(1, 1, CV_32F);
-        
-               float* object_ptr = m_object.ptr<float>(0);
-               for (int i=0;i<m_pca_dim_low;i++)
-               {
-                       object_ptr[i] = pca_coeffs->data.fl[i];
-               }
-        
-               m_pca_descriptors_tree->knnSearch(m_object, m_indices, m_dists, 1, cv::flann::SearchParams(-1) );
-        
-               desc_idx = ((int*)(m_indices.ptr<int>(0)))[0] / m_pose_count;
-               pose_idx = ((int*)(m_indices.ptr<int>(0)))[0] % m_pose_count;
-               distance = ((float*)(m_dists.ptr<float>(0)))[0];
-        
-        //     delete[] target;
-        
-        
+
+        //float* target = new float[m_pca_dim_low];
+        //::cvflann::KNNResultSet res(1,pca_coeffs->data.fl,m_pca_dim_low);
+        //::cvflann::SearchParams params;
+        //params.checks = -1;
+
+        //int maxDepth = 1000000;
+        //int neighbors_count = 1;
+        //int* neighborsIdx = new int[neighbors_count];
+        //float* distances = new float[neighbors_count];
+        //if (m_pca_descriptors_tree->findNearest(pca_coeffs->data.fl,neighbors_count,maxDepth,neighborsIdx,0,distances) > 0)
+        //{
+        //  desc_idx = neighborsIdx[0] / m_pose_count;
+        //  pose_idx = neighborsIdx[0] % m_pose_count;
+        //  distance = distances[0];
+        //}
+        //delete[] neighborsIdx;
+        //delete[] distances;
+
+        cv::Mat m_object(1, m_pca_dim_low, CV_32F);
+        cv::Mat m_indices(1, 1, CV_32S);
+        cv::Mat m_dists(1, 1, CV_32F);
+
+        float* object_ptr = m_object.ptr<float>(0);
+        for (int i=0;i<m_pca_dim_low;i++)
+        {
+            object_ptr[i] = pca_coeffs->data.fl[i];
+        }
+
+        m_pca_descriptors_tree->knnSearch(m_object, m_indices, m_dists, 1, cv::flann::SearchParams(-1) );
+
+        desc_idx = ((int*)(m_indices.ptr<int>(0)))[0] / m_pose_count;
+        pose_idx = ((int*)(m_indices.ptr<int>(0)))[0] % m_pose_count;
+        distance = ((float*)(m_dists.ptr<float>(0)))[0];
+
+        //  delete[] target;
+
+
         //    for(int i = 0; i < desc_count; i++)
         //    {
         //        int _pose_idx = -1;
@@ -905,14 +905,14 @@ namespace cv{
         //#if 0
         //        descriptors[i].EstimatePose(patch, _pose_idx, _distance);
         //#else
-        //             if (!avg)
-        //             {
-        //                     descriptors[i].EstimatePosePCA(patch, _pose_idx, _distance, avg, eigenvectors);
-        //             }
-        //             else
-        //             {
-        //                     descriptors[i].EstimatePosePCA(pca_coeffs, _pose_idx, _distance, avg, eigenvectors);
-        //             }
+        //      if (!avg)
+        //      {
+        //          descriptors[i].EstimatePosePCA(patch, _pose_idx, _distance, avg, eigenvectors);
+        //      }
+        //      else
+        //      {
+        //          descriptors[i].EstimatePosePCA(pca_coeffs, _pose_idx, _distance, avg, eigenvectors);
+        //      }
         //#endif
         //
         //        if(_distance < distance)
@@ -948,7 +948,7 @@ namespace cv{
             IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
             if(_roi.width != patch_width|| _roi.height != patch_height)
             {
-                
+
                 cvResize(patch, test_img);
                 _roi = cvGetImageROI(test_img);
             }
@@ -959,7 +959,7 @@ namespace cv{
             IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
             double sum = cvSum(test_img).val[0];
             cvConvertScale(test_img, patch_32f, 1.0f/sum);
-            
+
             //ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
             //Projecting PCA
             CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
@@ -971,19 +971,19 @@ namespace cv{
             cvReleaseMat(&temp);
             cvReleaseMat(&patch_mat);
             //End of projecting
-            
+
             cvReleaseImage(&patch_32f);
             cvReleaseImage(&test_img);
         }
         //--------
-        
-        
-        
+
+
+
         for(int i = 0; i < desc_count; i++)
         {
             int _pose_idx = -1;
             float _distance = 0;
-            
+
 #if 0
             descriptors[i].EstimatePose(patch, _pose_idx, _distance);
 #else
@@ -996,7 +996,7 @@ namespace cv{
                 descriptors[i].EstimatePosePCA(pca_coeffs, _pose_idx, _distance, avg, eigenvectors);
             }
 #endif
-            
+
             for (int j=0;j<n;j++)
             {
                 if(_distance < distances[j])
@@ -1016,7 +1016,7 @@ namespace cv{
         }
         cvReleaseMat(&pca_coeffs);
     }
-    
+
     void FindOneWayDescriptorEx(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch,
                                 float scale_min, float scale_max, float scale_step,
                                 int& desc_idx, int& pose_idx, float& distance, float& scale,
@@ -1025,22 +1025,22 @@ namespace cv{
         CvSize patch_size = descriptors[0].GetPatchSize();
         IplImage* input_patch;
         CvRect roi;
-        
+
         input_patch= cvCreateImage(patch_size, IPL_DEPTH_8U, 1);
         roi = cvGetImageROI((IplImage*)patch);
-        
+
         int _desc_idx, _pose_idx;
         float _distance;
         distance = 1e10;
         for(float cur_scale = scale_min; cur_scale < scale_max; cur_scale *= scale_step)
         {
             //        printf("Scale = %f\n", cur_scale);
-            
+
             CvRect roi_scaled = resize_rect(roi, cur_scale);
             cvSetImageROI(patch, roi_scaled);
             cvResize(patch, input_patch);
-            
-            
+
+
 #if 0
             if(roi.x > 244 && roi.y < 200)
             {
@@ -1049,7 +1049,7 @@ namespace cv{
                 cvWaitKey(0);
             }
 #endif
-            
+
             FindOneWayDescriptor(desc_count, descriptors, input_patch, _desc_idx, _pose_idx, _distance, avg, eigenvectors);
             if(_distance < distance)
             {
@@ -1059,13 +1059,13 @@ namespace cv{
                 scale = cur_scale;
             }
         }
-        
-        
+
+
         cvSetImageROI((IplImage*)patch, roi);
         cvReleaseImage(&input_patch);
-        
+
     }
-    
+
     void FindOneWayDescriptorEx(int desc_count, const OneWayDescriptor* descriptors, IplImage* patch,
                                 float scale_min, float scale_max, float scale_step,
                                 int n, std::vector<int>& desc_idxs, std::vector<int>& pose_idxs,
@@ -1075,10 +1075,10 @@ namespace cv{
         CvSize patch_size = descriptors[0].GetPatchSize();
         IplImage* input_patch;
         CvRect roi;
-        
+
         input_patch= cvCreateImage(patch_size, IPL_DEPTH_8U, 1);
         roi = cvGetImageROI((IplImage*)patch);
-        
+
         //  float min_distance = 1e10;
         std::vector<int> _desc_idxs;
         _desc_idxs.resize(n);
@@ -1086,22 +1086,22 @@ namespace cv{
         _pose_idxs.resize(n);
         std::vector<float> _distances;
         _distances.resize(n);
-        
-        
+
+
         for (int i=0;i<n;i++)
         {
             distances[i] = 1e10;
         }
-        
+
         for(float cur_scale = scale_min; cur_scale < scale_max; cur_scale *= scale_step)
         {
-            
+
             CvRect roi_scaled = resize_rect(roi, cur_scale);
             cvSetImageROI(patch, roi_scaled);
             cvResize(patch, input_patch);
-            
-            
-            
+
+
+
             FindOneWayDescriptor(desc_count, descriptors, input_patch, n,_desc_idxs, _pose_idxs, _distances, avg, eigenvectors);
             for (int i=0;i<n;i++)
             {
@@ -1114,13 +1114,13 @@ namespace cv{
                 }
             }
         }
-        
-        
-        
+
+
+
         cvSetImageROI((IplImage*)patch, roi);
         cvReleaseImage(&input_patch);
     }
-    
+
 #if defined(_KDTREE)
     void FindOneWayDescriptorEx(cv::flann::Index* m_pca_descriptors_tree, CvSize patch_size, int m_pca_dim_low,
                                 int m_pose_count, IplImage* patch,
@@ -1130,21 +1130,21 @@ namespace cv{
     {
         IplImage* input_patch;
         CvRect roi;
-        
+
         input_patch= cvCreateImage(patch_size, IPL_DEPTH_8U, 1);
         roi = cvGetImageROI((IplImage*)patch);
-        
+
         int _desc_idx, _pose_idx;
         float _distance;
         distance = 1e10;
         for(float cur_scale = scale_min; cur_scale < scale_max; cur_scale *= scale_step)
         {
             //        printf("Scale = %f\n", cur_scale);
-            
+
             CvRect roi_scaled = resize_rect(roi, cur_scale);
             cvSetImageROI(patch, roi_scaled);
             cvResize(patch, input_patch);
-            
+
             FindOneWayDescriptor(m_pca_descriptors_tree, patch_size, m_pca_dim_low, m_pose_count, input_patch, _desc_idx, _pose_idx, _distance, avg, eigenvectors);
             if(_distance < distance)
             {
@@ -1154,29 +1154,29 @@ namespace cv{
                 scale = cur_scale;
             }
         }
-        
-        
+
+
         cvSetImageROI((IplImage*)patch, roi);
         cvReleaseImage(&input_patch);
-        
+
     }
 #endif
-    
+
     const char* OneWayDescriptor::GetFeatureName() const
     {
         return m_feature_name.c_str();
     }
-    
+
     CvPoint OneWayDescriptor::GetCenter() const
     {
         return m_center;
     }
-    
+
     int OneWayDescriptor::GetPCADimLow() const
     {
         return m_pca_dim_low;
     }
-    
+
     int OneWayDescriptor::GetPCADimHigh() const
     {
         return m_pca_dim_high;
@@ -1186,7 +1186,7 @@ namespace cv{
     {
         CvRect roi = cvGetImageROI(patch);
         CvMat* mat = cvCreateMat(1, roi.width*roi.height, CV_32FC1);
-        
+
         if(patch->depth == 32)
         {
             for(int y = 0; y < roi.height; y++)
@@ -1212,10 +1212,10 @@ namespace cv{
             printf("Image depth %d is not supported\n", patch->depth);
             return 0;
         }
-        
+
         return mat;
     }
-    
+
     OneWayDescriptorBase::OneWayDescriptorBase(CvSize patch_size, int pose_count, const char* train_path,
                                                const char* pca_config, const char* pca_hr_config,
                                                const char* pca_desc_config, int pyr_levels,
@@ -1226,21 +1226,21 @@ namespace cv{
         m_pca_descriptors_matrix = 0;
         m_pca_descriptors_tree = 0;
 #endif
-        //     m_pca_descriptors_matrix = 0;
+        //  m_pca_descriptors_matrix = 0;
         m_patch_size = patch_size;
         m_pose_count = pose_count;
         m_pyr_levels = pyr_levels;
         m_poses = 0;
         m_transforms = 0;
-        
+
         m_pca_avg = 0;
         m_pca_eigenvectors = 0;
         m_pca_hr_avg = 0;
         m_pca_hr_eigenvectors = 0;
         m_pca_descriptors = 0;
-        
+
         m_descriptors = 0;
-        
+
         if(train_path == 0 || strlen(train_path) == 0)
         {
             // skip pca loading
@@ -1255,9 +1255,9 @@ namespace cv{
             sprintf(pca_hr_config_filename, "%s/%s", train_path, pca_hr_config);
             readPCAFeatures(pca_hr_config_filename, &m_pca_hr_avg, &m_pca_hr_eigenvectors);
         }
-        
+
         m_pca_descriptors = new OneWayDescriptor[m_pca_dim_high + 1];
-        
+
 #if !defined(_GH_REGIONS)
         if(pca_desc_config && strlen(pca_desc_config) > 0)
             //    if(0)
@@ -1277,7 +1277,7 @@ namespace cv{
         }
 #endif //_GH_REGIONS
         //    SavePCADescriptors("./pca_descriptors.yml");
-        
+
     }
 
     OneWayDescriptorBase::OneWayDescriptorBase(CvSize patch_size, int pose_count, const string &pca_filename,
@@ -1346,8 +1346,8 @@ namespace cv{
         scale_min = fn["minScale"];
         scale_max = fn["maxScale"];
         scale_step = fn["stepScale"];
-       
-       LoadPCAall (fn);
+
+    LoadPCAall (fn);
     }
 
     void OneWayDescriptorBase::LoadPCAall (const FileNode &fn)
@@ -1364,21 +1364,21 @@ namespace cv{
     {
         cvReleaseMat(&m_pca_avg);
         cvReleaseMat(&m_pca_eigenvectors);
-        
+
         if(m_pca_hr_eigenvectors)
         {
             delete[] m_pca_descriptors;
             cvReleaseMat(&m_pca_hr_avg);
             cvReleaseMat(&m_pca_hr_eigenvectors);
         }
-        
-        
+
+
         if(m_descriptors)
             delete []m_descriptors;
 
         if(m_poses)
             delete []m_poses;
-        
+
         if (m_transforms)
         {
             for(int i = 0; i < m_pose_count; i++)
@@ -1398,7 +1398,7 @@ namespace cv{
         }
 #endif
     }
-    
+
     void OneWayDescriptorBase::clear(){
         if (m_descriptors)
         {
@@ -1428,7 +1428,7 @@ namespace cv{
             m_poses[i] = GenRandomAffinePose();
         }
     }
-    
+
     void OneWayDescriptorBase::InitializeTransformsFromPoses()
     {
         m_transforms = new CvMat*[m_pose_count];
@@ -1438,19 +1438,19 @@ namespace cv{
             GenerateAffineTransformFromPose(cvSize(m_patch_size.width*2, m_patch_size.height*2), m_poses[i], m_transforms[i]);
         }
     }
-    
+
     void OneWayDescriptorBase::InitializePoseTransforms()
     {
         InitializePoses();
         InitializeTransformsFromPoses();
     }
-    
+
     void OneWayDescriptorBase::InitializeDescriptor(int desc_idx, IplImage* train_image, const KeyPoint& keypoint, const char* feature_label)
     {
-        
+
         // TBD add support for octave != 0
         CvPoint center = keypoint.pt;
-        
+
         CvRect roi = cvRect(center.x - m_patch_size.width/2, center.y - m_patch_size.height/2, m_patch_size.width, m_patch_size.height);
         cvResetImageROI(train_image);
         roi = fit_rect_fixedsize(roi, train_image);
@@ -1459,17 +1459,17 @@ namespace cv{
         {
             return;
         }
-        
+
         InitializeDescriptor(desc_idx, train_image, feature_label);
         cvResetImageROI(train_image);
     }
-    
+
     void OneWayDescriptorBase::InitializeDescriptor(int desc_idx, IplImage* train_image, const char* feature_label)
     {
         m_descriptors[desc_idx].SetPCADimHigh(m_pca_dim_high);
         m_descriptors[desc_idx].SetPCADimLow(m_pca_dim_low);
         m_descriptors[desc_idx].SetTransforms(m_poses, m_transforms);
-        
+
         if(!m_pca_hr_eigenvectors)
         {
             m_descriptors[desc_idx].Initialize(m_pose_count, train_image, feature_label);
@@ -1479,24 +1479,24 @@ namespace cv{
             m_descriptors[desc_idx].InitializeFast(m_pose_count, train_image, feature_label,
                                                    m_pca_hr_avg, m_pca_hr_eigenvectors, m_pca_descriptors);
         }
-        
+
         if(m_pca_avg)
         {
             m_descriptors[desc_idx].InitializePCACoeffs(m_pca_avg, m_pca_eigenvectors);
         }
     }
-    
+
     void OneWayDescriptorBase::FindDescriptor(IplImage* src, cv::Point2f pt, int& desc_idx, int& pose_idx, float& distance) const
     {
         CvRect roi = cvRect(cvRound(pt.x - m_patch_size.width/4),
                             cvRound(pt.y - m_patch_size.height/4),
                             m_patch_size.width/2, m_patch_size.height/2);
         cvSetImageROI(src, roi);
-        
+
         FindDescriptor(src, desc_idx, pose_idx, distance);
-        cvResetImageROI(src);   
+        cvResetImageROI(src);
     }
-    
+
     void OneWayDescriptorBase::FindDescriptor(IplImage* patch, int& desc_idx, int& pose_idx, float& distance, float* _scale, float* scale_ranges) const
     {
 #if 0
@@ -1505,15 +1505,15 @@ namespace cv{
         float min = scale_min;
         float max = scale_max;
         float step = scale_step;
-        
+
         if (scale_ranges)
         {
             min = scale_ranges[0];
             max = scale_ranges[1];
         }
-        
+
         float scale = 1.0f;
-        
+
 #if !defined(_KDTREE)
         cv::FindOneWayDescriptorEx(m_train_feature_count, m_descriptors, patch,
                                    min, max, step, desc_idx, pose_idx, distance, scale,
@@ -1523,50 +1523,50 @@ namespace cv{
                                    min, max, step, desc_idx, pose_idx, distance, scale,
                                    m_pca_avg, m_pca_eigenvectors);
 #endif
-        
+
         if (_scale)
             *_scale = scale;
-        
+
 #endif
     }
-    
+
     void OneWayDescriptorBase::FindDescriptor(IplImage* patch, int n, std::vector<int>& desc_idxs, std::vector<int>& pose_idxs,
                                               std::vector<float>& distances, std::vector<float>& _scales, float* scale_ranges) const
     {
         float min = scale_min;
         float max = scale_max;
         float step = scale_step;
-        
+
         if (scale_ranges)
         {
             min = scale_ranges[0];
             max = scale_ranges[1];
         }
-        
+
         distances.resize(n);
         _scales.resize(n);
         desc_idxs.resize(n);
         pose_idxs.resize(n);
         /*float scales = 1.0f;*/
-        
+
         cv::FindOneWayDescriptorEx(m_train_feature_count, m_descriptors, patch,
                                    min, max, step ,n, desc_idxs, pose_idxs, distances, _scales,
                                    m_pca_avg, m_pca_eigenvectors);
-        
+
     }
-    
+
     void OneWayDescriptorBase::SetPCAHigh(CvMat* avg, CvMat* eigenvectors)
     {
         m_pca_hr_avg = cvCloneMat(avg);
         m_pca_hr_eigenvectors = cvCloneMat(eigenvectors);
     }
-    
+
     void OneWayDescriptorBase::SetPCALow(CvMat* avg, CvMat* eigenvectors)
     {
         m_pca_avg = cvCloneMat(avg);
         m_pca_eigenvectors = cvCloneMat(eigenvectors);
     }
-    
+
     void OneWayDescriptorBase::AllocatePCADescriptors()
     {
         m_pca_descriptors = new OneWayDescriptor[m_pca_dim_high + 1];
@@ -1576,7 +1576,7 @@ namespace cv{
             m_pca_descriptors[i].SetPCADimLow(m_pca_dim_low);
         }
     }
-    
+
     void OneWayDescriptorBase::CreatePCADescriptors()
     {
         if(m_pca_descriptors == 0)
@@ -1584,27 +1584,27 @@ namespace cv{
             AllocatePCADescriptors();
         }
         IplImage* frontal = cvCreateImage(m_patch_size, IPL_DEPTH_32F, 1);
-        
+
         eigenvector2image(m_pca_hr_avg, frontal);
         m_pca_descriptors[0].SetTransforms(m_poses, m_transforms);
         m_pca_descriptors[0].Initialize(m_pose_count, frontal, "", 0);
-        
+
         for(int j = 0; j < m_pca_dim_high; j++)
         {
             CvMat eigenvector;
             cvGetSubRect(m_pca_hr_eigenvectors, &eigenvector, cvRect(0, j, m_pca_hr_eigenvectors->cols, 1));
             eigenvector2image(&eigenvector, frontal);
-            
+
             m_pca_descriptors[j + 1].SetTransforms(m_poses, m_transforms);
             m_pca_descriptors[j + 1].Initialize(m_pose_count, frontal, "", 0);
-            
+
             printf("Created descriptor for PCA component %d\n", j);
         }
-        
+
         cvReleaseImage(&frontal);
     }
-    
-    
+
+
     int OneWayDescriptorBase::LoadPCADescriptors(const char* filename)
     {
         FileStorage fs = FileStorage (filename, FileStorage::READ);
@@ -1618,7 +1618,7 @@ namespace cv{
 
         printf("Successfully read %d pca components\n", m_pca_dim_high);
         fs.release ();
-        
+
         return 1;
     }
 
@@ -1726,7 +1726,7 @@ namespace cv{
         cvReleaseMat(&eigenvalues);
     }
 
-    void extractPatches (IplImage *img, vector<IplImage*>& patches, CvSize patch_size)
+    static void extractPatches (IplImage *img, vector<IplImage*>& patches, CvSize patch_size)
     {
         vector<KeyPoint> features;
         Ptr<FeatureDetector> surf_extractor = FeatureDetector::create("SURF");
@@ -1891,13 +1891,13 @@ namespace cv{
     {
         CvMemStorage* storage = cvCreateMemStorage();
         CvFileStorage* fs = cvOpenFileStorage(filename, storage, CV_STORAGE_WRITE);
-        
+
         SavePCADescriptors (fs);
-        
+
         cvReleaseMemStorage(&storage);
         cvReleaseFileStorage(&fs);
     }
-    
+
     void OneWayDescriptorBase::SavePCADescriptors(CvFileStorage *fs) const
     {
         cvWriteInt(fs, "pca_components_number", m_pca_dim_high);
@@ -1939,32 +1939,32 @@ namespace cv{
             m_descriptors[i].SetPCADimLow(m_pca_dim_low);
         }
     }
-    
+
     void OneWayDescriptorBase::InitializeDescriptors(IplImage* train_image, const vector<KeyPoint>& features,
                                                      const char* feature_label, int desc_start_idx)
     {
         for(int i = 0; i < (int)features.size(); i++)
         {
             InitializeDescriptor(desc_start_idx + i, train_image, features[i], feature_label);
-            
+
         }
         cvResetImageROI(train_image);
-        
+
 #if defined(_KDTREE)
         ConvertDescriptorsArrayToTree();
 #endif
     }
-    
+
     void OneWayDescriptorBase::CreateDescriptorsFromImage(IplImage* src, const std::vector<KeyPoint>& features)
     {
         m_train_feature_count = (int)features.size();
-        
+
         m_descriptors = new OneWayDescriptor[m_train_feature_count];
-        
+
         InitializeDescriptors(src, features);
-        
+
     }
-    
+
 #if defined(_KDTREE)
     void OneWayDescriptorBase::ConvertDescriptorsArrayToTree()
     {
@@ -1972,18 +1972,18 @@ namespace cv{
         if (n<1)
             return;
         int pca_dim_low = this->GetDescriptor(0)->GetPCADimLow();
-        
+
         //if (!m_pca_descriptors_matrix)
-        //     m_pca_descriptors_matrix = new ::cvflann::Matrix<float>(n*m_pose_count,pca_dim_low);
+        //  m_pca_descriptors_matrix = new ::cvflann::Matrix<float>(n*m_pose_count,pca_dim_low);
         //else
         //{
-        //     if ((m_pca_descriptors_matrix->cols != pca_dim_low)&&(m_pca_descriptors_matrix->rows != n*m_pose_count))
-        //     {
-        //             delete m_pca_descriptors_matrix;
-        //             m_pca_descriptors_matrix = new ::cvflann::Matrix<float>(n*m_pose_count,pca_dim_low);
-        //     }
+        //  if ((m_pca_descriptors_matrix->cols != pca_dim_low)&&(m_pca_descriptors_matrix->rows != n*m_pose_count))
+        //  {
+        //      delete m_pca_descriptors_matrix;
+        //      m_pca_descriptors_matrix = new ::cvflann::Matrix<float>(n*m_pose_count,pca_dim_low);
+        //  }
         //}
-        
+
         m_pca_descriptors_matrix = cvCreateMat(n*m_pose_count,pca_dim_low,CV_32FC1);
         for (int i=0;i<n;i++)
         {
@@ -1997,7 +1997,7 @@ namespace cv{
             }
         }
         cv::Mat pca_descriptors_mat(m_pca_descriptors_matrix,false);
-        
+
         //::cvflann::KDTreeIndexParams params;
         //params.trees = 1;
         //m_pca_descriptors_tree = new KDTree(pca_descriptors_mat);
@@ -2006,25 +2006,25 @@ namespace cv{
         //m_pca_descriptors_tree->buildIndex();
     }
 #endif
-    
+
     void OneWayDescriptorObject::Allocate(int train_feature_count, int object_feature_count)
     {
         OneWayDescriptorBase::Allocate(train_feature_count);
         m_object_feature_count = object_feature_count;
-        
+
         m_part_id = new int[m_object_feature_count];
     }
-    
-    
+
+
     void OneWayDescriptorObject::InitializeObjectDescriptors(IplImage* train_image, const vector<KeyPoint>& features,
                                                              const char* feature_label, int desc_start_idx, float scale, int is_background)
     {
         InitializeDescriptors(train_image, features, feature_label, desc_start_idx);
-        
+
         for(int i = 0; i < (int)features.size(); i++)
         {
             CvPoint center = features[i].pt;
-            
+
             if(!is_background)
             {
                 // remember descriptor part id
@@ -2034,12 +2034,12 @@ namespace cv{
         }
         cvResetImageROI(train_image);
     }
-    
+
     int OneWayDescriptorObject::IsDescriptorObject(int desc_idx) const
     {
         return desc_idx < m_object_feature_count ? 1 : 0;
     }
-    
+
     int OneWayDescriptorObject::MatchPointToPart(CvPoint pt) const
     {
         int idx = -1;
@@ -2052,23 +2052,23 @@ namespace cv{
                 break;
             }
         }
-        
+
         return idx;
     }
-    
+
     int OneWayDescriptorObject::GetDescriptorPart(int desc_idx) const
     {
         //    return MatchPointToPart(GetDescriptor(desc_idx)->GetCenter());
         return desc_idx < m_object_feature_count ? m_part_id[desc_idx] : -1;
     }
-    
+
     OneWayDescriptorObject::OneWayDescriptorObject(CvSize patch_size, int pose_count, const char* train_path,
                                                    const char* pca_config, const char* pca_hr_config, const char* pca_desc_config, int pyr_levels) :
     OneWayDescriptorBase(patch_size, pose_count, train_path, pca_config, pca_hr_config, pca_desc_config, pyr_levels)
     {
         m_part_id = 0;
     }
-    
+
     OneWayDescriptorObject::OneWayDescriptorObject(CvSize patch_size, int pose_count, const string &pca_filename,
                                                    const string &train_path, const string &images_list, float _scale_min, float _scale_max, float _scale_step, int pyr_levels) :
     OneWayDescriptorBase(patch_size, pose_count, pca_filename, train_path, images_list, _scale_min, _scale_max, _scale_step, pyr_levels)
@@ -2081,7 +2081,7 @@ namespace cv{
         if (m_part_id)
             delete []m_part_id;
     }
-    
+
     vector<KeyPoint> OneWayDescriptorObject::_GetLabeledFeatures() const
     {
         vector<KeyPoint> features;
@@ -2089,10 +2089,10 @@ namespace cv{
         {
             features.push_back(m_train_features[i]);
         }
-        
+
         return features;
     }
-    
+
     void eigenvector2image(CvMat* eigenvector, IplImage* img)
     {
         CvRect roi = cvGetImageROI(img);
@@ -2150,11 +2150,11 @@ namespace cv{
             cvReleaseMat(&_eigenvectors);
         }
     }
-    
+
     /****************************************************************************************\
      *                                OneWayDescriptorMatcher                                  *
      \****************************************************************************************/
-    
+
     OneWayDescriptorMatcher::Params::Params( int _poseCount, Size _patchSize, string _pcaFilename,
                                             string _trainPath, string _trainImagesList,
                                             float _minScale, float _maxScale, float _stepScale ) :
@@ -2162,45 +2162,45 @@ namespace cv{
     trainPath(_trainPath), trainImagesList(_trainImagesList),
     minScale(_minScale), maxScale(_maxScale), stepScale(_stepScale)
     {}
-    
-    
+
+
     OneWayDescriptorMatcher::OneWayDescriptorMatcher( const Params& _params)
     {
         initialize(_params);
     }
-    
+
     OneWayDescriptorMatcher::~OneWayDescriptorMatcher()
     {}
-    
+
     void OneWayDescriptorMatcher::initialize( const Params& _params, const Ptr<OneWayDescriptorBase>& _base )
     {
         clear();
-        
+
         if( _base.empty() )
             base = _base;
-        
+
         params = _params;
     }
-    
+
     void OneWayDescriptorMatcher::clear()
     {
         GenericDescriptorMatcher::clear();
-        
+
         prevTrainCount = 0;
         if( !base.empty() )
             base->clear();
     }
-    
+
     void OneWayDescriptorMatcher::train()
     {
         if( base.empty() || prevTrainCount < (int)trainPointCollection.keypointCount() )
         {
             base = new OneWayDescriptorObject( params.patchSize, params.poseCount, params.pcaFilename,
                                               params.trainPath, params.trainImagesList, params.minScale, params.maxScale, params.stepScale );
-            
+
             base->Allocate( (int)trainPointCollection.keypointCount() );
             prevTrainCount = (int)trainPointCollection.keypointCount();
-            
+
             const vector<vector<KeyPoint> >& points = trainPointCollection.getKeypoints();
             int count = 0;
             for( size_t i = 0; i < points.size(); i++ )
@@ -2209,26 +2209,26 @@ namespace cv{
                 for( size_t j = 0; j < points[i].size(); j++ )
                     base->InitializeDescriptor( count++, &_image, points[i][j], "" );
             }
-            
+
 #if defined(_KDTREE)
             base->ConvertDescriptorsArrayToTree();
 #endif
         }
     }
-    
+
     bool OneWayDescriptorMatcher::isMaskSupported()
     {
         return false;
     }
-    
+
     void OneWayDescriptorMatcher::knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
                                                vector<vector<DMatch> >& matches, int knn,
                                                const vector<Mat>& /*masks*/, bool /*compactResult*/ )
     {
         train();
-        
+
         CV_Assert( knn == 1 ); // knn > 1 unsupported because of bug in OneWayDescriptorBase for this case
-        
+
         matches.resize( queryKeypoints.size() );
         IplImage _qimage = queryImage;
         for( size_t i = 0; i < queryKeypoints.size(); i++ )
@@ -2239,13 +2239,13 @@ namespace cv{
             matches[i].push_back( DMatch((int)i, descIdx, distance) );
         }
     }
-    
+
     void OneWayDescriptorMatcher::radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
                                                   vector<vector<DMatch> >& matches, float maxDistance,
                                                   const vector<Mat>& /*masks*/, bool /*compactResult*/ )
     {
         train();
-        
+
         matches.resize( queryKeypoints.size() );
         IplImage _qimage = queryImage;
         for( size_t i = 0; i < queryKeypoints.size(); i++ )
@@ -2257,33 +2257,33 @@ namespace cv{
                 matches[i].push_back( DMatch((int)i, descIdx, distance) );
         }
     }
-    
+
     void OneWayDescriptorMatcher::read( const FileNode &fn )
     {
         base = new OneWayDescriptorObject( params.patchSize, params.poseCount, string (), string (), string (),
                                           params.minScale, params.maxScale, params.stepScale );
         base->Read (fn);
     }
-    
+
     void OneWayDescriptorMatcher::write( FileStorage& fs ) const
     {
         base->Write (fs);
     }
-    
+
     bool OneWayDescriptorMatcher::empty() const
     {
         return base.empty() || base->empty();
     }
-    
+
     Ptr<GenericDescriptorMatcher> OneWayDescriptorMatcher::clone( bool emptyTrainData ) const
     {
         OneWayDescriptorMatcher* matcher = new OneWayDescriptorMatcher( params );
-        
+
         if( !emptyTrainData )
         {
             CV_Error( CV_StsNotImplemented, "deep clone functionality is not implemented, because "
                      "OneWayDescriptorBase has not copy constructor or clone method ");
-            
+
             //matcher->base;
             matcher->params = params;
             matcher->prevTrainCount = prevTrainCount;
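
The one_way.cpp hunks above are almost entirely whitespace normalization: hard tabs become spaces and trailing blanks are stripped, so the compiled code does not change. The one substantive edit is giving extractPatches internal linkage with static. Under GCC's -Wmissing-declarations, which this commit appears to enable, defining a non-static function without a prior prototype draws a warning; a minimal sketch of the pattern, using hypothetical helper names:

    // helper.cpp, compiled with: g++ -c -Wmissing-declarations helper.cpp
    #include <vector>

    // warns under -Wmissing-declarations: no previous declaration for sumAll
    int sumAll(const std::vector<int>& v)
    {
        int s = 0;
        for (size_t i = 0; i < v.size(); ++i) s += v[i];
        return s;
    }

    // silent: static gives internal linkage, so no separate declaration is expected
    static int sumUsed(const std::vector<int>& v)
    {
        int s = 0;
        for (size_t i = 0; i < v.size(); ++i) s += v[i];
        return s;
    }

    // prototype followed by definition: also silent
    int entryPoint(const std::vector<int>& v);
    int entryPoint(const std::vector<int>& v) { return sumUsed(v); }
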
index 7c1993b..d919ca3 100644
 #ifndef __OPENCV_PRECOMP_H__
 #define __OPENCV_PRECOMP_H__
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4251 4710 4711 4514 4996 )
 #endif
 
-#ifdef HAVE_CVCONFIG_H 
+#ifdef HAVE_CVCONFIG_H
 #include "cvconfig.h"
 #endif
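
The preprocessor hunk above is the standard fix for GCC's -Wundef: with the original test, builds where _MSC_VER is not defined evaluate an undefined identifier inside #if (it is silently treated as 0) and -Wundef reports it on every inclusion. Guarding the comparison with defined short-circuits the expression, so non-MSVC compilers never look at the macro's value. A minimal before/after sketch, assuming a translation unit compiled with g++ -Wundef:

    // before: -Wundef warns that "_MSC_VER" is not defined
    #if _MSC_VER >= 1200
    #pragma warning( disable: 4251 )
    #endif

    // after: the comparison is only evaluated when the macro actually exists
    #if defined _MSC_VER && _MSC_VER >= 1200
    #pragma warning( disable: 4251 )
    #endif

The trailing space dropped after HAVE_CVCONFIG_H in the same file is purely cosmetic.
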
 
index 1b15efe..dd14fe1 100644
@@ -65,7 +65,7 @@ typedef struct CvTSTrans
     float           angle;
 } CvTSTrans;
 
-void SET_TRANS_0(CvTSTrans *pT)
+static void SET_TRANS_0(CvTSTrans *pT)
 {
     memset(pT,0,sizeof(CvTSTrans));
     pT->C = 1;
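
The hunk above applies the same internal-linkage fix to SET_TRANS_0, a helper used only inside its own translation unit. In C++ an anonymous namespace would work just as well, since -Wmissing-declarations should not complain about functions placed there; a short sketch of that alternative, with a hypothetical stand-in for CvTSTrans:

    #include <cstring>

    namespace   // anonymous namespace: internal linkage, no prototype required
    {
        struct Trans { float C; };      // stand-in for the real CvTSTrans

        void setTrans0(Trans* pT)
        {
            std::memset(pT, 0, sizeof(Trans));
            pT->C = 1;                  // same reset-then-set pattern as above
        }
    }

The commit keeps the plain static spelling, which is the smaller diff and matches the surrounding C-style code.
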
index 22f2fda..cf74108 100644
@@ -99,8 +99,9 @@ void icvReconstructPointsFor3View( CvMat* projMatr1,CvMat* projMatr2,CvMat* proj
 /*==========================================================================================*/
 /*                        Functions for calculation the tensor                              */
 /*==========================================================================================*/
+#if 0
 #if 1
-void fprintMatrix(FILE* file,CvMat* matrix)
+static void fprintMatrix(FILE* file,CvMat* matrix)
 {
     int i,j;
     fprintf(file,"\n");
@@ -116,7 +117,7 @@ void fprintMatrix(FILE* file,CvMat* matrix)
 #endif
 /*==========================================================================================*/
 
-void icvNormalizePoints( CvMat* points, CvMat* normPoints,CvMat* cameraMatr )
+static void icvNormalizePoints( CvMat* points, CvMat* normPoints,CvMat* cameraMatr )
 {
     /* Normalize image points using camera matrix */
 
@@ -169,7 +170,7 @@ void icvNormalizePoints( CvMat* points, CvMat* normPoints,CvMat* cameraMatr )
 
     return;
 }
-
+#endif
 
 /*=====================================================================================*/
 /*
@@ -405,7 +406,7 @@ int icvComputeProjectMatrices6Points( CvMat* points1,CvMat* points2,CvMat* point
 }
 
 /*==========================================================================================*/
-int icvGetRandNumbers(int range,int count,int* arr)
+static int icvGetRandNumbers(int range,int count,int* arr)
 {
     /* Generate random numbers [0,range-1] */
 
@@ -454,7 +455,7 @@ int icvGetRandNumbers(int range,int count,int* arr)
     return 1;
 }
 /*==========================================================================================*/
-void icvSelectColsByNumbers(CvMat* srcMatr, CvMat* dstMatr, int* indexes,int number)
+static void icvSelectColsByNumbers(CvMat* srcMatr, CvMat* dstMatr, int* indexes,int number)
 {
 
     CV_FUNCNAME( "icvSelectColsByNumbers" );
@@ -501,7 +502,7 @@ void icvSelectColsByNumbers(CvMat* srcMatr, CvMat* dstMatr, int* indexes,int num
 }
 
 /*==========================================================================================*/
-void icvProject4DPoints(CvMat* points4D,CvMat* projMatr, CvMat* projPoints)
+static void icvProject4DPoints(CvMat* points4D,CvMat* projMatr, CvMat* projPoints)
 {
 
     CvMat* tmpProjPoints = 0;
@@ -584,7 +585,8 @@ void icvProject4DPoints(CvMat* points4D,CvMat* projMatr, CvMat* projPoints)
     return;
 }
 /*==========================================================================================*/
-int icvCompute3ProjectMatricesNPointsStatus( CvMat** points,/* 3 arrays of points on image  */
+#if 0
+static int icvCompute3ProjectMatricesNPointsStatus( CvMat** points,/* 3 arrays of points on image  */
                                              CvMat** projMatrs,/* array of 3 prejection matrices */
                                              CvMat** statuses,/* 3 arrays of status of points */
                                              double threshold,/* Threshold for good point */
@@ -783,6 +785,7 @@ int icvCompute3ProjectMatricesNPointsStatus( CvMat** points,/* 3 arrays of point
     return numProjMatrs;
 
 }
+#endif
 
 /*==========================================================================================*/
 int icvComputeProjectMatricesNPoints(  CvMat* points1,CvMat* points2,CvMat* points3,
@@ -2350,8 +2353,8 @@ void ReconstructPointsFor3View_bySolve( CvMat* projMatr1,CvMat* projMatr2,CvMat*
 #endif
 
 /*==========================================================================================*/
-
-void icvComputeCameraExrinnsicByPosition(CvMat* camPos, CvMat* rotMatr, CvMat* transVect)
+#if 0
+static void icvComputeCameraExrinnsicByPosition(CvMat* camPos, CvMat* rotMatr, CvMat* transVect)
 {
     /* We know position of camera. we must to compute rotate matrix and translate vector */
 
@@ -2468,7 +2471,7 @@ void icvComputeCameraExrinnsicByPosition(CvMat* camPos, CvMat* rotMatr, CvMat* t
 
 /*==========================================================================================*/
 
-void FindTransformForProjectMatrices(CvMat* projMatr1,CvMat* projMatr2,CvMat* rotMatr,CvMat* transVect)
+static void FindTransformForProjectMatrices(CvMat* projMatr1,CvMat* projMatr2,CvMat* rotMatr,CvMat* transVect)
 {
     /* Computes homography for project matrix be "canonical" form */
     CV_FUNCNAME( "computeProjMatrHomography" );
@@ -2586,7 +2589,7 @@ void icvComputeQknowPrincipalPoint(int numImages, CvMat **projMatrs,CvMat *matrQ
 /* Part with metric reconstruction */
 
 #if 1
-void icvComputeQ(int numMatr, CvMat** projMatr, CvMat** cameraMatr, CvMat* matrQ)
+static void icvComputeQ(int numMatr, CvMat** projMatr, CvMat** cameraMatr, CvMat* matrQ)
 {
     /* K*K' = P*Q*P' */
     /* try to solve Q by linear method */
@@ -2731,7 +2734,7 @@ void icvComputeQ(int numMatr, CvMat** projMatr, CvMat** cameraMatr, CvMat* matrQ
 #endif
 /*-----------------------------------------------------------------------------------------------------*/
 
-void icvDecomposeQ(CvMat* /*matrQ*/,CvMat* /*matrH*/)
+static void icvDecomposeQ(CvMat* /*matrQ*/,CvMat* /*matrH*/)
 {
 #if 0
     /* Use SVD to decompose matrix Q=H*I*H' */
@@ -2789,3 +2792,5 @@ void icvDecomposeQ(CvMat* /*matrQ*/,CvMat* /*matrH*/)
 #endif
 }
 
+#endif
+
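
In this file, which looks like the legacy projective-reconstruction code (icvComputeProjectMatrices6Points and the trifocal-tensor helpers), the pattern is broader: helpers that are still called get static, while helpers with no remaining callers (fprintMatrix, icvNormalizePoints, icvCompute3ProjectMatricesNPointsStatus, icvComputeCameraExrinnsicByPosition and the functions after it) are fenced off with #if 0 ... #endif. The ordering matters: once a function is static, -Wunused-function (part of -Wall) flags it if nothing in the translation unit calls it, so dead helpers have to be compiled out rather than merely given internal linkage. A small sketch of that interaction, with hypothetical names:

    // g++ -c -Wall dead_code.cpp

    static int usedHelper(int x)        // fine: called below
    {
        return 2 * x;
    }

    #if 0                               // compiled out: no -Wunused-function warning,
    static int deadHelper(int x)        // but the code stays in the file for reference
    {
        return x + 1;
    }
    #endif

    int entryPoint(int x)               // assumed to be declared in a header elsewhere
    {
        return usedHelper(x);
    }

Left in as a live static definition, deadHelper would trigger -Wunused-function.
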
index 2dbc462..8b45ac4 100644
@@ -48,7 +48,7 @@
 #include "_vectrack.h"
 
 #define NUM_FACE_ELEMENTS   3
-enum 
+enum
 {
     MOUTH = 0,
     LEYE = 1,
@@ -69,7 +69,7 @@ int ChoiceTrackingFace2(CvFaceTracker* pTF, const int nElements, const CvFaceEle
 inline int GetEnergy(CvTrackingRect** ppNew, const CvTrackingRect* pPrev, CvPoint* ptTempl, CvRect* rTempl);
 inline int GetEnergy2(CvTrackingRect** ppNew, const CvTrackingRect* pPrev, CvPoint* ptTempl, CvRect* rTempl, int* element);
 inline double CalculateTransformationLMS3_0( CvPoint* pTemplPoints, CvPoint* pSrcPoints);
-inline double CalculateTransformationLMS3( CvPoint* pTemplPoints, 
+inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
                                    CvPoint* pSrcPoints,
                                    double*       pdbAverageScale,
                                    double*       pdbAverageRotate,
@@ -91,13 +91,13 @@ struct CvTrackingRect
     int Energy(const CvTrackingRect& prev)
     {
         int prev_color = 0 == prev.iColor ? iColor : prev.iColor;
-        iEnergy =      1 * pow2(r.width - prev.r.width) + 
-            1 * pow2(r.height - prev.r.height) + 
-            1 * pow2(iColor - prev_color) / 4 + 
-            - 1 * nRectsInThis + 
-            - 0 * nRectsOnTop + 
-            + 0 * nRectsOnLeft + 
-            + 0 * nRectsOnRight + 
+        iEnergy =   1 * pow2(r.width - prev.r.width) +
+            1 * pow2(r.height - prev.r.height) +
+            1 * pow2(iColor - prev_color) / 4 +
+            - 1 * nRectsInThis +
+            - 0 * nRectsOnTop +
+            + 0 * nRectsOnLeft +
+            + 0 * nRectsOnRight +
             + 0 * nRectsOnBottom;
         return iEnergy;
     }
@@ -110,10 +110,10 @@ struct CvFaceTracker
     double dbRotateDelta;
     double dbRotateAngle;
     CvPoint ptRotate;
-    
+
     CvPoint ptTempl[NUM_FACE_ELEMENTS];
     CvRect rTempl[NUM_FACE_ELEMENTS];
-    
+
     IplImage* imgGray;
     IplImage* imgThresh;
     CvMemStorage* mstgContours;
@@ -149,8 +149,8 @@ struct CvFaceTracker
         imgGray = cvCreateImage(cvSize(imgGray->width, imgGray->height), 8, 1);
         imgThresh = cvCreateImage(cvSize(imgGray->width, imgGray->height), 8, 1);
         mstgContours = cvCreateMemStorage();
-        if ((NULL == imgGray) || 
-            (NULL == imgThresh) || 
+        if ((NULL == imgGray) ||
+            (NULL == imgThresh) ||
             (NULL == mstgContours))
             return FALSE;
         return TRUE;
@@ -162,11 +162,11 @@ struct CvFaceTracker
         ReallocImage(&imgThresh, sz, 1);
         ptRotate = face[MOUTH].ptCenter;
         float m[6];
-        CvMat mat = cvMat( 2, 3, CV_32FC1, m ); 
+        CvMat mat = cvMat( 2, 3, CV_32FC1, m );
 
         if (NULL == imgGray || NULL == imgThresh)
             return FALSE;
-        
+
         /*m[0] = (float)cos(-dbRotateAngle*CV_PI/180.);
         m[1] = (float)sin(-dbRotateAngle*CV_PI/180.);
         m[2] = (float)ptRotate.x;
@@ -175,7 +175,7 @@ struct CvFaceTracker
         m[5] = (float)ptRotate.y;*/
         cv2DRotationMatrix( cvPointTo32f(ptRotate), -dbRotateAngle, 1., &mat );
         cvWarpAffine( img, imgGray, &mat );
-        
+
         if (NULL == mstgContours)
             mstgContours = cvCreateMemStorage();
         else
@@ -225,7 +225,7 @@ protected:
     void Energy();
 }; //class CvFaceElement
 
-int CV_CDECL CompareEnergy(const void* el1, const void* el2, void*)
+inline int CV_CDECL CompareEnergy(const void* el1, const void* el2, void*)
 {
     return ((CvTrackingRect*)el1)->iEnergy - ((CvTrackingRect*)el2)->iEnergy;
 }// int CV_CDECL CompareEnergy(const void* el1, const void* el2, void*)
@@ -322,7 +322,7 @@ void CvFaceElement::FindContours(IplImage* img, IplImage* thresh, int nLayers, i
                 }
                 for (CvSeq* internal = external->v_next; internal; internal = internal->h_next)
                 {
-                    cr.r = cvContourBoundingRect(internal);      
+                    cr.r = cvContourBoundingRect(internal);
                     Move(cr.r, roi.x, roi.y);
                     if (RectInRect(cr.r, m_rROI) && cr.r.width > dMinSize  && cr.r.height > dMinSize)
                     {
@@ -353,7 +353,7 @@ void CvFaceElement::MergeRects(int d)
         for (j = i + 1; j < nRects; j++)
         {
             CvTrackingRect* pRect2 = (CvTrackingRect*)(reader2.ptr);
-            if (abs(pRect1->ptCenter.y - pRect2->ptCenter.y) < d && 
+            if (abs(pRect1->ptCenter.y - pRect2->ptCenter.y) < d &&
                 abs(pRect1->r.height - pRect2->r.height) < d)
             {
                 CvTrackingRect rNew;
@@ -432,7 +432,7 @@ cvInitFaceTracker(CvFaceTracker* pFaceTracker, const IplImage* imgGray, CvRect*
         (NULL == pRects) ||
         (nRects < NUM_FACE_ELEMENTS))
         return NULL;
-    
+
     //int new_face = FALSE;
     CvFaceTracker* pFace = pFaceTracker;
     if (NULL == pFace)
@@ -468,7 +468,7 @@ cvTrackFace(CvFaceTracker* pFaceTracker, IplImage* imgGray, CvRect* pRects, int
     pFaceTracker->InitNextImage(imgGray);
     *ptRotate = pFaceTracker->ptRotate;
     *dbAngleRotate = pFaceTracker->dbRotateAngle;
-    
+
     int nElements = 16;
     double dx = pFaceTracker->face[LEYE].ptCenter.x - pFaceTracker->face[REYE].ptCenter.x;
     double dy = pFaceTracker->face[LEYE].ptCenter.y - pFaceTracker->face[REYE].ptCenter.y;
@@ -476,9 +476,9 @@ cvTrackFace(CvFaceTracker* pFaceTracker, IplImage* imgGray, CvRect* pRects, int
     int d = cvRound(0.25 * d_eyes);
     int dMinSize = d;
     int nRestarts = 0;
-    
+
     int elem;
-    
+
     CvFaceElement big_face[NUM_FACE_ELEMENTS];
 START:
     // init
@@ -533,7 +533,7 @@ START:
             }
             if (2 == elements)
                 find2 = TRUE;
-            else 
+            else
                 restart = TRUE;
         }
     }
@@ -563,13 +563,13 @@ RESTART:
             pFaceTracker->iTrackingFaceType = noel;
             found = TRUE;
         }
-        else 
+        else
         {
             restart = TRUE;
             goto RESTART;
         }
     }
-    
+
     if (found)
     {
         // angle by mouth & eyes
@@ -613,7 +613,7 @@ void ThresholdingParam(IplImage *imgGray, int iNumLayers, int &iMinLevel, int &i
 {
     assert(imgGray != NULL);
     assert(imgGray->nChannels == 1);
-    int i, j; 
+    int i, j;
     // create histogram
     int histImg[256] = {0};
     uchar* buffImg = (uchar*)imgGray->imageData;
@@ -760,7 +760,7 @@ int ChoiceTrackingFace2(CvFaceTracker* pTF, const int nElements, const CvFaceEle
         double prev_d02 = sqrt((double)prev_v02.x*prev_v02.x + prev_v02.y*prev_v02.y);
         double new_d01 = sqrt((double)new_v01.x*new_v01.x + new_v01.y*new_v01.y);
         double scale = templ_d01 / new_d01;
-        double new_d02 = templ_d02 / scale; 
+        double new_d02 = templ_d02 / scale;
         double sin_a = double(prev_v01.x * prev_v02.y - prev_v01.y * prev_v02.x) / (prev_d01 * prev_d02);
         double cos_a = cos(asin(sin_a));
         double x = double(new_v01.x) * cos_a - double(new_v01.y) * sin_a;
@@ -806,12 +806,12 @@ inline int GetEnergy(CvTrackingRect** ppNew, const CvTrackingRect* pPrev, CvPoin
     double h_mouth = double(ppNew[MOUTH]->r.height) * scale;
     energy +=
         int(512.0 * (e_prev + 16.0 * e_templ)) +
-        4 * pow2(ppNew[LEYE]->r.width - ppNew[REYE]->r.width) + 
-        4 * pow2(ppNew[LEYE]->r.height - ppNew[REYE]->r.height) + 
-        4 * (int)pow(w_eye - double(rTempl[LEYE].width + rTempl[REYE].width) / 2.0, 2) + 
-        2 * (int)pow(h_eye - double(rTempl[LEYE].height + rTempl[REYE].height) / 2.0, 2) + 
-        1 * (int)pow(w_mouth - double(rTempl[MOUTH].width), 2) + 
-        1 * (int)pow(h_mouth - double(rTempl[MOUTH].height), 2) + 
+        4 * pow2(ppNew[LEYE]->r.width - ppNew[REYE]->r.width) +
+        4 * pow2(ppNew[LEYE]->r.height - ppNew[REYE]->r.height) +
+        4 * (int)pow(w_eye - double(rTempl[LEYE].width + rTempl[REYE].width) / 2.0, 2) +
+        2 * (int)pow(h_eye - double(rTempl[LEYE].height + rTempl[REYE].height) / 2.0, 2) +
+        1 * (int)pow(w_mouth - double(rTempl[MOUTH].width), 2) +
+        1 * (int)pow(h_mouth - double(rTempl[MOUTH].height), 2) +
         0;
     return energy;
 }
@@ -832,20 +832,20 @@ inline int GetEnergy2(CvTrackingRect** ppNew, const CvTrackingRect* pPrev, CvPoi
     double h0 = (double)ppNew[element[0]]->r.height * scale_templ;
     double w1 = (double)ppNew[element[1]]->r.width * scale_templ;
     double h1 = (double)ppNew[element[1]]->r.height * scale_templ;
-    
+
     int energy = ppNew[element[0]]->iEnergy + ppNew[element[1]]->iEnergy +
-        - 2 * (ppNew[element[0]]->nRectsInThis - ppNew[element[1]]->nRectsInThis) + 
+        - 2 * (ppNew[element[0]]->nRectsInThis - ppNew[element[1]]->nRectsInThis) +
         (int)pow(w0 - (double)rTempl[element[0]].width, 2) +
         (int)pow(h0 - (double)rTempl[element[0]].height, 2) +
         (int)pow(w1 - (double)rTempl[element[1]].width, 2) +
         (int)pow(h1 - (double)rTempl[element[1]].height, 2) +
         (int)pow(new_d - prev_d, 2) +
         0;
-    
+
     return energy;
 }
 
-inline double CalculateTransformationLMS3( CvPoint* pTemplPoints, 
+inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
                                    CvPoint* pSrcPoints,
                                    double*       pdbAverageScale,
                                    double*       pdbAverageRotate,
@@ -866,41 +866,41 @@ inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
     double dbYt = double(pTemplPoints[0].y + pTemplPoints[1].y + pTemplPoints[2].y ) / 3.0;
     double dbXs = double(pSrcPoints[0].x + pSrcPoints[1].x + pSrcPoints[2].x) / 3.0;
     double dbYs = double(pSrcPoints[0].y + pSrcPoints[1].y + pSrcPoints[2].y) / 3.0;
-    
+
     double dbXtXt = double(pow2(pTemplPoints[0].x) + pow2(pTemplPoints[1].x) + pow2(pTemplPoints[2].x)) / 3.0;
     double dbYtYt = double(pow2(pTemplPoints[0].y) + pow2(pTemplPoints[1].y) + pow2(pTemplPoints[2].y)) / 3.0;
-    
+
     double dbXsXs = double(pow2(pSrcPoints[0].x) + pow2(pSrcPoints[1].x) + pow2(pSrcPoints[2].x)) / 3.0;
     double dbYsYs = double(pow2(pSrcPoints[0].y) + pow2(pSrcPoints[1].y) + pow2(pSrcPoints[2].y)) / 3.0;
-    
-    double dbXtXs = double(pTemplPoints[0].x * pSrcPoints[0].x + 
-        pTemplPoints[1].x * pSrcPoints[1].x + 
+
+    double dbXtXs = double(pTemplPoints[0].x * pSrcPoints[0].x +
+        pTemplPoints[1].x * pSrcPoints[1].x +
         pTemplPoints[2].x * pSrcPoints[2].x) / 3.0;
-    double dbYtYs = double(pTemplPoints[0].y * pSrcPoints[0].y + 
-        pTemplPoints[1].y * pSrcPoints[1].y + 
+    double dbYtYs = double(pTemplPoints[0].y * pSrcPoints[0].y +
+        pTemplPoints[1].y * pSrcPoints[1].y +
         pTemplPoints[2].y * pSrcPoints[2].y) / 3.0;
-    
-    double dbXtYs = double(pTemplPoints[0].x * pSrcPoints[0].y + 
-        pTemplPoints[1].x * pSrcPoints[1].y + 
+
+    double dbXtYs = double(pTemplPoints[0].x * pSrcPoints[0].y +
+        pTemplPoints[1].x * pSrcPoints[1].y +
         pTemplPoints[2].x * pSrcPoints[2].y) / 3.0;
-    double dbYtXs = double(pTemplPoints[0].y * pSrcPoints[0].x + 
-        pTemplPoints[1].y * pSrcPoints[1].x + 
+    double dbYtXs = double(pTemplPoints[0].y * pSrcPoints[0].x +
+        pTemplPoints[1].y * pSrcPoints[1].x +
         pTemplPoints[2].y * pSrcPoints[2].x ) / 3.0;
-    
+
     dbXtXt -= dbXt * dbXt;
     dbYtYt -= dbYt * dbYt;
-    
+
     dbXsXs -= dbXs * dbXs;
     dbYsYs -= dbYs * dbYs;
-    
+
     dbXtXs -= dbXt * dbXs;
     dbYtYs -= dbYt * dbYs;
-    
+
     dbXtYs -= dbXt * dbYs;
     dbYtXs -= dbYt * dbXs;
-    
+
     dbAverageRotate = atan2( dbXtYs - dbYtXs, dbXtXs + dbYtYs );
-    
+
     double cosR = cos(dbAverageRotate);
     double sinR = sin(dbAverageRotate);
     double del = dbXsXs + dbYsYs;
@@ -909,15 +909,15 @@ inline double CalculateTransformationLMS3( CvPoint* pTemplPoints,
         dbAverageScale = (double(dbXtXs + dbYtYs) * cosR + double(dbXtYs - dbYtXs) * sinR) / del;
         dbLMS = dbXtXt + dbYtYt - ((double)pow(dbXtXs + dbYtYs,2) + (double)pow(dbXtYs - dbYtXs,2)) / del;
     }
-    
+
     dbAverageShiftX = double(dbXt) - dbAverageScale * (double(dbXs) * cosR + double(dbYs) * sinR);
     dbAverageShiftY = double(dbYt) - dbAverageScale * (double(dbYs) * cosR - double(dbXs) * sinR);
-    
+
     if( pdbAverageScale != NULL ) *pdbAverageScale = dbAverageScale;
     if( pdbAverageRotate != NULL ) *pdbAverageRotate = dbAverageRotate;
     if( pdbAverageShiftX != NULL ) *pdbAverageShiftX = dbAverageShiftX;
     if( pdbAverageShiftY != NULL ) *pdbAverageShiftY = dbAverageShiftY;
-    
+
     assert(dbLMS >= 0);
     return dbLMS;
 }
@@ -933,39 +933,39 @@ inline double CalculateTransformationLMS3_0( CvPoint* pTemplPoints, CvPoint* pSr
     double dbYt = double(pTemplPoints[0].y + pTemplPoints[1].y + pTemplPoints[2].y ) / 3.0;
     double dbXs = double(pSrcPoints[0].x + pSrcPoints[1].x + pSrcPoints[2].x) / 3.0;
     double dbYs = double(pSrcPoints[0].y + pSrcPoints[1].y + pSrcPoints[2].y) / 3.0;
-    
+
     double dbXtXt = double(pow2(pTemplPoints[0].x) + pow2(pTemplPoints[1].x) + pow2(pTemplPoints[2].x)) / 3.0;
     double dbYtYt = double(pow2(pTemplPoints[0].y) + pow2(pTemplPoints[1].y) + pow2(pTemplPoints[2].y)) / 3.0;
-    
+
     double dbXsXs = double(pow2(pSrcPoints[0].x) + pow2(pSrcPoints[1].x) + pow2(pSrcPoints[2].x)) / 3.0;
     double dbYsYs = double(pow2(pSrcPoints[0].y) + pow2(pSrcPoints[1].y) + pow2(pSrcPoints[2].y)) / 3.0;
-    
-    double dbXtXs = double(pTemplPoints[0].x * pSrcPoints[0].x + 
-        pTemplPoints[1].x * pSrcPoints[1].x + 
+
+    double dbXtXs = double(pTemplPoints[0].x * pSrcPoints[0].x +
+        pTemplPoints[1].x * pSrcPoints[1].x +
         pTemplPoints[2].x * pSrcPoints[2].x) / 3.0;
-    double dbYtYs = double(pTemplPoints[0].y * pSrcPoints[0].y + 
-        pTemplPoints[1].y * pSrcPoints[1].y + 
+    double dbYtYs = double(pTemplPoints[0].y * pSrcPoints[0].y +
+        pTemplPoints[1].y * pSrcPoints[1].y +
         pTemplPoints[2].y * pSrcPoints[2].y) / 3.0;
-    
-    double dbXtYs = double(pTemplPoints[0].x * pSrcPoints[0].y + 
-        pTemplPoints[1].x * pSrcPoints[1].y + 
+
+    double dbXtYs = double(pTemplPoints[0].x * pSrcPoints[0].y +
+        pTemplPoints[1].x * pSrcPoints[1].y +
         pTemplPoints[2].x * pSrcPoints[2].y) / 3.0;
-    double dbYtXs = double(pTemplPoints[0].y * pSrcPoints[0].x + 
-        pTemplPoints[1].y * pSrcPoints[1].x + 
+    double dbYtXs = double(pTemplPoints[0].y * pSrcPoints[0].x +
+        pTemplPoints[1].y * pSrcPoints[1].x +
         pTemplPoints[2].y * pSrcPoints[2].x ) / 3.0;
-    
+
     dbXtXt -= dbXt * dbXt;
     dbYtYt -= dbYt * dbYt;
-    
+
     dbXsXs -= dbXs * dbXs;
     dbYsYs -= dbYs * dbYs;
-    
+
     dbXtXs -= dbXt * dbXs;
     dbYtYs -= dbYt * dbYs;
-    
+
     dbXtYs -= dbXt * dbYs;
     dbYtXs -= dbYt * dbXs;
-    
+
     double del = dbXsXs + dbYsYs;
     if( del != 0 )
         dbLMS = dbXtXt + dbYtYt - ((double)pow(dbXtXs + dbYtYs,2) + (double)pow(dbXtYs - dbYtXs,2)) / del;
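
The face-tracker changes above are again dominated by trailing-whitespace removal; the only edit the compiler notices is marking the comparison callback CompareEnergy as inline. For C++, -Wmissing-declarations does not warn about inline functions, so the definition no longer needs a prototype, and because the function is still passed by address (presumably to OpenCV's sequence sort) the compiler keeps an out-of-line copy, leaving behaviour unchanged. A hedged sketch of the same idea with a hypothetical comparator:

    #include <cstdlib>

    // inline definition: accepted without a separate declaration, yet
    // &compareInts is still a usable function pointer
    inline int compareInts(const void* a, const void* b)
    {
        return *static_cast<const int*>(a) - *static_cast<const int*>(b);
    }

    void sortBuffer(int* data, std::size_t n)
    {
        std::qsort(data, n, sizeof(int), &compareInts);
    }
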
index 002f240..3ef3e5e 100644
@@ -12,7 +12,7 @@ using namespace std;
 static CV_IMPLEMENT_QSORT_EX( icvSortFloat, float, CV_CMP_FLOAT, float)\r
 \r
 //===========================================================================\r
-string ToString(int i)\r
+static string ToString(int i)\r
 {\r
     stringstream tmp;\r
     tmp << i;\r
@@ -25,7 +25,7 @@ string ToString(int i)
 //----------------------------- CvGBTreesParams -----------------------------\r
 //===========================================================================\r
 \r
-CvGBTreesParams::CvGBTreesParams() \r
+CvGBTreesParams::CvGBTreesParams()\r
             : CvDTreeParams( 3, 10, 0, false, 10, 0, false, false, 0 )\r
 {\r
     weak_count = 200;\r
@@ -36,8 +36,8 @@ CvGBTreesParams::CvGBTreesParams()
 \r
 //===========================================================================\r
 \r
-CvGBTreesParams::CvGBTreesParams( int _loss_function_type, int _weak_count, \r
-                         float _shrinkage, float _subsample_portion, \r
+CvGBTreesParams::CvGBTreesParams( int _loss_function_type, int _weak_count,\r
+                         float _shrinkage, float _subsample_portion,\r
                          int _max_depth, bool _use_surrogates )\r
             : CvDTreeParams( 3, 10, 0, false, 10, 0, false, false, 0 )\r
 {\r
@@ -64,7 +64,7 @@ CvGBTrees::CvGBTrees()
     class_labels = 0;\r
     class_count = 1;\r
     delta = 0.0f;\r
-    \r
+\r
     clear();\r
 }\r
 \r
@@ -88,10 +88,10 @@ void CvGBTrees::clear()
         //data->shared = false;\r
         for (int i=0; i<class_count; ++i)\r
         {\r
-                       int weak_count = cvSliceLength( slice, weak[i] );\r
+            int weak_count = cvSliceLength( slice, weak[i] );\r
             if ((weak[i]) && (weak_count))\r
             {\r
-                cvStartReadSeq( weak[i], &reader ); \r
+                cvStartReadSeq( weak[i], &reader );\r
                 cvSetSeqReaderPos( &reader, slice.start_index );\r
                 for (int j=0; j<weak_count; ++j)\r
                 {\r
@@ -106,7 +106,7 @@ void CvGBTrees::clear()
             if (weak[i]) cvReleaseMemStorage( &(weak[i]->storage) );\r
         delete[] weak;\r
     }\r
-    if (data) \r
+    if (data)\r
     {\r
         data->shared = false;\r
         delete data;\r
@@ -165,7 +165,7 @@ bool CvGBTrees::problem_type() const
 \r
 //===========================================================================\r
 \r
-bool \r
+bool\r
 CvGBTrees::train( CvMLData* data, CvGBTreesParams params, bool update )\r
 {\r
     bool result;\r
@@ -218,14 +218,14 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
     }\r
 \r
     orig_response = cvCreateMat( 1, n, CV_32F );\r
-       int step = (_responses->cols > _responses->rows) ? 1 : _responses->step / CV_ELEM_SIZE(_responses->type);\r
+    int step = (_responses->cols > _responses->rows) ? 1 : _responses->step / CV_ELEM_SIZE(_responses->type);\r
     switch (CV_MAT_TYPE(_responses->type))\r
     {\r
         case CV_32FC1:\r
-               {\r
-                       for (int i=0; i<n; ++i)\r
+        {\r
+            for (int i=0; i<n; ++i)\r
                 orig_response->data.fl[i] = _responses->data.fl[i*step];\r
-               }; break;\r
+        }; break;\r
         case CV_32SC1:\r
         {\r
             for (int i=0; i<n; ++i)\r
@@ -250,7 +250,7 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
                         mask[j] = 1;\r
             }\r
         delete[] mask;\r
-    \r
+\r
         class_labels = cvCreateMat(1, class_count, CV_32S);\r
         class_labels->data.i[0] = int(orig_response->data.fl[0]);\r
         int j = 1;\r
@@ -274,14 +274,14 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
     if (_sample_idx)\r
     {\r
         int sample_idx_len = get_len(_sample_idx);\r
-        \r
+\r
         switch (CV_MAT_TYPE(_sample_idx->type))\r
         {\r
             case CV_32SC1:\r
             {\r
                 sample_idx = cvCreateMat( 1, sample_idx_len, CV_32S );\r
                 for (int i=0; i<sample_idx_len; ++i)\r
-                                       sample_idx->data.i[i] = _sample_idx->data.i[i];\r
+                    sample_idx->data.i[i] = _sample_idx->data.i[i];\r
             } break;\r
             case CV_8S:\r
             case CV_8U:\r
@@ -294,7 +294,7 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
                 for (int i=0; i<sample_idx_len; ++i)\r
                     if (int( _sample_idx->data.ptr[i] ))\r
                         sample_idx->data.i[active_samples_count++] = i;\r
-                    \r
+\r
             } break;\r
             default: CV_Error(CV_StsUnmatchedFormats, "_sample_idx should be a 32sC1, 8sC1 or 8uC1 vector.");\r
         }\r
@@ -335,14 +335,14 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
         storage = cvCreateMemStorage();\r
         weak[i] = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvDTree*), storage );\r
         storage = 0;\r
-    }    \r
+    }\r
 \r
     // subsample params and data\r
     rng = &cv::theRNG();\r
 \r
-       int samples_count = get_len(sample_idx);\r
+    int samples_count = get_len(sample_idx);\r
 \r
-    params.subsample_portion = params.subsample_portion <= FLT_EPSILON || \r
+    params.subsample_portion = params.subsample_portion <= FLT_EPSILON ||\r
         1 - params.subsample_portion <= FLT_EPSILON\r
         ? 1 : params.subsample_portion;\r
     int train_sample_count = cvFloor(params.subsample_portion * samples_count);\r
@@ -358,12 +358,12 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
         *subsample_test = cvMat( 1, test_sample_count, CV_32SC1,\r
                                  idx_data + train_sample_count );\r
     }\r
-    \r
+\r
     // training procedure\r
 \r
     for ( int i=0; i < params.weak_count; ++i )\r
     {\r
-               do_subsample();\r
+        do_subsample();\r
         for ( int k=0; k < class_count; ++k )\r
         {\r
             find_gradient(k);\r
@@ -387,21 +387,21 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
                         cvGetRow( data->train_data, &x, idx);\r
                     else\r
                         cvGetCol( data->train_data, &x, idx);\r
-                        \r
+\r
                     if (missing)\r
                     {\r
                         if (_tflag == CV_ROW_SAMPLE)\r
                             cvGetRow( missing, &x_miss, idx);\r
                         else\r
                             cvGetCol( missing, &x_miss, idx);\r
-                        \r
+\r
                         res = (float)tree->predict(&x, &x_miss)->value;\r
                     }\r
                     else\r
                     {\r
                         res = (float)tree->predict(&x)->value;\r
                     }\r
-                    sum_response_tmp->data.fl[idx + k*n] = \r
+                    sum_response_tmp->data.fl[idx + k*n] =\r
                                     sum_response->data.fl[idx + k*n] +\r
                                     params.shrinkage * res;\r
                 }\r
@@ -421,13 +421,13 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
     cvReleaseMat(&new_responses);\r
     data->free_train_data();\r
 \r
-       return true;\r
+    return true;\r
 \r
 } // CvGBTrees::train(...)\r
 \r
 //===========================================================================\r
 \r
-float Sign(float x)\r
+inline float Sign(float x)\r
   {\r
   if (x<0.0f) return -1.0f;\r
   else if (x>0.0f) return 1.0f;\r
@@ -484,7 +484,7 @@ void CvGBTrees::find_gradient(const int k)
                 residuals[i] = fabs(resp_data[idx] - current_data[idx]);\r
             }\r
             icvSortFloat(residuals, n, 0.0f);\r
-            \r
+\r
             delta = residuals[int(ceil(n*alpha))];\r
 \r
             for (int i=0; i<n; ++i)\r
@@ -506,7 +506,7 @@ void CvGBTrees::find_gradient(const int k)
                 int s_step = (sample_idx->cols > sample_idx->rows) ? 1\r
                              : sample_idx->step/CV_ELEM_SIZE(sample_idx->type);\r
                 int idx = *(sample_data + subsample_data[i]*s_step);\r
-            \r
+\r
                 for (int j=0; j<class_count; ++j)\r
                 {\r
                     double res;\r
@@ -516,14 +516,14 @@ void CvGBTrees::find_gradient(const int k)
                     exp_sfi += res;\r
                 }\r
                 int orig_label = int(resp_data[idx]);\r
-                               /*\r
+                /*\r
                 grad_data[idx] = (float)(!(k-class_labels->data.i[orig_label]+1)) -\r
                                  (float)(exp_fk / exp_sfi);\r
-                               */\r
-                               int ensemble_label = 0;\r
-                               while (class_labels->data.i[ensemble_label] - orig_label)\r
-                                       ensemble_label++;                               \r
-                               \r
+                */\r
+                int ensemble_label = 0;\r
+                while (class_labels->data.i[ensemble_label] - orig_label)\r
+                    ensemble_label++;\r
+\r
                 grad_data[idx] = (float)(!(k-ensemble_label)) -\r
                                  (float)(exp_fk / exp_sfi);\r
             }\r
@@ -550,19 +550,19 @@ void CvGBTrees::change_values(CvDTree* tree, const int _k)
 \r
     for (int i=0; i<get_len(subsample_train); ++i)\r
     {\r
-               int idx = *(sample_data + subsample_data[i]*s_step);\r
-               if (data->tflag == CV_ROW_SAMPLE)\r
+        int idx = *(sample_data + subsample_data[i]*s_step);\r
+        if (data->tflag == CV_ROW_SAMPLE)\r
             cvGetRow( data->train_data, &x, idx);\r
         else\r
             cvGetCol( data->train_data, &x, idx);\r
-            \r
+\r
         if (missing)\r
         {\r
             if (data->tflag == CV_ROW_SAMPLE)\r
                 cvGetRow( missing, &miss_x, idx);\r
             else\r
                 cvGetCol( missing, &miss_x, idx);\r
-            \r
+\r
             predictions[i] = tree->predict(&x, &miss_x);\r
         }\r
         else\r
@@ -585,7 +585,7 @@ void CvGBTrees::change_values(CvDTree* tree, const int _k)
         if (!samples_in_leaf) // It should not be done anyways! but...\r
         {\r
             leaves[i]->value = 0.0;\r
-            continue; \r
+            continue;\r
         }\r
 \r
         CvMat* leaf_idx = cvCreateMat(1, samples_in_leaf, CV_32S);\r
@@ -606,12 +606,12 @@ void CvGBTrees::change_values(CvDTree* tree, const int _k)
         int len = sum_response_tmp->cols;\r
         for (int j=0; j<get_len(leaf_idx); ++j)\r
         {\r
-            int idx = leaf_idx_data[j];        \r
+            int idx = leaf_idx_data[j];\r
             sum_response_tmp->data.fl[idx + _k*len] =\r
                                     sum_response->data.fl[idx + _k*len] +\r
                                     params.shrinkage * value;\r
         }\r
-        leaf_idx_data = 0;     \r
+        leaf_idx_data = 0;\r
         cvReleaseMat(&leaf_idx);\r
     }\r
 \r
@@ -634,13 +634,13 @@ void CvGBTrees::change_values(CvDTree* tree, const int _k)
 /*\r
 void CvGBTrees::change_values(CvDTree* tree, const int _k)\r
 {\r
-    \r
+\r
     CvDTreeNode** leaves;\r
     int leaves_count = 0;\r
-       int offset = _k*sum_response_tmp->cols;\r
-       CvMat leaf_idx;\r
-       leaf_idx.rows = 1;\r
-    \r
+    int offset = _k*sum_response_tmp->cols;\r
+    CvMat leaf_idx;\r
+    leaf_idx.rows = 1;\r
+\r
     leaves = GetLeaves( tree, leaves_count);\r
 \r
     for (int i=0; i<leaves_count; ++i)\r
@@ -650,14 +650,14 @@ void CvGBTrees::change_values(CvDTree* tree, const int _k)
         data->get_sample_indices(leaves[i], leaf_idx_data);\r
         //CvMat* leaf_idx = new CvMat();\r
         //cvInitMatHeader(leaf_idx, n, 1, CV_32S, leaf_idx_data);\r
-               leaf_idx.cols = n;\r
-               leaf_idx.data.i = leaf_idx_data;\r
+        leaf_idx.cols = n;\r
+        leaf_idx.data.i = leaf_idx_data;\r
 \r
         float value = find_optimal_value(&leaf_idx);\r
         leaves[i]->value = value;\r
-               float val = params.shrinkage * value;\r
+        float val = params.shrinkage * value;\r
+\r
 \r
-        \r
         for (int j=0; j<n; ++j)\r
         {\r
             int idx = leaf_idx_data[j] + offset;\r
@@ -665,9 +665,9 @@ void CvGBTrees::change_values(CvDTree* tree, const int _k)
         }\r
         //leaf_idx_data = 0;\r
         //cvReleaseMat(&leaf_idx);\r
-               leaf_idx.data.i = 0;\r
-               //delete leaf_idx;\r
-               delete[] leaf_idx_data;\r
+        leaf_idx.data.i = 0;\r
+        //delete leaf_idx;\r
+        delete[] leaf_idx_data;\r
     }\r
 \r
     // releasing the memory\r
@@ -707,7 +707,7 @@ float CvGBTrees::find_optimal_value( const CvMat* _Idx )
             for (int i=0; i<n; ++i, ++idx)\r
                 residuals[i] = (resp_data[*idx] - cur_data[*idx]);\r
             icvSortFloat(residuals, n, 0.0f);\r
-            if (n % 2) \r
+            if (n % 2)\r
                 gamma = residuals[n/2];\r
             else gamma = (residuals[n/2-1] + residuals[n/2]) / 2.0f;\r
             delete[] residuals;\r
@@ -748,7 +748,7 @@ float CvGBTrees::find_optimal_value( const CvMat* _Idx )
                 tmp1 += tmp;\r
                 tmp2 += fabs(tmp)*(1-fabs(tmp));\r
             };\r
-            if (tmp2 == 0) \r
+            if (tmp2 == 0)\r
             {\r
                 tmp2 = 1;\r
             }\r
@@ -818,7 +818,7 @@ void CvGBTrees::do_subsample()
 //===========================================================================\r
 \r
 float CvGBTrees::predict_serial( const CvMat* _sample, const CvMat* _missing,\r
-        CvMat* weak_responses, CvSlice slice, int k) const \r
+        CvMat* weak_responses, CvSlice slice, int k) const\r
 {\r
     float result = 0.0f;\r
 \r
@@ -827,10 +827,10 @@ float CvGBTrees::predict_serial( const CvMat* _sample, const CvMat* _missing,
     CvSeqReader reader;\r
     int weak_count = cvSliceLength( slice, weak[class_count-1] );\r
     CvDTree* tree;\r
-    \r
+\r
     if (weak_responses)\r
     {\r
-               if (CV_MAT_TYPE(weak_responses->type) != CV_32F)\r
+        if (CV_MAT_TYPE(weak_responses->type) != CV_32F)\r
             return 0.0f;\r
         if ((k >= 0) && (k<class_count) && (weak_responses->rows != 1))\r
             return 0.0f;\r
@@ -839,7 +839,7 @@ float CvGBTrees::predict_serial( const CvMat* _sample, const CvMat* _missing,
         if (weak_responses->cols != weak_count)\r
             return 0.0f;\r
     }\r
-    \r
+\r
     float* sum = new float[class_count];\r
     memset(sum, 0, class_count*sizeof(float));\r
 \r
@@ -847,7 +847,7 @@ float CvGBTrees::predict_serial( const CvMat* _sample, const CvMat* _missing,
     {\r
         if ((weak[i]) && (weak_count))\r
         {\r
-            cvStartReadSeq( weak[i], &reader ); \r
+            cvStartReadSeq( weak[i], &reader );\r
             cvSetSeqReaderPos( &reader, slice.start_index );\r
             for (int j=0; j<weak_count; ++j)\r
             {\r
@@ -859,7 +859,7 @@ float CvGBTrees::predict_serial( const CvMat* _sample, const CvMat* _missing,
             }\r
         }\r
     }\r
-    \r
+\r
     for (int i=0; i<class_count; ++i)\r
         sum[i] += base_value;\r
 \r
@@ -888,13 +888,13 @@ float CvGBTrees::predict_serial( const CvMat* _sample, const CvMat* _missing,
 \r
     delete[] sum;\r
 \r
-       /*\r
+    /*\r
     int orig_class_label = -1;\r
     for (int i=0; i<get_len(class_labels); ++i)\r
         if (class_labels->data.i[i] == class_label+1)\r
             orig_class_label = i;\r
-       */\r
-       int orig_class_label = class_labels->data.i[class_label];\r
+    */\r
+    int orig_class_label = class_labels->data.i[class_label];\r
 \r
     return float(orig_class_label);\r
 }\r
@@ -903,69 +903,71 @@ float CvGBTrees::predict_serial( const CvMat* _sample, const CvMat* _missing,
 class Tree_predictor\r
 {\r
 private:\r
-       pCvSeq* weak;\r
-       float* sum;\r
-       const int k;\r
-       const CvMat* sample;\r
-       const CvMat* missing;\r
+    pCvSeq* weak;\r
+    float* sum;\r
+    const int k;\r
+    const CvMat* sample;\r
+    const CvMat* missing;\r
     const float shrinkage;\r
-    \r
+\r
 #ifdef HAVE_TBB\r
     static tbb::spin_mutex SumMutex;\r
 #endif\r
 \r
 \r
 public:\r
-       Tree_predictor() : weak(0), sum(0), k(0), sample(0), missing(0), shrinkage(1.0f) {}\r
-       Tree_predictor(pCvSeq* _weak, const int _k, const float _shrinkage,\r
-                                  const CvMat* _sample, const CvMat* _missing, float* _sum ) :\r
-                                  weak(_weak), sum(_sum), k(_k), sample(_sample),\r
+    Tree_predictor() : weak(0), sum(0), k(0), sample(0), missing(0), shrinkage(1.0f) {}\r
+    Tree_predictor(pCvSeq* _weak, const int _k, const float _shrinkage,\r
+                   const CvMat* _sample, const CvMat* _missing, float* _sum ) :\r
+                   weak(_weak), sum(_sum), k(_k), sample(_sample),\r
                    missing(_missing), shrinkage(_shrinkage)\r
-       {}\r
-       \r
+    {}\r
+\r
     Tree_predictor( const Tree_predictor& p, cv::Split ) :\r
-                       weak(p.weak), sum(p.sum), k(p.k), sample(p.sample),\r
+            weak(p.weak), sum(p.sum), k(p.k), sample(p.sample),\r
             missing(p.missing), shrinkage(p.shrinkage)\r
-       {}\r
+    {}\r
+\r
+    Tree_predictor& operator=( const Tree_predictor& )\r
+    { return *this; }\r
 \r
-       Tree_predictor& operator=( const Tree_predictor& )\r
-       { return *this; }\r
-       \r
     virtual void operator()(const cv::BlockedRange& range) const\r
-       {\r
+    {\r
 #ifdef HAVE_TBB\r
         tbb::spin_mutex::scoped_lock lock;\r
 #endif\r
         CvSeqReader reader;\r
-               int begin = range.begin();\r
-               int end = range.end();\r
-               \r
-               int weak_count = end - begin;\r
-               CvDTree* tree;\r
-\r
-               for (int i=0; i<k; ++i)\r
-               {\r
-                       float tmp_sum = 0.0f;\r
-                       if ((weak[i]) && (weak_count))\r
-                       {\r
-                               cvStartReadSeq( weak[i], &reader ); \r
-                               cvSetSeqReaderPos( &reader, begin );\r
-                               for (int j=0; j<weak_count; ++j)\r
-                               {\r
-                                       CV_READ_SEQ_ELEM( tree, reader );\r
-                                       tmp_sum += shrinkage*(float)(tree->predict(sample, missing)->value);\r
-                               }\r
-                       }\r
+        int begin = range.begin();\r
+        int end = range.end();\r
+\r
+        int weak_count = end - begin;\r
+        CvDTree* tree;\r
+\r
+        for (int i=0; i<k; ++i)\r
+        {\r
+            float tmp_sum = 0.0f;\r
+            if ((weak[i]) && (weak_count))\r
+            {\r
+                cvStartReadSeq( weak[i], &reader );\r
+                cvSetSeqReaderPos( &reader, begin );\r
+                for (int j=0; j<weak_count; ++j)\r
+                {\r
+                    CV_READ_SEQ_ELEM( tree, reader );\r
+                    tmp_sum += shrinkage*(float)(tree->predict(sample, missing)->value);\r
+                }\r
+            }\r
 #ifdef HAVE_TBB\r
             lock.acquire(SumMutex);\r
-                       sum[i] += tmp_sum;\r
+            sum[i] += tmp_sum;\r
             lock.release();\r
 #else\r
             sum[i] += tmp_sum;\r
 #endif\r
-               }\r
-       } // Tree_predictor::operator()\r
-    \r
+        }\r
+    } // Tree_predictor::operator()\r
+\r
+    virtual ~Tree_predictor() {}\r
+\r
 }; // class Tree_predictor\r
 \r
 \r
@@ -976,28 +978,28 @@ tbb::spin_mutex Tree_predictor::SumMutex;
 \r
 \r
 float CvGBTrees::predict( const CvMat* _sample, const CvMat* _missing,\r
-            CvMat* /*weak_responses*/, CvSlice slice, int k) const \r
+            CvMat* /*weak_responses*/, CvSlice slice, int k) const\r
     {\r
         float result = 0.0f;\r
-           if (!weak) return 0.0f;\r
+        if (!weak) return 0.0f;\r
         float* sum = new float[class_count];\r
         for (int i=0; i<class_count; ++i)\r
             sum[i] = 0.0f;\r
-           int begin = slice.start_index;\r
-           int end = begin + cvSliceLength( slice, weak[0] );\r
-       \r
+        int begin = slice.start_index;\r
+        int end = begin + cvSliceLength( slice, weak[0] );\r
+\r
         pCvSeq* weak_seq = weak;\r
-           Tree_predictor predictor = Tree_predictor(weak_seq, class_count,\r
+        Tree_predictor predictor = Tree_predictor(weak_seq, class_count,\r
                                     params.shrinkage, _sample, _missing, sum);\r
-        \r
+\r
 //#ifdef HAVE_TBB\r
-//             tbb::parallel_for(cv::BlockedRange(begin, end), predictor,\r
+//      tbb::parallel_for(cv::BlockedRange(begin, end), predictor,\r
 //                          tbb::auto_partitioner());\r
 //#else\r
         cv::parallel_for(cv::BlockedRange(begin, end), predictor);\r
 //#endif\r
 \r
-           for (int i=0; i<class_count; ++i)\r
+        for (int i=0; i<class_count; ++i)\r
             sum[i] = sum[i] /** params.shrinkage*/ + base_value;\r
 \r
         if (class_count == 1)\r
@@ -1170,7 +1172,7 @@ void CvGBTrees::write( CvFileStorage* fs, const char* name ) const
 \r
 void CvGBTrees::read( CvFileStorage* fs, CvFileNode* node )\r
 {\r
-  \r
+\r
     CV_FUNCNAME( "CvGBTrees::read" );\r
 \r
     __BEGIN__;\r
@@ -1194,7 +1196,7 @@ void CvGBTrees::read( CvFileStorage* fs, CvFileNode* node )
 \r
 \r
     for (int j=0; j<class_count; ++j)\r
-    { \r
+    {\r
         s = "trees_";\r
         s += ToString(j);\r
 \r
@@ -1229,39 +1231,39 @@ void CvGBTrees::read( CvFileStorage* fs, CvFileNode* node )
 class Sample_predictor\r
 {\r
 private:\r
-       const CvGBTrees* gbt;\r
-       float* predictions;\r
-       const CvMat* samples;\r
-       const CvMat* missing;\r
+    const CvGBTrees* gbt;\r
+    float* predictions;\r
+    const CvMat* samples;\r
+    const CvMat* missing;\r
     const CvMat* idx;\r
     CvSlice slice;\r
 \r
 public:\r
-       Sample_predictor() : gbt(0), predictions(0), samples(0), missing(0),\r
+    Sample_predictor() : gbt(0), predictions(0), samples(0), missing(0),\r
                          idx(0), slice(CV_WHOLE_SEQ)\r
     {}\r
 \r
-       Sample_predictor(const CvGBTrees* _gbt, float* _predictions,\r
-                                  const CvMat* _samples, const CvMat* _missing,\r
+    Sample_predictor(const CvGBTrees* _gbt, float* _predictions,\r
+                   const CvMat* _samples, const CvMat* _missing,\r
                    const CvMat* _idx, CvSlice _slice=CV_WHOLE_SEQ) :\r
-                                  gbt(_gbt), predictions(_predictions), samples(_samples),\r
+                   gbt(_gbt), predictions(_predictions), samples(_samples),\r
                    missing(_missing), idx(_idx), slice(_slice)\r
-       {}\r
-       \r
+    {}\r
+\r
 \r
     Sample_predictor( const Sample_predictor& p, cv::Split ) :\r
-                       gbt(p.gbt), predictions(p.predictions),\r
+            gbt(p.gbt), predictions(p.predictions),\r
             samples(p.samples), missing(p.missing), idx(p.idx),\r
             slice(p.slice)\r
-       {}\r
+    {}\r
 \r
 \r
     virtual void operator()(const cv::BlockedRange& range) const\r
-       {\r
-               int begin = range.begin();\r
-               int end = range.end();\r
+    {\r
+        int begin = range.begin();\r
+        int end = range.end();\r
 \r
-               CvMat x;\r
+        CvMat x;\r
         CvMat miss;\r
 \r
         for (int i=begin; i<end; ++i)\r
@@ -1278,14 +1280,16 @@ public:
                 predictions[i] = gbt->predict_serial(&x,&miss,0,slice);\r
             }\r
         }\r
-       } // Sample_predictor::operator()\r
+    } // Sample_predictor::operator()\r
+\r
+    virtual ~Sample_predictor() {}\r
 \r
 }; // class Sample_predictor\r
 \r
 \r
 \r
 // type in {CV_TRAIN_ERROR, CV_TEST_ERROR}\r
-float \r
+float\r
 CvGBTrees::calc_error( CvMLData* _data, int type, std::vector<float> *resp )\r
 {\r
 \r
@@ -1294,14 +1298,14 @@ CvGBTrees::calc_error( CvMLData* _data, int type, std::vector<float> *resp )
                               _data->get_train_sample_idx() :\r
                               _data->get_test_sample_idx();\r
     const CvMat* response = _data->get_responses();\r
-                              \r
+\r
     int n = sample_idx ? get_len(sample_idx) : 0;\r
     n = (type == CV_TRAIN_ERROR && n == 0) ? _data->get_values()->rows : n;\r
-    \r
+\r
     if (!n)\r
         return -FLT_MAX;\r
-    \r
-    float* pred_resp = 0;  \r
+\r
+    float* pred_resp = 0;\r
     if (resp)\r
     {\r
         resp->resize(n);\r
@@ -1312,17 +1316,17 @@ CvGBTrees::calc_error( CvMLData* _data, int type, std::vector<float> *resp )
 \r
     Sample_predictor predictor = Sample_predictor(this, pred_resp, _data->get_values(),\r
             _data->get_missing(), sample_idx);\r
-        \r
+\r
 //#ifdef HAVE_TBB\r
 //    tbb::parallel_for(cv::BlockedRange(0,n), predictor, tbb::auto_partitioner());\r
 //#else\r
     cv::parallel_for(cv::BlockedRange(0,n), predictor);\r
 //#endif\r
-        \r
+\r
     int* sidx = sample_idx ? sample_idx->data.i : 0;\r
     int r_step = CV_IS_MAT_CONT(response->type) ?\r
                 1 : response->step / CV_ELEM_SIZE(response->type);\r
-    \r
+\r
 \r
     if ( !problem_type() )\r
     {\r
@@ -1342,9 +1346,9 @@ CvGBTrees::calc_error( CvMLData* _data, int type, std::vector<float> *resp )
             float d = pred_resp[i] - response->data.fl[si*r_step];\r
             err += d*d;\r
         }\r
-        err = err / (float)n;    \r
+        err = err / (float)n;\r
     }\r
-    \r
+\r
     return err;\r
 }\r
 \r
@@ -1364,9 +1368,9 @@ CvGBTrees::CvGBTrees( const cv::Mat& trainData, int tflag,
     class_labels = 0;\r
     class_count = 1;\r
     delta = 0.0f;\r
-    \r
+\r
     clear();\r
-    \r
+\r
     train(trainData, tflag, responses, varIdx, sampleIdx, varType, missingDataMask, params, false);\r
 }\r
 \r
@@ -1380,7 +1384,7 @@ bool CvGBTrees::train( const cv::Mat& trainData, int tflag,
     CvMat _trainData = trainData, _responses = responses;\r
     CvMat _varIdx = varIdx, _sampleIdx = sampleIdx, _varType = varType;\r
     CvMat _missingDataMask = missingDataMask;\r
-    \r
+\r
     return train( &_trainData, tflag, &_responses, varIdx.empty() ? 0 : &_varIdx,\r
                   sampleIdx.empty() ? 0 : &_sampleIdx, varType.empty() ? 0 : &_varType,\r
                   missingDataMask.empty() ? 0 : &_missingDataMask, params, update);\r
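
Note on the gbt.cpp hunks above: both parallel functors, Tree_predictor and Sample_predictor, gain an empty virtual destructor. Since they already declare a virtual operator(), gcc's -Wnon-virtual-dtor style checks (presumably part of the stricter warning set this commit enables) may flag the class definitions otherwise. A minimal sketch of the pattern, with illustrative names rather than OpenCV API:

    // Parallel-body functor in the spirit of Tree_predictor above.
    struct AccumulateBody
    {
        const float* data;
        float* sum;   // shared accumulator; a real body would lock, cf. SumMutex above

        AccumulateBody(const float* _data, float* _sum) : data(_data), sum(_sum) {}

        // operator() is virtual, so the destructor should be virtual as well:
        // deleting a derived object through a base pointer is otherwise unsafe,
        // and the compiler can warn at the class definition.
        virtual void operator()(int begin, int end) const
        {
            float local = 0.f;
            for (int i = begin; i < end; ++i)
                local += data[i];
            *sum += local;
        }

        virtual ~AccumulateBody() {}   // empty, but keeps -Wnon-virtual-dtor quiet
    };
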
index 0059597..f0e085d 100644 (file)
@@ -122,7 +122,7 @@ void CvStatModel::read( CvFileStorage*, CvFileNode* )
 
 
 /* Calculates upper triangular matrix S, where A is a symmetrical matrix A=S'*S */
-CV_IMPL void cvChol( CvMat* A, CvMat* S )
+static void cvChol( CvMat* A, CvMat* S )
 {
     int dim = A->rows;
 
@@ -182,7 +182,7 @@ CV_IMPL void cvRandMVNormal( CvMat* mean, CvMat* cov, CvMat* sample, CvRNG* rng
 
 /* Generates <sample> of <amount> points from a discrete variate xi,
    where Pr{xi = k} == probs[k], 0 < k < len - 1. */
-CV_IMPL void cvRandSeries( float probs[], int len, int sample[], int amount )
+static void cvRandSeries( float probs[], int len, int sample[], int amount )
 {
     CvMat* univals = cvCreateMat(1, amount, CV_32FC1);
     float* knots = (float*)cvAlloc( len * sizeof(float) );
@@ -321,48 +321,48 @@ CvMat* icvGenerateRandomClusterCenters ( int seed, const CvMat* data,
 
 #define ICV_RAND_MAX    4294967296 // == 2^32
 
-CV_IMPL void cvRandRoundUni (CvMat* center,
-                             float radius_small,
-                             float radius_large,
-                             CvMat* desired_matrix,
-                             CvRNG* rng_state_ptr)
-{
-    float rad, norm, coefficient;
-    int dim, size, i, j;
-    CvMat *cov, sample;
-    CvRNG rng_local;
-
-    CV_FUNCNAME("cvRandRoundUni");
-    __BEGIN__
-
-    rng_local = *rng_state_ptr;
-
-    CV_ASSERT ((radius_small >= 0) &&
-               (radius_large > 0) &&
-               (radius_small <= radius_large));
-    CV_ASSERT (center && desired_matrix && rng_state_ptr);
-    CV_ASSERT (center->rows == 1);
-    CV_ASSERT (center->cols == desired_matrix->cols);
-
-    dim = desired_matrix->cols;
-    size = desired_matrix->rows;
-    cov = cvCreateMat (dim, dim, CV_32FC1);
-    cvSetIdentity (cov);
-    cvRandMVNormal (center, cov, desired_matrix, &rng_local);
-
-    for (i = 0; i < size; i++)
-    {
-        rad = (float)(cvRandReal(&rng_local)*(radius_large - radius_small) + radius_small);
-        cvGetRow (desired_matrix, &sample, i);
-        norm = (float) cvNorm (&sample, 0, CV_L2);
-        coefficient = rad / norm;
-        for (j = 0; j < dim; j++)
-             CV_MAT_ELEM (sample, float, 0, j) *= coefficient;
-    }
-
-    __END__
-
-}
+// static void cvRandRoundUni (CvMat* center,
+//                              float radius_small,
+//                              float radius_large,
+//                              CvMat* desired_matrix,
+//                              CvRNG* rng_state_ptr)
+// {
+//     float rad, norm, coefficient;
+//     int dim, size, i, j;
+//     CvMat *cov, sample;
+//     CvRNG rng_local;
+
+//     CV_FUNCNAME("cvRandRoundUni");
+//     __BEGIN__
+
+//     rng_local = *rng_state_ptr;
+
+//     CV_ASSERT ((radius_small >= 0) &&
+//                (radius_large > 0) &&
+//                (radius_small <= radius_large));
+//     CV_ASSERT (center && desired_matrix && rng_state_ptr);
+//     CV_ASSERT (center->rows == 1);
+//     CV_ASSERT (center->cols == desired_matrix->cols);
+
+//     dim = desired_matrix->cols;
+//     size = desired_matrix->rows;
+//     cov = cvCreateMat (dim, dim, CV_32FC1);
+//     cvSetIdentity (cov);
+//     cvRandMVNormal (center, cov, desired_matrix, &rng_local);
+
+//     for (i = 0; i < size; i++)
+//     {
+//         rad = (float)(cvRandReal(&rng_local)*(radius_large - radius_small) + radius_small);
+//         cvGetRow (desired_matrix, &sample, i);
+//         norm = (float) cvNorm (&sample, 0, CV_L2);
+//         coefficient = rad / norm;
+//         for (j = 0; j < dim; j++)
+//              CV_MAT_ELEM (sample, float, 0, j) *= coefficient;
+//     }
+
+//     __END__
+
+// }
 
 // By S. Dilman - end -
 
@@ -1769,7 +1769,7 @@ void cvCombineResponseMaps (CvMat*  _responses,
 }
 
 
-int icvGetNumberOfCluster( double* prob_vector, int num_of_clusters, float r,
+static int icvGetNumberOfCluster( double* prob_vector, int num_of_clusters, float r,
                            float outlier_thresh, int normalize_probs )
 {
     int max_prob_loc = 0;
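
In the inner_functions.cpp hunks above, helpers used only inside this translation unit (cvChol, cvRandSeries, icvGetNumberOfCluster) drop their CV_IMPL / external linkage and become static, and the apparently unused cvRandRoundUni is commented out wholesale. Presumably the stricter gcc options complain about definitions that have no visible declaration and about dead file-scope code; internal linkage is the conventional answer. A tiny sketch with a hypothetical helper name:

    // File-local helper: internal linkage means no prototype is expected in a
    // header, the symbol cannot collide with a same-named helper elsewhere,
    // and the compiler can tell when it is genuinely unused.
    static float clampProbability(float p)
    {
        return p < 0.f ? 0.f : (p > 1.f ? 1.f : p);
    }
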
index 285abea..5ec1a60 100644 (file)
 #ifndef __OPENCV_PRECOMP_H__
 #define __OPENCV_PRECOMP_H__
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4251 4514 4710 4711 4710 )
 #endif
 
-#ifdef HAVE_CVCONFIG_H 
+#ifdef HAVE_CVCONFIG_H
 #include "cvconfig.h"
 #endif
 
@@ -298,10 +298,10 @@ cvPrepareTrainData( const char* /*funcname*/,
                     CvMat** out_sample_idx=0 );
 
 void
-cvSortSamplesByClasses( const float** samples, const CvMat* classes, 
+cvSortSamplesByClasses( const float** samples, const CvMat* classes,
                         int* class_ranges, const uchar** mask CV_DEFAULT(0) );
 
-void 
+void
 cvCombineResponseMaps (CvMat*  _responses,
                  const CvMat*  old_response_map,
                        CvMat*  new_response_map,
@@ -329,7 +329,7 @@ CvFileNode* icvFileNodeGetNext(CvFileNode* n, const char* name);
 
 
 void cvCheckTrainData( const CvMat* train_data, int tflag,
-                       const CvMat* missing_mask, 
+                       const CvMat* missing_mask,
                        int* var_all, int* sample_all );
 
 CvMat* cvPreprocessIndexArray( const CvMat* idx_arr, int data_arr_size, bool check_for_duplicates=false );
@@ -365,7 +365,7 @@ namespace cv
         CvDTree* tree;
         CvDTreeNode* node;
     };
-    
+
     struct ForestTreeBestSplitFinder : DTreeBestSplitFinder
     {
         ForestTreeBestSplitFinder() : DTreeBestSplitFinder() {}
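
The `#if _MSC_VER >= 1200` → `#if defined _MSC_VER && _MSC_VER >= 1200` change above (and its twins elsewhere in this commit) is the conventional fix once undefined-macro checks such as -Wundef are enabled: under gcc, _MSC_VER is never defined, so testing it arithmetically in #if gets reported, while guarding with `defined` first lets the preprocessor skip the comparison. A minimal sketch of the guarded form:

    // Seen by MSVC only; gcc and clang short-circuit the && and never evaluate
    // the version comparison against the undefined macro.
    #if defined(_MSC_VER) && _MSC_VER >= 1200
    #  pragma warning(disable: 4251)   // e.g. the dll-interface warning disabled above
    #endif
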
index c16cce5..bfe1ed6 100644 (file)
@@ -88,7 +88,7 @@ using namespace cv;
 #include <stdarg.h>
 #include <ctype.h>
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4514 ) /* unreferenced inline functions */
 #endif
 
@@ -1593,7 +1593,7 @@ bool CvSVM::train( const CvMat* _train_data, const CvMat* _responses,
     return ok;
 }
 
-struct indexedratio 
+struct indexedratio
 {
     double val;
     int ind;
@@ -1774,7 +1774,7 @@ bool CvSVM::train_auto( const CvMat* _train_data, const CvMat* _responses,
         else
             CV_SWAP( responses->data.i[i1], responses->data.i[i2], y );
     }
-        
+
     if (!is_regression && class_labels->cols==2 && balanced)
     {
         // count class samples
@@ -1786,13 +1786,13 @@ bool CvSVM::train_auto( const CvMat* _train_data, const CvMat* _responses,
             else
                 ++num_1;
         }
-        
+
         int label_smallest_class;
         int label_biggest_class;
         if (num_0 < num_1)
         {
             label_biggest_class = class_labels->data.i[1];
-            label_smallest_class = class_labels->data.i[0]; 
+            label_smallest_class = class_labels->data.i[0];
         }
         else
         {
@@ -2001,7 +2001,7 @@ float CvSVM::predict( const float* row_sample, int row_len, bool returnDFVal ) c
 
     int var_count = get_var_count();
     assert( row_len == var_count );
-       (void)row_len;
+    (void)row_len;
 
     int class_count = class_labels ? class_labels->cols :
                   params.svm_type == ONE_CLASS ? 1 : 0;
@@ -2072,7 +2072,7 @@ float CvSVM::predict( const CvMat* sample, bool returnDFVal ) const
     __BEGIN__;
 
     int class_count;
-    
+
     if( !kernel )
         CV_ERROR( CV_StsBadArg, "The SVM should be trained first" );
 
@@ -2082,7 +2082,7 @@ float CvSVM::predict( const CvMat* sample, bool returnDFVal ) const
     CV_CALL( cvPreparePredictData( sample, var_all, var_idx,
                                    class_count, 0, &row_sample ));
     result = predict( row_sample, get_var_count(), returnDFVal );
-  
+
     __END__;
 
     if( sample && (!CV_IS_MAT(sample) || sample->data.fl != row_sample) )
@@ -2099,12 +2099,12 @@ struct predict_body_svm {
         samples = _samples;
         results = _results;
     }
-    
+
     const CvSVM* pointer;
     float* result;
     const CvMat* samples;
     CvMat* results;
-  
+
     void operator()( const cv::BlockedRange& range ) const
     {
         for(int i = range.begin(); i < range.end(); i++ )
@@ -2116,15 +2116,15 @@ struct predict_body_svm {
                 results->data.fl[i] = (float)r;
             if (i == 0)
                 *result = (float)r;
-       }
+    }
     }
 };
 
 float CvSVM::predict(const CvMat* samples, CV_OUT CvMat* results) const
 {
     float result = 0;
-    cv::parallel_for(cv::BlockedRange(0, samples->rows), 
-                    predict_body_svm(this, &result, samples, results)
+    cv::parallel_for(cv::BlockedRange(0, samples->rows),
+             predict_body_svm(this, &result, samples, results)
     );
     return result;
 }
@@ -2141,7 +2141,7 @@ CvSVM::CvSVM( const Mat& _train_data, const Mat& _responses,
     kernel = 0;
     solver = 0;
     default_model_name = "my_svm";
-    
+
     train( _train_data, _responses, _var_idx, _sample_idx, _params );
 }
 
@@ -2166,7 +2166,7 @@ bool CvSVM::train_auto( const Mat& _train_data, const Mat& _responses,
 
 float CvSVM::predict( const Mat& _sample, bool returnDFVal ) const
 {
-    CvMat sample = _sample; 
+    CvMat sample = _sample;
     return predict(&sample, returnDFVal);
 }
 
@@ -2648,11 +2648,11 @@ cvTrainSVM_CrossValidation( const CvMat* train_data, int tflag,
     __BEGIN__;
 
     double degree_step = 7,
-              g_step      = 15,
-                  coef_step   = 14,
-                  C_step      = 20,
-                  nu_step     = 5,
-                  p_step      = 7; // all steps must be > 1
+           g_step      = 15,
+           coef_step   = 14,
+           C_step      = 20,
+           nu_step     = 5,
+           p_step      = 7; // all steps must be > 1
     double degree_begin = 0.01, degree_end = 2;
     double g_begin      = 1e-5, g_end      = 0.5;
     double coef_begin   = 0.1,  coef_end   = 300;
@@ -2662,12 +2662,12 @@ cvTrainSVM_CrossValidation( const CvMat* train_data, int tflag,
 
     double rate = 0, gamma = 0, C = 0, degree = 0, coef = 0, p = 0, nu = 0;
 
-       double best_rate    = 0;
+    double best_rate    = 0;
     double best_degree  = degree_begin;
     double best_gamma   = g_begin;
     double best_coef    = coef_begin;
-       double best_C       = C_begin;
-       double best_nu      = nu_begin;
+    double best_C       = C_begin;
+    double best_nu      = nu_begin;
     double best_p       = p_begin;
 
     CvSVMModelParams svm_params, *psvm_params;
index e4f3249..5edb3b4 100644 (file)
@@ -46,7 +46,7 @@ typedef struct CvDI
     int    i;
 } CvDI;
 
-int CV_CDECL
+static int CV_CDECL
 icvCmpDI( const void* a, const void* b, void* )
 {
     const CvDI* e1 = (const CvDI*) a;
@@ -65,7 +65,7 @@ cvCreateTestSet( int type, CvMat** samples,
     CvMat* mean = NULL;
     CvMat* cov = NULL;
     CvMemStorage* storage = NULL;
-    
+
     CV_FUNCNAME( "cvCreateTestSet" );
 
     __BEGIN__;
@@ -125,7 +125,7 @@ cvCreateTestSet( int type, CvMat** samples,
             CV_WRITE_SEQ_ELEM( elem, writer );
         }
         CV_CALL( seq = cvEndWriteSeq( &writer ) );
-    
+
         /* sort the sequence in a distance ascending order */
         CV_CALL( cvSeqSort( seq, icvCmpDI, NULL ) );
 
@@ -137,7 +137,7 @@ cvCreateTestSet( int type, CvMat** samples,
         {
             int last_idx;
             double max_dst;
-        
+
             last_idx = num_samples * (cur_class + 1) / num_classes - 1;
             CV_CALL( max_dst = (*((CvDI*) cvGetSeqElem( seq, last_idx ))).d );
             max_dst = MAX( max_dst, elem.d );
index d86769c..64e2dba 100644 (file)
@@ -5,7 +5,7 @@
 #include "opencv2/nonfree/nonfree.hpp"
 #include "opencv2/highgui/highgui.hpp"
 
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
 #endif
 
index 9d7c4f0..1d5c2f8 100644 (file)
 #ifndef __OPENCV_PRECOMP_H__
 #define __OPENCV_PRECOMP_H__
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4251 4512 4710 4711 4514 4996 )
 #endif
 
-#ifdef HAVE_CVCONFIG_H 
+#ifdef HAVE_CVCONFIG_H
 #include "cvconfig.h"
 #endif
 
index 76939f8..b6e776d 100644 (file)
  * Redistribution and use in source and binary forms, with or
  * without modification, are permitted provided that the following
  * conditions are met:
- *     Redistributions of source code must retain the above
- *     copyright notice, this list of conditions and the following
- *     disclaimer.
- *     Redistributions in binary form must reproduce the above
- *     copyright notice, this list of conditions and the following
- *     disclaimer in the documentation and/or other materials
- *     provided with the distribution.
- *     The name of Contributor may not be used to endorse or
- *     promote products derived from this software without
- *     specific prior written permission.
+ *  Redistributions of source code must retain the above
+ *  copyright notice, this list of conditions and the following
+ *  disclaimer.
+ *  Redistributions in binary form must reproduce the above
+ *  copyright notice, this list of conditions and the following
+ *  disclaimer in the documentation and/or other materials
+ *  provided with the distribution.
+ *  The name of Contributor may not be used to endorse or
+ *  promote products derived from this software without
+ *  specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
@@ -43,7 +43,7 @@
  * OF SUCH DAMAGE.
  */
 
-/* 
+/*
    The following changes have been made, comparing to the original contribution:
    1. A lot of small optimizations, less memory allocations, got rid of global buffers
    2. Reversed order of cvGetQuadrangleSubPix and cvResize calls; probably less accurate, but much faster
@@ -79,8 +79,8 @@ octave.
 The extraction of the patch of pixels surrounding a keypoint used to build a
 descriptor has been simplified.
 
-KeyPoint descriptor normalisation has been changed from normalising each 4x4 
-cell (resulting in a descriptor of magnitude 16) to normalising the entire 
+KeyPoint descriptor normalisation has been changed from normalising each 4x4
+cell (resulting in a descriptor of magnitude 16) to normalising the entire
 descriptor to magnitude 1.
 
 The default number of octaves has been increased from 3 to 4 to match the
@@ -88,20 +88,20 @@ original SURF binary default. The increase in computation time is minimal since
 the higher octaves are sampled sparsely.
 
 The default number of layers per octave has been reduced from 3 to 2, to prevent
-redundant calculation of similar sizes in consecutive octaves.  This decreases 
-computation time. The number of features extracted may be less, however the 
+redundant calculation of similar sizes in consecutive octaves.  This decreases
+computation time. The number of features extracted may be less, however the
 additional features were mostly redundant.
 
 The radius of the circle of gradient samples used to assign an orientation has
-been increased from 4 to 6 to match the description in the SURF paper. This is 
+been increased from 4 to 6 to match the description in the SURF paper. This is
 now defined by ORI_RADIUS, and could be made into a parameter.
 
 The size of the sliding window used in orientation assignment has been reduced
 from 120 to 60 degrees to match the description in the SURF paper. This is now
 defined by ORI_WIN, and could be made into a parameter.
 
-Other options like  HAAR_SIZE0, HAAR_SIZE_INC, SAMPLE_STEP0, ORI_SEARCH_INC, 
-ORI_SIGMA and DESC_SIGMA have been separated from the code and documented. 
+Other options like  HAAR_SIZE0, HAAR_SIZE_INC, SAMPLE_STEP0, ORI_SEARCH_INC,
+ORI_SIGMA and DESC_SIGMA have been separated from the code and documented.
 These could also be made into parameters.
 
 Modifications by Ian Mahon
@@ -124,12 +124,14 @@ static const int SURF_HAAR_SIZE0 = 9;
 // This ensures that when looking for the neighbours of a sample, the layers
 // above and below are aligned correctly.
 static const int SURF_HAAR_SIZE_INC = 6;
-    
-    
+
+
 struct SurfHF
 {
     int p0, p1, p2, p3;
     float w;
+
+    SurfHF(): p0(0), p1(0), p2(0), p3(0), w(0) {}
 };
 
 inline float calcHaarPattern( const int* origin, const SurfHF* f, int n )
@@ -208,10 +210,10 @@ static void calcLayerDetAndTrace( const Mat& sum, int size, int sampleStep,
  * Maxima location interpolation as described in "Invariant Features from
  * Interest Point Groups" by Matthew Brown and David Lowe. This is performed by
  * fitting a 3D quadratic to a set of neighbouring samples.
- * 
- * The gradient vector and Hessian matrix at the initial keypoint location are 
+ *
+ * The gradient vector and Hessian matrix at the initial keypoint location are
  * approximated using central differences. The linear system Ax = b is then
- * solved, where A is the Hessian, b is the negative gradient, and x is the 
+ * solved, where A is the Hessian, b is the negative gradient, and x is the
  * offset of the interpolated maxima coordinates from the initial estimate.
  * This is equivalent to an iteration of Newton's optimisation algorithm.
  *
@@ -234,18 +236,18 @@ interpolateKeypoint( float N9[3][9], int dx, int dy, int ds, KeyPoint& kpt )
         N9[1][3]-2*N9[1][4]+N9[1][5],            // 2nd deriv x, x
         (N9[1][8]-N9[1][6]-N9[1][2]+N9[1][0])/4, // 2nd deriv x, y
         (N9[2][5]-N9[2][3]-N9[0][5]+N9[0][3])/4, // 2nd deriv x, s
-        (N9[1][8]-N9[1][6]-N9[1][2]+N9[1][0])/4, // 2nd deriv x, y 
-        N9[1][1]-2*N9[1][4]+N9[1][7],            // 2nd deriv y, y 
-        (N9[2][7]-N9[2][1]-N9[0][7]+N9[0][1])/4, // 2nd deriv y, s 
+        (N9[1][8]-N9[1][6]-N9[1][2]+N9[1][0])/4, // 2nd deriv x, y
+        N9[1][1]-2*N9[1][4]+N9[1][7],            // 2nd deriv y, y
+        (N9[2][7]-N9[2][1]-N9[0][7]+N9[0][1])/4, // 2nd deriv y, s
         (N9[2][5]-N9[2][3]-N9[0][5]+N9[0][3])/4, // 2nd deriv x, s
         (N9[2][7]-N9[2][1]-N9[0][7]+N9[0][1])/4, // 2nd deriv y, s
         N9[0][4]-2*N9[1][4]+N9[2][4]);           // 2nd deriv s, s
 
     Vec3f x = A.solve(b, DECOMP_LU);
-    
+
     bool ok = (x[0] != 0 || x[1] != 0 || x[2] != 0) &&
         std::abs(x[0]) <= 1 && std::abs(x[1]) <= 1 && std::abs(x[2]) <= 1;
-    
+
     if( ok )
     {
         kpt.pt.x += x[0]*dx;
@@ -425,7 +427,7 @@ struct SURFFindInvoker
         {
             int layer = (*middleIndices)[i];
             int octave = i / nOctaveLayers;
-            findMaximaInLayer( *sum, *mask_sum, *dets, *traces, *sizes, 
+            findMaximaInLayer( *sum, *mask_sum, *dets, *traces, *sizes,
                                *keypoints, octave, layer, hessianThreshold,
                                (*sampleSteps)[layer] );
         }
@@ -459,7 +461,7 @@ struct KeypointGreater
     }
 };
 
-    
+
 static void fastHessianDetector( const Mat& sum, const Mat& mask_sum, vector<KeyPoint>& keypoints,
                                  int nOctaves, int nOctaveLayers, float hessianThreshold )
 {
@@ -479,7 +481,7 @@ static void fastHessianDetector( const Mat& sum, const Mat& mask_sum, vector<Key
 
     // Allocate space and calculate properties of each layer
     int index = 0, middleIndex = 0, step = SAMPLE_STEP0;
-    
+
     for( int octave = 0; octave < nOctaves; octave++ )
     {
         for( int layer = 0; layer < nOctaveLayers+2; layer++ )
@@ -566,7 +568,7 @@ struct SURFInvoker
         const int dx_s[NX][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
         const int dy_s[NY][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
 
-        // Optimisation is better using nOriSampleBound than nOriSamples for 
+        // Optimisation is better using nOriSampleBound than nOriSamples for
         // array lengths.  Maybe because it is a constant known at compile time
         const int nOriSampleBound =(2*ORI_RADIUS+1)*(2*ORI_RADIUS+1);
 
@@ -579,7 +581,7 @@ struct SURFInvoker
         Mat _patch(PATCH_SZ+1, PATCH_SZ+1, CV_8U, PATCH);
 
         int dsize = extended ? 128 : 64;
-        
+
         int k, k1 = range.begin(), k2 = range.end();
         float maxSize = 0;
         for( k = k1; k < k2; k++ )
@@ -601,7 +603,7 @@ struct SURFInvoker
             float s = size*1.2f/9.0f;
             /* To find the dominant orientation, the gradients in x and y are
              sampled in a circle of radius 6s using wavelets of size 4s.
-             We ensure the gradient wavelet size is even to ensure the 
+             We ensure the gradient wavelet size is even to ensure the
              wavelet pattern is balanced and symmetric around its center */
             int grad_wav_size = 2*cvRound( 2*s );
             if( sum->rows < grad_wav_size || sum->cols < grad_wav_size )
@@ -670,7 +672,7 @@ struct SURFInvoker
             kp.angle = descriptor_dir;
             if( !descriptors || !descriptors->data )
                 continue;
-            
+
             /* Extract a window of pixels around the keypoint of size 20s */
             int win_size = (int)((PATCH_SZ+1)*s);
             CV_Assert( winbuf->cols >= win_size*win_size );
@@ -678,13 +680,13 @@ struct SURFInvoker
 
             if( !upright )
             {
-               descriptor_dir *= (float)(CV_PI/180);
+                descriptor_dir *= (float)(CV_PI/180);
                 float sin_dir = std::sin(descriptor_dir);
                 float cos_dir = std::cos(descriptor_dir);
 
                 /* Subpixel interpolation version (slower). Subpixel not required since
                 the pixels will all get averaged when we scale down to 20 pixels */
-                /*  
+                /*
                 float w[] = { cos_dir, sin_dir, center.x,
                 -sin_dir, cos_dir , center.y };
                 CvMat W = cvMat(2, 3, CV_32F, w);
@@ -711,12 +713,12 @@ struct SURFInvoker
             else
             {
                 // extract rect - slightly optimized version of the code above
-                // TODO: find faster code, as this is simply an extract rect operation, 
+                // TODO: find faster code, as this is simply an extract rect operation,
                 //       e.g. by using cvGetSubRect, problem is the border processing
                 // descriptor_dir == 90 grad
                 // sin_dir == 1
                 // cos_dir == 0
-                
+
                 float win_offset = -(float)(win_size-1)/2;
                 int start_x = cvRound(center.x + win_offset);
                 int start_y = cvRound(center.y - win_offset);
@@ -733,7 +735,7 @@ struct SURFInvoker
                         y = MIN( y, img->rows-1 );
                         WIN[i*win_size + j] = img->at<uchar>(y, x);
                     }
-                }               
+                }
             }
             // Scale the window to size PATCH_SZ so each pixel's size is s. This
             // makes calculating the gradients with wavelets of size 2s easy
@@ -860,7 +862,7 @@ void SURF::operator()(InputArray imgarg, InputArray maskarg,
 {
     (*this)(imgarg, maskarg, keypoints, noArray(), false);
 }
-    
+
 void SURF::operator()(InputArray _img, InputArray _mask,
                       CV_OUT vector<KeyPoint>& keypoints,
                       OutputArray _descriptors,
@@ -868,18 +870,18 @@ void SURF::operator()(InputArray _img, InputArray _mask,
 {
     Mat img = _img.getMat(), mask = _mask.getMat(), mask1, sum, msum;
     bool doDescriptors = _descriptors.needed();
-    
+
     CV_Assert(!img.empty() && img.depth() == CV_8U);
     if( img.channels() > 1 )
         cvtColor(img, img, COLOR_BGR2GRAY);
-    
+
     CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.size() == img.size()));
     CV_Assert(hessianThreshold >= 0);
     CV_Assert(nOctaves > 0);
     CV_Assert(nOctaveLayers > 0);
-    
+
     integral(img, sum, CV_32S);
-    
+
     // Compute keypoints only if we are not asked to evaluate the descriptors at some given locations:
     if( !useProvidedKeypoints )
     {
@@ -890,7 +892,7 @@ void SURF::operator()(InputArray _img, InputArray _mask,
         }
         fastHessianDetector( sum, msum, keypoints, nOctaves, nOctaveLayers, (float)hessianThreshold );
     }
-    
+
     int i, j, N = (int)keypoints.size();
     if( N > 0 )
     {
@@ -898,7 +900,7 @@ void SURF::operator()(InputArray _img, InputArray _mask,
         bool _1d = false;
         int dcols = extended ? 128 : 64;
         size_t dsize = dcols*sizeof(float);
-        
+
         if( doDescriptors )
         {
             _1d = _descriptors.kind() == _InputArray::STD_VECTOR && _descriptors.type() == CV_32F;
@@ -913,11 +915,11 @@ void SURF::operator()(InputArray _img, InputArray _mask,
                 descriptors = _descriptors.getMat();
             }
         }
-        
+
         // we call SURFInvoker in any case, even if we do not need descriptors,
         // since it computes orientation of each feature.
         parallel_for(BlockedRange(0, N), SURFInvoker(img, sum, keypoints, descriptors, extended, upright) );
-        
+
         // remove keypoints that were marked for deletion
         for( i = j = 0; i < N; i++ )
         {
@@ -951,7 +953,7 @@ void SURF::operator()(InputArray _img, InputArray _mask,
 void SURF::detectImpl( const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask) const
 {
     (*this)(image, mask, keypoints, noArray(), false);
-}    
+}
 
 void SURF::computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const
 {
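
Background for the interpolateKeypoint hunk above: as its comment says, the detector fits a 3-D quadratic to the 3x3x3 response neighbourhood, estimates the gradient and Hessian by central differences, and takes one Newton step by solving A·x = b with A.solve(b, DECOMP_LU); offsets larger than one sample are rejected. A 1-D sketch of the same idea (illustrative code, not part of OpenCV):

    #include <cmath>

    // Fit a parabola through f(-1), f(0), f(+1) and return the sub-sample
    // offset of its vertex: one Newton step using central differences.
    static float refinePeak1D(float fm1, float f0, float fp1)
    {
        float g = (fp1 - fm1) * 0.5f;      // first derivative at the centre
        float h = fp1 - 2.0f * f0 + fm1;   // second derivative at the centre
        if (h == 0.0f)
            return 0.0f;                   // degenerate fit: keep the grid point
        float dx = -g / h;                 // vertex offset
        return std::fabs(dx) <= 1.0f ? dx : 0.0f;   // reject offsets outside the cell
    }
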
index 397f127..deb362e 100644 (file)
@@ -5,7 +5,7 @@
 #include "opencv2/objdetect/objdetect.hpp"
 #include "opencv2/highgui/highgui.hpp"
 
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
 #endif
 
index dd8c22c..1e3555e 100644 (file)
@@ -9,8 +9,8 @@
 //\r
 //\r
 // API\r
-// int GetPointOfIntersection(const float *f, \r
-                              const float a, const float b,           \r
+// int GetPointOfIntersection(const float *f,\r
+                              const float a, const float b,\r
                               int q1, int q2, float *point);\r
 // INPUT\r
 // f                - function on the regular grid\r
 // RESULT\r
 // Error status\r
 */\r
-int GetPointOfIntersection(const float *f,  \r
-                           const float a, const float b, \r
+int GetPointOfIntersection(const float *f,\r
+                           const float a, const float b,\r
                            int q1, int q2, float *point)\r
 {\r
     if (q1 == q2)\r
     {\r
         return DISTANCE_TRANSFORM_EQUAL_POINTS;\r
-    } /* if (q1 == q2) */ \r
-    (*point) = ( (f[q2] - a * q2 + b *q2 * q2) - \r
+    } /* if (q1 == q2) */\r
+    (*point) = ( (f[q2] - a * q2 + b *q2 * q2) -\r
                  (f[q1] - a * q1 + b * q1 * q1) ) / (2 * b * (q2 - q1));\r
     return DISTANCE_TRANSFORM_OK;\r
 }\r
@@ -43,9 +43,9 @@ int GetPointOfIntersection(const float *f,
 //\r
 // API\r
 // int DistanceTransformOneDimensionalProblem(const float *f, const int n,\r
-                                              const float a, const float b,                                             \r
+                                              const float a, const float b,\r
                                               float *distanceTransform,\r
-                                              int *points); \r
+                                              int *points);\r
 // INPUT\r
 // f                 - function on the regular grid\r
 // n                 - grid dimension\r
@@ -58,7 +58,7 @@ int GetPointOfIntersection(const float *f,
 // Error status\r
 */\r
 int DistanceTransformOneDimensionalProblem(const float *f, const int n,\r
-                                           const float a, const float b,                                             \r
+                                           const float a, const float b,\r
                                            float *distanceTransform,\r
                                            int *points)\r
 {\r
@@ -73,7 +73,7 @@ int DistanceTransformOneDimensionalProblem(const float *f, const int n,
     // Allocation memory (must be free in this function)\r
     v = (int *)malloc (sizeof(int) * n);\r
     z = (float *)malloc (sizeof(float) * (n + 1));\r
-    \r
+\r
     v[0] = 0;\r
     z[0] = (float)F_MIN; // left border of envelope\r
     z[1] = (float)F_MAX; // right border of envelope\r
@@ -89,7 +89,7 @@ int DistanceTransformOneDimensionalProblem(const float *f, const int n,
         } /* if (tmp != DISTANCE_TRANSFORM_OK) */\r
         if (pointIntersection <= z[k])\r
         {\r
-            // Envelope doesn't contain current parabola            \r
+            // Envelope doesn't contain current parabola\r
             do\r
             {\r
                 k--;\r
@@ -144,7 +144,7 @@ int DistanceTransformOneDimensionalProblem(const float *f, const int n,
 // INPUT\r
 // k                 - index of the previous cycle element\r
 // n                 - number of matrix rows\r
-// q                 - parameter that equal \r
+// q                 - parameter that equal\r
                        (number_of_rows * number_of_columns - 1)\r
 // OUTPUT\r
 // None\r
@@ -196,7 +196,7 @@ void TransposeCycleElements(float *a, int *cycle, int cycle_len)
 // RESULT\r
 // Error status\r
 */\r
-void TransposeCycleElements_int(int *a, int *cycle, int cycle_len)\r
+static void TransposeCycleElements_int(int *a, int *cycle, int cycle_len)\r
 {\r
     int i;\r
     int buf;\r
@@ -229,7 +229,7 @@ void Transpose(float *a, int n, int m)
     int max_cycle_len;\r
 \r
     max_cycle_len = n * m;\r
-    \r
+\r
     // Allocation memory  (must be free in this function)\r
     cycle = (int *)malloc(sizeof(int) * max_cycle_len);\r
 \r
@@ -240,12 +240,12 @@ void Transpose(float *a, int n, int m)
         k = GetNextCycleElement(i, n, q);\r
         cycle[cycle_len] = i;\r
         cycle_len++;\r
-        \r
+\r
         while (k > i)\r
-        {            \r
-            cycle[cycle_len] = k;            \r
+        {\r
+            cycle[cycle_len] = k;\r
             cycle_len++;\r
-            k = GetNextCycleElement(k, n, q);            \r
+            k = GetNextCycleElement(k, n, q);\r
         }\r
         if (k == i)\r
         {\r
@@ -272,14 +272,14 @@ void Transpose(float *a, int n, int m)
 // RESULT\r
 // None\r
 */\r
-void Transpose_int(int *a, int n, int m)\r
+static void Transpose_int(int *a, int n, int m)\r
 {\r
     int *cycle;\r
     int i, k, q, cycle_len;\r
     int max_cycle_len;\r
 \r
     max_cycle_len = n * m;\r
-    \r
+\r
     // Allocation memory  (must be free in this function)\r
     cycle = (int *)malloc(sizeof(int) * max_cycle_len);\r
 \r
@@ -290,12 +290,12 @@ void Transpose_int(int *a, int n, int m)
         k = GetNextCycleElement(i, n, q);\r
         cycle[cycle_len] = i;\r
         cycle_len++;\r
-        \r
+\r
         while (k > i)\r
-        {            \r
-            cycle[cycle_len] = k;            \r
+        {\r
+            cycle[cycle_len] = k;\r
             cycle_len++;\r
-            k = GetNextCycleElement(k, n, q);            \r
+            k = GetNextCycleElement(k, n, q);\r
         }\r
         if (k == i)\r
         {\r
@@ -311,21 +311,21 @@ void Transpose_int(int *a, int n, int m)
 /*\r
 // Solution of the two-dimensional generalized distance transform problem\r
 // on the regular grid at all points\r
-//      min{d2(y' - y) + d4(y' - y)(y' - y) + \r
+//      min{d2(y' - y) + d4(y' - y)(y' - y) +\r
             min(d1(x' - x) + d3(x' - x)(x' - x) + f(x',y'))} (on x', y')\r
 //\r
 // API\r
-// int DistanceTransformTwoDimensionalProblem(const float *f, \r
+// int DistanceTransformTwoDimensionalProblem(const float *f,\r
                                               const int n, const int m,\r
-                                              const float coeff[4],                                             \r
+                                              const float coeff[4],\r
                                               float *distanceTransform,\r
-                                              int *pointsX, int *pointsY); \r
+                                              int *pointsX, int *pointsY);\r
 // INPUT\r
 // f                 - function on the regular grid\r
 // n                 - number of rows\r
 // m                 - number of columns\r
 // coeff             - coefficients of optimizable function\r
-                       coeff[0] = d1, coeff[1] = d2, \r
+                       coeff[0] = d1, coeff[1] = d2,\r
                        coeff[2] = d3, coeff[3] = d4\r
 // OUTPUT\r
 // distanceTransform - values of generalized distance transform\r
@@ -334,9 +334,9 @@ void Transpose_int(int *a, int n, int m)
 // RESULT\r
 // Error status\r
 */\r
-int DistanceTransformTwoDimensionalProblem(const float *f,  \r
+int DistanceTransformTwoDimensionalProblem(const float *f,\r
                                            const int n, const int m,\r
-                                           const float coeff[4],                                             \r
+                                           const float coeff[4],\r
                                            float *distanceTransform,\r
                                            int *pointsX, int *pointsY)\r
 {\r
@@ -349,10 +349,10 @@ int DistanceTransformTwoDimensionalProblem(const float *f,
     for (i = 0; i < n; i++)\r
     {\r
         resOneDimProblem = DistanceTransformOneDimensionalProblem(\r
-                                    f + i * m, m, \r
-                                    coeff[0], coeff[2], \r
-                                    &internalDistTrans[i * m], \r
-                                    &internalPointsX[i * m]); \r
+                                    f + i * m, m,\r
+                                    coeff[0], coeff[2],\r
+                                    &internalDistTrans[i * m],\r
+                                    &internalPointsX[i * m]);\r
         if (resOneDimProblem != DISTANCE_TRANSFORM_OK)\r
             return DISTANCE_TRANSFORM_ERROR;\r
     }\r
@@ -360,9 +360,9 @@ int DistanceTransformTwoDimensionalProblem(const float *f,
     for (j = 0; j < m; j++)\r
     {\r
         resOneDimProblem = DistanceTransformOneDimensionalProblem(\r
-                                    &internalDistTrans[j * n], n, \r
-                                    coeff[1], coeff[3], \r
-                                    distanceTransform + j * n, \r
+                                    &internalDistTrans[j * n], n,\r
+                                    coeff[1], coeff[3],\r
+                                    distanceTransform + j * n,\r
                                     pointsY + j * n);\r
         if (resOneDimProblem != DISTANCE_TRANSFORM_OK)\r
             return DISTANCE_TRANSFORM_ERROR;\r
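Spelled out, the two loops above implement the separable form of the transform documented earlier in this file: the y'-dependent terms do not involve x', so the inner minimum can be taken row by row first and the outer minimum column by column afterwards (presumably the reason the Transpose helpers exist, so that the second pass runs on contiguous memory). In LaTeX form, with the same d1..d4 coefficients:

D(x,y) = \min_{x',\,y'} \big\{ d_1(x'-x) + d_3(x'-x)^2 + d_2(y'-y) + d_4(y'-y)^2 + f(x',y') \big\}
       = \min_{y'} \Big\{ d_2(y'-y) + d_4(y'-y)^2
                          + \underbrace{\min_{x'} \big\{ d_1(x'-x) + d_3(x'-x)^2 + f(x',y') \big\}}_{\text{1D transform of row } y'} \Big\}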
index 22bee33..12dc5c4 100644 (file)
@@ -30,12 +30,12 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
     int height, width, numChannels;\r
     int i, j, kk, c, ii, jj, d;\r
     float  * datadx, * datady;\r
-    \r
+\r
     //channel number in the loop\r
-    int   ch; \r
+    int   ch;\r
     //variables for computing the magnitude\r
     float magnitude, x, y, tx, ty;\r
-    \r
+\r
     IplImage * dx, * dy;\r
     int *nearest;\r
     float *w, a_x, b_x;\r
@@ -51,7 +51,7 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
     //     even indices - contrast-insensitive image\r
     //     odd indices  - contrast-sensitive image\r
     int   * alfa;\r
-    \r
+\r
     // vectors of sector boundaries\r
     float boundary_x[NUM_SECTOR + 1];\r
     float boundary_y[NUM_SECTOR + 1];\r
@@ -63,9 +63,9 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
 \r
     numChannels = image->nChannels;\r
 \r
-    dx    = cvCreateImage(cvSize(image->width, image->height), \r
+    dx    = cvCreateImage(cvSize(image->width, image->height),\r
                           IPL_DEPTH_32F, 3);\r
-    dy    = cvCreateImage(cvSize(image->width, image->height), \r
+    dy    = cvCreateImage(cvSize(image->width, image->height),\r
                           IPL_DEPTH_32F, 3);\r
 \r
     sizeX = width  / k;\r
@@ -77,7 +77,7 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
 \r
     cvFilter2D(image, dx, &kernel_dx, cvPoint(-1, 0));\r
     cvFilter2D(image, dy, &kernel_dy, cvPoint(0, -1));\r
-    \r
+\r
     float arg_vector;\r
     for(i = 0; i <= NUM_SECTOR; i++)\r
     {\r
@@ -113,20 +113,20 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
                     y = ty;\r
                 }\r
             }/*for(ch = 1; ch < numChannels; ch++)*/\r
-            \r
+\r
             max  = boundary_x[0] * x + boundary_y[0] * y;\r
             maxi = 0;\r
-            for (kk = 0; kk < NUM_SECTOR; kk++) \r
+            for (kk = 0; kk < NUM_SECTOR; kk++)\r
             {\r
                 dotProd = boundary_x[kk] * x + boundary_y[kk] * y;\r
-                if (dotProd > max) \r
+                if (dotProd > max)\r
                 {\r
                     max  = dotProd;\r
                     maxi = kk;\r
                 }\r
-                else \r
+                else\r
                 {\r
-                    if (-dotProd > max) \r
+                    if (-dotProd > max)\r
                     {\r
                         max  = -dotProd;\r
                         maxi = kk + NUM_SECTOR;\r
@@ -134,14 +134,14 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
                 }\r
             }\r
             alfa[j * width * 2 + i * 2    ] = maxi % NUM_SECTOR;\r
-            alfa[j * width * 2 + i * 2 + 1] = maxi;  \r
+            alfa[j * width * 2 + i * 2 + 1] = maxi;\r
         }/*for(i = 0; i < width; i++)*/\r
     }/*for(j = 0; j < height; j++)*/\r
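In the loop above each pixel first takes the gradient of the channel with the largest magnitude and is then assigned the sector whose boundary vector gives the largest absolute dot product with that gradient: maxi % NUM_SECTOR is the contrast-insensitive bin and maxi itself the contrast-sensitive one. Below is a standalone sketch of that assignment; filling boundary_x/boundary_y with the cosine/sine of i*pi/NUM_SECTOR, and NUM_SECTOR being 9, are assumptions about code that falls outside this hunk.

#include <cmath>

// Hedged sketch: quantize one gradient vector (x, y) into a contrast-insensitive
// bin (0..kNumSector-1) and a contrast-sensitive bin (0..2*kNumSector-1) by
// projecting it onto the sector boundary vectors, as in the loop above.
static const int   kNumSector = 9;                  // stand-in for NUM_SECTOR
static const float kPi        = 3.14159265358979f;

static void quantizeOrientationSketch(float x, float y,
                                      int *binInsensitive, int *binSensitive)
{
    float boundary_x[kNumSector + 1];
    float boundary_y[kNumSector + 1];
    for (int i = 0; i <= kNumSector; i++)
    {
        float arg = (float)i * kPi / (float)kNumSector;   // assumed construction
        boundary_x[i] = cosf(arg);
        boundary_y[i] = sinf(arg);
    }

    float max  = boundary_x[0] * x + boundary_y[0] * y;
    int   maxi = 0;
    for (int kk = 0; kk < kNumSector; kk++)
    {
        float dotProd = boundary_x[kk] * x + boundary_y[kk] * y;
        if (dotProd > max)
        {
            max  = dotProd;
            maxi = kk;
        }
        else if (-dotProd > max)
        {
            max  = -dotProd;
            maxi = kk + kNumSector;
        }
    }
    *binInsensitive = maxi % kNumSector;   // what goes into alfa[d * 2]
    *binSensitive   = maxi;                // what goes into alfa[d * 2 + 1]
}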
 \r
     //computation of weights and offsets\r
     nearest = (int  *)malloc(sizeof(int  ) *  k);\r
     w       = (float*)malloc(sizeof(float) * (k * 2));\r
-    \r
+\r
     for(i = 0; i < k / 2; i++)\r
     {\r
         nearest[i] = -1;\r
@@ -155,15 +155,15 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
     {\r
         b_x = k / 2 + j + 0.5f;\r
         a_x = k / 2 - j - 0.5f;\r
-        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x)); \r
-        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));  \r
+        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));\r
+        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));\r
     }/*for(j = 0; j < k / 2; j++)*/\r
     for(j = k / 2; j < k; j++)\r
     {\r
         a_x = j - k / 2 + 0.5f;\r
         b_x =-j + k / 2 - 0.5f + k;\r
-        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x)); \r
-        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));  \r
+        w[j * 2    ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));\r
+        w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));\r
     }/*for(j = k / 2; j < k; j++)*/\r
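A small algebraic note on the two loops above: since 1.0f/a_x * ((a_x * b_x) / (a_x + b_x)) reduces to b_x / (a_x + b_x), the pair (w[j*2], w[j*2+1]) equals (b_x/(a_x+b_x), a_x/(a_x+b_x)), that is, ordinary bilinear interpolation weights between the two nearest cell centres, and the two always sum to one. The minimal check below uses nothing beyond the expressions already written above.

#include <cassert>
#include <cmath>

// Sketch: verify that the cell interpolation weights computed above are plain
// bilinear weights that sum to one.
static void checkCellWeightsSketch(int k)
{
    for (int j = 0; j < k / 2; j++)
    {
        float b_x = k / 2 + j + 0.5f;
        float a_x = k / 2 - j - 0.5f;
        float w0  = 1.0f / a_x * ((a_x * b_x) / (a_x + b_x));
        float w1  = 1.0f / b_x * ((a_x * b_x) / (a_x + b_x));
        assert(std::fabs(w0 - b_x / (a_x + b_x)) < 1e-5f);
        assert(std::fabs(w1 - a_x / (a_x + b_x)) < 1e-5f);
        assert(std::fabs(w0 + w1 - 1.0f) < 1e-5f);
    }
}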
 \r
 \r
@@ -176,40 +176,40 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
         {\r
           for(jj = 0; jj < k; jj++)\r
           {\r
-            if ((i * k + ii > 0) && \r
-                (i * k + ii < height - 1) && \r
-                (j * k + jj > 0) && \r
+            if ((i * k + ii > 0) &&\r
+                (i * k + ii < height - 1) &&\r
+                (j * k + jj > 0) &&\r
                 (j * k + jj < width  - 1))\r
             {\r
               d = (k * i + ii) * width + (j * k + jj);\r
-              (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2    ]] += \r
+              (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2    ]] +=\r
                   r[d] * w[ii * 2] * w[jj * 2];\r
-              (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] += \r
+              (*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=\r
                   r[d] * w[ii * 2] * w[jj * 2];\r
-              if ((i + nearest[ii] >= 0) && \r
+              if ((i + nearest[ii] >= 0) &&\r
                   (i + nearest[ii] <= sizeY - 1))\r
               {\r
-                (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2    ]             ] += \r
+                (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2    ]             ] +=\r
                   r[d] * w[ii * 2 + 1] * w[jj * 2 ];\r
-                (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] += \r
+                (*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=\r
                   r[d] * w[ii * 2 + 1] * w[jj * 2 ];\r
               }\r
-              if ((j + nearest[jj] >= 0) && \r
+              if ((j + nearest[jj] >= 0) &&\r
                   (j + nearest[jj] <= sizeX - 1))\r
               {\r
-                (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2    ]             ] += \r
+                (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2    ]             ] +=\r
                   r[d] * w[ii * 2] * w[jj * 2 + 1];\r
-                (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] += \r
+                (*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=\r
                   r[d] * w[ii * 2] * w[jj * 2 + 1];\r
               }\r
-              if ((i + nearest[ii] >= 0) && \r
-                  (i + nearest[ii] <= sizeY - 1) && \r
-                  (j + nearest[jj] >= 0) && \r
+              if ((i + nearest[ii] >= 0) &&\r
+                  (i + nearest[ii] <= sizeY - 1) &&\r
+                  (j + nearest[jj] >= 0) &&\r
                   (j + nearest[jj] <= sizeX - 1))\r
               {\r
-                (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2    ]             ] += \r
+                (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2    ]             ] +=\r
                   r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];\r
-                (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] += \r
+                (*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=\r
                   r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];\r
               }\r
             }\r
@@ -217,14 +217,14 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
         }/*for(ii = 0; ii < k; ii++)*/\r
       }/*for(j = 1; j < sizeX - 1; j++)*/\r
     }/*for(i = 1; i < sizeY - 1; i++)*/\r
-    \r
+\r
     cvReleaseImage(&dx);\r
     cvReleaseImage(&dy);\r
 \r
 \r
     free(w);\r
     free(nearest);\r
-    \r
+\r
     free(r);\r
     free(alfa);\r
 \r
@@ -232,7 +232,7 @@ int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
 }\r
 \r
 /*\r
-// Feature map Normalization and Truncation \r
+// Feature map Normalization and Truncation\r
 //\r
 // API\r
 // int normalizeAndTruncate(featureMap *map, const float alfa);\r
@@ -270,7 +270,7 @@ int normalizeAndTruncate(CvLSVMFeatureMap *map, const float alfa)
         }/*for(j = 0; j < p; j++)*/\r
         partOfNorm[i] = valOfNorm;\r
     }/*for(i = 0; i < sizeX * sizeY; i++)*/\r
-    \r
+\r
     sizeX -= 2;\r
     sizeY -= 2;\r
 \r
@@ -369,13 +369,13 @@ int normalizeAndTruncate(CvLSVMFeatureMap *map, const float alfa)
 // Error status\r
 */\r
 int PCAFeatureMaps(CvLSVMFeatureMap *map)\r
-{ \r
+{\r
     int i,j, ii, jj, k;\r
     int sizeX, sizeY, p,  pp, xp, yp, pos1, pos2;\r
     float * newData;\r
     float val;\r
     float nx, ny;\r
-    \r
+\r
     sizeX = map->sizeX;\r
     sizeY = map->sizeY;\r
     p     = map->numFeatures;\r
@@ -424,7 +424,7 @@ int PCAFeatureMaps(CvLSVMFeatureMap *map)
                 }/*for(jj = 0; jj < xp; jj++)*/\r
                 newData[pos2 + k] = val * nx;\r
                 k++;\r
-            } /*for(ii = 0; ii < yp; ii++)*/           \r
+            } /*for(ii = 0; ii < yp; ii++)*/\r
         }/*for(j = 0; j < sizeX; j++)*/\r
     }/*for(i = 0; i < sizeY; i++)*/\r
 //swap data\r
@@ -439,7 +439,7 @@ int PCAFeatureMaps(CvLSVMFeatureMap *map)
 }\r
 \r
 \r
-int getPathOfFeaturePyramid(IplImage * image, \r
+static int getPathOfFeaturePyramid(IplImage * image,\r
                             float step, int numStep, int startIndex,\r
                             int sideLength, CvLSVMFeaturePyramid **maps)\r
 {\r
@@ -447,7 +447,7 @@ int getPathOfFeaturePyramid(IplImage * image,
     IplImage *scaleTmp;\r
     float scale;\r
     int   i, err;\r
-    \r
+\r
     for(i = 0; i < numStep; i++)\r
     {\r
         scale = 1.0f / powf(step, (float)i);\r
@@ -462,13 +462,13 @@ int getPathOfFeaturePyramid(IplImage * image,
 }\r
 \r
 /*\r
-// Getting feature pyramid  \r
+// Getting feature pyramid\r
 //\r
 // API\r
-// int getFeaturePyramid(IplImage * image, const filterObject **all_F, \r
+// int getFeaturePyramid(IplImage * image, const filterObject **all_F,\r
                       const int n_f,\r
-                      const int lambda, const int k, \r
-                      const int startX, const int startY, \r
+                      const int lambda, const int k,\r
+                      const int startX, const int startY,\r
                       const int W, const int H, featurePyramid **maps);\r
 // INPUT\r
 // image             - image\r
@@ -484,7 +484,7 @@ int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps)
     int   numStep;\r
     int   maxNumCells;\r
     int   W, H;\r
-    \r
+\r
     if(image->depth == IPL_DEPTH_32F)\r
     {\r
         imgResize = image;\r
@@ -493,9 +493,9 @@ int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps)
     {\r
         imgResize = cvCreateImage(cvSize(image->width , image->height) ,\r
                                   IPL_DEPTH_32F , 3);\r
-        cvConvert(image, imgResize);        \r
+        cvConvert(image, imgResize);\r
     }\r
-    \r
+\r
     W = imgResize->width;\r
     H = imgResize->height;\r
 \r
@@ -506,14 +506,14 @@ int getFeaturePyramid(IplImage * image, CvLSVMFeaturePyramid **maps)
         maxNumCells = H / SIDE_LENGTH;\r
     }\r
     numStep = (int)(logf((float) maxNumCells / (5.0f)) / logf( step )) + 1;\r
-    \r
+\r
     allocFeaturePyramidObject(maps, numStep + LAMBDA);\r
 \r
-    getPathOfFeaturePyramid(imgResize, step   , LAMBDA, 0, \r
+    getPathOfFeaturePyramid(imgResize, step   , LAMBDA, 0,\r
                             SIDE_LENGTH / 2, maps);\r
-    getPathOfFeaturePyramid(imgResize, step, numStep, LAMBDA, \r
+    getPathOfFeaturePyramid(imgResize, step, numStep, LAMBDA,\r
                             SIDE_LENGTH    , maps);\r
-    \r
+\r
     if(image->depth != IPL_DEPTH_32F)\r
     {\r
         cvReleaseImage(&imgResize);\r
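Summarizing the pyramid built above: the first LAMBDA levels come from the getPathOfFeaturePyramid call with cell side SIDE_LENGTH / 2 (an extra octave at doubled resolution), the next numStep levels use SIDE_LENGTH, and within each group level i corresponds to the image rescaled by step^(-i). The sketch below only prints that schedule; the concrete values lambda = 10, sideLength = 8 and step = 2^(1/lambda), and deriving maxNumCells from the smaller image dimension, are assumptions, since those constants are defined outside this hunk.

#include <cmath>
#include <cstdio>

// Hedged sketch: print the scale and cell side used at every feature-pyramid
// level, following the two getPathOfFeaturePyramid calls above.
static void printPyramidScheduleSketch(int imageW, int imageH)
{
    const int   lambda     = 10;   // assumed value of LAMBDA
    const int   sideLength = 8;    // assumed value of SIDE_LENGTH
    const float step       = powf(2.0f, 1.0f / (float)lambda);

    int maxNumCells = (imageW < imageH ? imageW : imageH) / sideLength;
    int numStep     = (int)(logf((float)maxNumCells / 5.0f) / logf(step)) + 1;

    for (int i = 0; i < lambda; i++)           // doubled-resolution octave
        printf("level %2d: scale %.3f, cell side %d\n",
               i, 1.0f / powf(step, (float)i), sideLength / 2);
    for (int i = 0; i < numStep; i++)          // normal-resolution levels
        printf("level %2d: scale %.3f, cell side %d\n",
               lambda + i, 1.0f / powf(step, (float)i), sideLength);
}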
index f60121f..b4164f5 100644 (file)
@@ -1,14 +1,14 @@
 #include "precomp.hpp"\r
 #include "_lsvm_fft.h"\r
 \r
-int getEntireRes(int number, int divisor, int *entire, int *res)\r
-{\r
-    *entire = number / divisor;\r
-    *res = number % divisor;\r
-    return FFT_OK;\r
-}\r
+// static int getEntireRes(int number, int divisor, int *entire, int *res)\r
+// {\r
+//     *entire = number / divisor;\r
+//     *res = number % divisor;\r
+//     return FFT_OK;\r
+// }\r
 \r
-int getMultipliers(int n, int *n1, int *n2)\r
+static int getMultipliers(int n, int *n1, int *n2)\r
 {\r
     int multiplier, i;\r
     if (n == 1)\r
@@ -36,13 +36,13 @@ int getMultipliers(int n, int *n1, int *n2)
 // 1-dimensional FFT\r
 //\r
 // API\r
-// int fft(float *x_in, float *x_out, int n, int shift); \r
+// int fft(float *x_in, float *x_out, int n, int shift);\r
 // INPUT\r
 // x_in              - input signal\r
 // n                 - number of elements for computing the Fourier image\r
 // shift             - shift between input elements\r
 // OUTPUT\r
-// x_out             - output signal (contains 2n elements in order \r
+// x_out             - output signal (contains 2n elements in order\r
                        Re(x_in[0]), Im(x_in[0]), Re(x_in[1]), Im(x_in[1]) and etc.)\r
 // RESULT\r
 // Error status\r
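The comment above fixes the interleaved layout of the transform: each output element occupies two floats, Re before Im, and shift is the distance in floats between consecutive input elements, which is how fft2d further down reuses the routine for rows (shift = 2) and for columns (shift = 2 * numColls). A naive O(n^2) reference with that layout can be handy for testing; treating the data as complex and applying the same stride to input and output are assumptions of this sketch, not guarantees about fft() itself.

#include <cmath>

// Hedged reference: naive complex DFT with the interleaved layout described
// above, element t stored as (Re, Im) at offsets t*shift and t*shift + 1.
static void naiveDftSketch(const float *x_in, float *x_out, int n, int shift)
{
    const float twoPi = 6.283185307179586f;
    for (int t = 0; t < n; t++)
    {
        float re = 0.0f, im = 0.0f;
        for (int s = 0; s < n; s++)
        {
            float angle = -twoPi * (float)t * (float)s / (float)n;
            float c  = cosf(angle), sn = sinf(angle);
            float xr = x_in[s * shift], xi = x_in[s * shift + 1];
            re += xr * c - xi * sn;    // complex multiply-accumulate
            im += xr * sn + xi * c;
        }
        x_out[t * shift]     = re;
        x_out[t * shift + 1] = im;
    }
}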
@@ -107,8 +107,8 @@ int fft(float *x_in, float *x_out, int n, int shift)
 // API\r
 // int fftInverse(float *x_in, float *x_out, int n, int shift);\r
 // INPUT\r
-// x_in              - Fourier image of 1d input signal(contains 2n elements \r
-                       in order Re(x_in[0]), Im(x_in[0]), \r
+// x_in              - Fourier image of 1d input signal(contains 2n elements\r
+                       in order Re(x_in[0]), Im(x_in[0]),\r
                        Re(x_in[1]), Im(x_in[1]) and etc.)\r
 // n                 - number of elements for computing the inverse FFT image\r
 // shift             - shift between input elements\r
@@ -180,7 +180,7 @@ int fftInverse(float *x_in, float *x_out, int n, int shift)
 // numColls          - number of columns\r
 // OUTPUT\r
 // x_out             - output signal (contains (2 * numRows * numColls) elements\r
-                       in order Re(x_in[0][0]), Im(x_in[0][0]), \r
+                       in order Re(x_in[0][0]), Im(x_in[0][0]),\r
                        Re(x_in[0][1]), Im(x_in[0][1]) and etc.)\r
 // RESULT\r
 // Error status\r
@@ -193,14 +193,14 @@ int fft2d(float *x_in, float *x_out, int numRows, int numColls)
     x_outTmp = (float *)malloc(sizeof(float) * (2 * size));\r
     for (i = 0; i < numRows; i++)\r
     {\r
-        fft(x_in + i * 2 * numColls, \r
+        fft(x_in + i * 2 * numColls,\r
             x_outTmp + i * 2 * numColls,\r
             numColls, 2);\r
     }\r
     for (i = 0; i < numColls; i++)\r
     {\r
-        fft(x_outTmp + 2 * i, \r
-            x_out + 2 * i, \r
+        fft(x_outTmp + 2 * i,\r
+            x_out + 2 * i,\r
             numRows, 2 * numColls);\r
     }\r
     free(x_outTmp);\r
@@ -213,8 +213,8 @@ int fft2d(float *x_in, float *x_out, int numRows, int numColls)
 // API\r
 // int fftInverse2d(float *x_in, float *x_out, int numRows, int numColls);\r
 // INPUT\r
-// x_in              - Fourier image of matrix (contains (2 * numRows * numColls) \r
-                       elements in order Re(x_in[0][0]), Im(x_in[0][0]), \r
+// x_in              - Fourier image of matrix (contains (2 * numRows * numColls)\r
+                       elements in order Re(x_in[0][0]), Im(x_in[0][0]),\r
                        Re(x_in[0][1]), Im(x_in[0][1]) and etc.)\r
 // numRows           - number of rows\r
 // numColls          - number of columns\r
@@ -237,8 +237,8 @@ int fftInverse2d(float *x_in, float *x_out, int numRows, int numColls)
     }\r
     for (i = 0; i < numColls; i++)\r
     {\r
-        fftInverse(x_outTmp + 2 * i, \r
-            x_out + 2 * i, \r
+        fftInverse(x_outTmp + 2 * i,\r
+            x_out + 2 * i,\r
             numRows, 2 * numColls);\r
     }\r
     free(x_outTmp);\r
index f6c7d61..06e89e6 100644 (file)
@@ -653,7 +653,7 @@ double icvEvalHidHaarClassifier( CvHidHaarClassifier* classifier,
 }
 
 
-CV_IMPL int
+static int
 cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
                                CvPoint pt, double& stage_sum, int start_stage )
 {
@@ -759,7 +759,7 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
                     sum += calc_sum(node->feature.rect[1],p_offset) * node->feature.rect[1].weight;
                     if( node->feature.rect[2].p0 )
                         sum += calc_sum(node->feature.rect[2],p_offset) * node->feature.rect[2].weight;
-                    
+
                     stage_sum += classifier->alpha[sum >= t];
 #else
                     // ayasin - NHM perf optim. Avoid use of costly flaky jcc
@@ -771,7 +771,7 @@ cvRunHaarClassifierCascadeSum( const CvHaarClassifierCascade* _cascade,
                     if( node->feature.rect[2].p0 )
                         _sum += calc_sum(node->feature.rect[2],p_offset) * node->feature.rect[2].weight;
                     __m128d sum = _mm_set_sd(_sum);
-                    
+
                     t = _mm_cmpgt_sd(t, sum);
                     stage_sum = _mm_add_sd(stage_sum, _mm_blendv_pd(b, a, t));
 #endif
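For readers of the SSE branch above: _mm_cmpgt_sd(t, sum) produces an all-ones mask when the node threshold t is greater than the feature sum, and _mm_blendv_pd(b, a, t) then selects a or b through that mask without a data-dependent branch (_mm_blendv_pd is an SSE4.1 instruction, in line with the Nehalem note in the comment). The scalar meaning of those two lines, with a and b presumably holding the same leaf values as alpha[0] and alpha[1] in the non-SSE branch, is simply:

// Scalar equivalent of the masked blend above: add the "below threshold" leaf
// value when sum < t, and the other leaf value otherwise.
static inline double stageSumStepSketch(double stage_sum, double sum,
                                        double t, double a, double b)
{
    return stage_sum + (sum < t ? a : b);
}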
@@ -823,7 +823,7 @@ struct HaarDetectObjects_ScaleImage_Invoker
     HaarDetectObjects_ScaleImage_Invoker( const CvHaarClassifierCascade* _cascade,
                                           int _stripSize, double _factor,
                                           const Mat& _sum1, const Mat& _sqsum1, Mat* _norm1,
-                                          Mat* _mask1, Rect _equRect, ConcurrentRectVector& _vec, 
+                                          Mat* _mask1, Rect _equRect, ConcurrentRectVector& _vec,
                                           std::vector<int>& _levels, std::vector<double>& _weights,
                                           bool _outputLevels  )
     {
@@ -839,19 +839,19 @@ struct HaarDetectObjects_ScaleImage_Invoker
         rejectLevels = _outputLevels ? &_levels : 0;
         levelWeights = _outputLevels ? &_weights : 0;
     }
-    
+
     void operator()( const BlockedRange& range ) const
     {
         Size winSize0 = cascade->orig_window_size;
         Size winSize(cvRound(winSize0.width*factor), cvRound(winSize0.height*factor));
         int y1 = range.begin()*stripSize, y2 = min(range.end()*stripSize, sum1.rows - 1 - winSize0.height);
-        
+
         if (y2 <= y1 || sum1.cols <= 1 + winSize0.width)
             return;
-        
+
         Size ssz(sum1.cols - 1 - winSize0.width, y2 - y1);
         int x, y, ystep = factor > 2 ? 1 : 2;
-        
+
     #ifdef HAVE_IPP
         if( cascade->hid_cascade->ipp_stages )
         {
@@ -860,7 +860,7 @@ struct HaarDetectObjects_ScaleImage_Invoker
                                    sqsum1.ptr<double>(y1), sqsum1.step,
                                    norm1->ptr<float>(y1), norm1->step,
                                    ippiSize(ssz.width, ssz.height), iequRect );
-            
+
             int positive = (ssz.width/ystep)*((ssz.height + ystep-1)/ystep);
 
             if( ystep == 1 )
@@ -870,12 +870,12 @@ struct HaarDetectObjects_ScaleImage_Invoker
                 {
                     uchar* mask1row = mask1->ptr(y);
                     memset( mask1row, 0, ssz.width );
-                    
+
                     if( y % ystep == 0 )
                         for( x = 0; x < ssz.width; x += ystep )
                             mask1row[x] = (uchar)1;
                 }
-            
+
             for( int j = 0; j < cascade->count; j++ )
             {
                 if( ippiApplyHaarClassifier_32f_C1R(
@@ -889,7 +889,7 @@ struct HaarDetectObjects_ScaleImage_Invoker
                 if( positive <= 0 )
                     break;
             }
-            
+
             if( positive > 0 )
                 for( y = y1; y < y2; y += ystep )
                 {
@@ -929,11 +929,11 @@ struct HaarDetectObjects_ScaleImage_Invoker
                     {
                         if( result > 0 )
                             vec->push_back(Rect(cvRound(x*factor), cvRound(y*factor),
-                                           winSize.width, winSize.height)); 
+                                           winSize.width, winSize.height));
                     }
                 }
     }
-    
+
     const CvHaarClassifierCascade* cascade;
     int stripSize;
     double factor;
@@ -943,7 +943,7 @@ struct HaarDetectObjects_ScaleImage_Invoker
     std::vector<int>* rejectLevels;
     std::vector<double>* levelWeights;
 };
-    
+
 
 struct HaarDetectObjects_ScaleCascade_Invoker
 {
@@ -960,7 +960,7 @@ struct HaarDetectObjects_ScaleCascade_Invoker
         p = _p; pq = _pq;
         vec = &_vec;
     }
-    
+
     void operator()( const BlockedRange& range ) const
     {
         int iy, startY = range.begin(), endY = range.end();
@@ -968,14 +968,14 @@ struct HaarDetectObjects_ScaleCascade_Invoker
         const int *pq0 = pq[0], *pq1 = pq[1], *pq2 = pq[2], *pq3 = pq[3];
         bool doCannyPruning = p0 != 0;
         int sstep = (int)(sumstep/sizeof(p0[0]));
-        
+
         for( iy = startY; iy < endY; iy++ )
         {
             int ix, y = cvRound(iy*ystep), ixstep = 1;
             for( ix = xrange.start; ix < xrange.end; ix += ixstep )
             {
                 int x = cvRound(ix*ystep); // it should really be ystep, not ixstep
-                
+
                 if( doCannyPruning )
                 {
                     int offset = y*sstep + x;
@@ -987,7 +987,7 @@ struct HaarDetectObjects_ScaleCascade_Invoker
                         continue;
                     }
                 }
-                
+
                 int result = cvRunHaarClassifierCascade( cascade, cvPoint(x, y), 0 );
                 if( result > 0 )
                     vec->push_back(Rect(x, y, winsize.width, winsize.height));
@@ -995,7 +995,7 @@ struct HaarDetectObjects_ScaleCascade_Invoker
             }
         }
     }
-    
+
     const CvHaarClassifierCascade* cascade;
     double ystep;
     size_t sumstep;
@@ -1005,16 +1005,16 @@ struct HaarDetectObjects_ScaleCascade_Invoker
     const int** pq;
     ConcurrentRectVector* vec;
 };
-    
-    
+
+
 }
-    
+
 
 CvSeq*
-cvHaarDetectObjectsForROC( const CvArr* _img, 
+cvHaarDetectObjectsForROC( const CvArr* _img,
                      CvHaarClassifierCascade* cascade, CvMemStorage* storage,
                      std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
-                     double scaleFactor, int minNeighbors, int flags, 
+                     double scaleFactor, int minNeighbors, int flags,
                      CvSize minSize, CvSize maxSize, bool outputRejectLevels )
 {
     const double GROUP_EPS = 0.2;
@@ -1044,13 +1044,13 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
 
     if( CV_MAT_DEPTH(img->type) != CV_8U )
         CV_Error( CV_StsUnsupportedFormat, "Only 8-bit images are supported" );
-    
+
     if( scaleFactor <= 1 )
         CV_Error( CV_StsOutOfRange, "scale factor must be > 1" );
 
     if( findBiggestObject )
         flags &= ~CV_HAAR_SCALE_IMAGE;
-    
+
     if( maxSize.height == 0 || maxSize.width == 0 )
     {
         maxSize.height = img->rows;
@@ -1132,7 +1132,7 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
         #else
             const int stripCount = 1;
         #endif
-            
+
 #ifdef HAVE_IPP
             if( use_ipp )
             {
@@ -1141,8 +1141,8 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
             }
             else
 #endif
-                cvSetImagesForHaarClassifierCascade( cascade, &sum1, &sqsum1, _tilted, 1. );            
-            
+                cvSetImagesForHaarClassifierCascade( cascade, &sum1, &sqsum1, _tilted, 1. );
+
             cv::Mat _norm1(&norm1), _mask1(&mask1);
             cv::parallel_for(cv::BlockedRange(0, stripCount),
                          cv::HaarDetectObjects_ScaleImage_Invoker(cascade,
@@ -1242,22 +1242,22 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
             {
                 rectList.resize(allCandidates.size());
                 std::copy(allCandidates.begin(), allCandidates.end(), rectList.begin());
-                
+
                 groupRectangles(rectList, std::max(minNeighbors, 1), GROUP_EPS);
-                
+
                 if( !rectList.empty() )
                 {
                     size_t i, sz = rectList.size();
                     cv::Rect maxRect;
-                    
+
                     for( i = 0; i < sz; i++ )
                     {
                         if( rectList[i].area() > maxRect.area() )
                             maxRect = rectList[i];
                     }
-                    
+
                     allCandidates.push_back(maxRect);
-                    
+
                     scanROI = maxRect;
                     int dx = cvRound(maxRect.width*GROUP_EPS);
                     int dy = cvRound(maxRect.height*GROUP_EPS);
@@ -1265,7 +1265,7 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
                     scanROI.y = std::max(scanROI.y - dy, 0);
                     scanROI.width = std::min(scanROI.width + dx*2, img->cols-1-scanROI.x);
                     scanROI.height = std::min(scanROI.height + dy*2, img->rows-1-scanROI.y);
-                
+
                     double minScale = roughSearch ? 0.6 : 0.4;
                     minSize.width = cvRound(maxRect.width*minScale);
                     minSize.height = cvRound(maxRect.height*minScale);
@@ -1277,7 +1277,7 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
     rectList.resize(allCandidates.size());
     if(!allCandidates.empty())
         std::copy(allCandidates.begin(), allCandidates.end(), rectList.begin());
-    
+
     if( minNeighbors != 0 || findBiggestObject )
     {
         if( outputRejectLevels )
@@ -1291,11 +1291,11 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
     }
     else
         rweights.resize(rectList.size(),0);
-        
+
     if( findBiggestObject && rectList.size() )
     {
         CvAvgComp result_comp = {{0,0,0,0},0};
-        
+
         for( size_t i = 0; i < rectList.size(); i++ )
         {
             cv::Rect r = rectList[i];
@@ -1322,14 +1322,14 @@ cvHaarDetectObjectsForROC( const CvArr* _img,
 }
 
 CV_IMPL CvSeq*
-cvHaarDetectObjects( const CvArr* _img, 
+cvHaarDetectObjects( const CvArr* _img,
                      CvHaarClassifierCascade* cascade, CvMemStorage* storage,
                      double scaleFactor,
                      int minNeighbors, int flags, CvSize minSize, CvSize maxSize )
 {
     std::vector<int> fakeLevels;
     std::vector<double> fakeWeights;
-    return cvHaarDetectObjectsForROC( _img, cascade, storage, fakeLevels, fakeWeights, 
+    return cvHaarDetectObjectsForROC( _img, cascade, storage, fakeLevels, fakeWeights,
                                 scaleFactor, minNeighbors, flags, minSize, maxSize, false );
 
 }
@@ -2091,7 +2091,7 @@ namespace cv
 HaarClassifierCascade::HaarClassifierCascade() {}
 HaarClassifierCascade::HaarClassifierCascade(const String& filename)
 { load(filename); }
-    
+
 bool HaarClassifierCascade::load(const String& filename)
 {
     cascade = Ptr<CvHaarClassifierCascade>((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0));
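Since the plain C entry points above (cvHaarDetectObjectsForROC / cvHaarDetectObjects) remain part of this file, a short usage sketch follows. The cascade and image file names are placeholders, the parameter values are typical rather than required, and error handling is kept minimal.

#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui_c.h"
#include <cstdio>

// Hypothetical usage of the C detection API kept in this file.
int main()
{
    CvHaarClassifierCascade* cascade =
        (CvHaarClassifierCascade*)cvLoad("haarcascade_frontalface_alt.xml", 0, 0, 0);
    IplImage* img = cvLoadImage("input.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (!cascade || !img)
        return 1;

    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* objects = cvHaarDetectObjects(img, cascade, storage,
                                         1.1,             // scaleFactor
                                         3,               // minNeighbors
                                         CV_HAAR_SCALE_IMAGE,
                                         cvSize(30, 30),  // minSize
                                         cvSize(0, 0));   // maxSize: no limit
    for (int i = 0; i < (objects ? objects->total : 0); i++)
    {
        CvAvgComp comp = *(CvAvgComp*)cvGetSeqElem(objects, i);
        printf("object %d: (%d, %d) %dx%d, %d neighbors\n", i,
               comp.rect.x, comp.rect.y, comp.rect.width, comp.rect.height,
               comp.neighbors);
    }
    cvReleaseMemStorage(&storage);
    cvReleaseImage(&img);
    cvReleaseHaarClassifierCascade(&cascade);
    return 0;
}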
index d61755a..3856cd3 100644 (file)
@@ -3,11 +3,11 @@
 #include "_lsvm_matching.h"\r
 \r
 /*\r
-// Transformation filter displacement from the block space \r
+// Transformation filter displacement from the block space\r
 // to the space of pixels at the initial image\r
 //\r
 // API\r
-// int convertPoints(int countLevel, CvPoint *points, int *levels, \r
+// int convertPoints(int countLevel, CvPoint *points, int *levels,\r
                   CvPoint **partsDisplacement, int kPoints, int n);\r
 // INPUT\r
 // countLevel        - the number of levels in the feature pyramid\r
 // RESULT\r
 // Error status\r
 */\r
-int convertPoints(int /*countLevel*/, int lambda, \r
+int convertPoints(int /*countLevel*/, int lambda,\r
                   int initialImageLevel,\r
-                  CvPoint *points, int *levels, \r
-                  CvPoint **partsDisplacement, int kPoints, int n, \r
+                  CvPoint *points, int *levels,\r
+                  CvPoint **partsDisplacement, int kPoints, int n,\r
                   int maxXBorder,\r
                   int maxYBorder)\r
 {\r
@@ -37,7 +37,7 @@ int convertPoints(int /*countLevel*/, int lambda,
     step = powf( 2.0f, 1.0f / ((float)lambda) );\r
 \r
     computeBorderSize(maxXBorder, maxYBorder, &bx, &by);\r
-    \r
+\r
     for (i = 0; i < kPoints; i++)\r
     {\r
         // scaling factor for root filter\r
@@ -48,10 +48,10 @@ int convertPoints(int /*countLevel*/, int lambda,
         // scaling factor for part filters\r
         scale = SIDE_LENGTH * powf(step, (float)(levels[i] - lambda - initialImageLevel));\r
         for (j = 0; j < n; j++)\r
-        {            \r
-            partsDisplacement[i][j].x = (int)((partsDisplacement[i][j].x - \r
+        {\r
+            partsDisplacement[i][j].x = (int)((partsDisplacement[i][j].x -\r
                                                2 * bx + 1) * scale);\r
-            partsDisplacement[i][j].y = (int)((partsDisplacement[i][j].y - \r
+            partsDisplacement[i][j].y = (int)((partsDisplacement[i][j].y -\r
                                                2 * by + 1) * scale);\r
         }\r
     }\r
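The loop above undoes the block-space representation: a part displacement found at pyramid level levels[i] is scaled by SIDE_LENGTH * step^(levels[i] - lambda - initialImageLevel) and the doubled feature-map border (2*bx, 2*by) is removed, giving pixel coordinates in the original image. A one-point sketch of the same mapping; the name sideLength stands in for SIDE_LENGTH, which is defined outside this hunk.

#include <cmath>
#include "opencv2/core/types_c.h"

// Hedged sketch: convert a single part-filter displacement from block
// (feature map) coordinates at a given pyramid level back to pixels,
// mirroring the loop above.
static CvPoint blockToPixelSketch(CvPoint p, int level, int lambda,
                                  int initialImageLevel, int bx, int by,
                                  float step, int sideLength)
{
    float scale = sideLength * powf(step, (float)(level - lambda - initialImageLevel));
    return cvPoint((int)((p.x - 2 * bx + 1) * scale),
                   (int)((p.y - 2 * by + 1) * scale));
}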
@@ -62,7 +62,7 @@ int convertPoints(int /*countLevel*/, int lambda,
 // Eliminating boxes that are outside the image boundaries\r
 //\r
 // API\r
-// int clippingBoxes(int width, int height, \r
+// int clippingBoxes(int width, int height,\r
                      CvPoint *points, int kPoints);\r
 // INPUT\r
 // width             - image width\r
@@ -72,12 +72,12 @@ int convertPoints(int /*countLevel*/, int lambda,
 // kPoints           - points number\r
 // OUTPUT\r
 // points            - updated points (if coordinates less than zero then\r
-                       set zero coordinate, if coordinates more than image \r
+                       set zero coordinate, if coordinates more than image\r
                        size then set coordinates equal image size)\r
 // RESULT\r
 // Error status\r
 */\r
-int clippingBoxes(int width, int height, \r
+int clippingBoxes(int width, int height,\r
                   CvPoint *points, int kPoints)\r
 {\r
     int i;\r
@@ -111,7 +111,7 @@ int clippingBoxes(int width, int height,
                                                   int maxXBorder, int maxYBorder);\r
 \r
 // INPUT\r
-// image             - initial image     \r
+// image             - initial image\r
 // maxXBorder        - the largest root filter size (X-direction)\r
 // maxYBorder        - the largest root filter size (Y-direction)\r
 // OUTPUT\r
@@ -149,54 +149,54 @@ CvLSVMFeaturePyramid* createFeaturePyramidWithBorder(IplImage *image,
 // Computation of the root filter displacement and values of score function\r
 //\r
 // API\r
-// int searchObject(const featurePyramid *H, const filterObject **all_F, int n, \r
-                    float b, \r
+// int searchObject(const featurePyramid *H, const filterObject **all_F, int n,\r
+                    float b,\r
                     int maxXBorder,\r
-                     int maxYBorder, \r
+                     int maxYBorder,\r
                      CvPoint **points, int **levels, int *kPoints, float *score,\r
                      CvPoint ***partsDisplacement);\r
 // INPUT\r
 // image             - initial image for searching the object\r
-// all_F             - the set of filters (the first element is root filter, \r
+// all_F             - the set of filters (the first element is root filter,\r
                        other elements - part filters)\r
 // n                 - the number of part filters\r
 // b                 - linear term of the score function\r
 // maxXBorder        - the largest root filter size (X-direction)\r
 // maxYBorder        - the largest root filter size (Y-direction)\r
 // OUTPUT\r
-// points            - positions (x, y) of the upper-left corner \r
+// points            - positions (x, y) of the upper-left corner\r
                        of root filter frame\r
 // levels            - levels that correspond to each position\r
 // kPoints           - number of positions\r
 // score             - value of the score function\r
-// partsDisplacement - part filters displacement for each position \r
+// partsDisplacement - part filters displacement for each position\r
                        of the root filter\r
 // RESULT\r
 // Error status\r
 */\r
-int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F, \r
-                 int n, float b, \r
+int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,\r
+                 int n, float b,\r
                  int maxXBorder,\r
-                 int maxYBorder,  \r
+                 int maxYBorder,\r
                  CvPoint **points, int **levels, int *kPoints, float *score,\r
                  CvPoint ***partsDisplacement)\r
 {\r
     int opResult;\r
 \r
     // Matching\r
-    opResult = maxFunctionalScore(all_F, n, H, b, maxXBorder, maxYBorder, \r
-                                  score, points, levels, \r
+    opResult = maxFunctionalScore(all_F, n, H, b, maxXBorder, maxYBorder,\r
+                                  score, points, levels,\r
                                   kPoints, partsDisplacement);\r
     if (opResult != LATENT_SVM_OK)\r
     {\r
         return LATENT_SVM_SEARCH_OBJECT_FAILED;\r
     }\r
-   \r
-    // Transformation filter displacement from the block space \r
+\r
+    // Transformation filter displacement from the block space\r
     // to the space of pixels at the initial image\r
     // that settles at the level number LAMBDA\r
-    convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points), \r
-                  (*levels), (*partsDisplacement), (*kPoints), n, \r
+    convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points),\r
+                  (*levels), (*partsDisplacement), (*kPoints), n,\r
                   maxXBorder, maxYBorder);\r
 \r
     return LATENT_SVM_OK;\r
@@ -206,7 +206,7 @@ int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F
 // Computation right bottom corners coordinates of bounding boxes\r
 //\r
 // API\r
-// int estimateBoxes(CvPoint *points, int *levels, int kPoints, \r
+// int estimateBoxes(CvPoint *points, int *levels, int kPoints,\r
                      int sizeX, int sizeY, CvPoint **oppositePoints);\r
 // INPUT\r
 // points            - left top corners coordinates of bounding boxes\r
@@ -217,7 +217,7 @@ int searchObject(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F
 // RESULT\r
 // Error status\r
 */\r
-int estimateBoxes(CvPoint *points, int *levels, int kPoints, \r
+static int estimateBoxes(CvPoint *points, int *levels, int kPoints,\r
                   int sizeX, int sizeY, CvPoint **oppositePoints)\r
 {\r
     int i;\r
@@ -237,16 +237,16 @@ int estimateBoxes(CvPoint *points, int *levels, int kPoints,
 // Computation of the root filter displacement and values of score function\r
 //\r
 // API\r
-// int searchObjectThreshold(const featurePyramid *H, \r
+// int searchObjectThreshold(const featurePyramid *H,\r
                              const filterObject **all_F, int n,\r
-                             float b, \r
-                             int maxXBorder, int maxYBorder, \r
+                             float b,\r
+                             int maxXBorder, int maxYBorder,\r
                              float scoreThreshold,\r
-                             CvPoint **points, int **levels, int *kPoints, \r
+                             CvPoint **points, int **levels, int *kPoints,\r
                              float **score, CvPoint ***partsDisplacement);\r
 // INPUT\r
 // H                 - feature pyramid\r
-// all_F             - the set of filters (the first element is root filter, \r
+// all_F             - the set of filters (the first element is root filter,\r
                        other elements - part filters)\r
 // n                 - the number of part filters\r
 // b                 - linear term of the score function\r
@@ -254,22 +254,22 @@ int estimateBoxes(CvPoint *points, int *levels, int kPoints,
 // maxYBorder        - the largest root filter size (Y-direction)\r
 // scoreThreshold    - score threshold\r
 // OUTPUT\r
-// points            - positions (x, y) of the upper-left corner \r
+// points            - positions (x, y) of the upper-left corner\r
                        of root filter frame\r
 // levels            - levels that correspond to each position\r
 // kPoints           - number of positions\r
 // score             - values of the score function\r
-// partsDisplacement - part filters displacement for each position \r
+// partsDisplacement - part filters displacement for each position\r
                        of the root filter\r
 // RESULT\r
 // Error status\r
 */\r
-int searchObjectThreshold(const CvLSVMFeaturePyramid *H, \r
+int searchObjectThreshold(const CvLSVMFeaturePyramid *H,\r
                           const CvLSVMFilterObject **all_F, int n,\r
-                          float b, \r
-                          int maxXBorder, int maxYBorder, \r
+                          float b,\r
+                          int maxXBorder, int maxYBorder,\r
                           float scoreThreshold,\r
-                          CvPoint **points, int **levels, int *kPoints, \r
+                          CvPoint **points, int **levels, int *kPoints,\r
                           float **score, CvPoint ***partsDisplacement,\r
                           int numThreads)\r
 {\r
@@ -284,28 +284,28 @@ int searchObjectThreshold(const CvLSVMFeaturePyramid *H,
         return opResult;\r
     }\r
     opResult = tbbThresholdFunctionalScore(all_F, n, H, b, maxXBorder, maxYBorder,\r
-                                           scoreThreshold, numThreads, score, \r
-                                           points, levels, kPoints, \r
+                                           scoreThreshold, numThreads, score,\r
+                                           points, levels, kPoints,\r
                                            partsDisplacement);\r
 #else\r
-    opResult = thresholdFunctionalScore(all_F, n, H, b, \r
-                                        maxXBorder, maxYBorder, \r
-                                        scoreThreshold, \r
-                                        score, points, levels, \r
+    opResult = thresholdFunctionalScore(all_F, n, H, b,\r
+                                        maxXBorder, maxYBorder,\r
+                                        scoreThreshold,\r
+                                        score, points, levels,\r
                                         kPoints, partsDisplacement);\r
 \r
-       (void)numThreads;\r
+  (void)numThreads;\r
 #endif\r
     if (opResult != LATENT_SVM_OK)\r
     {\r
         return LATENT_SVM_SEARCH_OBJECT_FAILED;\r
-    }  \r
-   \r
-    // Transformation filter displacement from the block space \r
+    }\r
+\r
+    // Transformation filter displacement from the block space\r
     // to the space of pixels at the initial image\r
     // that settles at the level number LAMBDA\r
-    convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points), \r
-                  (*levels), (*partsDisplacement), (*kPoints), n, \r
+    convertPoints(H->numLevels, LAMBDA, LAMBDA, (*points),\r
+                  (*levels), (*partsDisplacement), (*kPoints), n,\r
                   maxXBorder, maxYBorder);\r
 \r
     return LATENT_SVM_OK;\r
@@ -350,9 +350,9 @@ int getOppositePoint(CvPoint point,
 //\r
 // API\r
 // int showRootFilterBoxes(const IplImage *image,\r
-                           const filterObject *filter, \r
+                           const filterObject *filter,\r
                            CvPoint *points, int *levels, int kPoints,\r
-                           CvScalar color, int thickness, \r
+                           CvScalar color, int thickness,\r
                            int line_type, int shift);\r
 // INPUT\r
 // image             - initial image\r
@@ -370,22 +370,22 @@ int getOppositePoint(CvPoint point,
 // Error status\r
 */\r
 int showRootFilterBoxes(IplImage *image,\r
-                        const CvLSVMFilterObject *filter, \r
+                        const CvLSVMFilterObject *filter,\r
                         CvPoint *points, int *levels, int kPoints,\r
-                        CvScalar color, int thickness, \r
+                        CvScalar color, int thickness,\r
                         int line_type, int shift)\r
-{   \r
+{\r
     int i;\r
     float step;\r
     CvPoint oppositePoint;\r
     step = powf( 2.0f, 1.0f / ((float)LAMBDA));\r
-    \r
+\r
     for (i = 0; i < kPoints; i++)\r
     {\r
         // Drawing rectangle for filter\r
-        getOppositePoint(points[i], filter->sizeX, filter->sizeY, \r
+        getOppositePoint(points[i], filter->sizeX, filter->sizeY,\r
                          step, levels[i] - LAMBDA, &oppositePoint);\r
-        cvRectangle(image, points[i], oppositePoint, \r
+        cvRectangle(image, points[i], oppositePoint,\r
                     color, thickness, line_type, shift);\r
     }\r
 #ifdef HAVE_OPENCV_HIGHGUI\r
@@ -399,9 +399,9 @@ int showRootFilterBoxes(IplImage *image,
 //\r
 // API\r
 // int showPartFilterBoxes(const IplImage *image,\r
-                           const filterObject *filter, \r
+                           const filterObject *filter,\r
                            CvPoint *points, int *levels, int kPoints,\r
-                           CvScalar color, int thickness, \r
+                           CvScalar color, int thickness,\r
                            int line_type, int shift);\r
 // INPUT\r
 // image             - initial image\r
@@ -421,9 +421,9 @@ int showRootFilterBoxes(IplImage *image,
 */\r
 int showPartFilterBoxes(IplImage *image,\r
                         const CvLSVMFilterObject **filters,\r
-                        int n, CvPoint **partsDisplacement, \r
+                        int n, CvPoint **partsDisplacement,\r
                         int *levels, int kPoints,\r
-                        CvScalar color, int thickness, \r
+                        CvScalar color, int thickness,\r
                         int line_type, int shift)\r
 {\r
     int i, j;\r
@@ -437,10 +437,10 @@ int showPartFilterBoxes(IplImage *image,
         for (j = 0; j < n; j++)\r
         {\r
             // Drawing rectangles for part filters\r
-            getOppositePoint(partsDisplacement[i][j], \r
-                             filters[j + 1]->sizeX, filters[j + 1]->sizeY, \r
+            getOppositePoint(partsDisplacement[i][j],\r
+                             filters[j + 1]->sizeX, filters[j + 1]->sizeY,\r
                              step, levels[i] - 2 * LAMBDA, &oppositePoint);\r
-            cvRectangle(image, partsDisplacement[i][j], oppositePoint, \r
+            cvRectangle(image, partsDisplacement[i][j], oppositePoint,\r
                         color, thickness, line_type, shift);\r
         }\r
     }\r
@@ -454,8 +454,8 @@ int showPartFilterBoxes(IplImage *image,
 // Drawing boxes\r
 //\r
 // API\r
-// int showBoxes(const IplImage *img, \r
-                 const CvPoint *points, const CvPoint *oppositePoints, int kPoints, \r
+// int showBoxes(const IplImage *img,\r
+                 const CvPoint *points, const CvPoint *oppositePoints, int kPoints,\r
                  CvScalar color, int thickness, int line_type, int shift);\r
 // INPUT\r
 // img               - initial image\r
@@ -470,14 +470,14 @@ int showPartFilterBoxes(IplImage *image,
 // RESULT\r
 // Error status\r
 */\r
-int showBoxes(IplImage *img, \r
-              const CvPoint *points, const CvPoint *oppositePoints, int kPoints, \r
+int showBoxes(IplImage *img,\r
+              const CvPoint *points, const CvPoint *oppositePoints, int kPoints,\r
               CvScalar color, int thickness, int line_type, int shift)\r
 {\r
     int i;\r
     for (i = 0; i < kPoints; i++)\r
     {\r
-        cvRectangle(img, points[i], oppositePoints[i], \r
+        cvRectangle(img, points[i], oppositePoints[i],\r
                     color, thickness, line_type, shift);\r
     }\r
 #ifdef HAVE_OPENCV_HIGHGUI\r
@@ -491,10 +491,10 @@ int showBoxes(IplImage *img,
 //\r
 // API\r
 // int getMaxFilterDims(const filterObject **filters, int kComponents,\r
-                        const int *kPartFilters, \r
+                        const int *kPartFilters,\r
                         unsigned int *maxXBorder, unsigned int *maxYBorder);\r
 // INPUT\r
-// filters           - a set of filters (at first root filter, then part filters \r
+// filters           - a set of filters (at first root filter, then part filters\r
                        and etc. for all components)\r
 // kComponents       - number of components\r
 // kPartFilters      - number of part filters for each component\r
@@ -505,10 +505,10 @@ int showBoxes(IplImage *img,
 // Error status\r
 */\r
 int getMaxFilterDims(const CvLSVMFilterObject **filters, int kComponents,\r
-                     const int *kPartFilters, \r
+                     const int *kPartFilters,\r
                      unsigned int *maxXBorder, unsigned int *maxYBorder)\r
 {\r
-    int i, componentIndex;    \r
+    int i, componentIndex;\r
     *maxXBorder = filters[0]->sizeX;\r
     *maxYBorder = filters[0]->sizeY;\r
     componentIndex = kPartFilters[0] + 1;\r
@@ -532,7 +532,7 @@ int getMaxFilterDims(const CvLSVMFilterObject **filters, int kComponents,
 //\r
 // API\r
 // int searchObjectThresholdSomeComponents(const featurePyramid *H,\r
-                                           const filterObject **filters, \r
+                                           const filterObject **filters,\r
                                            int kComponents, const int *kPartFilters,\r
                                            const float *b, float scoreThreshold,\r
                                            CvPoint **points, CvPoint **oppPoints,\r
@@ -553,7 +553,7 @@ int getMaxFilterDims(const CvLSVMFilterObject **filters, int kComponents,
 // Error status\r
 */\r
 int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,\r
-                                        const CvLSVMFilterObject **filters, \r
+                                        const CvLSVMFilterObject **filters,\r
                                         int kComponents, const int *kPartFilters,\r
                                         const float *b, float scoreThreshold,\r
                                         CvPoint **points, CvPoint **oppPoints,\r
@@ -566,7 +566,7 @@ int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,
     CvPoint **pointsArr, **oppPointsArr, ***partsDisplacementArr;\r
     float **scoreArr;\r
     int *kPointsArr, **levelsArr;\r
-    \r
+\r
     // Allocation memory\r
     pointsArr = (CvPoint **)malloc(sizeof(CvPoint *) * kComponents);\r
     oppPointsArr = (CvPoint **)malloc(sizeof(CvPoint *) * kComponents);\r
@@ -574,7 +574,7 @@ int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,
     kPointsArr = (int *)malloc(sizeof(int) * kComponents);\r
     levelsArr = (int **)malloc(sizeof(int *) * kComponents);\r
     partsDisplacementArr = (CvPoint ***)malloc(sizeof(CvPoint **) * kComponents);\r
-    \r
+\r
     // Getting maximum filter dimensions\r
     error = getMaxFilterDims(filters, kComponents, kPartFilters, &maxXBorder, &maxYBorder);\r
     componentIndex = 0;\r
@@ -585,7 +585,7 @@ int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,
 #ifdef HAVE_TBB\r
         error = searchObjectThreshold(H, &(filters[componentIndex]), kPartFilters[i],\r
             b[i], maxXBorder, maxYBorder, scoreThreshold,\r
-            &(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]), \r
+            &(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]),\r
             &(scoreArr[i]), &(partsDisplacementArr[i]), numThreads);\r
         if (error != LATENT_SVM_OK)\r
         {\r
@@ -599,17 +599,17 @@ int searchObjectThresholdSomeComponents(const CvLSVMFeaturePyramid *H,
             return LATENT_SVM_SEARCH_OBJECT_FAILED;\r
         }\r
 #else\r
-               (void)numThreads;\r
+    (void)numThreads;\r
         searchObjectThreshold(H, &(filters[componentIndex]), kPartFilters[i],\r
-            b[i], maxXBorder, maxYBorder, scoreThreshold, \r
-            &(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]), \r
+            b[i], maxXBorder, maxYBorder, scoreThreshold,\r
+            &(pointsArr[i]), &(levelsArr[i]), &(kPointsArr[i]),\r
             &(scoreArr[i]), &(partsDisplacementArr[i]));\r
 #endif\r
-        estimateBoxes(pointsArr[i], levelsArr[i], kPointsArr[i], \r
-            filters[componentIndex]->sizeX, filters[componentIndex]->sizeY, &(oppPointsArr[i]));        \r
+        estimateBoxes(pointsArr[i], levelsArr[i], kPointsArr[i],\r
+            filters[componentIndex]->sizeX, filters[componentIndex]->sizeY, &(oppPointsArr[i]));\r
         componentIndex += (kPartFilters[i] + 1);\r
         *kPoints += kPointsArr[i];\r
-    }  \r
+    }\r
 \r
     *points = (CvPoint *)malloc(sizeof(CvPoint) * (*kPoints));\r
     *oppPoints = (CvPoint *)malloc(sizeof(CvPoint) * (*kPoints));\r
index cf20702..dd417fa 100644 (file)
@@ -192,7 +192,7 @@ size_t LatentSvmDetector::getClassCount() const
     return classNames.size();\r
 }\r
 \r
-string extractModelName( const string& filename )\r
+static string extractModelName( const string& filename )\r
 {\r
     size_t startPos = filename.rfind('/');\r
     if( startPos == string::npos )\r
index 4635cc0..7a83190 100644 (file)
@@ -91,7 +91,7 @@ void Feature::write(FileStorage& fs) const
  *
  * \return The bounding box of all the templates in original image coordinates.
  */
-Rect cropTemplates(std::vector<Template>& templates)
+static Rect cropTemplates(std::vector<Template>& templates)
 {
   int min_x = std::numeric_limits<int>::max();
   int min_y = std::numeric_limits<int>::max();
@@ -113,7 +113,7 @@ Rect cropTemplates(std::vector<Template>& templates)
       max_y = std::max(max_y, y);
     }
   }
-  
+
   /// @todo Why require even min_x, min_y?
   if (min_x % 2 == 1) --min_x;
   if (min_y % 2 == 1) --min_y;
@@ -126,7 +126,7 @@ Rect cropTemplates(std::vector<Template>& templates)
     templ.height = (max_y - min_y) >> templ.pyramid_level;
     int offset_x = min_x >> templ.pyramid_level;
     int offset_y = min_y >> templ.pyramid_level;
-    
+
     for (int j = 0; j < (int)templ.features.size(); ++j)
     {
       templ.features[j].x -= offset_x;
@@ -265,7 +265,7 @@ void hysteresisGradient(Mat& magnitude, Mat& angle,
  * \param      threshold Magnitude threshold. Keep only gradients whose norms are
  *                       larger than this.
  */
-void quantizedOrientations(const Mat& src, Mat& magnitude,
+static void quantizedOrientations(const Mat& src, Mat& magnitude,
                            Mat& angle, float threshold)
 {
   magnitude.create(src.size(), CV_32F);
@@ -383,7 +383,7 @@ void hysteresisGradient(Mat& magnitude, Mat& quantized_angle,
     {
       if (mag_r[c] > threshold)
       {
-       // Compute histogram of quantized bins in 3x3 patch around pixel
+  // Compute histogram of quantized bins in 3x3 patch around pixel
         int histogram[8] = {0, 0, 0, 0, 0, 0, 0, 0};
 
         uchar* patch3x3_row = &quantized_unfiltered(r-1, c-1);
@@ -391,17 +391,17 @@ void hysteresisGradient(Mat& magnitude, Mat& quantized_angle,
         histogram[patch3x3_row[1]]++;
         histogram[patch3x3_row[2]]++;
 
-       patch3x3_row += quantized_unfiltered.step1();
+  patch3x3_row += quantized_unfiltered.step1();
         histogram[patch3x3_row[0]]++;
         histogram[patch3x3_row[1]]++;
         histogram[patch3x3_row[2]]++;
 
-       patch3x3_row += quantized_unfiltered.step1();
+  patch3x3_row += quantized_unfiltered.step1();
         histogram[patch3x3_row[0]]++;
         histogram[patch3x3_row[1]]++;
         histogram[patch3x3_row[2]]++;
 
-       // Find bin with the most votes from the patch
+  // Find bin with the most votes from the patch
         int max_votes = 0;
         int index = -1;
         for (int i = 0; i < 8; ++i)
@@ -413,8 +413,8 @@ void hysteresisGradient(Mat& magnitude, Mat& quantized_angle,
           }
         }
 
-       // Only accept the quantization if majority of pixels in the patch agree
-       static const int NEIGHBOR_THRESHOLD = 5;
+  // Only accept the quantization if majority of pixels in the patch agree
+  static const int NEIGHBOR_THRESHOLD = 5;
         if (max_votes >= NEIGHBOR_THRESHOLD)
           quantized_angle.at<uchar>(r, c) = 1 << index;
       }
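Restating the filter above: a pixel keeps its quantized orientation only if at least NEIGHBOR_THRESHOLD of the nine labels in its 3x3 neighbourhood agree, and the winning label index is then stored one-hot as 1 << index. A standalone sketch of that vote over a patch of labels in the range 0..7:

#include <opencv2/core/core.hpp>

// Sketch: majority vote over the 3x3 patch of quantized labels centred at
// (r, c). Returns the one-hot encoded label (1 << index) if at least
// neighborThreshold of the nine pixels agree, and 0 otherwise.
static uchar voteQuantizedLabelSketch(const cv::Mat_<uchar>& labels,
                                      int r, int c, int neighborThreshold = 5)
{
    int histogram[8] = {0, 0, 0, 0, 0, 0, 0, 0};
    for (int dr = -1; dr <= 1; ++dr)
        for (int dc = -1; dc <= 1; ++dc)
            histogram[labels(r + dr, c + dc)]++;

    int max_votes = 0, index = -1;
    for (int i = 0; i < 8; ++i)
    {
        if (histogram[i] > max_votes)
        {
            max_votes = histogram[i];
            index = i;
        }
    }
    return (max_votes >= neighborThreshold) ? (uchar)(1 << index) : 0;
}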
@@ -630,7 +630,7 @@ static void accumBilateral(long delta, long i, long j, long * A, long * b, int t
  *
  * \todo Should also need camera model, or at least focal lengths? Replace distance_threshold with mask?
  */
-void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
+static void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
                       int difference_threshold)
 {
   dst = Mat::zeros(src.size(), CV_8U);
@@ -923,7 +923,7 @@ void DepthNormal::write(FileStorage& fs) const
 *                                 Response maps                                          *
 \****************************************************************************************/
 
-void orUnaligned8u(const uchar * src, const int src_stride,
+static void orUnaligned8u(const uchar * src, const int src_stride,
                    uchar * dst, const int dst_stride,
                    const int width, const int height)
 {
@@ -971,7 +971,7 @@ void orUnaligned8u(const uchar * src, const int src_stride,
         __m128i* dst_ptr = reinterpret_cast<__m128i*>(dst + c);
         *dst_ptr = _mm_or_si128(*dst_ptr, val);
       }
-    }    
+    }
 #endif
     for ( ; c < width; ++c)
       dst[c] |= src[c];
@@ -991,7 +991,7 @@ void orUnaligned8u(const uchar * src, const int src_stride,
  * \param[out] dst Destination 8-bit spread image.
  * \param      T   Sampling step. Spread labels T/2 pixels in each direction.
  */
-void spread(const Mat& src, Mat& dst, int T)
+static void spread(const Mat& src, Mat& dst, int T)
 {
   // Allocate and zero-initialize spread (OR'ed) image
   dst = Mat::zeros(src.size(), CV_8U);
@@ -1019,7 +1019,7 @@ CV_DECL_ALIGNED(16) static const unsigned char SIMILARITY_LUT[256] = {0, 4, 3, 4
  * \param[in]  src           The source 8-bit spread quantized image.
  * \param[out] response_maps Vector of 8 response maps, one for each bit label.
  */
-void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
+static void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
 {
   CV_Assert((src.rows * src.cols) % 16 == 0);
 
@@ -1027,16 +1027,16 @@ void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
   response_maps.resize(8);
   for (int i = 0; i < 8; ++i)
     response_maps[i].create(src.size(), CV_8U);
-  
+
   Mat lsb4(src.size(), CV_8U);
   Mat msb4(src.size(), CV_8U);
-  
+
   for (int r = 0; r < src.rows; ++r)
   {
     const uchar* src_r = src.ptr(r);
     uchar* lsb4_r = lsb4.ptr(r);
     uchar* msb4_r = msb4.ptr(r);
-    
+
     for (int c = 0; c < src.cols; ++c)
     {
       // Least significant 4 bits of spread image pixel
@@ -1100,7 +1100,7 @@ void computeResponseMaps(const Mat& src, std::vector<Mat>& response_maps)
  *                          each of which is a linear memory of length (W/T)*(H/T).
  * \param      T            Sampling step.
  */
-void linearize(const Mat& response_map, Mat& linearized, int T)
+static void linearize(const Mat& response_map, Mat& linearized, int T)
 {
   CV_Assert(response_map.rows % T == 0);
   CV_Assert(response_map.cols % T == 0);
@@ -1109,7 +1109,7 @@ void linearize(const Mat& response_map, Mat& linearized, int T)
   int mem_width = response_map.cols / T;
   int mem_height = response_map.rows / T;
   linearized.create(T*T, mem_width * mem_height, CV_8U);
-  
+
   // Outer two for loops iterate over top-left T^2 starting pixels
   int index = 0;
   for (int r_start = 0; r_start < T; ++r_start)
@@ -1118,7 +1118,7 @@ void linearize(const Mat& response_map, Mat& linearized, int T)
     {
       uchar* memory = linearized.ptr(index);
       ++index;
-      
+
       // Inner two loops copy every T-th pixel into the linear memory
       for (int r = r_start; r < response_map.rows; r += T)
       {
@@ -1134,8 +1134,8 @@ void linearize(const Mat& response_map, Mat& linearized, int T)
 *                               Linearized similarities                                  *
 \****************************************************************************************/
 
-const unsigned char* accessLinearMemory(const std::vector<Mat>& linear_memories,
-                                       const Feature& f, int T, int W)
+static const unsigned char* accessLinearMemory(const std::vector<Mat>& linear_memories,
+          const Feature& f, int T, int W)
 {
   // Retrieve the TxT grid of linear memories associated with the feature label
   const Mat& memory_grid = linear_memories[f.label];
@@ -1170,7 +1170,7 @@ const unsigned char* accessLinearMemory(const std::vector<Mat>& linear_memories,
  * \param      size            Size (W, H) of the original input image.
  * \param      T               Sampling step.
  */
-void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
+static void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
                 Mat& dst, Size size, int T)
 {
   // 63 features or less is a special case because the max similarity per-feature is 4.
@@ -1266,7 +1266,7 @@ void similarity(const std::vector<Mat>& linear_memories, const Template& templ,
  * \param      T               Sampling step.
  * \param      center          Center of the local region.
  */
-void similarityLocal(const std::vector<Mat>& linear_memories, const Template& templ,
+static void similarityLocal(const std::vector<Mat>& linear_memories, const Template& templ,
                      Mat& dst, Size size, int T, Point center)
 {
   // Similar to whole-image similarity() above. This version takes a position 'center'
@@ -1342,7 +1342,7 @@ void similarityLocal(const std::vector<Mat>& linear_memories, const Template& te
   }
 }
 
-void addUnaligned8u16u(const uchar * src1, const uchar * src2, ushort * res, int length)
+static void addUnaligned8u16u(const uchar * src1, const uchar * src2, ushort * res, int length)
 {
   const uchar * end = src1 + length;
 
@@ -1362,7 +1362,7 @@ void addUnaligned8u16u(const uchar * src1, const uchar * src2, ushort * res, int
  * \param[in]  similarities Source 8-bit similarity images.
  * \param[out] dst          Destination 16-bit similarity image.
  */
-void addSimilarities(const std::vector<Mat>& similarities, Mat& dst)
+static void addSimilarities(const std::vector<Mat>& similarities, Mat& dst)
 {
   if (similarities.size() == 1)
   {
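Note on the file above: apart from hunks that only strip trailing whitespace, every change marks a file-local helper (cropTemplates, quantizedOrientations, quantizedNormals, orUnaligned8u, spread, computeResponseMaps, linearize, accessLinearMemory, similarity, similarityLocal, addUnaligned8u16u, addSimilarities) as static. Internal linkage means GCC's -Wmissing-declarations no longer demands a prior prototype, and the names cannot clash with other translation units. A minimal sketch of the idea; the names below are illustrative, not OpenCV code:

    // helpers.cpp - illustrative sketch only
    #include <cstddef>
    #include <vector>

    // With 'static' the helper has internal linkage, so no prior declaration
    // is needed and the symbol stays private to this translation unit.
    static int sumOfSquares(const std::vector<int>& v)
    {
        int s = 0;
        for (std::size_t i = 0; i < v.size(); ++i)
            s += v[i] * v[i];
        return s;
    }

    int publicEntryPoint();   // hypothetical API, normally declared in a header
    int publicEntryPoint()
    {
        return sumOfSquares(std::vector<int>(3, 2));
    }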
index 46c8d9e..bff7696 100644 (file)
@@ -4,6 +4,8 @@
 #include "_lsvmparser.h"\r
 #include "_lsvm_error.h"\r
 \r
+namespace\r
+{\r
 int isMODEL    (char *str){\r
     char stag [] = "<Model>";\r
     char etag [] = "</Model>";\r
@@ -213,9 +215,9 @@ void parserRFilter  (FILE * xmlf, int p, CvLSVMFilterObject * model, float *b){
             if(ch == '>'){\r
                 tagBuf[j    ] = ch;\r
                 tagBuf[j + 1] = '\0';\r
-                \r
+\r
                 tagVal = getTeg(tagBuf);\r
-               \r
+\r
                 if(tagVal == ERFILTER){\r
                     //printf("</RootFilter>\n");\r
                     return;\r
@@ -267,7 +269,7 @@ void parserRFilter  (FILE * xmlf, int p, CvLSVMFilterObject * model, float *b){
                 }\r
 \r
                 tag = 0;\r
-                i   = 0;                \r
+                i   = 0;\r
             }else{\r
                 if((tag == 0)&& (st == 1)){\r
                     buf[i] = ch; i++;\r
@@ -275,7 +277,7 @@ void parserRFilter  (FILE * xmlf, int p, CvLSVMFilterObject * model, float *b){
                     tagBuf[j] = ch; j++;\r
                 }\r
             }\r
-        }        \r
+        }\r
     }\r
 }\r
 \r
@@ -303,9 +305,9 @@ void parserV  (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
             if(ch == '>'){\r
                 tagBuf[j    ] = ch;\r
                 tagBuf[j + 1] = '\0';\r
-                \r
+\r
                 tagVal = getTeg(tagBuf);\r
-               \r
+\r
                 if(tagVal == ETAGV){\r
                     //printf("    </V>\n");\r
                     return;\r
@@ -331,7 +333,7 @@ void parserV  (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
                     //printf("        <Vy>%d</Vy>\n", model->V.y);\r
                 }\r
                 tag = 0;\r
-                i   = 0;                \r
+                i   = 0;\r
             }else{\r
                 if((tag == 0)&& (st == 1)){\r
                     buf[i] = ch; i++;\r
@@ -339,7 +341,7 @@ void parserV  (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
                     tagBuf[j] = ch; j++;\r
                 }\r
             }\r
-        }        \r
+        }\r
     }\r
 }\r
 void parserD  (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){\r
@@ -366,9 +368,9 @@ void parserD  (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
             if(ch == '>'){\r
                 tagBuf[j    ] = ch;\r
                 tagBuf[j + 1] = '\0';\r
-                \r
+\r
                 tagVal = getTeg(tagBuf);\r
-               \r
+\r
                 if(tagVal == ETAGD){\r
                     //printf("    </D>\n");\r
                     return;\r
@@ -380,7 +382,7 @@ void parserD  (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
                 if(tagVal == EDx){\r
                     st = 0;\r
                     buf[i] = '\0';\r
-                    \r
+\r
                     model->fineFunction[0] = (float)atof(buf);\r
                     //printf("        <Dx>%f</Dx>\n", model->fineFunction[0]);\r
                 }\r
@@ -391,7 +393,7 @@ void parserD  (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
                 if(tagVal == EDy){\r
                     st = 0;\r
                     buf[i] = '\0';\r
-                    \r
+\r
                     model->fineFunction[1] = (float)atof(buf);\r
                     //printf("        <Dy>%f</Dy>\n", model->fineFunction[1]);\r
                 }\r
@@ -402,7 +404,7 @@ void parserD  (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
                 if(tagVal == EDxx){\r
                     st = 0;\r
                     buf[i] = '\0';\r
-                    \r
+\r
                     model->fineFunction[2] = (float)atof(buf);\r
                     //printf("        <Dxx>%f</Dxx>\n", model->fineFunction[2]);\r
                 }\r
@@ -413,13 +415,13 @@ void parserD  (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
                 if(tagVal == EDyy){\r
                     st = 0;\r
                     buf[i] = '\0';\r
-                    \r
+\r
                     model->fineFunction[3] = (float)atof(buf);\r
                     //printf("        <Dyy>%f</Dyy>\n", model->fineFunction[3]);\r
                 }\r
 \r
                 tag = 0;\r
-                i   = 0;                \r
+                i   = 0;\r
             }else{\r
                 if((tag == 0)&& (st == 1)){\r
                     buf[i] = ch; i++;\r
@@ -427,7 +429,7 @@ void parserD  (FILE * xmlf, int /*p*/, CvLSVMFilterObject * model){
                     tagBuf[j] = ch; j++;\r
                 }\r
             }\r
-        }        \r
+        }\r
     }\r
 }\r
 \r
@@ -465,9 +467,9 @@ void parserPFilter  (FILE * xmlf, int p, int /*N_path*/, CvLSVMFilterObject * mo
             if(ch == '>'){\r
                 tagBuf[j    ] = ch;\r
                 tagBuf[j + 1] = '\0';\r
-                \r
+\r
                 tagVal = getTeg(tagBuf);\r
-               \r
+\r
                 if(tagVal == EPFILTER){\r
                     //printf("</PathFilter>\n");\r
                     return;\r
@@ -515,7 +517,7 @@ void parserPFilter  (FILE * xmlf, int p, int /*N_path*/, CvLSVMFilterObject * mo
                     //printf("WEIGHTS OK\n");\r
                 }\r
                 tag = 0;\r
-                i   = 0;                \r
+                i   = 0;\r
             }else{\r
                 if((tag == 0)&& (st == 1)){\r
                     buf[i] = ch; i++;\r
@@ -523,7 +525,7 @@ void parserPFilter  (FILE * xmlf, int p, int /*N_path*/, CvLSVMFilterObject * mo
                     tagBuf[j] = ch; j++;\r
                 }\r
             }\r
-        }        \r
+        }\r
     }\r
 }\r
 void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last, int *max){\r
@@ -551,9 +553,9 @@ void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last
             if(ch == '>'){\r
                 tagBuf[j    ] = ch;\r
                 tagBuf[j + 1] = '\0';\r
-                \r
+\r
                 tagVal = getTeg(tagBuf);\r
-               \r
+\r
                 if(tagVal == EPFILTERs){\r
                     //printf("</PartFilters>\n");\r
                     return;\r
@@ -564,7 +566,7 @@ void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last
                     N_path++;\r
                 }\r
                 tag = 0;\r
-                i   = 0;                \r
+                i   = 0;\r
             }else{\r
                 if((tag == 0)&& (st == 1)){\r
                     buf[i] = ch; i++;\r
@@ -572,7 +574,7 @@ void parserPFilterS (FILE * xmlf, int p, CvLSVMFilterObject *** model, int *last
                     tagBuf[j] = ch; j++;\r
                 }\r
             }\r
-        }        \r
+        }\r
     }\r
 }\r
 void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model, float *b, int *last, int *max){\r
@@ -599,9 +601,9 @@ void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model,
             if(ch == '>'){\r
                 tagBuf[j    ] = ch;\r
                 tagBuf[j + 1] = '\0';\r
-                \r
+\r
                 tagVal = getTeg(tagBuf);\r
-               \r
+\r
                 if(tagVal == ECOMP){\r
                     (*N_comp) ++;\r
                     return;\r
@@ -614,7 +616,7 @@ void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model,
                     parserPFilterS  (xmlf, p, model, last, max);\r
                 }\r
                 tag = 0;\r
-                i   = 0;                \r
+                i   = 0;\r
             }else{\r
                 if((tag == 0)&& (st == 1)){\r
                     buf[i] = ch; i++;\r
@@ -622,7 +624,7 @@ void parserComp (FILE * xmlf, int p, int *N_comp, CvLSVMFilterObject *** model,
                     tagBuf[j] = ch; j++;\r
                 }\r
             }\r
-        }        \r
+        }\r
     }\r
 }\r
 void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max, int **comp, float **b, int *count, float * score){\r
@@ -637,9 +639,9 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
     int i,j, ii = 0;\r
     char buf[1024];\r
     char tagBuf[1024];\r
-    \r
+\r
     //printf("<Model>\n");\r
-    \r
+\r
     i   = 0;\r
     j   = 0;\r
     st  = 0;\r
@@ -654,9 +656,9 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
             if(ch == '>'){\r
                 tagBuf[j    ] = ch;\r
                 tagBuf[j + 1] = '\0';\r
-                \r
+\r
                 tagVal = getTeg(tagBuf);\r
-               \r
+\r
                 if(tagVal == EMODEL){\r
                     //printf("</Model>\n");\r
                     for(ii = 0; ii <= *last; ii++){\r
@@ -671,7 +673,7 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
                         bb  = (float *)malloc(sizeof(float));\r
                         * comp = cmp;\r
                         * b    = bb;\r
-                        * count = N_comp + 1; \r
+                        * count = N_comp + 1;\r
                     } else {\r
                         cmp = (int   *)malloc(sizeof(int)   * (N_comp + 1));\r
                         bb  = (float *)malloc(sizeof(float) * (N_comp + 1));\r
@@ -683,7 +685,7 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
                         free(* b   );\r
                         * comp = cmp;\r
                         * b    = bb;\r
-                        * count = N_comp + 1; \r
+                        * count = N_comp + 1;\r
                     }\r
                     parserComp(xmlf, p, &N_comp, model, &((*b)[N_comp]), last, max);\r
                     cmp[N_comp - 1] = *last;\r
@@ -709,7 +711,7 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
                     //printf("<ScoreThreshold>%f</ScoreThreshold>\n", score);\r
                 }\r
                 tag = 0;\r
-                i   = 0;                \r
+                i   = 0;\r
             }else{\r
                 if((tag == 0)&& (st == 1)){\r
                     buf[i] = ch; i++;\r
@@ -717,10 +719,12 @@ void parserModel(FILE * xmlf, CvLSVMFilterObject *** model, int *last, int *max,
                     tagBuf[j] = ch; j++;\r
                 }\r
             }\r
-        }        \r
+        }\r
     }\r
 }\r
 \r
+}//namespace\r
+\r
 int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, int *max, int **comp, float **b, int *count, float * score){\r
     int st = 0;\r
     int tag;\r
@@ -739,7 +743,7 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i
     xmlf = fopen(filename, "rb");\r
     if(xmlf == NULL)\r
         return LSVM_PARSER_FILE_NOT_FOUND;\r
-    \r
+\r
     i   = 0;\r
     j   = 0;\r
     st  = 0;\r
@@ -766,9 +770,9 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i
                     tagBuf[j] = ch; j++;\r
                 }\r
             }\r
-        }        \r
+        }\r
     }\r
-    \r
+\r
     fclose(xmlf);\r
     return LATENT_SVM_OK;\r
 }\r
@@ -776,24 +780,24 @@ int LSVMparser(const char * filename, CvLSVMFilterObject *** model, int *last, i
 int loadModel(\r
               const char *modelPath,\r
               CvLSVMFilterObject ***filters,\r
-              int *kFilters, \r
-              int *kComponents, \r
-              int **kPartFilters, \r
-              float **b, \r
-              float *scoreThreshold){ \r
+              int *kFilters,\r
+              int *kComponents,\r
+              int **kPartFilters,\r
+              float **b,\r
+              float *scoreThreshold){\r
     int last;\r
     int max;\r
     int *comp;\r
     int count;\r
     int i;\r
-       int err;\r
+    int err;\r
     float score;\r
     //printf("start_parse\n\n");\r
 \r
     err = LSVMparser(modelPath, filters, &last, &max, &comp, b, &count, &score);\r
-       if(err != LATENT_SVM_OK){\r
-               return err;\r
-       }\r
+    if(err != LATENT_SVM_OK){\r
+        return err;\r
+    }\r
     (*kFilters)       = last + 1;\r
     (*kComponents)    = count;\r
     (*scoreThreshold) = (float) score;\r
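Note on the file above: this C-style parser takes a different route to the same goal. The block of isMODEL/parser* helpers is wrapped in an unnamed namespace (opened right after the includes, closed with the }//namespace line before LSVMparser), which in C++ is the idiomatic counterpart of static: everything inside gets internal linkage, so -Wmissing-declarations stays quiet for the helpers while LSVMparser and loadModel keep their external linkage. A hedged sketch of the idiom with hypothetical names:

    namespace   // unnamed namespace: internal linkage for everything inside
    {
        int parseHeaderTag(const char* tag)   // file-local helper
        {
            return tag != 0 && tag[0] == '<';
        }
    } // namespace

    int parseModelFile(const char* tag);  // exported API, assumed declared in a header
    int parseModelFile(const char* tag)
    {
        return parseHeaderTag(tag);
    }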
index a5b4c44..79d8bd3 100644 (file)
@@ -547,7 +547,7 @@ int addNullableBorder(CvLSVMFeatureMap *map, int bx, int by)
     return LATENT_SVM_OK;\r
 }\r
 \r
-CvLSVMFeatureMap* featureMapBorderPartFilter(CvLSVMFeatureMap *map,\r
+static CvLSVMFeatureMap* featureMapBorderPartFilter(CvLSVMFeatureMap *map,\r
                                        int maxXBorder, int maxYBorder)\r
 {\r
     int bx, by;\r
@@ -1366,6 +1366,7 @@ int thresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,
     return LATENT_SVM_OK;\r
 }\r
 \r
+#ifdef HAVE_TBB\r
 /*\r
 // Creating schedule of pyramid levels processing\r
 //\r
@@ -1390,7 +1391,7 @@ int thresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,
 // RESULT\r
 // Error status\r
 */\r
-int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,\r
+static int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all_F,\r
                    const int n, const int bx, const int by,\r
                    const int threadsNum, int *kLevels, int **processingLevels)\r
 {\r
@@ -1521,7 +1522,6 @@ int createSchedule(const CvLSVMFeaturePyramid *H, const CvLSVMFilterObject **all
     return LATENT_SVM_OK;\r
 }\r
 \r
-#ifdef HAVE_TBB\r
 /*\r
 // int tbbThresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,\r
                                    const CvLSVMFeaturePyramid *H,\r
@@ -1679,7 +1679,7 @@ int tbbThresholdFunctionalScore(const CvLSVMFilterObject **all_F, int n,
 }\r
 #endif\r
 \r
-void sort(int n, const float* x, int* indices)\r
+static void sort(int n, const float* x, int* indices)\r
 {\r
     int i, j;\r
     for (i = 0; i < n; i++)\r
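Note on the file above: besides the static qualifiers, the #ifdef HAVE_TBB guard is pulled up so that it now encloses createSchedule together with tbbThresholdFunctionalScore, presumably because createSchedule is only called from the TBB code path; compiling it as a file-local function in a non-TBB build would otherwise be flagged as unused. An illustrative shape of that pattern, with a placeholder macro and names:

    #ifdef HAVE_FEATURE_X                 // hypothetical macro guarding the only caller
    static int buildSchedule(int levels)  // helper used solely by the X code path
    {
        return levels > 0 ? levels : 1;
    }

    int runWithFeatureX(int levels);      // exported entry point (assumed)
    int runWithFeatureX(int levels)
    {
        return buildSchedule(levels);
    }
    #endif // HAVE_FEATURE_X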
index 8dad2a1..b1967f1 100644 (file)
 #ifndef __OPENCV_PRECOMP_H__
 #define __OPENCV_PRECOMP_H__
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4251 4710 4711 4514 4996 )
 #endif
 
-#ifdef HAVE_CVCONFIG_H 
+#ifdef HAVE_CVCONFIG_H
 #include "cvconfig.h"
 #endif
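Note on the recurring precomp.hpp edit: rewriting #if _MSC_VER >= 1200 as #if defined _MSC_VER && _MSC_VER >= 1200 addresses GCC's -Wundef, which reports any identifier evaluated by #if that was never defined (and _MSC_VER never is outside MSVC). Testing defined _MSC_VER first short-circuits the comparison so the macro is only expanded when it exists; the #if GTEST_CREATE_SHARED_LIBRARY to #ifdef and #if ANDROID to #ifdef ANDROID changes further down follow the same reasoning. A small sketch of the difference:

    // Warns under gcc -Wundef: _MSC_VER is undefined, silently evaluates to 0,
    // and the compiler reports the undefined identifier.
    #if _MSC_VER >= 1200
    #  pragma warning(disable: 4996)
    #endif

    // Quiet under -Wundef: the comparison is only reached when the macro is defined.
    #if defined _MSC_VER && _MSC_VER >= 1200
    #  pragma warning(disable: 4996)
    #endif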
 
index 02b2945..506081f 100644 (file)
@@ -5,7 +5,7 @@
 #include "opencv2/photo/photo.hpp"
 #include "opencv2/highgui/highgui.hpp"
 
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
 #endif
 
index a1f6d8a..672e6c0 100644 (file)
@@ -208,13 +208,13 @@ inline float VectorLength(CvPoint2D32f v1) {
 //HEAP::iterator Heap_Iterator;
 //HEAP Heap;
 
-float FastMarching_solve(int i1,int j1,int i2,int j2, const CvMat* f, const CvMat* t)
+static float FastMarching_solve(int i1,int j1,int i2,int j2, const CvMat* f, const CvMat* t)
 {
     double sol, a11, a22, m12;
     a11=CV_MAT_ELEM(*t,float,i1,j1);
     a22=CV_MAT_ELEM(*t,float,i2,j2);
     m12=MIN(a11,a22);
-    
+
     if( CV_MAT_ELEM(*f,uchar,i1,j1) != INSIDE )
         if( CV_MAT_ELEM(*f,uchar,i2,j2) != INSIDE )
             if( fabs(a11-a22) >= 1.0 )
@@ -227,7 +227,7 @@ float FastMarching_solve(int i1,int j1,int i2,int j2, const CvMat* f, const CvMa
         sol = 1+a22;
     else
         sol = 1+m12;
-            
+
     return (float)sol;
 }
 
@@ -724,19 +724,19 @@ cvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_i
     cv::Ptr<CvMat> mask, band, f, t, out;
     cv::Ptr<CvPriorityQueueFloat> Heap, Out;
     cv::Ptr<IplConvKernel> el_cross, el_range;
-    
+
     CvMat input_hdr, mask_hdr, output_hdr;
     CvMat* input_img, *inpaint_mask, *output_img;
-    int range=cvRound(inpaintRange);    
+    int range=cvRound(inpaintRange);
     int erows, ecols;
 
     input_img = cvGetMat( _input_img, &input_hdr );
     inpaint_mask = cvGetMat( _inpaint_mask, &mask_hdr );
     output_img = cvGetMat( _output_img, &output_hdr );
-    
+
     if( !CV_ARE_SIZES_EQ(input_img,output_img) || !CV_ARE_SIZES_EQ(input_img,inpaint_mask))
         CV_Error( CV_StsUnmatchedSizes, "All the input and output images must have the same size" );
-    
+
     if( (CV_MAT_TYPE(input_img->type) != CV_8UC1 &&
         CV_MAT_TYPE(input_img->type) != CV_8UC3) ||
         !CV_ARE_TYPES_EQ(input_img,output_img) )
@@ -757,7 +757,7 @@ cvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_i
     band = cvCreateMat(erows, ecols, CV_8UC1);
     mask = cvCreateMat(erows, ecols, CV_8UC1);
     el_cross = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_CROSS,NULL);
-    
+
     cvCopy( input_img, output_img );
     cvSet(mask,cvScalar(KNOWN,0,0,0));
     COPY_MASK_BORDER1_C1(inpaint_mask,mask,uchar);
@@ -775,7 +775,7 @@ cvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_i
     cvSet(f,cvScalar(BAND,0,0,0),band);
     cvSet(f,cvScalar(INSIDE,0,0,0),mask);
     cvSet(t,cvScalar(0,0,0,0),band);
-    
+
     if( flags == CV_INPAINT_TELEA )
     {
         out = cvCreateMat(erows, ecols, CV_8UC1);
index 2bf3705..996f4c8 100644 (file)
@@ -43,7 +43,7 @@
 #ifndef __OPENCV_PRECOMP_H__
 #define __OPENCV_PRECOMP_H__
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4251 4512 4710 4711 4514 4996 )
 #endif
 
index 24cf533..ebef099 100644 (file)
@@ -35,12 +35,12 @@ static PyObject* opencv_error = 0;
 static int failmsg(const char *fmt, ...)
 {
     char str[1000];
-    
+
     va_list ap;
     va_start(ap, fmt);
     vsnprintf(str, sizeof(str), fmt, ap);
     va_end(ap);
-    
+
     PyErr_SetString(PyExc_TypeError, str);
     return 0;
 }
@@ -49,7 +49,7 @@ class PyAllowThreads
 {
 public:
     PyAllowThreads() : _state(PyEval_SaveThread()) {}
-    ~PyAllowThreads() 
+    ~PyAllowThreads()
     {
         PyEval_RestoreThread(_state);
     }
@@ -61,7 +61,7 @@ class PyEnsureGIL
 {
 public:
     PyEnsureGIL() : _state(PyGILState_Ensure()) {}
-    ~PyEnsureGIL() 
+    ~PyEnsureGIL()
     {
         PyGILState_Release(_state);
     }
@@ -147,7 +147,7 @@ class NumpyAllocator : public MatAllocator
 public:
     NumpyAllocator() {}
     ~NumpyAllocator() {}
-    
+
     void allocate(int dims, const int* sizes, int type, int*& refcount,
                   uchar*& datastart, uchar*& data, size_t* step)
     {
@@ -180,7 +180,7 @@ public:
             step[i] = (size_t)_strides[i];
         datastart = data = (uchar*)PyArray_DATA(o);
     }
-    
+
     void deallocate(int* refcount, uchar* datastart, uchar* data)
     {
         PyEnsureGIL gil;
@@ -193,7 +193,7 @@ public:
 };
 
 NumpyAllocator g_numpyAllocator;
-    
+
 enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 };
 
 static int pyopencv_to(const PyObject* o, Mat& m, const char* name = "<unknown>", bool allowND=true)
@@ -204,72 +204,72 @@ static int pyopencv_to(const PyObject* o, Mat& m, const char* name = "<unknown>"
             m.allocator = &g_numpyAllocator;
         return true;
     }
-        
+
     if( !PyArray_Check(o) )
     {
         failmsg("%s is not a numpy array", name);
         return false;
     }
-    
+
     int typenum = PyArray_TYPE(o);
     int type = typenum == NPY_UBYTE ? CV_8U : typenum == NPY_BYTE ? CV_8S :
-               typenum == NPY_USHORT ? CV_16U : typenum == NPY_SHORT ? CV_16S : 
+               typenum == NPY_USHORT ? CV_16U : typenum == NPY_SHORT ? CV_16S :
                typenum == NPY_INT || typenum == NPY_LONG ? CV_32S :
                typenum == NPY_FLOAT ? CV_32F :
                typenum == NPY_DOUBLE ? CV_64F : -1;
-    
+
     if( type < 0 )
     {
         failmsg("%s data type = %d is not supported", name, typenum);
         return false;
     }
-    
+
     int ndims = PyArray_NDIM(o);
     if(ndims >= CV_MAX_DIM)
     {
         failmsg("%s dimensionality (=%d) is too high", name, ndims);
         return false;
     }
-    
+
     int size[CV_MAX_DIM+1];
     size_t step[CV_MAX_DIM+1], elemsize = CV_ELEM_SIZE1(type);
     const npy_intp* _sizes = PyArray_DIMS(o);
     const npy_intp* _strides = PyArray_STRIDES(o);
     bool transposed = false;
-    
+
     for(int i = 0; i < ndims; i++)
     {
         size[i] = (int)_sizes[i];
         step[i] = (size_t)_strides[i];
     }
-    
+
     if( ndims == 0 || step[ndims-1] > elemsize ) {
         size[ndims] = 1;
         step[ndims] = elemsize;
         ndims++;
     }
-    
+
     if( ndims >= 2 && step[0] < step[1] )
     {
         std::swap(size[0], size[1]);
         std::swap(step[0], step[1]);
         transposed = true;
     }
-    
+
     if( ndims == 3 && size[2] <= CV_CN_MAX && step[1] == elemsize*size[2] )
     {
         ndims--;
         type |= CV_MAKETYPE(0, size[2]);
     }
-    
+
     if( ndims > 2 && !allowND )
     {
         failmsg("%s has more than 2 dimensions", name);
         return false;
     }
-    
+
     m = Mat(ndims, size, type, PyArray_DATA(o), step);
-    
+
     if( m.data )
     {
         m.refcount = refcountFromPyObject(o);
@@ -277,7 +277,7 @@ static int pyopencv_to(const PyObject* o, Mat& m, const char* name = "<unknown>"
                     // (since Mat destructor will decrement the reference counter)
     };
     m.allocator = &g_numpyAllocator;
-    
+
     if( transposed )
     {
         Mat tmp;
@@ -503,12 +503,12 @@ static inline bool pyopencv_to(PyObject* obj, CvSlice& r, const char* name = "<u
     }
     return PyArg_ParseTuple(obj, "ii", &r.start_index, &r.end_index) > 0;
 }
-                                                
+
 static inline PyObject* pyopencv_from(const CvSlice& r)
 {
     return Py_BuildValue("(ii)", r.start_index, r.end_index);
-}                                                    
-                                                    
+}
+
 static inline bool pyopencv_to(PyObject* obj, Point& p, const char* name = "<unknown>")
 {
     if(!obj || obj == Py_None)
@@ -584,18 +584,18 @@ template<typename _Tp> struct pyopencvVecConverter
             return false;
         int i, j, n = (int)PySequence_Fast_GET_SIZE(seq);
         value.resize(n);
-        
+
         int type = DataType<_Tp>::type;
         int depth = CV_MAT_DEPTH(type), channels = CV_MAT_CN(type);
         PyObject** items = PySequence_Fast_ITEMS(seq);
-        
+
         for( i = 0; i < n; i++ )
         {
             PyObject* item = items[i];
             PyObject* seq_i = 0;
             PyObject** items_i = &item;
             _Cp* data = (_Cp*)&value[i];
-            
+
             if( channels == 2 && PyComplex_CheckExact(item) )
             {
                 Py_complex c = PyComplex_AsCComplex(obj);
@@ -619,7 +619,7 @@ template<typename _Tp> struct pyopencvVecConverter
                         break;
                     continue;
                 }
-                
+
                 seq_i = PySequence_Fast(item, name);
                 if( !seq_i || (int)PySequence_Fast_GET_SIZE(seq_i) != channels )
                 {
@@ -628,7 +628,7 @@ template<typename _Tp> struct pyopencvVecConverter
                 }
                 items_i = PySequence_Fast_ITEMS(seq_i);
             }
-            
+
             for( j = 0; j < channels; j++ )
             {
                 PyObject* item_ij = items_i[j];
@@ -656,7 +656,7 @@ template<typename _Tp> struct pyopencvVecConverter
         Py_DECREF(seq);
         return i == n;
     }
-    
+
     static PyObject* from(const vector<_Tp>& value)
     {
         if(value.empty())
@@ -691,9 +691,9 @@ template<typename _Tp> static inline bool pyopencv_to_generic_vec(PyObject* obj,
         return false;
     int i, n = (int)PySequence_Fast_GET_SIZE(seq);
     value.resize(n);
-    
+
     PyObject** items = PySequence_Fast_ITEMS(seq);
-    
+
     for( i = 0; i < n; i++ )
     {
         PyObject* item = items[i];
@@ -709,7 +709,7 @@ template<typename _Tp> static inline PyObject* pyopencv_from_generic_vec(const v
     int i, n = (int)value.size();
     PyObject* seq = PyList_New(n);
     for( i = 0; i < n; i++ )
-    {        
+    {
         PyObject* item = pyopencv_from(value[i]);
         if(!item)
             break;
@@ -730,7 +730,7 @@ template<typename _Tp> struct pyopencvVecConverter<vector<_Tp> >
     {
         return pyopencv_to_generic_vec(obj, value, name);
     }
-    
+
     static PyObject* from(const vector<vector<_Tp> >& value)
     {
         return pyopencv_from_generic_vec(value);
@@ -743,7 +743,7 @@ template<> struct pyopencvVecConverter<Mat>
     {
         return pyopencv_to_generic_vec(obj, value, name);
     }
-    
+
     static PyObject* from(const vector<Mat>& value)
     {
         return pyopencv_from_generic_vec(value);
@@ -756,7 +756,7 @@ template<> struct pyopencvVecConverter<KeyPoint>
     {
         return pyopencv_to_generic_vec(obj, value, name);
     }
-    
+
     static PyObject* from(const vector<KeyPoint>& value)
     {
         return pyopencv_from_generic_vec(value);
@@ -769,7 +769,7 @@ template<> struct pyopencvVecConverter<DMatch>
     {
         return pyopencv_to_generic_vec(obj, value, name);
     }
-    
+
     static PyObject* from(const vector<DMatch>& value)
     {
         return pyopencv_from_generic_vec(value);
@@ -782,7 +782,7 @@ template<> struct pyopencvVecConverter<string>
     {
         return pyopencv_to_generic_vec(obj, value, name);
     }
-    
+
     static PyObject* from(const vector<string>& value)
     {
         return pyopencv_from_generic_vec(value);
@@ -850,7 +850,7 @@ static bool pyopencv_to(PyObject *o, cv::flann::IndexParams& p, const char *name
     bool ok = false;
     PyObject* keys = PyObject_CallMethod(o,(char*)"keys",0);
     PyObject* values = PyObject_CallMethod(o,(char*)"values",0);
-    
+
     if( keys && values )
     {
         int i, n = (int)PyList_GET_SIZE(keys);
@@ -886,7 +886,7 @@ static bool pyopencv_to(PyObject *o, cv::flann::IndexParams& p, const char *name
         }
         ok = i == n && !PyErr_Occurred();
     }
-    
+
     Py_XDECREF(keys);
     Py_XDECREF(values);
     return ok;
@@ -914,10 +914,10 @@ static void OnMouse(int event, int x, int y, int flags, void* param)
 {
     PyGILState_STATE gstate;
     gstate = PyGILState_Ensure();
-    
+
     PyObject *o = (PyObject*)param;
     PyObject *args = Py_BuildValue("iiiiO", event, x, y, flags, PyTuple_GetItem(o, 1));
-    
+
     PyObject *r = PyObject_Call(PyTuple_GetItem(o, 0), args, NULL);
     if (r == NULL)
         PyErr_Print();
@@ -933,7 +933,7 @@ static PyObject *pycvSetMouseCallback(PyObject *self, PyObject *args, PyObject *
     char* name;
     PyObject *on_mouse;
     PyObject *param = NULL;
-    
+
     if (!PyArg_ParseTupleAndKeywords(args, kw, "sO|O", (char**)keywords, &name, &on_mouse, &param))
         return NULL;
     if (!PyCallable_Check(on_mouse)) {
@@ -947,11 +947,11 @@ static PyObject *pycvSetMouseCallback(PyObject *self, PyObject *args, PyObject *
     Py_RETURN_NONE;
 }
 
-void OnChange(int pos, void *param)
+static void OnChange(int pos, void *param)
 {
     PyGILState_STATE gstate;
     gstate = PyGILState_Ensure();
-    
+
     PyObject *o = (PyObject*)param;
     PyObject *args = Py_BuildValue("(i)", pos);
     PyObject *r = PyObject_Call(PyTuple_GetItem(o, 0), args, NULL);
@@ -968,7 +968,7 @@ static PyObject *pycvCreateTrackbar(PyObject *self, PyObject *args)
     char* window_name;
     int *value = new int;
     int count;
-    
+
     if (!PyArg_ParseTuple(args, "ssiiO", &trackbar_name, &window_name, value, &count, &on_change))
         return NULL;
     if (!PyCallable_Check(on_change)) {
@@ -1011,13 +1011,14 @@ extern "C"
 #if defined WIN32 || defined _WIN32
 __declspec(dllexport)
 #endif
+void initcv2();
 
 void initcv2()
 {
 #if PYTHON_USE_NUMPY
     import_array();
 #endif
-    
+
 #if PYTHON_USE_NUMPY
 #include "pyopencv_generated_type_reg.h"
 #endif
@@ -1029,10 +1030,10 @@ void initcv2()
 
   opencv_error = PyErr_NewException((char*)MODULESTR".error", NULL, NULL);
   PyDict_SetItemString(d, "error", opencv_error);
-  
+
   PyObject* cv_m = init_cv();
 
-  PyDict_SetItemString(d, "cv", cv_m);  
+  PyDict_SetItemString(d, "cv", cv_m);
 
 #define PUBLISH(I) PyDict_SetItemString(d, #I, PyInt_FromLong(I))
 #define PUBLISHU(I) PyDict_SetItemString(d, #I, PyLong_FromUnsignedLong(I))
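Note on the file above: the small +void initcv2(); hunk shows the remaining case. initcv2 has to keep external linkage because it is the entry point the Python 2 interpreter looks up for the cv2 extension module, so it cannot be made static; declaring it immediately before the definition is enough for -Wmissing-declarations, which only requires that some declaration be visible before a non-static definition. A minimal sketch with a hypothetical module name:

    extern "C" void initmymodule();   // declaration first ...

    extern "C" void initmymodule()    // ... so the exported definition is warning-free
    {
        /* module registration would go here */
    }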
index 151d4e5..c61326c 100644 (file)
@@ -198,7 +198,7 @@ static Py_ssize_t what_size(PyObject *o)
 
 /************************************************************************/
 
-CvMat *PyCvMat_AsCvMat(PyObject *o)
+static CvMat *PyCvMat_AsCvMat(PyObject *o)
 {
   assert(0); // not yet implemented: reference counting for CvMat in Kalman is unclear...
   return NULL;
@@ -908,19 +908,19 @@ static PyTypeObject memtrack_Type = {
   sizeof(memtrack_t),                        /*basicsize*/
 };
 
-Py_ssize_t memtrack_getreadbuffer(PyObject *self, Py_ssize_t segment, void **ptrptr)
+static Py_ssize_t memtrack_getreadbuffer(PyObject *self, Py_ssize_t segment, void **ptrptr)
 {
   *ptrptr = &((memtrack_t*)self)->ptr;
   return ((memtrack_t*)self)->size;
 }
 
-Py_ssize_t memtrack_getwritebuffer(PyObject *self, Py_ssize_t segment, void **ptrptr)
+static Py_ssize_t memtrack_getwritebuffer(PyObject *self, Py_ssize_t segment, void **ptrptr)
 {
   *ptrptr = ((memtrack_t*)self)->ptr;
   return ((memtrack_t*)self)->size;
 }
 
-Py_ssize_t memtrack_getsegcount(PyObject *self, Py_ssize_t *lenp)
+static Py_ssize_t memtrack_getsegcount(PyObject *self, Py_ssize_t *lenp)
 {
   return (Py_ssize_t)1;
 }
@@ -3858,7 +3858,7 @@ static PyMethodDef old_methods[] = {
 /************************************************************************/
 /* Module init */
 
-PyObject* init_cv()
+static PyObject* init_cv()
 {
   PyObject *m, *d;
   cvSetErrMode(CV_ErrModeParent);
index ddbe932..b33292e 100644 (file)
@@ -50,7 +50,7 @@
 \r
 // TODO remove LOG macros, add logging class\r
 #if ENABLE_LOG\r
-#if ANDROID\r
+#ifdef ANDROID\r
   #include <iostream>\r
   #include <sstream>\r
   #include <android/log.h>\r
index 3c2e8cf..29f9bf5 100644 (file)
@@ -5,7 +5,7 @@
 
 #include "opencv2/stitching/stitcher.hpp"
 
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
 #endif
 
index 1e91c80..63cb433 100644 (file)
@@ -1,6 +1,7 @@
 #include "perf_precomp.hpp"\r
 \r
 #include "opencv2/highgui/highgui.hpp"\r
+#include "opencv2/core/internal.hpp"\r
 #include "opencv2/flann/flann.hpp"\r
 #include "opencv2/opencv_modules.hpp"\r
 \r
@@ -26,7 +27,7 @@ typedef TestBaseWithParam<String> match;
 PERF_TEST_P(stitch, a123, TEST_DETECTORS)\r
 {\r
     Mat pano;\r
-    \r
+\r
     vector<Mat> imgs;\r
     imgs.push_back( imread( getDataPath("stitching/a1.jpg") ) );\r
     imgs.push_back( imread( getDataPath("stitching/a2.jpg") ) );\r
@@ -60,7 +61,7 @@ PERF_TEST_P(stitch, a123, TEST_DETECTORS)
 PERF_TEST_P(stitch, b12, TEST_DETECTORS)\r
 {\r
     Mat pano;\r
-    \r
+\r
     vector<Mat> imgs;\r
     imgs.push_back( imread( getDataPath("stitching/b1.jpg") ) );\r
     imgs.push_back( imread( getDataPath("stitching/b2.jpg") ) );\r
index 9f3bcd2..a27d8b6 100644 (file)
 
 #include <stdarg.h> // for va_list
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4251 4275 4355 4127 )
 #endif
 
+#define GTEST_DONT_DEFINE_FAIL      0
+#define GTEST_DONT_DEFINE_SUCCEED   0
+#define GTEST_DONT_DEFINE_ASSERT_EQ 0
+#define GTEST_DONT_DEFINE_ASSERT_NE 0
+#define GTEST_DONT_DEFINE_ASSERT_LE 0
+#define GTEST_DONT_DEFINE_ASSERT_LT 0
+#define GTEST_DONT_DEFINE_ASSERT_GE 0
+#define GTEST_DONT_DEFINE_ASSERT_GT 0
+#define GTEST_DONT_DEFINE_TEST      0
+
 #include "opencv2/ts/ts_gtest.h"
+
+#ifndef GTEST_USES_SIMPLE_RE
+#  define GTEST_USES_SIMPLE_RE 0
+#endif
+#ifdef __GNUC__
+#  pragma GCC diagnostic ignored "-Wmissing-declarations"
+#endif
+
 #include "opencv2/core/core.hpp"
 
 namespace cvtest
@@ -39,9 +57,9 @@ using cv::Point;
 using cv::Rect;
 
 class CV_EXPORTS TS;
-    
+
 CV_EXPORTS int64 readSeed(const char* str);
-    
+
 CV_EXPORTS void randUni( RNG& rng, Mat& a, const Scalar& param1, const Scalar& param2 );
 
 inline unsigned randInt( RNG& rng )
@@ -53,13 +71,13 @@ inline  double randReal( RNG& rng )
 {
     return (double)rng;
 }
-    
-    
+
+
 CV_EXPORTS const char* getTypeName( int type );
 CV_EXPORTS int typeByName( const char* type_name );
 
 CV_EXPORTS string vec2str(const string& sep, const int* v, size_t nelems);
-    
+
 inline int clipInt( int val, int min_val, int max_val )
 {
     if( val < min_val )
@@ -71,9 +89,9 @@ inline int clipInt( int val, int min_val, int max_val )
 
 CV_EXPORTS double getMinVal(int depth);
 CV_EXPORTS double getMaxVal(int depth);
-    
+
 CV_EXPORTS Size randomSize(RNG& rng, double maxSizeLog);
-CV_EXPORTS void randomSize(RNG& rng, int minDims, int maxDims, double maxSizeLog, vector<int>& sz);    
+CV_EXPORTS void randomSize(RNG& rng, int minDims, int maxDims, double maxSizeLog, vector<int>& sz);
 CV_EXPORTS int randomType(RNG& rng, int typeMask, int minChannels, int maxChannels);
 CV_EXPORTS Mat randomMat(RNG& rng, Size size, int type, double minVal, double maxVal, bool useRoi);
 CV_EXPORTS Mat randomMat(RNG& rng, const vector<int>& size, int type, double minVal, double maxVal, bool useRoi);
@@ -85,7 +103,7 @@ CV_EXPORTS void divide(const Mat& a, const Mat& b, Mat& c, double alpha=1);
 CV_EXPORTS void convert(const Mat& src, Mat& dst, int dtype, double alpha=1, double beta=0);
 CV_EXPORTS void copy(const Mat& src, Mat& dst, const Mat& mask=Mat(), bool invertMask=false);
 CV_EXPORTS void set(Mat& dst, const Scalar& gamma, const Mat& mask=Mat());
-    
+
 // working with multi-channel arrays
 CV_EXPORTS void extract( const Mat& a, Mat& plane, int coi );
 CV_EXPORTS void insert( const Mat& plane, Mat& a, int coi );
@@ -93,10 +111,10 @@ CV_EXPORTS void insert( const Mat& plane, Mat& a, int coi );
 // checks that the array does not have NaNs and/or Infs and all the elements are
 // within [min_val,max_val). idx is the index of the first "bad" element.
 CV_EXPORTS int check( const Mat& data, double min_val, double max_val, vector<int>* idx );
-    
+
 // modifies values that are close to zero
 CV_EXPORTS void  patchZeros( Mat& mat, double level );
-    
+
 CV_EXPORTS void transpose(const Mat& src, Mat& dst);
 CV_EXPORTS void erode(const Mat& src, Mat& dst, const Mat& _kernel, Point anchor=Point(-1,-1),
                       int borderType=IPL_BORDER_CONSTANT, const Scalar& borderValue=Scalar());
@@ -109,17 +127,17 @@ CV_EXPORTS void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, in
                                int borderType, const Scalar& borderValue=Scalar());
 CV_EXPORTS Mat calcSobelKernel2D( int dx, int dy, int apertureSize, int origin=0 );
 CV_EXPORTS Mat calcLaplaceKernel2D( int aperture_size );
-    
+
 CV_EXPORTS void initUndistortMap( const Mat& a, const Mat& k, Size sz, Mat& mapx, Mat& mapy );
-    
+
 CV_EXPORTS void minMaxLoc(const Mat& src, double* minval, double* maxval,
                           vector<int>* minloc, vector<int>* maxloc, const Mat& mask=Mat());
 CV_EXPORTS double norm(const Mat& src, int normType, const Mat& mask=Mat());
 CV_EXPORTS double norm(const Mat& src1, const Mat& src2, int normType, const Mat& mask=Mat());
 CV_EXPORTS Scalar mean(const Mat& src, const Mat& mask=Mat());
-    
+
 CV_EXPORTS bool cmpUlps(const Mat& data, const Mat& refdata, int expMaxDiff, double* realMaxDiff, vector<int>* idx);
-    
+
 // compares two arrays. max_diff is the maximum actual difference,
 // success_err_level is maximum allowed difference, idx is the index of the first
 // element for which difference is >success_err_level
@@ -127,23 +145,23 @@ CV_EXPORTS bool cmpUlps(const Mat& data, const Mat& refdata, int expMaxDiff, dou
 CV_EXPORTS int cmpEps( const Mat& data, const Mat& refdata, double* max_diff,
                        double success_err_level, vector<int>* idx,
                        bool element_wise_relative_error );
-    
+
 // a wrapper for the previous function. in case of error prints the message to log file.
 CV_EXPORTS int cmpEps2( TS* ts, const Mat& data, const Mat& refdata, double success_err_level,
                         bool element_wise_relative_error, const char* desc );
-    
+
 CV_EXPORTS int cmpEps2_64f( TS* ts, const double* val, const double* refval, int len,
                         double eps, const char* param_name );
-    
+
 CV_EXPORTS void logicOp(const Mat& src1, const Mat& src2, Mat& dst, char c);
 CV_EXPORTS void logicOp(const Mat& src, const Scalar& s, Mat& dst, char c);
 CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst);
-CV_EXPORTS void min(const Mat& src, double s, Mat& dst);    
+CV_EXPORTS void min(const Mat& src, double s, Mat& dst);
 CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst);
-CV_EXPORTS void max(const Mat& src, double s, Mat& dst);    
-    
+CV_EXPORTS void max(const Mat& src, double s, Mat& dst);
+
 CV_EXPORTS void compare(const Mat& src1, const Mat& src2, Mat& dst, int cmpop);
-CV_EXPORTS void compare(const Mat& src, double s, Mat& dst, int cmpop);    
+CV_EXPORTS void compare(const Mat& src, double s, Mat& dst, int cmpop);
 CV_EXPORTS void gemm(const Mat& src1, const Mat& src2, double alpha,
                      const Mat& src3, double beta, Mat& dst, int flags);
     CV_EXPORTS void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& shift );
@@ -155,16 +173,16 @@ struct CV_EXPORTS MatInfo
     const Mat* m;
 };
 
-CV_EXPORTS std::ostream& operator << (std::ostream& out, const MatInfo& m);    
-    
+CV_EXPORTS std::ostream& operator << (std::ostream& out, const MatInfo& m);
+
 struct CV_EXPORTS MatComparator
 {
 public:
     MatComparator(double maxdiff, int context);
-    
+
     ::testing::AssertionResult operator()(const char* expr1, const char* expr2,
                                           const Mat& m1, const Mat& m2);
-    
+
     double maxdiff;
     double realmaxdiff;
     vector<int> loc0;
@@ -238,7 +256,7 @@ protected:
 struct TestInfo
 {
     TestInfo();
-    
+
     // pointer to the test
     BaseTest* test;
 
@@ -247,7 +265,7 @@ struct TestInfo
 
     // seed value right before the data for the failed test case is prepared.
     uint64 rng_seed;
-    
+
     // seed value right before running the test
     uint64 rng_seed0;
 
@@ -263,18 +281,18 @@ struct TestInfo
 struct CV_EXPORTS TSParams
 {
     TSParams();
-    
+
     // RNG seed, passed to and updated by every test executed.
     uint64 rng_seed;
-    
+
     // whether to use IPP, MKL etc. or not
     bool use_optimized;
-    
+
     // extensivity of the tests, scale factor for test_case_count
     double test_case_count_scale;
 };
 
-    
+
 class CV_EXPORTS TS
 {
 public:
@@ -297,10 +315,10 @@ public:
     };
 
     static TS* ptr();
-    
+
     // initialize test system before running the first test
     virtual void init( const string& modulename );
-    
+
     // low-level printing functions that are used by individual tests and by the system itself
     virtual void printf( int streams, const char* fmt, ... );
     virtual void vprintf( int streams, const char* fmt, va_list arglist );
@@ -312,7 +330,7 @@ public:
 
     // sets information about a failed test
     virtual void set_failed_test_info( int fail_code );
-    
+
     virtual void set_gtest_status();
 
     // test error codes
@@ -398,7 +416,7 @@ public:
 
     // returns textual description of failure code
     static string str_from_code( int code );
-    
+
 protected:
 
     // these are allocated within a test to try keep them valid in case of stack corruption
@@ -406,10 +424,10 @@ protected:
 
     // information about the current test
     TestInfo current_test_info;
-        
+
     // the path to data files used by tests
     string data_path;
-    
+
     TSParams params;
     std::string output_buf[MAX_IDX];
 };
@@ -468,7 +486,7 @@ protected:
     virtual void run_func(void) = 0;
     int test_case_idx;
     int progress;
-    double t, freq;   
+    double t, freq;
 
     template<class F>
     int run_test_case( int expected_code, const string& _descr, F f)
@@ -486,7 +504,7 @@ protected:
             t = new_t;
         }
         progress = update_progress(progress, test_case_idx, 0, dt);
-        
+
         int errcount = 0;
         bool thrown = false;
         const char* descr = _descr.c_str() ? _descr.c_str() : "";
@@ -519,21 +537,21 @@ protected:
             errcount = 1;
         }
         test_case_idx++;
-        
+
         return errcount;
     }
 };
-    
+
 struct CV_EXPORTS DefaultRngAuto
 {
     const uint64 old_state;
-    
+
     DefaultRngAuto() : old_state(cv::theRNG().state) { cv::theRNG().state = (uint64)-1; }
     ~DefaultRngAuto() { cv::theRNG().state = old_state; }
-    
+
     DefaultRngAuto& operator=(const DefaultRngAuto&);
 };
-    
+
 }
 
 // fills c with zeros
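Note on the file above: ts.hpp combines two techniques before pulling in ts_gtest.h. The GTEST_DONT_DEFINE_* macros and GTEST_USES_SIMPLE_RE are pinned to 0 so the header's #if checks never meet an undefined name, and #pragma GCC diagnostic ignored "-Wmissing-declarations" relaxes that warning for declarations the project does not control. A hedged sketch of the include-wrapper idea; the push/pop pair is an optional refinement (GCC 4.6 and later) that limits the suppression to the wrapped header rather than the rest of the file, and is not what the diff itself does:

    // Pin option macros the third-party header only tests with '#if'.
    #ifndef THIRDPARTY_USE_FAST_PATH
    #  define THIRDPARTY_USE_FAST_PATH 0
    #endif

    #ifdef __GNUC__
    #  pragma GCC diagnostic push                       // GCC 4.6+
    #  pragma GCC diagnostic ignored "-Wmissing-declarations"
    #endif

    #include <vector>   // stand-in for the third-party header we cannot modify

    #ifdef __GNUC__
    #  pragma GCC diagnostic pop    // restore the project's own warning level
    #endif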
index 4d49fdc..dbcd06e 100644 (file)
 #endif  // __GNUC__
 
 // Determines the platform on which Google Test is compiled.
+#define GTEST_OS_CYGWIN 0
+#define GTEST_OS_SYMBIAN 0
+#define GTEST_OS_WINDOWS 0
+#define GTEST_OS_WINDOWS_MOBILE 0
+#define GTEST_OS_WINDOWS_MINGW 0
+#define GTEST_OS_WINDOWS_DESKTOP 0
+#define GTEST_OS_MAC 0
+#define GTEST_OS_MAC_IOS 0
+#define GTEST_OS_LINUX 0
+#define GTEST_OS_LINUX_ANDROID 0
+#define GTEST_OS_ZOS 0
+#define GTEST_OS_SOLARIS 0
+#define GTEST_OS_AIX 0
+#define GTEST_OS_HPUX 0
+#define GTEST_OS_NACL 0
+
+
 #ifdef __CYGWIN__
+# undef GTEST_OS_CYGWIN
 # define GTEST_OS_CYGWIN 1
 #elif defined __SYMBIAN32__
+# undef GTEST_OS_SYMBIAN
 # define GTEST_OS_SYMBIAN 1
 #elif defined _WIN32
+# undef GTEST_OS_WINDOWS
 # define GTEST_OS_WINDOWS 1
 # ifdef _WIN32_WCE
+#  undef GTEST_OS_WINDOWS_MOBILE
 #  define GTEST_OS_WINDOWS_MOBILE 1
 # elif defined(__MINGW__) || defined(__MINGW32__)
+#  undef GTEST_OS_WINDOWS_MINGW
 #  define GTEST_OS_WINDOWS_MINGW 1
 # else
+#  undef GTEST_OS_WINDOWS_DESKTOP
 #  define GTEST_OS_WINDOWS_DESKTOP 1
 # endif  // _WIN32_WCE
 #elif defined __APPLE__
+# undef GTEST_OS_MAC
 # define GTEST_OS_MAC 1
 # include <TargetConditionals.h>
 # if TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
+#  undef GTEST_OS_MAC_IOS
 #  define GTEST_OS_MAC_IOS 1
 # endif
 #include <TargetConditionals.h>
 #if TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR
-#define GTEST_OS_MAC_IOS 1
+# undef GTEST_OS_MAC_IOS
+# define GTEST_OS_MAC_IOS 1
 #endif
 #elif defined __linux__
+# undef GTEST_OS_LINUX
 # define GTEST_OS_LINUX 1
 # ifdef ANDROID
+#  undef GTEST_OS_LINUX_ANDROID
 #  define GTEST_OS_LINUX_ANDROID 1
 # endif  // ANDROID
 #elif defined __MVS__
+# undef GTEST_OS_ZOS
 # define GTEST_OS_ZOS 1
 #elif defined(__sun) && defined(__SVR4)
+# undef GTEST_OS_SOLARIS
 # define GTEST_OS_SOLARIS 1
 #elif defined(_AIX)
+# undef GTEST_OS_AIX
 # define GTEST_OS_AIX 1
 #elif defined(__hpux)
+# undef GTEST_OS_HPUX
 # define GTEST_OS_HPUX 1
 #elif defined __native_client__
+# undef GTEST_OS_NACL
 # define GTEST_OS_NACL 1
 #endif  // __CYGWIN__
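Note on the file above: the bundled gtest header takes yet another route to keep -Wundef quiet. Every GTEST_OS_* macro is first defined to 0, and the platform-detection ladder then #undefs and redefines the matching one to 1, so later #if GTEST_OS_WINDOWS-style tests always see a defined macro on every platform. The idiom, reduced to two hypothetical flags:

    // Default every flag to 0 so '#if FLAG' never meets an undefined name.
    #define MYLIB_OS_LINUX   0
    #define MYLIB_OS_WINDOWS 0

    #if defined __linux__
    #  undef  MYLIB_OS_LINUX
    #  define MYLIB_OS_LINUX 1
    #elif defined _WIN32
    #  undef  MYLIB_OS_WINDOWS
    #  define MYLIB_OS_WINDOWS 1
    #endif

    #if MYLIB_OS_LINUX        // safe under -Wundef: always defined, either 0 or 1
    #  include <unistd.h>
    #endif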
 
index d2967ea..b788ae0 100644 (file)
@@ -1,10 +1,10 @@
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4127 4251)
 #endif
 
 #include "opencv2/core/core_c.h"
 #include "opencv2/ts/ts.hpp"
 
-#if GTEST_LINKED_AS_SHARED_LIBRARY
+#ifdef GTEST_LINKED_AS_SHARED_LIBRARY
 #error ts module should not have GTEST_LINKED_AS_SHARED_LIBRARY defined
 #endif
index f9e784e..681f9bf 100644 (file)
@@ -112,7 +112,7 @@ static const int tsSigId[] = { SIGSEGV, SIGBUS, SIGFPE, SIGILL, SIGABRT, -1 };
 
 static jmp_buf tsJmpMark;
 
-void signalHandler( int sig_code )
+static void signalHandler( int sig_code )
 {
     int code = TS::FAIL_EXCEPTION;
     switch( sig_code )
@@ -197,7 +197,7 @@ void BaseTest::safe_run( int start_from )
     read_params( ts->get_file_storage() );
     ts->update_context( 0, -1, true );
     ts->update_context( this, -1, true );
-    
+
     if( !::testing::GTEST_FLAG(catch_exceptions) )
         run( start_from );
     else
@@ -218,7 +218,7 @@ void BaseTest::safe_run( int start_from )
         {
             const char* errorStr = cvErrorStr(exc.code);
             char buf[1 << 16];
-            
+
             sprintf( buf, "OpenCV Error: %s (%s) in %s, file %s, line %d",
                     errorStr, exc.err.c_str(), exc.func.size() > 0 ?
                     exc.func.c_str() : "unknown function", exc.file.c_str(), exc.line );
@@ -230,7 +230,7 @@ void BaseTest::safe_run( int start_from )
             ts->set_failed_test_info( TS::FAIL_EXCEPTION );
         }
     }
-    
+
     ts->set_gtest_status();
 }
 
@@ -346,7 +346,7 @@ int BadArgTest::run_test_case( int expected_code, const string& _descr )
     int errcount = 0;
     bool thrown = false;
     const char* descr = _descr.c_str() ? _descr.c_str() : "";
-    
+
     try
     {
         run_func();
@@ -402,7 +402,7 @@ TestInfo::TestInfo()
     test_case_idx = -1;
 }
 
-    
+
 TS::TS()
 {
 } // ctor
@@ -450,7 +450,7 @@ static int tsErrorCallback( int status, const char* func_name, const char* err_m
 void TS::init( const string& modulename )
 {
     char* datapath_dir = getenv("OPENCV_TEST_DATA_PATH");
-    
+
     if( datapath_dir )
     {
         char buf[1024];
@@ -459,7 +459,7 @@ void TS::init( const string& modulename )
         sprintf( buf, "%s%s%s/", datapath_dir, haveSlash ? "" : "/", modulename.c_str() );
         data_path = string(buf);
     }
-    
+
     cv::redirectError((cv::ErrorCallback)tsErrorCallback, this);
 
     if( ::testing::GTEST_FLAG(catch_exceptions) )
@@ -484,10 +484,10 @@ void TS::init( const string& modulename )
             signal( tsSigId[i], SIG_DFL );
 #endif
     }
-    
+
     if( params.use_optimized == 0 )
         cv::setUseOptimized(false);
-    
+
     rng = RNG(params.rng_seed);
 }
 
@@ -497,11 +497,11 @@ void TS::set_gtest_status()
     int code = get_err_code();
     if( code >= 0 )
         return SUCCEED();
-    
+
     char seedstr[32];
     sprintf(seedstr, "%08x%08x", (unsigned)(current_test_info.rng_seed>>32),
                                 (unsigned)(current_test_info.rng_seed));
-    
+
     string logs = "";
     if( !output_buf[SUMMARY_IDX].empty() )
         logs += "\n-----------------------------------\n\tSUM: " + output_buf[SUMMARY_IDX];
@@ -510,7 +510,7 @@ void TS::set_gtest_status()
     if( !output_buf[CONSOLE_IDX].empty() )
         logs += "\n-----------------------------------\n\tCONSOLE: " + output_buf[CONSOLE_IDX];
     logs += "\n-----------------------------------\n";
-    
+
     FAIL() << "\n\tfailure reason: " << str_from_code(code) <<
         "\n\ttest case #" << current_test_info.test_case_idx <<
         "\n\tseed: " << seedstr << logs;
@@ -518,7 +518,7 @@ void TS::set_gtest_status()
 
 
 CvFileStorage* TS::get_file_storage() { return 0; }
-    
+
 void TS::update_context( BaseTest* test, int test_case_idx, bool update_ts_context )
 {
     if( current_test_info.test != test )
@@ -528,7 +528,7 @@ void TS::update_context( BaseTest* test, int test_case_idx, bool update_ts_conte
         rng = RNG(params.rng_seed);
         current_test_info.rng_seed0 = current_test_info.rng_seed = rng.state;
     }
-        
+
     current_test_info.test = test;
     current_test_info.test_case_idx = test_case_idx;
     current_test_info.code = 0;
@@ -537,7 +537,7 @@ void TS::update_context( BaseTest* test, int test_case_idx, bool update_ts_conte
         current_test_info.rng_seed = rng.state;
 }
 
-    
+
 void TS::set_failed_test_info( int fail_code )
 {
     if( current_test_info.code >= 0 )
@@ -577,7 +577,7 @@ void TS::printf( int streams, const char* fmt, ... )
         va_end( l );
     }
 }
-    
+
 
 TS ts;
 TS* TS::ptr() { return &ts; }
index ee12665..81d49fa 100644 (file)
@@ -35,8 +35,8 @@ string vec2str( const string& sep, const int* v, size_t nelems )
     }
     return result;
 }
-    
-    
+
+
 Size randomSize(RNG& rng, double maxSizeLog)
 {
     double width_log = rng.uniform(0., maxSizeLog);
@@ -99,8 +99,8 @@ double getMaxVal(int depth)
     depth == CV_32F ? FLT_MAX : depth == CV_64F ? DBL_MAX : -1;
     CV_Assert(val != -1);
     return val;
-}    
-    
+}
+
 Mat randomMat(RNG& rng, Size size, int type, double minVal, double maxVal, bool useRoi)
 {
     Size size0 = size;
@@ -109,9 +109,9 @@ Mat randomMat(RNG& rng, Size size, int type, double minVal, double maxVal, bool
         size0.width += std::max(rng.uniform(0, 10) - 5, 0);
         size0.height += std::max(rng.uniform(0, 10) - 5, 0);
     }
-        
+
     Mat m(size0, type);
-    
+
     rng.fill(m, RNG::UNIFORM, Scalar::all(minVal), Scalar::all(maxVal));
     if( size0 == size )
         return m;
@@ -135,15 +135,15 @@ Mat randomMat(RNG& rng, const vector<int>& size, int type, double minVal, double
         }
         eqsize = eqsize && size[i] == size0[i];
     }
-    
+
     Mat m(dims, &size0[0], type);
-    
+
     rng.fill(m, RNG::UNIFORM, Scalar::all(minVal), Scalar::all(maxVal));
     if( eqsize )
         return m;
     return m(&r[0]);
 }
-    
+
 void add(const Mat& _a, double alpha, const Mat& _b, double beta,
         Scalar gamma, Mat& c, int ctype, bool calcAbs)
 {
@@ -168,26 +168,26 @@ void add(const Mat& _a, double alpha, const Mat& _b, double beta,
     }
     else
         CV_Assert(a.size == b.size);
-    
+
     if( ctype < 0 )
         ctype = a.depth();
     ctype = CV_MAKETYPE(CV_MAT_DEPTH(ctype), a.channels());
     c.create(a.dims, &a.size[0], ctype);
     const Mat *arrays[] = {&a, &b, &c, 0};
     Mat planes[3], buf[3];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t i, nplanes = it.nplanes;
-       int cn=a.channels();
+    int cn=a.channels();
     int total = (int)planes[0].total(), maxsize = std::min(12*12*std::max(12/cn, 1), total);
-    
+
     CV_Assert(planes[0].rows == 1);
     buf[0].create(1, maxsize, CV_64FC(cn));
     if(!b.empty())
         buf[1].create(1, maxsize, CV_64FC(cn));
     buf[2].create(1, maxsize, CV_64FC(cn));
     scalarToRawData(gamma, buf[2].data, CV_64FC(cn), (int)(maxsize*cn));
-    
+
     for( i = 0; i < nplanes; i++, ++it)
     {
         for( int j = 0; j < total; j += maxsize )
@@ -196,12 +196,12 @@ void add(const Mat& _a, double alpha, const Mat& _b, double beta,
             Mat apart0 = planes[0].colRange(j, j2);
             Mat cpart0 = planes[2].colRange(j, j2);
             Mat apart = buf[0].colRange(0, j2 - j);
-            
+
             apart0.convertTo(apart, apart.type(), alpha);
             size_t k, n = (j2 - j)*cn;
             double* aptr = (double*)apart.data;
             const double* gptr = (const double*)buf[2].data;
-            
+
             if( b.empty() )
             {
                 for( k = 0; k < n; k++ )
@@ -213,7 +213,7 @@ void add(const Mat& _a, double alpha, const Mat& _b, double beta,
                 Mat bpart = buf[1].colRange(0, (int)(j2 - j));
                 bpart0.convertTo(bpart, bpart.type(), beta);
                 const double* bptr = (const double*)bpart.data;
-                
+
                 for( k = 0; k < n; k++ )
                     aptr[k] += bptr[k] + gptr[k];
             }
@@ -271,7 +271,7 @@ convertTo(const _Tp* src, void* dst, int dtype, size_t total, double alpha, doub
         CV_Assert(0);
     }
 }
-    
+
 void convert(const Mat& src, Mat& dst, int dtype, double alpha, double beta)
 {
     dtype = CV_MAKETYPE(CV_MAT_DEPTH(dtype), src.channels());
@@ -286,19 +286,19 @@ void convert(const Mat& src, Mat& dst, int dtype, double alpha, double beta)
         copy( src, dst );
         return;
     }
-    
+
     const Mat *arrays[]={&src, &dst, 0};
     Mat planes[2];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total()*planes[0].channels();
     size_t i, nplanes = it.nplanes;
-    
+
     for( i = 0; i < nplanes; i++, ++it)
     {
         const uchar* sptr = planes[0].data;
         uchar* dptr = planes[1].data;
-        
+
         switch( src.depth() )
         {
         case CV_8U:
@@ -325,12 +325,12 @@ void convert(const Mat& src, Mat& dst, int dtype, double alpha, double beta)
         }
     }
 }
-    
-     
+
+
 void copy(const Mat& src, Mat& dst, const Mat& mask, bool invertMask)
 {
     dst.create(src.dims, &src.size[0], src.type());
-    
+
     if(mask.empty())
     {
         const Mat* arrays[] = {&src, &dst, 0};
@@ -338,28 +338,28 @@ void copy(const Mat& src, Mat& dst, const Mat& mask, bool invertMask)
         NAryMatIterator it(arrays, planes);
         size_t i, nplanes = it.nplanes;
         size_t planeSize = planes[0].total()*src.elemSize();
-        
+
         for( i = 0; i < nplanes; i++, ++it )
             memcpy(planes[1].data, planes[0].data, planeSize);
-        
+
         return;
     }
-    
+
     CV_Assert( src.size == mask.size && mask.type() == CV_8U );
-    
+
     const Mat *arrays[]={&src, &dst, &mask, 0};
     Mat planes[3];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t j, k, elemSize = src.elemSize(), total = planes[0].total();
     size_t i, nplanes = it.nplanes;
-    
+
     for( i = 0; i < nplanes; i++, ++it)
     {
         const uchar* sptr = planes[0].data;
         uchar* dptr = planes[1].data;
         const uchar* mptr = planes[2].data;
-        
+
         for( j = 0; j < total; j++, sptr += elemSize, dptr += elemSize )
         {
             if( (mptr[j] != 0) ^ invertMask )
@@ -369,13 +369,13 @@ void copy(const Mat& src, Mat& dst, const Mat& mask, bool invertMask)
     }
 }
 
-    
+
 void set(Mat& dst, const Scalar& gamma, const Mat& mask)
 {
     double buf[12];
     scalarToRawData(gamma, &buf, dst.type(), dst.channels());
     const uchar* gptr = (const uchar*)&buf[0];
-    
+
     if(mask.empty())
     {
         const Mat* arrays[] = {&dst, 0};
@@ -383,12 +383,12 @@ void set(Mat& dst, const Scalar& gamma, const Mat& mask)
         NAryMatIterator it(arrays, &plane);
         size_t i, nplanes = it.nplanes;
         size_t j, k, elemSize = dst.elemSize(), planeSize = plane.total()*elemSize;
-        
+
         for( k = 1; k < elemSize; k++ )
             if( gptr[k] != gptr[0] )
                 break;
         bool uniform = k >= elemSize;
-        
+
         for( i = 0; i < nplanes; i++, ++it )
         {
             uchar* dptr = plane.data;
@@ -405,21 +405,21 @@ void set(Mat& dst, const Scalar& gamma, const Mat& mask)
         }
         return;
     }
-    
+
     CV_Assert( dst.size == mask.size && mask.type() == CV_8U );
-    
+
     const Mat *arrays[]={&dst, &mask, 0};
     Mat planes[2];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t j, k, elemSize = dst.elemSize(), total = planes[0].total();
     size_t i, nplanes = it.nplanes;
-    
+
     for( i = 0; i < nplanes; i++, ++it)
     {
         uchar* dptr = planes[0].data;
         const uchar* mptr = planes[1].data;
-        
+
         for( j = 0; j < total; j++, dptr += elemSize )
         {
             if( mptr[j] )
@@ -434,18 +434,18 @@ void insert(const Mat& src, Mat& dst, int coi)
 {
     CV_Assert( dst.size == src.size && src.depth() == dst.depth() &&
               0 <= coi && coi < dst.channels() );
-    
+
     const Mat* arrays[] = {&src, &dst, 0};
     Mat planes[2];
     NAryMatIterator it(arrays, planes);
     size_t i, nplanes = it.nplanes;
     size_t j, k, size0 = src.elemSize(), size1 = dst.elemSize(), total = planes[0].total();
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr = planes[0].data;
         uchar* dptr = planes[1].data + coi*size0;
-        
+
         for( j = 0; j < total; j++, sptr += size0, dptr += size1 )
         {
             for( k = 0; k < size0; k++ )
@@ -454,23 +454,23 @@ void insert(const Mat& src, Mat& dst, int coi)
     }
 }
 
-    
+
 void extract(const Mat& src, Mat& dst, int coi)
 {
     dst.create( src.dims, &src.size[0], src.depth() );
     CV_Assert( 0 <= coi && coi < src.channels() );
-    
+
     const Mat* arrays[] = {&src, &dst, 0};
     Mat planes[2];
     NAryMatIterator it(arrays, planes);
     size_t i, nplanes = it.nplanes;
     size_t j, k, size0 = src.elemSize(), size1 = dst.elemSize(), total = planes[0].total();
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr = planes[0].data + coi*size1;
         uchar* dptr = planes[1].data;
-        
+
         for( j = 0; j < total; j++, sptr += size0, dptr += size1 )
         {
             for( k = 0; k < size1; k++ )
@@ -478,19 +478,19 @@ void extract(const Mat& src, Mat& dst, int coi)
         }
     }
 }
-    
-    
+
+
 void transpose(const Mat& src, Mat& dst)
 {
     CV_Assert(src.dims == 2);
     dst.create(src.cols, src.rows, src.type());
     int i, j, k, esz = (int)src.elemSize();
-    
+
     for( i = 0; i < dst.rows; i++ )
     {
         const uchar* sptr = src.ptr(0) + i*esz;
         uchar* dptr = dst.ptr(i);
-        
+
         for( j = 0; j < dst.cols; j++, sptr += src.step[0], dptr += esz )
         {
             for( k = 0; k < esz; k++ )
@@ -499,7 +499,7 @@ void transpose(const Mat& src, Mat& dst)
     }
 }
 
-    
+
 template<typename _Tp> static void
 randUniInt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, const Scalar& delta)
 {
@@ -511,7 +511,7 @@ randUniInt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, cons
         }
 }
 
-    
+
 template<typename _Tp> static void
 randUniFlt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, const Scalar& delta)
 {
@@ -523,13 +523,13 @@ randUniFlt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, cons
         }
 }
 
-    
+
 void randUni( RNG& rng, Mat& a, const Scalar& param0, const Scalar& param1 )
 {
     Scalar scale = param0;
     Scalar delta = param1;
     double C = a.depth() < CV_32F ? 1./(65536.*65536.) : 1.;
-    
+
     for( int k = 0; k < 4; k++ )
     {
         double s = scale.val[k] - delta.val[k];
@@ -545,12 +545,12 @@ void randUni( RNG& rng, Mat& a, const Scalar& param0, const Scalar& param1 )
 
     const Mat *arrays[]={&a, 0};
     Mat plane;
-    
+
     NAryMatIterator it(arrays, &plane);
     size_t i, nplanes = it.nplanes;
-       int depth = a.depth(), cn = a.channels();
+    int depth = a.depth(), cn = a.channels();
     size_t total = plane.total()*cn;
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         switch( depth )
@@ -581,19 +581,19 @@ void randUni( RNG& rng, Mat& a, const Scalar& param0, const Scalar& param1 )
         }
     }
 }
-    
-    
+
+
 template<typename _Tp> static void
 erode_(const Mat& src, Mat& dst, const vector<int>& ofsvec)
 {
     int width = dst.cols*src.channels(), n = (int)ofsvec.size();
     const int* ofs = &ofsvec[0];
-    
+
     for( int y = 0; y < dst.rows; y++ )
     {
         const _Tp* sptr = src.ptr<_Tp>(y);
         _Tp* dptr = dst.ptr<_Tp>(y);
-        
+
         for( int x = 0; x < width; x++ )
         {
             _Tp result = sptr[x + ofs[0]];
@@ -604,18 +604,18 @@ erode_(const Mat& src, Mat& dst, const vector<int>& ofsvec)
     }
 }
 
-    
+
 template<typename _Tp> static void
 dilate_(const Mat& src, Mat& dst, const vector<int>& ofsvec)
 {
     int width = dst.cols*src.channels(), n = (int)ofsvec.size();
     const int* ofs = &ofsvec[0];
-    
+
     for( int y = 0; y < dst.rows; y++ )
     {
         const _Tp* sptr = src.ptr<_Tp>(y);
         _Tp* dptr = dst.ptr<_Tp>(y);
-        
+
         for( int x = 0; x < width; x++ )
         {
             _Tp result = sptr[x + ofs[0]];
@@ -625,8 +625,8 @@ dilate_(const Mat& src, Mat& dst, const vector<int>& ofsvec)
         }
     }
 }
-    
-    
+
+
 void erode(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
            int borderType, const Scalar& _borderValue)
 {
@@ -648,7 +648,7 @@ void erode(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
                    anchor.x, kernel.cols - anchor.x - 1,
                    borderType, borderValue);
     dst.create( _src.size(), src.type() );
-    
+
     vector<int> ofs;
     int step = (int)(src.step/src.elemSize1()), cn = src.channels();
     for( int i = 0; i < kernel.rows; i++ )
@@ -657,7 +657,7 @@ void erode(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
                 ofs.push_back(i*step + j*cn);
     if( ofs.empty() )
         ofs.push_back(anchor.y*step + anchor.x*cn);
-    
+
     switch( src.depth() )
     {
     case CV_8U:
@@ -705,7 +705,7 @@ void dilate(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
                    anchor.x, kernel.cols - anchor.x - 1,
                    borderType, borderValue);
     dst.create( _src.size(), src.type() );
-    
+
     vector<int> ofs;
     int step = (int)(src.step/src.elemSize1()), cn = src.channels();
     for( int i = 0; i < kernel.rows; i++ )
@@ -714,7 +714,7 @@ void dilate(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
                 ofs.push_back(i*step + j*cn);
     if( ofs.empty() )
         ofs.push_back(anchor.y*step + anchor.x*cn);
-    
+
     switch( src.depth() )
     {
     case CV_8U:
@@ -741,21 +741,21 @@ void dilate(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
     default:
         CV_Assert(0);
     }
-}    
+}
+
 
-    
 template<typename _Tp> static void
 filter2D_(const Mat& src, Mat& dst, const vector<int>& ofsvec, const vector<double>& coeffvec)
 {
     const int* ofs = &ofsvec[0];
     const double* coeff = &coeffvec[0];
     int width = dst.cols*dst.channels(), ncoeffs = (int)ofsvec.size();
-    
+
     for( int y = 0; y < dst.rows; y++ )
     {
         const _Tp* sptr = src.ptr<_Tp>(y);
         double* dptr = dst.ptr<double>(y);
-        
+
         for( int x = 0; x < width; x++ )
         {
             double s = 0;
@@ -765,8 +765,8 @@ filter2D_(const Mat& src, Mat& dst, const vector<int>& ofsvec, const vector<doub
         }
     }
 }
-    
-    
+
+
 void filter2D(const Mat& _src, Mat& dst, int ddepth, const Mat& kernel,
               Point anchor, double delta, int borderType, const Scalar& _borderValue)
 {
@@ -781,17 +781,17 @@ void filter2D(const Mat& _src, Mat& dst, int ddepth, const Mat& kernel,
                    anchor.x, kernel.cols - anchor.x - 1,
                    borderType, borderValue);
     _dst.create( _src.size(), CV_MAKETYPE(CV_64F, src.channels()) );
-    
+
     vector<int> ofs;
     vector<double> coeff(kernel.rows*kernel.cols);
     Mat cmat(kernel.rows, kernel.cols, CV_64F, &coeff[0]);
     convert(kernel, cmat, cmat.type());
-    
+
     int step = (int)(src.step/src.elemSize1()), cn = src.channels();
     for( int i = 0; i < kernel.rows; i++ )
         for( int j = 0; j < kernel.cols; j++ )
                 ofs.push_back(i*step + j*cn);
-    
+
     switch( src.depth() )
     {
     case CV_8U:
@@ -818,7 +818,7 @@ void filter2D(const Mat& _src, Mat& dst, int ddepth, const Mat& kernel,
     default:
         CV_Assert(0);
     }
-    
+
     convert(_dst, dst, ddepth, 1, delta);
 }
 
@@ -855,8 +855,8 @@ static int borderInterpolate( int p, int len, int borderType )
     else
         CV_Error( CV_StsBadArg, "Unknown/unsupported border type" );
     return p;
-}    
-    
+}
+
 
 void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int right,
                     int borderType, const Scalar& borderValue)
@@ -864,13 +864,13 @@ void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int
     dst.create(src.rows + top + bottom, src.cols + left + right, src.type());
     int i, j, k, esz = (int)src.elemSize();
     int width = src.cols*esz, width1 = dst.cols*esz;
-    
+
     if( borderType == IPL_BORDER_CONSTANT )
     {
         vector<uchar> valvec((src.cols + left + right)*esz);
         uchar* val = &valvec[0];
         scalarToRawData(borderValue, val, src.type(), (src.cols + left + right)*src.channels());
-        
+
         left *= esz;
         right *= esz;
         for( i = 0; i < src.rows; i++ )
@@ -885,14 +885,14 @@ void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int
             for( j = 0; j < right; j++ )
                 dptr[j + width] = val[j];
         }
-        
+
         for( i = 0; i < top; i++ )
         {
             uchar* dptr = dst.ptr(i);
             for( j = 0; j < width1; j++ )
                 dptr[j] = val[j];
         }
-        
+
         for( i = 0; i < bottom; i++ )
         {
             uchar* dptr = dst.ptr(i + top + src.rows);
@@ -917,14 +917,14 @@ void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int
             for( k = 0; k < esz; k++ )
                 rtab[i*esz + k] = j + k;
         }
-        
+
         left *= esz;
         right *= esz;
         for( i = 0; i < src.rows; i++ )
         {
             const uchar* sptr = src.ptr(i);
             uchar* dptr = dst.ptr(i + top);
-            
+
             for( j = 0; j < left; j++ )
                 dptr[j] = sptr[ltab[j]];
             if( dptr + left != sptr )
@@ -935,29 +935,29 @@ void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int
             for( j = 0; j < right; j++ )
                 dptr[j + left + width] = sptr[rtab[j]];
         }
-        
+
         for( i = 0; i < top; i++ )
         {
             j = borderInterpolate(i - top, src.rows, borderType);
             const uchar* sptr = dst.ptr(j + top);
             uchar* dptr = dst.ptr(i);
-            
+
             for( k = 0; k < width1; k++ )
                 dptr[k] = sptr[k];
         }
-        
+
         for( i = 0; i < bottom; i++ )
         {
             j = borderInterpolate(i + src.rows, src.rows, borderType);
             const uchar* sptr = dst.ptr(j + top);
             uchar* dptr = dst.ptr(i + top + src.rows);
-            
+
             for( k = 0; k < width1; k++ )
                 dptr[k] = sptr[k];
         }
     }
 }
-    
+
 
 template<typename _Tp> static void
 minMaxLoc_(const _Tp* src, size_t total, size_t startidx,
@@ -967,7 +967,7 @@ minMaxLoc_(const _Tp* src, size_t total, size_t startidx,
 {
     _Tp maxval = saturate_cast<_Tp>(*_maxval), minval = saturate_cast<_Tp>(*_minval);
     size_t minpos = *_minpos, maxpos = *_maxpos;
-    
+
     if( !mask )
     {
         for( size_t i = 0; i < total; i++ )
@@ -1002,7 +1002,7 @@ minMaxLoc_(const _Tp* src, size_t total, size_t startidx,
             }
         }
     }
-    
+
     *_maxval = maxval;
     *_minval = minval;
     *_maxpos = maxpos;
@@ -1029,7 +1029,7 @@ static void setpos( const Mat& mtx, vector<int>& pos, size_t idx )
             pos[i] = -1;
     }
 }
-    
+
 void minMaxLoc(const Mat& src, double* _minval, double* _maxval,
                vector<int>* _minloc, vector<int>* _maxloc,
                const Mat& mask)
@@ -1037,20 +1037,20 @@ void minMaxLoc(const Mat& src, double* _minval, double* _maxval,
     CV_Assert( src.channels() == 1 );
     const Mat *arrays[]={&src, &mask, 0};
     Mat planes[2];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t startidx = 1, total = planes[0].total();
     size_t i, nplanes = it.nplanes;
-       int depth = src.depth();
+    int depth = src.depth();
     double maxval = depth < CV_32F ? INT_MIN : depth == CV_32F ? -FLT_MAX : -DBL_MAX;
     double minval = depth < CV_32F ? INT_MAX : depth == CV_32F ? FLT_MAX : DBL_MAX;
     size_t maxidx = 0, minidx = 0;
-    
+
     for( i = 0; i < nplanes; i++, ++it, startidx += total )
     {
         const uchar* sptr = planes[0].data;
         const uchar* mptr = planes[1].data;
-        
+
         switch( depth )
         {
         case CV_8U:
@@ -1085,10 +1085,10 @@ void minMaxLoc(const Mat& src, double* _minval, double* _maxval,
             CV_Assert(0);
         }
     }
-    
+
     if( minidx == 0 )
         minval = maxval = 0;
-    
+
     if( _maxval )
         *_maxval = maxval;
     if( _minval )
@@ -1099,14 +1099,14 @@ void minMaxLoc(const Mat& src, double* _minval, double* _maxval,
         setpos( src, *_minloc, minidx );
 }
 
-    
+
 static int
 normHamming(const uchar* src, size_t total, int cellSize)
 {
     int result = 0;
     int mask = cellSize == 1 ? 1 : cellSize == 2 ? 3 : cellSize == 4 ? 15 : -1;
     CV_Assert( mask >= 0 );
-    
+
     for( size_t i = 0; i < total; i++ )
     {
         unsigned a = src[i];
@@ -1115,8 +1115,8 @@ normHamming(const uchar* src, size_t total, int cellSize)
     }
     return result;
 }
-    
-    
+
+
 template<typename _Tp> static double
 norm_(const _Tp* src, size_t total, int cn, int normType, double startval, const uchar* mask)
 {
@@ -1124,7 +1124,7 @@ norm_(const _Tp* src, size_t total, int cn, int normType, double startval, const
     double result = startval;
     if( !mask )
         total *= cn;
-    
+
     if( normType == NORM_INF )
     {
         if( !mask )
@@ -1181,7 +1181,7 @@ norm_(const _Tp* src1, const _Tp* src2, size_t total, int cn, int normType, doub
     double result = startval;
     if( !mask )
         total *= cn;
-    
+
     if( normType == NORM_INF )
     {
         if( !mask )
@@ -1229,8 +1229,8 @@ norm_(const _Tp* src1, const _Tp* src2, size_t total, int cn, int normType, doub
     }
     return result;
 }
-    
-    
+
+
 double norm(const Mat& src, int normType, const Mat& mask)
 {
     if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
@@ -1241,42 +1241,42 @@ double norm(const Mat& src, int normType, const Mat& mask)
             bitwise_and(src, mask, temp);
             return norm(temp, normType, Mat());
         }
-        
+
         CV_Assert( src.depth() == CV_8U );
-        
+
         const Mat *arrays[]={&src, 0};
         Mat planes[1];
-        
+
         NAryMatIterator it(arrays, planes);
         size_t total = planes[0].total();
         size_t i, nplanes = it.nplanes;
         double result = 0;
         int cellSize = normType == NORM_HAMMING ? 1 : 2;
-        
+
         for( i = 0; i < nplanes; i++, ++it )
             result += normHamming(planes[0].data, total, cellSize);
         return result;
     }
     int normType0 = normType;
     normType = normType == NORM_L2SQR ? NORM_L2 : normType;
-    
+
     CV_Assert( mask.empty() || (src.size == mask.size && mask.type() == CV_8U) );
     CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
-    
+
     const Mat *arrays[]={&src, &mask, 0};
     Mat planes[2];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total();
     size_t i, nplanes = it.nplanes;
     int depth = src.depth(), cn = planes[0].channels();
     double result = 0;
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr = planes[0].data;
         const uchar* mptr = planes[1].data;
-        
+
         switch( depth )
         {
         case CV_8U:
@@ -1309,7 +1309,7 @@ double norm(const Mat& src, int normType, const Mat& mask)
     return result;
 }
 
-    
+
 double norm(const Mat& src1, const Mat& src2, int normType, const Mat& mask)
 {
     if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
@@ -1318,43 +1318,43 @@ double norm(const Mat& src1, const Mat& src2, int normType, const Mat& mask)
         bitwise_xor(src1, src2, temp);
         if( !mask.empty() )
             bitwise_and(temp, mask, temp);
-        
+
         CV_Assert( temp.depth() == CV_8U );
-        
+
         const Mat *arrays[]={&temp, 0};
         Mat planes[1];
-        
+
         NAryMatIterator it(arrays, planes);
         size_t total = planes[0].total();
         size_t i, nplanes = it.nplanes;
         double result = 0;
         int cellSize = normType == NORM_HAMMING ? 1 : 2;
-        
+
         for( i = 0; i < nplanes; i++, ++it )
             result += normHamming(planes[0].data, total, cellSize);
         return result;
     }
     int normType0 = normType;
     normType = normType == NORM_L2SQR ? NORM_L2 : normType;
-    
+
     CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
     CV_Assert( mask.empty() || (src1.size == mask.size && mask.type() == CV_8U) );
     CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
     const Mat *arrays[]={&src1, &src2, &mask, 0};
     Mat planes[3];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total();
     size_t i, nplanes = it.nplanes;
-       int depth = src1.depth(), cn = planes[0].channels();
+    int depth = src1.depth(), cn = planes[0].channels();
     double result = 0;
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr1 = planes[0].data;
         const uchar* sptr2 = planes[1].data;
         const uchar* mptr = planes[2].data;
-        
+
         switch( depth )
         {
         case CV_8U:
@@ -1387,7 +1387,7 @@ double norm(const Mat& src1, const Mat& src2, int normType, const Mat& mask)
     return result;
 }
 
-    
+
 template<typename _Tp> static double
 crossCorr_(const _Tp* src1, const _Tp* src2, size_t total)
 {
@@ -1396,24 +1396,24 @@ crossCorr_(const _Tp* src1, const _Tp* src2, size_t total)
         result += (double)src1[i]*src2[i];
     return result;
 }
-    
+
 double crossCorr(const Mat& src1, const Mat& src2)
 {
     CV_Assert( src1.size == src2.size && src1.type() == src2.type() );
     const Mat *arrays[]={&src1, &src2, 0};
     Mat planes[2];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total()*planes[0].channels();
     size_t i, nplanes = it.nplanes;
-       int depth = src1.depth();
+    int depth = src1.depth();
     double result = 0;
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr1 = planes[0].data;
         const uchar* sptr2 = planes[1].data;
-        
+
         switch( depth )
         {
         case CV_8U:
@@ -1443,7 +1443,7 @@ double crossCorr(const Mat& src1, const Mat& src2)
     }
     return result;
 }
-    
+
 
 static void
 logicOp_(const uchar* src1, const uchar* src2, uchar* dst, size_t total, char c)
@@ -1491,9 +1491,9 @@ logicOpS_(const uchar* src, const uchar* scalar, uchar* dst, size_t total, char
     else
         for( i = 0; i < total; i++ )
             dst[i] = ~src[i];
-}    
-    
+}
+
+
 void logicOp( const Mat& src1, const Mat& src2, Mat& dst, char op )
 {
     CV_Assert( op == '&' || op == '|' || op == '^' );
@@ -1501,40 +1501,40 @@ void logicOp( const Mat& src1, const Mat& src2, Mat& dst, char op )
     dst.create( src1.dims, &src1.size[0], src1.type() );
     const Mat *arrays[]={&src1, &src2, &dst, 0};
     Mat planes[3];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total()*planes[0].elemSize();
     size_t i, nplanes = it.nplanes;
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr1 = planes[0].data;
         const uchar* sptr2 = planes[1].data;
         uchar* dptr = planes[2].data;
-        
+
         logicOp_(sptr1, sptr2, dptr, total, op);
     }
 }
-    
-    
+
+
 void logicOp(const Mat& src, const Scalar& s, Mat& dst, char op)
 {
     CV_Assert( op == '&' || op == '|' || op == '^' || op == '~' );
     dst.create( src.dims, &src.size[0], src.type() );
     const Mat *arrays[]={&src, &dst, 0};
     Mat planes[2];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total()*planes[0].elemSize();
     size_t i, nplanes = it.nplanes;
     double buf[12];
     scalarToRawData(s, buf, src.type(), (int)(96/planes[0].elemSize1()));
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr = planes[0].data;
         uchar* dptr = planes[1].data;
-        
+
         logicOpS_(sptr, (uchar*)&buf[0], dptr, total, op);
     }
 }
@@ -1609,27 +1609,27 @@ compareS_(const _Tp* src1, _WTp value, uchar* dst, size_t total, int cmpop)
     default:
         CV_Error(CV_StsBadArg, "Unknown comparison operation");
     }
-}    
-    
-    
+}
+
+
 void compare(const Mat& src1, const Mat& src2, Mat& dst, int cmpop)
 {
     CV_Assert( src1.type() == src2.type() && src1.channels() == 1 && src1.size == src2.size );
     dst.create( src1.dims, &src1.size[0], CV_8U );
     const Mat *arrays[]={&src1, &src2, &dst, 0};
     Mat planes[3];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total();
     size_t i, nplanes = it.nplanes;
-       int depth = src1.depth();
-    
+    int depth = src1.depth();
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr1 = planes[0].data;
         const uchar* sptr2 = planes[1].data;
         uchar* dptr = planes[2].data;
-        
+
         switch( depth )
         {
         case CV_8U:
@@ -1658,25 +1658,25 @@ void compare(const Mat& src1, const Mat& src2, Mat& dst, int cmpop)
         }
     }
 }
-    
+
 void compare(const Mat& src, double value, Mat& dst, int cmpop)
 {
     CV_Assert( src.channels() == 1 );
     dst.create( src.dims, &src.size[0], CV_8U );
     const Mat *arrays[]={&src, &dst, 0};
     Mat planes[2];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total();
     size_t i, nplanes = it.nplanes;
-       int depth = src.depth();
+    int depth = src.depth();
     int ivalue = saturate_cast<int>(value);
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr = planes[0].data;
         uchar* dptr = planes[1].data;
-        
+
         switch( depth )
         {
         case CV_8U:
@@ -1706,7 +1706,7 @@ void compare(const Mat& src, double value, Mat& dst, int cmpop)
     }
 }
 
-    
+
 template<typename _Tp> double
 cmpUlpsInt_(const _Tp* src1, const _Tp* src2, size_t total, int imaxdiff,
            size_t startidx, size_t& idx)
@@ -1726,7 +1726,7 @@ cmpUlpsInt_(const _Tp* src1, const _Tp* src2, size_t total, int imaxdiff,
     return realmaxdiff;
 }
 
-    
+
 template<> double cmpUlpsInt_<int>(const int* src1, const int* src2,
                                           size_t total, int imaxdiff,
                                           size_t startidx, size_t& idx)
@@ -1744,8 +1744,8 @@ template<> double cmpUlpsInt_<int>(const int* src1, const int* src2,
         }
     }
     return realmaxdiff;
-}    
-    
+}
+
 
 static double
 cmpUlpsFlt_(const int* src1, const int* src2, size_t total, int imaxdiff, size_t startidx, size_t& idx)
@@ -1767,7 +1767,7 @@ cmpUlpsFlt_(const int* src1, const int* src2, size_t total, int imaxdiff, size_t
     }
     return realmaxdiff;
 }
-    
+
 
 static double
 cmpUlpsFlt_(const int64* src1, const int64* src2, size_t total, int imaxdiff, size_t startidx, size_t& idx)
@@ -1789,7 +1789,7 @@ cmpUlpsFlt_(const int64* src1, const int64* src2, size_t total, int imaxdiff, si
     }
     return realmaxdiff;
 }
-    
+
 bool cmpUlps(const Mat& src1, const Mat& src2, int imaxDiff, double* _realmaxdiff, vector<int>* loc)
 {
     CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
@@ -1798,11 +1798,11 @@ bool cmpUlps(const Mat& src1, const Mat& src2, int imaxDiff, double* _realmaxdif
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total()*planes[0].channels();
     size_t i, nplanes = it.nplanes;
-       int depth = src1.depth();
+    int depth = src1.depth();
     size_t startidx = 1, idx = 0;
     if(_realmaxdiff)
         *_realmaxdiff = 0;
-    
+
     for( i = 0; i < nplanes; i++, ++it, startidx += total )
     {
         const uchar* sptr1 = planes[0].data;
@@ -1835,7 +1835,7 @@ bool cmpUlps(const Mat& src1, const Mat& src2, int imaxDiff, double* _realmaxdif
         default:
             CV_Error(CV_StsUnsupportedFormat, "");
         }
-        
+
         if(_realmaxdiff)
             *_realmaxdiff = std::max(*_realmaxdiff, realmaxdiff);
     }
@@ -1844,7 +1844,7 @@ bool cmpUlps(const Mat& src1, const Mat& src2, int imaxDiff, double* _realmaxdif
     return idx == 0;
 }
 
-    
+
 template<typename _Tp> static void
 checkInt_(const _Tp* a, size_t total, int imin, int imax, size_t startidx, size_t& idx)
 {
@@ -1859,7 +1859,7 @@ checkInt_(const _Tp* a, size_t total, int imin, int imax, size_t startidx, size_
     }
 }
 
-    
+
 template<typename _Tp> static void
 checkFlt_(const _Tp* a, size_t total, double fmin, double fmax, size_t startidx, size_t& idx)
 {
@@ -1873,7 +1873,7 @@ checkFlt_(const _Tp* a, size_t total, double fmin, double fmax, size_t startidx,
         }
     }
 }
-        
+
 
 // checks that the array does not have NaNs and/or Infs and all the elements are
 // within [min_val,max_val). idx is the index of the first "bad" element.
@@ -1884,20 +1884,20 @@ int check( const Mat& a, double fmin, double fmax, vector<int>* _idx )
     NAryMatIterator it(arrays, &plane);
     size_t total = plane.total()*plane.channels();
     size_t i, nplanes = it.nplanes;
-       int depth = a.depth();
+    int depth = a.depth();
     size_t startidx = 1, idx = 0;
     int imin = 0, imax = 0;
-    
+
     if( depth <= CV_32S )
     {
         imin = cvCeil(fmin);
         imax = cvFloor(fmax);
-    }    
-    
+    }
+
     for( i = 0; i < nplanes; i++, ++it, startidx += total )
     {
         const uchar* aptr = plane.data;
-        
+
         switch( depth )
         {
             case CV_8U:
@@ -1924,11 +1924,11 @@ int check( const Mat& a, double fmin, double fmax, vector<int>* _idx )
             default:
                 CV_Error(CV_StsUnsupportedFormat, "");
         }
-        
+
         if( idx != 0 )
             break;
     }
-    
+
     if(idx != 0 && _idx)
         setpos(a, *_idx, idx);
     return idx == 0 ? 0 : -1;
@@ -1944,7 +1944,7 @@ int cmpEps( const Mat& arr, const Mat& refarr, double* _realmaxdiff,
             bool element_wise_relative_error )
 {
     CV_Assert( arr.type() == refarr.type() && arr.size == refarr.size );
-    
+
     int ilevel = refarr.depth() <= CV_32S ? cvFloor(success_err_level) : 0;
     int result = 0;
 
@@ -1953,10 +1953,10 @@ int cmpEps( const Mat& arr, const Mat& refarr, double* _realmaxdiff,
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total()*planes[0].channels(), j = total;
     size_t i, nplanes = it.nplanes;
-       int depth = arr.depth();
+    int depth = arr.depth();
     size_t startidx = 1, idx = 0;
     double realmaxdiff = 0, maxval = 0;
-    
+
     if(_realmaxdiff)
         *_realmaxdiff = 0;
 
@@ -2066,7 +2066,7 @@ int cmpEps( const Mat& arr, const Mat& refarr, double* _realmaxdiff,
         *_realmaxdiff = exp(1000.);
     if(idx > 0 && _idx)
         setpos(arr, *_idx, idx);
-    
+
     return result;
 }
 
@@ -2127,7 +2127,7 @@ int cmpEps2_64f( TS* ts, const double* val, const double* refval, int len,
     return cmpEps2( ts, _val, _refval, eps, true, param_name );
 }
 
-    
+
 template<typename _Tp> static void
 GEMM_(const _Tp* a_data0, int a_step, int a_delta,
       const _Tp* b_data0, int b_step, int b_delta,
@@ -2143,7 +2143,7 @@ GEMM_(const _Tp* a_data0, int a_step, int a_delta,
             const _Tp* a_data = a_data0;
             const _Tp* b_data = b_data0 + j*b_delta;
             const _Tp* c_data = c_data0 + j*c_delta;
-            
+
             if( cn == 1 )
             {
                 double s = 0;
@@ -2158,7 +2158,7 @@ GEMM_(const _Tp* a_data0, int a_step, int a_delta,
             else
             {
                 double s_re = 0, s_im = 0;
-                
+
                 for( int k = 0; k < a_cols; k++ )
                 {
                     s_re += ((double)a_data[0])*b_data[0] - ((double)a_data[1])*b_data[1];
@@ -2166,38 +2166,38 @@ GEMM_(const _Tp* a_data0, int a_step, int a_delta,
                     a_data += a_delta;
                     b_data += b_step;
                 }
-                
+
                 s_re *= alpha;
                 s_im *= alpha;
-                
+
                 if( c_data )
                 {
                     s_re += c_data[0]*beta;
                     s_im += c_data[1]*beta;
                 }
-                
+
                 d_data[j*2] = (_Tp)s_re;
                 d_data[j*2+1] = (_Tp)s_im;
             }
         }
     }
 }
-    
+
+
 void gemm( const Mat& _a, const Mat& _b, double alpha,
            const Mat& _c, double beta, Mat& d, int flags )
 {
     Mat a = _a, b = _b, c = _c;
-    
-    if( a.data == d.data ) 
+
+    if( a.data == d.data )
         a = a.clone();
-    
-    if( b.data == d.data ) 
+
+    if( b.data == d.data )
         b = b.clone();
-    
+
     if( !c.empty() && c.data == d.data && (flags & cv::GEMM_3_T) )
         c = c.clone();
-    
+
     int a_rows = a.rows, a_cols = a.cols, b_rows = b.rows, b_cols = b.cols;
     int cn = a.channels();
     int a_step = (int)a.step1(), a_delta = cn;
@@ -2211,31 +2211,31 @@ void gemm( const Mat& _a, const Mat& _b, double alpha,
         std::swap( a_rows, a_cols );
         std::swap( a_step, a_delta );
     }
-    
+
     if( flags & cv::GEMM_2_T )
     {
         std::swap( b_rows, b_cols );
         std::swap( b_step, b_delta );
     }
-    
+
     if( !c.empty() )
     {
         c_rows = c.rows;
         c_cols = c.cols;
         c_step = (int)c.step1();
         c_delta = cn;
-        
+
         if( flags & cv::GEMM_3_T )
         {
             std::swap( c_rows, c_cols );
             std::swap( c_step, c_delta );
         }
-        
+
         CV_Assert( c.dims == 2 && c.type() == a.type() && c_rows == a_rows && c_cols == b_cols );
     }
 
     d.create(a_rows, b_cols, a.type());
-        
+
     if( a.depth() == CV_32F )
         GEMM_(a.ptr<float>(), a_step, a_delta, b.ptr<float>(), b_step, b_delta,
               !c.empty() ? c.ptr<float>() : 0, c_step, c_delta, d.ptr<float>(),
@@ -2247,61 +2247,6 @@ void gemm( const Mat& _a, const Mat& _b, double alpha,
 }
 
 
-double cvTsCrossCorr( const CvMat* a, const CvMat* b )
-{
-    int i, j;
-    int cn, ncols;
-    double s = 0;
-
-    cn = CV_MAT_CN(a->type);
-    ncols = a->cols*cn;
-
-    assert( CV_ARE_SIZES_EQ( a, b ) && CV_ARE_TYPES_EQ( a, b ) );
-    for( i = 0; i < a->rows; i++ )
-    {
-        uchar* a_data = a->data.ptr + a->step*i;
-        uchar* b_data = b->data.ptr + b->step*i;
-
-        switch( CV_MAT_DEPTH(a->type) )
-        {
-        case CV_8U:
-            for( j = 0; j < ncols; j++ )
-                s += ((uchar*)a_data)[j]*((uchar*)b_data)[j];
-            break;
-        case CV_8S:
-            for( j = 0; j < ncols; j++ )
-                s += ((schar*)a_data)[j]*((schar*)b_data)[j];
-            break;
-        case CV_16U:
-            for( j = 0; j < ncols; j++ )
-                s += (double)((ushort*)a_data)[j]*((ushort*)b_data)[j];
-            break;
-        case CV_16S:
-            for( j = 0; j < ncols; j++ )
-                s += ((short*)a_data)[j]*((short*)b_data)[j];
-            break;
-        case CV_32S:
-            for( j = 0; j < ncols; j++ )
-                s += ((double)((int*)a_data)[j])*((int*)b_data)[j];
-            break;
-        case CV_32F:
-            for( j = 0; j < ncols; j++ )
-                s += ((double)((float*)a_data)[j])*((float*)b_data)[j];
-            break;
-        case CV_64F:
-            for( j = 0; j < ncols; j++ )
-                s += ((double*)a_data)[j]*((double*)b_data)[j];
-            break;
-        default:
-            assert(0);
-            return log(-1.);
-        }
-    }
-
-    return s;
-}
-
-
 template<typename _Tp> static void
 transform_(const _Tp* sptr, _Tp* dptr, size_t total, int scn, int dcn, const double* mat)
 {
@@ -2316,8 +2261,8 @@ transform_(const _Tp* sptr, _Tp* dptr, size_t total, int scn, int dcn, const dou
         }
     }
 }
-    
-    
+
+
 void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& _shift )
 {
     double mat[20];
@@ -2328,7 +2273,7 @@ void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& _shift
     int mattype = transmat.depth();
     Mat shift = _shift.reshape(1, 0);
     bool haveShift = !shift.empty();
-    
+
     CV_Assert( scn <= 4 && dcn <= 4 &&
               (mattype == CV_32F || mattype == CV_64F) &&
               (!haveShift || (shift.type() == mattype && (shift.rows == 1 || shift.cols == 1))) );
@@ -2362,12 +2307,12 @@ void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& _shift
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total();
     size_t i, nplanes = it.nplanes;
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr = planes[0].data;
         uchar* dptr = planes[1].data;
-        
+
         switch( depth )
         {
         case CV_8U:
@@ -2396,7 +2341,7 @@ void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& _shift
         }
     }
 }
-    
+
 template<typename _Tp> static void
 minmax_(const _Tp* src1, const _Tp* src2, _Tp* dst, size_t total, char op)
 {
@@ -2414,17 +2359,17 @@ static void minmax(const Mat& src1, const Mat& src2, Mat& dst, char op)
     CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
     const Mat *arrays[]={&src1, &src2, &dst, 0};
     Mat planes[3];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total()*planes[0].channels();
     size_t i, nplanes = it.nplanes, depth = src1.depth();
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr1 = planes[0].data;
         const uchar* sptr2 = planes[1].data;
         uchar* dptr = planes[2].data;
-        
+
         switch( depth )
         {
         case CV_8U:
@@ -2454,7 +2399,7 @@ static void minmax(const Mat& src1, const Mat& src2, Mat& dst, char op)
     }
 }
 
-    
+
 void min(const Mat& src1, const Mat& src2, Mat& dst)
 {
     minmax( src1, src2, dst, 'm' );
@@ -2482,17 +2427,17 @@ static void minmax(const Mat& src1, double val, Mat& dst, char op)
     dst.create(src1.dims, src1.size, src1.type());
     const Mat *arrays[]={&src1, &dst, 0};
     Mat planes[2];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total()*planes[0].channels();
     size_t i, nplanes = it.nplanes, depth = src1.depth();
     int ival = saturate_cast<int>(val);
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr1 = planes[0].data;
         uchar* dptr = planes[1].data;
-        
+
         switch( depth )
         {
         case CV_8U:
@@ -2522,7 +2467,7 @@ static void minmax(const Mat& src1, double val, Mat& dst, char op)
     }
 }
 
-    
+
 void min(const Mat& src1, double val, Mat& dst)
 {
     minmax( src1, val, dst, 'm' );
@@ -2532,8 +2477,8 @@ void max(const Mat& src1, double val, Mat& dst)
 {
     minmax( src1, val, dst, 'M' );
 }
-    
-    
+
+
 template<typename _Tp> static void
 muldiv_(const _Tp* src1, const _Tp* src2, _Tp* dst, size_t total, double scale, char op)
 {
@@ -2554,17 +2499,17 @@ static void muldiv(const Mat& src1, const Mat& src2, Mat& dst, double scale, cha
     CV_Assert( src1.empty() || (src1.type() == src2.type() && src1.size == src2.size) );
     const Mat *arrays[]={&src1, &src2, &dst, 0};
     Mat planes[3];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t total = planes[1].total()*planes[1].channels();
     size_t i, nplanes = it.nplanes, depth = src2.depth();
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr1 = planes[0].data;
         const uchar* sptr2 = planes[1].data;
         uchar* dptr = planes[2].data;
-        
+
         switch( depth )
         {
         case CV_8U:
@@ -2593,7 +2538,7 @@ static void muldiv(const Mat& src1, const Mat& src2, Mat& dst, double scale, cha
         }
     }
 }
-    
+
 
 void multiply(const Mat& src1, const Mat& src2, Mat& dst, double scale)
 {
@@ -2603,9 +2548,9 @@ void multiply(const Mat& src1, const Mat& src2, Mat& dst, double scale)
 void divide(const Mat& src1, const Mat& src2, Mat& dst, double scale)
 {
     muldiv( src1, src2, dst, scale, '/' );
-}    
-    
+}
+
+
 template<typename _Tp> static void
 mean_(const _Tp* src, const uchar* mask, size_t total, int cn, Scalar& sum, int& nz)
 {
@@ -2630,26 +2575,26 @@ mean_(const _Tp* src, const uchar* mask, size_t total, int cn, Scalar& sum, int&
             }
     }
 }
-    
+
 Scalar mean(const Mat& src, const Mat& mask)
 {
     CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.size == src.size));
     Scalar sum;
     int nz = 0;
-    
+
     const Mat *arrays[]={&src, &mask, 0};
     Mat planes[2];
-    
+
     NAryMatIterator it(arrays, planes);
     size_t total = planes[0].total();
     size_t i, nplanes = it.nplanes;
     int depth = src.depth(), cn = src.channels();
-    
+
     for( i = 0; i < nplanes; i++, ++it )
     {
         const uchar* sptr = planes[0].data;
         const uchar* mptr = planes[1].data;
-        
+
         switch( depth )
         {
         case CV_8U:
@@ -2677,17 +2622,17 @@ Scalar mean(const Mat& src, const Mat& mask)
             CV_Error(CV_StsUnsupportedFormat, "");
         }
     }
-    
+
     return sum * (1./std::max(nz, 1));
 }
 
-    
+
 void  patchZeros( Mat& mat, double level )
 {
     int j, ncols = mat.cols * mat.channels();
     int depth = mat.depth();
     CV_Assert( depth == CV_32F || depth == CV_64F );
-    
+
     for( int i = 0; i < mat.rows; i++ )
     {
         if( depth == CV_32F )
@@ -2707,12 +2652,12 @@ void  patchZeros( Mat& mat, double level )
     }
 }
 
-    
+
 static void calcSobelKernel1D( int order, int _aperture_size, int size, vector<int>& kernel )
 {
     int i, j, oldval, newval;
     kernel.resize(size + 1);
-    
+
     if( _aperture_size < 0 )
     {
         static const int scharr[] = { 3, 10, 3, -1, 0, 1 };
@@ -2721,11 +2666,11 @@ static void calcSobelKernel1D( int order, int _aperture_size, int size, vector<i
             kernel[i] = scharr[order*3 + i];
         return;
     }
-    
+
     for( i = 1; i <= size; i++ )
         kernel[i] = 0;
     kernel[0] = 1;
-    
+
     for( i = 0; i < size - order - 1; i++ )
     {
         oldval = kernel[0];
@@ -2736,7 +2681,7 @@ static void calcSobelKernel1D( int order, int _aperture_size, int size, vector<i
             oldval = newval;
         }
     }
-    
+
     for( i = 0; i < order; i++ )
     {
         oldval = -kernel[0];
@@ -2756,31 +2701,31 @@ Mat calcSobelKernel2D( int dx, int dy, int _aperture_size, int origin )
               dx >= 0 && dy >= 0 && dx + dy <= 3 );
     Size ksize = _aperture_size == -1 ? Size(3,3) : _aperture_size > 1 ?
         Size(_aperture_size, _aperture_size) : dx > 0 ? Size(3, 1) : Size(1, 3);
-    
+
     Mat kernel(ksize, CV_32F);
     vector<int> kx, ky;
-    
+
     calcSobelKernel1D( dx, _aperture_size, ksize.width, kx );
     calcSobelKernel1D( dy, _aperture_size, ksize.height, ky );
-    
+
     for( int i = 0; i < kernel.rows; i++ )
     {
         float ay = (float)ky[i]*(origin && (dy & 1) ? -1 : 1);
         for( int j = 0; j < kernel.cols; j++ )
             kernel.at<float>(i, j) = kx[j]*ay;
     }
-    
+
     return kernel;
 }
 
-    
+
 Mat calcLaplaceKernel2D( int aperture_size )
 {
     int ksize = aperture_size == 1 ? 3 : aperture_size;
     Mat kernel(ksize, ksize, CV_32F);
-    
+
     vector<int> kx, ky;
-    
+
     calcSobelKernel1D( 2, aperture_size, ksize, kx );
     if( aperture_size > 1 )
         calcSobelKernel1D( 0, aperture_size, ksize, ky );
@@ -2789,32 +2734,32 @@ Mat calcLaplaceKernel2D( int aperture_size )
         ky.resize(3);
         ky[0] = ky[2] = 0; ky[1] = 1;
     }
-    
+
     for( int i = 0; i < ksize; i++ )
         for( int j = 0; j < ksize; j++ )
             kernel.at<float>(i, j) = (float)(kx[j]*ky[i] + kx[i]*ky[j]);
-    
+
     return kernel;
 }
 
-    
+
 void initUndistortMap( const Mat& _a0, const Mat& _k0, Size sz, Mat& _mapx, Mat& _mapy )
 {
     _mapx.create(sz, CV_32F);
     _mapy.create(sz, CV_32F);
-    
+
     double a[9], k[5]={0,0,0,0,0};
     Mat _a(3, 3, CV_64F, a);
     Mat _k(_k0.rows,_k0.cols, CV_MAKETYPE(CV_64F,_k0.channels()),k);
     double fx, fy, cx, cy, ifx, ify, cxn, cyn;
-    
+
     _a0.convertTo(_a, CV_64F);
     _k0.convertTo(_k, CV_64F);
     fx = a[0]; fy = a[4]; cx = a[2]; cy = a[5];
     ifx = 1./fx; ify = 1./fy;
     cxn = cx;
     cyn = cy;
-    
+
     for( int v = 0; v < sz.height; v++ )
     {
         for( int u = 0; u < sz.width; u++ )
@@ -2826,14 +2771,14 @@ void initUndistortMap( const Mat& _a0, const Mat& _k0, Size sz, Mat& _mapx, Mat&
             double cdist = 1 + (k[0] + (k[1] + k[4]*r2)*r2)*r2;
             double x1 = x*cdist + k[2]*2*x*y + k[3]*(r2 + 2*x2);
             double y1 = y*cdist + k[3]*2*x*y + k[2]*(r2 + 2*y2);
-            
+
             _mapy.at<float>(v, u) = (float)(y1*fy + cy);
             _mapx.at<float>(v, u) = (float)(x1*fx + cx);
         }
     }
 }
-    
-    
+
+
 std::ostream& operator << (std::ostream& out, const MatInfo& m)
 {
     if( !m.m || m.m->empty() )
@@ -2938,11 +2883,11 @@ static std::ostream& operator << (std::ostream& out, const MatPart& m)
         }
     }
     return out;
-}    
-    
+}
+
 MatComparator::MatComparator(double _maxdiff, int _context)
     : maxdiff(_maxdiff), context(_context) {}
-    
+
 ::testing::AssertionResult
 MatComparator::operator()(const char* expr1, const char* expr2,
                           const Mat& m1, const Mat& m2)
@@ -2952,18 +2897,18 @@ MatComparator::operator()(const char* expr1, const char* expr2,
         << "The reference and the actual output arrays have different type or size:\n"
         << expr1 << " ~ " << MatInfo(m1) << "\n"
         << expr2 << " ~ " << MatInfo(m2) << "\n";
-    
+
     //bool ok = cvtest::cmpUlps(m1, m2, maxdiff, &realmaxdiff, &loc0);
     int code = cmpEps( m1, m2, &realmaxdiff, maxdiff, &loc0, true);
-    
+
     if(code >= 0)
         return ::testing::AssertionSuccess();
-    
+
     Mat m[] = {m1.reshape(1,0), m2.reshape(1,0)};
     int dims = m[0].dims;
     vector<int> loc;
     int border = dims <= 2 ? context : 0;
-    
+
     Mat m1part, m2part;
     if( border == 0 )
     {
@@ -2976,7 +2921,7 @@ MatComparator::operator()(const char* expr1, const char* expr2,
         m1part = getSubArray(m[0], border, loc0, loc);
         m2part = getSubArray(m[1], border, loc0, loc);
     }
-    
+
     return ::testing::AssertionFailure()
     << "too big relative difference (" << realmaxdiff << " > "
     << maxdiff << ") between "
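
The helpers in the hunks above all follow the same plane-by-plane iteration idiom: build a 0-terminated array of Mat pointers, hand it to cv::NAryMatIterator, and process one continuous plane per step. A minimal standalone sketch of that idiom (the summing function is hypothetical, not part of the test suite), assuming the OpenCV 2.4-era core headers:

    #include "opencv2/core/core.hpp"

    // Sum every element of an arbitrary-dimensional CV_64FC(n) matrix by
    // walking it plane by plane, the same pattern used by add(), copy(),
    // norm() and friends above.
    static double sumAll(const cv::Mat& a)
    {
        const cv::Mat* arrays[] = { &a, 0 };      // 0-terminated input list
        cv::Mat plane;
        cv::NAryMatIterator it(arrays, &plane);
        double s = 0;
        for( size_t i = 0; i < it.nplanes; i++, ++it )
        {
            const double* p = (const double*)plane.data;
            size_t total = plane.total()*plane.channels();
            for( size_t j = 0; j < total; j++ )
                s += p[j];
        }
        return s;
    }

The sketch assumes the caller passes a CV_64F matrix; the real helpers switch on depth() exactly as the context lines show.
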
index c4828e5..96f8a75 100644
@@ -36,7 +36,7 @@
 
 // This line ensures that gtest.h can be compiled on its own, even
 // when it's fused.
-#include "opencv2/ts/ts_gtest.h"
+#include "opencv2/ts/ts.hpp"
 
 // The following lines pull in the real gtest *.cc files.
 // Copyright 2005, Google Inc.
@@ -1906,7 +1906,7 @@ extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId();
 // This predicate-formatter checks that 'results' contains a test part
 // failure of the given type and that the failure message contains the
 // given substring.
-AssertionResult HasOneFailure(const char* /* results_expr */,
+static AssertionResult HasOneFailure(const char* /* results_expr */,
                               const char* /* type_expr */,
                               const char* /* substr_expr */,
                               const TestPartResultArray& results,
@@ -3874,7 +3874,7 @@ WORD GetColorAttribute(GTestColor color) {
 
 // Returns the ANSI color code for the given color.  COLOR_DEFAULT is
 // an invalid input.
-const char* GetAnsiColorCode(GTestColor color) {
+static const char* GetAnsiColorCode(GTestColor color) {
   switch (color) {
     case COLOR_RED:     return "1";
     case COLOR_GREEN:   return "2";
@@ -3921,7 +3921,7 @@ bool ShouldUseColor(bool stdout_is_tty) {
 // cannot simply emit special characters and have the terminal change colors.
 // This routine must actually emit the characters rather than return a string
 // that would be colored when printed, as can be done on Linux.
-void ColoredPrintf(GTestColor color, const char* fmt, ...) {
+static void ColoredPrintf(GTestColor color, const char* fmt, ...) {
   va_list args;
   va_start(args, fmt);
 
@@ -3967,7 +3967,7 @@ void ColoredPrintf(GTestColor color, const char* fmt, ...) {
   va_end(args);
 }
 
-void PrintFullTestCommentIfPresent(const TestInfo& test_info) {
+static void PrintFullTestCommentIfPresent(const TestInfo& test_info) {
   const char* const type_param = test_info.type_param();
   const char* const value_param = test_info.value_param();
 
@@ -4946,13 +4946,13 @@ UnitTest * UnitTest::GetInstance() {
   // default implementation.  Use this implementation to keep good OO
   // design with private destructor.
 
-#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+#if (defined(_MSC_VER) && _MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
   static UnitTest* const instance = new UnitTest;
   return instance;
 #else
   static UnitTest instance;
   return &instance;
-#endif  // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+#endif  // (defined(_MSC_VER) && _MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
 }
 
 // Gets the number of successful test cases.
@@ -5861,7 +5861,7 @@ bool SkipPrefix(const char* prefix, const char** pstr) {
 // part can be omitted.
 //
 // Returns the value of the flag, or NULL if the parsing failed.
-const char* ParseFlagValue(const char* str,
+static const char* ParseFlagValue(const char* str,
                            const char* flag,
                            bool def_optional) {
   // str and flag must not be NULL.
@@ -5899,7 +5899,7 @@ const char* ParseFlagValue(const char* str,
 //
 // On success, stores the value of the flag in *value, and returns
 // true.  On failure, returns false without changing *value.
-bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
+static bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
   // Gets the value of the flag as a string.
   const char* const value_str = ParseFlagValue(str, flag, true);
 
@@ -5933,7 +5933,7 @@ bool ParseInt32Flag(const char* str, const char* flag, Int32* value) {
 //
 // On success, stores the value of the flag in *value, and returns
 // true.  On failure, returns false without changing *value.
-bool ParseStringFlag(const char* str, const char* flag, String* value) {
+static bool ParseStringFlag(const char* str, const char* flag, String* value) {
   // Gets the value of the flag as a string.
   const char* const value_str = ParseFlagValue(str, flag, false);
 
@@ -6407,7 +6407,7 @@ enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW };
 // message is propagated back to the parent process.  Otherwise, the
 // message is simply printed to stderr.  In either case, the program
 // then exits with status 1.
-void DeathTestAbort(const String& message) {
+static void DeathTestAbort(const String& message) {
   // On a POSIX system, this function may be called from a threadsafe-style
   // death test child process, which operates on a very small stack.  Use
   // the heap for any additional non-minuscule memory requirements.
@@ -7139,7 +7139,7 @@ bool StackLowerThanAddress(const void* ptr) {
   return &dummy < ptr;
 }
 
-bool StackGrowsDown() {
+static bool StackGrowsDown() {
   int dummy;
   return StackLowerThanAddress(&dummy);
 }
@@ -8391,7 +8391,7 @@ static CapturedStream* g_captured_stderr = NULL;
 static CapturedStream* g_captured_stdout = NULL;
 
 // Starts capturing an output stream (stdout/stderr).
-void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) {
+static void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) {
   if (*stream != NULL) {
     GTEST_LOG_(FATAL) << "Only one " << stream_name
                       << " capturer can exist at a time.";
@@ -8400,7 +8400,7 @@ void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) {
 }
 
 // Stops capturing the output stream and returns the captured string.
-String GetCapturedStream(CapturedStream** captured_stream) {
+static String GetCapturedStream(CapturedStream** captured_stream) {
   const String content = (*captured_stream)->GetCapturedString();
 
   delete *captured_stream;
@@ -8603,9 +8603,9 @@ using ::std::ostream;
 
 #if GTEST_OS_WINDOWS_MOBILE  // Windows CE does not define _snprintf_s.
 # define snprintf _snprintf
-#elif _MSC_VER >= 1400  // VC 8.0 and later deprecate snprintf and _snprintf.
+#elif defined(_MSC_VER) && _MSC_VER >= 1400  // VC 8.0 and later deprecate snprintf and _snprintf.
 # define snprintf _snprintf_s
-#elif _MSC_VER
+#elif defined(_MSC_VER) && _MSC_VER
 # define snprintf _snprintf
 #endif  // GTEST_OS_WINDOWS_MOBILE
 
index 52e99fd..5eea641 100644
@@ -1,6 +1,6 @@
 #include "precomp.hpp"\r
 \r
-#if ANDROID\r
+#ifdef ANDROID\r
 # include <sys/time.h>\r
 #endif\r
 \r
@@ -18,7 +18,7 @@ const char *command_line_keys =
     "{   |perf_seed           |809564   |seed for random numbers generator}"\r
     "{   |perf_tbb_nthreads   |-1       |if TBB is enabled, the number of TBB threads}"\r
     "{   |perf_write_sanity   |false    |allow to create new records for sanity checks}"\r
-    #if ANDROID\r
+    #ifdef ANDROID\r
     "{   |perf_time_limit     |6.0      |default time limit for a single test (in seconds)}"\r
     "{   |perf_affinity_mask  |0        |set affinity mask for the main thread}"\r
     "{   |perf_log_power_checkpoints  |false    |additional xml logging for power measurement}"\r
@@ -37,7 +37,7 @@ static uint64       param_seed;
 static double       param_time_limit;\r
 static int          param_tbb_nthreads;\r
 static bool         param_write_sanity;\r
-#if ANDROID\r
+#ifdef ANDROID\r
 static int          param_affinity_mask;\r
 static bool         log_power_checkpoints;\r
 \r
@@ -57,7 +57,7 @@ static void setCurrentThreadAffinityMask(int mask)
 \r
 #endif\r
 \r
-void randu(cv::Mat& m)\r
+static void randu(cv::Mat& m)\r
 {\r
     const int bigValue = 0x00000FFF;\r
     if (m.depth() < CV_32F)\r
@@ -151,7 +151,7 @@ void Regression::init(const std::string& testSuitName, const std::string& ext)
     {\r
         LOGE("Failed to open sanity data for reading: %s", storageInPath.c_str());\r
     }\r
-    \r
+\r
     if(!storageIn.isOpened())\r
         storageOutPath = storageInPath;\r
 }\r
@@ -534,7 +534,7 @@ void TestBase::Init(int argc, const char* const argv[])
     param_force_samples = args.get<unsigned int>("perf_force_samples");\r
     param_write_sanity = args.get<bool>("perf_write_sanity");\r
     param_tbb_nthreads  = args.get<int>("perf_tbb_nthreads");\r
-#if ANDROID\r
+#ifdef ANDROID\r
     param_affinity_mask = args.get<int>("perf_affinity_mask");\r
     log_power_checkpoints = args.get<bool>("perf_log_power_checkpoints");\r
 #endif\r
@@ -636,17 +636,17 @@ cv::Size TestBase::getSize(cv::InputArray a)
 bool TestBase::next()\r
 {\r
     bool has_next = ++currentIter < nIters && totalTime < timeLimit;\r
-#if ANDROID\r
+#ifdef ANDROID\r
     if (log_power_checkpoints)\r
     {\r
         timeval tim;\r
         gettimeofday(&tim, NULL);\r
         unsigned long long t1 = tim.tv_sec * 1000LLU + (unsigned long long)(tim.tv_usec / 1000.f);\r
-        \r
+\r
         if (currentIter == 1) RecordProperty("test_start", cv::format("%llu",t1).c_str());\r
         if (!has_next) RecordProperty("test_complete", cv::format("%llu",t1).c_str());\r
     }\r
-#endif    \r
+#endif\r
     return has_next;\r
 }\r
 \r
@@ -898,7 +898,7 @@ void TestBase::SetUp()
         p_tbb_initializer=new tbb::task_scheduler_init(param_tbb_nthreads);\r
     }\r
 #endif\r
-#if ANDROID\r
+#ifdef ANDROID\r
     if (param_affinity_mask)\r
         setCurrentThreadAffinityMask(param_affinity_mask);\r
 #endif\r
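
The ts_perf.cpp hunks show the two fixes that recur through the whole commit: `#if ANDROID` becomes `#ifdef ANDROID`, since ANDROID is a presence-only macro and testing its value would warn under -Wundef, and the file-local helper randu() becomes `static`, which gives it internal linkage so warnings of the -Wmissing-declarations kind no longer ask for a prior prototype. A small self-contained illustration of the `static` half (the helper below is hypothetical):

    #include <cstdio>

    // A non-static definition with no preceding declaration is what
    // -Wmissing-declarations complains about; internal linkage avoids the
    // warning and keeps the symbol out of the object file's export list.
    static int clampToByte(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : v;
    }

    int main()
    {
        std::printf("%d %d %d\n", clampToByte(-5), clampToByte(128), clampToByte(300));
        return 0;
    }
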
index ebac290..1dd96f5 100644
@@ -51,7 +51,7 @@ namespace cv
 {
 
 CV_EXPORTS bool initModule_video(void);
-    
+
 }
 #endif
 
index 2dd31c2..5378a49 100644 (file)
@@ -6,7 +6,7 @@
 #include <opencv2/highgui/highgui.hpp>
 #include "opencv2/ts/ts.hpp"
 
-#if GTEST_CREATE_SHARED_LIBRARY
+#ifdef GTEST_CREATE_SHARED_LIBRARY
 #error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
 #endif
 
index 8bd889f..57be856 100644 (file)
 #ifndef __OPENCV_PRECOMP_H__
 #define __OPENCV_PRECOMP_H__
 
-#if _MSC_VER >= 1200
+#if defined _MSC_VER && _MSC_VER >= 1200
 #pragma warning( disable: 4251 4710 4711 4514 4996 )
 #endif
 
-#ifdef HAVE_CVCONFIG_H 
+#ifdef HAVE_CVCONFIG_H
 #include "cvconfig.h"
 #endif
 
index 8f2812e..91c215b 100644 (file)
 //M*/
 
 #include "precomp.hpp"
+#include "opencv2/video/video.hpp"
 
 namespace cv
 {
-    
+
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
 
 CV_INIT_ALGORITHM(BackgroundSubtractorMOG, "BackgroundSubtractor.MOG",
@@ -61,12 +62,15 @@ CV_INIT_ALGORITHM(BackgroundSubtractorMOG2, "BackgroundSubtractor.MOG2",
     obj.info()->addParam(obj, "varThreshold", obj.varThreshold);
     obj.info()->addParam(obj, "detectShadows", obj.bShadowDetection));
 
-///////////////////////////////////////////////////////////////////////////////////////////////////////////    
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
 
 bool initModule_video(void)
 {
-    Ptr<Algorithm> mog = createBackgroundSubtractorMOG(), mog2 = createBackgroundSubtractorMOG2();
-    return mog->info() != 0 && mog2->info() != 0;
+    bool all = true;
+    all &= !BackgroundSubtractorMOG_info_auto.name().empty();
+    all &= !BackgroundSubtractorMOG2_info_auto.name().empty();
+
+    return all;
 }
-    
+
 }
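
The rewritten initModule_video() no longer instantiates the algorithms; it only checks the static *_info_auto registration objects generated by the CV_INIT_ALGORITHM macro. A rough stand-alone stub of that idea (hypothetical types and names, not the real OpenCV registration machinery):

    #include <string>
    #include <iostream>

    // Hypothetical registration object whose constructor runs during static
    // initialization, loosely mirroring the generated *_info_auto instances.
    struct AlgorithmInfoStub
    {
        std::string registeredName;
        explicit AlgorithmInfoStub(const char* name) : registeredName(name) {}
        const std::string& name() const { return registeredName; }
    };

    static AlgorithmInfoStub MOG_info_auto("BackgroundSubtractor.MOG");
    static AlgorithmInfoStub MOG2_info_auto("BackgroundSubtractor.MOG2");

    static bool initModule_video_sketch()
    {
        bool all = true;
        all &= !MOG_info_auto.name().empty();    // fails only if registration broke
        all &= !MOG2_info_auto.name().empty();
        return all;
    }

    int main()
    {
        std::cout << std::boolalpha << initModule_video_sketch() << std::endl;
        return 0;
    }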
index 4cd143c..13f9ff0 100644 (file)
@@ -83,6 +83,7 @@ public:
 
     virtual void setMaxLevel(int val) { maxLevel_ = val; }
     virtual int maxLevel() const { return maxLevel_; }
+    virtual ~PyrLkOptFlowEstimatorBase() {}
 
 protected:
     Size winSize_;
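
The added empty destructor is the usual fix for deleting a derived object through a base-class pointer: without a virtual destructor that is undefined behaviour, and g++'s stricter warnings flag it (-Wnon-virtual-dtor / -Wdelete-non-virtual-dtor). A small illustrative example with made-up class names:

    #include <cstdio>

    class OptFlowEstimatorBase
    {
    public:
        virtual void run() { std::printf("base estimator\n"); }
        virtual ~OptFlowEstimatorBase() {}      // without this, the delete below is UB
    };

    class PyrLkEstimator : public OptFlowEstimatorBase
    {
    public:
        virtual void run() { std::printf("pyramidal LK estimator\n"); }
        virtual ~PyrLkEstimator() { std::printf("derived part destroyed\n"); }
    };

    int main()
    {
        OptFlowEstimatorBase* est = new PyrLkEstimator();
        est->run();
        delete est;     // calls ~PyrLkEstimator first, thanks to the virtual destructor
        return 0;
    }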
index e6a1e9d..d829a2a 100644 (file)
 #define __OPENCV_VIDEOSTAB_CLP_HPP__
 
 #ifdef HAVE_CLP
-  #undef PACKAGE
-  #undef PACKAGE_BUGREPORT
-  #undef PACKAGE_NAME
-  #undef PACKAGE_STRING
-  #undef PACKAGE_TARNAME
-  #undef PACKAGE_VERSION
-  #undef VERSION
-  #include "ClpSimplex.hpp"
-  #include "ClpPresolve.hpp"
-  #include "ClpPrimalColumnSteepest.hpp"
-  #include "ClpDualRowSteepest.hpp"
-  #define INF 1e10
+#  undef PACKAGE
+#  undef PACKAGE_BUGREPORT
+#  undef PACKAGE_NAME
+#  undef PACKAGE_STRING
+#  undef PACKAGE_TARNAME
+#  undef PACKAGE_VERSION
+#  undef VERSION
+
+#  define COIN_BIG_INDEX 0
+#  define DEBUG_COIN 0
+#  define PRESOLVE_DEBUG 0
+#  define PRESOLVE_CONSISTENCY 0
+
+#  include "ClpSimplex.hpp"
+#  include "ClpPresolve.hpp"
+#  include "ClpPrimalColumnSteepest.hpp"
+#  include "ClpDualRowSteepest.hpp"
+#  define INF 1e10
 #endif
 
 // Clp replaces min and max with ?: globally, we can't use std::min and std::max in case
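
The extra COIN_*/PRESOLVE_* defines are presumably there for the same -Wundef reason: the Clp headers test those flags with plain #if, and defining them to 0 before the includes keeps third-party code warning-clean without patching it. A generic sketch of the trick, with an invented flag name:

    /* invented flag: defining it to 0 up front keeps "#if THIRDPARTY_DEBUG"
       checks (here, and inside headers we cannot edit) quiet under -Wundef */
    #define THIRDPARTY_DEBUG 0
    #include <cstdio>

    #if THIRDPARTY_DEBUG
    #  define TRACE(msg) std::printf("trace: %s\n", msg)
    #else
    #  define TRACE(msg) ((void)0)
    #endif

    int main()
    {
        TRACE("compiled out in this configuration");
        std::printf("built cleanly with -Wundef\n");
        return 0;
    }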
index 6fdfa96..06c725a 100644 (file)
 #include "opencv2/contrib/contrib.hpp"\r
 #include "opencv2/highgui/highgui.hpp"\r
 \r
-void help(char **argv)\r
+static void help(char **argv)\r
 {\r
-       std::cout << "\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector which can be found in contrib.cpp\n"\r
-                       << "Usage: " << std::endl <<\r
-               argv[0] << " fileMask firstFrame lastFrame" << std::endl << std::endl <<\r
-               "Example: " << std::endl <<\r
-               argv[0] << " C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg  0  1000" << std::endl <<\r
-               "       iterates through temp_00000.jpg  to  temp_01000.jpg" << std::endl << std::endl <<\r
-               "If no parameter specified, this application will try to capture from the default Webcam." << std::endl <<\r
-               "Please note: Background should not contain large surfaces with skin tone." <<\r
-               "\n\n ESC will stop\n"\r
-               "Using OpenCV version %s\n" << CV_VERSION << "\n"\r
-               << std::endl;\r
+    std::cout << "\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector which can be found in contrib.cpp\n"\r
+            << "Usage: " << std::endl <<\r
+        argv[0] << " fileMask firstFrame lastFrame" << std::endl << std::endl <<\r
+        "Example: " << std::endl <<\r
+        argv[0] << " C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg  0  1000" << std::endl <<\r
+        "   iterates through temp_00000.jpg  to  temp_01000.jpg" << std::endl << std::endl <<\r
+        "If no parameter specified, this application will try to capture from the default Webcam." << std::endl <<\r
+        "Please note: Background should not contain large surfaces with skin tone." <<\r
+        "\n\n ESC will stop\n"\r
+        "Using OpenCV version %s\n" << CV_VERSION << "\n"\r
+        << std::endl;\r
 }\r
 \r
 class ASDFrameHolder\r
 {\r
 private:\r
-       IplImage *image;\r
-       double timeStamp;\r
+    IplImage *image;\r
+    double timeStamp;\r
 \r
 public:\r
-       ASDFrameHolder();\r
-       virtual ~ASDFrameHolder();\r
-       virtual void assignFrame(IplImage *sourceImage, double frameTime);\r
-       inline IplImage *getImage();\r
-       inline double getTimeStamp();\r
-       virtual void setImage(IplImage *sourceImage);\r
+    ASDFrameHolder();\r
+    virtual ~ASDFrameHolder();\r
+    virtual void assignFrame(IplImage *sourceImage, double frameTime);\r
+    inline IplImage *getImage();\r
+    inline double getTimeStamp();\r
+    virtual void setImage(IplImage *sourceImage);\r
 };\r
 \r
 class ASDFrameSequencer\r
 {\r
 public:\r
-       virtual ~ASDFrameSequencer();\r
-       virtual IplImage *getNextImage();\r
-       virtual void close();\r
-       virtual bool isOpen();\r
-       virtual void getFrameCaption(char *caption);\r
+    virtual ~ASDFrameSequencer();\r
+    virtual IplImage *getNextImage();\r
+    virtual void close();\r
+    virtual bool isOpen();\r
+    virtual void getFrameCaption(char *caption);\r
 };\r
 \r
 class ASDCVFrameSequencer : public ASDFrameSequencer\r
 {\r
 protected:\r
-       CvCapture *capture;\r
+    CvCapture *capture;\r
 \r
 public:\r
-       virtual IplImage *getNextImage();\r
-       virtual void close();\r
-       virtual bool isOpen();\r
+    virtual IplImage *getNextImage();\r
+    virtual void close();\r
+    virtual bool isOpen();\r
 };\r
 \r
 class ASDFrameSequencerWebCam : public ASDCVFrameSequencer\r
 {\r
 public:\r
-       virtual bool open(int cameraIndex);\r
+    virtual bool open(int cameraIndex);\r
 };\r
 \r
 class ASDFrameSequencerVideoFile : public ASDCVFrameSequencer\r
 {\r
 public:\r
-       virtual bool open(const char *fileName);\r
+    virtual bool open(const char *fileName);\r
 };\r
 \r
 class ASDFrameSequencerImageFile : public ASDFrameSequencer {\r
 private:\r
-       char sFileNameMask[2048];\r
-       int nCurrentIndex, nStartIndex, nEndIndex;\r
+    char sFileNameMask[2048];\r
+    int nCurrentIndex, nStartIndex, nEndIndex;\r
 \r
 public:\r
-       virtual void open(const char *fileNameMask, int startIndex, int endIndex);\r
-       virtual void getFrameCaption(char *caption);\r
-       virtual IplImage *getNextImage();\r
-       virtual void close();\r
-       virtual bool isOpen();\r
+    virtual void open(const char *fileNameMask, int startIndex, int endIndex);\r
+    virtual void getFrameCaption(char *caption);\r
+    virtual IplImage *getNextImage();\r
+    virtual void close();\r
+    virtual bool isOpen();\r
 };\r
 \r
 //-------------------- ASDFrameHolder -----------------------//\r
 ASDFrameHolder::ASDFrameHolder( )\r
 {\r
-       image = NULL;\r
-       timeStamp = 0;\r
+    image = NULL;\r
+    timeStamp = 0;\r
 };\r
 \r
 ASDFrameHolder::~ASDFrameHolder( )\r
 {\r
-       cvReleaseImage(&image);\r
+    cvReleaseImage(&image);\r
 };\r
 \r
 void ASDFrameHolder::assignFrame(IplImage *sourceImage, double frameTime)\r
 {\r
-       if (image != NULL)\r
-       {\r
-               cvReleaseImage(&image);\r
-               image = NULL;\r
-       }\r
-\r
-       image = cvCloneImage(sourceImage);\r
-       timeStamp = frameTime;\r
+    if (image != NULL)\r
+    {\r
+        cvReleaseImage(&image);\r
+        image = NULL;\r
+    }\r
+\r
+    image = cvCloneImage(sourceImage);\r
+    timeStamp = frameTime;\r
 };\r
 \r
 IplImage *ASDFrameHolder::getImage()\r
 {\r
-       return image;\r
+    return image;\r
 };\r
 \r
 double ASDFrameHolder::getTimeStamp()\r
 {\r
-       return timeStamp;\r
+    return timeStamp;\r
 };\r
 \r
 void ASDFrameHolder::setImage(IplImage *sourceImage)\r
 {\r
-       image = sourceImage;\r
+    image = sourceImage;\r
 };\r
 \r
 \r
@@ -162,12 +162,12 @@ void ASDFrameHolder::setImage(IplImage *sourceImage)
 \r
 ASDFrameSequencer::~ASDFrameSequencer()\r
 {\r
-       close();\r
+    close();\r
 };\r
 \r
 IplImage *ASDFrameSequencer::getNextImage()\r
 {\r
-       return NULL;\r
+    return NULL;\r
 };\r
 \r
 void ASDFrameSequencer::close()\r
@@ -177,40 +177,40 @@ void ASDFrameSequencer::close()
 \r
 bool ASDFrameSequencer::isOpen()\r
 {\r
-       return false;\r
+    return false;\r
 };\r
 \r
 void ASDFrameSequencer::getFrameCaption(char* /*caption*/) {\r
-       return;\r
+    return;\r
 };\r
 \r
 IplImage* ASDCVFrameSequencer::getNextImage()\r
 {\r
-       IplImage *image;\r
-\r
-       image = cvQueryFrame(capture);\r
-\r
-       if (image != NULL)\r
-       {\r
-               return cvCloneImage(image);\r
-       }\r
-       else\r
-       {\r
-               return NULL;\r
-       }\r
+    IplImage *image;\r
+\r
+    image = cvQueryFrame(capture);\r
+\r
+    if (image != NULL)\r
+    {\r
+        return cvCloneImage(image);\r
+    }\r
+    else\r
+    {\r
+        return NULL;\r
+    }\r
 };\r
 \r
 void ASDCVFrameSequencer::close()\r
 {\r
-       if (capture != NULL)\r
-       {\r
-               cvReleaseCapture(&capture);\r
-       }\r
+    if (capture != NULL)\r
+    {\r
+        cvReleaseCapture(&capture);\r
+    }\r
 };\r
 \r
 bool ASDCVFrameSequencer::isOpen()\r
 {\r
-       return (capture != NULL);\r
+    return (capture != NULL);\r
 };\r
 \r
 \r
@@ -218,18 +218,18 @@ bool ASDCVFrameSequencer::isOpen()
 \r
 bool ASDFrameSequencerWebCam::open(int cameraIndex)\r
 {\r
-       close();\r
-\r
-       capture = cvCaptureFromCAM(cameraIndex);\r
-\r
-       if (!capture)\r
-       {\r
-               return false;\r
-       }\r
-       else\r
-       {\r
-               return true;\r
-       }\r
+    close();\r
+\r
+    capture = cvCaptureFromCAM(cameraIndex);\r
+\r
+    if (!capture)\r
+    {\r
+        return false;\r
+    }\r
+    else\r
+    {\r
+        return true;\r
+    }\r
 };\r
 \r
 \r
@@ -237,17 +237,17 @@ bool ASDFrameSequencerWebCam::open(int cameraIndex)
 \r
 bool ASDFrameSequencerVideoFile::open(const char *fileName)\r
 {\r
-       close();\r
-\r
-       capture = cvCaptureFromFile(fileName);\r
-       if (!capture)\r
-       {\r
-               return false;\r
-       }\r
-       else\r
-       {\r
-               return true;\r
-       }\r
+    close();\r
+\r
+    capture = cvCaptureFromFile(fileName);\r
+    if (!capture)\r
+    {\r
+        return false;\r
+    }\r
+    else\r
+    {\r
+        return true;\r
+    }\r
 };\r
 \r
 \r
@@ -255,159 +255,159 @@ bool ASDFrameSequencerVideoFile::open(const char *fileName)
 \r
 void ASDFrameSequencerImageFile::open(const char *fileNameMask, int startIndex, int endIndex)\r
 {\r
-       nCurrentIndex = startIndex-1;\r
-       nStartIndex = startIndex;\r
-       nEndIndex = endIndex;\r
+    nCurrentIndex = startIndex-1;\r
+    nStartIndex = startIndex;\r
+    nEndIndex = endIndex;\r
 \r
-       std::sprintf(sFileNameMask, "%s", fileNameMask);\r
+    std::sprintf(sFileNameMask, "%s", fileNameMask);\r
 };\r
 \r
 void ASDFrameSequencerImageFile::getFrameCaption(char *caption) {\r
-       std::sprintf(caption, sFileNameMask, nCurrentIndex);\r
+    std::sprintf(caption, sFileNameMask, nCurrentIndex);\r
 };\r
 \r
 IplImage* ASDFrameSequencerImageFile::getNextImage()\r
 {\r
-       char fileName[2048];\r
+    char fileName[2048];\r
 \r
-       nCurrentIndex++;\r
+    nCurrentIndex++;\r
 \r
-       if (nCurrentIndex > nEndIndex)\r
-               return NULL;\r
+    if (nCurrentIndex > nEndIndex)\r
+        return NULL;\r
 \r
-       std::sprintf(fileName, sFileNameMask, nCurrentIndex);\r
+    std::sprintf(fileName, sFileNameMask, nCurrentIndex);\r
 \r
-       IplImage* img = cvLoadImage(fileName);\r
+    IplImage* img = cvLoadImage(fileName);\r
 \r
-       return img;\r
+    return img;\r
 };\r
 \r
 void ASDFrameSequencerImageFile::close()\r
 {\r
-       nCurrentIndex = nEndIndex+1;\r
+    nCurrentIndex = nEndIndex+1;\r
 };\r
 \r
 bool ASDFrameSequencerImageFile::isOpen()\r
 {\r
-       return (nCurrentIndex <= nEndIndex);\r
+    return (nCurrentIndex <= nEndIndex);\r
 };\r
 \r
-void putTextWithShadow(IplImage *img, const char *str, CvPoint point, CvFont *font, CvScalar color = CV_RGB(255, 255, 128))\r
+static void putTextWithShadow(IplImage *img, const char *str, CvPoint point, CvFont *font, CvScalar color = CV_RGB(255, 255, 128))\r
 {\r
-       cvPutText(img, str, cvPoint(point.x-1,point.y-1), font, CV_RGB(0, 0, 0));\r
-       cvPutText(img, str, point, font, color);\r
+    cvPutText(img, str, cvPoint(point.x-1,point.y-1), font, CV_RGB(0, 0, 0));\r
+    cvPutText(img, str, point, font, color);\r
 };\r
 \r
-#define ASD_RGB_SET_PIXEL(pointer, r, g, b)    { (*pointer) = (unsigned char)b; (*(pointer+1)) = (unsigned char)g;     (*(pointer+2)) = (unsigned char)r; }\r
+#define ASD_RGB_SET_PIXEL(pointer, r, g, b) { (*pointer) = (unsigned char)b; (*(pointer+1)) = (unsigned char)g; (*(pointer+2)) = (unsigned char)r; }\r
 \r
 #define ASD_RGB_GET_PIXEL(pointer, r, g, b) {b = (unsigned char)(*(pointer)); g = (unsigned char)(*(pointer+1)); r = (unsigned char)(*(pointer+2));}\r
 \r
-void displayBuffer(IplImage *rgbDestImage, IplImage *buffer, int rValue, int gValue, int bValue)\r
+static void displayBuffer(IplImage *rgbDestImage, IplImage *buffer, int rValue, int gValue, int bValue)\r
 {\r
-       int x, y, nWidth, nHeight;\r
-       double destX, destY, dx, dy;\r
-       uchar c;\r
-       unsigned char *pSrc;\r
-\r
-       nWidth = buffer->width;\r
-       nHeight = buffer->height;\r
-\r
-       dx = double(rgbDestImage->width)/double(nWidth);\r
-       dy = double(rgbDestImage->height)/double(nHeight);\r
-\r
-       destX = 0;\r
-       for (x = 0; x < nWidth; x++)\r
-       {\r
-               destY = 0;\r
-               for (y = 0; y < nHeight; y++)\r
-               {\r
-                       c = ((uchar*)(buffer->imageData + buffer->widthStep*y))[x];\r
-\r
-                       if (c)\r
-                       {\r
-                               pSrc = (unsigned char *)rgbDestImage->imageData + rgbDestImage->widthStep*int(destY) + (int(destX)*rgbDestImage->nChannels);\r
-                               ASD_RGB_SET_PIXEL(pSrc, rValue, gValue, bValue);\r
-                       }\r
-                       destY += dy;\r
-               }\r
-               destY = 0;\r
-               destX += dx;\r
-       }\r
+    int x, y, nWidth, nHeight;\r
+    double destX, destY, dx, dy;\r
+    uchar c;\r
+    unsigned char *pSrc;\r
+\r
+    nWidth = buffer->width;\r
+    nHeight = buffer->height;\r
+\r
+    dx = double(rgbDestImage->width)/double(nWidth);\r
+    dy = double(rgbDestImage->height)/double(nHeight);\r
+\r
+    destX = 0;\r
+    for (x = 0; x < nWidth; x++)\r
+    {\r
+        destY = 0;\r
+        for (y = 0; y < nHeight; y++)\r
+        {\r
+            c = ((uchar*)(buffer->imageData + buffer->widthStep*y))[x];\r
+\r
+            if (c)\r
+            {\r
+                pSrc = (unsigned char *)rgbDestImage->imageData + rgbDestImage->widthStep*int(destY) + (int(destX)*rgbDestImage->nChannels);\r
+                ASD_RGB_SET_PIXEL(pSrc, rValue, gValue, bValue);\r
+            }\r
+            destY += dy;\r
+        }\r
+        destY = 0;\r
+        destX += dx;\r
+    }\r
 };\r
 \r
 int main(int argc, char** argv )\r
 {\r
-       IplImage *img, *filterMask = NULL;\r
-       CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);\r
-       ASDFrameSequencer *sequencer;\r
-       CvFont base_font;\r
-       char caption[2048], s[256], windowName[256];\r
-       long int clockTotal = 0, numFrames = 0;\r
-       std::clock_t clock;\r
-\r
-       if (argc < 4)\r
-       {\r
-               help(argv);\r
-               sequencer = new ASDFrameSequencerWebCam();\r
-               (dynamic_cast<ASDFrameSequencerWebCam*>(sequencer))->open(-1);\r
-\r
-               if (! sequencer->isOpen())\r
-               {\r
-                       std::cout << std::endl << "Error: Cannot initialize the default Webcam" << std::endl << std::endl;\r
-               }\r
-       }\r
-       else\r
-       {\r
-               sequencer = new ASDFrameSequencerImageFile();\r
-               (dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // A sequence of images captured from video source, is stored here\r
-\r
-       }\r
-       std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences");\r
-\r
-       cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);\r
-       cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5);\r
-\r
-       // Usage:\r
-       //              c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000\r
-\r
-       std::cout << "Press ESC to stop." << std::endl << std::endl;\r
-       while ((img = sequencer->getNextImage()) != 0)\r
-       {\r
-               numFrames++;\r
-\r
-               if (filterMask == NULL)\r
-               {\r
-                       filterMask = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1);\r
-               }\r
-               clock = std::clock();\r
-               filter.process(img, filterMask);        // DETECT SKIN\r
-               clockTotal += (std::clock() - clock);\r
-\r
-               displayBuffer(img, filterMask, 0, 255, 0);\r
-\r
-               sequencer->getFrameCaption(caption);\r
-               std::sprintf(s, "%s - %d x %d", caption, img->width, img->height);\r
-               putTextWithShadow(img, s, cvPoint(10, img->height-35), &base_font);\r
-\r
-               std::sprintf(s, "Average processing time per frame: %5.2fms", (double(clockTotal*1000/CLOCKS_PER_SEC))/numFrames);\r
-               putTextWithShadow(img, s, cvPoint(10, img->height-15), &base_font);\r
-\r
-               cvShowImage (windowName, img);\r
-               cvReleaseImage(&img);\r
-\r
-               if (cvWaitKey(1) == 27)\r
-                       break;\r
-       }\r
-\r
-       sequencer->close();\r
-       delete sequencer;\r
-\r
-       cvReleaseImage(&filterMask);\r
-\r
-       cvDestroyWindow(windowName);\r
-\r
-       std::cout << "Finished, " << numFrames << " frames processed." << std::endl;\r
-\r
-       return 0;\r
+    IplImage *img, *filterMask = NULL;\r
+    CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);\r
+    ASDFrameSequencer *sequencer;\r
+    CvFont base_font;\r
+    char caption[2048], s[256], windowName[256];\r
+    long int clockTotal = 0, numFrames = 0;\r
+    std::clock_t clock;\r
+\r
+    if (argc < 4)\r
+    {\r
+        help(argv);\r
+        sequencer = new ASDFrameSequencerWebCam();\r
+        (dynamic_cast<ASDFrameSequencerWebCam*>(sequencer))->open(-1);\r
+\r
+        if (! sequencer->isOpen())\r
+        {\r
+            std::cout << std::endl << "Error: Cannot initialize the default Webcam" << std::endl << std::endl;\r
+        }\r
+    }\r
+    else\r
+    {\r
+        sequencer = new ASDFrameSequencerImageFile();\r
+        (dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // A sequence of images captured from video source, is stored here\r
+\r
+    }\r
+    std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences");\r
+\r
+    cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);\r
+    cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5);\r
+\r
+    // Usage:\r
+    //      c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000\r
+\r
+    std::cout << "Press ESC to stop." << std::endl << std::endl;\r
+    while ((img = sequencer->getNextImage()) != 0)\r
+    {\r
+        numFrames++;\r
+\r
+        if (filterMask == NULL)\r
+        {\r
+            filterMask = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1);\r
+        }\r
+        clock = std::clock();\r
+        filter.process(img, filterMask);    // DETECT SKIN\r
+        clockTotal += (std::clock() - clock);\r
+\r
+        displayBuffer(img, filterMask, 0, 255, 0);\r
+\r
+        sequencer->getFrameCaption(caption);\r
+        std::sprintf(s, "%s - %d x %d", caption, img->width, img->height);\r
+        putTextWithShadow(img, s, cvPoint(10, img->height-35), &base_font);\r
+\r
+        std::sprintf(s, "Average processing time per frame: %5.2fms", (double(clockTotal*1000/CLOCKS_PER_SEC))/numFrames);\r
+        putTextWithShadow(img, s, cvPoint(10, img->height-15), &base_font);\r
+\r
+        cvShowImage (windowName, img);\r
+        cvReleaseImage(&img);\r
+\r
+        if (cvWaitKey(1) == 27)\r
+            break;\r
+    }\r
+\r
+    sequencer->close();\r
+    delete sequencer;\r
+\r
+    cvReleaseImage(&filterMask);\r
+\r
+    cvDestroyWindow(windowName);\r
+\r
+    std::cout << "Finished, " << numFrames << " frames processed." << std::endl;\r
+\r
+    return 0;\r
 }\r
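
The sample's main loop above reports the average per-frame cost by accumulating std::clock() deltas across frames. A stripped-down, self-contained version of that timing pattern (a dummy workload replaces filter.process()):

    #include <ctime>
    #include <cstdio>

    static double heavyWork()                       // stand-in for the skin-detector call
    {
        double s = 0.0;
        for (int i = 0; i < 1000000; ++i) s += i * 1e-6;
        return s;
    }

    int main()
    {
        long clockTotal = 0, numFrames = 0;
        for (int frame = 0; frame < 25; ++frame)    // pretend we captured 25 frames
        {
            std::clock_t t0 = std::clock();
            heavyWork();
            clockTotal += (long)(std::clock() - t0);
            ++numFrames;
        }
        std::printf("Average processing time per frame: %5.2f ms\n",
                    (double)clockTotal * 1000.0 / CLOCKS_PER_SEC / (double)numFrames);
        return 0;
    }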
 \r
index 48df51c..2b15728 100644 (file)
@@ -1,11 +1,11 @@
 // Background average sample code done with averages and done with codebooks
 // (adapted from the OpenCV book sample)
-// 
+//
 // NOTE: To get the keyboard to work, you *have* to have one of the video windows be active
 //       and NOT the consule window.
 //
 // Gary Bradski Oct 3, 2008.
-// 
+//
 /* *************** License:**************************
    Oct. 3, 2008
    Right to use this code in any way you want without warrenty, support or any guarentee of it working.
    Learning OpenCV: Computer Vision with the OpenCV Library
      by Gary Bradski and Adrian Kaehler
      Published by O'Reilly Media, October 3, 2008
-   AVAILABLE AT: 
+
+   AVAILABLE AT:
      http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
      Or: http://oreilly.com/catalog/9780596516130/
-     ISBN-10: 0596516134 or: ISBN-13: 978-0596516130    
+     ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
 ************************************************** */
 #include "opencv2/core/core.hpp"
 #include "opencv2/video/background_segm.hpp"
@@ -38,19 +38,19 @@ CvBGCodeBookModel* model = 0;
 const int NCHANNELS = 3;
 bool ch[NCHANNELS]={true,true,true}; // This sets what channels should be adjusted for background bounds
 
-void help()
+static void help()
 {
     printf("\nLearn background and find foreground using simple average and average difference learning method:\n"
-               "Originally from the book: Learning OpenCV by O'Reilly press\n"
+            "Originally from the book: Learning OpenCV by O'Reilly press\n"
         "\nUSAGE:\n"
         "   bgfg_codebook [--nframes(-nf)=300] [--movie_filename(-mf)=tree.avi] [--camera(-c), use camera or not]\n"
         "***Keep the focus on the video windows, NOT the consol***\n\n"
         "INTERACTIVE PARAMETERS:\n"
         "\tESC,q,Q  - quit the program\n"
-        "\th   - print this help\n"
-        "\tp   - pause toggle\n"
-        "\ts   - single step\n"
-        "\tr   - run mode (single step off)\n"
+        "\th    - print this help\n"
+        "\tp    - pause toggle\n"
+        "\ts    - single step\n"
+        "\tr    - run mode (single step off)\n"
         "=== AVG PARAMS ===\n"
         "\t-    - bump high threshold UP by 0.25\n"
         "\t=    - bump high threshold DOWN by 0.25\n"
@@ -58,10 +58,10 @@ void help()
         "\t]    - bump low threshold DOWN by 0.25\n"
         "=== CODEBOOK PARAMS ===\n"
         "\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n"
-        "\ta   - adjust all 3 channels at once\n"
-        "\tb   - adjust both 2 and 3 at once\n"
-        "\ti,o - bump upper threshold up,down by 1\n"
-        "\tk,l - bump lower threshold up,down by 1\n"
+        "\ta    - adjust all 3 channels at once\n"
+        "\tb    - adjust both 2 and 3 at once\n"
+        "\ti,o  - bump upper threshold up,down by 1\n"
+        "\tk,l  - bump lower threshold up,down by 1\n"
         "\tSPACE - reset the model\n"
         );
 }
@@ -91,7 +91,7 @@ int main(int argc, const char** argv)
     int c, n, nframes = 0;
 
     model = cvCreateBGCodeBookModel();
-    
+
     //Set color thresholds to default values
     model->modMin[0] = 3;
     model->modMin[1] = model->modMin[2] = 3;
@@ -127,12 +127,12 @@ int main(int argc, const char** argv)
         {
             rawImage = cvQueryFrame( capture );
             ++nframes;
-            if(!rawImage) 
+            if(!rawImage)
                 break;
         }
         if( singlestep )
             pause = true;
-        
+
         //First time:
         if( nframes == 1 && rawImage )
         {
@@ -141,13 +141,13 @@ int main(int argc, const char** argv)
             ImaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
             ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
             cvSet(ImaskCodeBook,cvScalar(255));
-            
+
             cvNamedWindow( "Raw", 1 );
             cvNamedWindow( "ForegroundCodeBook",1);
             cvNamedWindow( "CodeBook_ConnectComp",1);
         }
 
-        // If we've got an rawImage and are good to go:                
+        // If we've got an rawImage and are good to go:
         if( rawImage )
         {
             cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );//YUV For codebook method
@@ -157,14 +157,14 @@ int main(int argc, const char** argv)
 
             if( nframes-1 == nframesToLearnBG  )
                 cvBGCodeBookClearStale( model, model->t/2 );
-            
+
             //Find the foreground if any
             if( nframes-1 >= nframesToLearnBG  )
             {
                 // Find foreground by codebook method
                 cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook );
                 // This part just to visualize bounding boxes and centers if desired
-                cvCopy(ImaskCodeBook,ImaskCodeBookCC); 
+                cvCopy(ImaskCodeBook,ImaskCodeBookCC);
                 cvSegmentFGMask( ImaskCodeBookCC );
             }
             //Display
@@ -205,7 +205,7 @@ int main(int argc, const char** argv)
         case 'u': case '1':
         case 'v': case '2':
         case 'a': case '3':
-        case 'b': 
+        case 'b':
             ch[0] = c == 'y' || c == '0' || c == 'a' || c == '3';
             ch[1] = c == 'u' || c == '1' || c == 'a' || c == '3' || c == 'b';
             ch[2] = c == 'v' || c == '2' || c == 'a' || c == '3' || c == 'b';
@@ -230,8 +230,8 @@ int main(int argc, const char** argv)
             }
             break;
         }
-    }          
-    
+    }
+
     cvReleaseCapture( &capture );
     cvDestroyWindow( "Raw" );
     cvDestroyWindow( "ForegroundCodeBook");
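
The sample above learns a codebook background model for the first nframesToLearnBG frames and only then segments the foreground. For comparison, a compact C++ sketch of the same learn-then-segment loop using cv::BackgroundSubtractorMOG2 (a different algorithm than the codebook model, shown only to illustrate the flow; assumes the 2.4-style operator() interface):

    #include <opencv2/core/core.hpp>
    #include <opencv2/video/background_segm.hpp>
    #include <opencv2/highgui/highgui.hpp>

    int main()
    {
        cv::VideoCapture cap(0);
        if (!cap.isOpened())
            return -1;

        cv::BackgroundSubtractorMOG2 subtractor;
        cv::Mat frame, fgmask;
        const int nframesToLearnBG = 300;

        for (int nframes = 0; cap.read(frame); ++nframes)
        {
            // high learning rate while learning, automatic rate afterwards
            double rate = nframes < nframesToLearnBG ? 0.5 : -1.0;
            subtractor(frame, fgmask, rate);

            cv::imshow("frame", frame);
            cv::imshow("foreground", fgmask);
            if (cv::waitKey(30) == 27)      // ESC quits, like the sample
                break;
        }
        return 0;
    }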
index d777408..2125d16 100644 (file)
@@ -1,22 +1,23 @@
-#include <opencv2/imgproc/imgproc_c.h>
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/highgui/highgui_c.h"
 #include <stdio.h>
-void help()
+
+static void help(void)
 {
-       printf("\nThis program creates an image to demonstrate the use of the \"c\" contour\n"
-                       "functions: cvFindContours() and cvApproxPoly() along with the storage\n"
-                       "functions cvCreateMemStorage() and cvDrawContours().\n"
-                       "It also shows the use of a trackbar to control contour retrieval.\n"
-                       "\n"
+    printf("\nThis program creates an image to demonstrate the use of the \"c\" contour\n"
+            "functions: cvFindContours() and cvApproxPoly() along with the storage\n"
+            "functions cvCreateMemStorage() and cvDrawContours().\n"
+            "It also shows the use of a trackbar to control contour retrieval.\n"
+            "\n"
             "Usage :\n"
-                       "./contours\n");
+            "./contours\n");
 }
 
 #define w 500
 int levels = 3;
 CvSeq* contours = 0;
 
-void on_trackbar(int pos)
+static void on_trackbar(int pos)
 {
     IplImage* cnt_img = cvCreateImage( cvSize(w,w), 8, 3 );
     CvSeq* _contours = contours;
@@ -36,7 +37,7 @@ static void findCComp( IplImage* img )
     cvZero(mask);
     cvRectangle( mask, cvPoint(0, 0), cvPoint(mask->width-1, mask->height-1),
                  cvScalarAll(1), 1, 8, 0 );
-    
+
     for( y = 0; y < img->height; y++ )
         for( x = 0; x < img->width; x++ )
         {
@@ -49,7 +50,7 @@ static void findCComp( IplImage* img )
 }
 
 
-int main()
+int main(int argc, char* argv[])
 {
     int i, j;
     CvMemStorage* storage = cvCreateMemStorage(0);
@@ -100,11 +101,11 @@ int main()
 
     cvFindContours( img32s, storage, &contours, sizeof(CvContour),
                     CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
-    
+
     //cvFindContours( img, storage, &contours, sizeof(CvContour),
     //                CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
-    
-    
+
+
     {
     const char* attrs[] = {"recursive", "1", 0};
     cvSave("contours.xml", contours, 0, 0, cvAttrList(attrs, 0));
@@ -119,7 +120,7 @@ int main()
 
     {
         CvRNG rng = cvRNG(-1);
-        
+
         CvSeq* tcontours = contours;
         cvCvtColor( img, img3, CV_GRAY2BGR );
         while( tcontours->h_next )
@@ -142,9 +143,9 @@ int main()
                 cvDrawContours(img3, tcontours->v_next, color, color, 1, -1, 8, cvPoint(0,0));
             }
         }
-        
+
     }
-    
+
     cvShowImage( "colored", img3 );
     on_trackbar(0);
     cvWaitKey(0);
@@ -153,7 +154,7 @@ int main()
     cvReleaseImage( &img32f );
     cvReleaseImage( &img32s );
     cvReleaseImage( &img3 );
-    
+
     return 0;
 }
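
The converted sample intentionally keeps the C API (cvFindContours/cvDrawContours). The equivalent flow with the C++ interface looks roughly like the sketch below, run on a synthetic binary image so it is self-contained:

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <vector>

    int main()
    {
        cv::Mat img = cv::Mat::zeros(500, 500, CV_8UC1);
        cv::circle(img, cv::Point(250, 250), 100, cv::Scalar(255), -1);
        cv::circle(img, cv::Point(250, 250), 40, cv::Scalar(0), -1);    // punch a hole

        std::vector<std::vector<cv::Point> > contours;
        std::vector<cv::Vec4i> hierarchy;
        cv::findContours(img, contours, hierarchy,
                         CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

        cv::Mat canvas = cv::Mat::zeros(img.size(), CV_8UC3);
        cv::drawContours(canvas, contours, -1, cv::Scalar(0, 255, 0), 1, 8, hierarchy);

        cv::imshow("contours", canvas);
        cv::waitKey(0);
        return 0;
    }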
 
index a112562..8a18ea1 100644 (file)
@@ -4,7 +4,7 @@
 #include <ctype.h>
 #include <stdio.h>
 
-void help()
+static void help(void)
 {
     printf("\n This sample demonstrates cascade's convertation \n"
     "Usage:\n"
index bad0b0a..579041f 100644 (file)
@@ -3,7 +3,7 @@
 #include "opencv2/highgui/highgui.hpp"
 
 #include <stdio.h>
-void help()
+static void help( void )
 {
        printf("\nThis program demostrates iterative construction of\n"
                        "delaunay triangulation and voronoi tesselation.\n"
@@ -14,7 +14,7 @@ void help()
                        "hitting any key.\n");
 }
 
-CvSubdiv2D* init_delaunay( CvMemStorage* storage,
+static CvSubdiv2D* init_delaunay( CvMemStorage* storage,
                            CvRect rect )
 {
     CvSubdiv2D* subdiv;
@@ -29,13 +29,13 @@ CvSubdiv2D* init_delaunay( CvMemStorage* storage,
 }
 
 
-void draw_subdiv_point( IplImage* img, CvPoint2D32f fp, CvScalar color )
+static void draw_subdiv_point( IplImage* img, CvPoint2D32f fp, CvScalar color )
 {
     cvCircle( img, cvPoint(cvRound(fp.x), cvRound(fp.y)), 3, color, CV_FILLED, 8, 0 );
 }
 
 
-void draw_subdiv_edge( IplImage* img, CvSubdiv2DEdge edge, CvScalar color )
+static void draw_subdiv_edge( IplImage* img, CvSubdiv2DEdge edge, CvScalar color )
 {
     CvSubdiv2DPoint* org_pt;
     CvSubdiv2DPoint* dst_pt;
@@ -59,7 +59,7 @@ void draw_subdiv_edge( IplImage* img, CvSubdiv2DEdge edge, CvScalar color )
 }
 
 
-void draw_subdiv( IplImage* img, CvSubdiv2D* subdiv,
+static void draw_subdiv( IplImage* img, CvSubdiv2D* subdiv,
                   CvScalar delaunay_color, CvScalar voronoi_color )
 {
     CvSeqReader  reader;
@@ -83,7 +83,7 @@ void draw_subdiv( IplImage* img, CvSubdiv2D* subdiv,
 }
 
 
-void locate_point( CvSubdiv2D* subdiv, CvPoint2D32f fp, IplImage* img,
+static void locate_point( CvSubdiv2D* subdiv, CvPoint2D32f fp, IplImage* img,
                    CvScalar active_color )
 {
     CvSubdiv2DEdge e;
@@ -107,7 +107,7 @@ void locate_point( CvSubdiv2D* subdiv, CvPoint2D32f fp, IplImage* img,
 }
 
 
-void draw_subdiv_facet( IplImage* img, CvSubdiv2DEdge edge )
+static void draw_subdiv_facet( IplImage* img, CvSubdiv2DEdge edge )
 {
     CvSubdiv2DEdge t = edge;
     int i, count = 0;
@@ -142,7 +142,7 @@ void draw_subdiv_facet( IplImage* img, CvSubdiv2DEdge edge )
     free( buf );
 }
 
-void paint_voronoi( CvSubdiv2D* subdiv, IplImage* img )
+static void paint_voronoi( CvSubdiv2D* subdiv, IplImage* img )
 {
     CvSeqReader  reader;
     int i, total = subdiv->edges->total;
@@ -171,7 +171,7 @@ void paint_voronoi( CvSubdiv2D* subdiv, IplImage* img )
 }
 
 
-void run(void)
+static void run(void)
 {
     char win[] = "source";
     int i;
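
For reference, the same Delaunay idea is also available through the C++ cv::Subdiv2D class (assuming an OpenCV version that ships it); a rough sketch with random sites:

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <cstdlib>
    #include <vector>

    int main()
    {
        cv::Rect rect(0, 0, 600, 600);
        cv::Subdiv2D subdiv(rect);

        for (int i = 0; i < 50; ++i)                       // 50 random sites
            subdiv.insert(cv::Point2f(float(std::rand() % 600),
                                      float(std::rand() % 600)));

        std::vector<cv::Vec6f> triangles;
        subdiv.getTriangleList(triangles);

        cv::Mat img = cv::Mat::zeros(600, 600, CV_8UC3);
        for (size_t i = 0; i < triangles.size(); ++i)
        {
            const cv::Vec6f& t = triangles[i];
            cv::Point p0(cvRound(t[0]), cvRound(t[1]));
            cv::Point p1(cvRound(t[2]), cvRound(t[3]));
            cv::Point p2(cvRound(t[4]), cvRound(t[5]));
            cv::line(img, p0, p1, cv::Scalar(0, 200, 0), 1);
            cv::line(img, p1, p2, cv::Scalar(0, 200, 0), 1);
            cv::line(img, p2, p0, cv::Scalar(0, 200, 0), 1);
        }
        cv::imshow("delaunay", img);
        cv::waitKey(0);
        return 0;
    }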
index 46b3a6c..a641a74 100644 (file)
@@ -8,7 +8,7 @@
 using namespace std;
 using namespace cv;
 
-void help()
+static void help()
 {
     cout << "\nThis program demonstrates the cascade recognizer. Now you can use Haar or LBP features.\n"
             "This classifier can recognize many ~rigid objects, it's most known use is for faces.\n"
index 3bc5063..25d718e 100644 (file)
@@ -2,16 +2,18 @@
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/imgproc/imgproc_c.h"
 #include <stdio.h>
-void help()
+
+static void help(void)
 {
-       printf(
+    printf(
             "\n This program demonstrate dense \"Farneback\n optical flow\n"
-                       "It read from camera 0, and shows how to use and display dense Franeback optical flow\n"
+            "It read from camera 0, and shows how to use and display dense Franeback optical flow\n"
             "Usage: \n"
             "./fback_c \n");
 
 }
-void drawOptFlowMap(const CvMat* flow, CvMat* cflowmap, int step,
+
+static void drawOptFlowMap(const CvMat* flow, CvMat* cflowmap, int step,
                     double scale, CvScalar color)
 {
     int x, y;
@@ -25,7 +27,7 @@ void drawOptFlowMap(const CvMat* flow, CvMat* cflowmap, int step,
         }
 }
 
-int main()
+int main( int argc, char** argv )
 {
     CvCapture* capture = cvCreateCameraCapture(0);
     CvMat* prevgray = 0, *gray = 0, *flow = 0, *cflow = 0;
@@ -34,9 +36,9 @@ int main()
 
     if( !capture )
         return -1;
-    
+
     cvNamedWindow("flow", 1);
-    
+
     for(;;)
     {
         int firstFrame = gray == 0;
@@ -51,7 +53,7 @@ int main()
             cflow = cvCreateMat(gray->rows, gray->cols, CV_8UC3);
         }
         cvCvtColor(frame, gray, CV_BGR2GRAY);
-        
+
         if( !firstFrame )
         {
             cvCalcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
@@ -62,7 +64,7 @@ int main()
         if(cvWaitKey(30)>=0)
             break;
         {
-        CvMat* temp;    
+        CvMat* temp;
         CV_SWAP(prevgray, gray, temp);
         }
     }
index f285981..ee0d80f 100644 (file)
@@ -18,7 +18,7 @@
 #include <stdio.h>
 
 using namespace std;
-void help()
+static void help()
 {
     printf(
         "This program demonstrated the use of the SURF Detector and Descriptor using\n"
@@ -35,7 +35,57 @@ void help()
 
 IplImage* image = 0;
 
-double
+#ifdef USE_FLANN
+static void
+flannFindPairs( const CvSeq*, const CvSeq* objectDescriptors,
+           const CvSeq*, const CvSeq* imageDescriptors, vector<int>& ptpairs )
+{
+       int length = (int)(objectDescriptors->elem_size/sizeof(float));
+
+    cv::Mat m_object(objectDescriptors->total, length, CV_32F);
+       cv::Mat m_image(imageDescriptors->total, length, CV_32F);
+
+
+       // copy descriptors
+    CvSeqReader obj_reader;
+       float* obj_ptr = m_object.ptr<float>(0);
+    cvStartReadSeq( objectDescriptors, &obj_reader );
+    for(int i = 0; i < objectDescriptors->total; i++ )
+    {
+        const float* descriptor = (const float*)obj_reader.ptr;
+        CV_NEXT_SEQ_ELEM( obj_reader.seq->elem_size, obj_reader );
+        memcpy(obj_ptr, descriptor, length*sizeof(float));
+        obj_ptr += length;
+    }
+    CvSeqReader img_reader;
+       float* img_ptr = m_image.ptr<float>(0);
+    cvStartReadSeq( imageDescriptors, &img_reader );
+    for(int i = 0; i < imageDescriptors->total; i++ )
+    {
+        const float* descriptor = (const float*)img_reader.ptr;
+        CV_NEXT_SEQ_ELEM( img_reader.seq->elem_size, img_reader );
+        memcpy(img_ptr, descriptor, length*sizeof(float));
+        img_ptr += length;
+    }
+
+    // find nearest neighbors using FLANN
+    cv::Mat m_indices(objectDescriptors->total, 2, CV_32S);
+    cv::Mat m_dists(objectDescriptors->total, 2, CV_32F);
+    cv::flann::Index flann_index(m_image, cv::flann::KDTreeIndexParams(4));  // using 4 randomized kdtrees
+    flann_index.knnSearch(m_object, m_indices, m_dists, 2, cv::flann::SearchParams(64) ); // maximum number of leafs checked
+
+    int* indices_ptr = m_indices.ptr<int>(0);
+    float* dists_ptr = m_dists.ptr<float>(0);
+    for (int i=0;i<m_indices.rows;++i) {
+       if (dists_ptr[2*i]<0.6*dists_ptr[2*i+1]) {
+               ptpairs.push_back(i);
+               ptpairs.push_back(indices_ptr[2*i]);
+       }
+    }
+}
+#else
+
+static double
 compareSURFDescriptors( const float* d1, const float* d2, double best, int length )
 {
     double total_cost = 0;
@@ -53,8 +103,7 @@ compareSURFDescriptors( const float* d1, const float* d2, double best, int lengt
     return total_cost;
 }
 
-
-int
+static int
 naiveNearestNeighbor( const float* vec, int laplacian,
                       const CvSeq* model_keypoints,
                       const CvSeq* model_descriptors )
@@ -70,7 +119,7 @@ naiveNearestNeighbor( const float* vec, int laplacian,
     {
         const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
         const float* mvec = (const float*)reader.ptr;
-       CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
+        CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
         CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
         if( laplacian != kp->laplacian )
             continue;
@@ -89,7 +138,7 @@ naiveNearestNeighbor( const float* vec, int laplacian,
     return -1;
 }
 
-void
+static void
 findPairs( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
            const CvSeq* imageKeypoints, const CvSeq* imageDescriptors, vector<int>& ptpairs )
 {
@@ -113,59 +162,10 @@ findPairs( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
         }
     }
 }
-
-
-void
-flannFindPairs( const CvSeq*, const CvSeq* objectDescriptors,
-           const CvSeq*, const CvSeq* imageDescriptors, vector<int>& ptpairs )
-{
-       int length = (int)(objectDescriptors->elem_size/sizeof(float));
-
-    cv::Mat m_object(objectDescriptors->total, length, CV_32F);
-       cv::Mat m_image(imageDescriptors->total, length, CV_32F);
-
-
-       // copy descriptors
-    CvSeqReader obj_reader;
-       float* obj_ptr = m_object.ptr<float>(0);
-    cvStartReadSeq( objectDescriptors, &obj_reader );
-    for(int i = 0; i < objectDescriptors->total; i++ )
-    {
-        const float* descriptor = (const float*)obj_reader.ptr;
-        CV_NEXT_SEQ_ELEM( obj_reader.seq->elem_size, obj_reader );
-        memcpy(obj_ptr, descriptor, length*sizeof(float));
-        obj_ptr += length;
-    }
-    CvSeqReader img_reader;
-       float* img_ptr = m_image.ptr<float>(0);
-    cvStartReadSeq( imageDescriptors, &img_reader );
-    for(int i = 0; i < imageDescriptors->total; i++ )
-    {
-        const float* descriptor = (const float*)img_reader.ptr;
-        CV_NEXT_SEQ_ELEM( img_reader.seq->elem_size, img_reader );
-        memcpy(img_ptr, descriptor, length*sizeof(float));
-        img_ptr += length;
-    }
-
-    // find nearest neighbors using FLANN
-    cv::Mat m_indices(objectDescriptors->total, 2, CV_32S);
-    cv::Mat m_dists(objectDescriptors->total, 2, CV_32F);
-    cv::flann::Index flann_index(m_image, cv::flann::KDTreeIndexParams(4));  // using 4 randomized kdtrees
-    flann_index.knnSearch(m_object, m_indices, m_dists, 2, cv::flann::SearchParams(64) ); // maximum number of leafs checked
-
-    int* indices_ptr = m_indices.ptr<int>(0);
-    float* dists_ptr = m_dists.ptr<float>(0);
-    for (int i=0;i<m_indices.rows;++i) {
-       if (dists_ptr[2*i]<0.6*dists_ptr[2*i+1]) {
-               ptpairs.push_back(i);
-               ptpairs.push_back(indices_ptr[2*i]);
-       }
-    }
-}
-
+#endif
 
 /* a rough implementation for object location */
-int
+static int
 locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
                     const CvSeq* imageKeypoints, const CvSeq* imageDescriptors,
                     const CvPoint src_corners[4], CvPoint dst_corners[4] )
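
The relocated flannFindPairs() copies the SURF descriptors into cv::Mat, queries a 4-tree FLANN index for the two nearest neighbours, and keeps a match only when the best distance is below 0.6 of the second best (a Lowe-style ratio test). A self-contained sketch of that matching core on synthetic descriptors (random data instead of SURF output, 2.4-style cv::flann::Index assumed):

    #include <opencv2/core/core.hpp>
    #include <opencv2/features2d/features2d.hpp>   // pulls in cv::flann::Index (miniflann)
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main()
    {
        cv::Mat objectDesc(200, 64, CV_32F), imageDesc(500, 64, CV_32F);
        cv::randu(objectDesc, cv::Scalar::all(0), cv::Scalar::all(1));
        cv::randu(imageDesc,  cv::Scalar::all(0), cv::Scalar::all(1));

        cv::flann::Index index(imageDesc, cv::flann::KDTreeIndexParams(4));   // 4 kd-trees
        cv::Mat indices(objectDesc.rows, 2, CV_32S), dists(objectDesc.rows, 2, CV_32F);
        index.knnSearch(objectDesc, indices, dists, 2, cv::flann::SearchParams(64));

        std::vector<std::pair<int, int> > pairs;
        for (int i = 0; i < indices.rows; ++i)
            if (dists.at<float>(i, 0) < 0.6f * dists.at<float>(i, 1))   // ratio test
                pairs.push_back(std::make_pair(i, indices.at<int>(i, 0)));

        std::printf("%d tentative matches\n", (int)pairs.size());
        return 0;
    }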
index 25eb136..02cd266 100644 (file)
@@ -11,7 +11,7 @@
 using namespace std;
 using namespace cv;
 
-void help()
+static void help()
 {
     cout << "This program shows the use of the Calonder point descriptor classifier"
             "SURF is used to detect interest points, Calonder is used to describe/match these points\n"
@@ -28,7 +28,7 @@ void help()
 /*
  * Generates random perspective transform of image
  */
-void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
+static void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
 {
     H.create(3, 3, CV_32FC1);
     H.at<float>(0,0) = rng.uniform( 0.8f, 1.2f);
@@ -51,7 +51,7 @@ void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
  *
  * To train Calonder classifier RTreeClassifier class need to be used.
  */
-void trainCalonderClassifier( const string& classifierFilename, const string& imgFilename )
+static void trainCalonderClassifier( const string& classifierFilename, const string& imgFilename )
 {
     // Reads train images
     ifstream is( imgFilename.c_str(), ifstream::in );
@@ -104,7 +104,7 @@ void trainCalonderClassifier( const string& classifierFilename, const string& im
  * but it is convenient to use CalonderDescriptorExtractor class which is wrapper of
  * RTreeClassifier.
  */
-void testCalonderClassifier( const string& classifierFilename, const string& imgFilename )
+static void testCalonderClassifier( const string& classifierFilename, const string& imgFilename )
 {
     Mat img1 = imread( imgFilename, CV_LOAD_IMAGE_GRAYSCALE ), img2, H12;
     if( img1.empty() )
index 81ebe1d..3a27c5e 100644 (file)
@@ -11,7 +11,7 @@
 #include <stdio.h>
 
 using namespace cv;
-void help()
+static void help()
 {
     printf( "This program shows the use of the \"fern\" plannar PlanarObjectDetector point\n"
             "descriptor classifier\n"
index 6067e2a..cd6e096 100644 (file)
@@ -2,8 +2,8 @@
 #include "opencv2/highgui/highgui.hpp"\r
 #include <stdio.h>\r
 \r
-#ifdef HAVE_CVCONFIG_H \r
-#include <cvconfig.h> \r
+#ifdef HAVE_CVCONFIG_H\r
+#include <cvconfig.h>\r
 #endif\r
 #ifdef HAVE_TBB\r
 #include "tbb/task_scheduler_init.h"\r
 \r
 using namespace cv;\r
 \r
-void help()\r
+static void help()\r
 {\r
-       printf( "This program demonstrated the use of the latentSVM detector.\n"\r
-                       "It reads in a trained object model and then uses that to detect the object in an image\n"\r
-                       "Call:\n"\r
+    printf( "This program demonstrated the use of the latentSVM detector.\n"\r
+            "It reads in a trained object model and then uses that to detect the object in an image\n"\r
+            "Call:\n"\r
             "./latentsvmdetect [<image_filename> <model_filename> [<threads_number>]]\n"\r
-                       "  The defaults for image_filename and model_filename are cat.jpg and cat.xml respectively\n"\r
-                       "  Press any key to quit.\n");\r
+            "  The defaults for image_filename and model_filename are cat.jpg and cat.xml respectively\n"\r
+            "  Press any key to quit.\n");\r
 }\r
 \r
 const char* model_filename = "cat.xml";\r
 const char* image_filename = "cat.jpg";\r
 int   tbbNumThreads = -1;\r
 \r
-void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, int numThreads = -1)\r
+static void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, int numThreads = -1)\r
 {\r
     CvMemStorage* storage = cvCreateMemStorage(0);\r
     CvSeq* detections = 0;\r
     int i = 0;\r
-       int64 start = 0, finish = 0;\r
+    int64 start = 0, finish = 0;\r
 #ifdef HAVE_TBB\r
     tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);\r
-       if (numThreads > 0)\r
-       {\r
-               init.initialize(numThreads);\r
+    if (numThreads > 0)\r
+    {\r
+        init.initialize(numThreads);\r
         printf("Number of threads %i\n", numThreads);\r
-       }\r
-       else\r
-       {\r
-               printf("Number of threads is not correct for TBB version");\r
-               return;\r
-       }\r
+    }\r
+    else\r
+    {\r
+        printf("Number of threads is not correct for TBB version");\r
+        return;\r
+    }\r
 #endif\r
 \r
-       start = cvGetTickCount();\r
+    start = cvGetTickCount();\r
     detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, numThreads);\r
-       finish = cvGetTickCount();\r
-       printf("detection time = %.3f\n", (float)(finish - start) / (float)(cvGetTickFrequency() * 1000000.0));\r
+    finish = cvGetTickCount();\r
+    printf("detection time = %.3f\n", (float)(finish - start) / (float)(cvGetTickFrequency() * 1000000.0));\r
 \r
 #ifdef HAVE_TBB\r
     init.terminate();\r
@@ -56,10 +56,10 @@ void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, in
     for( i = 0; i < detections->total; i++ )\r
     {\r
         CvObjectDetection detection = *(CvObjectDetection*)cvGetSeqElem( detections, i );\r
-               CvRect bounding_box = detection.rect;\r
+        CvRect bounding_box = detection.rect;\r
         cvRectangle( image, cvPoint(bounding_box.x, bounding_box.y),\r
-                     cvPoint(bounding_box.x + bounding_box.width, \r
-                                                       bounding_box.y + bounding_box.height),\r
+                     cvPoint(bounding_box.x + bounding_box.width,\r
+                            bounding_box.y + bounding_box.height),\r
                      CV_RGB(255,0,0), 3 );\r
     }\r
     cvReleaseMemStorage( &storage );\r
@@ -67,31 +67,31 @@ void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, in
 \r
 int main(int argc, char* argv[])\r
 {\r
-       help();\r
-       if (argc > 2)\r
-       {\r
-               image_filename = argv[1];\r
-               model_filename = argv[2];\r
+    help();\r
+    if (argc > 2)\r
+    {\r
+        image_filename = argv[1];\r
+        model_filename = argv[2];\r
         if (argc > 3)\r
         {\r
             tbbNumThreads = atoi(argv[3]);\r
         }\r
-       }\r
-       IplImage* image = cvLoadImage(image_filename);\r
-       if (!image)\r
-       {\r
-               printf( "Unable to load the image\n"\r
+    }\r
+    IplImage* image = cvLoadImage(image_filename);\r
+    if (!image)\r
+    {\r
+        printf( "Unable to load the image\n"\r
                 "Pass it as the first parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );\r
-               return -1;\r
-       }\r
+        return -1;\r
+    }\r
     CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(model_filename);\r
-       if (!detector)\r
-       {\r
-               printf( "Unable to load the model\n"\r
+    if (!detector)\r
+    {\r
+        printf( "Unable to load the model\n"\r
                 "Pass it as the second parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );\r
-               cvReleaseImage( &image );\r
-               return -1;\r
-       }\r
+        cvReleaseImage( &image );\r
+        return -1;\r
+    }\r
     detect_and_draw_objects( image, detector, tbbNumThreads );\r
     cvNamedWindow( "test", 0 );\r
     cvShowImage( "test", image );\r
@@ -99,6 +99,6 @@ int main(int argc, char* argv[])
     cvReleaseLatentSvmDetector( &detector );\r
     cvReleaseImage( &image );\r
     cvDestroyAllWindows();\r
-    \r
-       return 0;\r
+\r
+    return 0;\r
 }\r
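
The detector sample defers TBB scheduler construction until a usable thread count is known (task_scheduler_init::deferred, then initialize()/terminate()). A minimal stand-alone sketch of that pattern, assuming a build with TBB available; the command-line handling is illustrative:

    #include <tbb/task_scheduler_init.h>
    #include <cstdio>
    #include <cstdlib>

    int main(int argc, char* argv[])
    {
        int numThreads = argc > 1 ? std::atoi(argv[1]) : 2;   // hypothetical CLI argument

        tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);
        if (numThreads > 0)
        {
            init.initialize(numThreads);                      // scheduler starts only here
            std::printf("Number of threads %i\n", numThreads);
        }

        // ... TBB-parallel work would run here ...

        if (init.is_active())
            init.terminate();                                 // explicit teardown, as in the sample
        return 0;
    }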
index ffd7547..c904dd9 100644 (file)
@@ -15,7 +15,7 @@ int open_close_pos = 0;
 int erode_dilate_pos = 0;
 
 // callback function for open/close trackbar
-void OpenClose(int pos)
+static void OpenClose(int pos)
 {
     int n = open_close_pos - max_iters;
     int an = n > 0 ? n : -n;
@@ -35,7 +35,7 @@ void OpenClose(int pos)
 }
 
 // callback function for erode/dilate trackbar
-void ErodeDilate(int pos)
+static void ErodeDilate(int pos)
 {
     int n = erode_dilate_pos - max_iters;
     int an = n > 0 ? n : -n;
@@ -52,7 +52,7 @@ void ErodeDilate(int pos)
     cvShowImage("Erode/Dilate",dst);
 }
 
-void help()
+static void help(void)
 {
     printf( "This program demonstrated the use of the morphology operator, especially open, close, erode, dilate operations\n"
                "Morphology operators are built on max (close) and min (open) operators as measured by pixels covered by small structuring elements.\n"
index ebcc8d8..a290c48 100644 (file)
@@ -5,7 +5,7 @@
 #include <stdio.h>
 #include <ctype.h>
 
-void help()
+static void help(void)
 {
        printf(
                        "\nThis program demonstrated the use of motion templates -- basically using the gradients\n"
@@ -39,7 +39,7 @@ CvMemStorage* storage = 0; // temporary storage
 //  img - input video frame
 //  dst - resultant motion picture
 //  args - optional parameters
-void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
+static void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
 {
     double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
     CvSize size = cvSize(img->width,img->height); // get current frame size
index 1729b04..e0217a2 100644 (file)
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
-       cout << "\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
+    cout << "\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
     "It finds the most stable (in size) dark and white regions as a threshold is increased.\n"
     "\nCall:\n"
     "./mser_sample <path_and_image_filename, Default is 'puzzle.png'>\n\n";
 }
 
-static const Vec3b bcolors[] = 
+static const Vec3b bcolors[] =
 {
     Vec3b(0,0,255),
     Vec3b(0,128,255),
@@ -35,10 +35,10 @@ static const Vec3b bcolors[] =
 
 int main( int argc, char** argv )
 {
-       string path;
-       Mat img0, img, yuv, gray, ellipses;
-       help();
-    
+    string path;
+    Mat img0, img, yuv, gray, ellipses;
+    help();
+
     img0 = imread( argc != 2 ? "puzzle.png" : argv[1], 1 );
     if( img0.empty() )
     {
@@ -48,39 +48,39 @@ int main( int argc, char** argv )
             cout << "Unable to load image " << argv[1] << endl;
         return 0;
     }
-    
-       cvtColor(img0, yuv, COLOR_BGR2YCrCb);
+
+    cvtColor(img0, yuv, COLOR_BGR2YCrCb);
     cvtColor(img0, gray, COLOR_BGR2GRAY);
     cvtColor(gray, img, COLOR_GRAY2BGR);
     img.copyTo(ellipses);
-    
+
     vector<vector<Point> > contours;
-       double t = (double)getTickCount();
+    double t = (double)getTickCount();
     MSER()(yuv, contours);
-       t = (double)getTickCount() - t;
-       printf( "MSER extracted %d contours in %g ms.\n", (int)contours.size(),
+    t = (double)getTickCount() - t;
+    printf( "MSER extracted %d contours in %g ms.\n", (int)contours.size(),
            t*1000./getTickFrequency() );
-    
-       // draw mser's with different colors
-       for( int i = (int)contours.size()-1; i >= 0; i-- )
-       {
-               const vector<Point>& r = contours[i];
-               for ( int j = 0; j < (int)r.size(); j++ )
-               {
-                       Point pt = r[j];
+
+    // draw mser's with different colors
+    for( int i = (int)contours.size()-1; i >= 0; i-- )
+    {
+        const vector<Point>& r = contours[i];
+        for ( int j = 0; j < (int)r.size(); j++ )
+        {
+            Point pt = r[j];
             img.at<Vec3b>(pt) = bcolors[i%9];
-               }
-        
+        }
+
         // find ellipse (it seems cvfitellipse2 have error or sth?)
         RotatedRect box = fitEllipse( r );
-        
+
         box.angle=(float)CV_PI/2-box.angle;
         ellipse( ellipses, box, Scalar(196,255,255), 2 );
-       }
-    
-       imshow( "original", img0 );
-       imshow( "response", img );
-       imshow( "ellipses", ellipses );
-    
-       waitKey(0);
+    }
+
+    imshow( "original", img0 );
+    imshow( "response", img );
+    imshow( "ellipses", ellipses );
+
+    waitKey(0);
 }
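
A condensed, self-contained variant of the MSER flow above (a synthetic input image instead of puzzle.png, the same 2.4-style functor call and fitEllipse step):

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <cstdio>
    #include <vector>

    int main()
    {
        cv::Mat img(480, 640, CV_8UC3, cv::Scalar::all(255));
        cv::ellipse(img, cv::RotatedRect(cv::Point2f(320, 240),
                                         cv::Size2f(200, 120), 30.f),
                    cv::Scalar(40, 40, 40), -1);                 // one dark blob

        std::vector<std::vector<cv::Point> > regions;
        cv::MSER()(img, regions);                                // 2.4-style functor call

        for (size_t i = 0; i < regions.size(); ++i)
        {
            if (regions[i].size() < 5) continue;                 // fitEllipse needs >= 5 points
            cv::RotatedRect box = cv::fitEllipse(regions[i]);
            cv::ellipse(img, box, cv::Scalar(0, 0, 255), 2);
        }
        std::printf("MSER found %d regions\n", (int)regions.size());
        cv::imshow("mser", img);
        cv::waitKey(0);
        return 0;
    }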
index 4211bcf..2c80727 100644 (file)
@@ -2,9 +2,9 @@
 #include "opencv2/ml/ml.hpp"
 #include <stdio.h>
 
-void help()
+static void help()
 {
-       printf("\nThis program demonstrated the use of OpenCV's decision tree function for learning and predicting data\n"
+    printf("\nThis program demonstrated the use of OpenCV's decision tree function for learning and predicting data\n"
             "Usage :\n"
             "./mushroom <path to agaricus-lepiota.data>\n"
             "\n"
@@ -21,7 +21,7 @@ void help()
             "// the values are encoded by characters.\n\n");
 }
 
-int mushroom_read_database( const char* filename, CvMat** data, CvMat** missing, CvMat** responses )
+static int mushroom_read_database( const char* filename, CvMat** data, CvMat** missing, CvMat** responses )
 {
     const int M = 1024;
     FILE* f = fopen( filename, "rt" );
@@ -95,7 +95,7 @@ int mushroom_read_database( const char* filename, CvMat** data, CvMat** missing,
 }
 
 
-CvDTree* mushroom_create_dtree( const CvMat* data, const CvMat* missing,
+static CvDTree* mushroom_create_dtree( const CvMat* data, const CvMat* missing,
                                 const CvMat* responses, float p_weight )
 {
     CvDTree* dtree;
@@ -107,7 +107,7 @@ CvDTree* mushroom_create_dtree( const CvMat* data, const CvMat* missing,
     cvSet( var_type, cvScalarAll(CV_VAR_CATEGORICAL) ); // all the variables are categorical
 
     dtree = new CvDTree;
-    
+
     dtree->train( data, CV_ROW_SAMPLE, responses, 0, 0, var_type, missing,
                   CvDTreeParams( 8, // max depth
                                  10, // min sample count
@@ -179,7 +179,7 @@ static const char* var_desc[] =
 };
 
 
-void print_variable_importance( CvDTree* dtree, const char** var_desc )
+static void print_variable_importance( CvDTree* dtree, const char** var_desc )
 {
     const CvMat* var_importance = dtree->get_var_importance();
     int i;
@@ -215,7 +215,7 @@ void print_variable_importance( CvDTree* dtree, const char** var_desc )
     }
 }
 
-void interactive_classification( CvDTree* dtree, const char** var_desc )
+static void interactive_classification( CvDTree* dtree, const char** var_desc )
 {
     char input[1000];
     const CvDTreeNode* root;
@@ -230,14 +230,14 @@ void interactive_classification( CvDTree* dtree, const char** var_desc )
     for(;;)
     {
         const CvDTreeNode* node;
-        
+
         printf( "Start/Proceed with interactive mushroom classification (y/n): " );
         int values_read = scanf( "%1s", input );
         CV_Assert(values_read == 1);
 
         if( input[0] != 'y' && input[0] != 'Y' )
             break;
-        printf( "Enter 1-letter answers, '?' for missing/unknown value...\n" ); 
+        printf( "Enter 1-letter answers, '?' for missing/unknown value...\n" );
 
         // custom version of predict
         node = root;
@@ -245,7 +245,7 @@ void interactive_classification( CvDTree* dtree, const char** var_desc )
         {
             CvDTreeSplit* split = node->split;
             int dir = 0;
-            
+
             if( !node->left || node->Tn <= dtree->get_pruned_tree_idx() || !node->split )
                 break;
 
@@ -279,7 +279,7 @@ void interactive_classification( CvDTree* dtree, const char** var_desc )
                 else
                     printf( "Error: unrecognized value\n" );
             }
-            
+
             if( !dir )
             {
                 printf( "Impossible to classify the sample\n");
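
mushroom_create_dtree() above feeds a var_type matrix and class priors into CvDTree::train(). A toy, self-contained sketch of the same training call on synthetic ordered data (hypothetical values throughout; no priors, surrogates on, no cross-validation pruning):

    #include <opencv2/core/core.hpp>
    #include <opencv2/ml/ml.hpp>
    #include <cstdio>

    int main()
    {
        // 8 samples, 2 ordered features; class 1 when both features are "high"
        float samples[8][2] = { {1,1},{2,1},{1,2},{2,2},{8,9},{9,8},{9,9},{8,8} };
        float labels[8]     = {  0,    0,    0,    0,    1,    1,    1,    1   };
        cv::Mat trainData(8, 2, CV_32F, samples);
        cv::Mat responses(8, 1, CV_32F, labels);

        // features ordered, response categorical (size is nvars + 1)
        cv::Mat varType(3, 1, CV_8U, cv::Scalar(CV_VAR_ORDERED));
        varType.at<uchar>(2) = CV_VAR_CATEGORICAL;

        CvDTree dtree;
        dtree.train(trainData, CV_ROW_SAMPLE, responses, cv::Mat(), cv::Mat(),
                    varType, cv::Mat(),
                    CvDTreeParams(8,      // max depth
                                  2,      // min sample count
                                  0,      // regression accuracy: unused for classification
                                  true,   // compute surrogate splits
                                  15,     // max number of categories
                                  0,      // no cross-validation folds
                                  false, false, 0));

        float pred = (float)dtree.predict(cv::Mat(1, 2, CV_32F, cv::Scalar(9)))->value;
        std::printf("prediction for (9,9): %g\n", pred);
        return 0;
    }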
index fdbe1f7..59af78e 100644 (file)
@@ -18,7 +18,7 @@
 #include <string>
 #include <stdio.h>
 
-void help()
+static void help()
 {
        printf("\nThis program demonstrates the one way interest point descriptor found in features2d.hpp\n"
                        "Correspondences are drawn\n");
index 382e9f8..c93c1d2 100644 (file)
@@ -6,7 +6,7 @@
 #include <ctype.h>
 #include <stdio.h>
 
-void help()
+static void help( void )
 {
        printf("\nThis program illustrates Linear-Polar and Log-Polar image transforms\n"
             "Usage :\n"
index 987036d..5f9b381 100644 (file)
@@ -4,7 +4,7 @@
 #include "opencv2/legacy/legacy.hpp"
 #include <stdio.h>
 
-void help()
+static void help(void)
 {
        printf("\nThis program demonstrated color pyramid segmentation cvcvPyrSegmentation() which is controlled\n"
                        "by two trhesholds which can be manipulated by a trackbar. It can take an image file name or defaults to 'fruits.jpg'\n"
@@ -32,7 +32,7 @@ CvMemStorage *storage;
 
 CvPoint pt1, pt2;
 
-void ON_SEGMENT(int a)
+static void ON_SEGMENT(int a)
 {
     cvPyrSegmentation(image0, image1, storage, &comp,
                       level, threshold1+1, threshold2+1);
index 5c255e9..74993ae 100644 (file)
@@ -3,23 +3,23 @@
 #include <stdio.h>
 #include <map>
 
-void help()
+static void help()
 {
-       printf(
-               "\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees:\n"
-               "CvDTree dtree;\n"
-               "CvBoost boost;\n"
-               "CvRTrees rtrees;\n"
-               "CvERTrees ertrees;\n"
-               "CvGBTrees gbtrees;\n"
-               "Call:\n\t./tree_engine [-r <response_column>] [-c] <csv filename>\n"
+    printf(
+        "\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees:\n"
+        "CvDTree dtree;\n"
+        "CvBoost boost;\n"
+        "CvRTrees rtrees;\n"
+        "CvERTrees ertrees;\n"
+        "CvGBTrees gbtrees;\n"
+        "Call:\n\t./tree_engine [-r <response_column>] [-c] <csv filename>\n"
         "where -r <response_column> specified the 0-based index of the response (0 by default)\n"
         "-c specifies that the response is categorical (it's ordered by default) and\n"
         "<csv filename> is the name of training data file in comma-separated value format\n\n");
 }
 
 
-int count_classes(CvMLData& data)
+static int count_classes(CvMLData& data)
 {
     cv::Mat r(data.get_responses());
     std::map<int, int> rmap;
@@ -30,26 +30,26 @@ int count_classes(CvMLData& data)
         int ival = cvRound(val);
         if( ival != val )
             return -1;
-        rmap[ival] = 1; 
+        rmap[ival] = 1;
     }
     return (int)rmap.size();
 }
 
-void print_result(float train_err, float test_err, const CvMat* _var_imp)
+static void print_result(float train_err, float test_err, const CvMat* _var_imp)
 {
     printf( "train error    %f\n", train_err );
     printf( "test error    %f\n\n", test_err );
-       
+
     if (_var_imp)
     {
         cv::Mat var_imp(_var_imp), sorted_idx;
         cv::sortIdx(var_imp, sorted_idx, CV_SORT_EVERY_ROW + CV_SORT_DESCENDING);
-        
+
         printf( "variable importance:\n" );
         int i, n = (int)var_imp.total();
         int type = var_imp.type();
         CV_Assert(type == CV_32F || type == CV_64F);
-        
+
         for( i = 0; i < n; i++)
         {
             int k = sorted_idx.at<int>(i);
@@ -69,7 +69,7 @@ int main(int argc, char** argv)
     const char* filename = 0;
     int response_idx = 0;
     bool categorical_response = false;
-    
+
     for(int i = 1; i < argc; i++)
     {
         if(strcmp(argv[i], "-r") == 0)
@@ -85,26 +85,26 @@ int main(int argc, char** argv)
             return -1;
         }
     }
-        
+
     printf("\nReading in %s...\n\n",filename);
     CvDTree dtree;
     CvBoost boost;
     CvRTrees rtrees;
     CvERTrees ertrees;
-       CvGBTrees gbtrees;
+    CvGBTrees gbtrees;
 
     CvMLData data;
 
-    
+
     CvTrainTestSplit spl( 0.5f );
-    
+
     if ( data.read_csv( filename ) == 0)
     {
         data.set_response_idx( response_idx );
         if(categorical_response)
             data.change_var_type( response_idx, CV_VAR_CATEGORICAL );
         data.set_train_test_split( &spl );
-        
+
         printf("======DTREE=====\n");
         dtree.train( &data, CvDTreeParams( 10, 2, 0, false, 16, 0, false, false, 0 ));
         print_result( dtree.calc_error( &data, CV_TRAIN_ERROR), dtree.calc_error( &data, CV_TEST_ERROR ), dtree.get_var_importance() );
@@ -125,10 +125,10 @@ int main(int argc, char** argv)
         print_result( ertrees.calc_error( &data, CV_TRAIN_ERROR), ertrees.calc_error( &data, CV_TEST_ERROR ), ertrees.get_var_importance() );
 
         printf("======GBTREES=====\n");
-               if (categorical_response)
-                       gbtrees.train( &data, CvGBTreesParams(CvGBTrees::DEVIANCE_LOSS, 100, 0.1f, 0.8f, 5, false));
-               else
-                       gbtrees.train( &data, CvGBTreesParams(CvGBTrees::SQUARED_LOSS, 100, 0.1f, 0.8f, 5, false));
+        if (categorical_response)
+            gbtrees.train( &data, CvGBTreesParams(CvGBTrees::DEVIANCE_LOSS, 100, 0.1f, 0.8f, 5, false));
+        else
+            gbtrees.train( &data, CvGBTreesParams(CvGBTrees::SQUARED_LOSS, 100, 0.1f, 0.8f, 5, false));
         print_result( gbtrees.calc_error( &data, CV_TRAIN_ERROR), gbtrees.calc_error( &data, CV_TEST_ERROR ), 0 ); //doesn't compute importance
     }
     else
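For reference, the tree_engine hunks above revolve around the old C ML API's CvMLData workflow. The short sketch below reuses only calls that already appear in the diff (read_csv, set_response_idx, set_train_test_split, CvDTree::train with CvDTreeParams, calc_error); the CSV file name is a placeholder, not a value taken from the sample.

    // Minimal sketch of the CvMLData workflow exercised by tree_engine:
    // load a CSV, mark the response column, split into train/test halves,
    // train one decision tree and report both error rates.
    #include "opencv2/opencv.hpp"
    #include <cstdio>

    int main()
    {
        CvMLData data;
        if (data.read_csv("train.csv") != 0)            // placeholder file name
            return std::fprintf(stderr, "could not read train.csv\n"), -1;

        data.set_response_idx(0);                       // response in column 0
        CvTrainTestSplit spl(0.5f);                     // 50/50 split, as in the sample
        data.set_train_test_split(&spl);

        CvDTree dtree;
        dtree.train(&data, CvDTreeParams(10, 2, 0, false, 16, 0, false, false, 0));
        std::printf("train error %f\ntest error  %f\n",
                    dtree.calc_error(&data, CV_TRAIN_ERROR),
                    dtree.calc_error(&data, CV_TEST_ERROR));
        return 0;
    }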
index 203b6d7..061e977 100644 (file)
@@ -15,7 +15,7 @@ using namespace std;
 
 enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
 
-void help()
+static void help()
 {
         printf( "\nThis is a camera calibration sample that calibrates 3 horizontally placed cameras together.\n"
                "Usage: 3calibration\n"
@@ -34,7 +34,7 @@ void help()
 static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
 {
     corners.resize(0);
-    
+
     for( int i = 0; i < boardSize.height; i++ )
         for( int j = 0; j < boardSize.width; j++ )
             corners.push_back(Point3f(float(j*squareSize),
@@ -43,7 +43,7 @@ static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point
 
 static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
                             vector<vector<Point2f> > imagePoints2,
-                            vector<vector<Point2f> > imagePoints3,                            
+                            vector<vector<Point2f> > imagePoints3,
                             Size imageSize, Size boardSize,
                             float squareSize, float aspectRatio,
                             int flags,
@@ -53,13 +53,13 @@ static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
                             Mat& R12, Mat& T12, Mat& R13, Mat& T13)
 {
     int c, i;
-    
+
     // step 1: calibrate each camera individually
     vector<vector<Point3f> > objpt(1);
     vector<vector<Point2f> > imgpt;
     calcChessboardCorners(boardSize, squareSize, objpt[0]);
     vector<Mat> rvecs, tvecs;
-    
+
     for( c = 1; c <= 3; c++ )
     {
         const vector<vector<Point2f> >& imgpt0 = c == 1 ? imagePoints1 : c == 2 ? imagePoints2 : imagePoints3;
@@ -71,7 +71,7 @@ static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
                 imgpt.push_back(imgpt0[i]);
                 N += (int)imgpt0[i].size();
             }
-        
+
         if( imgpt.size() < 3 )
         {
             printf("Error: not enough views for camera %d\n", c);
@@ -79,13 +79,13 @@ static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
         }
 
         objpt.resize(imgpt.size(),objpt[0]);
-            
+
         Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
         if( flags & CV_CALIB_FIX_ASPECT_RATIO )
             cameraMatrix.at<double>(0,0) = aspectRatio;
-        
+
         Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
-        
+
         double err = calibrateCamera(objpt, imgpt, imageSize, cameraMatrix,
                         distCoeffs, rvecs, tvecs,
                         flags|CV_CALIB_FIX_K3/*|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5|CV_CALIB_FIX_K6*/);
@@ -96,7 +96,7 @@ static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
             return false;
         }
         printf("Camera %d calibration reprojection error = %g\n", c, sqrt(err/N));
-        
+
         if( c == 1 )
             cameraMatrix1 = cameraMatrix, distCoeffs1 = distCoeffs;
         else if( c == 2 )
@@ -104,18 +104,18 @@ static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
         else
             cameraMatrix3 = cameraMatrix, distCoeffs3 = distCoeffs;
     }
-    
+
     vector<vector<Point2f> > imgpt_right;
-    
+
     // step 2: calibrate (1,2) and (3,2) pairs
     for( c = 2; c <= 3; c++ )
     {
         const vector<vector<Point2f> >& imgpt0 = c == 2 ? imagePoints2 : imagePoints3;
-        
+
         imgpt.clear();
         imgpt_right.clear();
         int N = 0;
-        
+
         for( i = 0; i < (int)std::min(imagePoints1.size(), imgpt0.size()); i++ )
             if( !imagePoints1.empty() && !imgpt0[i].empty() )
             {
@@ -123,13 +123,13 @@ static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
                 imgpt_right.push_back(imgpt0[i]);
                 N += (int)imgpt0[i].size();
             }
-        
+
         if( imgpt.size() < 3 )
         {
             printf("Error: not enough shared views for cameras 1 and %d\n", c);
             return false;
         }
-        
+
         objpt.resize(imgpt.size(),objpt[0]);
         Mat cameraMatrix = c == 2 ? cameraMatrix2 : cameraMatrix3;
         Mat distCoeffs = c == 2 ? distCoeffs2 : distCoeffs3;
@@ -151,7 +151,7 @@ static bool run3Calibration( vector<vector<Point2f> > imagePoints1,
             R13 = R; T13 = T;
         }
     }
-    
+
     return true;
 }
 
@@ -179,17 +179,17 @@ int main( int argc, char** argv )
     float squareSize = 1.f, aspectRatio = 1.f;
     const char* outputFilename = "out_camera_data.yml";
     const char* inputFilename = 0;
-    
+
     vector<vector<Point2f> > imgpt[3];
     vector<string> imageList;
-    
+
     if(argc < 2)
     {
-       help();
-       return 1;
+        help();
+        return 1;
     }
 
-    
+
     for( i = 1; i < argc; i++ )
     {
         const char* s = argv[i];
@@ -229,11 +229,11 @@ int main( int argc, char** argv )
         else if( s[0] != '-' )
         {
             inputFilename = s;
-               }
+        }
         else
             return fprintf( stderr, "Unknown option %s", s ), -1;
     }
-    
+
     if( !inputFilename ||
        !readStringList(inputFilename, imageList) ||
        imageList.size() == 0 || imageList.size() % 3 != 0 )
@@ -241,7 +241,7 @@ int main( int argc, char** argv )
         printf("Error: the input image list is not specified, or can not be read, or the number of files is not divisible by 3\n");
         return -1;
     }
-    
+
     Mat view, viewGray;
     Mat cameraMatrix[3], distCoeffs[3], R[3], P[3], R12, T12;
     for( k = 0; k < 3; k++ )
@@ -252,13 +252,13 @@ int main( int argc, char** argv )
         distCoeffs[k] = Mat_<double>::zeros(5,1);
     }
     Mat R13=Mat_<double>::eye(3,3), T13=Mat_<double>::zeros(3,1);
-    
+
     FileStorage fs;
     namedWindow( "Image View", 0 );
-    
+
     for( k = 0; k < 3; k++ )
         imgpt[k].resize(imageList.size()/3);
-    
+
     for( i = 0; i < (int)(imageList.size()/3); i++ )
     {
         for( k = 0; k < 3; k++ )
@@ -266,14 +266,14 @@ int main( int argc, char** argv )
             int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
             printf("%s\n", imageList[i*3+k].c_str());
             view = imread(imageList[i*3+k], 1);
-            
+
             if(view.data)
             {
                 vector<Point2f> ptvec;
                 imageSize = view.size();
                 cvtColor(view, viewGray, CV_BGR2GRAY);
                 bool found = findChessboardCorners( view, boardSize, ptvec, CV_CALIB_CB_ADAPTIVE_THRESH );
-            
+
                 drawChessboardCorners( view, boardSize, Mat(ptvec), found );
                 if( found )
                 {
@@ -287,36 +287,36 @@ int main( int argc, char** argv )
             }
         }
     }
-    
+
     printf("Running calibration ...\n");
-    
+
     run3Calibration(imgpt[0], imgpt[1], imgpt[2], imageSize,
                     boardSize, squareSize, aspectRatio, flags|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5,
                     cameraMatrix[0], distCoeffs[0],
                     cameraMatrix[1], distCoeffs[1],
                     cameraMatrix[2], distCoeffs[2],
                     R12, T12, R13, T13);
-        
+
     fs.open(outputFilename, CV_STORAGE_WRITE);
-    
+
     fs << "cameraMatrix1" << cameraMatrix[0];
     fs << "cameraMatrix2" << cameraMatrix[1];
     fs << "cameraMatrix3" << cameraMatrix[2];
-    
+
     fs << "distCoeffs1" << distCoeffs[0];
     fs << "distCoeffs2" << distCoeffs[1];
     fs << "distCoeffs3" << distCoeffs[2];
-    
+
     fs << "R12" << R12;
     fs << "T12" << T12;
     fs << "R13" << R13;
     fs << "T13" << T13;
-    
+
     fs << "imageWidth" << imageSize.width;
     fs << "imageHeight" << imageSize.height;
-    
+
     Mat Q;
-    
+
     // step 3: find rectification transforms
     double ratio = rectify3Collinear(cameraMatrix[0], distCoeffs[0], cameraMatrix[1],
              distCoeffs[1], cameraMatrix[2], distCoeffs[2],
@@ -325,27 +325,27 @@ int main( int argc, char** argv )
              R[0], R[1], R[2], P[0], P[1], P[2], Q, -1.,
              imageSize, 0, 0, CV_CALIB_ZERO_DISPARITY);
     Mat map1[3], map2[3];
-    
+
     fs << "R1" << R[0];
     fs << "R2" << R[1];
     fs << "R3" << R[2];
-    
+
     fs << "P1" << P[0];
     fs << "P2" << P[1];
     fs << "P3" << P[2];
-    
+
     fs << "disparityRatio" << ratio;
     fs.release();
-    
+
     printf("Disparity ratio = %g\n", ratio);
-    
+
     for( k = 0; k < 3; k++ )
         initUndistortRectifyMap(cameraMatrix[k], distCoeffs[k], R[k], P[k], imageSize, CV_16SC2, map1[k], map2[k]);
-    
+
     Mat canvas(imageSize.height, imageSize.width*3, CV_8UC3), small_canvas;
     destroyWindow("view");
     canvas = Scalar::all(0);
-    
+
     for( i = 0; i < (int)(imageList.size()/3); i++ )
     {
         canvas = Scalar::all(0);
@@ -354,10 +354,10 @@ int main( int argc, char** argv )
             int k1 = k == 0 ? 2 : k == 1 ? 0 : 1;
             int k2 = k == 0 ? 1 : k == 1 ? 0 : 2;
             view = imread(imageList[i*3+k], 1);
-            
+
             if(!view.data)
                 continue;
-            
+
             Mat rview = canvas.colRange(k2*imageSize.width, (k2+1)*imageSize.width);
             remap(view, rview, map1[k1], map2[k1], CV_INTER_LINEAR);
         }
@@ -370,6 +370,6 @@ int main( int argc, char** argv )
         if( c == 27 || c == 'q' || c == 'Q' )
             break;
     }
-    
+
     return 0;
 }
index 7dcb99b..bfae1eb 100644 (file)
 
 #include "opencv2/opencv.hpp"
 
-void help(std::string errorMessage)
+static void help(std::string errorMessage)
 {
-       std::cout<<"Program init error : "<<errorMessage<<std::endl;
-       std::cout<<"\nProgram call procedure : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping [OpenEXR image to process]"<<std::endl;
-       std::cout<<"\t[OpenEXR image to process] : the input HDR image to process, must be an OpenEXR format, see http://www.openexr.com/ to get some samples or create your own using camera bracketing and Photoshop or equivalent software for OpenEXR image synthesis"<<std::endl;
-       std::cout<<"\nExamples:"<<std::endl;
-       std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping memorial.exr"<<std::endl;
+    std::cout<<"Program init error : "<<errorMessage<<std::endl;
+    std::cout<<"\nProgram call procedure : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping [OpenEXR image to process]"<<std::endl;
+    std::cout<<"\t[OpenEXR image to process] : the input HDR image to process, must be an OpenEXR format, see http://www.openexr.com/ to get some samples or create your own using camera bracketing and Photoshop or equivalent software for OpenEXR image synthesis"<<std::endl;
+    std::cout<<"\nExamples:"<<std::endl;
+    std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping memorial.exr"<<std::endl;
 }
 
 // simple procedure for 1D curve tracing
-void drawPlot(const cv::Mat curve, const std::string figureTitle, const int lowerLimit, const int upperLimit)
+static void drawPlot(const cv::Mat curve, const std::string figureTitle, const int lowerLimit, const int upperLimit)
 {
-       //std::cout<<"curve size(h,w) = "<<curve.size().height<<", "<<curve.size().width<<std::endl;
-       cv::Mat displayedCurveImage = cv::Mat::ones(200, curve.size().height, CV_8U);
-
-       cv::Mat windowNormalizedCurve;
-       normalize(curve, windowNormalizedCurve, 0, 200, CV_MINMAX, CV_32F);
-
-       displayedCurveImage = cv::Scalar::all(255); // set a white background
-       int binW = cvRound((double)displayedCurveImage.cols/curve.size().height);
-
-       for( int i = 0; i < curve.size().height; i++ )
-               rectangle( displayedCurveImage, cv::Point(i*binW, displayedCurveImage.rows),
-                               cv::Point((i+1)*binW, displayedCurveImage.rows - cvRound(windowNormalizedCurve.at<float>(i))),
-                               cv::Scalar::all(0), -1, 8, 0 );
-       rectangle( displayedCurveImage, cv::Point(0, 0),
-                       cv::Point((lowerLimit)*binW, 200),
-                       cv::Scalar::all(128), -1, 8, 0 );
-       rectangle( displayedCurveImage, cv::Point(displayedCurveImage.cols, 0),
-                       cv::Point((upperLimit)*binW, 200),
-                       cv::Scalar::all(128), -1, 8, 0 );
-
-       cv::imshow(figureTitle, displayedCurveImage);
+    //std::cout<<"curve size(h,w) = "<<curve.size().height<<", "<<curve.size().width<<std::endl;
+    cv::Mat displayedCurveImage = cv::Mat::ones(200, curve.size().height, CV_8U);
+
+    cv::Mat windowNormalizedCurve;
+    normalize(curve, windowNormalizedCurve, 0, 200, CV_MINMAX, CV_32F);
+
+    displayedCurveImage = cv::Scalar::all(255); // set a white background
+    int binW = cvRound((double)displayedCurveImage.cols/curve.size().height);
+
+    for( int i = 0; i < curve.size().height; i++ )
+        rectangle( displayedCurveImage, cv::Point(i*binW, displayedCurveImage.rows),
+                cv::Point((i+1)*binW, displayedCurveImage.rows - cvRound(windowNormalizedCurve.at<float>(i))),
+                cv::Scalar::all(0), -1, 8, 0 );
+    rectangle( displayedCurveImage, cv::Point(0, 0),
+            cv::Point((lowerLimit)*binW, 200),
+            cv::Scalar::all(128), -1, 8, 0 );
+    rectangle( displayedCurveImage, cv::Point(displayedCurveImage.cols, 0),
+            cv::Point((upperLimit)*binW, 200),
+            cv::Scalar::all(128), -1, 8, 0 );
+
+    cv::imshow(figureTitle, displayedCurveImage);
 }
 /*
  * objective : get the gray level map of the input image and rescale it to the range [0-255]
- */void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const float histogramClippingLimit)
+ */
+ static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const float histogramClippingLimit)
  {
 
-        // adjust output matrix wrt the input size but single channel
-        std::cout<<"Input image rescaling with histogram edges cutting (in order to eliminate bad pixels created during the HDR image creation) :"<<std::endl;
-        //std::cout<<"=> image size (h,w,channels) = "<<inputMat.size().height<<", "<<inputMat.size().width<<", "<<inputMat.channels()<<std::endl;
-        //std::cout<<"=> pixel coding (nbchannel, bytes per channel) = "<<inputMat.elemSize()/inputMat.elemSize1()<<", "<<inputMat.elemSize1()<<std::endl;
-
-        // rescale between 0-255, keeping floating point values
-        cv::normalize(inputMat, outputMat, 0.0, 255.0, cv::NORM_MINMAX);
-
-        // extract a 8bit image that will be used for histogram edge cut
-        cv::Mat intGrayImage;
-        if (inputMat.channels()==1)
-        {
-                outputMat.convertTo(intGrayImage, CV_8U);
-        }else
-        {
-                cv::Mat rgbIntImg;
-                outputMat.convertTo(rgbIntImg, CV_8UC3);
-                cvtColor(rgbIntImg, intGrayImage, CV_BGR2GRAY);
-        }
-
-        // get histogram density probability in order to cut values under above edges limits (here 5-95%)... usefull for HDR pixel errors cancellation
-        cv::Mat dst, hist;
-        int histSize = 256;
-        calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
-        cv::Mat normalizedHist;
-        normalize(hist, normalizedHist, 1, 0, cv::NORM_L1, CV_32F); // normalize histogram so that its sum equals 1
-
-        double min_val, max_val;
-        CvMat histArr(normalizedHist);
-        cvMinMaxLoc(&histArr, &min_val, &max_val);
-        //std::cout<<"Hist max,min = "<<max_val<<", "<<min_val<<std::endl;
-
-        // compute density probability
-        cv::Mat denseProb=cv::Mat::zeros(normalizedHist.size(), CV_32F);
-        denseProb.at<float>(0)=normalizedHist.at<float>(0);
-        int histLowerLimit=0, histUpperLimit=0;
-        for (int i=1;i<normalizedHist.size().height;++i)
-        {
-                denseProb.at<float>(i)=denseProb.at<float>(i-1)+normalizedHist.at<float>(i);
-                //std::cout<<normalizedHist.at<float>(i)<<", "<<denseProb.at<float>(i)<<std::endl;
-                if ( denseProb.at<float>(i)<histogramClippingLimit)
-                        histLowerLimit=i;
-                if ( denseProb.at<float>(i)<1-histogramClippingLimit)
-                        histUpperLimit=i;
-        }
-        // deduce min and max admitted gray levels
-        float minInputValue = (float)histLowerLimit/histSize*255;
-        float maxInputValue = (float)histUpperLimit/histSize*255;
-
-        std::cout<<"=> Histogram limits "
-                        <<"\n\t"<<histogramClippingLimit*100<<"% index = "<<histLowerLimit<<" => normalizedHist value = "<<denseProb.at<float>(histLowerLimit)<<" => input gray level = "<<minInputValue
-                        <<"\n\t"<<(1-histogramClippingLimit)*100<<"% index = "<<histUpperLimit<<" => normalizedHist value = "<<denseProb.at<float>(histUpperLimit)<<" => input gray level = "<<maxInputValue
-                        <<std::endl;
-        //drawPlot(denseProb, "input histogram density probability", histLowerLimit, histUpperLimit);
-        drawPlot(normalizedHist, "input histogram", histLowerLimit, histUpperLimit);
-
-        // rescale image range [minInputValue-maxInputValue] to [0-255]
-        outputMat-=minInputValue;
-        outputMat*=255.0/(maxInputValue-minInputValue);
-        // cut original histogram and back project to original image
-        cv::threshold( outputMat, outputMat, 255.0, 255.0, 2 ); //THRESH_TRUNC, clips values above 255
-        cv::threshold( outputMat, outputMat, 0.0, 0.0, 3 ); //THRESH_TOZERO, clips values under 0
+     // adjust output matrix wrt the input size but single channel
+     std::cout<<"Input image rescaling with histogram edges cutting (in order to eliminate bad pixels created during the HDR image creation) :"<<std::endl;
+     //std::cout<<"=> image size (h,w,channels) = "<<inputMat.size().height<<", "<<inputMat.size().width<<", "<<inputMat.channels()<<std::endl;
+     //std::cout<<"=> pixel coding (nbchannel, bytes per channel) = "<<inputMat.elemSize()/inputMat.elemSize1()<<", "<<inputMat.elemSize1()<<std::endl;
+
+     // rescale between 0-255, keeping floating point values
+     cv::normalize(inputMat, outputMat, 0.0, 255.0, cv::NORM_MINMAX);
+
+     // extract a 8bit image that will be used for histogram edge cut
+     cv::Mat intGrayImage;
+     if (inputMat.channels()==1)
+     {
+         outputMat.convertTo(intGrayImage, CV_8U);
+     }else
+     {
+         cv::Mat rgbIntImg;
+         outputMat.convertTo(rgbIntImg, CV_8UC3);
+         cvtColor(rgbIntImg, intGrayImage, CV_BGR2GRAY);
+     }
+
+     // get histogram density probability in order to cut values below/above the edge limits (here 5-95%)... useful for HDR pixel error cancellation
+     cv::Mat dst, hist;
+     int histSize = 256;
+     calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
+     cv::Mat normalizedHist;
+     normalize(hist, normalizedHist, 1, 0, cv::NORM_L1, CV_32F); // normalize histogram so that its sum equals 1
+
+     double min_val, max_val;
+     CvMat histArr(normalizedHist);
+     cvMinMaxLoc(&histArr, &min_val, &max_val);
+     //std::cout<<"Hist max,min = "<<max_val<<", "<<min_val<<std::endl;
+
+     // compute density probability
+     cv::Mat denseProb=cv::Mat::zeros(normalizedHist.size(), CV_32F);
+     denseProb.at<float>(0)=normalizedHist.at<float>(0);
+     int histLowerLimit=0, histUpperLimit=0;
+     for (int i=1;i<normalizedHist.size().height;++i)
+     {
+         denseProb.at<float>(i)=denseProb.at<float>(i-1)+normalizedHist.at<float>(i);
+         //std::cout<<normalizedHist.at<float>(i)<<", "<<denseProb.at<float>(i)<<std::endl;
+         if ( denseProb.at<float>(i)<histogramClippingLimit)
+             histLowerLimit=i;
+         if ( denseProb.at<float>(i)<1-histogramClippingLimit)
+             histUpperLimit=i;
+     }
+     // deduce min and max admitted gray levels
+     float minInputValue = (float)histLowerLimit/histSize*255;
+     float maxInputValue = (float)histUpperLimit/histSize*255;
+
+     std::cout<<"=> Histogram limits "
+             <<"\n\t"<<histogramClippingLimit*100<<"% index = "<<histLowerLimit<<" => normalizedHist value = "<<denseProb.at<float>(histLowerLimit)<<" => input gray level = "<<minInputValue
+             <<"\n\t"<<(1-histogramClippingLimit)*100<<"% index = "<<histUpperLimit<<" => normalizedHist value = "<<denseProb.at<float>(histUpperLimit)<<" => input gray level = "<<maxInputValue
+             <<std::endl;
+     //drawPlot(denseProb, "input histogram density probability", histLowerLimit, histUpperLimit);
+     drawPlot(normalizedHist, "input histogram", histLowerLimit, histUpperLimit);
+
+     // rescale image range [minInputValue-maxInputValue] to [0-255]
+     outputMat-=minInputValue;
+     outputMat*=255.0/(maxInputValue-minInputValue);
+     // cut original histogram and back project to original image
+     cv::threshold( outputMat, outputMat, 255.0, 255.0, 2 ); //THRESH_TRUNC, clips values above 255
+     cv::threshold( outputMat, outputMat, 0.0, 0.0, 3 ); //THRESH_TOZERO, clips values under 0
 
  }
  // basic callback method for interface management
  cv::Mat inputImage;
  cv::Mat imageInputRescaled;
  int histogramClippingValue;
- void callBack_rescaleGrayLevelMat(int, void*)
+ static void callBack_rescaleGrayLevelMat(int, void*)
  {
-        std::cout<<"Histogram clipping value changed, current value = "<<histogramClippingValue<<std::endl;
-        rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)(histogramClippingValue/100.0));
-        normalize(imageInputRescaled, imageInputRescaled, 0.0, 255.0, cv::NORM_MINMAX);
+     std::cout<<"Histogram clipping value changed, current value = "<<histogramClippingValue<<std::endl;
+     rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)(histogramClippingValue/100.0));
+     normalize(imageInputRescaled, imageInputRescaled, 0.0, 255.0, cv::NORM_MINMAX);
  }
 
  cv::Ptr<cv::Retina> retina;
  int retinaHcellsGain;
  int localAdaptation_photoreceptors, localAdaptation_Gcells;
- void callBack_updateRetinaParams(int, void*)
+ static void callBack_updateRetinaParams(int, void*)
  {
-        retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (float)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
+     retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (float)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
  }
 
  int colorSaturationFactor;
- void callback_saturateColors(int, void*)
+ static void callback_saturateColors(int, void*)
  {
-        retina->setColorSaturation(true, (float)colorSaturationFactor);
+     retina->setColorSaturation(true, (float)colorSaturationFactor);
  }
 
  int main(int argc, char* argv[]) {
-        // welcome message
-        std::cout<<"*********************************************************************************"<<std::endl;
-        std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
-        std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
-        std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
-        std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
-        std::cout<<"* The retina model still have the following properties:"<<std::endl;
-        std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
-        std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
-        std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
-        std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
-        std::cout<<"* for more information, reer to the following papers :"<<std::endl;
-        std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
-        std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
-        std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
-        std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
-        std::cout<<"*********************************************************************************"<<std::endl;
-        std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
-        std::cout<<"*********************************************************************************"<<std::endl;
-        std::cout<<"*** You can use free tools to generate OpenEXR images from images sets   :    ***"<<std::endl;
-        std::cout<<"*** =>  1. take a set of photos from the same viewpoint using bracketing      ***"<<std::endl;
-        std::cout<<"*** =>  2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
-        std::cout<<"*** =>  3. apply tone mapping with this program                               ***"<<std::endl;
-        std::cout<<"*********************************************************************************"<<std::endl;
-
-        // basic input arguments checking
-        if (argc<2)
-        {
-                help("bad number of parameter");
-                return -1;
-        }
-
-        bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
-
-        std::string inputImageName=argv[1];
-
-        //////////////////////////////////////////////////////////////////////////////
-        // checking input media type (still image, video file, live video acquisition)
-        std::cout<<"RetinaDemo: processing image "<<inputImageName<<std::endl;
-        // image processing case
-        // declare the retina input buffer... that will be fed differently in regard of the input media
-        inputImage = cv::imread(inputImageName, -1); // load image in RGB mode
-        std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
-        if (!inputImage.total())
-        {
-           help("could not load image, program end");
-            return -1; 
+     // welcome message
+     std::cout<<"*********************************************************************************"<<std::endl;
+     std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
+     std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
+     std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
+     std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
+     std::cout<<"* The retina model still have the following properties:"<<std::endl;
+     std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
+     std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
+     std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
+     std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
+     std::cout<<"* for more information, reer to the following papers :"<<std::endl;
+     std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
+     std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
+     std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
+     std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
+     std::cout<<"*********************************************************************************"<<std::endl;
+     std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
+     std::cout<<"*********************************************************************************"<<std::endl;
+     std::cout<<"*** You can use free tools to generate OpenEXR images from images sets   :    ***"<<std::endl;
+     std::cout<<"*** =>  1. take a set of photos from the same viewpoint using bracketing      ***"<<std::endl;
+     std::cout<<"*** =>  2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
+     std::cout<<"*** =>  3. apply tone mapping with this program                               ***"<<std::endl;
+     std::cout<<"*********************************************************************************"<<std::endl;
+
+     // basic input arguments checking
+     if (argc<2)
+     {
+         help("bad number of parameter");
+         return -1;
+     }
+
+     bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
+
+     std::string inputImageName=argv[1];
+
+     //////////////////////////////////////////////////////////////////////////////
+     // checking input media type (still image, video file, live video acquisition)
+     std::cout<<"RetinaDemo: processing image "<<inputImageName<<std::endl;
+     // image processing case
+     // declare the retina input buffer... that will be fed differently in regard of the input media
+     inputImage = cv::imread(inputImageName, -1); // load image in RGB mode
+     std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
+     if (!inputImage.total())
+     {
+        help("could not load image, program end");
+            return -1;
          }
-        // rescale between 0 and 1
-        normalize(inputImage, inputImage, 0.0, 1.0, cv::NORM_MINMAX);
-        cv::Mat gammaTransformedImage;
-        cv::pow(inputImage, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
-        imshow("EXR image original image, 16bits=>8bits linear rescaling ", inputImage);
-        imshow("EXR image with basic processing : 16bits=>8bits with gamma correction", gammaTransformedImage);
-        if (inputImage.empty())
-        {
-                help("Input image could not be loaded, aborting");
-                return -1;
-        }
-
-        //////////////////////////////////////////////////////////////////////////////
-        // Program start in a try/catch safety context (Retina may throw errors)
-        try
-        {
-                /* create a retina instance with default parameters setup, uncomment the initialisation you wanna test
-                 * -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
-                 */
-                if (useLogSampling)
+     // rescale between 0 and 1
+     normalize(inputImage, inputImage, 0.0, 1.0, cv::NORM_MINMAX);
+     cv::Mat gammaTransformedImage;
+     cv::pow(inputImage, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
+     imshow("EXR image original image, 16bits=>8bits linear rescaling ", inputImage);
+     imshow("EXR image with basic processing : 16bits=>8bits with gamma correction", gammaTransformedImage);
+     if (inputImage.empty())
+     {
+         help("Input image could not be loaded, aborting");
+         return -1;
+     }
+
+     //////////////////////////////////////////////////////////////////////////////
+     // Program start in a try/catch safety context (Retina may throw errors)
+     try
+     {
+         /* create a retina instance with default parameters setup, uncomment the initialisation you want to test
+          * -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
+          */
+         if (useLogSampling)
                 {
                      retina = new cv::Retina(inputImage.size(),true, cv::RETINA_COLOR_BAYER, true, 2.0, 10.0);
                  }
-                else// -> else allocate "classical" retina :
-                    retina = new cv::Retina(inputImage.size());
-               
-               // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
-               retina->write("RetinaDefaultParameters.xml");
+         else// -> else allocate "classical" retina :
+             retina = new cv::Retina(inputImage.size());
+
+         // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
+         retina->write("RetinaDefaultParameters.xml");
 
                  // desactivate Magnocellular pathway processing (motion information extraction) since it is not usefull here
                  retina->activateMovingContoursProcessing(false);
 
-                // declare retina output buffers
-                cv::Mat retinaOutput_parvo;
-
-                /////////////////////////////////////////////
-                // prepare displays and interactions
-                histogramClippingValue=0; // default value... updated with interface slider
-                //inputRescaleMat = inputImage;
-                //outputRescaleMat = imageInputRescaled;
-                cv::namedWindow("Retina input image (with cut edges histogram for basic pixels error avoidance)",1);
-                cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);
-
-                cv::namedWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", 1);
-                colorSaturationFactor=3;
-                cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);
-
-                retinaHcellsGain=40;
-                cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);
-
-                localAdaptation_photoreceptors=197;
-                localAdaptation_Gcells=190;
-                cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
-                cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);
-
-
-                /////////////////////////////////////////////
-                // apply default parameters of user interaction variables
-                rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)histogramClippingValue/100);
-                retina->setColorSaturation(true,(float)colorSaturationFactor);
-                callBack_updateRetinaParams(1,NULL); // first call for default parameters setup
-
-                // processing loop with stop condition
-                bool continueProcessing=true;
-                while(continueProcessing)
-                {
-                        // run retina filter
-                        retina->run(imageInputRescaled);
-                        // Retrieve and display retina output
-                        retina->getParvo(retinaOutput_parvo);
-                        cv::imshow("Retina input image (with cut edges histogram for basic pixels error avoidance)", imageInputRescaled/255.0);
-                        cv::imshow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", retinaOutput_parvo);
-                        cv::waitKey(10);
-                }
-        }catch(cv::Exception e)
-        {
-                std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
-        }
-
-        // Program end message
-        std::cout<<"Retina demo end"<<std::endl;
-
-        return 0;
+         // declare retina output buffers
+         cv::Mat retinaOutput_parvo;
+
+         /////////////////////////////////////////////
+         // prepare displays and interactions
+         histogramClippingValue=0; // default value... updated with interface slider
+         //inputRescaleMat = inputImage;
+         //outputRescaleMat = imageInputRescaled;
+         cv::namedWindow("Retina input image (with cut edges histogram for basic pixels error avoidance)",1);
+         cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);
+
+         cv::namedWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", 1);
+         colorSaturationFactor=3;
+         cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);
+
+         retinaHcellsGain=40;
+         cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);
+
+         localAdaptation_photoreceptors=197;
+         localAdaptation_Gcells=190;
+         cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
+         cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);
+
+
+         /////////////////////////////////////////////
+         // apply default parameters of user interaction variables
+         rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)histogramClippingValue/100);
+         retina->setColorSaturation(true,(float)colorSaturationFactor);
+         callBack_updateRetinaParams(1,NULL); // first call for default parameters setup
+
+         // processing loop with stop condition
+         bool continueProcessing=true;
+         while(continueProcessing)
+         {
+             // run retina filter
+             retina->run(imageInputRescaled);
+             // Retrieve and display retina output
+             retina->getParvo(retinaOutput_parvo);
+             cv::imshow("Retina input image (with cut edges histogram for basic pixels error avoidance)", imageInputRescaled/255.0);
+             cv::imshow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", retinaOutput_parvo);
+             cv::waitKey(10);
+         }
+     } catch (const cv::Exception& e)
+     {
+         std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
+     }
+
+     // Program end message
+     std::cout<<"Retina demo end"<<std::endl;
+
+     return 0;
  }
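A side note on the clipping step above: cv::threshold is called with the bare type codes 2 and 3, which the in-line comments identify as THRESH_TRUNC and THRESH_TOZERO. The tiny sketch below (not part of the patch) shows the same two calls written with the named constants; the sample matrix is made up for illustration.

    // Same clipping as in rescaleGrayLevelMat, with named threshold types.
    #include <iostream>
    #include "opencv2/opencv.hpp"

    int main()
    {
        cv::Mat m = (cv::Mat_<float>(1, 4) << -10.f, 0.f, 128.f, 300.f);
        cv::threshold(m, m, 255.0, 255.0, cv::THRESH_TRUNC);   // clip values above 255
        cv::threshold(m, m,   0.0,   0.0, cv::THRESH_TOZERO);  // clip values below 0
        std::cout << m << std::endl;                           // now [0, 0, 128, 255]
        return 0;
    }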
 
 
index 134b4c7..0a92428 100644 (file)
@@ -7,7 +7,7 @@
 // Description : HighDynamicRange compression (tone mapping) for image sequences with the help of the Gipsa/Listic's retina in C++, Ansi-style
 // Known issues: the input OpenEXR sequences can have bad computed pixels that should be removed
 //               => a simple method consists of cutting histogram edges (a slider for this on the UI is provided)
-//               => however, in image sequences, this histogramm cut must be done in an elegant way from frame to frame... still not done...     
+//               => however, in image sequences, this histogram cut must be done in an elegant way from frame to frame... still not done...
 //============================================================================
 
 #include <iostream>
 
 #include "opencv2/opencv.hpp"
 
-void help(std::string errorMessage)
+static void help(std::string errorMessage)
 {
-       std::cout<<"Program init error : "<<errorMessage<<std::endl;
-       std::cout<<"\nProgram call procedure : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping [OpenEXR image sequence to process] [OPTIONNAL start frame] [OPTIONNAL end frame]"<<std::endl;
-       std::cout<<"\t[OpenEXR image sequence to process] : std::sprintf style ready prototype filename of the input HDR images to process, must be an OpenEXR format, see http://www.openexr.com/ to get some samples or create your own using camera bracketing and Photoshop or equivalent software for OpenEXR image synthesis"<<std::endl;
-       std::cout<<"\t\t => WARNING : image index number of digits cannot exceed 10"<<std::endl;
-       std::cout<<"\t[start frame] : the starting frame tat should be considered"<<std::endl;
-       std::cout<<"\t[end frame] : the ending frame tat should be considered"<<std::endl;
-       std::cout<<"\nExamples:"<<std::endl;
-       std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping_video memorial%3d.exr 20 45"<<std::endl;
-       std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping_video memorial%3d.exr 20 45 log"<<std::endl;
-       std::cout<<"\t ==> to process images from memorial020d.exr to memorial045d.exr"<<std::endl;
+    std::cout<<"Program init error : "<<errorMessage<<std::endl;
+    std::cout<<"\nProgram call procedure : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping [OpenEXR image sequence to process] [OPTIONNAL start frame] [OPTIONNAL end frame]"<<std::endl;
+    std::cout<<"\t[OpenEXR image sequence to process] : std::sprintf style ready prototype filename of the input HDR images to process, must be an OpenEXR format, see http://www.openexr.com/ to get some samples or create your own using camera bracketing and Photoshop or equivalent software for OpenEXR image synthesis"<<std::endl;
+    std::cout<<"\t\t => WARNING : image index number of digits cannot exceed 10"<<std::endl;
+    std::cout<<"\t[start frame] : the starting frame tat should be considered"<<std::endl;
+    std::cout<<"\t[end frame] : the ending frame tat should be considered"<<std::endl;
+    std::cout<<"\nExamples:"<<std::endl;
+    std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping_video memorial%3d.exr 20 45"<<std::endl;
+    std::cout<<"\t-Image processing : ./OpenEXRimages_HighDynamicRange_Retina_toneMapping_video memorial%3d.exr 20 45 log"<<std::endl;
+    std::cout<<"\t ==> to process images from memorial020d.exr to memorial045d.exr"<<std::endl;
 
 }
 
 // simple procedure for 1D curve tracing
-void drawPlot(const cv::Mat curve, const std::string figureTitle, const int lowerLimit, const int upperLimit)
+static void drawPlot(const cv::Mat curve, const std::string figureTitle, const int lowerLimit, const int upperLimit)
 {
-       //std::cout<<"curve size(h,w) = "<<curve.size().height<<", "<<curve.size().width<<std::endl;
-       cv::Mat displayedCurveImage = cv::Mat::ones(200, curve.size().height, CV_8U);
-
-       cv::Mat windowNormalizedCurve;
-       normalize(curve, windowNormalizedCurve, 0, 200, CV_MINMAX, CV_32F);
-
-       displayedCurveImage = cv::Scalar::all(255); // set a white background
-       int binW = cvRound((double)displayedCurveImage.cols/curve.size().height);
-
-       for( int i = 0; i < curve.size().height; i++ )
-               rectangle( displayedCurveImage, cv::Point(i*binW, displayedCurveImage.rows),
-                               cv::Point((i+1)*binW, displayedCurveImage.rows - cvRound(windowNormalizedCurve.at<float>(i))),
-                               cv::Scalar::all(0), -1, 8, 0 );
-       rectangle( displayedCurveImage, cv::Point(0, 0),
-                       cv::Point((lowerLimit)*binW, 200),
-                       cv::Scalar::all(128), -1, 8, 0 );
-       rectangle( displayedCurveImage, cv::Point(displayedCurveImage.cols, 0),
-                       cv::Point((upperLimit)*binW, 200),
-                       cv::Scalar::all(128), -1, 8, 0 );
-
-       cv::imshow(figureTitle, displayedCurveImage);
+    //std::cout<<"curve size(h,w) = "<<curve.size().height<<", "<<curve.size().width<<std::endl;
+    cv::Mat displayedCurveImage = cv::Mat::ones(200, curve.size().height, CV_8U);
+
+    cv::Mat windowNormalizedCurve;
+    normalize(curve, windowNormalizedCurve, 0, 200, CV_MINMAX, CV_32F);
+
+    displayedCurveImage = cv::Scalar::all(255); // set a white background
+    int binW = cvRound((double)displayedCurveImage.cols/curve.size().height);
+
+    for( int i = 0; i < curve.size().height; i++ )
+        rectangle( displayedCurveImage, cv::Point(i*binW, displayedCurveImage.rows),
+                cv::Point((i+1)*binW, displayedCurveImage.rows - cvRound(windowNormalizedCurve.at<float>(i))),
+                cv::Scalar::all(0), -1, 8, 0 );
+    rectangle( displayedCurveImage, cv::Point(0, 0),
+            cv::Point((lowerLimit)*binW, 200),
+            cv::Scalar::all(128), -1, 8, 0 );
+    rectangle( displayedCurveImage, cv::Point(displayedCurveImage.cols, 0),
+            cv::Point((upperLimit)*binW, 200),
+            cv::Scalar::all(128), -1, 8, 0 );
+
+    cv::imshow(figureTitle, displayedCurveImage);
 }
 
 /*
  * objective : get the gray level map of the input image and rescale it to the range [0-255] if rescale0_255=TRUE, simply trunks else
  */
-void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const float histogramClippingLimit, const bool rescale0_255)
+static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const float histogramClippingLimit, const bool rescale0_255)
  {
-        // adjust output matrix wrt the input size but single channel
-        std::cout<<"Input image rescaling with histogram edges cutting (in order to eliminate bad pixels created during the HDR image creation) :"<<std::endl;
-        //std::cout<<"=> image size (h,w,channels) = "<<inputMat.size().height<<", "<<inputMat.size().width<<", "<<inputMat.channels()<<std::endl;
-        //std::cout<<"=> pixel coding (nbchannel, bytes per channel) = "<<inputMat.elemSize()/inputMat.elemSize1()<<", "<<inputMat.elemSize1()<<std::endl;
-
-        // get min and max values to use afterwards if no 0-255 rescaling is used
-        double maxInput, minInput, histNormRescalefactor=1.f;
-        double histNormOffset=0.f;
-        minMaxLoc(inputMat, &minInput, &maxInput);
-        histNormRescalefactor=255.f/(maxInput-minInput);
-        histNormOffset=minInput;
-        std::cout<<"Hist max,min = "<<maxInput<<", "<<minInput<<" => scale, offset = "<<histNormRescalefactor<<", "<<histNormOffset<<std::endl;
-        // rescale between 0-255, keeping floating point values
-        cv::Mat normalisedImage;
-        cv::normalize(inputMat, normalisedImage, 0.f, 255.f, cv::NORM_MINMAX);
-        if (rescale0_255)
-               normalisedImage.copyTo(outputMat);
-        // extract a 8bit image that will be used for histogram edge cut
-        cv::Mat intGrayImage;
-        if (inputMat.channels()==1)
-        {
-                normalisedImage.convertTo(intGrayImage, CV_8U);
-        }else
-        {
-                cv::Mat rgbIntImg;
-                normalisedImage.convertTo(rgbIntImg, CV_8UC3);
-                cvtColor(rgbIntImg, intGrayImage, CV_BGR2GRAY);
-        }
-
-        // get histogram density probability in order to cut values under above edges limits (here 5-95%)... usefull for HDR pixel errors cancellation
-        cv::Mat dst, hist;
-        int histSize = 256;
-        calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
-        cv::Mat normalizedHist;
-       
-        normalize(hist, normalizedHist, 1.f, 0.f, cv::NORM_L1, CV_32F); // normalize histogram so that its sum equals 1
-
-        // compute density probability
-        cv::Mat denseProb=cv::Mat::zeros(normalizedHist.size(), CV_32F);
-        denseProb.at<float>(0)=normalizedHist.at<float>(0);
-        int histLowerLimit=0, histUpperLimit=0;
-        for (int i=1;i<normalizedHist.size().height;++i)
-        {
-                denseProb.at<float>(i)=denseProb.at<float>(i-1)+normalizedHist.at<float>(i);
-                //std::cout<<normalizedHist.at<float>(i)<<", "<<denseProb.at<float>(i)<<std::endl;
-                if ( denseProb.at<float>(i)<histogramClippingLimit)
-                        histLowerLimit=i;
-                if ( denseProb.at<float>(i)<1.f-histogramClippingLimit)
-                        histUpperLimit=i;
-        }
-        // deduce min and max admitted gray levels
-        float minInputValue = (float)histLowerLimit/histSize*255.f;
-        float maxInputValue = (float)histUpperLimit/histSize*255.f;
-
-        std::cout<<"=> Histogram limits "
-                        <<"\n\t"<<histogramClippingLimit*100.f<<"% index = "<<histLowerLimit<<" => normalizedHist value = "<<denseProb.at<float>(histLowerLimit)<<" => input gray level = "<<minInputValue
-                        <<"\n\t"<<(1.f-histogramClippingLimit)*100.f<<"% index = "<<histUpperLimit<<" => normalizedHist value = "<<denseProb.at<float>(histUpperLimit)<<" => input gray level = "<<maxInputValue
-                        <<std::endl;
-        //drawPlot(denseProb, "input histogram density probability", histLowerLimit, histUpperLimit);
-        drawPlot(normalizedHist, "input histogram", histLowerLimit, histUpperLimit);
-
-       if(rescale0_255) // rescale between 0-255 if asked to
-       {
-               cv::threshold( outputMat, outputMat, maxInputValue, maxInputValue, 2 ); //THRESH_TRUNC, clips values above maxInputValue
-               cv::threshold( outputMat, outputMat, minInputValue, minInputValue, 3 ); //THRESH_TOZERO, clips values under minInputValue
-               // rescale image range [minInputValue-maxInputValue] to [0-255]
-               outputMat-=minInputValue;
-               outputMat*=255.f/(maxInputValue-minInputValue);
-       }else
-       {
-               inputMat.copyTo(outputMat);
-               // update threshold in the initial input image range
-               maxInputValue=(float)((maxInputValue-255.f)/histNormRescalefactor+maxInput);
-               minInputValue=(float)(minInputValue/histNormRescalefactor+minInput);
-               std::cout<<"===> Input Hist clipping values (max,min) = "<<maxInputValue<<", "<<minInputValue<<std::endl;
-               cv::threshold( outputMat, outputMat, maxInputValue, maxInputValue, 2 ); //THRESH_TRUNC, clips values above maxInputValue
-               cv::threshold( outputMat, outputMat, minInputValue, minInputValue, 3 ); //
-       }
+     // adjust output matrix wrt the input size but single channel
+     std::cout<<"Input image rescaling with histogram edge clipping (to eliminate bad pixels created during HDR image creation):"<<std::endl;
+     //std::cout<<"=> image size (h,w,channels) = "<<inputMat.size().height<<", "<<inputMat.size().width<<", "<<inputMat.channels()<<std::endl;
+     //std::cout<<"=> pixel coding (nbchannel, bytes per channel) = "<<inputMat.elemSize()/inputMat.elemSize1()<<", "<<inputMat.elemSize1()<<std::endl;
+
+     // get min and max values to use afterwards if no 0-255 rescaling is used
+     double maxInput, minInput, histNormRescalefactor=1.f;
+     double histNormOffset=0.f;
+     minMaxLoc(inputMat, &minInput, &maxInput);
+     histNormRescalefactor=255.f/(maxInput-minInput);
+     histNormOffset=minInput;
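+     // note: these min-max normalization parameters are kept so that the clipping thresholds
+     // computed on the 0-255 normalized copy can be mapped back to the original input range
+     // (see the non rescale0_255 branch at the end of this function)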
+     std::cout<<"Hist max,min = "<<maxInput<<", "<<minInput<<" => scale, offset = "<<histNormRescalefactor<<", "<<histNormOffset<<std::endl;
+     // rescale between 0-255, keeping floating point values
+     cv::Mat normalisedImage;
+     cv::normalize(inputMat, normalisedImage, 0.f, 255.f, cv::NORM_MINMAX);
+     if (rescale0_255)
+        normalisedImage.copyTo(outputMat);
+     // extract a 8bit image that will be used for histogram edge cut
+     cv::Mat intGrayImage;
+     if (inputMat.channels()==1)
+     {
+         normalisedImage.convertTo(intGrayImage, CV_8U);
+     }else
+     {
+         cv::Mat rgbIntImg;
+         normalisedImage.convertTo(rgbIntImg, CV_8UC3);
+         cvtColor(rgbIntImg, intGrayImage, CV_BGR2GRAY);
+     }
+
+     // get the histogram density probability in order to clip values below/above the edge limits (here 5-95%)... useful for cancelling HDR pixel errors
+     cv::Mat dst, hist;
+     int histSize = 256;
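+     // calcHist below: one input image, channel 0, no mask, 1-D histogram with histSize (256) bins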
+     calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
+     cv::Mat normalizedHist;
+
+     normalize(hist, normalizedHist, 1.f, 0.f, cv::NORM_L1, CV_32F); // normalize histogram so that its sum equals 1
+
+     // compute density probability
+     cv::Mat denseProb=cv::Mat::zeros(normalizedHist.size(), CV_32F);
+     denseProb.at<float>(0)=normalizedHist.at<float>(0);
+     int histLowerLimit=0, histUpperLimit=0;
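+     // accumulate the normalized histogram into a cumulative distribution; histLowerLimit and
+     // histUpperLimit end up as the last bin indexes whose cumulative probability is still below
+     // histogramClippingLimit and 1-histogramClippingLimit respectively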
+     for (int i=1;i<normalizedHist.size().height;++i)
+     {
+         denseProb.at<float>(i)=denseProb.at<float>(i-1)+normalizedHist.at<float>(i);
+         //std::cout<<normalizedHist.at<float>(i)<<", "<<denseProb.at<float>(i)<<std::endl;
+         if ( denseProb.at<float>(i)<histogramClippingLimit)
+             histLowerLimit=i;
+         if ( denseProb.at<float>(i)<1.f-histogramClippingLimit)
+             histUpperLimit=i;
+     }
+     // deduce min and max admitted gray levels
+     float minInputValue = (float)histLowerLimit/histSize*255.f;
+     float maxInputValue = (float)histUpperLimit/histSize*255.f;
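+     // (the clipping bin indexes of the 256-bin histogram are converted back to gray levels on the 0-255 scale)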
+
+     std::cout<<"=> Histogram limits "
+             <<"\n\t"<<histogramClippingLimit*100.f<<"% index = "<<histLowerLimit<<" => normalizedHist value = "<<denseProb.at<float>(histLowerLimit)<<" => input gray level = "<<minInputValue
+             <<"\n\t"<<(1.f-histogramClippingLimit)*100.f<<"% index = "<<histUpperLimit<<" => normalizedHist value = "<<denseProb.at<float>(histUpperLimit)<<" => input gray level = "<<maxInputValue
+             <<std::endl;
+     //drawPlot(denseProb, "input histogram density probability", histLowerLimit, histUpperLimit);
+     drawPlot(normalizedHist, "input histogram", histLowerLimit, histUpperLimit);
+
+    if(rescale0_255) // rescale between 0-255 if asked to
+    {
+        cv::threshold( outputMat, outputMat, maxInputValue, maxInputValue, 2 ); //THRESH_TRUNC, clips values above maxInputValue
+        cv::threshold( outputMat, outputMat, minInputValue, minInputValue, 3 ); //THRESH_TOZERO, clips values under minInputValue
+        // rescale image range [minInputValue-maxInputValue] to [0-255]
+        outputMat-=minInputValue;
+        outputMat*=255.f/(maxInputValue-minInputValue);
+    }else
+    {
+        inputMat.copyTo(outputMat);
+        // update threshold in the initial input image range
+        maxInputValue=(float)((maxInputValue-255.f)/histNormRescalefactor+maxInput);
+        minInputValue=(float)(minInputValue/histNormRescalefactor+minInput);
+        std::cout<<"===> Input Hist clipping values (max,min) = "<<maxInputValue<<", "<<minInputValue<<std::endl;
+        cv::threshold( outputMat, outputMat, maxInputValue, maxInputValue, 2 ); //THRESH_TRUNC, clips values above maxInputValue
+        cv::threshold( outputMat, outputMat, minInputValue, minInputValue, 3 ); //THRESH_TOZERO, clips values under minInputValue
+    }
  }
 
  // basic callback method for interface management
@@ -148,213 +148,213 @@ void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const floa
  float globalRescalefactor=1;
  cv::Scalar globalOffset=0;
  int histogramClippingValue;
- void callBack_rescaleGrayLevelMat(int, void*)
static void callBack_rescaleGrayLevelMat(int, void*)
  {
-        std::cout<<"Histogram clipping value changed, current value = "<<histogramClippingValue<<std::endl;
-       // rescale and process
-       inputImage+=globalOffset;
-       inputImage*=globalRescalefactor;
-       inputImage+=cv::Scalar(50, 50, 50, 50); // WARNING value linked to the hardcoded value (200.0) used in the globalRescalefactor in order to center on the 128 mean value... experimental but... basic compromise
-       rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)histogramClippingValue/100.f, true);
+     std::cout<<"Histogram clipping value changed, current value = "<<histogramClippingValue<<std::endl;
+    // rescale and process
+    inputImage+=globalOffset;
+    inputImage*=globalRescalefactor;
+    inputImage+=cv::Scalar(50, 50, 50, 50); // WARNING value linked to the hardcoded value (200.0) used in the globalRescalefactor in order to center on the 128 mean value... experimental but... basic compromise
+    rescaleGrayLevelMat(inputImage, imageInputRescaled, (float)histogramClippingValue/100.f, true);
 
  }
 
  cv::Ptr<cv::Retina> retina;
  int retinaHcellsGain;
  int localAdaptation_photoreceptors, localAdaptation_Gcells;
- void callBack_updateRetinaParams(int, void*)
static void callBack_updateRetinaParams(int, void*)
  {
-        retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (float)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
+     retina->setupOPLandIPLParvoChannel(true, true, (float)(localAdaptation_photoreceptors/200.0), 0.5f, 0.43f, (float)retinaHcellsGain, 1.f, 7.f, (float)(localAdaptation_Gcells/200.0));
  }
 
  int colorSaturationFactor;
- void callback_saturateColors(int, void*)
static void callback_saturateColors(int, void*)
  {
-        retina->setColorSaturation(true, (float)colorSaturationFactor);
+     retina->setColorSaturation(true, (float)colorSaturationFactor);
  }
 
// loadNewFrame : loads an image according to the filename parameters. It also manages image rescaling/histogram edge clipping (acts differently on the first image, i.e. if firstTimeread=true)
-void loadNewFrame(const std::string filenamePrototype, const int currentFileIndex, const bool firstTimeread)
+static void loadNewFrame(const std::string filenamePrototype, const int currentFileIndex, const bool firstTimeread)
 {
-        char *currentImageName=NULL;
-       currentImageName = (char*)malloc(sizeof(char)*filenamePrototype.size()+10);
-
-       // grab the first frame  
-       sprintf(currentImageName, filenamePrototype.c_str(), currentFileIndex);
-       
-        //////////////////////////////////////////////////////////////////////////////
-        // checking input media type (still image, video file, live video acquisition)
-        std::cout<<"RetinaDemo: reading image : "<<currentImageName<<std::endl;
-        // image processing case
-        // declare the retina input buffer... that will be fed differently in regard of the input media
-        inputImage = cv::imread(currentImageName, -1); // load image in RGB mode
-        std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
-        if (inputImage.empty())
-        {
-           help("could not load image, program end");
-            return;; 
+    char *currentImageName=NULL;
+    currentImageName = (char*)malloc(sizeof(char)*filenamePrototype.size()+10);
+
+    // grab the first frame
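+    // (filenamePrototype is expected to be a printf-style pattern that is expanded with the current frame index)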
+    sprintf(currentImageName, filenamePrototype.c_str(), currentFileIndex);
+
+     //////////////////////////////////////////////////////////////////////////////
+     // checking input media type (still image, video file, live video acquisition)
+     std::cout<<"RetinaDemo: reading image : "<<currentImageName<<std::endl;
+     // image processing case
+     // declare the retina input buffer... that will be fed differently depending on the input media
+     inputImage = cv::imread(currentImageName, -1); // load the image as-is (flag -1 keeps the original depth and channel count)
+     std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
+     if (inputImage.empty())
+     {
+        help("could not load image, program end");
+        free(currentImageName); // avoid leaking the filename buffer on this early-exit path
+        return;
          }
 
-       // rescaling/histogram clipping stage
-       // rescale between 0 and 1
-       // TODO : take care of this step !!! maybe disable of do this in a nicer way ... each successive image should get the same transformation... but it depends on the initial image format
-       double maxInput, minInput;
-       minMaxLoc(inputImage, &minInput, &maxInput);
-       std::cout<<"ORIGINAL IMAGE pixels values range (max,min) : "<<maxInput<<", "<<minInput<<std::endl
+    // rescaling/histogram clipping stage
+    // rescale between 0 and 1
+    // TODO : take care of this step !!! maybe disable or do this in a nicer way ... each successive image should get the same transformation... but it depends on the initial image format
+    double maxInput, minInput;
+    minMaxLoc(inputImage, &minInput, &maxInput);
+    std::cout<<"ORIGINAL IMAGE pixel values range (max,min) : "<<maxInput<<", "<<minInput<<std::endl
 ;if (firstTimeread)
-       {
-               /* the first time, get the pixel values range and rougthly update scaling value
-               in order to center values around 128 and getting a range close to [0-255], 
-               => actually using a little less in order to let some more flexibility in range evolves...
-               */
-               double maxInput, minInput;
-               minMaxLoc(inputImage, &minInput, &maxInput);
-               std::cout<<"FIRST IMAGE pixels values range (max,min) : "<<maxInput<<", "<<minInput<<std::endl;
-               globalRescalefactor=(float)(50.0/(maxInput-minInput)); // less than 255 for flexibility... experimental value to be carefull about
-               double channelOffset = -1.5*minInput;
-               globalOffset= cv::Scalar(channelOffset, channelOffset, channelOffset, channelOffset);   
-       }
-       // call the generic input image rescaling callback      
-       callBack_rescaleGrayLevelMat(1,NULL);
+    {
+        /* the first time, get the pixel values range and roughly update the scaling value
+        in order to center values around 128 and get a range close to [0-255],
+        => actually using a little less in order to leave some more flexibility for the range to evolve...
+        */
+        double maxInput, minInput;
+        minMaxLoc(inputImage, &minInput, &maxInput);
+        std::cout<<"FIRST IMAGE pixel values range (max,min) : "<<maxInput<<", "<<minInput<<std::endl;
+        globalRescalefactor=(float)(50.0/(maxInput-minInput)); // less than 255 for flexibility... experimental value to be careful about
+        double channelOffset = -1.5*minInput;
+        globalOffset= cv::Scalar(channelOffset, channelOffset, channelOffset, channelOffset);
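+        // note: the rescaling callback then applies (img + globalOffset) * globalRescalefactor + 50,
+        // so the observed dynamic range is compressed to roughly 50 gray levels and shifted towards
+        // the middle of the 8bit range before histogram clipping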
+    }
+    // call the generic input image rescaling callback
+    callBack_rescaleGrayLevelMat(1,NULL);
+    free(currentImageName); // release the filename buffer allocated with malloc above
 }
 
  int main(int argc, char* argv[]) {
-        // welcome message
-        std::cout<<"*********************************************************************************"<<std::endl;
-        std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
-        std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
-        std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
-        std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
-        std::cout<<"* The retina model still have the following properties:"<<std::endl;
-        std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
-        std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
-        std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
-        std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
-        std::cout<<"* for more information, reer to the following papers :"<<std::endl;
-        std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
-        std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
-        std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
-        std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
-        std::cout<<"*********************************************************************************"<<std::endl;
-        std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
-        std::cout<<"*********************************************************************************"<<std::endl;
-        std::cout<<"*** You can use free tools to generate OpenEXR images from images sets   :    ***"<<std::endl;
-        std::cout<<"*** =>  1. take a set of photos from the same viewpoint using bracketing      ***"<<std::endl;
-        std::cout<<"*** =>  2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
-        std::cout<<"*** =>  3. apply tone mapping with this program                               ***"<<std::endl;
-        std::cout<<"*********************************************************************************"<<std::endl;
-
-        // basic input arguments checking
-        if (argc<4)
-        {
-                help("bad number of parameter");
-                return -1;
-        }
-
-        bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
-
-        int startFrameIndex=0, endFrameIndex=0, currentFrameIndex=0;
-        sscanf(argv[2], "%d", &startFrameIndex);
-        sscanf(argv[3], "%d", &endFrameIndex);
-        std::string inputImageNamePrototype(argv[1]);
-
-        //////////////////////////////////////////////////////////////////////////////
-        // checking input media type (still image, video file, live video acquisition)
-        std::cout<<"RetinaDemo: setting up system with first image..."<<std::endl;
-        loadNewFrame(inputImageNamePrototype, startFrameIndex, true);
-
-        if (inputImage.empty())
-        {
-           help("could not load image, program end");
-            return -1; 
+     // welcome message
+     std::cout<<"*********************************************************************************"<<std::endl;
+     std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
+     std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
+     std::cout<<"* This demo focuses on demonstrating the dynamic compression capabilities of the model"<<std::endl;
+     std::cout<<"* => the main application is tone mapping of HDR images (i.e. displaying, on an 8bit display, an image coded with more than 8bits (up to 16bits) while keeping details in high and low luminance ranges)"<<std::endl;
+     std::cout<<"* The retina model still has the following properties:"<<std::endl;
+     std::cout<<"* => It applies a spectral whitening (mid-frequency details enhancement)"<<std::endl;
+     std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
+     std::cout<<"* => low frequency luminance reduction (luminance range compression)"<<std::endl;
+     std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
+     std::cout<<"* for more information, refer to the following papers :"<<std::endl;
+     std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
+     std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
+     std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
+     std::cout<<"* => more information and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
+     std::cout<<"*********************************************************************************"<<std::endl;
+     std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
+     std::cout<<"*********************************************************************************"<<std::endl;
+     std::cout<<"*** You can use free tools to generate OpenEXR images from images sets   :    ***"<<std::endl;
+     std::cout<<"*** =>  1. take a set of photos from the same viewpoint using bracketing      ***"<<std::endl;
+     std::cout<<"*** =>  2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
+     std::cout<<"*** =>  3. apply tone mapping with this program                               ***"<<std::endl;
+     std::cout<<"*********************************************************************************"<<std::endl;
+
+     // basic input arguments checking
+     if (argc<4)
+     {
+         help("bad number of parameter");
+         return -1;
+     }
+
+     bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
+
+     int startFrameIndex=0, endFrameIndex=0, currentFrameIndex=0;
+     sscanf(argv[2], "%d", &startFrameIndex);
+     sscanf(argv[3], "%d", &endFrameIndex);
+     std::string inputImageNamePrototype(argv[1]);
+
+     //////////////////////////////////////////////////////////////////////////////
+     // checking input media type (still image, video file, live video acquisition)
+     std::cout<<"RetinaDemo: setting up system with first image..."<<std::endl;
+     loadNewFrame(inputImageNamePrototype, startFrameIndex, true);
+
+     if (inputImage.empty())
+     {
+        help("could not load image, program end");
+        return -1;
          }
 
-        //////////////////////////////////////////////////////////////////////////////
-        // Program start in a try/catch safety context (Retina may throw errors)
-        try
-        {
-                /* create a retina instance with default parameters setup, uncomment the initialisation you wanna test
-                 * -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
-                 */
-                if (useLogSampling)
+     //////////////////////////////////////////////////////////////////////////////
+     // Program start in a try/catch safety context (Retina may throw errors)
+     try
+     {
+         /* create a retina instance with default parameters setup, uncomment the initialisation you want to test
+          * -> if the last parameter is 'log', then activate log sampling (favours foveal vision and subsamples peripheral vision)
+          */
+         if (useLogSampling)
                 {
                      retina = new cv::Retina(inputImage.size(),true, cv::RETINA_COLOR_BAYER, true, 2.0, 10.0);
                  }
-                else// -> else allocate "classical" retina :
-                    retina = new cv::Retina(inputImage.size());
-               
-               // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
-               retina->write("RetinaDefaultParameters.xml");
+         else// -> else allocate "classical" retina :
+             retina = new cv::Retina(inputImage.size());
+
+        // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
+        retina->write("RetinaDefaultParameters.xml");
 
                  // deactivate Magnocellular pathway processing (motion information extraction) since it is not useful here
                  retina->activateMovingContoursProcessing(false);
 
-                // declare retina output buffers
-                cv::Mat retinaOutput_parvo;
-
-                /////////////////////////////////////////////
-                // prepare displays and interactions
-                histogramClippingValue=0; // default value... updated with interface slider
-
-                std::string retinaInputCorrected("Retina input image (with cut edges histogram for basic pixels error avoidance)");
-                cv::namedWindow(retinaInputCorrected,1);
-                cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);
-
-                std::string RetinaParvoWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping");
-                cv::namedWindow(RetinaParvoWindow, 1);
-                colorSaturationFactor=3;
-                cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);
-
-                retinaHcellsGain=40;
-                cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);
-
-                localAdaptation_photoreceptors=197;
-                localAdaptation_Gcells=190;
-                cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
-                cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);
-
-               std::string powerTransformedInput("EXR image with basic processing : 16bits=>8bits with gamma correction");
-
-                /////////////////////////////////////////////
-                // apply default parameters of user interaction variables
-                callBack_updateRetinaParams(1,NULL); // first call for default parameters setup
-                callback_saturateColors(1, NULL);
-
-                // processing loop with stop condition
-                currentFrameIndex=startFrameIndex;
-                while(currentFrameIndex <= endFrameIndex)
-                {
-                        loadNewFrame(inputImageNamePrototype, currentFrameIndex, false);
-
-                        if (inputImage.empty())
-                        {
-                           std::cout<<"Could not load new image (index = "<<currentFrameIndex<<"), program end"<<std::endl;
-                           return -1; 
-                        }
-                       // display input & process standard power transformation        
-                       imshow("EXR image original image, 16bits=>8bits linear rescaling ", imageInputRescaled);
-                       cv::Mat gammaTransformedImage;
-                       cv::pow(imageInputRescaled, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
-                       imshow(powerTransformedInput, gammaTransformedImage);
-                        // run retina filter
-                        retina->run(imageInputRescaled);
-                        // Retrieve and display retina output
-                        retina->getParvo(retinaOutput_parvo);
-                        cv::imshow(retinaInputCorrected, imageInputRescaled/255.f);
-                        cv::imshow(RetinaParvoWindow, retinaOutput_parvo);
-                        cv::waitKey(4);
-                       // jump to next frame                   
-                       ++currentFrameIndex;
-                }
-        }catch(cv::Exception e)
-        {
-                std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
-        }
-
-        // Program end message
-        std::cout<<"Retina demo end"<<std::endl;
-
-        return 0;
+         // declare retina output buffers
+         cv::Mat retinaOutput_parvo;
+
+         /////////////////////////////////////////////
+         // prepare displays and interactions
+         histogramClippingValue=0; // default value... updated with interface slider
+
+         std::string retinaInputCorrected("Retina input image (with cut edges histogram for basic pixels error avoidance)");
+         cv::namedWindow(retinaInputCorrected,1);
+         cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);
+
+         std::string RetinaParvoWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping");
+         cv::namedWindow(RetinaParvoWindow, 1);
+         colorSaturationFactor=3;
+         cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);
+
+         retinaHcellsGain=40;
+         cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);
+
+         localAdaptation_photoreceptors=197;
+         localAdaptation_Gcells=190;
+         cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
+         cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);
+
+        std::string powerTransformedInput("EXR image with basic processing : 16bits=>8bits with gamma correction");
+
+         /////////////////////////////////////////////
+         // apply default parameters of user interaction variables
+         callBack_updateRetinaParams(1,NULL); // first call for default parameters setup
+         callback_saturateColors(1, NULL);
+
+         // processing loop with stop condition
+         currentFrameIndex=startFrameIndex;
+         while(currentFrameIndex <= endFrameIndex)
+         {
+             loadNewFrame(inputImageNamePrototype, currentFrameIndex, false);
+
+             if (inputImage.empty())
+             {
+                std::cout<<"Could not load new image (index = "<<currentFrameIndex<<"), program end"<<std::endl;
+                return -1;
+             }
+            // display input & process standard power transformation
+            imshow("EXR image original image, 16bits=>8bits linear rescaling ", imageInputRescaled);
+            cv::Mat gammaTransformedImage;
+            cv::pow(imageInputRescaled, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
+            imshow(powerTransformedInput, gammaTransformedImage);
+             // run retina filter
+             retina->run(imageInputRescaled);
+             // Retrieve and display retina output
+             retina->getParvo(retinaOutput_parvo);
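+             // imshow expects floating point images in the [0,1] range, hence the division by 255.f below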
+             cv::imshow(retinaInputCorrected, imageInputRescaled/255.f);
+             cv::imshow(RetinaParvoWindow, retinaOutput_parvo);
+             cv::waitKey(4);
+            // jump to next frame
+            ++currentFrameIndex;
+         }
+     }catch(const cv::Exception& e)
+     {
+         std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
+     }
+
+     // Program end message
+     std::cout<<"Retina demo end"<<std::endl;
+
+     return 0;
  }
 
 
index a8f9a50..413f862 100644 (file)
@@ -28,7 +28,7 @@ const string bowImageDescriptorsDir = "/bowImageDescriptors";
 const string svmsDir = "/svms";
 const string plotsDir = "/plots";
 
-void help(char** argv)
+static void help(char** argv)
 {
        cout << "\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. \n"
         << "It shows how to use detectors, descriptors and recognition methods \n"
@@ -57,7 +57,7 @@ void help(char** argv)
 
 
 
-void makeDir( const string& dir )
+static void makeDir( const string& dir )
 {
 #if defined WIN32 || defined _WIN32
     CreateDirectory( dir.c_str(), 0 );
@@ -66,7 +66,7 @@ void makeDir( const string& dir )
 #endif
 }
 
-void makeUsedDirs( const string& rootPath )
+static void makeUsedDirs( const string& rootPath )
 {
     makeDir(rootPath + bowImageDescriptorsDir);
     makeDir(rootPath + svmsDir);
@@ -1356,7 +1356,7 @@ const vector<string>& VocData::getObjectClasses()
 // Protected Functions ------------------------------------
 //---------------------------------------------------------
 
-string getVocName( const string& vocPath )
+static string getVocName( const string& vocPath )
 {
     size_t found = vocPath.rfind( '/' );
     if( found == string::npos )
@@ -2047,7 +2047,7 @@ struct SVMTrainParamsExt
     bool balanceClasses;    // Balance class weights by number of samples in each (if true cSvmTrainTargetRatio is ignored).
 };
 
-void readUsedParams( const FileNode& fn, string& vocName, DDMParams& ddmParams, VocabTrainParams& vocabTrainParams, SVMTrainParamsExt& svmTrainParamsExt )
+static void readUsedParams( const FileNode& fn, string& vocName, DDMParams& ddmParams, VocabTrainParams& vocabTrainParams, SVMTrainParamsExt& svmTrainParamsExt )
 {
     fn["vocName"] >> vocName;
 
@@ -2063,7 +2063,7 @@ void readUsedParams( const FileNode& fn, string& vocName, DDMParams& ddmParams,
     svmTrainParamsExt.read( currFn );
 }
 
-void writeUsedParams( FileStorage& fs, const string& vocName, const DDMParams& ddmParams, const VocabTrainParams& vocabTrainParams, const SVMTrainParamsExt& svmTrainParamsExt )
+static void writeUsedParams( FileStorage& fs, const string& vocName, const DDMParams& ddmParams, const VocabTrainParams& vocabTrainParams, const SVMTrainParamsExt& svmTrainParamsExt )
 {
     fs << "vocName" << vocName;
 
@@ -2080,7 +2080,7 @@ void writeUsedParams( FileStorage& fs, const string& vocName, const DDMParams& d
     fs << "}";
 }
 
-void printUsedParams( const string& vocPath, const string& resDir,
+static void printUsedParams( const string& vocPath, const string& resDir,
                       const DDMParams& ddmParams, const VocabTrainParams& vocabTrainParams,
                       const SVMTrainParamsExt& svmTrainParamsExt )
 {
@@ -2094,7 +2094,7 @@ void printUsedParams( const string& vocPath, const string& resDir,
     cout << "----------------------------------------------------------------" << endl << endl;
 }
 
-bool readVocabulary( const string& filename, Mat& vocabulary )
+static bool readVocabulary( const string& filename, Mat& vocabulary )
 {
     cout << "Reading vocabulary...";
     FileStorage fs( filename, FileStorage::READ );
@@ -2107,7 +2107,7 @@ bool readVocabulary( const string& filename, Mat& vocabulary )
     return false;
 }
 
-bool writeVocabulary( const string& filename, const Mat& vocabulary )
+static bool writeVocabulary( const string& filename, const Mat& vocabulary )
 {
     cout << "Saving vocabulary..." << endl;
     FileStorage fs( filename, FileStorage::WRITE );
@@ -2119,7 +2119,7 @@ bool writeVocabulary( const string& filename, const Mat& vocabulary )
     return false;
 }
 
-Mat trainVocabulary( const string& filename, VocData& vocData, const VocabTrainParams& trainParams,
+static Mat trainVocabulary( const string& filename, VocData& vocData, const VocabTrainParams& trainParams,
                      const Ptr<FeatureDetector>& fdetector, const Ptr<DescriptorExtractor>& dextractor )
 {
     Mat vocabulary;
@@ -2209,7 +2209,7 @@ Mat trainVocabulary( const string& filename, VocData& vocData, const VocabTrainP
     return vocabulary;
 }
 
-bool readBowImageDescriptor( const string& file, Mat& bowImageDescriptor )
+static bool readBowImageDescriptor( const string& file, Mat& bowImageDescriptor )
 {
     FileStorage fs( file, FileStorage::READ );
     if( fs.isOpened() )
@@ -2220,7 +2220,7 @@ bool readBowImageDescriptor( const string& file, Mat& bowImageDescriptor )
     return false;
 }
 
-bool writeBowImageDescriptor( const string& file, const Mat& bowImageDescriptor )
+static bool writeBowImageDescriptor( const string& file, const Mat& bowImageDescriptor )
 {
     FileStorage fs( file, FileStorage::WRITE );
     if( fs.isOpened() )
@@ -2232,7 +2232,7 @@ bool writeBowImageDescriptor( const string& file, const Mat& bowImageDescriptor
 }
 
 // Load in the bag of words vectors for a set of images, from file if possible
-void calculateImageDescriptors( const vector<ObdImage>& images, vector<Mat>& imageDescriptors,
+static void calculateImageDescriptors( const vector<ObdImage>& images, vector<Mat>& imageDescriptors,
                                 Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
                                 const string& resPath )
 {
@@ -2276,7 +2276,7 @@ void calculateImageDescriptors( const vector<ObdImage>& images, vector<Mat>& ima
     }
 }
 
-void removeEmptyBowImageDescriptors( vector<ObdImage>& images, vector<Mat>& bowImageDescriptors,
+static void removeEmptyBowImageDescriptors( vector<ObdImage>& images, vector<Mat>& bowImageDescriptors,
                                      vector<char>& objectPresent )
 {
     CV_Assert( !images.empty() );
@@ -2293,7 +2293,7 @@ void removeEmptyBowImageDescriptors( vector<ObdImage>& images, vector<Mat>& bowI
     }
 }
 
-void removeBowImageDescriptorsByCount( vector<ObdImage>& images, vector<Mat> bowImageDescriptors, vector<char> objectPresent,
+static void removeBowImageDescriptorsByCount( vector<ObdImage>& images, vector<Mat> bowImageDescriptors, vector<char> objectPresent,
                                        const SVMTrainParamsExt& svmParamsExt, int descsToDelete )
 {
     RNG& rng = theRNG();
@@ -2325,7 +2325,7 @@ void removeBowImageDescriptorsByCount( vector<ObdImage>& images, vector<Mat> bow
     CV_Assert( bowImageDescriptors.size() == objectPresent.size() );
 }
 
-void setSVMParams( CvSVMParams& svmParams, CvMat& class_wts_cv, const Mat& responses, bool balanceClasses )
+static void setSVMParams( CvSVMParams& svmParams, CvMat& class_wts_cv, const Mat& responses, bool balanceClasses )
 {
     int pos_ex = countNonZero(responses == 1);
     int neg_ex = countNonZero(responses == -1);
@@ -2354,7 +2354,7 @@ void setSVMParams( CvSVMParams& svmParams, CvMat& class_wts_cv, const Mat& respo
     }
 }
 
-void setSVMTrainAutoParams( CvParamGrid& c_grid, CvParamGrid& gamma_grid,
+static void setSVMTrainAutoParams( CvParamGrid& c_grid, CvParamGrid& gamma_grid,
                             CvParamGrid& p_grid, CvParamGrid& nu_grid,
                             CvParamGrid& coef_grid, CvParamGrid& degree_grid )
 {
@@ -2375,7 +2375,7 @@ void setSVMTrainAutoParams( CvParamGrid& c_grid, CvParamGrid& gamma_grid,
     degree_grid.step = 0;
 }
 
-void trainSVMClassifier( CvSVM& svm, const SVMTrainParamsExt& svmParamsExt, const string& objClassName, VocData& vocData,
+static void trainSVMClassifier( CvSVM& svm, const SVMTrainParamsExt& svmParamsExt, const string& objClassName, VocData& vocData,
                          Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
                          const string& resPath )
 {
@@ -2450,7 +2450,7 @@ void trainSVMClassifier( CvSVM& svm, const SVMTrainParamsExt& svmParamsExt, cons
     }
 }
 
-void computeConfidences( CvSVM& svm, const string& objClassName, VocData& vocData,
+static void computeConfidences( CvSVM& svm, const string& objClassName, VocData& vocData,
                          Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
                          const string& resPath )
 {
@@ -2491,7 +2491,7 @@ void computeConfidences( CvSVM& svm, const string& objClassName, VocData& vocDat
     cout << "---------------------------------------------------------------" << endl;
 }
 
-void computeGnuPlotOutput( const string& resPath, const string& objClassName, VocData& vocData )
+static void computeGnuPlotOutput( const string& resPath, const string& objClassName, VocData& vocData )
 {
     vector<float> precision, recall;
     float ap;
index c4ce206..ffb2382 100644 (file)
@@ -7,7 +7,7 @@
 using namespace std;
 using namespace cv;
 
-void help()
+static void help()
 {
  printf("\nDo background segmentation, especially demonstrating the use of cvUpdateBGStatModel().\n"
 "Learns the background at the start and then segments.\n"
index 67d1764..291da19 100644 (file)
@@ -16,7 +16,7 @@ using namespace cv;
 using namespace std;
 
 //Copy (x,y) location of descriptor matches found from KeyPoint data structures into Point2f vectors
-void matches2points(const vector<DMatch>& matches, const vector<KeyPoint>& kpts_train,
+static void matches2points(const vector<DMatch>& matches, const vector<KeyPoint>& kpts_train,
                     const vector<KeyPoint>& kpts_query, vector<Point2f>& pts_train, vector<Point2f>& pts_query)
 {
   pts_train.clear();
@@ -32,7 +32,7 @@ void matches2points(const vector<DMatch>& matches, const vector<KeyPoint>& kpts_
 
 }
 
-double match(const vector<KeyPoint>& /*kpts_train*/, const vector<KeyPoint>& /*kpts_query*/, DescriptorMatcher& matcher,
+static double match(const vector<KeyPoint>& /*kpts_train*/, const vector<KeyPoint>& /*kpts_query*/, DescriptorMatcher& matcher,
             const Mat& train, const Mat& query, vector<DMatch>& matches)
 {
 
@@ -41,7 +41,7 @@ double match(const vector<KeyPoint>& /*kpts_train*/, const vector<KeyPoint>& /*k
   return ((double)getTickCount() - t) / getTickFrequency();
 }
 
-void help()
+static void help()
 {
        cout << "This program shows how to use BRIEF descriptor to match points in features2d" << endl <<
                "It takes in two images, finds keypoints and matches them displaying matches and final homography warped results" << endl <<
index 081c9db..8169c01 100644 (file)
@@ -13,7 +13,7 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
     printf("\nSigh: This program is not complete/will be replaced. \n"
            "So:   Use this just to see hints of how to use things like Rodrigues\n"
@@ -51,17 +51,17 @@ static bool readModelViews( const string& filename, vector<Point3f>& box,
     roiList.resize(0);
     poseList.resize(0);
     box.resize(0);
-    
+
     FileStorage fs(filename, FileStorage::READ);
     if( !fs.isOpened() )
         return false;
     fs["box"] >> box;
-    
+
     FileNode all = fs["views"];
     if( all.type() != FileNode::SEQ )
         return false;
     FileNodeIterator it = all.begin(), it_end = all.end();
-    
+
     for(; it != it_end; ++it)
     {
         FileNode n = *it;
@@ -72,7 +72,7 @@ static bool readModelViews( const string& filename, vector<Point3f>& box,
         poseList.push_back(Vec6f((float)np[0], (float)np[1], (float)np[2],
                                  (float)np[3], (float)np[4], (float)np[5]));
     }
-    
+
     return true;
 }
 
@@ -90,11 +90,11 @@ static void writeModel(const string& modelFileName, const string& modelname,
                        const PointModel& model)
 {
     FileStorage fs(modelFileName, FileStorage::WRITE);
-    
+
     fs << modelname << "{" <<
         "points" << "[:" << model.points << "]" <<
         "idx" << "[:";
-    
+
     for( size_t i = 0; i < model.didx.size(); i++ )
         fs << "[:" << model.didx[i] << "]";
     fs << "]" << "descriptors" << model.descriptors;
@@ -259,7 +259,7 @@ static void findConstrainedCorrespondences(const Mat& _F,
 
 
 static Point3f findRayIntersection(Point3f k1, Point3f b1, Point3f k2, Point3f b2)
-{    
+{
     float a[4], b[2], x[2];
     a[0] = k1.dot(k1);
     a[1] = a[2] = -k1.dot(k2);
@@ -268,7 +268,7 @@ static Point3f findRayIntersection(Point3f k1, Point3f b1, Point3f k2, Point3f b
     b[1] = k2.dot(b1 - b2);
     Mat_<float> A(2, 2, a), B(2, 1, b), X(2, 1, x);
     solve(A, B, X);
-    
+
     float s1 = X.at<float>(0, 0);
     float s2 = X.at<float>(1, 0);
     return (k1*s1 + b1 + k2*s2 + b2)*0.5f;
@@ -281,19 +281,19 @@ static Point3f triangulatePoint(const vector<Point2f>& ps,
                                 const Mat& cameraMatrix)
 {
     Mat_<double> K(cameraMatrix);
-    
+
     /*if( ps.size() > 2 )
     {
         Mat_<double> L(ps.size()*3, 4), U, evalues;
         Mat_<double> P(3,4), Rt(3,4), Rt_part1=Rt.colRange(0,3), Rt_part2=Rt.colRange(3,4);
-        
+
         for( size_t i = 0; i < ps.size(); i++ )
         {
             double x = ps[i].x, y = ps[i].y;
             Rs[i].convertTo(Rt_part1, Rt_part1.type());
             ts[i].convertTo(Rt_part2, Rt_part2.type());
             P = K*Rt;
-            
+
             for( int k = 0; k < 4; k++ )
             {
                 L(i*3, k) = x*P(2,k) - P(0,k);
@@ -301,10 +301,10 @@ static Point3f triangulatePoint(const vector<Point2f>& ps,
                 L(i*3+2, k) = x*P(1,k) - y*P(0,k);
             }
         }
-        
+
         eigen(L.t()*L, evalues, U);
         CV_Assert(evalues(0,0) >= evalues(3,0));
-        
+
         double W = fabs(U(3,3)) > FLT_EPSILON ? 1./U(3,3) : 0;
         return Point3f((float)(U(3,0)*W), (float)(U(3,1)*W), (float)(U(3,2)*W));
     }
@@ -324,7 +324,7 @@ static Point3f triangulatePoint(const vector<Point2f>& ps,
 }
 
 
-void triangulatePoint_test(void)
+static void triangulatePoint_test(void)
 {
     int i, n = 100;
     vector<Point3f> objpt(n), delta1(n), delta2(n);
@@ -370,13 +370,13 @@ struct EqKeypoints
 {
     EqKeypoints(const vector<int>* _dstart, const Set2i* _pairs)
     : dstart(_dstart), pairs(_pairs) {}
-    
+
     bool operator()(const Pair2i& a, const Pair2i& b) const
     {
         return pairs->find(Pair2i(dstart->at(a.first) + a.second,
                                   dstart->at(b.first) + b.second)) != pairs->end();
     }
-    
+
     const vector<int>* dstart;
     const Set2i* pairs;
 };
@@ -423,7 +423,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
         vector<KeyPoint> keypoints;
         detector->detect(gray, keypoints);
         descriptorExtractor->compute(gray, keypoints, descriptorbuf);
-        Point2f roiofs = roiList[i].tl(); 
+        Point2f roiofs = roiList[i].tl();
         for( size_t k = 0; k < keypoints.size(); k++ )
             keypoints[k].pt += roiofs;
         allkeypoints.push_back(keypoints);
@@ -438,7 +438,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
 
         size_t prev = alldescriptorsVec.size();
         size_t delta = buf.rows*buf.cols;
-        alldescriptorsVec.resize(prev + delta); 
+        alldescriptorsVec.resize(prev + delta);
         std::copy(buf.ptr<float>(), buf.ptr<float>() + delta,
                   alldescriptorsVec.begin() + prev);
         dstart.push_back(dstart.back() + (int)keypoints.size());
@@ -514,7 +514,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
 
                 pairsFound++;
 
-                //model.points.push_back(objpt);   
+                //model.points.push_back(objpt);
                 pairs[Pair2i(i1+dstart[i], i2+dstart[j])] = 1;
                 pairs[Pair2i(i2+dstart[j], i1+dstart[i])] = 1;
                 keypointsIdxMap[Pair2i((int)i,i1)] = 1;
@@ -618,7 +618,7 @@ int main(int argc, char** argv)
     const char* intrinsicsFilename = 0;
     const char* modelName = 0;
     const char* detectorName = "SURF";
-    const char* descriptorExtractorName = "SURF"; 
+    const char* descriptorExtractorName = "SURF";
 
     vector<Point3f> modelBox;
     vector<string>  imageList;
index bfa91cd..e425758 100644 (file)
@@ -41,7 +41,7 @@ const char* liveCaptureHelp =
         "  'g' - start capturing images\n"
         "  'u' - switch undistortion on/off\n";
 
-void help()
+static void help()
 {
     printf( "This is a camera calibration sample.\n"
         "Usage: calibration\n"
@@ -88,7 +88,7 @@ static double computeReprojectionErrors(
     int i, totalPoints = 0;
     double totalErr = 0, err;
     perViewErrors.resize(objectPoints.size());
-    
+
     for( i = 0; i < (int)objectPoints.size(); i++ )
     {
         projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i],
@@ -99,14 +99,14 @@ static double computeReprojectionErrors(
         totalErr += err*err;
         totalPoints += n;
     }
-    
+
     return std::sqrt(totalErr/totalPoints);
 }
 
 static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners, Pattern patternType = CHESSBOARD)
 {
     corners.resize(0);
-    
+
     switch(patternType)
     {
       case CHESSBOARD:
@@ -140,21 +140,21 @@ static bool runCalibration( vector<vector<Point2f> > imagePoints,
     cameraMatrix = Mat::eye(3, 3, CV_64F);
     if( flags & CV_CALIB_FIX_ASPECT_RATIO )
         cameraMatrix.at<double>(0,0) = aspectRatio;
-    
+
     distCoeffs = Mat::zeros(8, 1, CV_64F);
-    
+
     vector<vector<Point3f> > objectPoints(1);
     calcChessboardCorners(boardSize, squareSize, objectPoints[0], patternType);
 
     objectPoints.resize(imagePoints.size(),objectPoints[0]);
-    
+
     double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
                     distCoeffs, rvecs, tvecs, flags|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
                     ///*|CV_CALIB_FIX_K3*/|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
     printf("RMS error reported by calibrateCamera: %g\n", rms);
-    
+
     bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
-    
+
     totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints,
                 rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs);
 
@@ -162,7 +162,7 @@ static bool runCalibration( vector<vector<Point2f> > imagePoints,
 }
 
 
-void saveCameraParams( const string& filename,
+static void saveCameraParams( const string& filename,
                        Size imageSize, Size boardSize,
                        float squareSize, float aspectRatio, int flags,
                        const Mat& cameraMatrix, const Mat& distCoeffs,
@@ -172,7 +172,7 @@ void saveCameraParams( const string& filename,
                        double totalAvgErr )
 {
     FileStorage fs( filename, FileStorage::WRITE );
-    
+
     time_t t;
     time( &t );
     struct tm *t2 = localtime( &t );
@@ -180,7 +180,7 @@ void saveCameraParams( const string& filename,
     strftime( buf, sizeof(buf)-1, "%c", t2 );
 
     fs << "calibration_time" << buf;
-    
+
     if( !rvecs.empty() || !reprojErrs.empty() )
         fs << "nframes" << (int)std::max(rvecs.size(), reprojErrs.size());
     fs << "image_width" << imageSize.width;
@@ -188,7 +188,7 @@ void saveCameraParams( const string& filename,
     fs << "board_width" << boardSize.width;
     fs << "board_height" << boardSize.height;
     fs << "square_size" << squareSize;
-    
+
     if( flags & CV_CALIB_FIX_ASPECT_RATIO )
         fs << "aspectRatio" << aspectRatio;
 
@@ -201,7 +201,7 @@ void saveCameraParams( const string& filename,
             flags & CV_CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "" );
         cvWriteComment( *fs, buf, 0 );
     }
-    
+
     fs << "flags" << flags;
 
     fs << "camera_matrix" << cameraMatrix;
@@ -210,7 +210,7 @@ void saveCameraParams( const string& filename,
     fs << "avg_reprojection_error" << totalAvgErr;
     if( !reprojErrs.empty() )
         fs << "per_view_reprojection_errors" << Mat(reprojErrs);
-    
+
     if( !rvecs.empty() && !tvecs.empty() )
     {
         CV_Assert(rvecs[0].type() == tvecs[0].type());
@@ -229,7 +229,7 @@ void saveCameraParams( const string& filename,
         cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
         fs << "extrinsic_parameters" << bigmat;
     }
-    
+
     if( !imagePoints.empty() )
     {
         Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
@@ -259,7 +259,7 @@ static bool readStringList( const string& filename, vector<string>& l )
 }
 
 
-bool runAndSave(const string& outputFilename,
+static bool runAndSave(const string& outputFilename,
                 const vector<vector<Point2f> >& imagePoints,
                 Size imageSize, Size boardSize, Pattern patternType, float squareSize,
                 float aspectRatio, int flags, Mat& cameraMatrix,
@@ -268,14 +268,14 @@ bool runAndSave(const string& outputFilename,
     vector<Mat> rvecs, tvecs;
     vector<float> reprojErrs;
     double totalAvgErr = 0;
-    
+
     bool ok = runCalibration(imagePoints, imageSize, boardSize, patternType, squareSize,
                    aspectRatio, flags, cameraMatrix, distCoeffs,
                    rvecs, tvecs, reprojErrs, totalAvgErr);
     printf("%s. avg reprojection error = %.2f\n",
            ok ? "Calibration succeeded" : "Calibration failed",
            totalAvgErr);
-    
+
     if( ok )
         saveCameraParams( outputFilename, imageSize,
                          boardSize, squareSize, aspectRatio,
@@ -296,7 +296,7 @@ int main( int argc, char** argv )
     Mat cameraMatrix, distCoeffs;
     const char* outputFilename = "out_camera_data.yml";
     const char* inputFilename = 0;
-    
+
     int i, nframes = 10;
     bool writeExtrinsics = false, writePoints = false;
     bool undistortImage = false;
@@ -412,7 +412,7 @@ int main( int argc, char** argv )
     {
         if( !videofile && readStringList(inputFilename, imageList) )
             mode = CAPTURING;
-        else    
+        else
             capture.open(inputFilename);
     }
     else
@@ -420,7 +420,7 @@ int main( int argc, char** argv )
 
     if( !capture.isOpened() && imageList.empty() )
         return fprintf( stderr, "Could not initialize video (%d) capture\n",cameraId ), -2;
-    
+
     if( !imageList.empty() )
         nframes = (int)imageList.size();
 
@@ -433,7 +433,7 @@ int main( int argc, char** argv )
     {
         Mat view, viewGray;
         bool blink = false;
-        
+
         if( capture.isOpened() )
         {
             Mat view0;
@@ -442,7 +442,7 @@ int main( int argc, char** argv )
         }
         else if( i < (int)imageList.size() )
             view = imread(imageList[i], 1);
-        
+
         if(!view.data)
         {
             if( imagePoints.size() > 0 )
@@ -452,14 +452,14 @@ int main( int argc, char** argv )
                            writeExtrinsics, writePoints);
             break;
         }
-        
+
         imageSize = view.size();
 
         if( flipVertical )
             flip( view, view, 0 );
 
         vector<Point2f> pointbuf;
-        cvtColor(view, viewGray, CV_BGR2GRAY); 
+        cvtColor(view, viewGray, CV_BGR2GRAY);
 
         bool found;
         switch( pattern )
@@ -489,14 +489,14 @@ int main( int argc, char** argv )
             prevTimestamp = clock();
             blink = capture.isOpened();
         }
-        
+
         if(found)
             drawChessboardCorners( view, boardSize, Mat(pointbuf), found );
 
         string msg = mode == CAPTURING ? "100/100" :
             mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
         int baseLine = 0;
-        Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);        
+        Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
         Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);
 
         if( mode == CAPTURING )
@@ -524,7 +524,7 @@ int main( int argc, char** argv )
 
         if( (key & 255) == 27 )
             break;
-        
+
         if( key == 'u' && mode == CALIBRATED )
             undistortImage = !undistortImage;
 
@@ -547,14 +547,14 @@ int main( int argc, char** argv )
                 break;
         }
     }
-    
+
     if( !capture.isOpened() && showUndistorted )
     {
         Mat view, rview, map1, map2;
         initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                                 getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
                                 imageSize, CV_16SC2, map1, map2);
-        
+
         for( i = 0; i < (int)imageList.size(); i++ )
         {
             view = imread(imageList[i], 1);
@@ -568,6 +568,6 @@ int main( int argc, char** argv )
                 break;
         }
     }
-                                                
+
     return 0;
 }
index 25149c3..a95113c 100644 (file)
@@ -11,7 +11,7 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
     printf( "\nThis code generates an artificial camera and artificial chessboard images,\n"
             "and then calibrates. It is basically test code for calibration that shows\n"
@@ -26,8 +26,8 @@ namespace cv
 class ChessBoardGenerator
 {
 public:
-    double sensorWidth; 
-    double sensorHeight;     
+    double sensorWidth;
+    double sensorHeight;
     size_t squareEdgePointsNum;
     double min_cos;
     mutable double cov;
@@ -35,14 +35,14 @@ public:
     int rendererResolutionMultiplier;
 
     ChessBoardGenerator(const Size& patternSize = Size(8, 6));
-    Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const;    
+    Mat operator()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const;
     Size cornersSize() const;
 private:
     void generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const;
-    Mat generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, 
-        const Point3f& zero, const Point3f& pb1, const Point3f& pb2, 
+    Mat generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
+        const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
         float sqWidth, float sqHeight, const vector<Point3f>& whole, vector<Point2f>& corners) const;
-    void generateBasis(Point3f& pb1, Point3f& pb2) const;  
+    void generateBasis(Point3f& pb1, Point3f& pb2) const;
     Point3f generateChessBoardCenter(const Mat& camMat, const Size& imgSize) const;
     Mat rvec, tvec;
 };
@@ -55,25 +55,25 @@ const Size brdSize(8, 7);
 const size_t brds_num = 20;
 
 template<class T> ostream& operator<<(ostream& out, const Mat_<T>& mat)
-{    
+{
     for(int j = 0; j < mat.rows; ++j)
         for(int i = 0; i < mat.cols; ++i)
-            out << mat(j, i) << " ";        
+            out << mat(j, i) << " ";
     return out;
 }
 
 
 
 int main()
-{          
-       help();
-    cout << "Initializing background...";    
-    Mat background(imgSize, CV_8UC3);  
-    randu(background, Scalar::all(32), Scalar::all(255));    
+{
+    help();
+    cout << "Initializing background...";
+    Mat background(imgSize, CV_8UC3);
+    randu(background, Scalar::all(32), Scalar::all(255));
     GaussianBlur(background, background, Size(5, 5), 2);
     cout << "Done" << endl;
 
-    cout << "Initializing chess board generator...";    
+    cout << "Initializing chess board generator...";
     ChessBoardGenerator cbg(brdSize);
     cbg.rendererResolutionMultiplier = 4;
     cout << "Done" << endl;
@@ -81,24 +81,24 @@ int main()
     /* camera params */
     Mat_<double> camMat(3, 3);
     camMat << 300., 0., background.cols/2., 0, 300., background.rows/2., 0., 0., 1.;
-    
+
     Mat_<double> distCoeffs(1, 5);
     distCoeffs << 1.2, 0.2, 0., 0., 0.;
-       
-    cout << "Generating chessboards...";    
+
+    cout << "Generating chessboards...";
     vector<Mat> boards(brds_num);
     vector<Point2f> tmp;
     for(size_t i = 0; i < brds_num; ++i)
         cout << (boards[i] = cbg(background, camMat, distCoeffs, tmp), i) << " ";
-    cout << "Done" << endl;    
+    cout << "Done" << endl;
 
     vector<Point3f> chessboard3D;
     for(int j = 0; j < cbg.cornersSize().height; ++j)
         for(int i = 0; i < cbg.cornersSize().width; ++i)
             chessboard3D.push_back(Point3i(i, j, 0));
-    
+
     /* init points */
-    vector< vector<Point3f> > objectPoints;    
+    vector< vector<Point3f> > objectPoints;
     vector< vector<Point2f> > imagePoints;
 
     cout << endl << "Finding chessboards' corners...";
@@ -110,22 +110,22 @@ int main()
         if (found)
         {
             imagePoints.push_back(tmp);
-            objectPoints.push_back(chessboard3D);             
-            cout<< "-found ";                                   
+            objectPoints.push_back(chessboard3D);
+            cout<< "-found ";
         }
         else
-            cout<< "-not-found ";        
+            cout<< "-not-found ";
 
         drawChessboardCorners(boards[i], cbg.cornersSize(), Mat(tmp), found);
         imshow("Current chessboard", boards[i]); waitKey(1000);
     }
     cout << "Done" << endl;
     cvDestroyAllWindows();
-        
+
     Mat camMat_est;
     Mat distCoeffs_est;
     vector<Mat> rvecs, tvecs;
-    
+
     cout << "Calibrating...";
     double rep_err = calibrateCamera(objectPoints, imagePoints, imgSize, camMat_est, distCoeffs_est, rvecs, tvecs);
     cout << "Done" << endl;
@@ -137,7 +137,7 @@ int main()
     cout << "==================================" << endl;
     cout << "Estimated camera matrix:\n" << (Mat_<double>&)camMat_est << endl;
     cout << "Estimated distCoeffs:\n" << (Mat_<double>&)distCoeffs_est << endl;
-        
+
     return 0;
 }
 
@@ -150,18 +150,18 @@ int main()
 
 
 ChessBoardGenerator::ChessBoardGenerator(const Size& _patternSize) : sensorWidth(32), sensorHeight(24),
-    squareEdgePointsNum(200), min_cos(sqrt(2.f)*0.5f), cov(0.5), 
+    squareEdgePointsNum(200), min_cos(sqrt(2.f)*0.5f), cov(0.5),
     patternSize(_patternSize), rendererResolutionMultiplier(4), tvec(Mat::zeros(1, 3, CV_32F))
-{    
+{
     Rodrigues(Mat::eye(3, 3, CV_32F), rvec);
 }
 
 void cv::ChessBoardGenerator::generateEdge(const Point3f& p1, const Point3f& p2, vector<Point3f>& out) const
-{    
-    Point3f step = (p2 - p1) * (1.f/squareEdgePointsNum);    
+{
+    Point3f step = (p2 - p1) * (1.f/squareEdgePointsNum);
     for(size_t n = 0; n < squareEdgePointsNum; ++n)
         out.push_back( p1 + step * (float)n);
-}    
+}
 
 Size cv::ChessBoardGenerator::cornersSize() const
 {
@@ -172,7 +172,7 @@ struct Mult
 {
     float m;
     Mult(int mult) : m((float)mult) {}
-    Point2f operator()(const Point2f& p)const { return p * m; }    
+    Point2f operator()(const Point2f& p)const { return p * m; }
 };
 
 void cv::ChessBoardGenerator::generateBasis(Point3f& pb1, Point3f& pb2) const
@@ -181,39 +181,39 @@ void cv::ChessBoardGenerator::generateBasis(Point3f& pb1, Point3f& pb2) const
 
     Vec3f n;
     for(;;)
-    {        
+    {
         n[0] = rng.uniform(-1.f, 1.f);
         n[1] = rng.uniform(-1.f, 1.f);
-        n[2] = rng.uniform(-1.f, 1.f);        
-        float len = (float)norm(n);    
-        n[0]/=len;  
-        n[1]/=len;  
+        n[2] = rng.uniform(-1.f, 1.f);
+        float len = (float)norm(n);
+        n[0]/=len;
+        n[1]/=len;
         n[2]/=len;
-        
+
         if (fabs(n[2]) > min_cos)
             break;
     }
 
     Vec3f n_temp = n; n_temp[0] += 100;
-    Vec3f b1 = n.cross(n_temp); 
+    Vec3f b1 = n.cross(n_temp);
     Vec3f b2 = n.cross(b1);
     float len_b1 = (float)norm(b1);
-    float len_b2 = (float)norm(b2);    
+    float len_b2 = (float)norm(b2);
 
     pb1 = Point3f(b1[0]/len_b1, b1[1]/len_b1, b1[2]/len_b1);
     pb2 = Point3f(b2[0]/len_b1, b2[1]/len_b2, b2[2]/len_b2);
 }
 
-Mat cv::ChessBoardGenerator::generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, 
-                                                const Point3f& zero, const Point3f& pb1, const Point3f& pb2, 
+Mat cv::ChessBoardGenerator::generageChessBoard(const Mat& bg, const Mat& camMat, const Mat& distCoeffs,
+                                                const Point3f& zero, const Point3f& pb1, const Point3f& pb2,
                                                 float sqWidth, float sqHeight, const vector<Point3f>& whole,
                                                 vector<Point2f>& corners) const
 {
-    vector< vector<Point> > squares_black;    
+    vector< vector<Point> > squares_black;
     for(int i = 0; i < patternSize.width; ++i)
         for(int j = 0; j < patternSize.height; ++j)
-            if ( (i % 2 == 0 && j % 2 == 0) || (i % 2 != 0 && j % 2 != 0) ) 
-            {            
+            if ( (i % 2 == 0 && j % 2 == 0) || (i % 2 != 0 && j % 2 != 0) )
+            {
                 vector<Point3f> pts_square3d;
                 vector<Point2f> pts_square2d;
 
@@ -224,17 +224,17 @@ Mat cv::ChessBoardGenerator::generageChessBoard(const Mat& bg, const Mat& camMat
                 generateEdge(p1, p2, pts_square3d);
                 generateEdge(p2, p3, pts_square3d);
                 generateEdge(p3, p4, pts_square3d);
-                generateEdge(p4, p1, pts_square3d);  
-                
+                generateEdge(p4, p1, pts_square3d);
+
                 projectPoints( Mat(pts_square3d), rvec, tvec, camMat, distCoeffs, pts_square2d);
-                squares_black.resize(squares_black.size() + 1);  
-                vector<Point2f> temp; 
-                approxPolyDP(Mat(pts_square2d), temp, 1.0, true); 
-                transform(temp.begin(), temp.end(), back_inserter(squares_black.back()), Mult(rendererResolutionMultiplier));             
-            }  
+                squares_black.resize(squares_black.size() + 1);
+                vector<Point2f> temp;
+                approxPolyDP(Mat(pts_square2d), temp, 1.0, true);
+                transform(temp.begin(), temp.end(), back_inserter(squares_black.back()), Mult(rendererResolutionMultiplier));
+            }
 
     /* calculate corners */
-    vector<Point3f> corners3d;    
+    vector<Point3f> corners3d;
     for(int j = 0; j < patternSize.height - 1; ++j)
         for(int i = 0; i < patternSize.width - 1; ++i)
             corners3d.push_back(zero + (i + 1) * sqWidth * pb1 + (j + 1) * sqHeight * pb2);
@@ -248,66 +248,66 @@ Mat cv::ChessBoardGenerator::generageChessBoard(const Mat& bg, const Mat& camMat
     generateEdge(whole[2], whole[3], whole3d);
     generateEdge(whole[3], whole[0], whole3d);
     projectPoints( Mat(whole3d), rvec, tvec, camMat, distCoeffs, whole2d);
-    vector<Point2f> temp_whole2d; 
-    approxPolyDP(Mat(whole2d), temp_whole2d, 1.0, true); 
+    vector<Point2f> temp_whole2d;
+    approxPolyDP(Mat(whole2d), temp_whole2d, 1.0, true);
 
     vector< vector<Point > > whole_contour(1);
-    transform(temp_whole2d.begin(), temp_whole2d.end(), 
-        back_inserter(whole_contour.front()), Mult(rendererResolutionMultiplier));    
+    transform(temp_whole2d.begin(), temp_whole2d.end(),
+        back_inserter(whole_contour.front()), Mult(rendererResolutionMultiplier));
 
     Mat result;
     if (rendererResolutionMultiplier == 1)
-    {        
+    {
         result = bg.clone();
-        drawContours(result, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);       
+        drawContours(result, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
         drawContours(result, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
     }
     else
     {
-        Mat tmp;        
+        Mat tmp;
         resize(bg, tmp, bg.size() * rendererResolutionMultiplier);
-        drawContours(tmp, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);       
+        drawContours(tmp, whole_contour, -1, Scalar::all(255), CV_FILLED, CV_AA);
         drawContours(tmp, squares_black, -1, Scalar::all(0), CV_FILLED, CV_AA);
         resize(tmp, result, bg.size(), 0, 0, INTER_AREA);
-    }        
+    }
     return result;
 }
 
 Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const Mat& distCoeffs, vector<Point2f>& corners) const
-{      
+{
     cov = min(cov, 0.8);
     double fovx, fovy, focalLen;
     Point2d principalPoint;
     double aspect;
-    calibrationMatrixValues( camMat, bg.size(), sensorWidth, sensorHeight, 
+    calibrationMatrixValues( camMat, bg.size(), sensorWidth, sensorHeight,
         fovx, fovy, focalLen, principalPoint, aspect);
 
     RNG& rng = theRNG();
 
-    float d1 = static_cast<float>(rng.uniform(0.1, 10.0)); 
+    float d1 = static_cast<float>(rng.uniform(0.1, 10.0));
     float ah = static_cast<float>(rng.uniform(-fovx/2 * cov, fovx/2 * cov) * CV_PI / 180);
-    float av = static_cast<float>(rng.uniform(-fovy/2 * cov, fovy/2 * cov) * CV_PI / 180);        
-    
+    float av = static_cast<float>(rng.uniform(-fovy/2 * cov, fovy/2 * cov) * CV_PI / 180);
+
     Point3f p;
     p.z = cos(ah) * d1;
     p.x = sin(ah) * d1;
-    p.y = p.z * tan(av);  
+    p.y = p.z * tan(av);
 
-    Point3f pb1, pb2;    
+    Point3f pb1, pb2;
     generateBasis(pb1, pb2);
-            
+
     float cbHalfWidth = static_cast<float>(norm(p) * sin( min(fovx, fovy) * 0.5 * CV_PI / 180));
     float cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;
-    
+
     vector<Point3f> pts3d(4);
     vector<Point2f> pts2d(4);
     for(;;)
-    {        
+    {
         pts3d[0] = p + pb1 * cbHalfWidth + cbHalfHeight * pb2;
         pts3d[1] = p + pb1 * cbHalfWidth - cbHalfHeight * pb2;
         pts3d[2] = p - pb1 * cbHalfWidth - cbHalfHeight * pb2;
         pts3d[3] = p - pb1 * cbHalfWidth + cbHalfHeight * pb2;
-        
+
         /* can remake with better perf */
         projectPoints( Mat(pts3d), rvec, tvec, camMat, distCoeffs, pts2d);
 
@@ -315,12 +315,12 @@ Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const
         bool inrect2 = pts2d[1].x < bg.cols && pts2d[1].y < bg.rows && pts2d[1].x > 0 && pts2d[1].y > 0;
         bool inrect3 = pts2d[2].x < bg.cols && pts2d[2].y < bg.rows && pts2d[2].x > 0 && pts2d[2].y > 0;
         bool inrect4 = pts2d[3].x < bg.cols && pts2d[3].y < bg.rows && pts2d[3].x > 0 && pts2d[3].y > 0;
-        
+
         if ( inrect1 && inrect2 && inrect3 && inrect4)
             break;
 
         cbHalfWidth*=0.8f;
-        cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;        
+        cbHalfHeight = cbHalfWidth * patternSize.height / patternSize.width;
     }
 
     cbHalfWidth  *= static_cast<float>(patternSize.width)/(patternSize.width + 1);
@@ -329,7 +329,7 @@ Mat cv::ChessBoardGenerator::operator ()(const Mat& bg, const Mat& camMat, const
     Point3f zero = p - pb1 * cbHalfWidth - cbHalfHeight * pb2;
     float sqWidth  = 2 * cbHalfWidth/patternSize.width;
     float sqHeight = 2 * cbHalfHeight/patternSize.height;
-        
-    return generageChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2, sqWidth, sqHeight,  pts3d, corners);      
+
+    return generageChessBoard(bg, camMat, distCoeffs, zero, pb1, pb2, sqWidth, sqHeight,  pts3d, corners);
 }
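
The edits to the chessboard-generator sample above are formatting only; its core flow is to build one planar 3D corner grid, find corners in every rendered board, and pass both point sets to calibrateCamera(). A condensed sketch of that flow on ordinary image files follows; the board size, square size and file-name pattern are assumptions made for the sketch, not values from the sample.

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cstdio>
#include <vector>
using namespace cv;
using namespace std;

int main()
{
    const Size boardSize(7, 6);           // inner-corner grid, made up for the sketch
    const float squareSize = 1.f;         // arbitrary unit, only scales the extrinsics

    // One 3D corner grid on the Z = 0 plane, reused for every accepted view.
    vector<Point3f> grid;
    for( int j = 0; j < boardSize.height; j++ )
        for( int i = 0; i < boardSize.width; i++ )
            grid.push_back(Point3f(i*squareSize, j*squareSize, 0.f));

    vector<vector<Point2f> > imagePoints;
    vector<vector<Point3f> > objectPoints;
    Size imageSize;
    for( int k = 0; k < 20; k++ )         // placeholder file names board00.png .. board19.png
    {
        char name[64];
        sprintf(name, "board%02d.png", k);
        Mat view = imread(name, 1);
        if( view.empty() )
            continue;
        imageSize = view.size();
        vector<Point2f> corners;
        if( findChessboardCorners(view, boardSize, corners) )
        {
            imagePoints.push_back(corners);
            objectPoints.push_back(grid);
        }
    }
    if( imagePoints.size() < 3 )
        return -1;                        // need several views for a stable estimate

    Mat cameraMatrix, distCoeffs;
    vector<Mat> rvecs, tvecs;
    double rms = calibrateCamera(objectPoints, imagePoints, imageSize,
                                 cameraMatrix, distCoeffs, rvecs, tvecs);
    printf("reprojection error: %g\n", rms);
    return 0;
}
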
 
index 921ac9e..64b675a 100644 (file)
@@ -18,7 +18,7 @@ Point origin;
 Rect selection;
 int vmin = 10, vmax = 256, smin = 30;
 
-void onMouse( int event, int x, int y, int, void* )
+static void onMouse( int event, int x, int y, int, void* )
 {
     if( selectObject )
     {
@@ -45,7 +45,7 @@ void onMouse( int event, int x, int y, int, void* )
     }
 }
 
-void help()
+static void help()
 {
     cout << "\nThis is a demo that shows mean-shift based tracking\n"
                        "You select a color objects such as your face and it tracks it.\n"
index 2c03d3d..4be87cf 100644 (file)
@@ -7,9 +7,9 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
+
    cout << "\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
             "edge template and a query edge image.\n"
             "Usage: \n"
@@ -17,24 +17,24 @@ void help()
             " By default the inputs are logo_in_clutter.png logo.png\n";
 }
 
-const char* keys = 
+const char* keys =
 {
-       "{1| |logo_in_clutter.png|image edge map    }"
-       "{2| |logo.png               |template edge map}"
+    "{1| |logo_in_clutter.png|image edge map    }"
+    "{2| |logo.png               |template edge map}"
 };
 
 int main( int argc, const char** argv )
 {
 
-       help();
-       CommandLineParser parser(argc, argv, keys);
+    help();
+    CommandLineParser parser(argc, argv, keys);
 
-       string image = parser.get<string>("1");
-       string templ = parser.get<string>("2");
-       Mat img = imread(image.c_str(), 0);
-       Mat tpl = imread(templ.c_str(), 0);
+    string image = parser.get<string>("1");
+    string templ = parser.get<string>("2");
+    Mat img = imread(image.c_str(), 0);
+    Mat tpl = imread(templ.c_str(), 0);
 
-       if (img.empty() || tpl.empty())
+    if (img.empty() || tpl.empty())
     {
         cout << "Could not read image file " << image << " or " << templ << "." << endl;
         return -1;
index ce9a41e..b83660d 100644 (file)
@@ -8,16 +8,16 @@ using namespace std;
 Mat img;
 int threshval = 100;
 
-void on_trackbar(int, void*)
+static void on_trackbar(int, void*)
 {
-       Mat bw = threshval < 128 ? (img < threshval) : (img > threshval);
-    
+    Mat bw = threshval < 128 ? (img < threshval) : (img > threshval);
+
     vector<vector<Point> > contours;
     vector<Vec4i> hierarchy;
-    
+
     findContours( bw, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
-       
-       Mat dst = Mat::zeros(img.size(), CV_8UC3);
+
+    Mat dst = Mat::zeros(img.size(), CV_8UC3);
 
     if( !contours.empty() && !hierarchy.empty() )
     {
@@ -31,42 +31,42 @@ void on_trackbar(int, void*)
         }
     }
 
-       imshow( "Connected Components", dst );
+    imshow( "Connected Components", dst );
 }
 
-void help()
+static void help()
 {
     cout << "\n This program demonstrates connected components and use of the trackbar\n"
-                        "Usage: \n"
-                        "      ./connected_components <image(stuff.jpg as default)>\n"
-                        "The image is converted to grayscale and displayed, another image has a trackbar\n"
+             "Usage: \n"
+             "  ./connected_components <image(stuff.jpg as default)>\n"
+             "The image is converted to grayscale and displayed, another image has a trackbar\n"
              "that controls thresholding and thereby the extracted contours which are drawn in color\n";
 }
 
-const char* keys = 
+const char* keys =
 {
-       "{1| |stuff.jpg|image for converting to a grayscale}"
+    "{1| |stuff.jpg|image for converting to a grayscale}"
 };
 
 int main( int argc, const char** argv )
 {
-       help();
-       CommandLineParser parser(argc, argv, keys);
-       string inputImage = parser.get<string>("1");
-       img = imread(inputImage.c_str(), 0);
+    help();
+    CommandLineParser parser(argc, argv, keys);
+    string inputImage = parser.get<string>("1");
+    img = imread(inputImage.c_str(), 0);
 
-       if(img.empty())
-       {
+    if(img.empty())
+    {
         cout << "Could not read input image file: " << inputImage << endl;
-               return -1;
-       }
+        return -1;
+    }
 
-       namedWindow( "Image", 1 );
+    namedWindow( "Image", 1 );
     imshow( "Image", img );
 
-       namedWindow( "Connected Components", 1 );
-       createTrackbar( "Threshold", "Connected Components", &threshval, 255, on_trackbar );
-       on_trackbar(threshval, 0);
+    namedWindow( "Connected Components", 1 );
+    createTrackbar( "Threshold", "Connected Components", &threshval, 255, on_trackbar );
+    on_trackbar(threshval, 0);
 
     waitKey(0);
     return 0;
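
The callback reindented above is the whole connected-components pipeline: threshold, findContours() with a hierarchy, then draw each top-level contour in its own colour. A trimmed sketch with a fixed threshold instead of the trackbar (the input file name is a placeholder):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cstdlib>
#include <vector>
using namespace cv;

int main()
{
    Mat img = imread("stuff.jpg", 0);                 // grayscale input, placeholder name
    if( img.empty() )
        return -1;

    Mat bw = img > 100;                               // fixed threshold instead of the trackbar
    std::vector<std::vector<Point> > contours;
    std::vector<Vec4i> hierarchy;
    findContours(bw, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    Mat dst = Mat::zeros(img.size(), CV_8UC3);
    // hierarchy[idx][0] links to the next contour on the same level; walk the external ones.
    for( int idx = 0; idx >= 0 && !contours.empty(); idx = hierarchy[idx][0] )
    {
        Scalar color(rand() & 255, rand() & 255, rand() & 255);
        drawContours(dst, contours, idx, color, CV_FILLED, 8, hierarchy);
    }
    imshow("Connected Components", dst);
    waitKey();
    return 0;
}
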
index a717123..2c1e6ba 100644 (file)
@@ -6,7 +6,7 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
        cout
        << "\nThis program illustrates the use of findContours and drawContours\n"
@@ -23,7 +23,7 @@ int levels = 3;
 vector<vector<Point> > contours;
 vector<Vec4i> hierarchy;
 
-void on_trackbar(int, void*)
+static void on_trackbar(int, void*)
 {
        Mat cnt_img = Mat::zeros(w, w, CV_8UC3);
     int _levels = levels - 3;
index 418750b..ce1503c 100644 (file)
@@ -6,11 +6,11 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
-       cout << "\nThis sample program demonstrates the use of the convexHull() function\n"
-                << "Call:\n"
-                << "./convexhull\n" << endl;
+    cout << "\nThis sample program demonstrates the use of the convexHull() function\n"
+         << "Call:\n"
+         << "./convexhull\n" << endl;
 }
 
 int main( int /*argc*/, char** /*argv*/ )
@@ -24,7 +24,7 @@ int main( int /*argc*/, char** /*argv*/ )
     {
         char key;
         int i, count = (unsigned)rng%100 + 1;
-        
+
         vector<Point> points;
 
         for( i = 0; i < count; i++ )
@@ -32,20 +32,20 @@ int main( int /*argc*/, char** /*argv*/ )
             Point pt;
             pt.x = rng.uniform(img.cols/4, img.cols*3/4);
             pt.y = rng.uniform(img.rows/4, img.rows*3/4);
-            
+
             points.push_back(pt);
         }
-        
+
         vector<int> hull;
         convexHull(Mat(points), hull, true);
-        
+
         img = Scalar::all(0);
         for( i = 0; i < count; i++ )
             circle(img, points[i], 3, Scalar(0, 0, 255), CV_FILLED, CV_AA);
-        
+
         int hullcount = (int)hull.size();
         Point pt0 = points[hull[hullcount-1]];
-        
+
         for( i = 0; i < hullcount; i++ )
         {
             Point pt = points[hull[i]];
index 4a2b5f5..d7ccda0 100644 (file)
 using namespace std;
 using namespace cv;
 
-void help()
+static void help()
 {
-       cout
-       << "\n------------------------------------------------------------------\n"
-       << " This program shows the serial out capabilities of cv::Mat\n"
-       << "That is, cv::Mat M(...); cout << M;  Now works.\n"
-       << "Output can be formated to OpenCV, python, numpy, csv and C styles"
-       << "Usage:\n"
-       << "./cvout_sample\n"
-       << "------------------------------------------------------------------\n\n"
-       << endl;
+    cout
+    << "\n------------------------------------------------------------------\n"
+    << " This program shows the serial out capabilities of cv::Mat\n"
+    << "That is, cv::Mat M(...); cout << M;  Now works.\n"
+    << "Output can be formated to OpenCV, python, numpy, csv and C styles"
+    << "Usage:\n"
+    << "./cvout_sample\n"
+    << "------------------------------------------------------------------\n\n"
+    << endl;
 }
 
 
 int main(int,char**)
 {
-       help();
+    help();
     Mat i = Mat::eye(4, 4, CV_64F);
     i.at<double>(1,1) = CV_PI;
     cout << "i = " << i << ";" << endl;
@@ -38,7 +38,7 @@ int main(int,char**)
     cout << "r (default) = " << r << ";" << endl << endl;
     cout << "r (python) = " << format(r,"python") << ";" << endl << endl;
     cout << "r (numpy) = " << format(r,"numpy") << ";" << endl << endl;
-    cout << "r (csv) = " << format(r,"csv") << ";" << endl << endl; 
+    cout << "r (csv) = " << format(r,"csv") << ";" << endl << endl;
     cout << "r (c) = " << format(r,"C") << ";" << endl << endl;
 
     Point2f p(5, 1);
@@ -51,9 +51,9 @@ int main(int,char**)
     v.push_back(1);
     v.push_back(2);
     v.push_back(3);
-    
+
     cout << "shortvec = " << Mat(v) << endl;
-        
+
     vector<Point2f> points(20);
     for (size_t i = 0; i < points.size(); ++i)
         points[i] = Point2f((float)(i * 5), (float)(i % 7));
index e5f3c2f..87e6664 100644 (file)
@@ -7,7 +7,7 @@ using namespace std;
 
 static void help()
 {
-       cout << "\nThis program demostrates iterative construction of\n"
+    cout << "\nThis program demostrates iterative construction of\n"
            "delaunay triangulation and voronoi tesselation.\n"
            "It draws a random set of points in an image and then delaunay triangulates them.\n"
            "Usage: \n"
@@ -27,7 +27,7 @@ static void draw_subdiv( Mat& img, Subdiv2D& subdiv, Scalar delaunay_color )
     vector<Vec6f> triangleList;
     subdiv.getTriangleList(triangleList);
     vector<Point> pt(3);
-    
+
     for( size_t i = 0; i < triangleList.size(); i++ )
     {
         Vec6f t = triangleList[i];
@@ -54,9 +54,9 @@ static void draw_subdiv( Mat& img, Subdiv2D& subdiv, Scalar delaunay_color )
 static void locate_point( Mat& img, Subdiv2D& subdiv, Point2f fp, Scalar active_color )
 {
     int e0=0, vertex=0;
-    
+
     subdiv.locate(fp, e0, vertex);
-    
+
     if( e0 > 0 )
     {
         int e = e0;
@@ -65,37 +65,37 @@ static void locate_point( Mat& img, Subdiv2D& subdiv, Point2f fp, Scalar active_
             Point2f org, dst;
             if( subdiv.edgeOrg(e, &org) > 0 && subdiv.edgeDst(e, &dst) > 0 )
                 line( img, org, dst, active_color, 3, CV_AA, 0 );
-                
+
             e = subdiv.getEdge(e, Subdiv2D::NEXT_AROUND_LEFT);
         }
         while( e != e0 );
     }
-    
+
     draw_subdiv_point( img, fp, active_color );
 }
 
 
-void paint_voronoi( Mat& img, Subdiv2D& subdiv )
+static void paint_voronoi( Mat& img, Subdiv2D& subdiv )
 {
     vector<vector<Point2f> > facets;
     vector<Point2f> centers;
     subdiv.getVoronoiFacetList(vector<int>(), facets, centers);
-    
+
     vector<Point> ifacet;
     vector<vector<Point> > ifacets(1);
-    
+
     for( size_t i = 0; i < facets.size(); i++ )
     {
         ifacet.resize(facets[i].size());
         for( size_t j = 0; j < facets[i].size(); j++ )
             ifacet[j] = facets[i][j];
-    
+
         Scalar color;
         color[0] = rand() & 255;
         color[1] = rand() & 255;
         color[2] = rand() & 255;
         fillConvexPoly(img, ifacet, color, 8, 0);
-        
+
         ifacets[0] = ifacet;
         polylines(img, ifacets, true, Scalar(), 1, CV_AA, 0);
         circle(img, centers[i], 3, Scalar(), -1, CV_AA, 0);
@@ -106,43 +106,43 @@ void paint_voronoi( Mat& img, Subdiv2D& subdiv )
 int main( int, char** )
 {
     help();
-    
+
     Scalar active_facet_color(0, 0, 255), delaunay_color(255,255,255);
     Rect rect(0, 0, 600, 600);
-    
+
     Subdiv2D subdiv(rect);
     Mat img(rect.size(), CV_8UC3);
-    
+
     img = Scalar::all(0);
     string win = "Delaunay Demo";
     imshow(win, img);
-    
+
     for( int i = 0; i < 200; i++ )
     {
         Point2f fp( (float)(rand()%(rect.width-10)+5),
                     (float)(rand()%(rect.height-10)+5));
-        
+
         locate_point( img, subdiv, fp, active_facet_color );
         imshow( win, img );
-        
+
         if( waitKey( 100 ) >= 0 )
             break;
-        
+
         subdiv.insert(fp);
-        
+
         img = Scalar::all(0);
         draw_subdiv( img, subdiv, delaunay_color );
         imshow( win, img );
-        
+
         if( waitKey( 100 ) >= 0 )
             break;
     }
-    
+
     img = Scalar::all(0);
     paint_voronoi( img, subdiv );
     imshow( win, img );
-        
+
     waitKey(0);
-    
+
     return 0;
 }
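
All of the Delaunay demo changes above are whitespace; the sample's substance is three Subdiv2D calls: insert(), getTriangleList() and getVoronoiFacetList(). A minimal sketch of those calls in isolation (the rectangle size and point count are arbitrary):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cstdlib>
#include <vector>
using namespace cv;

int main()
{
    Rect rect(0, 0, 600, 600);
    Subdiv2D subdiv(rect);

    // Insert a handful of random points; each insertion updates the triangulation.
    for( int i = 0; i < 50; i++ )
        subdiv.insert(Point2f((float)(rand() % 590 + 5), (float)(rand() % 590 + 5)));

    // Triangles come back as packed (x1,y1,x2,y2,x3,y3) records.
    std::vector<Vec6f> triangles;
    subdiv.getTriangleList(triangles);

    // Voronoi facets and their centers for all inserted points.
    std::vector<std::vector<Point2f> > facets;
    std::vector<Point2f> centers;
    subdiv.getVoronoiFacetList(std::vector<int>(), facets, centers);

    return 0;
}
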
index d71ea5c..59ecf25 100644 (file)
@@ -12,7 +12,7 @@ int _contrast = 100;
 Mat image;
 
 /* brightness/contrast callback function */
-void updateBrightnessContrast( int /*arg*/, void* )
+static void updateBrightnessContrast( int /*arg*/, void* )
 {
     int histSize = 64;
     int brightness = _brightness - 100;
@@ -42,7 +42,7 @@ void updateBrightnessContrast( int /*arg*/, void* )
 
     calcHist(&dst, 1, 0, Mat(), hist, 1, &histSize, 0);
     Mat histImage = Mat::ones(200, 320, CV_8U)*255;
-    
+
     normalize(hist, hist, 0, histImage.rows, CV_MINMAX, CV_32F);
 
     histImage = Scalar::all(255);
@@ -54,31 +54,31 @@ void updateBrightnessContrast( int /*arg*/, void* )
                    Scalar::all(0), -1, 8, 0 );
     imshow("histogram", histImage);
 }
-void help()
+static void help()
 {
-       std::cout << "\nThis program demonstrates the use of calcHist() -- histogram creation.\n"
-                 << "Usage: \n" << "demhist [image_name -- Defaults to baboon.jpg]" << std::endl;
+    std::cout << "\nThis program demonstrates the use of calcHist() -- histogram creation.\n"
+              << "Usage: \n" << "demhist [image_name -- Defaults to baboon.jpg]" << std::endl;
 }
 
-const char* keys = 
+const char* keys =
 {
-       "{1| |baboon.jpg|input image file}"
+    "{1| |baboon.jpg|input image file}"
 };
 
 int main( int argc, const char** argv )
 {
-       help();
-
-       CommandLineParser parser(argc, argv, keys);
-       string inputImage = parser.get<string>("1");
-
-       // Load the source image. HighGUI use.
-       image = imread( inputImage, 0 );
-       if(image.empty())
-       {
-               std::cerr << "Cannot read image file: " << inputImage << std::endl;
-               return -1;
-       }
+    help();
+
+    CommandLineParser parser(argc, argv, keys);
+    string inputImage = parser.get<string>("1");
+
+    // Load the source image. HighGUI use.
+    image = imread( inputImage, 0 );
+    if(image.empty())
+    {
+        std::cerr << "Cannot read image file: " << inputImage << std::endl;
+        return -1;
+    }
 
     namedWindow("image", 0);
     namedWindow("histogram", 0);
index cb4d2ff..0a3243d 100644 (file)
@@ -9,7 +9,7 @@
 using namespace cv;
 using namespace std;
 
-void help(char** argv)
+static void help(char** argv)
 {
     cout << "\nThis program demonstrats keypoint finding and matching between 2 images using features2d framework.\n"
      << "   In one case, the 2nd image is synthesized by homography from the first, in the second case, there are 2 images\n"
@@ -39,7 +39,7 @@ const string winName = "correspondences";
 
 enum { NONE_FILTER = 0, CROSS_CHECK_FILTER = 1 };
 
-int getMatcherFilterType( const string& str )
+static int getMatcherFilterType( const string& str )
 {
     if( str == "NoneFilter" )
         return NONE_FILTER;
@@ -49,7 +49,7 @@ int getMatcherFilterType( const string& str )
     return -1;
 }
 
-void simpleMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
+static void simpleMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
                      const Mat& descriptors1, const Mat& descriptors2,
                      vector<DMatch>& matches12 )
 {
@@ -57,7 +57,7 @@ void simpleMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
     descriptorMatcher->match( descriptors1, descriptors2, matches12 );
 }
 
-void crossCheckMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
+static void crossCheckMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
                          const Mat& descriptors1, const Mat& descriptors2,
                          vector<DMatch>& filteredMatches12, int knn=1 )
 {
@@ -87,7 +87,7 @@ void crossCheckMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
     }
 }
 
-void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
+static void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
 {
     H.create(3, 3, CV_32FC1);
     H.at<float>(0,0) = rng.uniform( 0.8f, 1.2f);
@@ -103,7 +103,7 @@ void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
     warpPerspective( src, dst, H, src.size() );
 }
 
-void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
+static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
                   vector<KeyPoint>& keypoints1, const Mat& descriptors1,
                   Ptr<FeatureDetector>& detector, Ptr<DescriptorExtractor>& descriptorExtractor,
                   Ptr<DescriptorMatcher>& descriptorMatcher, int matcherFilter, bool eval,
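
crossCheckMatching() in this sample keeps a match only when matching in both directions agrees. A condensed sketch of that symmetric filter using plain match() instead of knnMatch(), with random descriptors standing in for real extractor output:

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>
using namespace cv;
using namespace std;

// Keep the match i -> j only if matching the other way maps j back to i.
static void crossCheck(const Ptr<DescriptorMatcher>& matcher,
                       const Mat& d1, const Mat& d2, vector<DMatch>& out)
{
    vector<DMatch> m12, m21;
    matcher->match(d1, d2, m12);          // one best match per row of d1
    matcher->match(d2, d1, m21);          // one best match per row of d2
    for( size_t i = 0; i < m12.size(); i++ )
    {
        const DMatch& fwd = m12[i];
        if( m21[fwd.trainIdx].trainIdx == fwd.queryIdx )
            out.push_back(fwd);
    }
}

int main()
{
    // Random float descriptors, purely illustrative.
    Mat d1(100, 32, CV_32F), d2(120, 32, CV_32F);
    randu(d1, Scalar::all(0), Scalar::all(1));
    randu(d2, Scalar::all(0), Scalar::all(1));
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
    vector<DMatch> filtered;
    crossCheck(matcher, d1, d2, filtered);
    return 0;
}
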
index 33b9b20..6aeb1b2 100644 (file)
@@ -14,7 +14,7 @@
 #define DEBUGLOGS 1
 
 
-#if ANDROID
+#ifdef ANDROID
 #include <android/log.h>
 #define LOG_TAG "DETECTIONBASEDTRACKER__TEST_APPLICAT"
 #define LOGD0(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
@@ -36,7 +36,7 @@
 #define LOGI(_str, ...) LOGI0(_str , ## __VA_ARGS__)
 #define LOGW(_str, ...) LOGW0(_str , ## __VA_ARGS__)
 #define LOGE(_str, ...) LOGE0(_str , ## __VA_ARGS__)
-#else 
+#else
 #define LOGD(...) do{} while(0)
 #define LOGI(...) do{} while(0)
 #define LOGW(...) do{} while(0)
@@ -51,7 +51,7 @@ using namespace std;
 #define ORIGINAL 0
 #define SHOULD_USE_EXTERNAL_BUFFERS 1
 
-void usage()
+static void usage()
 {
     LOGE0("usage: filepattern outfilepattern cascadefile");
     LOGE0("\t where ");
@@ -63,7 +63,7 @@ void usage()
     LOGE0("\t       (e.g.\"opencv/data/lbpcascades/lbpcascade_frontalface.xml\" ");
 }
 
-int test_FaceDetector(int argc, char *argv[])
+static int test_FaceDetector(int argc, char *argv[])
 {
     if (argc < 4) {
         usage();
@@ -102,7 +102,7 @@ int test_FaceDetector(int argc, char *argv[])
     fd.run();
 
     Mat gray;
-       Mat m;
+    Mat m;
 
     int64 tprev=getTickCount();
     double freq=getTickFrequency();
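
The one functional change in this hunk is `#if ANDROID` becoming `#ifdef ANDROID`: with the macro undefined, `#if` silently evaluates it to 0 and, under gcc's -Wundef, produces a warning, while `#ifdef` only tests whether the macro is defined. A tiny illustration with a made-up macro name:

// With "#if FEATURE_X" and FEATURE_X undefined, gcc -Wundef reports that the
// macro is not defined and treats it as 0; "#ifdef" avoids the warning entirely.
#ifdef FEATURE_X                      // preferred: only asks whether the macro is defined
static const int feature_x_enabled = 1;
#else
static const int feature_x_enabled = 0;
#endif

int isFeatureXEnabled() { return feature_x_enabled; }
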
index 11f4f3c..5624e0d 100644 (file)
@@ -175,6 +175,8 @@ public:
 
     void run();
 
+    virtual ~BaseQualityEvaluator(){}
+
 protected:
     virtual string getRunParamsFilename() const = 0;
     virtual string getResultsFilename() const = 0;
@@ -540,7 +542,7 @@ void DetectorQualityEvaluator::readAlgorithm ()
     }
 }
 
-int update_progress( const string& /*name*/, int progress, int test_case_idx, int count, double dt )
+static int update_progress( const string& /*name*/, int progress, int test_case_idx, int count, double dt )
 {
     int width = 60 /*- (int)name.length()*/;
     if( count > 0 )
@@ -588,13 +590,13 @@ void DetectorQualityEvaluator::runDatasetTest (const vector<Mat> &imgs, const ve
     }
 }
 
-void testLog( bool isBadAccuracy )
-{
-    if( isBadAccuracy )
-        printf(" bad accuracy\n");
-    else
-        printf("\n");
-}
+// static void testLog( bool isBadAccuracy )
+// {
+//     if( isBadAccuracy )
+//         printf(" bad accuracy\n");
+//     else
+//         printf("\n");
+// }
 
 /****************************************************************************************\
 *                                  Descriptors evaluation                                 *
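
The added `virtual ~BaseQualityEvaluator(){}` gives a polymorphic base class a virtual destructor, so deleting a derived object through a base pointer is well defined and gcc warnings such as -Wnon-virtual-dtor stay quiet. A generic illustration with invented class names:

#include <iostream>

struct Evaluator                          // stand-in for a polymorphic base like the one above
{
    virtual void run() = 0;
    virtual ~Evaluator() {}               // without this, delete through Evaluator* is undefined behaviour
};

struct DetectorEvaluator : Evaluator      // hypothetical derived class
{
    void run() { std::cout << "running" << std::endl; }
    ~DetectorEvaluator() { std::cout << "cleaned up" << std::endl; }
};

int main()
{
    Evaluator* e = new DetectorEvaluator();
    e->run();
    delete e;                             // derived destructor runs because the base one is virtual
    return 0;
}
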
index ee1d9b1..dbbc2cc 100644 (file)
@@ -7,56 +7,56 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
-       printf("\nThis program demonstrated the use of the discrete Fourier transform (dft)\n"
-                  "The dft of an image is taken and it's power spectrum is displayed.\n"
-                  "Usage:\n"
-                       "./dft [image_name -- default lena.jpg]\n");
+    printf("\nThis program demonstrated the use of the discrete Fourier transform (dft)\n"
+           "The dft of an image is taken and it's power spectrum is displayed.\n"
+           "Usage:\n"
+            "./dft [image_name -- default lena.jpg]\n");
 }
 
-const char* keys = 
+const char* keys =
 {
-       "{1| |lena.jpg|input image file}"
+    "{1| |lena.jpg|input image file}"
 };
 
 int main(int argc, const char ** argv)
 {
     help();
-       CommandLineParser parser(argc, argv, keys);
-       string filename = parser.get<string>("1");
+    CommandLineParser parser(argc, argv, keys);
+    string filename = parser.get<string>("1");
 
-       Mat img = imread(filename.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
+    Mat img = imread(filename.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
     if( img.empty() )
     {
         help();
-               printf("Cannot read image file: %s\n", filename.c_str());
+        printf("Cannot read image file: %s\n", filename.c_str());
         return -1;
     }
     int M = getOptimalDFTSize( img.rows );
     int N = getOptimalDFTSize( img.cols );
     Mat padded;
     copyMakeBorder(img, padded, 0, M - img.rows, 0, N - img.cols, BORDER_CONSTANT, Scalar::all(0));
-    
+
     Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
     Mat complexImg;
     merge(planes, 2, complexImg);
-    
+
     dft(complexImg, complexImg);
-    
+
     // compute log(1 + sqrt(Re(DFT(img))**2 + Im(DFT(img))**2))
     split(complexImg, planes);
     magnitude(planes[0], planes[1], planes[0]);
     Mat mag = planes[0];
     mag += Scalar::all(1);
     log(mag, mag);
-    
+
     // crop the spectrum, if it has an odd number of rows or columns
     mag = mag(Rect(0, 0, mag.cols & -2, mag.rows & -2));
-    
+
     int cx = mag.cols/2;
     int cy = mag.rows/2;
-    
+
     // rearrange the quadrants of Fourier image
     // so that the origin is at the image center
     Mat tmp;
@@ -64,17 +64,17 @@ int main(int argc, const char ** argv)
     Mat q1(mag, Rect(cx, 0, cx, cy));
     Mat q2(mag, Rect(0, cy, cx, cy));
     Mat q3(mag, Rect(cx, cy, cx, cy));
-    
+
     q0.copyTo(tmp);
     q3.copyTo(q0);
     tmp.copyTo(q3);
-    
+
     q1.copyTo(tmp);
     q2.copyTo(q1);
     tmp.copyTo(q2);
-    
+
     normalize(mag, mag, 0, 1, CV_MINMAX);
-    
+
     imshow("spectrum magnitude", mag);
     waitKey();
     return 0;
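
The dft sample's pipeline, buried under the whitespace churn above, is: pad to an optimal DFT size, transform, take log(1 + |F|) and normalize for display. The quadrant swap that centres the zero frequency is omitted here to keep the sketch short (the input file name is a placeholder):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    Mat img = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);   // placeholder input
    if( img.empty() )
        return -1;

    // Pad to an optimal DFT size and build a two-channel (Re, Im) image.
    Mat padded;
    copyMakeBorder(img, padded, 0, getOptimalDFTSize(img.rows) - img.rows,
                                0, getOptimalDFTSize(img.cols) - img.cols,
                   BORDER_CONSTANT, Scalar::all(0));
    Mat planes[] = { Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F) };
    Mat complexImg;
    merge(planes, 2, complexImg);
    dft(complexImg, complexImg);

    // log(1 + magnitude) compresses the spectrum's huge dynamic range for display.
    split(complexImg, planes);
    magnitude(planes[0], planes[1], planes[0]);
    Mat mag = planes[0];
    mag += Scalar::all(1);
    log(mag, mag);
    normalize(mag, mag, 0, 1, CV_MINMAX);
    imshow("spectrum magnitude", mag);
    waitKey();
    return 0;
}
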
index b9cd67d..4e3c3a2 100644 (file)
@@ -14,7 +14,7 @@ int distType0 = CV_DIST_L1;
 Mat gray;
 
 // threshold trackbar callback
-void onTrackbar( int, void* )
+static void onTrackbar( int, void* )
 {
     static const Scalar colors[] =
     {
@@ -44,20 +44,20 @@ void onTrackbar( int, void* )
         // begin "painting" the distance transform result
         dist *= 5000;
         pow(dist, 0.5, dist);
-        
+
         Mat dist32s, dist8u1, dist8u2;
-        
+
         dist.convertTo(dist32s, CV_32S, 1, 0.5);
         dist32s &= Scalar::all(255);
-        
+
         dist32s.convertTo(dist8u1, CV_8U, 1, 0);
         dist32s *= -1;
-        
+
         dist32s += Scalar::all(255);
         dist32s.convertTo(dist8u2, CV_8U);
-        
+
         Mat planes[] = {dist8u1, dist8u2, dist8u2};
-        merge(planes, 3, dist8u); 
+        merge(planes, 3, dist8u);
     }
     else
     {
@@ -84,40 +84,40 @@ void onTrackbar( int, void* )
     imshow("Distance Map", dist8u );
 }
 
-void help()
+static void help()
 {
-       printf("\nProgram to demonstrate the use of the distance transform function between edge images.\n"
-                       "Usage:\n"
-                       "./distrans [image_name -- default image is stuff.jpg]\n"
-                       "\nHot keys: \n"
-                       "\tESC - quit the program\n"
-                       "\tC - use C/Inf metric\n"
-                       "\tL1 - use L1 metric\n"
-                       "\tL2 - use L2 metric\n"
-                       "\t3 - use 3x3 mask\n"
-                       "\t5 - use 5x5 mask\n"
-                       "\t0 - use precise distance transform\n"
-                       "\tv - switch to Voronoi diagram mode\n"
+    printf("\nProgram to demonstrate the use of the distance transform function between edge images.\n"
+            "Usage:\n"
+            "./distrans [image_name -- default image is stuff.jpg]\n"
+            "\nHot keys: \n"
+            "\tESC - quit the program\n"
+            "\tC - use C/Inf metric\n"
+            "\tL1 - use L1 metric\n"
+            "\tL2 - use L2 metric\n"
+            "\t3 - use 3x3 mask\n"
+            "\t5 - use 5x5 mask\n"
+            "\t0 - use precise distance transform\n"
+            "\tv - switch to Voronoi diagram mode\n"
             "\tp - switch to pixel-based Voronoi diagram mode\n"
-                       "\tSPACE - loop through all the modes\n\n");
+            "\tSPACE - loop through all the modes\n\n");
 }
 
-const char* keys = 
+const char* keys =
 {
-       "{1| |stuff.jpg|input image file}"
+    "{1| |stuff.jpg|input image file}"
 };
 
 int main( int argc, const char** argv )
 {
     help();
-       CommandLineParser parser(argc, argv, keys);
-       string filename = parser.get<string>("1");
-       gray = imread(filename.c_str(), 0);
+    CommandLineParser parser(argc, argv, keys);
+    string filename = parser.get<string>("1");
+    gray = imread(filename.c_str(), 0);
     if(gray.empty())
     {
-               printf("Cannot read image file: %s\n", filename.c_str());
-               help();
-       return -1;
+        printf("Cannot read image file: %s\n", filename.c_str());
+        help();
+        return -1;
     }
 
     namedWindow("Distance Map", 1);
@@ -136,7 +136,7 @@ int main( int argc, const char** argv )
         if( c == 'c' || c == 'C' || c == '1' || c == '2' ||
             c == '3' || c == '5' || c == '0' )
             voronoiType = -1;
-        
+
         if( c == 'c' || c == 'C' )
             distType0 = CV_DIST_C;
         else if( c == '1' )
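
The distrans callback visualises distanceTransform() output under different metrics and mask sizes. A minimal sketch of computing and showing one distance map; the Canny thresholds, metric and input file name are arbitrary choices for the sketch rather than the sample's exact settings:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    Mat gray = imread("stuff.jpg", 0);        // placeholder input
    if( gray.empty() )
        return -1;

    // distanceTransform measures the distance to the nearest zero pixel,
    // so invert the edge map to make edge pixels the zeros.
    Mat edges, notEdges, dist, dist8u;
    Canny(gray, edges, 100, 200, 3);
    notEdges = ~edges;
    distanceTransform(notEdges, dist, CV_DIST_L2, 3);

    // Scale the float distances into a viewable 8-bit image.
    normalize(dist, dist, 0, 1.0, NORM_MINMAX);
    dist.convertTo(dist8u, CV_8U, 255);
    imshow("Distance Map", dist8u);
    waitKey();
    return 0;
}
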
index 3ff4714..5467463 100644 (file)
@@ -3,11 +3,11 @@
 #include <stdio.h>
 using namespace cv;
 
-void help()
+static void help()
 {
-       printf("\nThis program demonstrates OpenCV drawing and text output functions.\n"
-       "Usage:\n"
-       "       ./drawing\n");
+    printf("\nThis program demonstrates OpenCV drawing and text output functions.\n"
+    "Usage:\n"
+    "   ./drawing\n");
 }
 static Scalar randomColor(RNG& rng)
 {
@@ -18,7 +18,7 @@ static Scalar randomColor(RNG& rng)
 int main()
 {
     help();
-       char wndname[] = "Drawing Demo";
+    char wndname[] = "Drawing Demo";
     const int NUMBER = 100;
     const int DELAY = 5;
     int lineType = CV_AA; // change it to 8 to see non-antialiased graphics
@@ -39,7 +39,7 @@ int main()
         pt2.y = rng.uniform(y1, y2);
 
         line( image, pt1, pt2, randomColor(rng), rng.uniform(1,10), lineType );
-        
+
         imshow(wndname, image);
         if(waitKey(DELAY) >= 0)
             return 0;
@@ -53,14 +53,14 @@ int main()
         pt2.x = rng.uniform(x1, x2);
         pt2.y = rng.uniform(y1, y2);
         int thickness = rng.uniform(-3, 10);
-        
+
         rectangle( image, pt1, pt2, randomColor(rng), MAX(thickness, -1), lineType );
-        
+
         imshow(wndname, image);
         if(waitKey(DELAY) >= 0)
             return 0;
     }
-    
+
     for (i = 0; i < NUMBER; i++)
     {
         Point center;
@@ -73,7 +73,7 @@ int main()
 
         ellipse( image, center, axes, angle, angle - 100, angle + 200,
                  randomColor(rng), rng.uniform(-1,9), lineType );
-        
+
         imshow(wndname, image);
         if(waitKey(DELAY) >= 0)
             return 0;
@@ -84,46 +84,46 @@ int main()
         Point pt[2][3];
         pt[0][0].x = rng.uniform(x1, x2);
         pt[0][0].y = rng.uniform(y1, y2);
-        pt[0][1].x = rng.uniform(x1, x2); 
-        pt[0][1].y = rng.uniform(y1, y2); 
+        pt[0][1].x = rng.uniform(x1, x2);
+        pt[0][1].y = rng.uniform(y1, y2);
         pt[0][2].x = rng.uniform(x1, x2);
         pt[0][2].y = rng.uniform(y1, y2);
-        pt[1][0].x = rng.uniform(x1, x2); 
+        pt[1][0].x = rng.uniform(x1, x2);
         pt[1][0].y = rng.uniform(y1, y2);
-        pt[1][1].x = rng.uniform(x1, x2); 
+        pt[1][1].x = rng.uniform(x1, x2);
         pt[1][1].y = rng.uniform(y1, y2);
-        pt[1][2].x = rng.uniform(x1, x2); 
+        pt[1][2].x = rng.uniform(x1, x2);
         pt[1][2].y = rng.uniform(y1, y2);
         const Point* ppt[2] = {pt[0], pt[1]};
         int npt[] = {3, 3};
-        
+
         polylines(image, ppt, npt, 2, true, randomColor(rng), rng.uniform(1,10), lineType);
-        
+
         imshow(wndname, image);
         if(waitKey(DELAY) >= 0)
             return 0;
     }
-    
+
     for (i = 0; i< NUMBER; i++)
     {
         Point pt[2][3];
         pt[0][0].x = rng.uniform(x1, x2);
         pt[0][0].y = rng.uniform(y1, y2);
-        pt[0][1].x = rng.uniform(x1, x2); 
-        pt[0][1].y = rng.uniform(y1, y2); 
+        pt[0][1].x = rng.uniform(x1, x2);
+        pt[0][1].y = rng.uniform(y1, y2);
         pt[0][2].x = rng.uniform(x1, x2);
         pt[0][2].y = rng.uniform(y1, y2);
-        pt[1][0].x = rng.uniform(x1, x2); 
+        pt[1][0].x = rng.uniform(x1, x2);
         pt[1][0].y = rng.uniform(y1, y2);
-        pt[1][1].x = rng.uniform(x1, x2); 
+        pt[1][1].x = rng.uniform(x1, x2);
         pt[1][1].y = rng.uniform(y1, y2);
-        pt[1][2].x = rng.uniform(x1, x2); 
+        pt[1][2].x = rng.uniform(x1, x2);
         pt[1][2].y = rng.uniform(y1, y2);
         const Point* ppt[2] = {pt[0], pt[1]};
         int npt[] = {3, 3};
-        
+
         fillPoly(image, ppt, npt, 2, randomColor(rng), lineType);
-        
+
         imshow(wndname, image);
         if(waitKey(DELAY) >= 0)
             return 0;
@@ -134,10 +134,10 @@ int main()
         Point center;
         center.x = rng.uniform(x1, x2);
         center.y = rng.uniform(y1, y2);
-        
+
         circle(image, center, rng.uniform(0, 300), randomColor(rng),
                rng.uniform(-1, 9), lineType);
-        
+
         imshow(wndname, image);
         if(waitKey(DELAY) >= 0)
             return 0;
@@ -151,7 +151,7 @@ int main()
 
         putText(image, "Testing text rendering", org, rng.uniform(0,8),
                 rng.uniform(0,100)*0.05+0.1, randomColor(rng), rng.uniform(1, 10), lineType);
-        
+
         imshow(wndname, image);
         if(waitKey(DELAY) >= 0)
             return 0;
@@ -159,14 +159,14 @@ int main()
 
     Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
     Point org((width - textsize.width)/2, (height - textsize.height)/2);
-    
+
     Mat image2;
     for( i = 0; i < 255; i += 2 )
     {
         image2 = image - Scalar::all(i);
         putText(image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3,
                 Scalar(i, i, 255), 5, lineType);
-        
+
         imshow(wndname, image2);
         if(waitKey(DELAY) >= 0)
             return 0;
index a76176a..6ff30a2 100644 (file)
@@ -10,42 +10,42 @@ int edgeThresh = 1;
 Mat image, gray, edge, cedge;
 
 // define a trackbar callback
-void onTrackbar(int, void*)
+static void onTrackbar(int, void*)
 {
     blur(gray, edge, Size(3,3));
 
     // Run the edge detector on grayscale
     Canny(edge, edge, edgeThresh, edgeThresh*3, 3);
     cedge = Scalar::all(0);
-    
+
     image.copyTo(cedge, edge);
     imshow("Edge map", cedge);
 }
 
-void help()
+static void help()
 {
-       printf("\nThis sample demonstrates Canny edge detection\n"
-                  "Call:\n"
-                  "    /.edge [image_name -- Default is fruits.jpg]\n\n");
+    printf("\nThis sample demonstrates Canny edge detection\n"
+           "Call:\n"
+           "    /.edge [image_name -- Default is fruits.jpg]\n\n");
 }
 
-const char* keys = 
+const char* keys =
 {
-       "{1| |fruits.jpg|input image name}"
+    "{1| |fruits.jpg|input image name}"
 };
 
 int main( int argc, const char** argv )
 {
     help();
 
-       CommandLineParser parser(argc, argv, keys);
-       string filename = parser.get<string>("1");
+    CommandLineParser parser(argc, argv, keys);
+    string filename = parser.get<string>("1");
 
     image = imread(filename, 1);
     if(image.empty())
     {
-               printf("Cannot read image file: %s\n", filename.c_str());
-               help();
+        printf("Cannot read image file: %s\n", filename.c_str());
+        help();
         return -1;
     }
     cedge.create(image.size(), image.type());
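
The edge sample's callback is blur, Canny, then copying the original image through the edge mask. The same steps with fixed thresholds instead of the trackbar (the thresholds and file name are placeholders):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    Mat image = imread("fruits.jpg", 1);      // placeholder input
    if( image.empty() )
        return -1;

    Mat gray, edge;
    Mat cedge = Mat::zeros(image.size(), image.type());
    cvtColor(image, gray, CV_BGR2GRAY);
    blur(gray, edge, Size(3,3));              // smooth first so Canny sees less noise
    Canny(edge, edge, 50, 150, 3);            // low:high threshold ratio of roughly 1:3

    image.copyTo(cedge, edge);                // keep original colours only where edges fire
    imshow("Edge map", cedge);
    waitKey();
    return 0;
}
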
index 8b6d7dd..0b238ef 100644 (file)
@@ -25,7 +25,7 @@
 using namespace cv;
 using namespace std;
 
-Mat toGrayscale(InputArray _src) {
+static Mat toGrayscale(InputArray _src) {
     Mat src = _src.getMat();
     // only allow one channel
     if(src.channels() != 1)
@@ -36,7 +36,7 @@ Mat toGrayscale(InputArray _src) {
     return dst;
 }
 
-void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
+static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
     std::ifstream file(filename.c_str(), ifstream::in);
     if (!file)
         throw std::exception();
index d6a734e..4923e8a 100644 (file)
@@ -7,16 +7,16 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
-       cout <<
-                       "\nThis program demonstrates dense optical flow algorithm by Gunnar Farneback\n"
-                       "Mainly the function: calcOpticalFlowFarneback()\n"
-                       "Call:\n"
-                       "./fback\n"
-                       "This reads from video camera 0\n" << endl;
+    cout <<
+            "\nThis program demonstrates dense optical flow algorithm by Gunnar Farneback\n"
+            "Mainly the function: calcOpticalFlowFarneback()\n"
+            "Call:\n"
+            "./fback\n"
+            "This reads from video camera 0\n" << endl;
 }
-void drawOptFlowMap(const Mat& flow, Mat& cflowmap, int step,
+static void drawOptFlowMap(const Mat& flow, Mat& cflowmap, int step,
                     double, const Scalar& color)
 {
     for(int y = 0; y < cflowmap.rows; y += step)
@@ -35,15 +35,15 @@ int main(int, char**)
     help();
     if( !cap.isOpened() )
         return -1;
-    
+
     Mat prevgray, gray, flow, cflow, frame;
     namedWindow("flow", 1);
-    
+
     for(;;)
     {
         cap >> frame;
         cvtColor(frame, gray, CV_BGR2GRAY);
-        
+
         if( prevgray.data )
         {
             calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
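
calcOpticalFlowFarneback() above fills a CV_32FC2 image with per-pixel (dx, dy) displacements between consecutive frames. A stripped-down capture loop showing just that call, without the drawing code:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <algorithm>
using namespace cv;

int main()
{
    VideoCapture cap(0);                  // default camera, as in the sample
    if( !cap.isOpened() )
        return -1;

    Mat prevgray, gray, frame, flow;
    for(;;)
    {
        cap >> frame;
        if( frame.empty() )
            break;
        cvtColor(frame, gray, CV_BGR2GRAY);
        if( !prevgray.empty() )
        {
            // flow becomes CV_32FC2: per-pixel (dx, dy) displacement from prevgray to gray.
            calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
        }
        std::swap(prevgray, gray);
        if( waitKey(30) >= 0 )
            break;
    }
    return 0;
}
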
index 063860a..a7fff77 100644 (file)
@@ -6,22 +6,22 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
     cout << "\nThis program demonstrated the floodFill() function\n"
-               "Call:\n"
-               "./ffilldemo [image_name -- Default: fruits.jpg]\n" << endl;
-
-       cout << "Hot keys: \n"
-                       "\tESC - quit the program\n"
-                       "\tc - switch color/grayscale mode\n"
-                       "\tm - switch mask mode\n"
-                       "\tr - restore the original image\n"
-                       "\ts - use null-range floodfill\n"
-                       "\tf - use gradient floodfill with fixed(absolute) range\n"
-                       "\tg - use gradient floodfill with floating(relative) range\n"
-                       "\t4 - use 4-connectivity mode\n"
-                       "\t8 - use 8-connectivity mode\n" << endl;
+            "Call:\n"
+            "./ffilldemo [image_name -- Default: fruits.jpg]\n" << endl;
+
+    cout << "Hot keys: \n"
+            "\tESC - quit the program\n"
+            "\tc - switch color/grayscale mode\n"
+            "\tm - switch mask mode\n"
+            "\tr - restore the original image\n"
+            "\ts - use null-range floodfill\n"
+            "\tf - use gradient floodfill with fixed(absolute) range\n"
+            "\tg - use gradient floodfill with floating(relative) range\n"
+            "\t4 - use 4-connectivity mode\n"
+            "\t8 - use 8-connectivity mode\n" << endl;
 }
 
 Mat image0, image, gray, mask;
@@ -32,7 +32,7 @@ int isColor = true;
 bool useMask = false;
 int newMaskVal = 255;
 
-void onMouse( int event, int x, int y, int, void* )
+static void onMouse( int event, int x, int y, int, void* )
 {
     if( event != CV_EVENT_LBUTTONDOWN )
         return;
@@ -50,7 +50,7 @@ void onMouse( int event, int x, int y, int, void* )
     Scalar newVal = isColor ? Scalar(b, g, r) : Scalar(r*0.299 + g*0.587 + b*0.114);
     Mat dst = isColor ? image : gray;
     int area;
-    
+
     if( useMask )
     {
         threshold(mask, mask, 1, 128, CV_THRESH_BINARY);
@@ -63,7 +63,7 @@ void onMouse( int event, int x, int y, int, void* )
         area = floodFill(dst, seed, newVal, &ccomp, Scalar(lo, lo, lo),
                   Scalar(up, up, up), flags);
     }
-    
+
     imshow("image", dst);
     cout << area << " pixels were repainted\n";
 }
@@ -73,7 +73,7 @@ int main( int argc, char** argv )
 {
     char* filename = argc >= 2 ? argv[1] : (char*)"fruits.jpg";
     image0 = imread(filename, 1);
-    
+
     if( image0.empty() )
     {
         cout << "Image empty. Usage: ffilldemo <image_name>\n";
index 0dffef8..fa48fe6 100644 (file)
@@ -13,15 +13,15 @@ using std::cerr;
 using std::ostream;
 using namespace cv;
 
-void help(char** av)
+static void help(char** av)
 {
   cout << "\nfilestorage_sample demonstrate the usage of the opencv serialization functionality.\n"
-                 << "usage:\n"
-                 <<  av[0] << " outputfile.yml.gz\n"
-                 << "\n   outputfile above can have many different extenstions, see below."
-                 << "\nThis program demonstrates the use of FileStorage for serialization, that is use << and >>  in OpenCV\n"
-                 << "For example, how to create a class and have it serialize, but also how to use it to read and write matrices.\n"
-                 << "FileStorage allows you to serialize to various formats specified by the file end type."
+      << "usage:\n"
+      <<  av[0] << " outputfile.yml.gz\n"
+      << "\n   outputfile above can have many different extenstions, see below."
+      << "\nThis program demonstrates the use of FileStorage for serialization, that is use << and >>  in OpenCV\n"
+      << "For example, how to create a class and have it serialize, but also how to use it to read and write matrices.\n"
+      << "FileStorage allows you to serialize to various formats specified by the file end type."
           << "\nYou should try using different file extensions.(e.g. yaml yml xml xml.gz yaml.gz etc...)\n" << endl;
 }
 
@@ -52,17 +52,17 @@ struct MyData
 };
 
 //These write and read functions must exist as per the inline functions in operations.hpp
-void write(FileStorage& fs, const std::string&, const MyData& x){
+static void write(FileStorage& fs, const std::string&, const MyData& x){
   x.write(fs);
 }
-void read(const FileNode& node, MyData& x, const MyData& default_value = MyData()){
+static void read(const FileNode& node, MyData& x, const MyData& default_value = MyData()){
   if(node.empty())
     x = default_value;
   else
     x.read(node);
 }
 
-ostream& operator<<(ostream& out, const MyData& m){
+static ostream& operator<<(ostream& out, const MyData& m){
   out << "{ id = " << m.id << ", ";
   out << "X = " << m.X << ", ";
   out << "A = " << m.A << "}";
index 8ce6b26..55f9e4a 100644 (file)
 using namespace cv;
 using namespace std;
 
-void help()
-{
-       cout <<
-                       "\nThis program is demonstration for ellipse fitting. The program finds\n"
-                       "contours and approximate it by ellipses.\n"
-                       "Call:\n"
-                       "./fitellipse [image_name -- Default stuff.jpg]\n" << endl;
-}
+// static void help()
+// {
+//     cout <<
+//             "\nThis program is demonstration for ellipse fitting. The program finds\n"
+//             "contours and approximate it by ellipses.\n"
+//             "Call:\n"
+//             "./fitellipse [image_name -- Default stuff.jpg]\n" << endl;
+// }
 
 int sliderPos = 70;
 
@@ -47,7 +47,7 @@ int main( int argc, char** argv )
 
     imshow("source", image);
     namedWindow("result", 1);
-    
+
     // Create toolbars. HighGUI use.
     createTrackbar( "threshold", "result", &sliderPos, 255, processImage );
     processImage(0, 0);
@@ -63,7 +63,7 @@ void processImage(int /*h*/, void*)
 {
     vector<vector<Point> > contours;
     Mat bimage = image >= sliderPos;
-    
+
     findContours(bimage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
 
     Mat cimage = Mat::zeros(bimage.size(), CV_8UC3);
@@ -73,11 +73,11 @@ void processImage(int /*h*/, void*)
         size_t count = contours[i].size();
         if( count < 6 )
             continue;
-        
+
         Mat pointsf;
         Mat(contours[i]).convertTo(pointsf, CV_32F);
         RotatedRect box = fitEllipse(pointsf);
-        
+
         if( MAX(box.size.width, box.size.height) > MIN(box.size.width, box.size.height)*30 )
             continue;
         drawContours(cimage, contours, (int)i, Scalar::all(255), 1, 8);
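
processImage() above thresholds, finds contours and fits an ellipse to each sufficiently long one. The same steps with a fixed threshold in place of the trackbar (the threshold value and file name are placeholders):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
using namespace cv;

int main()
{
    Mat image = imread("stuff.jpg", 0);               // placeholder input
    if( image.empty() )
        return -1;

    Mat bimage = image >= 70;                         // fixed threshold instead of the trackbar
    std::vector<std::vector<Point> > contours;
    findContours(bimage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

    Mat cimage = Mat::zeros(bimage.size(), CV_8UC3);
    for( size_t i = 0; i < contours.size(); i++ )
    {
        if( contours[i].size() < 6 )                  // too few points for a stable fit
            continue;
        Mat pointsf;
        Mat(contours[i]).convertTo(pointsf, CV_32F);  // convert to float before fitting, as the sample does
        RotatedRect box = fitEllipse(pointsf);
        ellipse(cimage, box, Scalar(0, 0, 255), 1, CV_AA);
    }
    imshow("result", cimage);
    waitKey();
    return 0;
}
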
index eeab4d7..c86fdeb 100644 (file)
@@ -8,7 +8,7 @@
 
 using namespace cv;
 
-void help()
+static void help()
 {
     printf("Use the SURF descriptor for matching keypoints between 2 images\n");
     printf("Format: \n./generic_descriptor_match <image1> <image2> <algorithm> <XML params>\n");
@@ -22,7 +22,7 @@ int main(int argc, char** argv)
 {
     if (argc != 5)
     {
-       help();
+        help();
         return 0;
     }
 
@@ -41,14 +41,14 @@ int main(int argc, char** argv)
     //printf("Reading the images...\n");
     Mat img1 = imread(img1_name, CV_LOAD_IMAGE_GRAYSCALE);
     Mat img2 = imread(img2_name, CV_LOAD_IMAGE_GRAYSCALE);
-    
+
     // extract keypoints from the first image
     SURF surf_extractor(5.0e3);
     vector<KeyPoint> keypoints1;
 
     // printf("Extracting keypoints\n");
     surf_extractor(img1, Mat(), keypoints1);
-    
+
     printf("Extracted %d keypoints from the first image\n", (int)keypoints1.size());
 
     vector<KeyPoint> keypoints2;
index 2861595..31f75f3 100644 (file)
@@ -6,7 +6,7 @@
 using namespace std;
 using namespace cv;
 
-void help()
+static void help()
 {
     cout << "\nThis program demonstrates GrabCut segmentation -- select an object in a region\n"
                "and then grabcut will attempt to segment it out.\n"
@@ -36,7 +36,7 @@ const Scalar GREEN = Scalar(0,255,0);
 const int BGD_KEY = CV_EVENT_FLAG_CTRLKEY;
 const int FGD_KEY = CV_EVENT_FLAG_SHIFTKEY;
 
-void getBinMask( const Mat& comMask, Mat& binMask )
+static void getBinMask( const Mat& comMask, Mat& binMask )
 {
     if( comMask.empty() || comMask.type()!=CV_8UC1 )
         CV_Error( CV_StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" );
@@ -268,7 +268,7 @@ int GCApplication::nextIter()
 
 GCApplication gcapp;
 
-void on_mouse( int event, int x, int y, int flags, void* param )
+static void on_mouse( int event, int x, int y, int flags, void* param )
 {
     gcapp.mouseClick( event, x, y, flags, param );
 }
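
The GrabCut sample feeds a user rectangle to grabCut() and then keeps the pixels getBinMask() marks as (probable) foreground. A reduced sketch with a hard-coded rectangle instead of mouse input (the rectangle and file name are invented):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main()
{
    Mat image = imread("messi.jpg", 1);       // placeholder input
    if( image.empty() )
        return -1;

    // Hypothetical rectangle around the object; the sample gets it from the mouse.
    Rect rect(image.cols/4, image.rows/4, image.cols/2, image.rows/2);

    Mat mask, bgdModel, fgdModel;
    grabCut(image, mask, rect, bgdModel, fgdModel, 5, GC_INIT_WITH_RECT);

    // Keep pixels labelled (probably) foreground, the same test getBinMask() uses above.
    Mat binMask = (mask & 1);
    Mat result;
    image.copyTo(result, binMask);
    imshow("grabcut result", result);
    waitKey();
    return 0;
}
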
index 287929e..bc53fd1 100644 (file)
@@ -6,7 +6,7 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
     cout << "\nThis program demonstrates circle finding with the Hough transform.\n"
             "Usage:\n"
@@ -28,10 +28,10 @@ int main(int argc, char** argv)
     Mat cimg;
     medianBlur(img, img, 5);
     cvtColor(img, cimg, CV_GRAY2BGR);
-    
+
     vector<Vec3f> circles;
     HoughCircles(img, circles, CV_HOUGH_GRADIENT, 1, 10,
-                 100, 30, 1, 30 // change the last two parameters 
+                 100, 30, 1, 30 // change the last two parameters
                                 // (min_radius & max_radius) to detect larger circles
                  );
     for( size_t i = 0; i < circles.size(); i++ )
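
HoughCircles() above returns each detection as a Vec3f of centre and radius, and the last two parameters bound the radius range. A compact sketch of the detection and drawing loop (the file name is a placeholder and the parameter values only loosely mirror the sample's defaults):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
using namespace cv;

int main()
{
    Mat img = imread("board.jpg", 0);         // placeholder grayscale input
    if( img.empty() )
        return -1;

    Mat cimg;
    medianBlur(img, img, 5);                  // HoughCircles is noise-sensitive, smooth first
    cvtColor(img, cimg, CV_GRAY2BGR);

    std::vector<Vec3f> circles;               // each element is (x, y, radius)
    HoughCircles(img, circles, CV_HOUGH_GRADIENT, 1, 10,
                 100, 30, 1, 30);             // last two: min and max radius
    for( size_t i = 0; i < circles.size(); i++ )
    {
        Vec3f c = circles[i];
        circle(cimg, Point(cvRound(c[0]), cvRound(c[1])), cvRound(c[2]), Scalar(0, 0, 255), 2);
    }
    imshow("detected circles", cimg);
    waitKey();
    return 0;
}
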
index 3c152d9..b79ea2f 100644 (file)
@@ -6,7 +6,7 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
     cout << "\nThis program demonstrates line finding with the Hough transform.\n"
             "Usage:\n"
index 7277610..9a189cf 100644 (file)
@@ -32,151 +32,151 @@ bool selectObject = false;
 int trackObject = 0;
 int live = 1;
 
-void drawRectangle(Mat* image, Rect win) {
-       rectangle(*image, Point(win.x, win.y), Point(win.x + win.width, win.y
-                       + win.height), Scalar(0, 255, 0), 2, CV_AA);
+static void drawRectangle(Mat* image, Rect win) {
+    rectangle(*image, Point(win.x, win.y), Point(win.x + win.width, win.y
+            + win.height), Scalar(0, 255, 0), 2, CV_AA);
 }
 
-void onMouse(int event, int x, int y, int, void*) {
-       if (selectObject) {
-               selection.x = MIN(x, origin.x);
-               selection.y = MIN(y, origin.y);
-               selection.width = std::abs(x - origin.x);
-               selection.height = std::abs(y - origin.y);
-               selection &= Rect(0, 0, image.cols, image.rows);
-       }
-
-       switch (event) {
-       case CV_EVENT_LBUTTONDOWN:
-               origin = Point(x, y);
-               selection = Rect(x, y, 0, 0);
-               selectObject = true;
-               break;
-       case CV_EVENT_LBUTTONUP:
-               selectObject = false;
-               trackObject = -1;
-               break;
-       }
+static void onMouse(int event, int x, int y, int, void*) {
+    if (selectObject) {
+        selection.x = MIN(x, origin.x);
+        selection.y = MIN(y, origin.y);
+        selection.width = std::abs(x - origin.x);
+        selection.height = std::abs(y - origin.y);
+        selection &= Rect(0, 0, image.cols, image.rows);
+    }
+
+    switch (event) {
+    case CV_EVENT_LBUTTONDOWN:
+        origin = Point(x, y);
+        selection = Rect(x, y, 0, 0);
+        selectObject = true;
+        break;
+    case CV_EVENT_LBUTTONUP:
+        selectObject = false;
+        trackObject = -1;
+        break;
+    }
 }
 
-void help()
+static void help()
 {
-       printf("Usage: ./hytrack live or ./hytrack <test_file> \n\
+    printf("Usage: ./hytrack live or ./hytrack <test_file> \n\
 For Live View or Benchmarking. Read documentation is source code.\n\n");
 }
 
 
 int main(int argc, char** argv)
 {
-       if(argc != 2) {
-               help();
-               return 1;
-       }
-
-       FILE* f = 0;
-       VideoCapture cap;
-       char test_file[20] = "";
-
-       if (strcmp(argv[1], "live") != 0)
-       {
-               sprintf(test_file, "%s", argv[1]);
-               f = fopen(test_file, "r");
-               char vid[20];
-               int values_read = fscanf(f, "%s\n", vid);
-               CV_Assert(values_read == 1);
-               cout << "Benchmarking against " << vid << endl;
-               live = 0;
-       }
-       else
-       {
-               cap.open(0);
-               if (!cap.isOpened())
-               {
-                       cout << "Failed to open camera" << endl;
-                       return 0;
-               }
-               cout << "Opened camera" << endl;
+    if(argc != 2) {
+        help();
+        return 1;
+    }
+
+    FILE* f = 0;
+    VideoCapture cap;
+    char test_file[20] = "";
+
+    if (strcmp(argv[1], "live") != 0)
+    {
+        sprintf(test_file, "%s", argv[1]);
+        f = fopen(test_file, "r");
+        char vid[20];
+        int values_read = fscanf(f, "%s\n", vid);
+        CV_Assert(values_read == 1);
+        cout << "Benchmarking against " << vid << endl;
+        live = 0;
+    }
+    else
+    {
+        cap.open(0);
+        if (!cap.isOpened())
+        {
+            cout << "Failed to open camera" << endl;
+            return 0;
+        }
+        cout << "Opened camera" << endl;
         cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
         cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
-               cap >> frame;
-       }
-    
-       HybridTrackerParams params;
-       // motion model params
-       params.motion_model = CvMotionModel::LOW_PASS_FILTER;
-       params.low_pass_gain = 0.1f;
-       // mean shift params
-       params.ms_tracker_weight = 0.8f;
-       params.ms_params.tracking_type = CvMeanShiftTrackerParams::HS;
-       // feature tracking params
-       params.ft_tracker_weight = 0.2f;
-       params.ft_params.feature_type = CvFeatureTrackerParams::OPTICAL_FLOW;
-       params.ft_params.window_size = 0;
-
-       HybridTracker tracker(params);
-       char img_file[20] = "seqG/0001.png";
-       char img_file_num[10];
-       namedWindow("Win", 1);
-
-       setMouseCallback("Win", onMouse, 0);
-
-       int i = 0;
-       float w[4];
-       for(;;)
-       {
-               i++;
-               if (live)
-               {
-                       cap >> frame;
+        cap >> frame;
+    }
+
+    HybridTrackerParams params;
+    // motion model params
+    params.motion_model = CvMotionModel::LOW_PASS_FILTER;
+    params.low_pass_gain = 0.1f;
+    // mean shift params
+    params.ms_tracker_weight = 0.8f;
+    params.ms_params.tracking_type = CvMeanShiftTrackerParams::HS;
+    // feature tracking params
+    params.ft_tracker_weight = 0.2f;
+    params.ft_params.feature_type = CvFeatureTrackerParams::OPTICAL_FLOW;
+    params.ft_params.window_size = 0;
+
+    HybridTracker tracker(params);
+    char img_file[20] = "seqG/0001.png";
+    char img_file_num[10];
+    namedWindow("Win", 1);
+
+    setMouseCallback("Win", onMouse, 0);
+
+    int i = 0;
+    float w[4];
+    for(;;)
+    {
+        i++;
+        if (live)
+        {
+            cap >> frame;
             if( frame.empty() )
                 break;
-                       frame.copyTo(image);
-               }
-               else
-               {
-                       int values_read = fscanf(f, "%d %f %f %f %f\n", &i, &w[0], &w[1], &w[2], &w[3]);
-                       CV_Assert(values_read == 5);
-                       sprintf(img_file, "seqG/%04d.png", i);
-                       image = imread(img_file, CV_LOAD_IMAGE_COLOR);
+            frame.copyTo(image);
+        }
+        else
+        {
+            int values_read = fscanf(f, "%d %f %f %f %f\n", &i, &w[0], &w[1], &w[2], &w[3]);
+            CV_Assert(values_read == 5);
+            sprintf(img_file, "seqG/%04d.png", i);
+            image = imread(img_file, CV_LOAD_IMAGE_COLOR);
             if (image.empty())
-                           break;
-                       selection = Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
+                break;
+            selection = Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
                              cvRound(w[2]*image.cols), cvRound(w[3]*image.rows));
-               }
-
-               sprintf(img_file_num, "Frame: %d", i);
-               putText(image, img_file_num, Point(10, image.rows-20), FONT_HERSHEY_PLAIN, 0.75, Scalar(255, 255, 255));
-               if (!image.empty())
-               {
-
-                       if (trackObject < 0)
-                       {
-                               tracker.newTracker(image, selection);
-                               trackObject = 1;
-                       }
-
-                       if (trackObject)
-                       {
-                               tracker.updateTracker(image);
-                               drawRectangle(&image, tracker.getTrackingWindow());
-                       }
-
-                       if (selectObject && selection.width > 0 && selection.height > 0)
-                       {
-                               Mat roi(image, selection);
-                               bitwise_not(roi, roi);
-                       }
-
-                       drawRectangle(&image, Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
+        }
+
+        sprintf(img_file_num, "Frame: %d", i);
+        putText(image, img_file_num, Point(10, image.rows-20), FONT_HERSHEY_PLAIN, 0.75, Scalar(255, 255, 255));
+        if (!image.empty())
+        {
+
+            if (trackObject < 0)
+            {
+                tracker.newTracker(image, selection);
+                trackObject = 1;
+            }
+
+            if (trackObject)
+            {
+                tracker.updateTracker(image);
+                drawRectangle(&image, tracker.getTrackingWindow());
+            }
+
+            if (selectObject && selection.width > 0 && selection.height > 0)
+            {
+                Mat roi(image, selection);
+                bitwise_not(roi, roi);
+            }
+
+            drawRectangle(&image, Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
                                        cvRound(w[2]*image.cols), cvRound(w[3]*image.rows)));
-                       imshow("Win", image);
+            imshow("Win", image);
 
-                       waitKey(100);
-               }
-               else
-                       i = 0;
-       }
+            waitKey(100);
+        }
+        else
+            i = 0;
+    }
 
-       fclose(f);
-       return 0;
+    fclose(f);
+    return 0;
 }
index e4805c4..5647f1c 100644 (file)
@@ -8,14 +8,14 @@ using namespace cv; // all the new API is put into "cv" namespace. Export its co
 using namespace std;
 using namespace cv::flann;
 
-void help()
+static void help()
 {
-       cout <<
-       "\nThis program shows how to use cv::Mat and IplImages converting back and forth.\n"
-       "It shows reading of images, converting to planes and merging back, color conversion\n"
-       "and also iterating through pixels.\n"
-       "Call:\n"
-       "./image [image-name Default: lena.jpg]\n" << endl;
+    cout <<
+    "\nThis program shows how to use cv::Mat and IplImages converting back and forth.\n"
+    "It shows reading of images, converting to planes and merging back, color conversion\n"
+    "and also iterating through pixels.\n"
+    "Call:\n"
+    "./image [image-name Default: lena.jpg]\n" << endl;
 }
 
 // enable/disable use of mixed API in the code below.
@@ -23,7 +23,7 @@ void help()
 
 int main( int argc, char** argv )
 {
-       help();
+    help();
     const char* imagename = argc > 1 ? argv[1] : "lena.jpg";
 #if DEMO_MIXED_API_USE
     Ptr<IplImage> iplimg = cvLoadImage(imagename); // Ptr<T> is safe ref-conting pointer class
@@ -43,16 +43,16 @@ int main( int argc, char** argv )
         return -1;
     }
 #endif
-    
+
     if( !img.data ) // check if the image has been loaded properly
         return -1;
-    
+
     Mat img_yuv;
     cvtColor(img, img_yuv, CV_BGR2YCrCb); // convert image to YUV color space. The output image will be created automatically
-    
+
     vector<Mat> planes; // Vector is template vector class, similar to STL's vector. It can store matrices too.
     split(img_yuv, planes); // split the image into separate color planes
-    
+
 #if 1
     // method 1. process Y plane using an iterator
     MatIterator_<uchar> it = planes[0].begin<uchar>(), it_end = planes[0].end<uchar>();
@@ -61,7 +61,7 @@ int main( int argc, char** argv )
         double v = *it*1.7 + rand()%21-10;
         *it = saturate_cast<uchar>(v*v/255.);
     }
-    
+
     // method 2. process the first chroma plane using pre-stored row pointer.
     // method 3. process the second chroma plane using individual element access
     for( int y = 0; y < img_yuv.rows; y++ )
@@ -74,13 +74,13 @@ int main( int argc, char** argv )
             Vxy = saturate_cast<uchar>((Vxy-128)/2 + 128);
         }
     }
-    
+
 #else
     Mat noise(img.size(), CV_8U); // another Mat constructor; allocates a matrix of the specified size and type
     randn(noise, Scalar::all(128), Scalar::all(20)); // fills the matrix with normally distributed random values;
                                                      // there is also randu() for uniformly distributed random number generation
     GaussianBlur(noise, noise, Size(3, 3), 0.5, 0.5); // blur the noise a bit, kernel size is 3x3 and both sigma's are set to 0.5
-    
+
     const double brightness_gain = 0;
     const double contrast_gain = 1.7;
 #if DEMO_MIXED_API_USE
@@ -98,16 +98,16 @@ int main( int argc, char** argv )
     // alternative form of cv::convertScale if we know the datatype at compile time ("uchar" here).
     // This expression will not create any temporary arrays and should be almost as fast as the above variant
     planes[2] = Mat_<uchar>(planes[2]*color_scale + 128*(1-color_scale));
-    
+
     // Mat::mul replaces cvMul(). Again, no temporary arrays are created in case of simple expressions.
     planes[0] = planes[0].mul(planes[0], 1./255);
 #endif
-    
+
     // now merge the results back
     merge(planes, img_yuv);
     // and produce the output RGB image
     cvtColor(img_yuv, img, CV_YCrCb2BGR);
-    
+
     // this is counterpart for cvNamedWindow
     namedWindow("image with grain", CV_WINDOW_AUTOSIZE);
 #if DEMO_MIXED_API_USE
@@ -118,7 +118,7 @@ int main( int argc, char** argv )
     imshow("image with grain", img);
 #endif
     waitKey();
-    
+
     return 0;
     // all the memory will automatically be released by Vector<>, Mat and Ptr<> destructors.
 }
index 172f7fd..96114a9 100644 (file)
@@ -12,10 +12,10 @@ using std::endl;
 
 using namespace cv;
 
-void help(char** av)
+static void help(char** av)
 {
   cout << "\nThis creates a yaml or xml list of files from the command line args\n"
-                 "usage:\n./" << av[0] << " imagelist.yaml *.png\n"
+      "usage:\n./" << av[0] << " imagelist.yaml *.png\n"
       << "Try using different extensions.(e.g. yaml yml xml xml.gz etc...)\n"
       << "This will serialize this list of images or whatever with opencv's FileStorage framework" << endl;
 }
index 32a2844..223b2de 100644 (file)
@@ -7,13 +7,13 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
     cout << "\nCool inpainging demo. Inpainting repairs damage to images by floodfilling the damage \n"
-               << "with surrounding image areas.\n"
-               "Using OpenCV version %s\n" << CV_VERSION << "\n"
-       "Usage:\n"
-       "./inpaint [image_name -- Default fruits.jpg]\n" << endl;
+            << "with surrounding image areas.\n"
+            "Using OpenCV version %s\n" << CV_VERSION << "\n"
+    "Usage:\n"
+        "./inpaint [image_name -- Default fruits.jpg]\n" << endl;
 
     cout << "Hot keys: \n"
         "\tESC - quit the program\n"
@@ -25,7 +25,7 @@ void help()
 Mat img, inpaintMask;
 Point prevPt(-1,-1);
 
-void onMouse( int event, int x, int y, int flags, void* )
+static void onMouse( int event, int x, int y, int flags, void* )
 {
     if( event == CV_EVENT_LBUTTONUP || !(flags & CV_EVENT_FLAG_LBUTTON) )
         prevPt = Point(-1,-1);
@@ -55,7 +55,7 @@ int main( int argc, char** argv )
     }
 
     help();
-    
+
     namedWindow( "image", 1 );
 
     img = img0.clone();
index 95bfa2c..9c6a900 100644 (file)
@@ -10,9 +10,9 @@ static inline Point calcPoint(Point2f center, double R, double angle)
     return center + Point2f((float)cos(angle), (float)-sin(angle))*(float)R;
 }
 
-void help()
+static void help()
 {
-       printf( "\nExamle of c calls to OpenCV's Kalman filter.\n"
+    printf( "\nExamle of c calls to OpenCV's Kalman filter.\n"
 "   Tracking of rotating point.\n"
 "   Rotation speed is constant.\n"
 "   Both state and measurements vectors are 1D (a point angle),\n"
@@ -21,10 +21,10 @@ void help()
 "   the real and the measured points are connected with red line segment.\n"
 "   (if Kalman filter works correctly,\n"
 "    the yellow segment should be shorter than the red one).\n"
-                       "\n"
+            "\n"
 "   Pressing any key (except ESC) will reset the tracking with a different speed.\n"
 "   Pressing ESC will stop the program.\n"
-                       );
+            );
 }
 
 int main(int, char**)
index 7e52ca4..97de6a0 100644 (file)
@@ -5,14 +5,14 @@
 using namespace cv;
 using namespace std;
 
-void help()
-{
-       cout << "\nThis program demonstrates kmeans clustering.\n"
-                       "It generates an image with random points, then assigns a random number of cluster\n"
-                       "centers and uses kmeans to move those cluster centers to their representitive location\n"
-                       "Call\n"
-                       "./kmeans\n" << endl;
-}
+// static void help()
+// {
+//     cout << "\nThis program demonstrates kmeans clustering.\n"
+//             "It generates an image with random points, then assigns a random number of cluster\n"
+//             "centers and uses kmeans to move those cluster centers to their representitive location\n"
+//             "Call\n"
+//             "./kmeans\n" << endl;
+// }
 
 int main( int /*argc*/, char** /*argv*/ )
 {
@@ -25,7 +25,7 @@ int main( int /*argc*/, char** /*argv*/ )
         Scalar(255,0,255),
         Scalar(0,255,255)
     };
-        
+
     Mat img(500, 500, CV_8UC3);
     RNG rng(12345);
 
@@ -34,7 +34,7 @@ int main( int /*argc*/, char** /*argv*/ )
         int k, clusterCount = rng.uniform(2, MAX_CLUSTERS+1);
         int i, sampleCount = rng.uniform(1, 1001);
         Mat points(sampleCount, 1, CV_32FC2), labels;
-        
+
         clusterCount = MIN(clusterCount, sampleCount);
         Mat centers(clusterCount, 1, points.type());
 
@@ -52,7 +52,7 @@ int main( int /*argc*/, char** /*argv*/ )
 
         randShuffle(points, 1, &rng);
 
-        kmeans(points, clusterCount, labels, 
+        kmeans(points, clusterCount, labels,
                TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0),
                3, KMEANS_PP_CENTERS, centers);
 
index 9b2f81f..50cb846 100644 (file)
@@ -8,13 +8,13 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
-       cout <<
-                       "\nThis program demonstrates Laplace point/edge detection using OpenCV function Laplacian()\n"
-                       "It captures from the camera of your choice: 0, 1, ... default 0\n"
-                       "Call:\n"
-                       "./laplace [camera #, default 0]\n" << endl;
+    cout <<
+            "\nThis program demonstrates Laplace point/edge detection using OpenCV function Laplacian()\n"
+            "It captures from the camera of your choice: 0, 1, ... default 0\n"
+            "Call:\n"
+            "./laplace [camera #, default 0]\n" << endl;
 }
 
 int sigma = 3;
@@ -54,7 +54,7 @@ int main( int argc, char** argv )
     createTrackbar( "Sigma", "Laplacian", &sigma, 15, 0 );
 
     Mat smoothed, laplace, result;
-    
+
     for(;;)
     {
         Mat frame;
@@ -69,7 +69,7 @@ int main( int argc, char** argv )
             blur(frame, smoothed, Size(ksize, ksize));
         else
             medianBlur(frame, smoothed, ksize);
-        
+
         Laplacian(smoothed, laplace, CV_16S, 5);
         convertScaleAbs(laplace, result, (sigma+1)*0.25);
         imshow("Laplacian", result);
index 07af7c9..d210512 100644 (file)
@@ -9,8 +9,8 @@
 #include <dirent.h>
 #endif
 
-#ifdef HAVE_CVCONFIG_H 
-#include <cvconfig.h> 
+#ifdef HAVE_CVCONFIG_H
+#include <cvconfig.h>
 #endif
 
 #ifdef HAVE_TBB
@@ -20,7 +20,7 @@
 using namespace std;
 using namespace cv;
 
-void help()
+static void help()
 {
     cout << "This program demonstrated the use of the latentSVM detector." << endl <<
             "It reads in a trained object models and then uses them to detect the objects in an images." << endl <<
@@ -36,7 +36,7 @@ void help()
             endl;
 }
 
-void detectAndDrawObjects( Mat& image, LatentSvmDetector& detector, const vector<Scalar>& colors, float overlapThreshold, int numThreads )
+static void detectAndDrawObjects( Mat& image, LatentSvmDetector& detector, const vector<Scalar>& colors, float overlapThreshold, int numThreads )
 {
     vector<LatentSvmDetector::ObjectDetection> detections;
 
@@ -63,7 +63,7 @@ void detectAndDrawObjects( Mat& image, LatentSvmDetector& detector, const vector
     }
 }
 
-void readDirectory( const string& directoryName, vector<string>& filenames, bool addDirectoryName=true )
+static void readDirectory( const string& directoryName, vector<string>& filenames, bool addDirectoryName=true )
 {
     filenames.clear();
 
@@ -71,8 +71,8 @@ void readDirectory( const string& directoryName, vector<string>& filenames, bool
     struct _finddata_t s_file;
     string str = directoryName + "\\*.*";
 
-       intptr_t h_file = _findfirst( str.c_str(), &s_file );
-       if( h_file != static_cast<intptr_t>(-1.0) )
+    intptr_t h_file = _findfirst( str.c_str(), &s_file );
+    if( h_file != static_cast<intptr_t>(-1.0) )
     {
         do
         {
@@ -104,13 +104,13 @@ void readDirectory( const string& directoryName, vector<string>& filenames, bool
 
 int main(int argc, char* argv[])
 {
-       help();
+    help();
 
     string images_folder, models_folder;
     float overlapThreshold = 0.2f;
     int numThreads = -1;
     if( argc > 2 )
-       {
+    {
         images_folder = argv[1];
         models_folder = argv[2];
         if( argc > 3 ) overlapThreshold = (float)atof(argv[3]);
@@ -121,7 +121,7 @@ int main(int argc, char* argv[])
         }
 
         if( argc > 4 ) numThreads = atoi(argv[4]);
-       }
+    }
 
     vector<string> images_filenames, models_filenames;
     readDirectory( images_folder, images_filenames );
@@ -166,6 +166,6 @@ int main(int argc, char* argv[])
                 exit(0);
         }
     }
-    
-       return 0;
+
+    return 0;
 }
index e4d6666..d006b49 100644 (file)
@@ -7,24 +7,24 @@
 
 */
 
-void help()
+static void help()
 {
-       printf("\nThe sample demonstrates how to train Random Trees classifier\n"
-       "(or Boosting classifier, or MLP, or Knearest, or Nbayes, or Support Vector Machines - see main()) using the provided dataset.\n"
-       "\n"
-       "We use the sample database letter-recognition.data\n"
-       "from UCI Repository, here is the link:\n"
-       "\n"
-       "Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).\n"
-       "UCI Repository of machine learning databases\n"
-       "[http://www.ics.uci.edu/~mlearn/MLRepository.html].\n"
-       "Irvine, CA: University of California, Department of Information and Computer Science.\n"
-       "\n"
-       "The dataset consists of 20000 feature vectors along with the\n"
-       "responses - capital latin letters A..Z.\n"
-       "The first 16000 (10000 for boosting)) samples are used for training\n"
-       "and the remaining 4000 (10000 for boosting) - to test the classifier.\n"
-       "======================================================\n");
+    printf("\nThe sample demonstrates how to train Random Trees classifier\n"
+    "(or Boosting classifier, or MLP, or Knearest, or Nbayes, or Support Vector Machines - see main()) using the provided dataset.\n"
+    "\n"
+    "We use the sample database letter-recognition.data\n"
+    "from UCI Repository, here is the link:\n"
+    "\n"
+    "Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998).\n"
+    "UCI Repository of machine learning databases\n"
+    "[http://www.ics.uci.edu/~mlearn/MLRepository.html].\n"
+    "Irvine, CA: University of California, Department of Information and Computer Science.\n"
+    "\n"
+    "The dataset consists of 20000 feature vectors along with the\n"
+    "responses - capital latin letters A..Z.\n"
+    "The first 16000 (10000 for boosting)) samples are used for training\n"
+    "and the remaining 4000 (10000 for boosting) - to test the classifier.\n"
+    "======================================================\n");
     printf("\nThis is letter recognition sample.\n"
             "The usage: letter_recog [-data <path to letter-recognition.data>] \\\n"
             "  [-save <output XML file for the classifier>] \\\n"
@@ -312,7 +312,7 @@ int build_boost_classifier( char* data_filename,
     }
 
     temp_sample = cvCreateMat( 1, var_count + 1, CV_32F );
-    weak_responses = cvCreateMat( 1, boost.get_weak_predictors()->total, CV_32F ); 
+    weak_responses = cvCreateMat( 1, boost.get_weak_predictors()->total, CV_32F );
 
     // compute prediction error on train and test data
     for( i = 0; i < nsamples_all; i++ )
@@ -548,7 +548,7 @@ int build_knearest_classifier( char* data_filename, int K )
         }
     }
 
-    printf("true_resp = %f%%\tavg accuracy = %f%%\n", (float)true_resp / (nsamples_all - ntrain_samples) * 100, 
+    printf("true_resp = %f%%\tavg accuracy = %f%%\n", (float)true_resp / (nsamples_all - ntrain_samples) * 100,
                                                       (float)accuracy / (nsamples_all - ntrain_samples) / K * 100);
 
     delete[] true_results;
@@ -674,15 +674,15 @@ int build_svm_classifier( char* data_filename )
     for (int j = ntrain_samples; j < nsamples_all; j++)
     {
         float *s = data->data.fl + j * var_count;
-        
+
         for (int i = 0; i < var_count; i++)
-        {   
+        {
             sample.data.fl[(j - ntrain_samples) * var_count + i] = s[i];
         }
         true_results[j - ntrain_samples] = responses->data.fl[j];
     }
     CvMat *result = cvCreateMat(1, nsamples_all - ntrain_samples, CV_32FC1);
-    
+
     printf("Classification (may take a few minutes)...\n");
     svm.predict(&sample, result);
 
@@ -692,9 +692,9 @@ int build_svm_classifier( char* data_filename )
         if (result->data.fl[i] == true_results[i])
             true_resp++;
     }
-    
+
     printf("true_resp = %f%%\n", (float)true_resp / (nsamples_all - ntrain_samples) * 100);
-    
+
     cvReleaseMat( &train_resp );
     cvReleaseMat( &result );
     cvReleaseMat( &data );
@@ -738,17 +738,17 @@ int main( int argc, char *argv[] )
             method = 2;
         }
         else if ( strcmp(argv[i], "-knearest") == 0)
-       {
-           method = 3;
-       }
-       else if ( strcmp(argv[i], "-nbayes") == 0)
-       {
-           method = 4;
-       }
-       else if ( strcmp(argv[i], "-svm") == 0)
-       {
-           method = 5;
-       }
+    {
+        method = 3;
+    }
+    else if ( strcmp(argv[i], "-nbayes") == 0)
+    {
+        method = 4;
+    }
+    else if ( strcmp(argv[i], "-svm") == 0)
+    {
+        method = 5;
+    }
         else
             break;
     }
@@ -768,7 +768,7 @@ int main( int argc, char *argv[] )
         build_svm_classifier( data_filename ):
         -1) < 0)
     {
-       help();
+        help();
     }
     return 0;
 }
index 8677717..8f0c71a 100644 (file)
 // Function prototypes
 void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& chain, double f);
 
-std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates, 
+std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
                                       int num_modalities, cv::Point offset, cv::Size size,
                                       cv::Mat& mask, cv::Mat& dst);
 
-void templateConvexHull(const std::vector<cv::linemod::Template>& templates, 
+void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
                         int num_modalities, cv::Point offset, cv::Size size,
                         cv::Mat& dst);
 
-void drawResponse(const std::vector<cv::linemod::Template>& templates, 
+void drawResponse(const std::vector<cv::linemod::Template>& templates,
                   int num_modalities, cv::Mat& dst, cv::Point offset, int T);
 
 cv::Mat displayQuantized(const cv::Mat& quantized);
@@ -54,7 +54,7 @@ private:
     m_x = a_x;
     m_y = a_y;
   }
-  
+
   static int m_event;
   static int m_x;
   static int m_y;
@@ -63,7 +63,7 @@ int Mouse::m_event;
 int Mouse::m_x;
 int Mouse::m_y;
 
-void help()
+static void help()
 {
   printf("Usage: openni_demo [templates.yml]\n\n"
          "Place your object on a planar, featureless surface. With the mouse,\n"
@@ -111,7 +111,7 @@ private:
 };
 
 // Functions to store detector and templates in single XML/YAML file
-cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
+static cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
 {
   cv::Ptr<cv::linemod::Detector> detector = new cv::linemod::Detector;
   cv::FileStorage fs(filename, cv::FileStorage::READ);
@@ -124,7 +124,7 @@ cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
   return detector;
 }
 
-void writeLinemod(const cv::Ptr<cv::linemod::Detector>& detector, const std::string& filename)
+static void writeLinemod(const cv::Ptr<cv::linemod::Detector>& detector, const std::string& filename)
 {
   cv::FileStorage fs(filename, cv::FileStorage::WRITE);
   detector->write(fs);
@@ -207,7 +207,7 @@ int main(int argc, char * argv[])
     capture.grab();
     capture.retrieve(depth, CV_CAP_OPENNI_DEPTH_MAP);
     capture.retrieve(color, CV_CAP_OPENNI_BGR_IMAGE);
-    
+
     std::vector<cv::Mat> sources;
     sources.push_back(color);
     sources.push_back(depth);
@@ -235,7 +235,7 @@ int main(int argc, char * argv[])
         subtractPlane(depth, mask, chain, focal_length);
 
         cv::imshow("mask", mask);
-        
+
         // Extract template
         std::string class_id = cv::format("class%d", num_classes);
         cv::Rect bb;
@@ -267,7 +267,7 @@ int main(int argc, char * argv[])
 
     int classes_visited = 0;
     std::set<std::string> visited;
-    
+
     for (int i = 0; (i < (int)matches.size()) && (classes_visited < num_classes); ++i)
     {
       cv::linemod::Match m = matches[i];
@@ -281,7 +281,7 @@ int main(int argc, char * argv[])
           printf("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
                  m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);
         }
-        
+
         // Draw matching template
         const std::vector<cv::linemod::Template>& templates = detector->getTemplates(m.class_id, m.template_id);
         drawResponse(templates, num_modalities, display, cv::Point(m.x, m.y), detector->getT(0));
@@ -290,7 +290,7 @@ int main(int argc, char * argv[])
         {
           /// @todo Online learning possibly broken by new gradient feature extraction,
           /// which assumes an accurate object outline.
-          
+
           // Compute masks based on convex hull of matched template
           cv::Mat color_mask, depth_mask;
           std::vector<CvPoint> chain = maskFromTemplate(templates, num_modalities,
@@ -376,11 +376,11 @@ int main(int argc, char * argv[])
   return 0;
 }
 
-void reprojectPoints(const std::vector<cv::Point3d>& proj, std::vector<cv::Point3d>& real, double f)
+static void reprojectPoints(const std::vector<cv::Point3d>& proj, std::vector<cv::Point3d>& real, double f)
 {
   real.resize(proj.size());
   double f_inv = 1.0 / f;
-  
+
   for (int i = 0; i < (int)proj.size(); ++i)
   {
     double Z = proj[i].z;
@@ -390,7 +390,7 @@ void reprojectPoints(const std::vector<cv::Point3d>& proj, std::vector<cv::Point
   }
 }
 
-void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::vector<CvPoint> & a_chain, double f)
+static void filterPlane(IplImage * ap_depth, std::vector<IplImage *> & a_masks, std::vector<CvPoint> & a_chain, double f)
 {
   const int l_num_cost_pts = 200;
 
@@ -576,7 +576,7 @@ void subtractPlane(const cv::Mat& depth, cv::Mat& mask, std::vector<CvPoint>& ch
   filterPlane(&depth_ipl, tmp, chain, f);
 }
 
-std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates, 
+std::vector<CvPoint> maskFromTemplate(const std::vector<cv::linemod::Template>& templates,
                                       int num_modalities, cv::Point offset, cv::Size size,
                                       cv::Mat& mask, cv::Mat& dst)
 {
@@ -629,7 +629,7 @@ cv::Mat displayQuantized(const cv::Mat& quantized)
   {
     const uchar* quant_r = quantized.ptr(r);
     cv::Vec3b* color_r = color.ptr<cv::Vec3b>(r);
-    
+
     for (int c = 0; c < quantized.cols; ++c)
     {
       cv::Vec3b& bgr = color_r[c];
@@ -649,12 +649,12 @@ cv::Mat displayQuantized(const cv::Mat& quantized)
       }
     }
   }
-  
+
   return color;
 }
 
 // Adapted from cv_line_template::convex_hull
-void templateConvexHull(const std::vector<cv::linemod::Template>& templates, 
+void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
                         int num_modalities, cv::Point offset, cv::Size size,
                         cv::Mat& dst)
 {
@@ -667,7 +667,7 @@ void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
       points.push_back(cv::Point(f.x, f.y) + offset);
     }
   }
-  
+
   std::vector<cv::Point> hull;
   cv::convexHull(points, hull);
 
@@ -677,7 +677,7 @@ void templateConvexHull(const std::vector<cv::linemod::Template>& templates,
   cv::fillPoly(dst, &hull_pts, &hull_count, 1, cv::Scalar(255));
 }
 
-void drawResponse(const std::vector<cv::linemod::Template>& templates, 
+void drawResponse(const std::vector<cv::linemod::Template>& templates,
                   int num_modalities, cv::Mat& dst, cv::Point offset, int T)
 {
   static const cv::Scalar COLORS[5] = { CV_RGB(0, 0, 255),
@@ -692,7 +692,7 @@ void drawResponse(const std::vector<cv::linemod::Template>& templates,
     // box around it and chose the display color based on that response. Here
     // the display color just depends on the modality.
     cv::Scalar color = COLORS[m];
-    
+
     for (int i = 0; i < (int)templates[m].features.size(); ++i)
     {
       cv::linemod::Feature f = templates[m].features[i];
index 4ec6cf8..f476ce8 100644 (file)
@@ -8,12 +8,12 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
     // print a welcome message, and the OpenCV version
     cout << "\nThis is a demo of Lukas-Kanade optical flow lkdemo(),\n"
-               "Using OpenCV version %s\n" << CV_VERSION << "\n"
-               << endl;
+            "Using OpenCV version %s\n" << CV_VERSION << "\n"
+            << endl;
 
     cout << "\nHot keys: \n"
             "\tESC - quit the program\n"
@@ -26,7 +26,7 @@ void help()
 Point2f pt;
 bool addRemovePt = false;
 
-void onMouse( int event, int x, int y, int /*flags*/, void* /*param*/ )
+static void onMouse( int event, int x, int y, int /*flags*/, void* /*param*/ )
 {
     if( event == CV_EVENT_LBUTTONDOWN )
     {
@@ -40,11 +40,11 @@ int main( int argc, char** argv )
     VideoCapture cap;
     TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03);
     Size subPixWinSize(10,10), winSize(31,31);
-    
+
     const int MAX_COUNT = 500;
     bool needToInit = false;
     bool nightMode = false;
-    
+
     if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
         cap.open(argc == 2 ? argv[1][0] - '0' : 0);
     else if( argc == 2 )
@@ -63,7 +63,7 @@ int main( int argc, char** argv )
 
     Mat gray, prevGray, image;
     vector<Point2f> points[2];
-    
+
     for(;;)
     {
         Mat frame;
@@ -72,7 +72,7 @@ int main( int argc, char** argv )
             break;
 
         frame.copyTo(image);
-        cvtColor(image, gray, CV_BGR2GRAY); 
+        cvtColor(image, gray, CV_BGR2GRAY);
 
         if( nightMode )
             image = Scalar::all(0);
@@ -142,7 +142,7 @@ int main( int argc, char** argv )
         default:
             ;
         }
-        
+
         std::swap(points[1], points[0]);
         swap(prevGray, gray);
     }
index 619959b..bfe99ce 100644 (file)
@@ -13,7 +13,7 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
     cout << "LogPolar Blind Spot Model sample.\nShortcuts:"
         "\n\tn for nearest pixel technique"
@@ -22,7 +22,7 @@ void help()
         "\n\ta for adjacent receptive fields"
         "\n\tq or ESC quit\n";
 }
-    
+
 int main(int argc, char** argv)
 {
     Mat img = imread(argc > 1 ? argv[1] : "lena.jpg",1); // open the image
@@ -32,12 +32,12 @@ int main(int argc, char** argv)
         return 0;
     }
     help();
-    
+
     Size s=img.size();
     int w=s.width, h=s.height;
     int ro0=3; //radius of the blind spot
-    int R=120;  //number of rings   
-    
+    int R=120;  //number of rings
+
     //Creation of the four different objects that implement the four log-polar transformations
     //Off-line computation
     Point2i center(w/2,h/2);
@@ -60,13 +60,13 @@ int main(int argc, char** argv)
             Retinal=nearest.to_cartesian(Cortical);
         }else if (wk=='b'){
             Cortical=bilin.to_cortical(img);
-            Retinal=bilin.to_cartesian(Cortical);           
+            Retinal=bilin.to_cartesian(Cortical);
         }else if (wk=='o'){
             Cortical=overlap.to_cortical(img);
-            Retinal=overlap.to_cartesian(Cortical);         
+            Retinal=overlap.to_cartesian(Cortical);
         }else if (wk=='a'){
             Cortical=adj.to_cortical(img);
-            Retinal=adj.to_cartesian(Cortical); 
+            Retinal=adj.to_cartesian(Cortical);
         }
 
         imshow("Cartesian", img);
index fb04f7a..42e89fb 100644 (file)
@@ -6,53 +6,53 @@
 
 using namespace cv;
 
-void help()
+static void help()
 {
-       printf("\nThis program demonstrates using features2d detector, descriptor extractor and simple matcher\n"
-                       "Using the SURF desriptor:\n"
-                       "\n"
-                       "Usage:\n matcher_simple <image1> <image2>\n");
+    printf("\nThis program demonstrates using features2d detector, descriptor extractor and simple matcher\n"
+            "Using the SURF desriptor:\n"
+            "\n"
+            "Usage:\n matcher_simple <image1> <image2>\n");
 }
 
 int main(int argc, char** argv)
 {
-       if(argc != 3)
-       {
-               help();
-               return -1;
-       }
-
-       Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
-       Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
-       if(img1.empty() || img2.empty())
-       {
-               printf("Can't read one of the images\n");
-               return -1;
-       }
-
-       // detecting keypoints
-       SurfFeatureDetector detector(400);
-       vector<KeyPoint> keypoints1, keypoints2;
-       detector.detect(img1, keypoints1);
-       detector.detect(img2, keypoints2);
-
-       // computing descriptors
-       SurfDescriptorExtractor extractor;
-       Mat descriptors1, descriptors2;
-       extractor.compute(img1, keypoints1, descriptors1);
-       extractor.compute(img2, keypoints2, descriptors2);
-
-       // matching descriptors
-       BFMatcher matcher(NORM_L2);
+    if(argc != 3)
+    {
+        help();
+        return -1;
+    }
+
+    Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
+    Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
+    if(img1.empty() || img2.empty())
+    {
+        printf("Can't read one of the images\n");
+        return -1;
+    }
+
+    // detecting keypoints
+    SurfFeatureDetector detector(400);
+    vector<KeyPoint> keypoints1, keypoints2;
+    detector.detect(img1, keypoints1);
+    detector.detect(img2, keypoints2);
+
+    // computing descriptors
+    SurfDescriptorExtractor extractor;
+    Mat descriptors1, descriptors2;
+    extractor.compute(img1, keypoints1, descriptors1);
+    extractor.compute(img2, keypoints2, descriptors2);
+
+    // matching descriptors
+    BFMatcher matcher(NORM_L2);
     vector<DMatch> matches;
     matcher.match(descriptors1, descriptors2, matches);
 
-       // drawing the results
-       namedWindow("matches", 1);
-       Mat img_matches;
-       drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
-       imshow("matches", img_matches);
-       waitKey(0);
+    // drawing the results
+    namedWindow("matches", 1);
+    Mat img_matches;
+    drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
+    imshow("matches", img_matches);
+    waitKey(0);
 
-       return 0;
+    return 0;
 }
index ce77637..ed98d59 100644 (file)
@@ -15,7 +15,7 @@ const string defaultQueryImageName = "../../opencv/samples/cpp/matching_to_many_
 const string defaultFileWithTrainImages = "../../opencv/samples/cpp/matching_to_many_images/train/trainImages.txt";
 const string defaultDirToSaveResImages = "../../opencv/samples/cpp/matching_to_many_images/results";
 
-void printPrompt( const string& applName )
+static void printPrompt( const string& applName )
 {
     cout << "/*\n"
          << " * This is a sample on matching descriptors detected on one image to descriptors detected in image set.\n"
@@ -36,7 +36,7 @@ void printPrompt( const string& applName )
          << defaultQueryImageName << " " << defaultFileWithTrainImages << " " << defaultDirToSaveResImages << endl;
 }
 
-void maskMatchesByTrainImgIdx( const vector<DMatch>& matches, int trainImgIdx, vector<char>& mask )
+static void maskMatchesByTrainImgIdx( const vector<DMatch>& matches, int trainImgIdx, vector<char>& mask )
 {
     mask.resize( matches.size() );
     fill( mask.begin(), mask.end(), 0 );
@@ -47,7 +47,7 @@ void maskMatchesByTrainImgIdx( const vector<DMatch>& matches, int trainImgIdx, v
     }
 }
 
-void readTrainFilenames( const string& filename, string& dirName, vector<string>& trainFilenames )
+static void readTrainFilenames( const string& filename, string& dirName, vector<string>& trainFilenames )
 {
     trainFilenames.clear();
 
@@ -73,7 +73,7 @@ void readTrainFilenames( const string& filename, string& dirName, vector<string>
     file.close();
 }
 
-bool createDetectorDescriptorMatcher( const string& detectorType, const string& descriptorType, const string& matcherType,
+static bool createDetectorDescriptorMatcher( const string& detectorType, const string& descriptorType, const string& matcherType,
                                       Ptr<FeatureDetector>& featureDetector,
                                       Ptr<DescriptorExtractor>& descriptorExtractor,
                                       Ptr<DescriptorMatcher>& descriptorMatcher )
@@ -91,7 +91,7 @@ bool createDetectorDescriptorMatcher( const string& detectorType, const string&
     return isCreated;
 }
 
-bool readImages( const string& queryImageName, const string& trainFilename,
+static bool readImages( const string& queryImageName, const string& trainFilename,
                  Mat& queryImage, vector <Mat>& trainImages, vector<string>& trainImageNames )
 {
     cout << "< Reading the images..." << endl;
@@ -131,7 +131,7 @@ bool readImages( const string& queryImageName, const string& trainFilename,
     return true;
 }
 
-void detectKeypoints( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
+static void detectKeypoints( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
                       const vector<Mat>& trainImages, vector<vector<KeyPoint> >& trainKeypoints,
                       Ptr<FeatureDetector>& featureDetector )
 {
@@ -141,14 +141,14 @@ void detectKeypoints( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
     cout << ">" << endl;
 }
 
-void computeDescriptors( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, Mat& queryDescriptors,
+static void computeDescriptors( const Mat& queryImage, vector<KeyPoint>& queryKeypoints, Mat& queryDescriptors,
                          const vector<Mat>& trainImages, vector<vector<KeyPoint> >& trainKeypoints, vector<Mat>& trainDescriptors,
                          Ptr<DescriptorExtractor>& descriptorExtractor )
 {
     cout << "< Computing descriptors for keypoints..." << endl;
     descriptorExtractor->compute( queryImage, queryKeypoints, queryDescriptors );
     descriptorExtractor->compute( trainImages, trainKeypoints, trainDescriptors );
-    
+
     int totalTrainDesc = 0;
     for( vector<Mat>::const_iterator tdIter = trainDescriptors.begin(); tdIter != trainDescriptors.end(); tdIter++ )
         totalTrainDesc += tdIter->rows;
@@ -157,7 +157,7 @@ void computeDescriptors( const Mat& queryImage, vector<KeyPoint>& queryKeypoints
     cout << ">" << endl;
 }
 
-void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
+static void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDescriptors,
                        vector<DMatch>& matches, Ptr<DescriptorMatcher>& descriptorMatcher )
 {
     cout << "< Set train descriptors collection in the matcher and match query descriptors to them..." << endl;
@@ -175,13 +175,13 @@ void matchDescriptors( const Mat& queryDescriptors, const vector<Mat>& trainDesc
     double matchTime = tm.getTimeMilli();
 
     CV_Assert( queryDescriptors.rows == (int)matches.size() || matches.empty() );
-    
+
     cout << "Number of matches: " << matches.size() << endl;
     cout << "Build time: " << buildTime << " ms; Match time: " << matchTime << " ms" << endl;
     cout << ">" << endl;
 }
 
-void saveResultImages( const Mat& queryImage, const vector<KeyPoint>& queryKeypoints,
+static void saveResultImages( const Mat& queryImage, const vector<KeyPoint>& queryKeypoints,
                        const vector<Mat>& trainImages, const vector<vector<KeyPoint> >& trainKeypoints,
                        const vector<DMatch>& matches, const vector<string>& trainImagesNames, const string& resultDir )
 {
index bb4ec6c..ce34f6e 100644 (file)
@@ -7,9 +7,9 @@
 using namespace cv;
 using namespace std;
 
-void help(char** argv)
+static void help(char** argv)
 {
-       cout << "\nDemonstrate mean-shift based color segmentation in spatial pyramid.\n"
+    cout << "\nDemonstrate mean-shift based color segmentation in spatial pyramid.\n"
     << "Call:\n   " << argv[0] << " image\n"
     << "This program allows you to set the spatial and color radius\n"
     << "of the mean shift window as well as the number of pyramid reduction levels explored\n"
@@ -17,7 +17,7 @@ void help(char** argv)
 }
 
 //This colors the segmentations
-void floodFillPostprocess( Mat& img, const Scalar& colorDiff=Scalar::all(1) )
+static void floodFillPostprocess( Mat& img, const Scalar& colorDiff=Scalar::all(1) )
 {
     CV_Assert( !img.empty() );
     RNG rng = theRNG();
@@ -39,7 +39,7 @@ string winName = "meanshift";
 int spatialRad, colorRad, maxPyrLevel;
 Mat img, res;
 
-void meanShiftSegmentation( int, void* )
+static void meanShiftSegmentation( int, void* )
 {
     cout << "spatialRad=" << spatialRad << "; "
          << "colorRad=" << colorRad << "; "
@@ -53,7 +53,7 @@ int main(int argc, char** argv)
 {
     if( argc !=2 )
     {
-       help(argv);
+        help(argv);
         return -1;
     }
 
index e7eaf72..6056c39 100644 (file)
@@ -6,9 +6,9 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
-       cout << "This program demonstrates finding the minimum enclosing box or circle of a set\n"
+    cout << "This program demonstrates finding the minimum enclosing box or circle of a set\n"
             "of points using functions: minAreaRect() minEnclosingCircle().\n"
             "Random points are generated and then enclosed.\n"
             "Call:\n"
@@ -21,7 +21,7 @@ int main( int /*argc*/, char** /*argv*/ )
     help();
 
     Mat img(500, 500, CV_8UC3);
-    RNG& rng = theRNG();    
+    RNG& rng = theRNG();
 
     for(;;)
     {
@@ -32,25 +32,25 @@ int main( int /*argc*/, char** /*argv*/ )
             Point pt;
             pt.x = rng.uniform(img.cols/4, img.cols*3/4);
             pt.y = rng.uniform(img.rows/4, img.rows*3/4);
-            
+
             points.push_back(pt);
         }
-        
+
         RotatedRect box = minAreaRect(Mat(points));
 
         Point2f center, vtx[4];
         float radius = 0;
         minEnclosingCircle(Mat(points), center, radius);
         box.points(vtx);
-        
+
         img = Scalar::all(0);
         for( i = 0; i < count; i++ )
             circle( img, points[i], 3, Scalar(0, 0, 255), CV_FILLED, CV_AA );
 
         for( i = 0; i < 4; i++ )
             line(img, vtx[i], vtx[(i+1)%4], Scalar(0, 255, 0), 1, CV_AA);
-        
-        circle(img, center, cvRound(radius), Scalar(0, 255, 255), 1, CV_AA); 
+
+        circle(img, center, cvRound(radius), Scalar(0, 255, 255), 1, CV_AA);
 
         imshow( "rect & circle", img );
 
index 069584a..d4ab351 100644 (file)
@@ -7,12 +7,12 @@
 
 using namespace cv;
 
-void help()
+static void help()
 {
 
 printf("\nShow off image morphology: erosion, dialation, open and close\n"
-       "Call:\n   morphology2 [image]\n"
-       "This program also shows use of rect, elipse and cross kernels\n\n");
+    "Call:\n   morphology2 [image]\n"
+    "This program also shows use of rect, elipse and cross kernels\n\n");
 printf( "Hot keys: \n"
     "\tESC - quit the program\n"
     "\tr - use rectangle structuring element\n"
@@ -31,7 +31,7 @@ int open_close_pos = 0;
 int erode_dilate_pos = 0;
 
 // callback function for open/close trackbar
-void OpenClose(int, void*)
+static void OpenClose(int, void*)
 {
     int n = open_close_pos - max_iters;
     int an = n > 0 ? n : -n;
@@ -44,7 +44,7 @@ void OpenClose(int, void*)
 }
 
 // callback function for erode/dilate trackbar
-void ErodeDilate(int, void*)
+static void ErodeDilate(int, void*)
 {
     int n = erode_dilate_pos - max_iters;
     int an = n > 0 ? n : -n;
index ece866b..c5a6830 100644 (file)
@@ -10,7 +10,7 @@
 using namespace std;
 using namespace cv;
 
-void help()
+static void help()
 {
     cout << "\nThis program demonstrates the multi cascade recognizer. It is a generalization of facedetect sample.\n\n"
             "Usage: ./multicascadeclassifier \n"
index 366ca13..58f17a5 100644 (file)
@@ -6,7 +6,7 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
         cout << "\nThis program demonstrates usage of depth sensors (Kinect, XtionPRO,...).\n"
                         "The user gets some of the supported output images.\n"
@@ -23,7 +23,7 @@ void help()
          << endl;
 }
 
-void colorizeDisparity( const Mat& gray, Mat& rgb, double maxDisp=-1.f, float S=1.f, float V=1.f )
+static void colorizeDisparity( const Mat& gray, Mat& rgb, double maxDisp=-1.f, float S=1.f, float V=1.f )
 {
     CV_Assert( !gray.empty() );
     CV_Assert( gray.type() == CV_8UC1 );
@@ -53,30 +53,30 @@ void colorizeDisparity( const Mat& gray, Mat& rgb, double maxDisp=-1.f, float S=
             float t = V * (1 - (1 - f) * S);
 
             Point3f res;
-            
-            if( hi == 0 ) //R = V,     G = t,  B = p
+
+            if( hi == 0 ) //R = V,  G = t,  B = p
                 res = Point3f( p, t, V );
-            if( hi == 1 ) // R = q,    G = V,  B = p
+            if( hi == 1 ) // R = q, G = V,  B = p
                 res = Point3f( p, V, q );
-            if( hi == 2 ) // R = p,    G = V,  B = t
+            if( hi == 2 ) // R = p, G = V,  B = t
                 res = Point3f( t, V, p );
-            if( hi == 3 ) // R = p,    G = q,  B = V
+            if( hi == 3 ) // R = p, G = q,  B = V
                 res = Point3f( V, q, p );
-            if( hi == 4 ) // R = t,    G = p,  B = V
+            if( hi == 4 ) // R = t, G = p,  B = V
                 res = Point3f( V, p, t );
-            if( hi == 5 ) // R = V,    G = p,  B = q
+            if( hi == 5 ) // R = V, G = p,  B = q
                 res = Point3f( q, p, V );
 
             uchar b = (uchar)(std::max(0.f, std::min (res.x, 1.f)) * 255.f);
             uchar g = (uchar)(std::max(0.f, std::min (res.y, 1.f)) * 255.f);
             uchar r = (uchar)(std::max(0.f, std::min (res.z, 1.f)) * 255.f);
 
-            rgb.at<Point3_<uchar> >(y,x) = Point3_<uchar>(b, g, r);     
+            rgb.at<Point3_<uchar> >(y,x) = Point3_<uchar>(b, g, r);
         }
     }
 }
 
-float getMaxDisparity( VideoCapture& capture )
+static float getMaxDisparity( VideoCapture& capture )
 {
     const int minDistance = 400; // mm
     float b = (float)capture.get( CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE ); // mm
@@ -84,7 +84,7 @@ float getMaxDisparity( VideoCapture& capture )
     return b * F / minDistance;
 }
 
-void printCommandLineParams()
+static void printCommandLineParams()
 {
     cout << "-cd       Colorized disparity? (0 or 1; 1 by default) Ignored if disparity map is not selected to show." << endl;
     cout << "-fmd      Fixed max disparity? (0 or 1; 0 by default) Ignored if disparity map is not colorized (-cd 0)." << endl;
@@ -96,7 +96,7 @@ void printCommandLineParams()
     cout << "-r        Filename of .oni video file. The data will grabbed from it." << endl ;
 }
 
-void parseCommandLine( int argc, char* argv[], bool& isColorizeDisp, bool& isFixedMaxDisp, int& imageMode, bool retrievedImageFlags[],
+static void parseCommandLine( int argc, char* argv[], bool& isColorizeDisp, bool& isFixedMaxDisp, int& imageMode, bool retrievedImageFlags[],
                        string& filename, bool& isFileReading )
 {
     // set defaut values
index 32d6e12..e15b683 100644 (file)
@@ -9,14 +9,14 @@
 using namespace cv;
 using namespace std;
 
-void help()
-{
-       printf(
-                       "\nDemonstrate the use of the HoG descriptor using\n"
-                       "  HOGDescriptor::hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());\n"
-                       "Usage:\n"
-                       "./peopledetect (<image_filename> | <image_list>.txt)\n\n");
-}
+// static void help()
+// {
+//     printf(
+//             "\nDemonstrate the use of the HoG descriptor using\n"
+//             "  HOGDescriptor::hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());\n"
+//             "Usage:\n"
+//             "./peopledetect (<image_filename> | <image_list>.txt)\n\n");
+// }
 
 int main(int argc, char** argv)
 {
@@ -33,16 +33,16 @@ int main(int argc, char** argv)
 
     if( img.data )
     {
-           strcpy(_filename, argv[1]);
+        strcpy(_filename, argv[1]);
     }
     else
     {
         f = fopen(argv[1], "rt");
         if(!f)
         {
-                   fprintf( stderr, "ERROR: the specified file could not be loaded\n");
-                   return -1;
-           }
+            fprintf( stderr, "ERROR: the specified file could not be loaded\n");
+            return -1;
+        }
     }
 
     HOGDescriptor hog;
@@ -51,58 +51,58 @@ int main(int argc, char** argv)
 
     for(;;)
     {
-           char* filename = _filename;
-           if(f)
-           {
-                   if(!fgets(filename, (int)sizeof(_filename)-2, f))
-                           break;
-                   //while(*filename && isspace(*filename))
-                   //  ++filename;
-                   if(filename[0] == '#')
-                           continue;
-                   int l = (int)strlen(filename);
-                   while(l > 0 && isspace(filename[l-1]))
-                           --l;
-                   filename[l] = '\0';
-                   img = imread(filename);
-           }
-           printf("%s:\n", filename);
-           if(!img.data)
-                   continue;
-               
-           fflush(stdout);
-           vector<Rect> found, found_filtered;
-           double t = (double)getTickCount();
-           // run the detector with default parameters. to get a higher hit-rate
-           // (and more false alarms, respectively), decrease the hitThreshold and
-           // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
-           hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
-           t = (double)getTickCount() - t;
-           printf("tdetection time = %gms\n", t*1000./cv::getTickFrequency());
-           size_t i, j;
-           for( i = 0; i < found.size(); i++ )
-           {
-                   Rect r = found[i];
-                   for( j = 0; j < found.size(); j++ )
-                           if( j != i && (r & found[j]) == r)
-                                   break;
-                   if( j == found.size() )
-                           found_filtered.push_back(r);
-           }
-           for( i = 0; i < found_filtered.size(); i++ )
-           {
-                   Rect r = found_filtered[i];
-                   // the HOG detector returns slightly larger rectangles than the real objects.
-                   // so we slightly shrink the rectangles to get a nicer output.
-                   r.x += cvRound(r.width*0.1);
-                   r.width = cvRound(r.width*0.8);
-                   r.y += cvRound(r.height*0.07);
-                   r.height = cvRound(r.height*0.8);
-                   rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
-           }
-           imshow("people detector", img);
-           int c = waitKey(0) & 255;
-           if( c == 'q' || c == 'Q' || !f)
+        char* filename = _filename;
+        if(f)
+        {
+            if(!fgets(filename, (int)sizeof(_filename)-2, f))
+                break;
+            //while(*filename && isspace(*filename))
+            //  ++filename;
+            if(filename[0] == '#')
+                continue;
+            int l = (int)strlen(filename);
+            while(l > 0 && isspace(filename[l-1]))
+                --l;
+            filename[l] = '\0';
+            img = imread(filename);
+        }
+        printf("%s:\n", filename);
+        if(!img.data)
+            continue;
+
+        fflush(stdout);
+        vector<Rect> found, found_filtered;
+        double t = (double)getTickCount();
+        // run the detector with default parameters. to get a higher hit-rate
+        // (and more false alarms, respectively), decrease the hitThreshold and
+        // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
+        hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
+        t = (double)getTickCount() - t;
+        printf("tdetection time = %gms\n", t*1000./cv::getTickFrequency());
+        size_t i, j;
+        for( i = 0; i < found.size(); i++ )
+        {
+            Rect r = found[i];
+            for( j = 0; j < found.size(); j++ )
+                if( j != i && (r & found[j]) == r)
+                    break;
+            if( j == found.size() )
+                found_filtered.push_back(r);
+        }
+        for( i = 0; i < found_filtered.size(); i++ )
+        {
+            Rect r = found_filtered[i];
+            // the HOG detector returns slightly larger rectangles than the real objects.
+            // so we slightly shrink the rectangles to get a nicer output.
+            r.x += cvRound(r.width*0.1);
+            r.width = cvRound(r.width*0.8);
+            r.y += cvRound(r.height*0.07);
+            r.height = cvRound(r.height*0.8);
+            rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
+        }
+        imshow("people detector", img);
+        int c = waitKey(0) & 255;
+        if( c == 'q' || c == 'Q' || !f)
             break;
     }
     if(f)
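
For context, the detectMultiScale() call in this loop only works because an SVM detector was attached to the HOGDescriptor earlier in the sample, outside the visible hunk. A minimal standalone sketch of that setup, with an illustrative input path and the same detection parameters as above:

    #include "opencv2/core/core.hpp"
    #include "opencv2/objdetect/objdetect.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include <vector>

    int main()
    {
        cv::HOGDescriptor hog;
        hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

        cv::Mat img = cv::imread("person.jpg");          // illustrative path only
        if (img.empty())
            return 1;

        std::vector<cv::Rect> found;
        // same parameters as above: hitThreshold=0, winStride=8x8,
        // padding=32x32, scale=1.05, groupThreshold=2
        hog.detectMultiScale(img, found, 0, cv::Size(8,8), cv::Size(32,32), 1.05, 2);

        for (size_t i = 0; i < found.size(); i++)
            cv::rectangle(img, found[i].tl(), found[i].br(), cv::Scalar(0,255,0), 3);
        cv::imwrite("person_detections.png", img);
        return 0;
    }
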
index 5cfcf79..a9dda17 100644 (file)
@@ -43,7 +43,7 @@ private:
 \r
 bool stop = false;\r
 \r
-void mouseCallback(int event, int x, int y, int flags, void* userdata)\r
+static void mouseCallback(int event, int x, int y, int flags, void* userdata)\r
 {\r
     if (stop)\r
         return;\r
@@ -52,7 +52,7 @@ void mouseCallback(int event, int x, int y, int flags, void* userdata)
     renderer->onMouseEvent(event, x, y, flags);\r
 }\r
 \r
-void openGlDrawCallback(void* userdata)\r
+static void openGlDrawCallback(void* userdata)\r
 {\r
     if (stop)\r
         return;\r
@@ -280,7 +280,7 @@ void PointCloudRenderer::onMouseEvent(int event, int x, int y, int /*flags*/)
     mouse_dy_ = clamp(mouse_dy_, -mouseClamp, mouseClamp);\r
 }\r
 \r
-Point3d rotate(Point3d v, double yaw, double pitch)\r
+static Point3d rotate(Point3d v, double yaw, double pitch)\r
 {\r
     Point3d t1;\r
     t1.x = v.x * cos(-yaw / 180.0 * CV_PI) - v.z * sin(-yaw / 180.0 * CV_PI);\r
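
This sample and several of the following ones receive the same one-word change: helpers and GUI callbacks used only inside their own .cpp file are marked static. A function with external linkage and no prior declaration is exactly what gcc's -Wmissing-declarations (and -Wmissing-prototypes in C) flags, so giving such helpers internal linkage avoids the warning without adding headers. A minimal sketch of the pattern; an unnamed namespace is the equivalent C++ alternative:

    // file-local helpers: internal linkage means no separate declaration is needed,
    // so -Wmissing-declarations has nothing to report
    static void onMouse(int /*event*/, int /*x*/, int /*y*/, int /*flags*/, void* /*userdata*/)
    {
    }

    namespace            // unnamed namespace: same effect, C++ style
    {
        void onDraw(void* /*userdata*/)
        {
        }
    }

    int main()
    {
        onMouse(0, 0, 0, 0, 0);
        onDraw(0);
        return 0;
    }
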
index 2567ba8..8bc35e5 100644 (file)
@@ -29,7 +29,7 @@ vector<Scalar> classColors;
 #define _ANN_ 0 // artificial neural networks
 #define _EM_  0 // expectation-maximization
 
-void on_mouse( int event, int x, int y, int /*flags*/, void* )
+static void on_mouse( int event, int x, int y, int /*flags*/, void* )
 {
     if( img.empty() )
         return;
@@ -87,7 +87,7 @@ void on_mouse( int event, int x, int y, int /*flags*/, void* )
    }
 }
 
-void prepare_train_data( Mat& samples, Mat& classes )
+static void prepare_train_data( Mat& samples, Mat& classes )
 {
     Mat( trainedPoints ).copyTo( samples );
     Mat( trainedPointsMarkers ).copyTo( classes );
@@ -98,7 +98,7 @@ void prepare_train_data( Mat& samples, Mat& classes )
 }
 
 #if _NBC_
-void find_decision_boundary_NBC()
+static void find_decision_boundary_NBC()
 {
     img.copyTo( imgDst );
 
@@ -125,7 +125,7 @@ void find_decision_boundary_NBC()
 
 
 #if _KNN_
-void find_decision_boundary_KNN( int K )
+static void find_decision_boundary_KNN( int K )
 {
     img.copyTo( imgDst );
 
@@ -151,7 +151,7 @@ void find_decision_boundary_KNN( int K )
 #endif
 
 #if _SVM_
-void find_decision_boundary_SVM( CvSVMParams params )
+static void find_decision_boundary_SVM( CvSVMParams params )
 {
     img.copyTo( imgDst );
 
@@ -185,7 +185,7 @@ void find_decision_boundary_SVM( CvSVMParams params )
 #endif
 
 #if _DT_
-void find_decision_boundary_DT()
+static void find_decision_boundary_DT()
 {
     img.copyTo( imgDst );
 
index c9fd972..7b8d81a 100644 (file)
 
 #include "opencv2/opencv.hpp"
 
-void help(std::string errorMessage)
+static void help(std::string errorMessage)
 {
-       std::cout<<"Program init error : "<<errorMessage<<std::endl;
-       std::cout<<"\nProgram call procedure : retinaDemo [processing mode] [Optional : media target] [Optional LAST parameter: \"log\" to activate retina log sampling]"<<std::endl;
-       std::cout<<"\t[processing mode] :"<<std::endl;
-       std::cout<<"\t -image : for still image processing"<<std::endl;
-       std::cout<<"\t -video : for video stream processing"<<std::endl;
-       std::cout<<"\t[Optional : media target] :"<<std::endl;
-       std::cout<<"\t if processing an image or video file, then, specify the path and filename of the target to process"<<std::endl;
-       std::cout<<"\t leave empty if processing video stream coming from a connected video device"<<std::endl;
-       std::cout<<"\t[Optional : activate retina log sampling] : an optional last parameter can be specified for retina spatial log sampling"<<std::endl;
-       std::cout<<"\t set \"log\" without quotes to activate this sampling, output frame size will be divided by 4"<<std::endl;
-       std::cout<<"\nExamples:"<<std::endl;
-       std::cout<<"\t-Image processing : ./retinaDemo -image lena.jpg"<<std::endl;
-       std::cout<<"\t-Image processing with log sampling : ./retinaDemo -image lena.jpg log"<<std::endl;
-       std::cout<<"\t-Video processing : ./retinaDemo -video myMovie.mp4"<<std::endl;
-       std::cout<<"\t-Live video processing : ./retinaDemo -video"<<std::endl;
-       std::cout<<"\nPlease start again with new parameters"<<std::endl;
+    std::cout<<"Program init error : "<<errorMessage<<std::endl;
+    std::cout<<"\nProgram call procedure : retinaDemo [processing mode] [Optional : media target] [Optional LAST parameter: \"log\" to activate retina log sampling]"<<std::endl;
+    std::cout<<"\t[processing mode] :"<<std::endl;
+    std::cout<<"\t -image : for still image processing"<<std::endl;
+    std::cout<<"\t -video : for video stream processing"<<std::endl;
+    std::cout<<"\t[Optional : media target] :"<<std::endl;
+    std::cout<<"\t if processing an image or video file, then, specify the path and filename of the target to process"<<std::endl;
+    std::cout<<"\t leave empty if processing video stream coming from a connected video device"<<std::endl;
+    std::cout<<"\t[Optional : activate retina log sampling] : an optional last parameter can be specified for retina spatial log sampling"<<std::endl;
+    std::cout<<"\t set \"log\" without quotes to activate this sampling, output frame size will be divided by 4"<<std::endl;
+    std::cout<<"\nExamples:"<<std::endl;
+    std::cout<<"\t-Image processing : ./retinaDemo -image lena.jpg"<<std::endl;
+    std::cout<<"\t-Image processing with log sampling : ./retinaDemo -image lena.jpg log"<<std::endl;
+    std::cout<<"\t-Video processing : ./retinaDemo -video myMovie.mp4"<<std::endl;
+    std::cout<<"\t-Live video processing : ./retinaDemo -video"<<std::endl;
+    std::cout<<"\nPlease start again with new parameters"<<std::endl;
 }
 
 int main(int argc, char* argv[]) {
-       // welcome message
-       std::cout<<"****************************************************"<<std::endl;
-       std::cout<<"* Retina demonstration : demonstrates the use of is a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
-       std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
-       std::cout<<"* As a summary, these are the retina model properties:"<<std::endl;
-       std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
-       std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
-       std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
-       std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
-       std::cout<<"* for more information, reer to the following papers :"<<std::endl;
-       std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
-       std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
-       std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
-       std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
-       std::cout<<"****************************************************"<<std::endl;
-       std::cout<<" NOTE : this program generates the default retina parameters file 'RetinaDefaultParameters.xml'"<<std::endl;
-       std::cout<<" => you can use this to fine tune parameters and load them if you save to file 'RetinaSpecificParameters.xml'"<<std::endl;
-
-       // basic input arguments checking
-       if (argc<2)
-       {
-               help("bad number of parameter");
-               return -1;
-       }
-
-       bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
-
-       std::string inputMediaType=argv[1];
-
-       // declare the retina input buffer... that will be fed differently in regard of the input media
-       cv::Mat inputFrame;
-       cv::VideoCapture videoCapture; // in case a video media is used, its manager is declared here
-
-       //////////////////////////////////////////////////////////////////////////////
-       // checking input media type (still image, video file, live video acquisition)
-       if (!strcmp(inputMediaType.c_str(), "-image") && argc >= 3)
-       {
-               std::cout<<"RetinaDemo: processing image "<<argv[2]<<std::endl;
-               // image processing case
-               inputFrame = cv::imread(std::string(argv[2]), 1); // load image in RGB mode
-       }else
-               if (!strcmp(inputMediaType.c_str(), "-video"))
-               {
-                       if (argc == 2 || (argc == 3 && useLogSampling)) // attempt to grab images from a video capture device
-                       {
-                               videoCapture.open(0);
-                       }else// attempt to grab images from a video filestream
-                       {
-                               std::cout<<"RetinaDemo: processing video stream "<<argv[2]<<std::endl;
-                               videoCapture.open(argv[2]);
-                       }
-
-                       // grab a first frame to check if everything is ok
-                       videoCapture>>inputFrame;
-               }else
-               {
-                       // bad command parameter
-                       help("bad command parameter");
-                       return -1;
-               }
-
-       if (inputFrame.empty())
-       {
-               help("Input media could not be loaded, aborting");
-               return -1;
-       }
-
-
-       //////////////////////////////////////////////////////////////////////////////
-       // Program start in a try/catch safety context (Retina may throw errors)
-       try
-       {
-               // create a retina instance with default parameters setup, uncomment the initialisation you wanna test
-               cv::Ptr<cv::Retina> myRetina;
-
-               // if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
-               if (useLogSampling)
+    // welcome message
+    std::cout<<"****************************************************"<<std::endl;
+    std::cout<<"* Retina demonstration : demonstrates the use of is a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
+    std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
+    std::cout<<"* As a summary, these are the retina model properties:"<<std::endl;
+    std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
+    std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
+    std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
+    std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
+    std::cout<<"* for more information, reer to the following papers :"<<std::endl;
+    std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
+    std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
+    std::cout<<"* => reports comments/remarks at benoit.alexandre.vision@gmail.com"<<std::endl;
+    std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
+    std::cout<<"****************************************************"<<std::endl;
+    std::cout<<" NOTE : this program generates the default retina parameters file 'RetinaDefaultParameters.xml'"<<std::endl;
+    std::cout<<" => you can use this to fine tune parameters and load them if you save to file 'RetinaSpecificParameters.xml'"<<std::endl;
+
+    // basic input arguments checking
+    if (argc<2)
+    {
+        help("bad number of parameter");
+        return -1;
+    }
+
+    bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
+
+    std::string inputMediaType=argv[1];
+
+    // declare the retina input buffer... that will be fed differently in regard of the input media
+    cv::Mat inputFrame;
+    cv::VideoCapture videoCapture; // in case a video media is used, its manager is declared here
+
+    //////////////////////////////////////////////////////////////////////////////
+    // checking input media type (still image, video file, live video acquisition)
+    if (!strcmp(inputMediaType.c_str(), "-image") && argc >= 3)
+    {
+        std::cout<<"RetinaDemo: processing image "<<argv[2]<<std::endl;
+        // image processing case
+        inputFrame = cv::imread(std::string(argv[2]), 1); // load image in RGB mode
+    }else
+        if (!strcmp(inputMediaType.c_str(), "-video"))
+        {
+            if (argc == 2 || (argc == 3 && useLogSampling)) // attempt to grab images from a video capture device
+            {
+                videoCapture.open(0);
+            }else// attempt to grab images from a video filestream
+            {
+                std::cout<<"RetinaDemo: processing video stream "<<argv[2]<<std::endl;
+                videoCapture.open(argv[2]);
+            }
+
+            // grab a first frame to check if everything is ok
+            videoCapture>>inputFrame;
+        }else
+        {
+            // bad command parameter
+            help("bad command parameter");
+            return -1;
+        }
+
+    if (inputFrame.empty())
+    {
+        help("Input media could not be loaded, aborting");
+        return -1;
+    }
+
+
+    //////////////////////////////////////////////////////////////////////////////
+    // Program start in a try/catch safety context (Retina may throw errors)
+    try
+    {
+        // create a retina instance with default parameters setup, uncomment the initialisation you wanna test
+        cv::Ptr<cv::Retina> myRetina;
+
+        // if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
+        if (useLogSampling)
                 {
                         myRetina = new cv::Retina(inputFrame.size(), true, cv::RETINA_COLOR_BAYER, true, 2.0, 10.0);
                 }
-               else// -> else allocate "classical" retina :
-                       myRetina = new cv::Retina(inputFrame.size());
-               
-               // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
-               myRetina->write("RetinaDefaultParameters.xml");
-
-               // load parameters if file exists
-               myRetina->setup("RetinaSpecificParameters.xml");
-               myRetina->clearBuffers();
-
-               // declare retina output buffers
-               cv::Mat retinaOutput_parvo;
-               cv::Mat retinaOutput_magno;
-
-               // processing loop with stop condition
-               bool continueProcessing=true; // FIXME : not yet managed during process...
-               while(continueProcessing)
-               {
-                       // if using video stream, then, grabbing a new frame, else, input remains the same
-                       if (videoCapture.isOpened())
-                               videoCapture>>inputFrame;
-
-                       // run retina filter
-                       myRetina->run(inputFrame);
-                       // Retrieve and display retina output
-                       myRetina->getParvo(retinaOutput_parvo);
-                       myRetina->getMagno(retinaOutput_magno);
-                       cv::imshow("retina input", inputFrame);
-                       cv::imshow("Retina Parvo", retinaOutput_parvo);
-                       cv::imshow("Retina Magno", retinaOutput_magno);
-                       cv::waitKey(10);
-               }
-       }catch(cv::Exception e)
-       {
-               std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
-       }
-
-       // Program end message
-       std::cout<<"Retina demo end"<<std::endl;
-
-       return 0;
+        else// -> else allocate "classical" retina :
+            myRetina = new cv::Retina(inputFrame.size());
+
+        // save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
+        myRetina->write("RetinaDefaultParameters.xml");
+
+        // load parameters if file exists
+        myRetina->setup("RetinaSpecificParameters.xml");
+        myRetina->clearBuffers();
+
+        // declare retina output buffers
+        cv::Mat retinaOutput_parvo;
+        cv::Mat retinaOutput_magno;
+
+        // processing loop with stop condition
+        bool continueProcessing=true; // FIXME : not yet managed during process...
+        while(continueProcessing)
+        {
+            // if using video stream, then, grabbing a new frame, else, input remains the same
+            if (videoCapture.isOpened())
+                videoCapture>>inputFrame;
+
+            // run retina filter
+            myRetina->run(inputFrame);
+            // Retrieve and display retina output
+            myRetina->getParvo(retinaOutput_parvo);
+            myRetina->getMagno(retinaOutput_magno);
+            cv::imshow("retina input", inputFrame);
+            cv::imshow("Retina Parvo", retinaOutput_parvo);
+            cv::imshow("Retina Magno", retinaOutput_magno);
+            cv::waitKey(10);
+        }
+    }catch(const cv::Exception& e)
+    {
+        std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
+    }
+
+    // Program end message
+    std::cout<<"Retina demo end"<<std::endl;
+
+    return 0;
 }
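
On the error-handling path above, cv::Exception is best caught by const reference, which avoids copying the exception object and still gives access to e.what(). A tiny self-contained illustration, with an arbitrary error code and message:

    #include "opencv2/core/core.hpp"
    #include <iostream>

    int main()
    {
        try
        {
            CV_Error(CV_StsError, "demo error");        // deliberately raise a cv::Exception
        }
        catch (const cv::Exception& e)                  // const reference: no copy, no slicing
        {
            std::cerr << "Error using Retina : " << e.what() << std::endl;
        }
        return 0;
    }
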
 
index 33c1eb7..8195df6 100644 (file)
@@ -6,41 +6,41 @@
 
 using namespace cv;
 
-void help()
+static void help()
 {
-       printf("\n"
-                       "This program demonstrated a simple method of connected components clean up of background subtraction\n"
-                       "When the program starts, it begins learning the background.\n"
-                       "You can toggle background learning on and off by hitting the space bar.\n"
-                       "Call\n"
-                       "./segment_objects [video file, else it reads camera 0]\n\n");
+    printf("\n"
+            "This program demonstrated a simple method of connected components clean up of background subtraction\n"
+            "When the program starts, it begins learning the background.\n"
+            "You can toggle background learning on and off by hitting the space bar.\n"
+            "Call\n"
+            "./segment_objects [video file, else it reads camera 0]\n\n");
 }
 
-void refineSegments(const Mat& img, Mat& mask, Mat& dst)
+static void refineSegments(const Mat& img, Mat& mask, Mat& dst)
 {
     int niters = 3;
-    
+
     vector<vector<Point> > contours;
     vector<Vec4i> hierarchy;
-    
+
     Mat temp;
-    
+
     dilate(mask, temp, Mat(), Point(-1,-1), niters);
     erode(temp, temp, Mat(), Point(-1,-1), niters*2);
     dilate(temp, temp, Mat(), Point(-1,-1), niters);
-    
+
     findContours( temp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
-       
-       dst = Mat::zeros(img.size(), CV_8UC3);
-    
+
+    dst = Mat::zeros(img.size(), CV_8UC3);
+
     if( contours.size() == 0 )
         return;
-        
+
     // iterate through all the top-level contours,
     // draw each connected component with its own random color
     int idx = 0, largestComp = 0;
     double maxArea = 0;
-    
+
     for( ; idx >= 0; idx = hierarchy[idx][0] )
     {
         const vector<Point>& c = contours[idx];
@@ -60,35 +60,35 @@ int main(int argc, char** argv)
 {
     VideoCapture cap;
     bool update_bg_model = true;
-    
+
     help();
 
     if( argc < 2 )
         cap.open(0);
     else
         cap.open(std::string(argv[1]));
-    
+
     if( !cap.isOpened() )
     {
         printf("\nCan not open camera or video file\n");
         return -1;
     }
-    
+
     Mat tmp_frame, bgmask, out_frame;
-    
+
     cap >> tmp_frame;
     if(!tmp_frame.data)
     {
         printf("can not read data from the video source\n");
         return -1;
     }
-    
+
     namedWindow("video", 1);
     namedWindow("segmented", 1);
-    
+
     BackgroundSubtractorMOG bgsubtractor;
     bgsubtractor.set("noiseSigma", 10);
-    
+
     for(;;)
     {
         cap >> tmp_frame;
@@ -109,6 +109,6 @@ int main(int argc, char** argv)
             printf("Learn background is in state = %d\n",update_bg_model);
         }
     }
-    
+
     return 0;
 }
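
refineSegments() above cleans the mask with a dilate/erode/dilate sequence and then walks the top-level contours, but the part that actually ranks them by area is cut off by the hunk boundary. A sketch of the usual idiom with contourArea(); the ranking criterion is inferred from the maxArea/largestComp variables visible above:

    #include "opencv2/imgproc/imgproc.hpp"
    #include <cmath>
    #include <vector>

    static int largestTopLevelContour(const std::vector<std::vector<cv::Point> >& contours,
                                      const std::vector<cv::Vec4i>& hierarchy)
    {
        int largestComp = -1;
        double maxArea = 0;
        // hierarchy[idx][0] is the index of the next contour on the same level
        for (int idx = 0; idx >= 0 && idx < (int)contours.size(); idx = hierarchy[idx][0])
        {
            double area = std::fabs(cv::contourArea(cv::Mat(contours[idx])));
            if (area > maxArea)
            {
                maxArea = area;
                largestComp = idx;
            }
        }
        return largestComp;
    }

    int main()
    {
        std::vector<std::vector<cv::Point> > contours;
        std::vector<cv::Vec4i> hierarchy;
        return largestTopLevelContour(contours, hierarchy);   // -1 when there is nothing to pick
    }
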
index 987290d..fa1df68 100644 (file)
@@ -29,11 +29,11 @@ const char* helphelp =
 "select3dobj -w <board_width> -h <board_height> [-s <square_size>]\n"
 "           -i <camera_intrinsics_filename> -o <output_prefix> [video_filename/cameraId]\n"
 "\n"
-" -w <board_width>                     Number of chessboard corners wide\n"
-" -h <board_height>                    Number of chessboard corners width\n"
-" [-s <square_size>]                   Optional measure of chessboard squares in meters\n"
+" -w <board_width>          Number of chessboard corners wide\n"
+" -h <board_height>         Number of chessboard corners width\n"
+" [-s <square_size>]            Optional measure of chessboard squares in meters\n"
 " -i <camera_intrinsics_filename> Camera matrix .yml file from calibration.cpp\n"
-" -o <output_prefix>           Prefix the output segmentation images with this\n"
+" -o <output_prefix>        Prefix the output segmentation images with this\n"
 " [video_filename/cameraId]  If present, read from that video file or that ID\n"
 "\n"
 "Using a camera's intrinsics (from calibrating a camera -- see calibration.cpp) and an\n"
@@ -57,10 +57,10 @@ const char* helphelp =
 "    q     - Exit the program\n"
 "\n\n";
 
-void help()
-{
-       puts(helphelp);
-}
+// static void help()
+// {
+//     puts(helphelp);
+// }
 
 
 struct MouseEvent
@@ -88,19 +88,19 @@ static bool readCameraMatrix(const string& filename,
     fs["image_height"] >> calibratedImageSize.height;
     fs["distortion_coefficients"] >> distCoeffs;
     fs["camera_matrix"] >> cameraMatrix;
-    
+
     if( distCoeffs.type() != CV_64F )
         distCoeffs = Mat_<double>(distCoeffs);
     if( cameraMatrix.type() != CV_64F )
         cameraMatrix = Mat_<double>(cameraMatrix);
-    
+
     return true;
 }
 
 static void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
 {
     corners.resize(0);
-    
+
     for( int i = 0; i < boardSize.height; i++ )
         for( int j = 0; j < boardSize.width; j++ )
             corners.push_back(Point3f(float(j*squareSize),
@@ -119,7 +119,7 @@ static Point3f image2plane(Point2f imgpt, const Mat& R, const Mat& tvec,
 }
 
 
-static Rect extract3DBox(const Mat& frame, Mat& shownFrame, Mat& selectedObjFrame, 
+static Rect extract3DBox(const Mat& frame, Mat& shownFrame, Mat& selectedObjFrame,
                          const Mat& cameraMatrix, const Mat& rvec, const Mat& tvec,
                          const vector<Point3f>& box, int nobjpt, bool runExtraSegmentation)
 {
@@ -128,7 +128,7 @@ static Rect extract3DBox(const Mat& frame, Mat& shownFrame, Mat& selectedObjFram
         return Rect();
     vector<Point3f> objpt;
     vector<Point2f> imgpt;
-    
+
     objpt.push_back(box[0]);
     if( nobjpt > 1 )
         objpt.push_back(box[1]);
@@ -140,9 +140,9 @@ static Rect extract3DBox(const Mat& frame, Mat& shownFrame, Mat& selectedObjFram
     if( nobjpt > 3 )
         for( int i = 0; i < 4; i++ )
             objpt.push_back(Point3f(objpt[i].x, objpt[i].y, box[3].z));
-    
+
     projectPoints(Mat(objpt), rvec, tvec, cameraMatrix, Mat(), imgpt);
-    
+
     if( shownFrame.data )
     {
         if( nobjpt == 1 )
@@ -158,7 +158,7 @@ static Rect extract3DBox(const Mat& frame, Mat& shownFrame, Mat& selectedObjFram
             {
                 circle(shownFrame, imgpt[i], 3, Scalar(0,255,0), -1, CV_AA);
                 line(shownFrame, imgpt[i], imgpt[(i+1)%4], Scalar(0,255,0), 3, CV_AA);
-            }    
+            }
         else
             for( int i = 0; i < 8; i++ )
             {
@@ -167,7 +167,7 @@ static Rect extract3DBox(const Mat& frame, Mat& shownFrame, Mat& selectedObjFram
                 line(shownFrame, imgpt[i], imgpt[i%4], Scalar(0,255,0), 3, CV_AA);
             }
     }
-    
+
     if( nobjpt <= 2 )
         return Rect();
     vector<Point> hull;
@@ -175,7 +175,7 @@ static Rect extract3DBox(const Mat& frame, Mat& shownFrame, Mat& selectedObjFram
     Mat selectedObjMask = Mat::zeros(frame.size(), CV_8U);
     fillConvexPoly(selectedObjMask, &hull[0], (int)hull.size(), Scalar::all(255), 8, 0);
     Rect roi = boundingRect(Mat(hull)) & Rect(Point(), frame.size());
-    
+
     if( runExtraSegmentation )
     {
         selectedObjMask = Scalar::all(GC_BGD);
@@ -185,7 +185,7 @@ static Rect extract3DBox(const Mat& frame, Mat& shownFrame, Mat& selectedObjFram
                 3, GC_INIT_WITH_RECT + GC_INIT_WITH_MASK);
         bitwise_and(selectedObjMask, Scalar::all(1), selectedObjMask);
     }
-    
+
     frame.copyTo(selectedObjFrame, selectedObjMask);
     return roi;
 }
@@ -197,7 +197,7 @@ static int select3DBox(const string& windowname, const string& selWinName, const
 {
     const float eps = 1e-3f;
     MouseEvent mouse;
-    
+
     setMouseCallback(windowname, onMouse, &mouse);
     vector<Point3f> tempobj(8);
     vector<Point2f> imgpt(4), tempimg(8);
@@ -206,19 +206,19 @@ static int select3DBox(const string& windowname, const string& selWinName, const
     Mat R, selectedObjMask, selectedObjFrame, shownFrame;
     Rodrigues(rvec, R);
     box.resize(4);
-    
+
     for(;;)
     {
         float Z = 0.f;
         bool dragging = (mouse.buttonState & CV_EVENT_FLAG_LBUTTON) != 0;
         int npt = nobjpt;
-        
+
         if( (mouse.event == CV_EVENT_LBUTTONDOWN ||
              mouse.event == CV_EVENT_LBUTTONUP ||
              dragging) && nobjpt < 4 )
         {
             Point2f m = mouse.pt;
-            
+
             if( nobjpt < 2 )
                 imgpt[npt] = m;
             else
@@ -232,7 +232,7 @@ static int select3DBox(const string& windowname, const string& selWinName, const
                         if( norm(m - imgpt[i]) < norm(m - imgpt[nearestIdx]) )
                             nearestIdx = i;
                 }
-                
+
                 if( npt == 2 )
                 {
                     float dx = box[1].x - box[0].x, dy = box[1].y - box[0].y;
@@ -242,9 +242,9 @@ static int select3DBox(const string& windowname, const string& selWinName, const
                 }
                 else
                     tempobj[0] = Point3f(box[nearestIdx].x, box[nearestIdx].y, 1.f);
-                
+
                 projectPoints(Mat(tempobj), rvec, tvec, cameraMatrix, Mat(), tempimg);
-                
+
                 Point2f a = imgpt[nearestIdx], b = tempimg[0], d1 = b - a, d2 = m - a;
                 float n1 = (float)norm(d1), n2 = (float)norm(d2);
                 if( n1*n2 < eps )
@@ -256,7 +256,7 @@ static int select3DBox(const string& windowname, const string& selWinName, const
                 }
             }
             box[npt] = image2plane(imgpt[npt], R, tvec, cameraMatrix, npt<3 ? 0 : Z);
-            
+
             if( (npt == 0 && mouse.event == CV_EVENT_LBUTTONDOWN) ||
                (npt > 0 && norm(box[npt] - box[npt-1]) > eps &&
                 mouse.event == CV_EVENT_LBUTTONUP) )
@@ -268,19 +268,19 @@ static int select3DBox(const string& windowname, const string& selWinName, const
                     box[nobjpt] = box[nobjpt-1];
                 }
             }
-            
+
             // reset the event
             mouse.event = -1;
             //mouse.buttonState = 0;
             npt++;
         }
-        
+
         frame.copyTo(shownFrame);
         extract3DBox(frame, shownFrame, selectedObjFrame,
                      cameraMatrix, rvec, tvec, box, npt, false);
         imshow(windowname, shownFrame);
         imshow(selWinName, selectedObjFrame);
-        
+
         int c = waitKey(30);
         if( (c & 255) == 27 )
         {
@@ -305,17 +305,17 @@ static bool readModelViews( const string& filename, vector<Point3f>& box,
     roiList.resize(0);
     poseList.resize(0);
     box.resize(0);
-    
+
     FileStorage fs(filename, FileStorage::READ);
     if( !fs.isOpened() )
         return false;
     fs["box"] >> box;
-    
+
     FileNode all = fs["views"];
     if( all.type() != FileNode::SEQ )
         return false;
     FileNodeIterator it = all.begin(), it_end = all.end();
-    
+
     for(; it != it_end; ++it)
     {
         FileNode n = *it;
@@ -326,7 +326,7 @@ static bool readModelViews( const string& filename, vector<Point3f>& box,
         poseList.push_back(Vec6f((float)np[0], (float)np[1], (float)np[2],
                                  (float)np[3], (float)np[4], (float)np[5]));
     }
-    
+
     return true;
 }
 
@@ -339,25 +339,25 @@ static bool writeModelViews(const string& filename, const vector<Point3f>& box,
     FileStorage fs(filename, FileStorage::WRITE);
     if( !fs.isOpened() )
         return false;
-    
+
     fs << "box" << "[:";
     fs << box << "]" << "views" << "[";
-    
+
     size_t i, nviews = imagelist.size();
-    
+
     CV_Assert( nviews == roiList.size() && nviews == poseList.size() );
-    
+
     for( i = 0; i < nviews; i++ )
     {
         Rect r = roiList[i];
         Vec6f p = poseList[i];
-        
+
         fs << "{" << "image" << imagelist[i] <<
             "roi" << "[:" << r.x << r.y << r.width << r.height << "]" <<
             "pose" << "[:" << p[0] << p[1] << p[2] << p[3] << p[4] << p[5] << "]" << "}";
     }
     fs << "]";
-    
+
     return true;
 }
 
@@ -389,82 +389,82 @@ int main(int argc, char** argv)
     "\tSPACE - Skip the frame; move to the next frame (not in video mode)\n"
     "\tENTER - Confirm the selection. Grab next object in video mode.\n"
     "\tq - Exit the program\n";
-    
+
     if(argc < 5)
     {
-       puts(helphelp);
+        puts(helphelp);
         puts(help);
         return 0;
     }
     const char* intrinsicsFilename = 0;
     const char* outprefix = 0;
-       const char* inputName = 0;
-       int cameraId = 0;
-       Size boardSize;
-       double squareSize = 1;
+    const char* inputName = 0;
+    int cameraId = 0;
+    Size boardSize;
+    double squareSize = 1;
     vector<string> imageList;
-    
+
     for( int i = 1; i < argc; i++ )
     {
         if( strcmp(argv[i], "-i") == 0 )
-                       intrinsicsFilename = argv[++i];
-               else if( strcmp(argv[i], "-o") == 0 )
-                       outprefix = argv[++i];
-               else if( strcmp(argv[i], "-w") == 0 )
-               {
-                       if(sscanf(argv[++i], "%d", &boardSize.width) != 1 || boardSize.width <= 0)
-                       {
-                               printf("Incorrect -w parameter (must be a positive integer)\n");
-                               puts(help);
-                               return 0;
-                       }
-               }
-               else if( strcmp(argv[i], "-h") == 0 )
-               {
-                       if(sscanf(argv[++i], "%d", &boardSize.height) != 1 || boardSize.height <= 0)
-                       {
-                               printf("Incorrect -h parameter (must be a positive integer)\n");
-                               puts(help);
-                               return 0;
-                       }
-               }
-               else if( strcmp(argv[i], "-s") == 0 )
-               {
-                       if(sscanf(argv[++i], "%lf", &squareSize) != 1 || squareSize <= 0)
-                       {
-                               printf("Incorrect -w parameter (must be a positive real number)\n");
-                               puts(help);
-                               return 0;
-                       }
-               }
-               else if( argv[i][0] != '-' )
-               {
-                       if( isdigit(argv[i][0]))
-                               sscanf(argv[i], "%d", &cameraId);
-                       else
-                               inputName = argv[i];
-               }
-               else
-               {
-                       printf("Incorrect option\n");
-                       puts(help);
-                       return 0;
-               }
+            intrinsicsFilename = argv[++i];
+        else if( strcmp(argv[i], "-o") == 0 )
+            outprefix = argv[++i];
+        else if( strcmp(argv[i], "-w") == 0 )
+        {
+            if(sscanf(argv[++i], "%d", &boardSize.width) != 1 || boardSize.width <= 0)
+            {
+                printf("Incorrect -w parameter (must be a positive integer)\n");
+                puts(help);
+                return 0;
+            }
+        }
+        else if( strcmp(argv[i], "-h") == 0 )
+        {
+            if(sscanf(argv[++i], "%d", &boardSize.height) != 1 || boardSize.height <= 0)
+            {
+                printf("Incorrect -h parameter (must be a positive integer)\n");
+                puts(help);
+                return 0;
+            }
+        }
+        else if( strcmp(argv[i], "-s") == 0 )
+        {
+            if(sscanf(argv[++i], "%lf", &squareSize) != 1 || squareSize <= 0)
+            {
+                printf("Incorrect -w parameter (must be a positive real number)\n");
+                puts(help);
+                return 0;
+            }
+        }
+        else if( argv[i][0] != '-' )
+        {
+            if( isdigit(argv[i][0]))
+                sscanf(argv[i], "%d", &cameraId);
+            else
+                inputName = argv[i];
+        }
+        else
+        {
+            printf("Incorrect option\n");
+            puts(help);
+            return 0;
+        }
+    }
+
+    if( !intrinsicsFilename || !outprefix ||
+        boardSize.width <= 0 || boardSize.height <= 0 )
+    {
+        printf("Some of the required parameters are missing\n");
+        puts(help);
+        return 0;
     }
-    
-       if( !intrinsicsFilename || !outprefix ||
-               boardSize.width <= 0 || boardSize.height <= 0 )
-       {
-               printf("Some of the required parameters are missing\n");
-               puts(help);
-               return 0;
-       }
-       
+
     Mat cameraMatrix, distCoeffs;
     Size calibratedImageSize;
     readCameraMatrix(intrinsicsFilename, cameraMatrix, distCoeffs, calibratedImageSize );
-    
-       VideoCapture capture;
+
+    VideoCapture capture;
     if( inputName )
     {
         if( !readStringList(inputName, imageList) &&
@@ -476,10 +476,10 @@ int main(int argc, char** argv)
     }
     else
         capture.open(cameraId);
-    
+
     if( !capture.isOpened() && imageList.empty() )
         return fprintf( stderr, "Could not initialize video capture\n" ), -2;
-    
+
     const char* outbarename = 0;
     {
         outbarename = strrchr(outprefix, '/');
@@ -498,30 +498,30 @@ int main(int argc, char** argv)
         else
             outbarename = outprefix;
     }
-       
-       Mat frame, shownFrame, selectedObjFrame, mapxy;
-    
-       namedWindow("View", 1);
+
+    Mat frame, shownFrame, selectedObjFrame, mapxy;
+
+    namedWindow("View", 1);
     namedWindow("Selected Object", 1);
     setMouseCallback("View", onMouse, 0);
     bool boardFound = false;
-    
+
     string indexFilename = format("%s_index.yml", outprefix);
-    
+
     vector<string> capturedImgList;
     vector<Rect> roiList;
     vector<Vec6f> poseList;
     vector<Point3f> box, boardPoints;
-    
+
     readModelViews(indexFilename, box, capturedImgList, roiList, poseList);
     calcChessboardCorners(boardSize, (float)squareSize, boardPoints);
     int frameIdx = 0;
     bool grabNext = !imageList.empty();
-       
+
     puts(screen_help);
 
-       for(int i = 0;;i++)
-       {
+    for(int i = 0;;i++)
+    {
         Mat frame0;
         if( !imageList.empty() )
         {
@@ -538,7 +538,7 @@ int main(int argc, char** argv)
             {
                 double sx = (double)frame0.cols/calibratedImageSize.width;
                 double sy = (double)frame0.rows/calibratedImageSize.height;
-                
+
                 // adjust the camera matrix for the new resolution
                 cameraMatrix.at<double>(0,0) *= sx;
                 cameraMatrix.at<double>(0,2) *= sx;
@@ -554,17 +554,17 @@ int main(int argc, char** argv)
         remap(frame0, frame, mapxy, Mat(), INTER_LINEAR);
         vector<Point2f> foundBoardCorners;
         boardFound = findChessboardCorners(frame, boardSize, foundBoardCorners);
-        
+
         Mat rvec, tvec;
         if( boardFound )
             solvePnP(Mat(boardPoints), Mat(foundBoardCorners), cameraMatrix,
                      distCoeffs, rvec, tvec, false);
-        
+
         frame.copyTo(shownFrame);
         drawChessboardCorners(shownFrame, boardSize, Mat(foundBoardCorners), boardFound);
         selectedObjFrame = Mat::zeros(frame.size(), frame.type());
-               
-               if( boardFound && grabNext )
+
+        if( boardFound && grabNext )
         {
             if( box.empty() )
             {
@@ -573,7 +573,7 @@ int main(int argc, char** argv)
                 if( code == -100 )
                     break;
             }
-        
+
             if( !box.empty() )
             {
                 Rect r = extract3DBox(frame, shownFrame, selectedObjFrame,
@@ -596,10 +596,10 @@ int main(int argc, char** argv)
                         break;
                     }
                     imwrite(path, selectedObjFrame(r));
-                    
+
                     capturedImgList.push_back(string(path));
                     roiList.push_back(r);
-                    
+
                     float p[6];
                     Mat RV(3, 1, CV_32F, p), TV(3, 1, CV_32F, p+3);
                     rvec.convertTo(RV, RV.type());
@@ -612,12 +612,12 @@ int main(int argc, char** argv)
 
         imshow("View", shownFrame);
         imshow("Selected Object", selectedObjFrame);
-               int c = waitKey(imageList.empty() && !box.empty() ? 30 : 300);
+        int c = waitKey(imageList.empty() && !box.empty() ? 30 : 300);
         if( c == 'q' || c == 'Q' )
             break;
         if( c == '\r' || c == '\n' )
             grabNext = true;
-       }
+    }
 
     writeModelViews(indexFilename, box, capturedImgList, roiList, poseList);
     return 0;
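
The main loop above recovers the board pose with solvePnP() and then maps user-selected box points back into the image with projectPoints(). A compact sketch of that round trip in isolation; the board size, camera matrix and synthetic corner positions are made-up values, only the two calls mirror the sample:

    #include "opencv2/calib3d/calib3d.hpp"
    #include "opencv2/core/core.hpp"
    #include <vector>

    int main()
    {
        // a 4 x 3 board with unit squares, illustrative values only
        std::vector<cv::Point3f> boardPoints;
        for (int i = 0; i < 3; i++)
            for (int j = 0; j < 4; j++)
                boardPoints.push_back(cv::Point3f((float)j, (float)i, 0.f));

        // corners would normally come from findChessboardCorners(); synthesized here
        std::vector<cv::Point2f> corners;
        for (size_t k = 0; k < boardPoints.size(); k++)
            corners.push_back(cv::Point2f(100.f + 20.f*boardPoints[k].x,
                                          100.f + 20.f*boardPoints[k].y));

        cv::Mat cameraMatrix = (cv::Mat_<double>(3,3) << 500, 0, 320,
                                                           0, 500, 240,
                                                           0,   0,   1);
        cv::Mat distCoeffs = cv::Mat::zeros(5, 1, CV_64F);

        cv::Mat rvec, tvec;
        cv::solvePnP(cv::Mat(boardPoints), cv::Mat(corners), cameraMatrix, distCoeffs, rvec, tvec);

        // anything expressed in board coordinates can now be drawn into the image
        std::vector<cv::Point2f> reprojected;
        cv::projectPoints(cv::Mat(boardPoints), rvec, tvec, cameraMatrix, distCoeffs, reprojected);
        return 0;
    }
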
index df16e01..13a9946 100644 (file)
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
-       cout <<
-       "\nA program using pyramid scaling, Canny, contours, contour simpification and\n"
-       "memory storage (it's got it all folks) to find\n"
-       "squares in a list of images pic1-6.png\n"
-       "Returns sequence of squares detected on the image.\n"
-       "the sequence is stored in the specified memory storage\n"
-       "Call:\n"
-       "./squares\n"
+    cout <<
+    "\nA program using pyramid scaling, Canny, contours, contour simpification and\n"
+    "memory storage (it's got it all folks) to find\n"
+    "squares in a list of images pic1-6.png\n"
+    "Returns sequence of squares detected on the image.\n"
+    "the sequence is stored in the specified memory storage\n"
+    "Call:\n"
+    "./squares\n"
     "Using OpenCV version %s\n" << CV_VERSION << "\n" << endl;
 }
 
@@ -33,7 +33,7 @@ const char* wndname = "Square Detection Demo";
 // helper function:
 // finds a cosine of angle between vectors
 // from pt0->pt1 and from pt0->pt2
-double angle( Point pt1, Point pt2, Point pt0 )
+static double angle( Point pt1, Point pt2, Point pt0 )
 {
     double dx1 = pt1.x - pt0.x;
     double dy1 = pt1.y - pt0.y;
@@ -44,23 +44,23 @@ double angle( Point pt1, Point pt2, Point pt0 )
 
 // returns sequence of squares detected on the image.
 // the sequence is stored in the specified memory storage
-void findSquares( const Mat& image, vector<vector<Point> >& squares )
+static void findSquares( const Mat& image, vector<vector<Point> >& squares )
 {
     squares.clear();
-    
+
     Mat pyr, timg, gray0(image.size(), CV_8U), gray;
-    
+
     // down-scale and upscale the image to filter out the noise
     pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
     pyrUp(pyr, timg, image.size());
     vector<vector<Point> > contours;
-    
+
     // find squares in every color plane of the image
     for( int c = 0; c < 3; c++ )
     {
         int ch[] = {c, 0};
         mixChannels(&timg, 1, &gray0, 1, ch, 1);
-        
+
         // try several threshold levels
         for( int l = 0; l < N; l++ )
         {
@@ -86,14 +86,14 @@ void findSquares( const Mat& image, vector<vector<Point> >& squares )
             findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
 
             vector<Point> approx;
-            
+
             // test each contour
             for( size_t i = 0; i < contours.size(); i++ )
             {
                 // approximate contour with accuracy proportional
                 // to the contour perimeter
                 approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
-                
+
                 // square contours should have 4 vertices after approximation
                 // relatively large area (to filter out noisy contours)
                 // and be convex.
@@ -126,7 +126,7 @@ void findSquares( const Mat& image, vector<vector<Point> >& squares )
 
 
 // the function draws all the squares in the image
-void drawSquares( Mat& image, const vector<vector<Point> >& squares )
+static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
 {
     for( size_t i = 0; i < squares.size(); i++ )
     {
@@ -146,7 +146,7 @@ int main(int /*argc*/, char** /*argv*/)
     help();
     namedWindow( wndname, 1 );
     vector<vector<Point> > squares;
-    
+
     for( int i = 0; names[i] != 0; i++ )
     {
         Mat image = imread(names[i], 1);
@@ -155,7 +155,7 @@ int main(int /*argc*/, char** /*argv*/)
             cout << "Couldn't load " << names[i] << endl;
             continue;
         }
-        
+
         findSquares(image, squares);
         drawSquares(image, squares);
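
findSquares() lists its acceptance criteria in the comments above (four vertices after approxPolyDP, a reasonably large area, convexity), but the test itself and the use of the angle() helper are cut off by the hunk boundary. A self-contained sketch of such a test; the area threshold and the 0.3 cosine bound are illustrative values:

    #include "opencv2/imgproc/imgproc.hpp"
    #include <algorithm>
    #include <cmath>
    #include <vector>

    static double angleCos(cv::Point pt1, cv::Point pt2, cv::Point pt0)
    {
        double dx1 = pt1.x - pt0.x, dy1 = pt1.y - pt0.y;
        double dx2 = pt2.x - pt0.x, dy2 = pt2.y - pt0.y;
        return (dx1*dx2 + dy1*dy2)/std::sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
    }

    static bool looksLikeSquare(const std::vector<cv::Point>& approx)
    {
        if (approx.size() != 4 ||
            std::fabs(cv::contourArea(cv::Mat(approx))) < 1000 ||
            !cv::isContourConvex(cv::Mat(approx)))
            return false;

        double maxCosine = 0;
        for (int j = 2; j < 5; j++)   // the angle at each of the remaining corners
            maxCosine = std::max(maxCosine,
                                 std::fabs(angleCos(approx[j%4], approx[j-2], approx[j-1])));
        return maxCosine < 0.3;       // every corner reasonably close to 90 degrees
    }

    int main()
    {
        std::vector<cv::Point> quad;
        quad.push_back(cv::Point(0,0));     quad.push_back(cv::Point(100,0));
        quad.push_back(cv::Point(100,100)); quad.push_back(cv::Point(0,100));
        return looksLikeSquare(quad) ? 0 : 1;
    }
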
 
index 2c1c91f..a6d276f 100644 (file)
@@ -8,11 +8,11 @@
    Learning OpenCV: Computer Vision with the OpenCV Library
      by Gary Bradski and Adrian Kaehler
      Published by O'Reilly Media, October 3, 2008
-   AVAILABLE AT: 
+
+   AVAILABLE AT:
      http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
      Or: http://oreilly.com/catalog/9780596516130/
-     ISBN-10: 0596516134 or: ISBN-13: 978-0596516130    
+     ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
 
    OTHER OPENCV SITES:
    * The source code is on sourceforge at:
 using namespace cv;
 using namespace std;
 
-int print_help()
+static int print_help()
 {
-       cout <<
-                       " Given a list of chessboard images, the number of corners (nx, ny)\n"
-                       " on the chessboards, and a flag: useCalibrated for \n"
-                       "   calibrated (0) or\n"
-                       "   uncalibrated \n"
-                       "     (1: use cvStereoCalibrate(), 2: compute fundamental\n"
-                       "         matrix separately) stereo. \n"
-                       " Calibrate the cameras and display the\n"
-                       " rectified results along with the computed disparity images.   \n" << endl;
+    cout <<
+            " Given a list of chessboard images, the number of corners (nx, ny)\n"
+            " on the chessboards, and a flag: useCalibrated for \n"
+            "   calibrated (0) or\n"
+            "   uncalibrated \n"
+            "     (1: use cvStereoCalibrate(), 2: compute fundamental\n"
+            "         matrix separately) stereo. \n"
+            " Calibrate the cameras and display the\n"
+            " rectified results along with the computed disparity images.   \n" << endl;
     cout << "Usage:\n ./stereo_calib -w board_width -h board_height [-nr /*dot not view results*/] <image list XML/YML file>\n" << endl;
     return 0;
 }
@@ -65,22 +65,22 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
         cout << "Error: the image list contains odd (non-even) number of elements\n";
         return;
     }
-    
+
     bool displayCorners = false;//true;
     const int maxScale = 2;
     const float squareSize = 1.f;  // Set this to your actual square size
     // ARRAY AND VECTOR STORAGE:
-    
+
     vector<vector<Point2f> > imagePoints[2];
     vector<vector<Point3f> > objectPoints;
     Size imageSize;
-    
+
     int i, j, k, nimages = (int)imagelist.size()/2;
-    
+
     imagePoints[0].resize(nimages);
     imagePoints[1].resize(nimages);
     vector<string> goodImageList;
-    
+
     for( i = j = 0; i < nimages; i++ )
     {
         for( k = 0; k < 2; k++ )
@@ -105,7 +105,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
                     timg = img;
                 else
                     resize(img, timg, Size(), scale, scale);
-                found = findChessboardCorners(timg, boardSize, corners, 
+                found = findChessboardCorners(timg, boardSize, corners,
                     CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
                 if( found )
                 {
@@ -152,25 +152,25 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
         cout << "Error: too little pairs to run the calibration\n";
         return;
     }
-    
+
     imagePoints[0].resize(nimages);
     imagePoints[1].resize(nimages);
     objectPoints.resize(nimages);
-    
+
     for( i = 0; i < nimages; i++ )
     {
         for( j = 0; j < boardSize.height; j++ )
             for( k = 0; k < boardSize.width; k++ )
                 objectPoints[i].push_back(Point3f(j*squareSize, k*squareSize, 0));
     }
-    
+
     cout << "Running stereo calibration ...\n";
-    
+
     Mat cameraMatrix[2], distCoeffs[2];
     cameraMatrix[0] = Mat::eye(3, 3, CV_64F);
     cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
     Mat R, T, E, F;
-    
+
     double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                     cameraMatrix[0], distCoeffs[0],
                     cameraMatrix[1], distCoeffs[1],
@@ -182,7 +182,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
                     CV_CALIB_RATIONAL_MODEL +
                     CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
     cout << "done with RMS error=" << rms << endl;
-    
+
 // CALIBRATION QUALITY CHECK
 // because the output fundamental matrix implicitly
 // includes all the output information,
@@ -212,7 +212,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
         npoints += npt;
     }
     cout << "average reprojection err = " <<  err/npoints << endl;
-    
+
     // save intrinsic parameters
     FileStorage fs("intrinsics.yml", CV_STORAGE_WRITE);
     if( fs.isOpened() )
@@ -223,15 +223,15 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
     }
     else
         cout << "Error: can not save the intrinsic parameters\n";
-    
+
     Mat R1, R2, P1, P2, Q;
     Rect validRoi[2];
-    
+
     stereoRectify(cameraMatrix[0], distCoeffs[0],
                   cameraMatrix[1], distCoeffs[1],
                   imageSize, R, T, R1, R2, P1, P2, Q,
                   CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
-        
+
     fs.open("extrinsics.yml", CV_STORAGE_WRITE);
     if( fs.isOpened() )
     {
@@ -240,15 +240,15 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
     }
     else
         cout << "Error: can not save the intrinsic parameters\n";
-    
+
     // OpenCV can handle left-right
     // or up-down camera arrangements
     bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));
-    
+
 // COMPUTE AND DISPLAY RECTIFICATION
     if( !showRectified )
         return;
-    
+
     Mat rmap[2][2];
 // IF BY CALIBRATED (BOUGUET'S METHOD)
     if( useCalibrated )
@@ -270,7 +270,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
         F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
         Mat H1, H2;
         stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);
-        
+
         R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
         R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
         P1 = cameraMatrix[0];
@@ -280,7 +280,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
     //Precompute maps for cv::remap()
     initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
     initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
-    
+
     Mat canvas;
     double sf;
     int w, h;
@@ -298,7 +298,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
         h = cvRound(imageSize.height*sf);
         canvas.create(h*2, w, CV_8UC3);
     }
-    
+
     for( i = 0; i < nimages; i++ )
     {
         for( k = 0; k < 2; k++ )
@@ -311,11 +311,11 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
             if( useCalibrated )
             {
                 Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
-                          cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf)); 
+                          cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
                 rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8);
             }
         }
-        
+
         if( !isVerticalStereo )
             for( j = 0; j < canvas.rows; j += 16 )
                 line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
@@ -329,7 +329,7 @@ StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=
     }
 }
 
-                   
+
 static bool readStringList( const string& filename, vector<string>& l )
 {
     l.resize(0);
@@ -344,13 +344,13 @@ static bool readStringList( const string& filename, vector<string>& l )
         l.push_back((string)*it);
     return true;
 }
-                   
+
 int main(int argc, char** argv)
 {
     Size boardSize;
     string imagelistfn;
     bool showRectified = true;
-    
+
     for( int i = 1; i < argc; i++ )
     {
         if( string(argv[i]) == "-w" )
@@ -381,7 +381,7 @@ int main(int argc, char** argv)
         else
             imagelistfn = argv[i];
     }
-    
+
     if( imagelistfn == "" )
     {
         imagelistfn = "stereo_calib.xml";
@@ -389,10 +389,10 @@ int main(int argc, char** argv)
     }
     else if( boardSize.width <= 0 || boardSize.height <= 0 )
     {
-        cout << "if you specified XML file with chessboards, you should also specify the board width and height (-w and -h options)" << endl; 
+        cout << "if you specified XML file with chessboards, you should also specify the board width and height (-w and -h options)" << endl;
         return 0;
     }
-    
+
     vector<string> imagelist;
     bool ok = readStringList(imagelistfn, imagelist);
     if(!ok || imagelist.empty())
@@ -400,7 +400,7 @@ int main(int argc, char** argv)
         cout << "can not open " << imagelistfn << " or the string list is empty" << endl;
         return print_help();
     }
-    
+
     StereoCalib(imagelist, boardSize, true, showRectified);
     return 0;
 }
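
The CALIBRATION QUALITY CHECK section above scores the result through the epipolar constraint, but its loop is split across hunk boundaries. A self-contained sketch of the underlying measurement: for matched corners, the distance of each point to the epipolar line induced by its counterpart should be close to zero (lens distortion is ignored here for brevity, and the identity F in main() is only a placeholder so the sketch runs on its own):

    #include "opencv2/calib3d/calib3d.hpp"
    #include <cmath>
    #include <vector>

    // mean distance of each corner to the epipolar line of its counterpart;
    // computeCorrespondEpilines() returns normalized lines, so |ax+by+c| is a distance
    static double meanEpipolarError(const std::vector<cv::Point2f>& pts1,
                                    const std::vector<cv::Point2f>& pts2,
                                    const cv::Mat& F)
    {
        std::vector<cv::Vec3f> linesIn2, linesIn1;
        cv::computeCorrespondEpilines(cv::Mat(pts1), 1, F, linesIn2);  // lines in image 2
        cv::computeCorrespondEpilines(cv::Mat(pts2), 2, F, linesIn1);  // lines in image 1

        double err = 0;
        for (size_t j = 0; j < pts1.size(); j++)
        {
            err += std::fabs(pts2[j].x*linesIn2[j][0] + pts2[j].y*linesIn2[j][1] + linesIn2[j][2]);
            err += std::fabs(pts1[j].x*linesIn1[j][0] + pts1[j].y*linesIn1[j][1] + linesIn1[j][2]);
        }
        return pts1.empty() ? 0. : err/(2.*pts1.size());
    }

    int main()
    {
        std::vector<cv::Point2f> a(1, cv::Point2f(10.f, 10.f)), b(1, cv::Point2f(12.f, 10.f));
        cv::Mat F = cv::Mat::eye(3, 3, CV_64F);   // placeholder matrix, just to exercise the code
        return meanEpipolarError(a, b, F) >= 0. ? 0 : 1;
    }
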
index 08f67e3..8c0739b 100644 (file)
 
 using namespace cv;
 
-void print_help()
+static void print_help()
 {
-       printf("\nDemo stereo matching converting L and R images into disparity and point clouds\n");
+    printf("\nDemo stereo matching converting L and R images into disparity and point clouds\n");
     printf("\nUsage: stereo_match <left_image> <right_image> [--algorithm=bm|sgbm|hh|var] [--blocksize=<block_size>]\n"
            "[--max-disparity=<max_disparity>] [--scale=scale_factor>] [-i <intrinsic_filename>] [-e <extrinsic_filename>]\n"
            "[--no-display] [-o <disparity_image>] [-p <point_cloud_file>]\n");
 }
 
-void saveXYZ(const char* filename, const Mat& mat)
+static void saveXYZ(const char* filename, const Mat& mat)
 {
     const double max_z = 1.0e4;
     FILE* fp = fopen(filename, "wt");
@@ -47,11 +47,11 @@ int main(int argc, char** argv)
     const char* blocksize_opt = "--blocksize=";
     const char* nodisplay_opt = "--no-display=";
     const char* scale_opt = "--scale=";
-    
+
     if(argc < 3)
     {
         print_help();
-               return 0;
+        return 0;
     }
     const char* img1_filename = 0;
     const char* img2_filename = 0;
@@ -59,17 +59,17 @@ int main(int argc, char** argv)
     const char* extrinsic_filename = 0;
     const char* disparity_filename = 0;
     const char* point_cloud_filename = 0;
-    
+
     enum { STEREO_BM=0, STEREO_SGBM=1, STEREO_HH=2, STEREO_VAR=3 };
     int alg = STEREO_SGBM;
     int SADWindowSize = 0, numberOfDisparities = 0;
     bool no_display = false;
     float scale = 1.f;
-    
+
     StereoBM bm;
     StereoSGBM sgbm;
     StereoVar var;
-    
+
     for( int i = 1; i < argc; i++ )
     {
         if( argv[i][0] != '-' )
@@ -136,29 +136,29 @@ int main(int argc, char** argv)
             return -1;
         }
     }
-    
+
     if( !img1_filename || !img2_filename )
     {
         printf("Command-line parameter error: both left and right images must be specified\n");
         return -1;
     }
-    
+
     if( (intrinsic_filename != 0) ^ (extrinsic_filename != 0) )
     {
         printf("Command-line parameter error: either both intrinsic and extrinsic parameters must be specified, or none of them (when the stereo pair is already rectified)\n");
         return -1;
     }
-    
+
     if( extrinsic_filename == 0 && point_cloud_filename )
     {
         printf("Command-line parameter error: extrinsic and intrinsic parameters must be specified to compute the point cloud\n");
         return -1;
     }
-    
+
     int color_mode = alg == STEREO_BM ? 0 : -1;
     Mat img1 = imread(img1_filename, color_mode);
     Mat img2 = imread(img2_filename, color_mode);
-    
+
     if( scale != 1.f )
     {
         Mat temp1, temp2;
@@ -168,12 +168,12 @@ int main(int argc, char** argv)
         resize(img2, temp2, Size(), scale, scale, method);
         img2 = temp2;
     }
-    
+
     Size img_size = img1.size();
-    
+
     Rect roi1, roi2;
     Mat Q;
-    
+
     if( intrinsic_filename )
     {
         // reading intrinsic parameters
@@ -183,40 +183,40 @@ int main(int argc, char** argv)
             printf("Failed to open file %s\n", intrinsic_filename);
             return -1;
         }
-        
+
         Mat M1, D1, M2, D2;
         fs["M1"] >> M1;
         fs["D1"] >> D1;
         fs["M2"] >> M2;
         fs["D2"] >> D2;
-        
+
         fs.open(extrinsic_filename, CV_STORAGE_READ);
         if(!fs.isOpened())
         {
             printf("Failed to open file %s\n", extrinsic_filename);
             return -1;
         }
-        
+
         Mat R, T, R1, P1, R2, P2;
         fs["R"] >> R;
         fs["T"] >> T;
-        
+
         stereoRectify( M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2 );
-        
+
         Mat map11, map12, map21, map22;
         initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
         initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);
-        
+
         Mat img1r, img2r;
         remap(img1, img1r, map11, map12, INTER_LINEAR);
         remap(img2, img2r, map21, map22, INTER_LINEAR);
-        
+
         img1 = img1r;
         img2 = img2r;
     }
-    
+
     numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((img_size.width/8) + 15) & -16;
-    
+
     bm.state->roi1 = roi1;
     bm.state->roi2 = roi2;
     bm.state->preFilterCap = 31;
@@ -228,12 +228,12 @@ int main(int argc, char** argv)
     bm.state->speckleWindowSize = 100;
     bm.state->speckleRange = 32;
     bm.state->disp12MaxDiff = 1;
-    
+
     sgbm.preFilterCap = 63;
     sgbm.SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 3;
-    
+
     int cn = img1.channels();
-    
+
     sgbm.P1 = 8*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
     sgbm.P2 = 32*cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
     sgbm.minDisparity = 0;
@@ -243,31 +243,31 @@ int main(int argc, char** argv)
     sgbm.speckleRange = bm.state->speckleRange;
     sgbm.disp12MaxDiff = 1;
     sgbm.fullDP = alg == STEREO_HH;
-    
-    var.levels = 3;                                                                    // ignored with USE_AUTO_PARAMS
-       var.pyrScale = 0.5;                                                             // ignored with USE_AUTO_PARAMS
-       var.nIt = 25;
-       var.minDisp = -numberOfDisparities;     
-       var.maxDisp = 0;
-       var.poly_n = 3;
-       var.poly_sigma = 0.0;
-       var.fi = 15.0f;
-       var.lambda = 0.03f;
-       var.penalization = var.PENALIZATION_TICHONOV;   // ignored with USE_AUTO_PARAMS
-       var.cycle = var.CYCLE_V;                                                // ignored with USE_AUTO_PARAMS
-       var.flags = var.USE_SMART_ID | var.USE_AUTO_PARAMS | var.USE_INITIAL_DISPARITY | var.USE_MEDIAN_FILTERING ;
-    
+
+    var.levels = 3;                                 // ignored with USE_AUTO_PARAMS
+    var.pyrScale = 0.5;                             // ignored with USE_AUTO_PARAMS
+    var.nIt = 25;
+    var.minDisp = -numberOfDisparities;
+    var.maxDisp = 0;
+    var.poly_n = 3;
+    var.poly_sigma = 0.0;
+    var.fi = 15.0f;
+    var.lambda = 0.03f;
+    var.penalization = var.PENALIZATION_TICHONOV;   // ignored with USE_AUTO_PARAMS
+    var.cycle = var.CYCLE_V;                        // ignored with USE_AUTO_PARAMS
+    var.flags = var.USE_SMART_ID | var.USE_AUTO_PARAMS | var.USE_INITIAL_DISPARITY | var.USE_MEDIAN_FILTERING;
+
     Mat disp, disp8;
     //Mat img1p, img2p, dispp;
     //copyMakeBorder(img1, img1p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
     //copyMakeBorder(img2, img2p, 0, 0, numberOfDisparities, 0, IPL_BORDER_REPLICATE);
-    
+
     int64 t = getTickCount();
     if( alg == STEREO_BM )
         bm(img1, img2, disp);
     else if( alg == STEREO_VAR ) {
         var(img1, img2, disp);
-       }
+    }
     else if( alg == STEREO_SGBM || alg == STEREO_HH )
         sgbm(img1, img2, disp);
     t = getTickCount() - t;
@@ -291,10 +291,10 @@ int main(int argc, char** argv)
         waitKey();
         printf("\n");
     }
-    
+
     if(disparity_filename)
         imwrite(disparity_filename, disp8);
-    
+
     if(point_cloud_filename)
     {
         printf("storing the point cloud...");
@@ -304,6 +304,6 @@ int main(int argc, char** argv)
         saveXYZ(point_cloud_filename, xyz);
         printf("\n");
     }
-    
+
     return 0;
 }
index 26e3694..39d6b24 100644
@@ -37,7 +37,7 @@
 // and on any theory of liability, whether in contract, strict liability,
 // or tort (including negligence or otherwise) arising in any way out of
 // the use of this software, even if advised of the possibility of such damage.
-// 
+//
 //
 //M*/
 
@@ -60,7 +60,7 @@ using namespace std;
 using namespace cv;
 using namespace cv::detail;
 
-void printUsage()
+static void printUsage()
 {
     cout <<
         "Rotation model images stitcher.\n\n"
@@ -141,7 +141,7 @@ int blend_type = Blender::MULTI_BAND;
 float blend_strength = 5;
 string result_name = "result.jpg";
 
-int parseCmdArgs(int argc, char** argv)
+static int parseCmdArgs(int argc, char** argv)
 {
     if (argc == 1)
     {
@@ -471,10 +471,10 @@ int main(int argc, char* argv[])
     Ptr<detail::BundleAdjusterBase> adjuster;
     if (ba_cost_func == "reproj") adjuster = new detail::BundleAdjusterReproj();
     else if (ba_cost_func == "ray") adjuster = new detail::BundleAdjusterRay();
-    else 
-    { 
-        cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n"; 
-        return -1; 
+    else
+    {
+        cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
+        return -1;
     }
     adjuster->setConfThresh(conf_thresh);
     Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
@@ -544,18 +544,18 @@ int main(int argc, char* argv[])
         if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
         else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
         else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
-               else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
-               else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
-               else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
-               else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
-               else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
-               else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
-               else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
-               else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
-               else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
-               else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
-               else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
-               else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
+        else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
+        else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
+        else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
+        else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
+        else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
+        else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
+        else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
+        else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
+        else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
+        else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
+        else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
+        else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
     }
 
     if (warper_creator.empty())
@@ -563,7 +563,7 @@ int main(int argc, char* argv[])
         cout << "Can't create the following warper '" << warp_type << "'\n";
         return 1;
     }
-    
+
     Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
 
     for (int i = 0; i < num_images; ++i)
index bdba58f..01af565 100644
@@ -16,7 +16,7 @@
 using namespace std;
 using namespace cv;
 
-void help(char **av)
+static void help(char **av)
 {
     cout << "\nThis program demonstrated the use of features2d with the Fast corner detector and brief descriptors\n"
         << "to track planar objects by computing their homography from the key (training) image to the query (test) image\n\n" << endl;
index 4e31b19..373a46a 100644
@@ -7,24 +7,24 @@
 using namespace cv;
 using namespace std;
 
-void help()
+static void help()
 {
-       cout << "\nThis program demonstrates the famous watershed segmentation algorithm in OpenCV: watershed()\n"
-                       "Usage:\n"
-                       "./watershed [image_name -- default is fruits.jpg]\n" << endl;
+    cout << "\nThis program demonstrates the famous watershed segmentation algorithm in OpenCV: watershed()\n"
+            "Usage:\n"
+            "./watershed [image_name -- default is fruits.jpg]\n" << endl;
 
 
-       cout << "Hot keys: \n"
-               "\tESC - quit the program\n"
-               "\tr - restore the original image\n"
-               "\tw or SPACE - run watershed segmentation algorithm\n"
-               "\t\t(before running it, *roughly* mark the areas to segment on the image)\n"
-               "\t  (before that, roughly outline several markers on the image)\n";
+    cout << "Hot keys: \n"
+        "\tESC - quit the program\n"
+        "\tr - restore the original image\n"
+        "\tw or SPACE - run watershed segmentation algorithm\n"
+        "\t\t(before running it, *roughly* mark the areas to segment on the image)\n"
+        "\t  (before that, roughly outline several markers on the image)\n";
 }
 Mat markerMask, img;
 Point prevPt(-1, -1);
 
-void onMouse( int event, int x, int y, int flags, void* )
+static void onMouse( int event, int x, int y, int flags, void* )
 {
     if( x < 0 || x >= img.cols || y < 0 || y >= img.rows )
         return;
@@ -48,7 +48,7 @@ int main( int argc, char** argv )
 {
     char* filename = argc >= 2 ? argv[1] : (char*)"fruits.jpg";
     Mat img0 = imread(filename, 1), imgGray;
-    
+
     if( img0.empty() )
     {
         cout << "Couldn'g open image " << filename << ". Usage: watershed <image_name>\n";
@@ -83,9 +83,9 @@ int main( int argc, char** argv )
             int i, j, compCount = 0;
             vector<vector<Point> > contours;
             vector<Vec4i> hierarchy;
-            
+
             findContours(markerMask, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
-            
+
             if( contours.empty() )
                 continue;
             Mat markers(markerMask.size(), CV_32S);
@@ -96,14 +96,14 @@ int main( int argc, char** argv )
 
             if( compCount == 0 )
                 continue;
-            
+
             vector<Vec3b> colorTab;
             for( i = 0; i < compCount; i++ )
             {
                 int b = theRNG().uniform(0, 255);
                 int g = theRNG().uniform(0, 255);
                 int r = theRNG().uniform(0, 255);
-                
+
                 colorTab.push_back(Vec3b((uchar)b, (uchar)g, (uchar)r));
             }
 
@@ -113,7 +113,7 @@ int main( int argc, char** argv )
             printf( "execution time = %gms\n", t*1000./getTickFrequency() );
 
             Mat wshed(markers.size(), CV_8UC3);
-            
+
             // paint the watershed image
             for( i = 0; i < markers.rows; i++ )
                 for( j = 0; j < markers.cols; j++ )
index 422b4be..6ebbe0f 100644
@@ -17,7 +17,7 @@ using namespace cv;
 using namespace cv::gpu;\r
 \r
 \r
-void help()\r
+static void help()\r
 {\r
     cout << "Usage: ./cascadeclassifier_gpu \n\t--cascade <cascade_file>\n\t(<image>|--video <video>|--camera <camera_id>)\n"\r
             "Using OpenCV version " << CV_VERSION << endl << endl;\r
@@ -49,7 +49,7 @@ void convertAndResize(const T& src, T& gray, T& resized, double scale)
 }\r
 \r
 \r
-void matPrint(Mat &img, int lineOffsY, Scalar fontColor, const string &ss)\r
+static void matPrint(Mat &img, int lineOffsY, Scalar fontColor, const string &ss)\r
 {\r
     int fontFace = FONT_HERSHEY_DUPLEX;\r
     double fontScale = 0.8;\r
@@ -64,7 +64,7 @@ void matPrint(Mat &img, int lineOffsY, Scalar fontColor, const string &ss)
 }\r
 \r
 \r
-void displayState(Mat &canvas, bool bHelp, bool bGpu, bool bLargestFace, bool bFilter, double fps)\r
+static void displayState(Mat &canvas, bool bHelp, bool bGpu, bool bLargestFace, bool bFilter, double fps)\r
 {\r
     Scalar fontColorRed = CV_RGB(255,0,0);\r
     Scalar fontColorNV  = CV_RGB(118,185,0);\r
index 6a20e9c..bc12bf5 100644
@@ -1,4 +1,4 @@
-#if _MSC_VER >= 1400\r
+#if defined _MSC_VER && _MSC_VER >= 1400\r
 #pragma warning( disable : 4201 4408 4127 4100)\r
 #endif\r
 \r
index ddfe075..e33c07e 100644
@@ -18,7 +18,7 @@ inline T mapVal(T x, T a, T b, T c, T d)
     return c + (d-c) * (x-a) / (b-a);
 }
 
-void colorizeFlow(const Mat &u, const Mat &v, Mat &dst)
+static void colorizeFlow(const Mat &u, const Mat &v, Mat &dst)
 {
     double uMin, uMax;
     minMaxLoc(u, &uMin, &uMax, 0, 0);
index 8511efa..9d74162 100644
@@ -85,8 +85,7 @@ private:
     double work_fps;\r
 };\r
 \r
-\r
-void printHelp()\r
+static void printHelp()\r
 {\r
     cout << "Histogram of Oriented Gradients descriptor and detector sample.\n"\r
          << "\nUsage: hog_gpu\n"\r
@@ -166,10 +165,10 @@ Args Args::read(int argc, char** argv)
         else if (string(argv[i]) == "--resize_src") args.resize_src = (string(argv[++i]) == "true");\r
         else if (string(argv[i]) == "--width") args.width = atoi(argv[++i]);\r
         else if (string(argv[i]) == "--height") args.height = atoi(argv[++i]);\r
-        else if (string(argv[i]) == "--hit_threshold") \r
-        { \r
-            args.hit_threshold = atof(argv[++i]); \r
-            args.hit_threshold_auto = false; \r
+        else if (string(argv[i]) == "--hit_threshold")\r
+        {\r
+            args.hit_threshold = atof(argv[++i]);\r
+            args.hit_threshold_auto = false;\r
         }\r
         else if (string(argv[i]) == "--scale") args.scale = atof(argv[++i]);\r
         else if (string(argv[i]) == "--nlevels") args.nlevels = atoi(argv[++i]);\r
@@ -244,15 +243,15 @@ void App::run()
 \r
     // Create HOG descriptors and detectors here\r
     vector<float> detector;\r
-    if (win_size == Size(64, 128)) \r
+    if (win_size == Size(64, 128))\r
         detector = cv::gpu::HOGDescriptor::getPeopleDetector64x128();\r
     else\r
         detector = cv::gpu::HOGDescriptor::getPeopleDetector48x96();\r
 \r
-    cv::gpu::HOGDescriptor gpu_hog(win_size, Size(16, 16), Size(8, 8), Size(8, 8), 9, \r
-                                   cv::gpu::HOGDescriptor::DEFAULT_WIN_SIGMA, 0.2, gamma_corr, \r
+    cv::gpu::HOGDescriptor gpu_hog(win_size, Size(16, 16), Size(8, 8), Size(8, 8), 9,\r
+                                   cv::gpu::HOGDescriptor::DEFAULT_WIN_SIGMA, 0.2, gamma_corr,\r
                                    cv::gpu::HOGDescriptor::DEFAULT_NLEVELS);\r
-    cv::HOGDescriptor cpu_hog(win_size, Size(16, 16), Size(8, 8), Size(8, 8), 9, 1, -1, \r
+    cv::HOGDescriptor cpu_hog(win_size, Size(16, 16), Size(8, 8), Size(8, 8), 9, 1, -1,\r
                               HOGDescriptor::L2Hys, 0.2, gamma_corr, cv::HOGDescriptor::DEFAULT_NLEVELS);\r
     gpu_hog.setSVMDetector(detector);\r
     cpu_hog.setSVMDetector(detector);\r
@@ -315,10 +314,10 @@ void App::run()
             if (use_gpu)\r
             {\r
                 gpu_img.upload(img);\r
-                gpu_hog.detectMultiScale(gpu_img, found, hit_threshold, win_stride, \r
+                gpu_hog.detectMultiScale(gpu_img, found, hit_threshold, win_stride,\r
                                          Size(0, 0), scale, gr_threshold);\r
             }\r
-            else cpu_hog.detectMultiScale(img, found, hit_threshold, win_stride, \r
+            else cpu_hog.detectMultiScale(img, found, hit_threshold, win_stride,\r
                                           Size(0, 0), scale, gr_threshold);\r
             hogWorkEnd();\r
 \r
@@ -345,7 +344,7 @@ void App::run()
             {\r
                 if (!video_writer.isOpened())\r
                 {\r
-                    video_writer.open(args.dst_video, CV_FOURCC('x','v','i','d'), args.dst_video_fps, \r
+                    video_writer.open(args.dst_video, CV_FOURCC('x','v','i','d'), args.dst_video_fps,\r
                                       img_to_show.size(), true);\r
                     if (!video_writer.isOpened())\r
                         throw std::runtime_error("can't create video writer");\r
index 251c267..141ebb6 100644
@@ -8,12 +8,12 @@
 using namespace cv;\r
 using namespace cv::gpu;\r
 \r
-void help()\r
+static void help()\r
 {\r
 \r
 printf("\nShow off image morphology: erosion, dialation, open and close\n"\r
-       "Call:\n   morphology2 [image]\n"\r
-       "This program also shows use of rect, elipse and cross kernels\n\n");\r
+    "Call:\n   morphology2 [image]\n"\r
+    "This program also shows use of rect, elipse and cross kernels\n\n");\r
 printf( "Hot keys: \n"\r
     "\tESC - quit the program\n"\r
     "\tr - use rectangle structuring element\n"\r
@@ -32,20 +32,20 @@ int open_close_pos = 0;
 int erode_dilate_pos = 0;\r
 \r
 // callback function for open/close trackbar\r
-void OpenClose(int, void*)\r
+static void OpenClose(int, void*)\r
 {\r
     int n = open_close_pos - max_iters;\r
     int an = n > 0 ? n : -n;\r
     Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) );\r
     if( n < 0 )\r
-               cv::gpu::morphologyEx(src, dst, CV_MOP_OPEN, element);\r
+        cv::gpu::morphologyEx(src, dst, CV_MOP_OPEN, element);\r
     else\r
         cv::gpu::morphologyEx(src, dst, CV_MOP_CLOSE, element);\r
     imshow("Open/Close",(Mat)dst);\r
 }\r
 \r
 // callback function for erode/dilate trackbar\r
-void ErodeDilate(int, void*)\r
+static void ErodeDilate(int, void*)\r
 {\r
     int n = erode_dilate_pos - max_iters;\r
     int an = n > 0 ? n : -n;\r
@@ -78,14 +78,14 @@ int main( int argc, char** argv )
 \r
     help();\r
 \r
-       \r
-       if (src.channels() == 3)\r
-       {\r
-               // gpu support only 4th channel images\r
-               GpuMat src4ch;\r
-               cv::gpu::cvtColor(src, src4ch, CV_BGR2BGRA); \r
-               src = src4ch;\r
-       }\r
+\r
+    if (src.channels() == 3)\r
+    {\r
+        // gpu supports only 4-channel images\r
+        GpuMat src4ch;\r
+        cv::gpu::cvtColor(src, src4ch, CV_BGR2BGRA);\r
+        src = src4ch;\r
+    }\r
 \r
     //create windows for output images\r
     namedWindow("Open/Close",1);\r
index c7b55e1..e40f059 100644
@@ -1,4 +1,4 @@
-#if _MSC_VER >= 1400\r
+#if defined _MSC_VER && _MSC_VER >= 1400\r
 #pragma warning( disable : 4201 4408 4127 4100)\r
 #endif\r
 \r
@@ -135,13 +135,13 @@ NCVStatus CopyData(const IplImage *image, const NCVMatrixAlloc<Ncv32f> &dst)
     return NCV_SUCCESS;\r
 }\r
 \r
-NCVStatus LoadImages (const char *frame0Name, \r
-                      const char *frame1Name, \r
-                      int &width, \r
-                      int &height, \r
+NCVStatus LoadImages (const char *frame0Name,\r
+                      const char *frame1Name,\r
+                      int &width,\r
+                      int &height,\r
                       Ptr<NCVMatrixAlloc<Ncv32f> > &src,\r
                       Ptr<NCVMatrixAlloc<Ncv32f> > &dst,\r
-                      IplImage *&firstFrame, \r
+                      IplImage *&firstFrame,\r
                       IplImage *&lastFrame)\r
 {\r
     IplImage *image;\r
@@ -151,11 +151,11 @@ NCVStatus LoadImages (const char *frame0Name,
         std::cout << "Could not open '" << frame0Name << "'\n";\r
         return NCV_FILE_ERROR;\r
     }\r
-    \r
+\r
     firstFrame = image;\r
     // copy data to src\r
     ncvAssertReturnNcvStat (CopyData<RgbToMonochrome> (image, src));\r
-    \r
+\r
     IplImage *image2;\r
     image2 = cvLoadImage (frame1Name);\r
     if (image2 == 0)\r
@@ -189,7 +189,7 @@ inline T MapValue (T x, T a, T b, T c, T d)
 NCVStatus ShowFlow (NCVMatrixAlloc<Ncv32f> &u, NCVMatrixAlloc<Ncv32f> &v, const char *name)\r
 {\r
     IplImage *flowField;\r
-    \r
+\r
     NCVMatrixAlloc<Ncv32f> host_u(*g_pHostMemAllocator, u.width(), u.height());\r
     ncvAssertReturn(host_u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC);\r
 \r
@@ -240,7 +240,7 @@ NCVStatus ShowFlow (NCVMatrixAlloc<Ncv32f> &u, NCVMatrixAlloc<Ncv32f> &v, const
         ptr_u += u.stride () - u.width ();\r
         ptr_v += v.stride () - v.width ();\r
     }\r
-    \r
+\r
     cvShowImage (name, flowField);\r
 \r
     return NCV_SUCCESS;\r
@@ -253,7 +253,7 @@ IplImage *CreateImage (NCVMatrixAlloc<Ncv32f> &h_r, NCVMatrixAlloc<Ncv32f> &h_g,
     if (image == 0) return 0;\r
 \r
     unsigned char *row = reinterpret_cast<unsigned char*> (image->imageData);\r
-    \r
+\r
     for (int i = 0; i < image->height; ++i)\r
     {\r
         for (int j = 0; j < image->width; ++j)\r
@@ -286,10 +286,10 @@ void PrintHelp ()
     std::cout << "\t" << std::setw(15) << PARAM_HELP << " - display this help message\n";\r
 }\r
 \r
-int ProcessCommandLine(int argc, char **argv, \r
-                       Ncv32f &timeStep, \r
-                       char *&frame0Name, \r
-                       char *&frame1Name, \r
+int ProcessCommandLine(int argc, char **argv,\r
+                       Ncv32f &timeStep,\r
+                       char *&frame0Name,\r
+                       char *&frame1Name,\r
                        NCVBroxOpticalFlowDescriptor &desc)\r
 {\r
     timeStep = 0.25f;\r
@@ -461,7 +461,7 @@ int main(int argc, char **argv)
         std::cout << "Failed\n";\r
         return -1;\r
     }\r
-    \r
+\r
     std::cout << "Backward...\n";\r
     if (NCV_SUCCESS != NCVBroxOpticalFlow (desc, *g_pGPUMemAllocator, *dst, *src, uBck, vBck, 0))\r
     {\r
index b9c1939..1cb9176 100644
@@ -144,7 +144,7 @@ string abspath(const string& relpath)
 }\r
 \r
 \r
-int CV_CDECL cvErrorCallback(int /*status*/, const char* /*func_name*/,\r
+static int CV_CDECL cvErrorCallback(int /*status*/, const char* /*func_name*/,\r
                              const char* err_msg, const char* /*file_name*/,\r
                              int /*line*/, void* /*userdata*/)\r
 {\r
index 01020a9..ae16f6d 100644
@@ -10,7 +10,7 @@
 using namespace std;\r
 using namespace cv;\r
 \r
-void InitMatchTemplate()\r
+static void InitMatchTemplate()\r
 {\r
     Mat src; gen(src, 500, 500, CV_32F, 0, 1);\r
     Mat templ; gen(templ, 500, 500, CV_32F, 0, 1);\r
@@ -80,7 +80,7 @@ TEST(remap)
 {\r
     Mat src, dst, xmap, ymap;\r
     gpu::GpuMat d_src, d_dst, d_xmap, d_ymap;\r
-    \r
+\r
     int interpolation = INTER_LINEAR;\r
     int borderMode = BORDER_REPLICATE;\r
 \r
@@ -355,10 +355,10 @@ TEST(BruteForceMatcher)
 \r
     BFMatcher matcher(NORM_L2);\r
 \r
-    Mat query; \r
+    Mat query;\r
     gen(query, 3000, desc_len, CV_32F, 0, 1);\r
-    \r
-    Mat train; \r
+\r
+    Mat train;\r
     gen(train, 3000, desc_len, CV_32F, 0, 1);\r
 \r
     // Init GPU matcher\r
@@ -594,17 +594,17 @@ TEST(cvtColor)
 \r
     gen(src, 4000, 4000, CV_8UC1, 0, 255);\r
     d_src.upload(src);\r
-    \r
+\r
     SUBTEST << "4000x4000, 8UC1, CV_GRAY2BGRA";\r
-    \r
+\r
     cvtColor(src, dst, CV_GRAY2BGRA, 4);\r
 \r
     CPU_ON;\r
     cvtColor(src, dst, CV_GRAY2BGRA, 4);\r
     CPU_OFF;\r
-    \r
+\r
     gpu::cvtColor(d_src, d_dst, CV_GRAY2BGRA, 4);\r
-    \r
+\r
     GPU_ON;\r
     gpu::cvtColor(d_src, d_dst, CV_GRAY2BGRA, 4);\r
     GPU_OFF;\r
@@ -613,104 +613,104 @@ TEST(cvtColor)
     d_src.swap(d_dst);\r
 \r
     SUBTEST << "4000x4000, 8UC3 vs 8UC4, CV_BGR2YCrCb";\r
-    \r
+\r
     cvtColor(src, dst, CV_BGR2YCrCb);\r
 \r
     CPU_ON;\r
     cvtColor(src, dst, CV_BGR2YCrCb);\r
     CPU_OFF;\r
-    \r
+\r
     gpu::cvtColor(d_src, d_dst, CV_BGR2YCrCb, 4);\r
-        \r
+\r
     GPU_ON;\r
     gpu::cvtColor(d_src, d_dst, CV_BGR2YCrCb, 4);\r
     GPU_OFF;\r
-    \r
+\r
     cv::swap(src, dst);\r
     d_src.swap(d_dst);\r
 \r
     SUBTEST << "4000x4000, 8UC4, CV_YCrCb2BGR";\r
-    \r
+\r
     cvtColor(src, dst, CV_YCrCb2BGR, 4);\r
 \r
     CPU_ON;\r
     cvtColor(src, dst, CV_YCrCb2BGR, 4);\r
     CPU_OFF;\r
-    \r
+\r
     gpu::cvtColor(d_src, d_dst, CV_YCrCb2BGR, 4);\r
-        \r
+\r
     GPU_ON;\r
     gpu::cvtColor(d_src, d_dst, CV_YCrCb2BGR, 4);\r
     GPU_OFF;\r
-    \r
+\r
     cv::swap(src, dst);\r
     d_src.swap(d_dst);\r
 \r
     SUBTEST << "4000x4000, 8UC3 vs 8UC4, CV_BGR2XYZ";\r
-    \r
+\r
     cvtColor(src, dst, CV_BGR2XYZ);\r
 \r
     CPU_ON;\r
     cvtColor(src, dst, CV_BGR2XYZ);\r
     CPU_OFF;\r
-    \r
+\r
     gpu::cvtColor(d_src, d_dst, CV_BGR2XYZ, 4);\r
-        \r
+\r
     GPU_ON;\r
     gpu::cvtColor(d_src, d_dst, CV_BGR2XYZ, 4);\r
     GPU_OFF;\r
-    \r
+\r
     cv::swap(src, dst);\r
     d_src.swap(d_dst);\r
 \r
     SUBTEST << "4000x4000, 8UC4, CV_XYZ2BGR";\r
-    \r
+\r
     cvtColor(src, dst, CV_XYZ2BGR, 4);\r
 \r
     CPU_ON;\r
     cvtColor(src, dst, CV_XYZ2BGR, 4);\r
     CPU_OFF;\r
-    \r
+\r
     gpu::cvtColor(d_src, d_dst, CV_XYZ2BGR, 4);\r
-        \r
+\r
     GPU_ON;\r
     gpu::cvtColor(d_src, d_dst, CV_XYZ2BGR, 4);\r
     GPU_OFF;\r
-    \r
+\r
     cv::swap(src, dst);\r
     d_src.swap(d_dst);\r
 \r
     SUBTEST << "4000x4000, 8UC3 vs 8UC4, CV_BGR2HSV";\r
-    \r
+\r
     cvtColor(src, dst, CV_BGR2HSV);\r
 \r
     CPU_ON;\r
     cvtColor(src, dst, CV_BGR2HSV);\r
     CPU_OFF;\r
-    \r
+\r
     gpu::cvtColor(d_src, d_dst, CV_BGR2HSV, 4);\r
-        \r
+\r
     GPU_ON;\r
     gpu::cvtColor(d_src, d_dst, CV_BGR2HSV, 4);\r
     GPU_OFF;\r
-    \r
+\r
     cv::swap(src, dst);\r
     d_src.swap(d_dst);\r
 \r
     SUBTEST << "4000x4000, 8UC4, CV_HSV2BGR";\r
-    \r
+\r
     cvtColor(src, dst, CV_HSV2BGR, 4);\r
 \r
     CPU_ON;\r
     cvtColor(src, dst, CV_HSV2BGR, 4);\r
     CPU_OFF;\r
-    \r
+\r
     gpu::cvtColor(d_src, d_dst, CV_HSV2BGR, 4);\r
-        \r
+\r
     GPU_ON;\r
     gpu::cvtColor(d_src, d_dst, CV_HSV2BGR, 4);\r
     GPU_OFF;\r
-    \r
+\r
     cv::swap(src, dst);\r
     d_src.swap(d_dst);\r
 }\r
@@ -757,7 +757,7 @@ TEST(threshold)
 \r
         threshold(src, dst, 50.0, 0.0, THRESH_BINARY);\r
 \r
-        CPU_ON; \r
+        CPU_ON;\r
         threshold(src, dst, 50.0, 0.0, THRESH_BINARY);\r
         CPU_OFF;\r
 \r
@@ -778,7 +778,7 @@ TEST(threshold)
 \r
         threshold(src, dst, 50.0, 0.0, THRESH_TRUNC);\r
 \r
-        CPU_ON; \r
+        CPU_ON;\r
         threshold(src, dst, 50.0, 0.0, THRESH_TRUNC);\r
         CPU_OFF;\r
 \r
@@ -857,7 +857,7 @@ TEST(projectPoints)
 }\r
 \r
 \r
-void InitSolvePnpRansac()\r
+static void InitSolvePnpRansac()\r
 {\r
     Mat object; gen(object, 1, 4, CV_32FC3, Scalar::all(0), Scalar::all(100));\r
     Mat image; gen(image, 1, 4, CV_32FC2, Scalar::all(0), Scalar::all(100));\r
@@ -906,7 +906,7 @@ TEST(GaussianBlur)
         SUBTEST << size << 'x' << size << ", 8UC4";\r
 \r
         Mat src, dst;\r
-        \r
+\r
         gen(src, size, size, CV_8UC4, 0, 256);\r
 \r
         GaussianBlur(src, dst, Size(3, 3), 1);\r
@@ -933,11 +933,11 @@ TEST(filter2D)
     {\r
         Mat src;\r
         gen(src, size, size, CV_8UC4, 0, 256);\r
-                \r
+\r
         for (int ksize = 3; ksize <= 16; ksize += 2)\r
-        {        \r
+        {\r
             SUBTEST << "ksize = " << ksize << ", " << size << 'x' << size << ", 8UC4";\r
-            \r
+\r
             Mat kernel;\r
             gen(kernel, ksize, ksize, CV_32FC1, 0.0, 1.0);\r
 \r
@@ -966,7 +966,7 @@ TEST(pyrDown)
     {\r
         SUBTEST << size << 'x' << size << ", 8UC4";\r
 \r
-        Mat src, dst; \r
+        Mat src, dst;\r
         gen(src, size, size, CV_8UC4, 0, 256);\r
 \r
         pyrDown(src, dst);\r
@@ -992,7 +992,7 @@ TEST(pyrUp)
     {\r
         SUBTEST << size << 'x' << size << ", 8UC4";\r
 \r
-        Mat src, dst; \r
+        Mat src, dst;\r
 \r
         gen(src, size, size, CV_8UC4, 0, 256);\r
 \r
@@ -1055,7 +1055,7 @@ TEST(Canny)
     CPU_ON;\r
     Canny(img, edges, 50.0, 100.0);\r
     CPU_OFF;\r
-    \r
+\r
     gpu::GpuMat d_img(img);\r
     gpu::GpuMat d_edges;\r
     gpu::CannyBuf d_buf;\r
@@ -1176,10 +1176,10 @@ TEST(PyrLKOpticalFlow)
 \r
     Mat frame1 = imread(abspath("rubberwhale2.png"));\r
     if (frame1.empty()) throw runtime_error("can't open rubberwhale2.png");\r
-    \r
+\r
     Mat gray_frame;\r
     cvtColor(frame0, gray_frame, COLOR_BGR2GRAY);\r
-    \r
+\r
     for (int points = 1000; points <= 8000; points *= 2)\r
     {\r
         SUBTEST << points;\r
index 26698ce..bd3b1e6 100644
@@ -13,21 +13,21 @@ using namespace std;
 using namespace cv;\r
 using namespace cv::gpu;\r
 \r
-void download(const GpuMat& d_mat, vector<Point2f>& vec)\r
+static void download(const GpuMat& d_mat, vector<Point2f>& vec)\r
 {\r
     vec.resize(d_mat.cols);\r
     Mat mat(1, d_mat.cols, CV_32FC2, (void*)&vec[0]);\r
     d_mat.download(mat);\r
 }\r
 \r
-void download(const GpuMat& d_mat, vector<uchar>& vec)\r
+static void download(const GpuMat& d_mat, vector<uchar>& vec)\r
 {\r
     vec.resize(d_mat.cols);\r
     Mat mat(1, d_mat.cols, CV_8UC1, (void*)&vec[0]);\r
     d_mat.download(mat);\r
 }\r
 \r
-void drawArrows(Mat& frame, const vector<Point2f>& prevPts, const vector<Point2f>& nextPts, const vector<uchar>& status, Scalar line_color = Scalar(0, 0, 255))\r
+static void drawArrows(Mat& frame, const vector<Point2f>& prevPts, const vector<Point2f>& nextPts, const vector<uchar>& status, Scalar line_color = Scalar(0, 0, 255))\r
 {\r
     for (size_t i = 0; i < prevPts.size(); ++i)\r
     {\r
@@ -111,7 +111,7 @@ template <typename T> inline T mapValue(T x, T a, T b, T c, T d)
     return c + (d - c) * (x - a) / (b - a);\r
 }\r
 \r
-void getFlowField(const Mat& u, const Mat& v, Mat& flowField)\r
+static void getFlowField(const Mat& u, const Mat& v, Mat& flowField)\r
 {\r
     float maxDisplacement = 1.0f;\r
 \r
index ea6ad37..b6791f6 100644
@@ -42,7 +42,7 @@ struct App
     void printParams() const;\r
 \r
     void workBegin() { work_begin = getTickCount(); }\r
-    void workEnd() \r
+    void workEnd()\r
     {\r
         int64 d = getTickCount() - work_begin;\r
         double f = getTickFrequency();\r
@@ -61,7 +61,7 @@ private:
     bool running;\r
 \r
     Mat left_src, right_src;\r
-    Mat left, right; \r
+    Mat left, right;\r
     gpu::GpuMat d_left, d_right;\r
 \r
     gpu::StereoBM_GPU bm;\r
@@ -72,7 +72,7 @@ private:
     double work_fps;\r
 };\r
 \r
-void printHelp()\r
+static void printHelp()\r
 {\r
     cout << "Usage: stereo_match_gpu\n"\r
         << "\t--left <left_view> --right <right_view> # must be rectified\n"\r
@@ -119,7 +119,7 @@ Params Params::read(int argc, char** argv)
     {\r
         if (string(argv[i]) == "--left") p.left = argv[++i];\r
         else if (string(argv[i]) == "--right") p.right = argv[++i];\r
-        else if (string(argv[i]) == "--method") \r
+        else if (string(argv[i]) == "--method")\r
         {\r
             if (string(argv[i + 1]) == "BM") p.method = BM;\r
             else if (string(argv[i + 1]) == "BP") p.method = BP;\r
@@ -137,7 +137,7 @@ Params Params::read(int argc, char** argv)
 \r
 \r
 App::App(const Params& p)\r
-    : p(p), running(false) \r
+    : p(p), running(false)\r
 {\r
     cv::gpu::printShortCudaDeviceInfo(cv::gpu::getDevice());\r
 \r
@@ -170,7 +170,7 @@ void App::run()
     imshow("left", left);\r
     imshow("right", right);\r
 \r
-       // Set common parameters\r
+    // Set common parameters\r
     bm.ndisp = p.ndisp;\r
     bp.ndisp = p.ndisp;\r
     csbp.ndisp = p.ndisp;\r
@@ -188,7 +188,7 @@ void App::run()
         workBegin();\r
         switch (p.method)\r
         {\r
-        case Params::BM: \r
+        case Params::BM:\r
             if (d_left.channels() > 1 || d_right.channels() > 1)\r
             {\r
                 cout << "BM doesn't support color images\n";\r
@@ -200,7 +200,7 @@ void App::run()
                 imshow("left", left);\r
                 imshow("right", right);\r
             }\r
-            bm(d_left, d_right, d_disp); \r
+            bm(d_left, d_right, d_disp);\r
             break;\r
         case Params::BP: bp(d_left, d_right, d_disp); break;\r
         case Params::CSBP: csbp(d_left, d_right, d_disp); break;\r
@@ -252,14 +252,14 @@ void App::handleKey(char key)
         break;\r
     case 'p': case 'P':\r
         printParams();\r
-        break;  \r
+        break;\r
     case 'g': case 'G':\r
         if (left.channels() == 1 && p.method != Params::BM)\r
         {\r
             left = left_src;\r
             right = right_src;\r
         }\r
-        else \r
+        else\r
         {\r
             cvtColor(left_src, left, CV_BGR2GRAY);\r
             cvtColor(right_src, right, CV_BGR2GRAY);\r
index b56fa56..605ed6a 100644
@@ -9,7 +9,7 @@ using namespace std;
 using namespace cv;\r
 using namespace cv::gpu;\r
 \r
-void help()\r
+static void help()\r
 {\r
     cout << "\nThis program demonstrates using SURF_GPU features detector, descriptor extractor and BruteForceMatcher_GPU" << endl;\r
     cout << "\nUsage:\n\tmatcher_simple_gpu --left <image1> --right <image2>" << endl;\r
@@ -52,7 +52,7 @@ int main(int argc, char* argv[])
     GpuMat descriptors1GPU, descriptors2GPU;\r
     surf(img1, GpuMat(), keypoints1GPU, descriptors1GPU);\r
     surf(img2, GpuMat(), keypoints2GPU, descriptors2GPU);\r
-    \r
+\r
     cout << "FOUND " << keypoints1GPU.cols << " keypoints on first image" << endl;\r
     cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;\r
 \r
@@ -60,7 +60,7 @@ int main(int argc, char* argv[])
     BFMatcher_GPU matcher(NORM_L2);\r
     GpuMat trainIdx, distance;\r
     matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance);\r
-    \r
+\r
     // downloading results\r
     vector<KeyPoint> keypoints1, keypoints2;\r
     vector<float> descriptors1, descriptors2;\r
@@ -74,7 +74,7 @@ int main(int argc, char* argv[])
     // drawing the results\r
     Mat img_matches;\r
     drawMatches(Mat(img1), keypoints1, Mat(img2), keypoints2, matches, img_matches);\r
-    \r
+\r
     namedWindow("matches", 0);\r
     imshow("matches", img_matches);\r
     waitKey(0);\r