moved nonfree and a part of features2d to opencv_contrib/xfeatures2d
author Vadim Pisarevsky <vadim.pisarevsky@gmail.com>
Mon, 11 Aug 2014 19:26:39 +0000 (23:26 +0400)
committer Vadim Pisarevsky <vadim.pisarevsky@gmail.com>
Mon, 11 Aug 2014 19:26:39 +0000 (23:26 +0400)
66 files changed:
doc/CMakeLists.txt
doc/tutorials/features2d/feature_description/feature_description.rst
doc/tutorials/features2d/feature_detection/feature_detection.rst
doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.rst
doc/tutorials/features2d/feature_homography/feature_homography.rst
modules/core/doc/basic_structures.rst
modules/features2d/doc/common_interfaces_of_descriptor_extractors.rst
modules/features2d/doc/common_interfaces_of_feature_detectors.rst
modules/features2d/include/opencv2/features2d.hpp
modules/features2d/src/brief.cpp [deleted file]
modules/features2d/src/descriptors.cpp
modules/features2d/src/detectors.cpp
modules/features2d/src/dynamic.cpp
modules/features2d/src/evaluation.cpp
modules/features2d/src/features2d_init.cpp
modules/features2d/src/freak.cpp [deleted file]
modules/features2d/src/generated_16.i [deleted file]
modules/features2d/src/generated_32.i [deleted file]
modules/features2d/src/generated_64.i [deleted file]
modules/features2d/src/matchers.cpp
modules/features2d/src/stardetector.cpp [deleted file]
modules/java/generator/rst_parser.py
modules/java/generator/src/cpp/jni_part.cpp
modules/nonfree/CMakeLists.txt [deleted file]
modules/nonfree/doc/feature_detection.rst [deleted file]
modules/nonfree/doc/nonfree.rst [deleted file]
modules/nonfree/include/opencv2/nonfree.hpp [deleted file]
modules/nonfree/include/opencv2/nonfree/cuda.hpp [deleted file]
modules/nonfree/include/opencv2/nonfree/features2d.hpp [deleted file]
modules/nonfree/include/opencv2/nonfree/nonfree.hpp [deleted file]
modules/nonfree/perf/perf_main.cpp [deleted file]
modules/nonfree/perf/perf_precomp.hpp [deleted file]
modules/nonfree/perf/perf_surf.cpp [deleted file]
modules/nonfree/perf/perf_surf.cuda.cpp [deleted file]
modules/nonfree/perf/perf_surf.ocl.cpp [deleted file]
modules/nonfree/src/cuda/surf.cu [deleted file]
modules/nonfree/src/nonfree_init.cpp [deleted file]
modules/nonfree/src/opencl/surf.cl [deleted file]
modules/nonfree/src/precomp.hpp [deleted file]
modules/nonfree/src/sift.cpp [deleted file]
modules/nonfree/src/surf.cpp [deleted file]
modules/nonfree/src/surf.cuda.cpp [deleted file]
modules/nonfree/src/surf.hpp [deleted file]
modules/nonfree/src/surf.ocl.cpp [deleted file]
modules/nonfree/test/test_detectors.cpp [deleted file]
modules/nonfree/test/test_features2d.cpp [deleted file]
modules/nonfree/test/test_keypoints.cpp [deleted file]
modules/nonfree/test/test_main.cpp [deleted file]
modules/nonfree/test/test_precomp.hpp [deleted file]
modules/nonfree/test/test_rotation_and_scale_invariance.cpp [deleted file]
modules/nonfree/test/test_surf.cuda.cpp [deleted file]
modules/nonfree/test/test_surf.ocl.cpp [deleted file]
modules/python/common.cmake
modules/stitching/CMakeLists.txt
samples/cpp/CMakeLists.txt
samples/cpp/bagofwords_classification.cpp [deleted file]
samples/cpp/descriptor_extractor_matcher.cpp [deleted file]
samples/cpp/shape_transformation.cpp [deleted file]
samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp [deleted file]
samples/cpp/tutorial_code/features2D/SURF_Homography.cpp [deleted file]
samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp [deleted file]
samples/cpp/tutorial_code/features2D/SURF_detector.cpp [deleted file]
samples/cpp/video_homography.cpp [deleted file]
samples/gpu/CMakeLists.txt
samples/tapi/CMakeLists.txt
samples/tapi/surf_matcher.cpp [deleted file]

index 957e69d..7dcc96f 100644 (file)
@@ -33,7 +33,7 @@ if(BUILD_DOCS AND HAVE_SPHINX)
     endif()
   endforeach()
 
-  set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video calib3d features2d objdetect ml flann photo stitching nonfree contrib legacy)
+  set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video calib3d features2d objdetect ml flann photo stitching)
 
   list(REMOVE_ITEM BASE_MODULES ${FIXED_ORDER_MODULES})
 
index 967b32d..17dee72 100644 (file)
@@ -23,7 +23,7 @@ Theory
 Code
 ====
 
-This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp>`_
+This tutorial's code is shown in the lines below.
 
 .. code-block:: cpp
 
@@ -32,9 +32,10 @@ This tutorial code's is shown lines below. You can also download it from `here <
    #include "opencv2/core.hpp"
    #include "opencv2/features2d.hpp"
    #include "opencv2/highgui.hpp"
-   #include "opencv2/nonfree.hpp"
+   #include "opencv2/xfeatures2d.hpp"
 
    using namespace cv;
+   using namespace cv::xfeatures2d;
 
    void readme();
 
@@ -50,25 +51,19 @@ This tutorial code's is shown lines below. You can also download it from `here <
      if( !img_1.data || !img_2.data )
       { return -1; }
 
-     //-- Step 1: Detect the keypoints using SURF Detector
+     //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
      int minHessian = 400;
 
-     SurfFeatureDetector detector( minHessian );
+     Ptr<SURF> detector = SURF::create();
+     detector->setMinHessian(minHessian);
 
      std::vector<KeyPoint> keypoints_1, keypoints_2;
-
-     detector.detect( img_1, keypoints_1 );
-     detector.detect( img_2, keypoints_2 );
-
-     //-- Step 2: Calculate descriptors (feature vectors)
-     SurfDescriptorExtractor extractor;
-
      Mat descriptors_1, descriptors_2;
 
-     extractor.compute( img_1, keypoints_1, descriptors_1 );
-     extractor.compute( img_2, keypoints_2, descriptors_2 );
+     detector->detectAndCompute( img_1, keypoints_1, descriptors_1 );
+     detector->detectAndCompute( img_2, keypoints_2, descriptors_2 );
 
-     //-- Step 3: Matching descriptor vectors with a brute force matcher
+     //-- Step 2: Matching descriptor vectors with a brute force matcher
      BFMatcher matcher(NORM_L2);
      std::vector< DMatch > matches;
      matcher.match( descriptors_1, descriptors_2, matches );
index 7705a13..685a058 100644 (file)
@@ -22,7 +22,7 @@ Theory
 Code
 ====
 
-This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/features2D/SURF_detector.cpp>`_
+This tutorial's code is shown in the lines below.
 
 .. code-block:: cpp
 
@@ -30,11 +30,11 @@ This tutorial code's is shown lines below. You can also download it from `here <
    #include <iostream>
    #include "opencv2/core.hpp"
    #include "opencv2/features2d.hpp"
-   #include "opencv2/nonfree/features2d.hpp"
+   #include "opencv2/xfeatures2d.hpp"
    #include "opencv2/highgui.hpp"
-   #include "opencv2/nonfree.hpp"
 
    using namespace cv;
+   using namespace cv::xfeatures2d;
 
    void readme();
 
@@ -53,12 +53,12 @@ This tutorial code's is shown lines below. You can also download it from `here <
      //-- Step 1: Detect the keypoints using SURF Detector
      int minHessian = 400;
 
-     SurfFeatureDetector detector( minHessian );
+     Ptr<SURF> detector = SURF::create( minHessian );
 
      std::vector<KeyPoint> keypoints_1, keypoints_2;
 
-     detector.detect( img_1, keypoints_1 );
-     detector.detect( img_2, keypoints_2 );
+     detector->detect( img_1, keypoints_1 );
+     detector->detect( img_2, keypoints_2 );
 
      //-- Draw keypoints
      Mat img_keypoints_1; Mat img_keypoints_2;
index e4630c9..6be334b 100644 (file)
@@ -19,10 +19,116 @@ Theory
 Code
 ====
 
-This tutorial code's is shown lines below. You can also download it from `here <https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp>`_
+This tutorial's code is shown in the lines below.
+
+.. code-block:: cpp
+
+    /**
+     * @file SURF_FlannMatcher
+     * @brief SURF detector + descriptor + FLANN Matcher
+     * @author A. Huaman
+     */
+
+    #include <stdio.h>
+    #include <iostream>
+    #include <stdio.h>
+    #include <iostream>
+    #include "opencv2/core.hpp"
+    #include "opencv2/features2d.hpp"
+    #include "opencv2/imgcodecs.hpp"
+    #include "opencv2/highgui.hpp"
+    #include "opencv2/xfeatures2d.hpp"
+
+    using namespace std;
+    using namespace cv;
+    using namespace cv::xfeatures2d;
+
+    void readme();
+
+    /**
+     * @function main
+     * @brief Main function
+     */
+    int main( int argc, char** argv )
+    {
+      if( argc != 3 )
+      { readme(); return -1; }
+
+      Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
+      Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
+
+      if( !img_1.data || !img_2.data )
+      { std::cout<< " --(!) Error reading images " << std::endl; return -1; }
+
+      //-- Step 1: Detect the keypoints using SURF Detector
+      int minHessian = 400;
+
+      SurfFeatureDetector detector( minHessian );
+
+      std::vector<KeyPoint> keypoints_1, keypoints_2;
+
+      detector.detect( img_1, keypoints_1 );
+      detector.detect( img_2, keypoints_2 );
+
+      //-- Step 2: Calculate descriptors (feature vectors)
+      SurfDescriptorExtractor extractor;
+
+      Mat descriptors_1, descriptors_2;
+
+      extractor.compute( img_1, keypoints_1, descriptors_1 );
+      extractor.compute( img_2, keypoints_2, descriptors_2 );
+
+      //-- Step 3: Matching descriptor vectors using FLANN matcher
+      FlannBasedMatcher matcher;
+      std::vector< DMatch > matches;
+      matcher.match( descriptors_1, descriptors_2, matches );
+
+      double max_dist = 0; double min_dist = 100;
+
+      //-- Quick calculation of max and min distances between keypoints
+      for( int i = 0; i < descriptors_1.rows; i++ )
+      { double dist = matches[i].distance;
+        if( dist < min_dist ) min_dist = dist;
+        if( dist > max_dist ) max_dist = dist;
+      }
+
+      printf("-- Max dist : %f \n", max_dist );
+      printf("-- Min dist : %f \n", min_dist );
+
+      //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
+      //-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
+      //-- small)
+      //-- PS.- radiusMatch can also be used here.
+      std::vector< DMatch > good_matches;
+
+      for( int i = 0; i < descriptors_1.rows; i++ )
+      { if( matches[i].distance <= max(2*min_dist, 0.02) )
+        { good_matches.push_back( matches[i]); }
+      }
+
+      //-- Draw only "good" matches
+      Mat img_matches;
+      drawMatches( img_1, keypoints_1, img_2, keypoints_2,
+                   good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
+                   vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
+
+      //-- Show detected matches
+      imshow( "Good Matches", img_matches );
+
+      for( int i = 0; i < (int)good_matches.size(); i++ )
+      { printf( "-- Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
+
+      waitKey(0);
+
+      return 0;
+    }
+
+    /**
+     * @function readme
+     */
+    void readme()
+    { std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl; }
 
-.. literalinclude:: ../../../../samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp
-   :language: cpp
 
 Explanation
 ============
index 3040ed7..91e70c9 100644 (file)
@@ -20,7 +20,7 @@ Theory
 Code
 ====
 
-This tutorial code's is shown lines below. You can also download it from `here <http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp>`_
+This tutorial's code is shown in the lines below.
 
 .. code-block:: cpp
 
@@ -30,9 +30,10 @@ This tutorial code's is shown lines below. You can also download it from `here <
    #include "opencv2/features2d.hpp"
    #include "opencv2/highgui.hpp"
    #include "opencv2/calib3d.hpp"
-   #include "opencv2/nonfree.hpp"
+   #include "opencv2/xfeatures2d.hpp"
 
    using namespace cv;
+   using namespace cv::xfeatures2d;
 
    void readme();
 
index d08b2d6..8a759bc 100644 (file)
@@ -3050,13 +3050,15 @@ The class provides the following features for all derived classes:
 Here is example of SIFT use in your application via Algorithm interface: ::
 
     #include "opencv2/opencv.hpp"
-    #include "opencv2/nonfree.hpp"
+    #include "opencv2/xfeatures2d.hpp"
+    
+    using namespace cv::xfeatures2d;
 
     ...
 
     initModule_nonfree(); // to load SURF/SIFT etc.
 
-    Ptr<Feature2D> sift = Algorithm::create<Feature2D>("Feature2D.SIFT");
+    Ptr<Feature2D> sift = SIFT::create();
 
     FileStorage fs("sift_params.xml", FileStorage::READ);
     if( fs.isOpened() ) // if we have file with parameters, read them
index 4bb2ba1..e99c672 100644 (file)
@@ -94,28 +94,3 @@ The current implementation supports the following types of a descriptor extracto
 A combined format is also supported: descriptor extractor adapter name ( ``"Opponent"`` --
 :ocv:class:`OpponentColorDescriptorExtractor` ) + descriptor extractor name (see above),
 for example: ``"OpponentSIFT"`` .
-
-
-OpponentColorDescriptorExtractor
---------------------------------
-.. ocv:class:: OpponentColorDescriptorExtractor : public DescriptorExtractor
-
-Class adapting a descriptor extractor to compute descriptors in the Opponent Color Space
-(refer to Van de Sande et al., CGIV 2008 *Color Descriptors for Object Category Recognition*).
-Input RGB image is transformed in the Opponent Color Space. Then, an unadapted descriptor extractor
-(set in the constructor) computes descriptors on each of three channels and concatenates
-them into a single color descriptor. ::
-
-    class OpponentColorDescriptorExtractor : public DescriptorExtractor
-    {
-    public:
-        OpponentColorDescriptorExtractor( const Ptr<DescriptorExtractor>& dextractor );
-
-        virtual void read( const FileNode& );
-        virtual void write( FileStorage& ) const;
-        virtual int descriptorSize() const;
-        virtual int descriptorType() const;
-        virtual int defaultNorm() const;
-    protected:
-        ...
-    };
index 62a9907..4f31dc0 100644 (file)
@@ -73,8 +73,6 @@ The following detector types are supported:
 
 * ``"FAST"`` -- :ocv:class:`FastFeatureDetector`
 * ``"STAR"`` -- :ocv:class:`StarFeatureDetector`
-* ``"SIFT"`` -- :ocv:class:`SIFT` (nonfree module)
-* ``"SURF"`` -- :ocv:class:`SURF` (nonfree module)
 * ``"ORB"`` -- :ocv:class:`ORB`
 * ``"BRISK"`` -- :ocv:class:`BRISK`
 * ``"MSER"`` -- :ocv:class:`MSER`
@@ -164,55 +162,6 @@ Wrapping class for feature detection using the
         ...
     };
 
-
-StarFeatureDetector
--------------------
-.. ocv:class:: StarFeatureDetector : public FeatureDetector
-
-The class implements the keypoint detector introduced by [Agrawal08]_, synonym of ``StarDetector``.  ::
-
-    class StarFeatureDetector : public FeatureDetector
-    {
-    public:
-        StarFeatureDetector( int maxSize=16, int responseThreshold=30,
-                             int lineThresholdProjected = 10,
-                             int lineThresholdBinarized=8, int suppressNonmaxSize=5 );
-        virtual void read( const FileNode& fn );
-        virtual void write( FileStorage& fs ) const;
-    protected:
-        ...
-    };
-
-.. [Agrawal08] Agrawal, M., Konolige, K., & Blas, M. R. (2008). Censure: Center surround extremas for realtime feature detection and matching. In Computer Vision–ECCV 2008 (pp. 102-115). Springer Berlin Heidelberg.
-
-
-DenseFeatureDetector
---------------------
-.. ocv:class:: DenseFeatureDetector : public FeatureDetector
-
-Class for generation of image features which are distributed densely and regularly over the image. ::
-
-        class DenseFeatureDetector : public FeatureDetector
-        {
-        public:
-                DenseFeatureDetector( float initFeatureScale=1.f, int featureScaleLevels=1,
-                              float featureScaleMul=0.1f,
-                              int initXyStep=6, int initImgBound=0,
-                              bool varyXyStepWithScale=true,
-                              bool varyImgBoundWithScale=false );
-        protected:
-        ...
-    };
-
-The detector generates several levels (in the amount of ``featureScaleLevels``) of features. Features of each level are located in the nodes of a regular grid over the image (excluding the image boundary of given size). The level parameters (a feature scale, a node size, a size of boundary) are multiplied by ``featureScaleMul`` with level index growing depending on input flags, viz.:
-
-* Feature scale is multiplied always.
-
-* The grid node size is multiplied if ``varyXyStepWithScale`` is ``true``.
-
-* Size of image boundary is multiplied if ``varyImgBoundWithScale`` is ``true``.
-
-
 SimpleBlobDetector
 -------------------
 .. ocv:class:: SimpleBlobDetector : public FeatureDetector
@@ -277,226 +226,3 @@ This class performs several filtrations of returned blobs. You should set ``filt
 
 
 Default values of parameters are tuned to extract dark circular blobs.
-
-GridAdaptedFeatureDetector
---------------------------
-.. ocv:class:: GridAdaptedFeatureDetector : public FeatureDetector
-
-Class adapting a detector to partition the source image into a grid and detect points in each cell. ::
-
-    class GridAdaptedFeatureDetector : public FeatureDetector
-    {
-    public:
-        /*
-         * detector            Detector that will be adapted.
-         * maxTotalKeypoints   Maximum count of keypoints detected on the image.
-         *                     Only the strongest keypoints will be kept.
-         * gridRows            Grid row count.
-         * gridCols            Grid column count.
-         */
-        GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector,
-                                    int maxTotalKeypoints, int gridRows=4,
-                                    int gridCols=4 );
-        virtual void read( const FileNode& fn );
-        virtual void write( FileStorage& fs ) const;
-    protected:
-        ...
-    };
-
-PyramidAdaptedFeatureDetector
------------------------------
-.. ocv:class:: PyramidAdaptedFeatureDetector : public FeatureDetector
-
-Class adapting a detector to detect points over multiple levels of a Gaussian pyramid. Consider using this class for detectors that are not inherently scaled. ::
-
-    class PyramidAdaptedFeatureDetector : public FeatureDetector
-    {
-    public:
-        PyramidAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector,
-                                       int levels=2 );
-        virtual void read( const FileNode& fn );
-        virtual void write( FileStorage& fs ) const;
-    protected:
-        ...
-    };
-
-
-DynamicAdaptedFeatureDetector
------------------------------
-.. ocv:class:: DynamicAdaptedFeatureDetector : public FeatureDetector
-
-Adaptively adjusting detector that iteratively detects features until the desired number is found. ::
-
-       class DynamicAdaptedFeatureDetector: public FeatureDetector
-       {
-       public:
-           DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster,
-               int min_features=400, int max_features=500, int max_iters=5 );
-           ...
-       };
-
-If the detector is persisted, it "remembers" the parameters
-used for the last detection. In this case, the detector may be used for consistent numbers
-of keypoints in a set of temporally related images, such as video streams or
-panorama series.
-
-``DynamicAdaptedFeatureDetector``  uses another detector, such as FAST or SURF, to do the dirty work,
-with the help of ``AdjusterAdapter`` .
-If the detected number of features is not large enough,
-``AdjusterAdapter`` adjusts the detection parameters so that the next detection
-results in a bigger or smaller number of features.  This is repeated until either the number of desired features are found
-or the parameters are maxed out.
-
-Adapters can be easily implemented for any detector via the
-``AdjusterAdapter`` interface.
-
-Beware that this is not thread-safe since the adjustment of parameters requires modification of the feature detector class instance.
-
-Example of creating ``DynamicAdaptedFeatureDetector`` : ::
-
-    //sample usage:
-    //will create a detector that attempts to find
-    //100 - 110 FAST Keypoints, and will at most run
-    //FAST feature detection 10 times until that
-    //number of keypoints are found
-    Ptr<FeatureDetector> detector(new DynamicAdaptedFeatureDetector (100, 110, 10,
-                                  new FastAdjuster(20,true)));
-
-
-DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector
-------------------------------------------------------------
-The constructor
-
-.. ocv:function:: DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features=400, int max_features=500, int max_iters=5 )
-
-    :param adjuster:  :ocv:class:`AdjusterAdapter`  that detects features and adjusts parameters.
-
-    :param min_features: Minimum desired number of features.
-
-    :param max_features: Maximum desired number of features.
-
-    :param max_iters: Maximum number of times to try adjusting the feature detector parameters. For :ocv:class:`FastAdjuster` , this number can be high, but with ``Star`` or ``Surf``  many iterations can be time-consuming.  At each iteration the detector is rerun.
-
-AdjusterAdapter
----------------
-.. ocv:class:: AdjusterAdapter : public FeatureDetector
-
-Class providing an interface for adjusting parameters of a feature detector. This interface is used by :ocv:class:`DynamicAdaptedFeatureDetector` . It is a wrapper for :ocv:class:`FeatureDetector` that enables adjusting parameters after feature detection. ::
-
-     class AdjusterAdapter: public FeatureDetector
-     {
-     public:
-        virtual ~AdjusterAdapter() {}
-        virtual void tooFew(int min, int n_detected) = 0;
-        virtual void tooMany(int max, int n_detected) = 0;
-        virtual bool good() const = 0;
-        virtual Ptr<AdjusterAdapter> clone() const = 0;
-        static Ptr<AdjusterAdapter> create( const String& detectorType );
-     };
-
-
-See
-:ocv:class:`FastAdjuster`,
-:ocv:class:`StarAdjuster`, and
-:ocv:class:`SurfAdjuster` for concrete implementations.
-
-AdjusterAdapter::tooFew
----------------------------
-Adjusts the detector parameters to detect more features.
-
-.. ocv:function:: void AdjusterAdapter::tooFew(int min, int n_detected)
-
-    :param min: Minimum desired number of features.
-
-    :param n_detected: Number of features detected during the latest run.
-
-Example: ::
-
-    void FastAdjuster::tooFew(int min, int n_detected)
-    {
-            thresh_--;
-    }
-
-AdjusterAdapter::tooMany
-----------------------------
-Adjusts the detector parameters to detect less features.
-
-.. ocv:function:: void AdjusterAdapter::tooMany(int max, int n_detected)
-
-    :param max: Maximum desired number of features.
-
-    :param n_detected: Number of features detected during the latest run.
-
-Example: ::
-
-    void FastAdjuster::tooMany(int min, int n_detected)
-    {
-            thresh_++;
-    }
-
-
-AdjusterAdapter::good
----------------------
-Returns false if the detector parameters cannot be adjusted any more.
-
-.. ocv:function:: bool AdjusterAdapter::good() const
-
-Example: ::
-
-        bool FastAdjuster::good() const
-        {
-                return (thresh_ > 1) && (thresh_ < 200);
-        }
-
-AdjusterAdapter::create
------------------------
-Creates an adjuster adapter by name
-
-.. ocv:function:: Ptr<AdjusterAdapter> AdjusterAdapter::create( const String& detectorType )
-
-    Creates an adjuster adapter by name ``detectorType``. The detector name is the same as in :ocv:func:`FeatureDetector::create`, but now supports ``"FAST"``, ``"STAR"``, and ``"SURF"`` only.
-
-FastAdjuster
-------------
-.. ocv:class:: FastAdjuster : public AdjusterAdapter
-
-:ocv:class:`AdjusterAdapter` for :ocv:class:`FastFeatureDetector`. This class decreases or increases the threshold value by 1. ::
-
-        class FastAdjuster FastAdjuster: public AdjusterAdapter
-        {
-        public:
-                FastAdjuster(int init_thresh = 20, bool nonmax = true);
-                ...
-        };
-
-StarAdjuster
-------------
-.. ocv:class:: StarAdjuster : public AdjusterAdapter
-
-:ocv:class:`AdjusterAdapter` for :ocv:class:`StarFeatureDetector`. This class adjusts the ``responseThreshhold`` of ``StarFeatureDetector``.  ::
-
-        class StarAdjuster: public AdjusterAdapter
-        {
-                StarAdjuster(double initial_thresh = 30.0);
-                ...
-        };
-
-SurfAdjuster
-------------
-.. ocv:class:: SurfAdjuster : public AdjusterAdapter
-
-:ocv:class:`AdjusterAdapter` for ``SurfFeatureDetector``.  ::
-
-    class CV_EXPORTS SurfAdjuster: public AdjusterAdapter
-    {
-    public:
-        SurfAdjuster( double initial_thresh=400.f, double min_thresh=2, double max_thresh=1000 );
-
-        virtual void tooFew(int minv, int n_detected);
-        virtual void tooMany(int maxv, int n_detected);
-        virtual bool good() const;
-
-        virtual Ptr<AdjusterAdapter> clone() const;
-
-        ...
-    };
index 19fb0fa..5e9c4c4 100644 (file)
@@ -49,7 +49,7 @@
 namespace cv
 {
 
-CV_EXPORTS bool initModule_features2d();
+CV_EXPORTS bool initModule_features2d(void);
 
 // //! writes vector of keypoints to the file storage
 // CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector<KeyPoint>& keypoints);
@@ -354,107 +354,6 @@ typedef ORB OrbFeatureDetector;
 typedef ORB OrbDescriptorExtractor;
 
 /*!
-  FREAK implementation
-*/
-class CV_EXPORTS FREAK : public DescriptorExtractor
-{
-public:
-    /** Constructor
-         * @param orientationNormalized enable orientation normalization
-         * @param scaleNormalized enable scale normalization
-         * @param patternScale scaling of the description pattern
-         * @param nbOctave number of octaves covered by the detected keypoints
-         * @param selectedPairs (optional) user defined selected pairs
-    */
-    explicit FREAK( bool orientationNormalized = true,
-           bool scaleNormalized = true,
-           float patternScale = 22.0f,
-           int nOctaves = 4,
-           const std::vector<int>& selectedPairs = std::vector<int>());
-    FREAK( const FREAK& rhs );
-    FREAK& operator=( const FREAK& );
-
-    virtual ~FREAK();
-
-    /** returns the descriptor length in bytes */
-    virtual int descriptorSize() const;
-
-    /** returns the descriptor type */
-    virtual int descriptorType() const;
-
-    /** returns the default norm type */
-    virtual int defaultNorm() const;
-
-    /** select the 512 "best description pairs"
-         * @param images grayscale images set
-         * @param keypoints set of detected keypoints
-         * @param corrThresh correlation threshold
-         * @param verbose print construction information
-         * @return list of best pair indexes
-    */
-    std::vector<int> selectPairs( const std::vector<Mat>& images, std::vector<std::vector<KeyPoint> >& keypoints,
-                      const double corrThresh = 0.7, bool verbose = true );
-
-    AlgorithmInfo* info() const;
-
-    enum
-    {
-        NB_SCALES = 64, NB_PAIRS = 512, NB_ORIENPAIRS = 45
-    };
-
-protected:
-    virtual void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
-    void buildPattern();
-
-    template <typename imgType, typename iiType>
-    imgType meanIntensity( InputArray image, InputArray integral, const float kp_x, const float kp_y,
-                           const unsigned int scale, const unsigned int rot, const unsigned int point ) const;
-
-    template <typename srcMatType, typename iiMatType>
-    void computeDescriptors( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
-
-    template <typename srcMatType>
-    void extractDescriptor(srcMatType *pointsValue, void ** ptr) const;
-
-    bool orientationNormalized; //true if the orientation is normalized, false otherwise
-    bool scaleNormalized; //true if the scale is normalized, false otherwise
-    double patternScale; //scaling of the pattern
-    int nOctaves; //number of octaves
-    bool extAll; // true if all pairs need to be extracted for pairs selection
-
-    double patternScale0;
-    int nOctaves0;
-    std::vector<int> selectedPairs0;
-
-    struct PatternPoint
-    {
-        float x; // x coordinate relative to center
-        float y; // x coordinate relative to center
-        float sigma; // Gaussian smoothing sigma
-    };
-
-    struct DescriptionPair
-    {
-        uchar i; // index of the first point
-        uchar j; // index of the second point
-    };
-
-    struct OrientationPair
-    {
-        uchar i; // index of the first point
-        uchar j; // index of the second point
-        int weight_dx; // dx/(norm_sq))*4096
-        int weight_dy; // dy/(norm_sq))*4096
-    };
-
-    std::vector<PatternPoint> patternLookup; // look-up table for the pattern points (position+sigma of all points at all scales and orientation)
-    int patternSizes[NB_SCALES]; // size of the pattern at a specific scale (used to check if a point is within image boundaries)
-    DescriptionPair descriptionPairs[NB_PAIRS];
-    OrientationPair orientationPairs[NB_ORIENPAIRS];
-};
-
-
-/*!
  Maximal Stable Extremal Regions class.
 
  The class implements MSER algorithm introduced by J. Matas.
@@ -493,36 +392,6 @@ protected:
 
 typedef MSER MserFeatureDetector;
 
-/*!
- The "Star" Detector.
-
- The class implements the keypoint detector introduced by K. Konolige.
-*/
-class CV_EXPORTS_W StarDetector : public FeatureDetector
-{
-public:
-    //! the full constructor
-    CV_WRAP StarDetector(int _maxSize=45, int _responseThreshold=30,
-                 int _lineThresholdProjected=10,
-                 int _lineThresholdBinarized=8,
-                 int _suppressNonmaxSize=5);
-
-    //! finds the keypoints in the image
-    CV_WRAP_AS(detect) void operator()(const Mat& image,
-                CV_OUT std::vector<KeyPoint>& keypoints) const;
-
-    AlgorithmInfo* info() const;
-
-protected:
-    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    int maxSize;
-    int responseThreshold;
-    int lineThresholdProjected;
-    int lineThresholdBinarized;
-    int suppressNonmaxSize;
-};
-
 //! detects corners using FAST algorithm by E. Rosten
 CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                       int threshold, bool nonmaxSuppression=true );
@@ -570,7 +439,6 @@ protected:
 };
 
 typedef GFTTDetector GoodFeaturesToTrackDetector;
-typedef StarDetector StarFeatureDetector;
 
 class CV_EXPORTS_W SimpleBlobDetector : public FeatureDetector
 {
@@ -624,277 +492,6 @@ protected:
 };
 
 
-class CV_EXPORTS_W DenseFeatureDetector : public FeatureDetector
-{
-public:
-    CV_WRAP explicit DenseFeatureDetector( float initFeatureScale=1.f, int featureScaleLevels=1,
-                                           float featureScaleMul=0.1f,
-                                           int initXyStep=6, int initImgBound=0,
-                                           bool varyXyStepWithScale=true,
-                                           bool varyImgBoundWithScale=false );
-    AlgorithmInfo* info() const;
-
-protected:
-    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    double initFeatureScale;
-    int featureScaleLevels;
-    double featureScaleMul;
-
-    int initXyStep;
-    int initImgBound;
-
-    bool varyXyStepWithScale;
-    bool varyImgBoundWithScale;
-};
-
-/*
- * Adapts a detector to partition the source image into a grid and detect
- * points in each cell.
- */
-class CV_EXPORTS_W GridAdaptedFeatureDetector : public FeatureDetector
-{
-public:
-    /*
-     * detector            Detector that will be adapted.
-     * maxTotalKeypoints   Maximum count of keypoints detected on the image. Only the strongest keypoints
-     *                      will be keeped.
-     * gridRows            Grid rows count.
-     * gridCols            Grid column count.
-     */
-    CV_WRAP GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector=Ptr<FeatureDetector>(),
-                                        int maxTotalKeypoints=1000,
-                                        int gridRows=4, int gridCols=4 );
-
-    // TODO implement read/write
-    virtual bool empty() const;
-
-    AlgorithmInfo* info() const;
-
-protected:
-    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    Ptr<FeatureDetector> detector;
-    int maxTotalKeypoints;
-    int gridRows;
-    int gridCols;
-};
-
-/*
- * Adapts a detector to detect points over multiple levels of a Gaussian
- * pyramid. Useful for detectors that are not inherently scaled.
- */
-class CV_EXPORTS_W PyramidAdaptedFeatureDetector : public FeatureDetector
-{
-public:
-    // maxLevel - The 0-based index of the last pyramid layer
-    CV_WRAP PyramidAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector, int maxLevel=2 );
-
-    // TODO implement read/write
-    virtual bool empty() const;
-
-protected:
-    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    Ptr<FeatureDetector> detector;
-    int maxLevel;
-};
-
-/** \brief A feature detector parameter adjuster, this is used by the DynamicAdaptedFeatureDetector
- *  and is a wrapper for FeatureDetector that allow them to be adjusted after a detection
- */
-class CV_EXPORTS AdjusterAdapter: public FeatureDetector
-{
-public:
-    /** pure virtual interface
-     */
-    virtual ~AdjusterAdapter() {}
-    /** too few features were detected so, adjust the detector params accordingly
-     * \param min the minimum number of desired features
-     * \param n_detected the number previously detected
-     */
-    virtual void tooFew(int min, int n_detected) = 0;
-    /** too many features were detected so, adjust the detector params accordingly
-     * \param max the maximum number of desired features
-     * \param n_detected the number previously detected
-     */
-    virtual void tooMany(int max, int n_detected) = 0;
-    /** are params maxed out or still valid?
-     * \return false if the parameters can't be adjusted any more
-     */
-    virtual bool good() const = 0;
-
-    virtual Ptr<AdjusterAdapter> clone() const = 0;
-
-    static Ptr<AdjusterAdapter> create( const String& detectorType );
-};
-/** \brief an adaptively adjusting detector that iteratively detects until the desired number
- * of features are detected.
- *  Beware that this is not thread safe - as the adjustment of parameters breaks the const
- *  of the detection routine...
- *  /TODO Make this const correct and thread safe
- *
- *  sample usage:
- //will create a detector that attempts to find 100 - 110 FAST Keypoints, and will at most run
- //FAST feature detection 10 times until that number of keypoints are found
- Ptr<FeatureDetector> detector(new DynamicAdaptedFeatureDetector(new FastAdjuster(20,true),100, 110, 10));
-
- */
-class CV_EXPORTS DynamicAdaptedFeatureDetector: public FeatureDetector
-{
-public:
-
-    /** \param adjuster an AdjusterAdapter that will do the detection and parameter adjustment
-     *  \param max_features the maximum desired number of features
-     *  \param max_iters the maximum number of times to try to adjust the feature detector params
-     *          for the FastAdjuster this can be high, but with Star or Surf this can get time consuming
-     *  \param min_features the minimum desired features
-     */
-    DynamicAdaptedFeatureDetector( const Ptr<AdjusterAdapter>& adjuster, int min_features=400, int max_features=500, int max_iters=5 );
-
-    virtual bool empty() const;
-
-protected:
-    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-private:
-    DynamicAdaptedFeatureDetector& operator=(const DynamicAdaptedFeatureDetector&);
-    DynamicAdaptedFeatureDetector(const DynamicAdaptedFeatureDetector&);
-
-    int escape_iters_;
-    int min_features_, max_features_;
-    const Ptr<AdjusterAdapter> adjuster_;
-};
-
-/**\brief an adjust for the FAST detector. This will basically decrement or increment the
- * threshold by 1
- */
-class CV_EXPORTS FastAdjuster: public AdjusterAdapter
-{
-public:
-    /**\param init_thresh the initial threshold to start with, default = 20
-     * \param nonmax whether to use non max or not for fast feature detection
-     */
-    FastAdjuster(int init_thresh=20, bool nonmax=true, int min_thresh=1, int max_thresh=200);
-
-    virtual void tooFew(int minv, int n_detected);
-    virtual void tooMany(int maxv, int n_detected);
-    virtual bool good() const;
-
-    virtual Ptr<AdjusterAdapter> clone() const;
-
-protected:
-    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    int thresh_;
-    bool nonmax_;
-    int init_thresh_, min_thresh_, max_thresh_;
-};
-
-
-/** An adjuster for StarFeatureDetector, this one adjusts the responseThreshold for now
- * TODO find a faster way to converge the parameters for Star - use CvStarDetectorParams
- */
-class CV_EXPORTS StarAdjuster: public AdjusterAdapter
-{
-public:
-    StarAdjuster(double initial_thresh=30.0, double min_thresh=2., double max_thresh=200.);
-
-    virtual void tooFew(int minv, int n_detected);
-    virtual void tooMany(int maxv, int n_detected);
-    virtual bool good() const;
-
-    virtual Ptr<AdjusterAdapter> clone() const;
-
-protected:
-    virtual void detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    double thresh_, init_thresh_, min_thresh_, max_thresh_;
-};
-
-class CV_EXPORTS SurfAdjuster: public AdjusterAdapter
-{
-public:
-    SurfAdjuster( double initial_thresh=400.f, double min_thresh=2, double max_thresh=1000 );
-
-    virtual void tooFew(int minv, int n_detected);
-    virtual void tooMany(int maxv, int n_detected);
-    virtual bool good() const;
-
-    virtual Ptr<AdjusterAdapter> clone() const;
-
-protected:
-    virtual void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask=noArray() ) const;
-
-    double thresh_, init_thresh_, min_thresh_, max_thresh_;
-};
-
-CV_EXPORTS Mat windowedMatchingMask( const std::vector<KeyPoint>& keypoints1, const std::vector<KeyPoint>& keypoints2,
-                                     float maxDeltaX, float maxDeltaY );
-
-
-
-/*
- * OpponentColorDescriptorExtractor
- *
- * Adapts a descriptor extractor to compute descriptors in Opponent Color Space
- * (refer to van de Sande et al., CGIV 2008 "Color Descriptors for Object Category Recognition").
- * Input RGB image is transformed in Opponent Color Space. Then unadapted descriptor extractor
- * (set in constructor) computes descriptors on each of the three channel and concatenate
- * them into a single color descriptor.
- */
-class CV_EXPORTS OpponentColorDescriptorExtractor : public DescriptorExtractor
-{
-public:
-    OpponentColorDescriptorExtractor( const Ptr<DescriptorExtractor>& descriptorExtractor );
-
-    virtual void read( const FileNode& );
-    virtual void write( FileStorage& ) const;
-
-    virtual int descriptorSize() const;
-    virtual int descriptorType() const;
-    virtual int defaultNorm() const;
-
-    virtual bool empty() const;
-
-protected:
-    virtual void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
-
-    Ptr<DescriptorExtractor> descriptorExtractor;
-};
-
-/*
- * BRIEF Descriptor
- */
-class CV_EXPORTS BriefDescriptorExtractor : public DescriptorExtractor
-{
-public:
-    static const int PATCH_SIZE = 48;
-    static const int KERNEL_SIZE = 9;
-
-    // bytes is a length of descriptor in bytes. It can be equal 16, 32 or 64 bytes.
-    BriefDescriptorExtractor( int bytes = 32 );
-
-    virtual void read( const FileNode& );
-    virtual void write( FileStorage& ) const;
-
-    virtual int descriptorSize() const;
-    virtual int descriptorType() const;
-    virtual int defaultNorm() const;
-
-    /// @todo read and write for brief
-
-    AlgorithmInfo* info() const;
-
-protected:
-    virtual void computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const;
-
-    typedef void(*PixelTestFn)(InputArray, const std::vector<KeyPoint>&, OutputArray);
-
-    int bytes_;
-    PixelTestFn test_fn_;
-};
-
 // KAZE/AKAZE diffusivity
 enum {
     DIFF_PM_G1 = 0,
@@ -1293,208 +890,6 @@ protected:
     int addedDescCount;
 };
 
-/****************************************************************************************\
-*                                GenericDescriptorMatcher                                *
-\****************************************************************************************/
-/*
- *   Abstract interface for a keypoint descriptor and matcher
- */
-class GenericDescriptorMatcher;
-typedef GenericDescriptorMatcher GenericDescriptorMatch;
-
-class CV_EXPORTS GenericDescriptorMatcher
-{
-public:
-    GenericDescriptorMatcher();
-    virtual ~GenericDescriptorMatcher();
-
-    /*
-     * Add train collection: images and keypoints from them.
-     * images       A set of train images.
-     * ketpoints    Keypoint collection that have been detected on train images.
-     *
-     * Keypoints for which a descriptor cannot be computed are removed. Such keypoints
-     * must be filtered in this method befor adding keypoints to train collection "trainPointCollection".
-     * If inheritor class need perform such prefiltering the method add() must be overloaded.
-     * In the other class methods programmer has access to the train keypoints by a constant link.
-     */
-    virtual void add( InputArrayOfArrays images,
-                      std::vector<std::vector<KeyPoint> >& keypoints );
-
-    const std::vector<Mat>& getTrainImages() const;
-    const std::vector<std::vector<KeyPoint> >& getTrainKeypoints() const;
-
-    /*
-     * Clear images and keypoints storing in train collection.
-     */
-    virtual void clear();
-    /*
-     * Returns true if matcher supports mask to match descriptors.
-     */
-    virtual bool isMaskSupported() = 0;
-    /*
-     * Train some inner structures (e.g. flann index or decision trees).
-     * train() methods is run every time in matching methods. So the method implementation
-     * should has a check whether these inner structures need be trained/retrained or not.
-     */
-    virtual void train();
-
-    /*
-     * Classifies query keypoints.
-     * queryImage    The query image
-     * queryKeypoints   Keypoints from the query image
-     * trainImage    The train image
-     * trainKeypoints   Keypoints from the train image
-     */
-    // Classify keypoints from query image under one train image.
-    void classify( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                           InputArray trainImage, std::vector<KeyPoint>& trainKeypoints ) const;
-    // Classify keypoints from query image under train image collection.
-    void classify( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints );
-
-    /*
-     * Group of methods to match keypoints from image pair.
-     * Keypoints for which a descriptor cannot be computed are removed.
-     * train() method is called here.
-     */
-    // Find one best match for each query descriptor (if mask is empty).
-    void match( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                InputArray trainImage, std::vector<KeyPoint>& trainKeypoints,
-                std::vector<DMatch>& matches, InputArray mask=noArray() ) const;
-    // Find k best matches for each query keypoint (in increasing order of distances).
-    // compactResult is used when mask is not empty. If compactResult is false matches
-    // vector will have the same size as queryDescriptors rows.
-    // If compactResult is true matches vector will not contain matches for fully masked out query descriptors.
-    void knnMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                   InputArray trainImage, std::vector<KeyPoint>& trainKeypoints,
-                   std::vector<std::vector<DMatch> >& matches, int k,
-                   InputArray mask=noArray(), bool compactResult=false ) const;
-    // Find best matches for each query descriptor which have distance less than maxDistance (in increasing order of distances).
-    void radiusMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                      InputArray trainImage, std::vector<KeyPoint>& trainKeypoints,
-                      std::vector<std::vector<DMatch> >& matches, float maxDistance,
-                      InputArray mask=noArray(), bool compactResult=false ) const;
-    /*
-     * Group of methods to match keypoints from one image to image set.
-     * See description of similar methods for matching image pair above.
-     */
-    void match( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                std::vector<DMatch>& matches, InputArrayOfArrays masks=noArray() );
-    void knnMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                   std::vector<std::vector<DMatch> >& matches, int k,
-                   InputArrayOfArrays masks=noArray(), bool compactResult=false );
-    void radiusMatch(InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                      std::vector<std::vector<DMatch> >& matches, float maxDistance,
-                      InputArrayOfArrays masks=noArray(), bool compactResult=false );
-
-    // Reads matcher object from a file node
-    virtual void read( const FileNode& fn );
-    // Writes matcher object to a file storage
-    virtual void write( FileStorage& fs ) const;
-
-    // Return true if matching object is empty (e.g. feature detector or descriptor matcher are empty)
-    virtual bool empty() const;
-
-    // Clone the matcher. If emptyTrainData is false the method create deep copy of the object, i.e. copies
-    // both parameters and train data. If emptyTrainData is true the method create object copy with current parameters
-    // but with empty train data.
-    virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;
-
-    static Ptr<GenericDescriptorMatcher> create( const String& genericDescritptorMatcherType,
-                                                 const String &paramsFilename=String() );
-
-protected:
-    // In fact the matching is implemented only by the following two methods. These methods suppose
-    // that the class object has been trained already. Public match methods call these methods
-    // after calling train().
-    virtual void knnMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                               std::vector<std::vector<DMatch> >& matches, int k,
-                               InputArrayOfArrays masks, bool compactResult ) = 0;
-    virtual void radiusMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                                  std::vector<std::vector<DMatch> >& matches, float maxDistance,
-                                  InputArrayOfArrays masks, bool compactResult ) = 0;
-    /*
-     * A storage for sets of keypoints together with corresponding images and class IDs
-     */
-    class CV_EXPORTS KeyPointCollection
-    {
-    public:
-        KeyPointCollection();
-        KeyPointCollection( const KeyPointCollection& collection );
-        void add( const std::vector<Mat>& images, const std::vector<std::vector<KeyPoint> >& keypoints );
-        void clear();
-
-        // Returns the total number of keypoints in the collection
-        size_t keypointCount() const;
-        size_t imageCount() const;
-
-        const std::vector<std::vector<KeyPoint> >& getKeypoints() const;
-        const std::vector<KeyPoint>& getKeypoints( int imgIdx ) const;
-        const KeyPoint& getKeyPoint( int imgIdx, int localPointIdx ) const;
-        const KeyPoint& getKeyPoint( int globalPointIdx ) const;
-        void getLocalIdx( int globalPointIdx, int& imgIdx, int& localPointIdx ) const;
-
-        const std::vector<Mat>& getImages() const;
-        const Mat& getImage( int imgIdx ) const;
-
-    protected:
-        int pointCount;
-
-        std::vector<Mat> images;
-        std::vector<std::vector<KeyPoint> > keypoints;
-        // global indices of the first points in each image, startIndices.size() = keypoints.size()
-        std::vector<int> startIndices;
-
-    private:
-        static Mat clone_op( Mat m ) { return m.clone(); }
-    };
-
-    KeyPointCollection trainPointCollection;
-};
-
-
-/****************************************************************************************\
-*                                VectorDescriptorMatcher                                 *
-\****************************************************************************************/
-
-/*
- *  A class used for matching descriptors that can be described as vectors in a finite-dimensional space
- */
-class VectorDescriptorMatcher;
-typedef VectorDescriptorMatcher VectorDescriptorMatch;
-
-class CV_EXPORTS VectorDescriptorMatcher : public GenericDescriptorMatcher
-{
-public:
-    VectorDescriptorMatcher( const Ptr<DescriptorExtractor>& extractor, const Ptr<DescriptorMatcher>& matcher );
-    virtual ~VectorDescriptorMatcher();
-
-    virtual void add( InputArrayOfArrays imgCollection,
-                      std::vector<std::vector<KeyPoint> >& pointCollection );
-
-    virtual void clear();
-
-    virtual void train();
-
-    virtual bool isMaskSupported();
-
-    virtual void read( const FileNode& fn );
-    virtual void write( FileStorage& fs ) const;
-    virtual bool empty() const;
-
-    virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
-
-protected:
-    virtual void knnMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                               std::vector<std::vector<DMatch> >& matches, int k,
-                               InputArrayOfArrays masks, bool compactResult );
-    virtual void radiusMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                                  std::vector<std::vector<DMatch> >& matches, float maxDistance,
-                                  InputArrayOfArrays masks, bool compactResult );
-
-    Ptr<DescriptorExtractor> extractor;
-    Ptr<DescriptorMatcher> matcher;
-};
 
 /****************************************************************************************\
 *                                   Drawing functions                                    *
@@ -1547,13 +942,6 @@ CV_EXPORTS void computeRecallPrecisionCurve( const std::vector<std::vector<DMatc
 CV_EXPORTS float getRecall( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
 CV_EXPORTS int getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
 
-CV_EXPORTS void evaluateGenericDescriptorMatcher( const Mat& img1, const Mat& img2, const Mat& H1to2,
-                                                  std::vector<KeyPoint>& keypoints1, std::vector<KeyPoint>& keypoints2,
-                                                  std::vector<std::vector<DMatch> >* matches1to2, std::vector<std::vector<uchar> >* correctMatches1to2Mask,
-                                                  std::vector<Point2f>& recallPrecisionCurve,
-                                                  const Ptr<GenericDescriptorMatcher>& dmatch=Ptr<GenericDescriptorMatcher>() );
-
-
 /****************************************************************************************\
 *                                     Bag of visual words                                *
 \****************************************************************************************/
diff --git a/modules/features2d/src/brief.cpp b/modules/features2d/src/brief.cpp
deleted file mode 100644 (file)
index 0226ffb..0000000
+++ /dev/null
@@ -1,184 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "precomp.hpp"
-#include <algorithm>
-#include <vector>
-
-#include <iostream>
-#include <iomanip>
-
-using namespace cv;
-
-inline int smoothedSum(const Mat& sum, const KeyPoint& pt, int y, int x)
-{
-    static const int HALF_KERNEL = BriefDescriptorExtractor::KERNEL_SIZE / 2;
-
-    int img_y = (int)(pt.pt.y + 0.5) + y;
-    int img_x = (int)(pt.pt.x + 0.5) + x;
-    return   sum.at<int>(img_y + HALF_KERNEL + 1, img_x + HALF_KERNEL + 1)
-           - sum.at<int>(img_y + HALF_KERNEL + 1, img_x - HALF_KERNEL)
-           - sum.at<int>(img_y - HALF_KERNEL, img_x + HALF_KERNEL + 1)
-           + sum.at<int>(img_y - HALF_KERNEL, img_x - HALF_KERNEL);
-}
-
-static void pixelTests16(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
-{
-    Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
-    for (int i = 0; i < (int)keypoints.size(); ++i)
-    {
-        uchar* desc = descriptors.ptr(i);
-        const KeyPoint& pt = keypoints[i];
-#include "generated_16.i"
-    }
-}
-
-static void pixelTests32(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
-{
-    Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
-    for (int i = 0; i < (int)keypoints.size(); ++i)
-    {
-        uchar* desc = descriptors.ptr(i);
-        const KeyPoint& pt = keypoints[i];
-
-#include "generated_32.i"
-    }
-}
-
-static void pixelTests64(InputArray _sum, const std::vector<KeyPoint>& keypoints, OutputArray _descriptors)
-{
-    Mat sum = _sum.getMat(), descriptors = _descriptors.getMat();
-    for (int i = 0; i < (int)keypoints.size(); ++i)
-    {
-        uchar* desc = descriptors.ptr(i);
-        const KeyPoint& pt = keypoints[i];
-
-#include "generated_64.i"
-    }
-}
-
-namespace cv
-{
-
-BriefDescriptorExtractor::BriefDescriptorExtractor(int bytes) :
-    bytes_(bytes), test_fn_(NULL)
-{
-    switch (bytes)
-    {
-        case 16:
-            test_fn_ = pixelTests16;
-            break;
-        case 32:
-            test_fn_ = pixelTests32;
-            break;
-        case 64:
-            test_fn_ = pixelTests64;
-            break;
-        default:
-            CV_Error(Error::StsBadArg, "bytes must be 16, 32, or 64");
-    }
-}
-
-int BriefDescriptorExtractor::descriptorSize() const
-{
-    return bytes_;
-}
-
-int BriefDescriptorExtractor::descriptorType() const
-{
-    return CV_8UC1;
-}
-
-int BriefDescriptorExtractor::defaultNorm() const
-{
-    return NORM_HAMMING;
-}
-
-void BriefDescriptorExtractor::read( const FileNode& fn)
-{
-    int dSize = fn["descriptorSize"];
-    switch (dSize)
-    {
-        case 16:
-            test_fn_ = pixelTests16;
-            break;
-        case 32:
-            test_fn_ = pixelTests32;
-            break;
-        case 64:
-            test_fn_ = pixelTests64;
-            break;
-        default:
-            CV_Error(Error::StsBadArg, "descriptorSize must be 16, 32, or 64");
-    }
-    bytes_ = dSize;
-}
-
-void BriefDescriptorExtractor::write( FileStorage& fs) const
-{
-    fs << "descriptorSize" << bytes_;
-}
-
-void BriefDescriptorExtractor::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
-{
-    // Construct integral image for fast smoothing (box filter)
-    Mat sum;
-
-    Mat grayImage = image.getMat();
-    if( image.type() != CV_8U ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
-
-    ///TODO allow the user to pass in a precomputed integral image
-    //if(image.type() == CV_32S)
-    //  sum = image;
-    //else
-
-    integral( grayImage, sum, CV_32S);
-
-    //Remove keypoints very close to the border
-    KeyPointsFilter::runByImageBorder(keypoints, image.size(), PATCH_SIZE/2 + KERNEL_SIZE/2);
-
-    descriptors.create((int)keypoints.size(), bytes_, CV_8U);
-    descriptors.setTo(Scalar::all(0));
-    test_fn_(sum, keypoints, descriptors);
-}
-
-} // namespace cv
index c453190..23d9fbb 100644 (file)
@@ -98,13 +98,6 @@ void DescriptorExtractor::removeBorderKeypoints( std::vector<KeyPoint>& keypoint
 
 Ptr<DescriptorExtractor> DescriptorExtractor::create(const String& descriptorExtractorType)
 {
-    if( descriptorExtractorType.find("Opponent") == 0 )
-    {
-        size_t pos = String("Opponent").size();
-        String type = descriptorExtractorType.substr(pos);
-        return makePtr<OpponentColorDescriptorExtractor>(DescriptorExtractor::create(type));
-    }
-
     return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
 }
 
@@ -114,151 +107,4 @@ CV_WRAP void Feature2D::compute( InputArray image, CV_OUT CV_IN_OUT std::vector<
    DescriptorExtractor::compute(image, keypoints, descriptors);
 }
 
-/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-/****************************************************************************************\
-*                             OpponentColorDescriptorExtractor                           *
-\****************************************************************************************/
-OpponentColorDescriptorExtractor::OpponentColorDescriptorExtractor( const Ptr<DescriptorExtractor>& _descriptorExtractor ) :
-        descriptorExtractor(_descriptorExtractor)
-{
-    CV_Assert( descriptorExtractor );
-}
-
-static void convertBGRImageToOpponentColorSpace( const Mat& bgrImage, std::vector<Mat>& opponentChannels )
-{
-    if( bgrImage.type() != CV_8UC3 )
-        CV_Error( Error::StsBadArg, "input image must be an BGR image of type CV_8UC3" );
-
-    // Prepare opponent color space storage matrices.
-    opponentChannels.resize( 3 );
-    opponentChannels[0] = cv::Mat(bgrImage.size(), CV_8UC1); // R-G RED-GREEN
-    opponentChannels[1] = cv::Mat(bgrImage.size(), CV_8UC1); // R+G-2B YELLOW-BLUE
-    opponentChannels[2] = cv::Mat(bgrImage.size(), CV_8UC1); // R+G+B
-
-    for(int y = 0; y < bgrImage.rows; ++y)
-        for(int x = 0; x < bgrImage.cols; ++x)
-        {
-            Vec3b v = bgrImage.at<Vec3b>(y, x);
-            uchar& b = v[0];
-            uchar& g = v[1];
-            uchar& r = v[2];
-
-            opponentChannels[0].at<uchar>(y, x) = saturate_cast<uchar>(0.5f    * (255 + g - r));       // (R - G)/sqrt(2), but converted to the destination data type
-            opponentChannels[1].at<uchar>(y, x) = saturate_cast<uchar>(0.25f   * (510 + r + g - 2*b)); // (R + G - 2B)/sqrt(6), but converted to the destination data type
-            opponentChannels[2].at<uchar>(y, x) = saturate_cast<uchar>(1.f/3.f * (r + g + b));         // (R + G + B)/sqrt(3), but converted to the destination data type
-        }
-}
-
-struct KP_LessThan
-{
-    KP_LessThan(const std::vector<KeyPoint>& _kp) : kp(&_kp) {}
-    bool operator()(int i, int j) const
-    {
-        return (*kp)[i].class_id < (*kp)[j].class_id;
-    }
-    const std::vector<KeyPoint>* kp;
-};
-
-void OpponentColorDescriptorExtractor::computeImpl( InputArray _bgrImage, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
-{
-    Mat bgrImage = _bgrImage.getMat();
-    std::vector<Mat> opponentChannels;
-    convertBGRImageToOpponentColorSpace( bgrImage, opponentChannels );
-
-    const int N = 3; // channels count
-    std::vector<KeyPoint> channelKeypoints[N];
-    Mat channelDescriptors[N];
-    std::vector<int> idxs[N];
-
-    // Compute descriptors three times, once for each Opponent channel to concatenate into a single color descriptor
-    int maxKeypointsCount = 0;
-    for( int ci = 0; ci < N; ci++ )
-    {
-        channelKeypoints[ci].insert( channelKeypoints[ci].begin(), keypoints.begin(), keypoints.end() );
-        // Use class_id member to get indices into initial keypoints vector
-        for( size_t ki = 0; ki < channelKeypoints[ci].size(); ki++ )
-            channelKeypoints[ci][ki].class_id = (int)ki;
-
-        descriptorExtractor->compute( opponentChannels[ci], channelKeypoints[ci], channelDescriptors[ci] );
-        idxs[ci].resize( channelKeypoints[ci].size() );
-        for( size_t ki = 0; ki < channelKeypoints[ci].size(); ki++ )
-        {
-            idxs[ci][ki] = (int)ki;
-        }
-        std::sort( idxs[ci].begin(), idxs[ci].end(), KP_LessThan(channelKeypoints[ci]) );
-        maxKeypointsCount = std::max( maxKeypointsCount, (int)channelKeypoints[ci].size());
-    }
-
-    std::vector<KeyPoint> outKeypoints;
-    outKeypoints.reserve( keypoints.size() );
-
-    int dSize = descriptorExtractor->descriptorSize();
-    Mat mergedDescriptors( maxKeypointsCount, 3*dSize, descriptorExtractor->descriptorType() );
-    int mergedCount = 0;
-    // cp - current channel position
-    size_t cp[] = {0, 0, 0};
-    while( cp[0] < channelKeypoints[0].size() &&
-           cp[1] < channelKeypoints[1].size() &&
-           cp[2] < channelKeypoints[2].size() )
-    {
-        const int maxInitIdx = std::max( 0, std::max( channelKeypoints[0][idxs[0][cp[0]]].class_id,
-                                                      std::max( channelKeypoints[1][idxs[1][cp[1]]].class_id,
-                                                                channelKeypoints[2][idxs[2][cp[2]]].class_id ) ) );
-
-        while( channelKeypoints[0][idxs[0][cp[0]]].class_id < maxInitIdx && cp[0] < channelKeypoints[0].size() ) { cp[0]++; }
-        while( channelKeypoints[1][idxs[1][cp[1]]].class_id < maxInitIdx && cp[1] < channelKeypoints[1].size() ) { cp[1]++; }
-        while( channelKeypoints[2][idxs[2][cp[2]]].class_id < maxInitIdx && cp[2] < channelKeypoints[2].size() ) { cp[2]++; }
-        if( cp[0] >= channelKeypoints[0].size() || cp[1] >= channelKeypoints[1].size() || cp[2] >= channelKeypoints[2].size() )
-            break;
-
-        if( channelKeypoints[0][idxs[0][cp[0]]].class_id == maxInitIdx &&
-            channelKeypoints[1][idxs[1][cp[1]]].class_id == maxInitIdx &&
-            channelKeypoints[2][idxs[2][cp[2]]].class_id == maxInitIdx )
-        {
-            outKeypoints.push_back( keypoints[maxInitIdx] );
-            // merge descriptors
-            for( int ci = 0; ci < N; ci++ )
-            {
-                Mat dst = mergedDescriptors(Range(mergedCount, mergedCount+1), Range(ci*dSize, (ci+1)*dSize));
-                channelDescriptors[ci].row( idxs[ci][cp[ci]] ).copyTo( dst );
-                cp[ci]++;
-            }
-            mergedCount++;
-        }
-    }
-    mergedDescriptors.rowRange(0, mergedCount).copyTo( descriptors );
-    std::swap( outKeypoints, keypoints );
-}
-
-void OpponentColorDescriptorExtractor::read( const FileNode& fn )
-{
-    descriptorExtractor->read(fn);
-}
-
-void OpponentColorDescriptorExtractor::write( FileStorage& fs ) const
-{
-    descriptorExtractor->write(fs);
-}
-
-int OpponentColorDescriptorExtractor::descriptorSize() const
-{
-    return 3*descriptorExtractor->descriptorSize();
-}
-
-int OpponentColorDescriptorExtractor::descriptorType() const
-{
-    return descriptorExtractor->descriptorType();
-}
-
-int OpponentColorDescriptorExtractor::defaultNorm() const
-{
-    return descriptorExtractor->defaultNorm();
-}
-
-bool OpponentColorDescriptorExtractor::empty() const
-{
-    return !descriptorExtractor || descriptorExtractor->empty();
-}
-
 }
index d3c1f3f..866d24d 100644 (file)
@@ -106,24 +106,6 @@ void FeatureDetector::removeInvalidPoints( const Mat& mask, std::vector<KeyPoint
 
 Ptr<FeatureDetector> FeatureDetector::create( const String& detectorType )
 {
-    if( detectorType.find("Grid") == 0 )
-    {
-        return makePtr<GridAdaptedFeatureDetector>(FeatureDetector::create(
-                                detectorType.substr(strlen("Grid"))));
-    }
-
-    if( detectorType.find("Pyramid") == 0 )
-    {
-        return makePtr<PyramidAdaptedFeatureDetector>(FeatureDetector::create(
-                                detectorType.substr(strlen("Pyramid"))));
-    }
-
-    if( detectorType.find("Dynamic") == 0 )
-    {
-        return makePtr<DynamicAdaptedFeatureDetector>(AdjusterAdapter::create(
-                                detectorType.substr(strlen("Dynamic"))));
-    }
-
     if( detectorType.compare( "HARRIS" ) == 0 )
     {
         Ptr<FeatureDetector> fd = FeatureDetector::create("GFTT");
@@ -176,212 +158,4 @@ void GFTTDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoin
 
 }
 
-////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-/*
- *  DenseFeatureDetector
- */
-DenseFeatureDetector::DenseFeatureDetector( float _initFeatureScale, int _featureScaleLevels,
-                                      float _featureScaleMul, int _initXyStep,
-                                      int _initImgBound, bool _varyXyStepWithScale,
-                                      bool _varyImgBoundWithScale ) :
-    initFeatureScale(_initFeatureScale), featureScaleLevels(_featureScaleLevels),
-    featureScaleMul(_featureScaleMul), initXyStep(_initXyStep), initImgBound(_initImgBound),
-    varyXyStepWithScale(_varyXyStepWithScale), varyImgBoundWithScale(_varyImgBoundWithScale)
-{}
-
-
-void DenseFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
-{
-    Mat image = _image.getMat(), mask = _mask.getMat();
-
-    float curScale = static_cast<float>(initFeatureScale);
-    int curStep = initXyStep;
-    int curBound = initImgBound;
-    for( int curLevel = 0; curLevel < featureScaleLevels; curLevel++ )
-    {
-        for( int x = curBound; x < image.cols - curBound; x += curStep )
-        {
-            for( int y = curBound; y < image.rows - curBound; y += curStep )
-            {
-                keypoints.push_back( KeyPoint(static_cast<float>(x), static_cast<float>(y), curScale) );
-            }
-        }
-
-        curScale = static_cast<float>(curScale * featureScaleMul);
-        if( varyXyStepWithScale ) curStep = static_cast<int>( curStep * featureScaleMul + 0.5f );
-        if( varyImgBoundWithScale ) curBound = static_cast<int>( curBound * featureScaleMul + 0.5f );
-    }
-
-    KeyPointsFilter::runByPixelsMask( keypoints, mask );
-}
-
-/*
- *  GridAdaptedFeatureDetector
- */
-GridAdaptedFeatureDetector::GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& _detector,
-                                                        int _maxTotalKeypoints, int _gridRows, int _gridCols )
-    : detector(_detector), maxTotalKeypoints(_maxTotalKeypoints), gridRows(_gridRows), gridCols(_gridCols)
-{}
-
-bool GridAdaptedFeatureDetector::empty() const
-{
-    return !detector || detector->empty();
-}
-
-struct ResponseComparator
-{
-    bool operator() (const KeyPoint& a, const KeyPoint& b)
-    {
-        return std::abs(a.response) > std::abs(b.response);
-    }
-};
-
-static void keepStrongest( int N, std::vector<KeyPoint>& keypoints )
-{
-    if( (int)keypoints.size() > N )
-    {
-        std::vector<KeyPoint>::iterator nth = keypoints.begin() + N;
-        std::nth_element( keypoints.begin(), nth, keypoints.end(), ResponseComparator() );
-        keypoints.erase( nth, keypoints.end() );
-    }
-}
-
-namespace {
-class GridAdaptedFeatureDetectorInvoker : public ParallelLoopBody
-{
-private:
-    int gridRows_, gridCols_;
-    int maxPerCell_;
-    std::vector<KeyPoint>& keypoints_;
-    const Mat& image_;
-    const Mat& mask_;
-    const Ptr<FeatureDetector>& detector_;
-    Mutex* kptLock_;
-
-    GridAdaptedFeatureDetectorInvoker& operator=(const GridAdaptedFeatureDetectorInvoker&); // to quiet MSVC
-
-public:
-
-    GridAdaptedFeatureDetectorInvoker(const Ptr<FeatureDetector>& detector, const Mat& image, const Mat& mask,
-                                      std::vector<KeyPoint>& keypoints, int maxPerCell, int gridRows, int gridCols,
-                                      cv::Mutex* kptLock)
-        : gridRows_(gridRows), gridCols_(gridCols), maxPerCell_(maxPerCell),
-          keypoints_(keypoints), image_(image), mask_(mask), detector_(detector),
-          kptLock_(kptLock)
-    {
-    }
-
-    void operator() (const Range& range) const
-    {
-        for (int i = range.start; i < range.end; ++i)
-        {
-            int celly = i / gridCols_;
-            int cellx = i - celly * gridCols_;
-
-            Range row_range((celly*image_.rows)/gridRows_, ((celly+1)*image_.rows)/gridRows_);
-            Range col_range((cellx*image_.cols)/gridCols_, ((cellx+1)*image_.cols)/gridCols_);
-
-            Mat sub_image = image_(row_range, col_range);
-            Mat sub_mask;
-            if (!mask_.empty()) sub_mask = mask_(row_range, col_range);
-
-            std::vector<KeyPoint> sub_keypoints;
-            sub_keypoints.reserve(maxPerCell_);
-
-            detector_->detect( sub_image, sub_keypoints, sub_mask );
-            keepStrongest( maxPerCell_, sub_keypoints );
-
-            std::vector<cv::KeyPoint>::iterator it = sub_keypoints.begin(),
-                                                end = sub_keypoints.end();
-            for( ; it != end; ++it )
-            {
-                it->pt.x += col_range.start;
-                it->pt.y += row_range.start;
-            }
-
-            cv::AutoLock join_keypoints(*kptLock_);
-            keypoints_.insert( keypoints_.end(), sub_keypoints.begin(), sub_keypoints.end() );
-        }
-    }
-};
-} // namepace
-
-void GridAdaptedFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
-{
-    if (_image.empty() || maxTotalKeypoints < gridRows * gridCols)
-    {
-        keypoints.clear();
-        return;
-    }
-    keypoints.reserve(maxTotalKeypoints);
-    int maxPerCell = maxTotalKeypoints / (gridRows * gridCols);
-
-    Mat image = _image.getMat(), mask = _mask.getMat();
-
-    cv::Mutex kptLock;
-    cv::parallel_for_(cv::Range(0, gridRows * gridCols),
-        GridAdaptedFeatureDetectorInvoker(detector, image, mask, keypoints, maxPerCell, gridRows, gridCols, &kptLock));
-}
-
-/*
- *  PyramidAdaptedFeatureDetector
- */
-PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector( const Ptr<FeatureDetector>& _detector, int _maxLevel )
-    : detector(_detector), maxLevel(_maxLevel)
-{}
-
-bool PyramidAdaptedFeatureDetector::empty() const
-{
-    return !detector || detector->empty();
-}
-
-void PyramidAdaptedFeatureDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
-{
-    Mat image = _image.getMat(), mask = _mask.getMat();
-    Mat src = image;
-    Mat src_mask = mask;
-
-    Mat dilated_mask;
-    if( !mask.empty() )
-    {
-        dilate( mask, dilated_mask, Mat() );
-        Mat mask255( mask.size(), CV_8UC1, Scalar(0) );
-        mask255.setTo( Scalar(255), dilated_mask != 0 );
-        dilated_mask = mask255;
-    }
-
-    for( int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2 )
-    {
-        // Detect on current level of the pyramid
-        std::vector<KeyPoint> new_pts;
-        detector->detect( src, new_pts, src_mask );
-        std::vector<KeyPoint>::iterator it = new_pts.begin(),
-                                   end = new_pts.end();
-        for( ; it != end; ++it)
-        {
-            it->pt.x *= multiplier;
-            it->pt.y *= multiplier;
-            it->size *= multiplier;
-            it->octave = l;
-        }
-        keypoints.insert( keypoints.end(), new_pts.begin(), new_pts.end() );
-
-        // Downsample
-        if( l < maxLevel )
-        {
-            Mat dst;
-            pyrDown( src, dst );
-            src = dst;
-
-            if( !mask.empty() )
-                resize( dilated_mask, src_mask, src.size(), 0, 0, INTER_AREA );
-        }
-    }
-
-    if( !mask.empty() )
-        KeyPointsFilter::runByPixelsMask( keypoints, mask );
-}
-
-
 }
index 560005f..1806a27 100644 (file)
 namespace cv
 {
 
-DynamicAdaptedFeatureDetector::DynamicAdaptedFeatureDetector(const Ptr<AdjusterAdapter>& a,
-                                         int min_features, int max_features, int max_iters ) :
-        escape_iters_(max_iters), min_features_(min_features), max_features_(max_features), adjuster_(a)
-{}
-
-bool DynamicAdaptedFeatureDetector::empty() const
-{
-    return !adjuster_ || adjuster_->empty();
-}
-
-void DynamicAdaptedFeatureDetector::detectImpl(InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask) const
-{
-    Mat image = _image.getMat(), mask = _mask.getMat();
-
-    //for oscillation testing
-    bool down = false;
-    bool up = false;
-
-    //flag for whether the correct threshhold has been reached
-    bool thresh_good = false;
-
-    Ptr<AdjusterAdapter> adjuster = adjuster_->clone();
-
-    //break if the desired number hasn't been reached.
-    int iter_count = escape_iters_;
-
-    while( iter_count > 0 && !(down && up) && !thresh_good && adjuster->good() )
-    {
-        keypoints.clear();
-
-        //the adjuster takes care of calling the detector with updated parameters
-        adjuster->detect(image, keypoints,mask);
-
-        if( int(keypoints.size()) < min_features_ )
-        {
-            down = true;
-            adjuster->tooFew(min_features_, (int)keypoints.size());
-        }
-        else if( int(keypoints.size()) > max_features_ )
-        {
-            up = true;
-            adjuster->tooMany(max_features_, (int)keypoints.size());
-        }
-        else
-            thresh_good = true;
-
-        iter_count--;
-    }
-
-}
-
-FastAdjuster::FastAdjuster( int init_thresh, bool nonmax, int min_thresh, int max_thresh ) :
-    thresh_(init_thresh), nonmax_(nonmax), init_thresh_(init_thresh),
-    min_thresh_(min_thresh), max_thresh_(max_thresh)
-{}
-
-void FastAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
-{
-    FastFeatureDetector(thresh_, nonmax_).detect(image, keypoints, mask);
-}
-
-void FastAdjuster::tooFew(int, int)
-{
-    //fast is easy to adjust
-    thresh_--;
-}
-
-void FastAdjuster::tooMany(int, int)
-{
-    //fast is easy to adjust
-    thresh_++;
-}
-
-//return whether or not the threshhold is beyond
-//a useful point
-bool FastAdjuster::good() const
-{
-    return (thresh_ > min_thresh_) && (thresh_ < max_thresh_);
-}
-
-Ptr<AdjusterAdapter> FastAdjuster::clone() const
-{
-    Ptr<AdjusterAdapter> cloned_obj(new FastAdjuster( init_thresh_, nonmax_, min_thresh_, max_thresh_ ));
-    return cloned_obj;
-}
-
-StarAdjuster::StarAdjuster(double initial_thresh, double min_thresh, double max_thresh) :
-    thresh_(initial_thresh), init_thresh_(initial_thresh),
-    min_thresh_(min_thresh), max_thresh_(max_thresh)
-{}
-
-void StarAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
-{
-    StarFeatureDetector detector_tmp(16, cvRound(thresh_), 10, 8, 3);
-    detector_tmp.detect(image, keypoints, mask);
-}
-
-void StarAdjuster::tooFew(int, int)
-{
-    thresh_ *= 0.9;
-    if (thresh_ < 1.1)
-            thresh_ = 1.1;
-}
-
-void StarAdjuster::tooMany(int, int)
-{
-    thresh_ *= 1.1;
-}
-
-bool StarAdjuster::good() const
-{
-    return (thresh_ > min_thresh_) && (thresh_ < max_thresh_);
-}
-
-Ptr<AdjusterAdapter> StarAdjuster::clone() const
-{
-    Ptr<AdjusterAdapter> cloned_obj(new StarAdjuster( init_thresh_, min_thresh_, max_thresh_ ));
-    return cloned_obj;
-}
-
-SurfAdjuster::SurfAdjuster( double initial_thresh, double min_thresh, double max_thresh ) :
-    thresh_(initial_thresh), init_thresh_(initial_thresh),
-    min_thresh_(min_thresh), max_thresh_(max_thresh)
-{}
-
-void SurfAdjuster::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
-{
-    Ptr<FeatureDetector> surf = FeatureDetector::create("SURF");
-    surf->set("hessianThreshold", thresh_);
-    surf->detect(image, keypoints, mask);
-}
-
-void SurfAdjuster::tooFew(int, int)
-{
-    thresh_ *= 0.9;
-    if (thresh_ < 1.1)
-            thresh_ = 1.1;
-}
-
-void SurfAdjuster::tooMany(int, int)
-{
-    thresh_ *= 1.1;
-}
-
-//return whether or not the threshhold is beyond
-//a useful point
-bool SurfAdjuster::good() const
-{
-    return (thresh_ > min_thresh_) && (thresh_ < max_thresh_);
-}
-
-Ptr<AdjusterAdapter> SurfAdjuster::clone() const
-{
-    Ptr<AdjusterAdapter> cloned_obj(new SurfAdjuster( init_thresh_, min_thresh_, max_thresh_ ));
-    return cloned_obj;
-}
-
-Ptr<AdjusterAdapter> AdjusterAdapter::create( const String& detectorType )
-{
-    Ptr<AdjusterAdapter> adapter;
-
-    if( !detectorType.compare( "FAST" ) )
-    {
-        adapter = makePtr<FastAdjuster>();
-    }
-    else if( !detectorType.compare( "STAR" ) )
-    {
-        adapter = makePtr<StarAdjuster>();
-    }
-    else if( !detectorType.compare( "SURF" ) )
-    {
-        adapter = makePtr<SurfAdjuster>();
-    }
-
-    return adapter;
-}
-
 }
index 2afc57b..3863203 100644 (file)
@@ -556,56 +556,3 @@ int cv::getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float
 
     return nearestPointIndex;
 }
-
-void cv::evaluateGenericDescriptorMatcher( const Mat& img1, const Mat& img2, const Mat& H1to2,
-                                           std::vector<KeyPoint>& keypoints1, std::vector<KeyPoint>& keypoints2,
-                                           std::vector<std::vector<DMatch> >* _matches1to2, std::vector<std::vector<uchar> >* _correctMatches1to2Mask,
-                                           std::vector<Point2f>& recallPrecisionCurve,
-                                           const Ptr<GenericDescriptorMatcher>& _dmatcher )
-{
-    Ptr<GenericDescriptorMatcher> dmatcher = _dmatcher;
-    dmatcher->clear();
-
-    std::vector<std::vector<DMatch> > *matches1to2, buf1;
-    matches1to2 = _matches1to2 != 0 ? _matches1to2 : &buf1;
-
-    std::vector<std::vector<uchar> > *correctMatches1to2Mask, buf2;
-    correctMatches1to2Mask = _correctMatches1to2Mask != 0 ? _correctMatches1to2Mask : &buf2;
-
-    if( keypoints1.empty() )
-        CV_Error( Error::StsBadArg, "keypoints1 must not be empty" );
-
-    if( matches1to2->empty() && !dmatcher )
-        CV_Error( Error::StsBadArg, "dmatch must not be empty when matches1to2 is empty" );
-
-    bool computeKeypoints2ByPrj = keypoints2.empty();
-    if( computeKeypoints2ByPrj )
-    {
-        CV_Error(Error::StsNotImplemented, "");
-        // TODO: add computing keypoints2 from keypoints1 using H1to2
-    }
-
-    if( matches1to2->empty() || computeKeypoints2ByPrj )
-    {
-        dmatcher->clear();
-        dmatcher->radiusMatch( img1, keypoints1, img2, keypoints2, *matches1to2, std::numeric_limits<float>::max() );
-    }
-    float repeatability;
-    int correspCount;
-    Mat thresholdedOverlapMask; // thresholded allOverlapErrors
-    calculateRepeatability( img1, img2, H1to2, keypoints1, keypoints2, repeatability, correspCount, &thresholdedOverlapMask );
-
-    correctMatches1to2Mask->resize(matches1to2->size());
-    for( size_t i = 0; i < matches1to2->size(); i++ )
-    {
-        (*correctMatches1to2Mask)[i].resize((*matches1to2)[i].size());
-        for( size_t j = 0;j < (*matches1to2)[i].size(); j++ )
-        {
-            int indexQuery = (*matches1to2)[i][j].queryIdx;
-            int indexTrain = (*matches1to2)[i][j].trainIdx;
-            (*correctMatches1to2Mask)[i][j] = thresholdedOverlapMask.at<uchar>( indexQuery, indexTrain );
-        }
-    }
-
-    computeRecallPrecisionCurve( *matches1to2, *correctMatches1to2Mask, recallPrecisionCurve );
-}
index eb71456..470cb3c 100644 (file)
@@ -62,24 +62,11 @@ CV_INIT_ALGORITHM(BRISK, "Feature2D.BRISK",
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
 
-CV_INIT_ALGORITHM(BriefDescriptorExtractor, "Feature2D.BRIEF",
-                  obj.info()->addParam(obj, "bytes", obj.bytes_))
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
 CV_INIT_ALGORITHM(FastFeatureDetector, "Feature2D.FAST",
                   obj.info()->addParam(obj, "threshold", obj.threshold);
                   obj.info()->addParam(obj, "nonmaxSuppression", obj.nonmaxSuppression);
                   obj.info()->addParam(obj, "type", obj.type))
 
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-CV_INIT_ALGORITHM(StarDetector, "Feature2D.STAR",
-                  obj.info()->addParam(obj, "maxSize", obj.maxSize);
-                  obj.info()->addParam(obj, "responseThreshold", obj.responseThreshold);
-                  obj.info()->addParam(obj, "lineThresholdProjected", obj.lineThresholdProjected);
-                  obj.info()->addParam(obj, "lineThresholdBinarized", obj.lineThresholdBinarized);
-                  obj.info()->addParam(obj, "suppressNonmaxSize", obj.suppressNonmaxSize))
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
 
@@ -108,14 +95,6 @@ CV_INIT_ALGORITHM(ORB, "Feature2D.ORB",
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////
 
-CV_INIT_ALGORITHM(FREAK, "Feature2D.FREAK",
-                  obj.info()->addParam(obj, "orientationNormalized", obj.orientationNormalized);
-                  obj.info()->addParam(obj, "scaleNormalized", obj.scaleNormalized);
-                  obj.info()->addParam(obj, "patternScale", obj.patternScale);
-                  obj.info()->addParam(obj, "nbOctave", obj.nOctaves))
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
 CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT",
                   obj.info()->addParam(obj, "nfeatures", obj.nfeatures);
                   obj.info()->addParam(obj, "qualityLevel", obj.qualityLevel);
@@ -181,23 +160,6 @@ CV_INIT_ALGORITHM(HarrisDetector, "Feature2D.HARRIS",
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////
 
-CV_INIT_ALGORITHM(DenseFeatureDetector, "Feature2D.Dense",
-                  obj.info()->addParam(obj, "initFeatureScale", obj.initFeatureScale);
-                  obj.info()->addParam(obj, "featureScaleLevels", obj.featureScaleLevels);
-                  obj.info()->addParam(obj, "featureScaleMul", obj.featureScaleMul);
-                  obj.info()->addParam(obj, "initXyStep", obj.initXyStep);
-                  obj.info()->addParam(obj, "initImgBound", obj.initImgBound);
-                  obj.info()->addParam(obj, "varyXyStepWithScale", obj.varyXyStepWithScale);
-                  obj.info()->addParam(obj, "varyImgBoundWithScale", obj.varyImgBoundWithScale))
-
-CV_INIT_ALGORITHM(GridAdaptedFeatureDetector, "Feature2D.Grid",
-                  obj.info()->addParam<FeatureDetector>(obj, "detector", obj.detector, false, 0, 0); // Extra params added to avoid VS2013 fatal error in opencv2/core.hpp (decl. of addParam)
-                  obj.info()->addParam(obj, "maxTotalKeypoints", obj.maxTotalKeypoints);
-                  obj.info()->addParam(obj, "gridRows", obj.gridRows);
-                  obj.info()->addParam(obj, "gridCols", obj.gridCols))
-
-////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
 CV_INIT_ALGORITHM(BFMatcher, "DescriptorMatcher.BFMatcher",
                   obj.info()->addParam(obj, "normType", obj.normType);
                   obj.info()->addParam(obj, "crossCheck", obj.crossCheck))
@@ -209,19 +171,14 @@ CV_INIT_ALGORITHM(FlannBasedMatcher, "DescriptorMatcher.FlannBasedMatcher",)
 bool cv::initModule_features2d(void)
 {
     bool all = true;
-    all &= !BriefDescriptorExtractor_info_auto.name().empty();
     all &= !BRISK_info_auto.name().empty();
     all &= !FastFeatureDetector_info_auto.name().empty();
-    all &= !StarDetector_info_auto.name().empty();
     all &= !MSER_info_auto.name().empty();
-    all &= !FREAK_info_auto.name().empty();
     all &= !ORB_info_auto.name().empty();
     all &= !GFTTDetector_info_auto.name().empty();
     all &= !KAZE_info_auto.name().empty();
     all &= !AKAZE_info_auto.name().empty();
-        all &= !HarrisDetector_info_auto.name().empty();
-    all &= !DenseFeatureDetector_info_auto.name().empty();
-    all &= !GridAdaptedFeatureDetector_info_auto.name().empty();
+    all &= !HarrisDetector_info_auto.name().empty();
     all &= !BFMatcher_info_auto.name().empty();
     all &= !FlannBasedMatcher_info_auto.name().empty();
 
diff --git a/modules/features2d/src/freak.cpp b/modules/features2d/src/freak.cpp
deleted file mode 100644 (file)
index 58c1fe1..0000000
+++ /dev/null
@@ -1,733 +0,0 @@
-//  freak.cpp
-//
-//  Copyright (C) 2011-2012  Signal processing laboratory 2, EPFL,
-//  Kirell Benzi (kirell.benzi@epfl.ch),
-//  Raphael Ortiz (raphael.ortiz@a3.epfl.ch)
-//  Alexandre Alahi (alexandre.alahi@epfl.ch)
-//  and Pierre Vandergheynst (pierre.vandergheynst@epfl.ch)
-//
-//  Redistribution and use in source and binary forms, with or without modification,
-//  are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-//  This software is provided by the copyright holders and contributors "as is" and
-//  any express or implied warranties, including, but not limited to, the implied
-//  warranties of merchantability and fitness for a particular purpose are disclaimed.
-//  In no event shall the Intel Corporation or contributors be liable for any direct,
-//  indirect, incidental, special, exemplary, or consequential damages
-//  (including, but not limited to, procurement of substitute goods or services;
-//  loss of use, data, or profits; or business interruption) however caused
-//  and on any theory of liability, whether in contract, strict liability,
-//  or tort (including negligence or otherwise) arising in any way out of
-//  the use of this software, even if advised of the possibility of such damage.
-
-#include "precomp.hpp"
-#include <fstream>
-#include <stdlib.h>
-#include <algorithm>
-#include <iostream>
-#include <bitset>
-#include <sstream>
-#include <algorithm>
-#include <iomanip>
-#include <string.h>
-
-namespace cv
-{
-
-static const double FREAK_SQRT2 = 1.4142135623731;
-static const double FREAK_LOG2 = 0.693147180559945;
-static const int FREAK_NB_ORIENTATION = 256;
-static const int FREAK_NB_POINTS = 43;
-static const int FREAK_SMALLEST_KP_SIZE = 7; // smallest size of keypoints
-static const int FREAK_NB_SCALES = FREAK::NB_SCALES;
-static const int FREAK_NB_PAIRS = FREAK::NB_PAIRS;
-static const int FREAK_NB_ORIENPAIRS = FREAK::NB_ORIENPAIRS;
-
-// default pairs
-static const int FREAK_DEF_PAIRS[FREAK::NB_PAIRS] =
-{
-     404,431,818,511,181,52,311,874,774,543,719,230,417,205,11,
-     560,149,265,39,306,165,857,250,8,61,15,55,717,44,412,
-     592,134,761,695,660,782,625,487,549,516,271,665,762,392,178,
-     796,773,31,672,845,548,794,677,654,241,831,225,238,849,83,
-     691,484,826,707,122,517,583,731,328,339,571,475,394,472,580,
-     381,137,93,380,327,619,729,808,218,213,459,141,806,341,95,
-     382,568,124,750,193,749,706,843,79,199,317,329,768,198,100,
-     466,613,78,562,783,689,136,838,94,142,164,679,219,419,366,
-     418,423,77,89,523,259,683,312,555,20,470,684,123,458,453,833,
-     72,113,253,108,313,25,153,648,411,607,618,128,305,232,301,84,
-     56,264,371,46,407,360,38,99,176,710,114,578,66,372,653,
-     129,359,424,159,821,10,323,393,5,340,891,9,790,47,0,175,346,
-     236,26,172,147,574,561,32,294,429,724,755,398,787,288,299,
-     769,565,767,722,757,224,465,723,498,467,235,127,802,446,233,
-     544,482,800,318,16,532,801,441,554,173,60,530,713,469,30,
-     212,630,899,170,266,799,88,49,512,399,23,500,107,524,90,
-     194,143,135,192,206,345,148,71,119,101,563,870,158,254,214,
-     276,464,332,725,188,385,24,476,40,231,620,171,258,67,109,
-     844,244,187,388,701,690,50,7,850,479,48,522,22,154,12,659,
-     736,655,577,737,830,811,174,21,237,335,353,234,53,270,62,
-     182,45,177,245,812,673,355,556,612,166,204,54,248,365,226,
-     242,452,700,685,573,14,842,481,468,781,564,416,179,405,35,
-     819,608,624,367,98,643,448,2,460,676,440,240,130,146,184,
-     185,430,65,807,377,82,121,708,239,310,138,596,730,575,477,
-     851,797,247,27,85,586,307,779,326,494,856,324,827,96,748,
-     13,397,125,688,702,92,293,716,277,140,112,4,80,855,839,1,
-     413,347,584,493,289,696,19,751,379,76,73,115,6,590,183,734,
-     197,483,217,344,330,400,186,243,587,220,780,200,793,246,824,
-     41,735,579,81,703,322,760,720,139,480,490,91,814,813,163,
-     152,488,763,263,425,410,576,120,319,668,150,160,302,491,515,
-     260,145,428,97,251,395,272,252,18,106,358,854,485,144,550,
-     131,133,378,68,102,104,58,361,275,209,697,582,338,742,589,
-     325,408,229,28,304,191,189,110,126,486,211,547,533,70,215,
-     670,249,36,581,389,605,331,518,442,822
-};
-
-// used to sort pairs during pairs selection
-struct PairStat
-{
-    double mean;
-    int idx;
-};
-
-struct sortMean
-{
-    bool operator()( const PairStat& a, const PairStat& b ) const
-    {
-        return a.mean < b.mean;
-    }
-};
-
-void FREAK::buildPattern()
-{
-    if( patternScale == patternScale0 && nOctaves == nOctaves0 && !patternLookup.empty() )
-        return;
-
-    nOctaves0 = nOctaves;
-    patternScale0 = patternScale;
-
-    patternLookup.resize(FREAK_NB_SCALES*FREAK_NB_ORIENTATION*FREAK_NB_POINTS);
-    double scaleStep = std::pow(2.0, (double)(nOctaves)/FREAK_NB_SCALES ); // 2 ^ ( (nOctaves-1) /nbScales)
-    double scalingFactor, alpha, beta, theta = 0;
-
-    // pattern definition, radius normalized to 1.0 (outer point position+sigma=1.0)
-    const int n[8] = {6,6,6,6,6,6,6,1}; // number of points on each concentric circle (from outer to inner)
-    const double bigR(2.0/3.0); // bigger radius
-    const double smallR(2.0/24.0); // smaller radius
-    const double unitSpace( (bigR-smallR)/21.0 ); // define spaces between concentric circles (from center to outer: 1,2,3,4,5,6)
-    // radii of the concentric cirles (from outer to inner)
-    const double radius[8] = {bigR, bigR-6*unitSpace, bigR-11*unitSpace, bigR-15*unitSpace, bigR-18*unitSpace, bigR-20*unitSpace, smallR, 0.0};
-    // sigma of pattern points (each group of 6 points on a concentric cirle has the same sigma)
-    const double sigma[8] = {radius[0]/2.0, radius[1]/2.0, radius[2]/2.0,
-                             radius[3]/2.0, radius[4]/2.0, radius[5]/2.0,
-                             radius[6]/2.0, radius[6]/2.0
-                            };
-    // fill the lookup table
-    for( int scaleIdx=0; scaleIdx < FREAK_NB_SCALES; ++scaleIdx )
-    {
-        patternSizes[scaleIdx] = 0; // proper initialization
-        scalingFactor = std::pow(scaleStep,scaleIdx); //scale of the pattern, scaleStep ^ scaleIdx
-
-        for( int orientationIdx = 0; orientationIdx < FREAK_NB_ORIENTATION; ++orientationIdx )
-        {
-            theta = double(orientationIdx)* 2*CV_PI/double(FREAK_NB_ORIENTATION); // orientation of the pattern
-            int pointIdx = 0;
-
-            PatternPoint* patternLookupPtr = &patternLookup[0];
-            for( size_t i = 0; i < 8; ++i )
-            {
-                for( int k = 0 ; k < n[i]; ++k )
-                {
-                    beta = CV_PI/n[i] * (i%2); // orientation offset so that groups of points on each circles are staggered
-                    alpha = double(k)* 2*CV_PI/double(n[i])+beta+theta;
-
-                    // add the point to the look-up table
-                    PatternPoint& point = patternLookupPtr[ scaleIdx*FREAK_NB_ORIENTATION*FREAK_NB_POINTS+orientationIdx*FREAK_NB_POINTS+pointIdx ];
-                    point.x = static_cast<float>(radius[i] * cos(alpha) * scalingFactor * patternScale);
-                    point.y = static_cast<float>(radius[i] * sin(alpha) * scalingFactor * patternScale);
-                    point.sigma = static_cast<float>(sigma[i] * scalingFactor * patternScale);
-
-                    // adapt the sizeList if necessary
-                    const int sizeMax = static_cast<int>(ceil((radius[i]+sigma[i])*scalingFactor*patternScale)) + 1;
-                    if( patternSizes[scaleIdx] < sizeMax )
-                        patternSizes[scaleIdx] = sizeMax;
-
-                    ++pointIdx;
-                }
-            }
-        }
-    }
-
-    // build the list of orientation pairs
-    orientationPairs[0].i=0; orientationPairs[0].j=3; orientationPairs[1].i=1; orientationPairs[1].j=4; orientationPairs[2].i=2; orientationPairs[2].j=5;
-    orientationPairs[3].i=0; orientationPairs[3].j=2; orientationPairs[4].i=1; orientationPairs[4].j=3; orientationPairs[5].i=2; orientationPairs[5].j=4;
-    orientationPairs[6].i=3; orientationPairs[6].j=5; orientationPairs[7].i=4; orientationPairs[7].j=0; orientationPairs[8].i=5; orientationPairs[8].j=1;
-
-    orientationPairs[9].i=6; orientationPairs[9].j=9; orientationPairs[10].i=7; orientationPairs[10].j=10; orientationPairs[11].i=8; orientationPairs[11].j=11;
-    orientationPairs[12].i=6; orientationPairs[12].j=8; orientationPairs[13].i=7; orientationPairs[13].j=9; orientationPairs[14].i=8; orientationPairs[14].j=10;
-    orientationPairs[15].i=9; orientationPairs[15].j=11; orientationPairs[16].i=10; orientationPairs[16].j=6; orientationPairs[17].i=11; orientationPairs[17].j=7;
-
-    orientationPairs[18].i=12; orientationPairs[18].j=15; orientationPairs[19].i=13; orientationPairs[19].j=16; orientationPairs[20].i=14; orientationPairs[20].j=17;
-    orientationPairs[21].i=12; orientationPairs[21].j=14; orientationPairs[22].i=13; orientationPairs[22].j=15; orientationPairs[23].i=14; orientationPairs[23].j=16;
-    orientationPairs[24].i=15; orientationPairs[24].j=17; orientationPairs[25].i=16; orientationPairs[25].j=12; orientationPairs[26].i=17; orientationPairs[26].j=13;
-
-    orientationPairs[27].i=18; orientationPairs[27].j=21; orientationPairs[28].i=19; orientationPairs[28].j=22; orientationPairs[29].i=20; orientationPairs[29].j=23;
-    orientationPairs[30].i=18; orientationPairs[30].j=20; orientationPairs[31].i=19; orientationPairs[31].j=21; orientationPairs[32].i=20; orientationPairs[32].j=22;
-    orientationPairs[33].i=21; orientationPairs[33].j=23; orientationPairs[34].i=22; orientationPairs[34].j=18; orientationPairs[35].i=23; orientationPairs[35].j=19;
-
-    orientationPairs[36].i=24; orientationPairs[36].j=27; orientationPairs[37].i=25; orientationPairs[37].j=28; orientationPairs[38].i=26; orientationPairs[38].j=29;
-    orientationPairs[39].i=30; orientationPairs[39].j=33; orientationPairs[40].i=31; orientationPairs[40].j=34; orientationPairs[41].i=32; orientationPairs[41].j=35;
-    orientationPairs[42].i=36; orientationPairs[42].j=39; orientationPairs[43].i=37; orientationPairs[43].j=40; orientationPairs[44].i=38; orientationPairs[44].j=41;
-
-    for( unsigned m = FREAK_NB_ORIENPAIRS; m--; )
-    {
-        const float dx = patternLookup[orientationPairs[m].i].x-patternLookup[orientationPairs[m].j].x;
-        const float dy = patternLookup[orientationPairs[m].i].y-patternLookup[orientationPairs[m].j].y;
-        const float norm_sq = (dx*dx+dy*dy);
-        orientationPairs[m].weight_dx = int((dx/(norm_sq))*4096.0+0.5);
-        orientationPairs[m].weight_dy = int((dy/(norm_sq))*4096.0+0.5);
-    }
-
-    // build the list of description pairs
-    std::vector<DescriptionPair> allPairs;
-    for( unsigned int i = 1; i < (unsigned int)FREAK_NB_POINTS; ++i )
-    {
-        // (generate all the pairs)
-        for( unsigned int j = 0; (unsigned int)j < i; ++j )
-        {
-            DescriptionPair pair = {(uchar)i,(uchar)j};
-            allPairs.push_back(pair);
-        }
-    }
-    // Input vector provided
-    if( !selectedPairs0.empty() )
-    {
-        if( (int)selectedPairs0.size() == FREAK_NB_PAIRS )
-        {
-            for( int i = 0; i < FREAK_NB_PAIRS; ++i )
-                 descriptionPairs[i] = allPairs[selectedPairs0.at(i)];
-        }
-        else
-        {
-            CV_Error(Error::StsVecLengthErr, "Input vector does not match the required size");
-        }
-    }
-    else // default selected pairs
-    {
-        for( int i = 0; i < FREAK_NB_PAIRS; ++i )
-             descriptionPairs[i] = allPairs[FREAK_DEF_PAIRS[i]];
-    }
-}
-
-void FREAK::computeImpl( InputArray _image, std::vector<KeyPoint>& keypoints, OutputArray _descriptors ) const
-{
-    Mat image = _image.getMat();
-    if( image.empty() )
-        return;
-    if( keypoints.empty() )
-        return;
-
-    ((FREAK*)this)->buildPattern();
-
-    // Convert to gray if not already
-    Mat grayImage = image;
-//    if( image.channels() > 1 )
-//        cvtColor( image, grayImage, COLOR_BGR2GRAY );
-
-    // Use 32-bit integers if we won't overflow in the integral image
-    if ((image.depth() == CV_8U || image.depth() == CV_8S) &&
-        (image.rows * image.cols) < 8388608 ) // 8388608 = 2 ^ (32 - 8(bit depth) - 1(sign bit))
-    {
-        // Create the integral image appropriate for our type & usage
-        if (image.depth() == CV_8U)
-            computeDescriptors<uchar, int>(grayImage, keypoints, _descriptors);
-        else if (image.depth() == CV_8S)
-            computeDescriptors<char, int>(grayImage, keypoints, _descriptors);
-        else
-            CV_Error( Error::StsUnsupportedFormat, "" );
-    } else {
-        // Create the integral image appropriate for our type & usage
-        if ( image.depth() == CV_8U )
-            computeDescriptors<uchar, double>(grayImage, keypoints, _descriptors);
-        else if ( image.depth() == CV_8S )
-            computeDescriptors<char, double>(grayImage, keypoints, _descriptors);
-        else if ( image.depth() == CV_16U )
-            computeDescriptors<ushort, double>(grayImage, keypoints, _descriptors);
-        else if ( image.depth() == CV_16S )
-            computeDescriptors<short, double>(grayImage, keypoints, _descriptors);
-        else
-            CV_Error( Error::StsUnsupportedFormat, "" );
-    }
-}
-
-template <typename srcMatType>
-void FREAK::extractDescriptor(srcMatType *pointsValue, void ** ptr) const
-{
-    std::bitset<FREAK_NB_PAIRS>** ptrScalar = (std::bitset<FREAK_NB_PAIRS>**) ptr;
-
-    // extracting descriptor preserving the order of SSE version
-    int cnt = 0;
-    for( int n = 7; n < FREAK_NB_PAIRS; n += 128)
-    {
-        for( int m = 8; m--; )
-        {
-            int nm = n-m;
-            for(int kk = nm+15*8; kk >= nm; kk-=8, ++cnt)
-            {
-                (*ptrScalar)->set(kk, pointsValue[descriptionPairs[cnt].i] >= pointsValue[descriptionPairs[cnt].j]);
-            }
-        }
-    }
-    --(*ptrScalar);
-}
-
-#if CV_SSE2
-template <>
-void FREAK::extractDescriptor(uchar *pointsValue, void ** ptr) const
-{
-    __m128i** ptrSSE = (__m128i**) ptr;
-
-    // note that comparisons order is modified in each block (but first 128 comparisons remain globally the same-->does not affect the 128,384 bits segmanted matching strategy)
-    int cnt = 0;
-    for( int n = FREAK_NB_PAIRS/128; n-- ; )
-    {
-        __m128i result128 = _mm_setzero_si128();
-        for( int m = 128/16; m--; cnt += 16 )
-        {
-            __m128i operand1 = _mm_set_epi8(pointsValue[descriptionPairs[cnt+0].i],
-                                            pointsValue[descriptionPairs[cnt+1].i],
-                                            pointsValue[descriptionPairs[cnt+2].i],
-                                            pointsValue[descriptionPairs[cnt+3].i],
-                                            pointsValue[descriptionPairs[cnt+4].i],
-                                            pointsValue[descriptionPairs[cnt+5].i],
-                                            pointsValue[descriptionPairs[cnt+6].i],
-                                            pointsValue[descriptionPairs[cnt+7].i],
-                                            pointsValue[descriptionPairs[cnt+8].i],
-                                            pointsValue[descriptionPairs[cnt+9].i],
-                                            pointsValue[descriptionPairs[cnt+10].i],
-                                            pointsValue[descriptionPairs[cnt+11].i],
-                                            pointsValue[descriptionPairs[cnt+12].i],
-                                            pointsValue[descriptionPairs[cnt+13].i],
-                                            pointsValue[descriptionPairs[cnt+14].i],
-                                            pointsValue[descriptionPairs[cnt+15].i]);
-
-            __m128i operand2 = _mm_set_epi8(pointsValue[descriptionPairs[cnt+0].j],
-                                            pointsValue[descriptionPairs[cnt+1].j],
-                                            pointsValue[descriptionPairs[cnt+2].j],
-                                            pointsValue[descriptionPairs[cnt+3].j],
-                                            pointsValue[descriptionPairs[cnt+4].j],
-                                            pointsValue[descriptionPairs[cnt+5].j],
-                                            pointsValue[descriptionPairs[cnt+6].j],
-                                            pointsValue[descriptionPairs[cnt+7].j],
-                                            pointsValue[descriptionPairs[cnt+8].j],
-                                            pointsValue[descriptionPairs[cnt+9].j],
-                                            pointsValue[descriptionPairs[cnt+10].j],
-                                            pointsValue[descriptionPairs[cnt+11].j],
-                                            pointsValue[descriptionPairs[cnt+12].j],
-                                            pointsValue[descriptionPairs[cnt+13].j],
-                                            pointsValue[descriptionPairs[cnt+14].j],
-                                            pointsValue[descriptionPairs[cnt+15].j]);
-
-            __m128i workReg = _mm_min_epu8(operand1, operand2); // emulated "not less than" for 8-bit UNSIGNED integers
-            workReg = _mm_cmpeq_epi8(workReg, operand2);        // emulated "not less than" for 8-bit UNSIGNED integers
-
-            workReg = _mm_and_si128(_mm_set1_epi16(short(0x8080 >> m)), workReg); // merge the last 16 bits with the 128bits std::vector until full
-            result128 = _mm_or_si128(result128, workReg);
-        }
-        (**ptrSSE) = result128;
-        ++(*ptrSSE);
-    }
-    (*ptrSSE) -= 8;
-}
-#endif
-
-template <typename srcMatType, typename iiMatType>
-void FREAK::computeDescriptors( InputArray _image, std::vector<KeyPoint>& keypoints, OutputArray _descriptors ) const {
-
-    Mat image = _image.getMat();
-    Mat imgIntegral;
-    integral(image, imgIntegral, DataType<iiMatType>::type);
-    std::vector<int> kpScaleIdx(keypoints.size()); // used to save pattern scale index corresponding to each keypoints
-    const std::vector<int>::iterator ScaleIdxBegin = kpScaleIdx.begin(); // used in std::vector erase function
-    const std::vector<cv::KeyPoint>::iterator kpBegin = keypoints.begin(); // used in std::vector erase function
-    const float sizeCst = static_cast<float>(FREAK_NB_SCALES/(FREAK_LOG2* nOctaves));
-    srcMatType pointsValue[FREAK_NB_POINTS];
-    int thetaIdx = 0;
-    int direction0;
-    int direction1;
-
-    // compute the scale index corresponding to the keypoint size and remove keypoints close to the border
-    if( scaleNormalized )
-    {
-        for( size_t k = keypoints.size(); k--; )
-        {
-            //Is k non-zero? If so, decrement it and continue"
-            kpScaleIdx[k] = std::max( (int)(std::log(keypoints[k].size/FREAK_SMALLEST_KP_SIZE)*sizeCst+0.5) ,0);
-            if( kpScaleIdx[k] >= FREAK_NB_SCALES )
-                kpScaleIdx[k] = FREAK_NB_SCALES-1;
-
-            if( keypoints[k].pt.x <= patternSizes[kpScaleIdx[k]] || //check if the description at this specific position and scale fits inside the image
-                 keypoints[k].pt.y <= patternSizes[kpScaleIdx[k]] ||
-                 keypoints[k].pt.x >= image.cols-patternSizes[kpScaleIdx[k]] ||
-                 keypoints[k].pt.y >= image.rows-patternSizes[kpScaleIdx[k]]
-               )
-            {
-                keypoints.erase(kpBegin+k);
-                kpScaleIdx.erase(ScaleIdxBegin+k);
-            }
-        }
-    }
-    else
-    {
-        const int scIdx = std::max( (int)(1.0986122886681*sizeCst+0.5) ,0);
-        for( size_t k = keypoints.size(); k--; )
-        {
-            kpScaleIdx[k] = scIdx; // equivalent to the formule when the scale is normalized with a constant size of keypoints[k].size=3*SMALLEST_KP_SIZE
-            if( kpScaleIdx[k] >= FREAK_NB_SCALES )
-            {
-                kpScaleIdx[k] = FREAK_NB_SCALES-1;
-            }
-            if( keypoints[k].pt.x <= patternSizes[kpScaleIdx[k]] ||
-                keypoints[k].pt.y <= patternSizes[kpScaleIdx[k]] ||
-                keypoints[k].pt.x >= image.cols-patternSizes[kpScaleIdx[k]] ||
-                keypoints[k].pt.y >= image.rows-patternSizes[kpScaleIdx[k]]
-               )
-            {
-                keypoints.erase(kpBegin+k);
-                kpScaleIdx.erase(ScaleIdxBegin+k);
-            }
-        }
-    }
-
-    // allocate descriptor memory, estimate orientations, extract descriptors
-    if( !extAll )
-    {
-        // extract the best comparisons only
-        _descriptors.create((int)keypoints.size(), FREAK_NB_PAIRS/8, CV_8U);
-        _descriptors.setTo(Scalar::all(0));
-        Mat descriptors = _descriptors.getMat();
-
-        void *ptr = descriptors.data+(keypoints.size()-1)*descriptors.step[0];
-
-        for( size_t k = keypoints.size(); k--; ) {
-            // estimate orientation (gradient)
-            if( !orientationNormalized )
-            {
-                thetaIdx = 0; // assign 0° to all keypoints
-                keypoints[k].angle = 0.0;
-            }
-            else
-            {
-                // get the points intensity value in the un-rotated pattern
-                for( int i = FREAK_NB_POINTS; i--; ) {
-                    pointsValue[i] = meanIntensity<srcMatType, iiMatType>(image, imgIntegral,
-                                                                          keypoints[k].pt.x, keypoints[k].pt.y,
-                                                                          kpScaleIdx[k], 0, i);
-                }
-                direction0 = 0;
-                direction1 = 0;
-                for( int m = 45; m--; )
-                {
-                    //iterate through the orientation pairs
-                    const int delta = (pointsValue[ orientationPairs[m].i ]-pointsValue[ orientationPairs[m].j ]);
-                    direction0 += delta*(orientationPairs[m].weight_dx)/2048;
-                    direction1 += delta*(orientationPairs[m].weight_dy)/2048;
-                }
-
-                keypoints[k].angle = static_cast<float>(atan2((float)direction1,(float)direction0)*(180.0/CV_PI));//estimate orientation
-                thetaIdx = int(FREAK_NB_ORIENTATION*keypoints[k].angle*(1/360.0)+0.5);
-                if( thetaIdx < 0 )
-                    thetaIdx += FREAK_NB_ORIENTATION;
-
-                if( thetaIdx >= FREAK_NB_ORIENTATION )
-                    thetaIdx -= FREAK_NB_ORIENTATION;
-            }
-            // extract descriptor at the computed orientation
-            for( int i = FREAK_NB_POINTS; i--; ) {
-                pointsValue[i] = meanIntensity<srcMatType, iiMatType>(image, imgIntegral,
-                                                                      keypoints[k].pt.x, keypoints[k].pt.y,
-                                                                      kpScaleIdx[k], thetaIdx, i);
-            }
-
-            // Extract descriptor
-            extractDescriptor<srcMatType>(pointsValue, &ptr);
-        }
-    }
-    else // extract all possible comparisons for selection
-    {
-        _descriptors.create((int)keypoints.size(), 128, CV_8U);
-        _descriptors.setTo(Scalar::all(0));
-        Mat descriptors = _descriptors.getMat();
-        std::bitset<1024>* ptr = (std::bitset<1024>*) (descriptors.data+(keypoints.size()-1)*descriptors.step[0]);
-
-        for( size_t k = keypoints.size(); k--; )
-        {
-            //estimate orientation (gradient)
-            if( !orientationNormalized )
-            {
-                thetaIdx = 0;//assign 0° to all keypoints
-                keypoints[k].angle = 0.0;
-            }
-            else
-            {
-                //get the points intensity value in the un-rotated pattern
-                for( int i = FREAK_NB_POINTS;i--; )
-                    pointsValue[i] = meanIntensity<srcMatType, iiMatType>(image, imgIntegral,
-                                                                          keypoints[k].pt.x,keypoints[k].pt.y,
-                                                                          kpScaleIdx[k], 0, i);
-
-                direction0 = 0;
-                direction1 = 0;
-                for( int m = 45; m--; )
-                {
-                    //iterate through the orientation pairs
-                    const int delta = (pointsValue[ orientationPairs[m].i ]-pointsValue[ orientationPairs[m].j ]);
-                    direction0 += delta*(orientationPairs[m].weight_dx)/2048;
-                    direction1 += delta*(orientationPairs[m].weight_dy)/2048;
-                }
-
-                keypoints[k].angle = static_cast<float>(atan2((float)direction1,(float)direction0)*(180.0/CV_PI)); //estimate orientation
-                thetaIdx = int(FREAK_NB_ORIENTATION*keypoints[k].angle*(1/360.0)+0.5);
-
-                if( thetaIdx < 0 )
-                    thetaIdx += FREAK_NB_ORIENTATION;
-
-                if( thetaIdx >= FREAK_NB_ORIENTATION )
-                    thetaIdx -= FREAK_NB_ORIENTATION;
-            }
-            // get the points intensity value in the rotated pattern
-            for( int i = FREAK_NB_POINTS; i--; ) {
-                pointsValue[i] = meanIntensity<srcMatType, iiMatType>(image, imgIntegral,
-                                                                      keypoints[k].pt.x, keypoints[k].pt.y,
-                                                                      kpScaleIdx[k], thetaIdx, i);
-            }
-
-            int cnt(0);
-            for( int i = 1; i < FREAK_NB_POINTS; ++i )
-            {
-                //(generate all the pairs)
-                for( int j = 0; j < i; ++j )
-                {
-                    ptr->set(cnt, pointsValue[i] >= pointsValue[j] );
-                    ++cnt;
-                }
-            }
-            --ptr;
-        }
-    }
-}
-
-// simply take average on a square patch, not even gaussian approx
-template <typename imgType, typename iiType>
-imgType FREAK::meanIntensity( InputArray _image, InputArray _integral,
-                              const float kp_x,
-                              const float kp_y,
-                              const unsigned int scale,
-                              const unsigned int rot,
-                              const unsigned int point) const {
-    Mat image = _image.getMat(), integral = _integral.getMat();
-    // get point position in image
-    const PatternPoint& FreakPoint = patternLookup[scale*FREAK_NB_ORIENTATION*FREAK_NB_POINTS + rot*FREAK_NB_POINTS + point];
-    const float xf = FreakPoint.x+kp_x;
-    const float yf = FreakPoint.y+kp_y;
-    const int x = int(xf);
-    const int y = int(yf);
-
-    // get the sigma:
-    const float radius = FreakPoint.sigma;
-
-    // calculate output:
-    if( radius < 0.5 )
-    {
-        // interpolation multipliers:
-        const int r_x = static_cast<int>((xf-x)*1024);
-        const int r_y = static_cast<int>((yf-y)*1024);
-        const int r_x_1 = (1024-r_x);
-        const int r_y_1 = (1024-r_y);
-        unsigned int ret_val;
-        // linear interpolation:
-        ret_val = r_x_1*r_y_1*int(image.at<imgType>(y  , x  ))
-                + r_x  *r_y_1*int(image.at<imgType>(y  , x+1))
-                + r_x_1*r_y  *int(image.at<imgType>(y+1, x  ))
-                + r_x  *r_y  *int(image.at<imgType>(y+1, x+1));
-        //return the rounded mean
-        ret_val += 2 * 1024 * 1024;
-        return static_cast<imgType>(ret_val / (4 * 1024 * 1024));
-    }
-
-    // expected case:
-
-    // calculate borders
-    const int x_left = int(xf-radius+0.5);
-    const int y_top = int(yf-radius+0.5);
-    const int x_right = int(xf+radius+1.5);//integral image is 1px wider
-    const int y_bottom = int(yf+radius+1.5);//integral image is 1px higher
-    iiType ret_val;
-
-    ret_val = integral.at<iiType>(y_bottom,x_right);//bottom right corner
-    ret_val -= integral.at<iiType>(y_bottom,x_left);
-    ret_val += integral.at<iiType>(y_top,x_left);
-    ret_val -= integral.at<iiType>(y_top,x_right);
-    ret_val = ret_val/( (x_right-x_left)* (y_bottom-y_top) );
-    //~ std::cout<<integral.step[1]<<std::endl;
-    return static_cast<imgType>(ret_val);
-}
-
-// pair selection algorithm from a set of training images and corresponding keypoints
-std::vector<int> FREAK::selectPairs(const std::vector<Mat>& images
-                                        , std::vector<std::vector<KeyPoint> >& keypoints
-                                        , const double corrTresh
-                                        , bool verbose )
-{
-    extAll = true;
-    // compute descriptors with all pairs
-    Mat descriptors;
-
-    if( verbose )
-        std::cout << "Number of images: " << images.size() << std::endl;
-
-    for( size_t i = 0;i < images.size(); ++i )
-    {
-        Mat descriptorsTmp;
-        computeImpl(images[i],keypoints[i],descriptorsTmp);
-        descriptors.push_back(descriptorsTmp);
-    }
-
-    if( verbose )
-        std::cout << "number of keypoints: " << descriptors.rows << std::endl;
-
-    //descriptor in floating point format (each bit is a float)
-    Mat descriptorsFloat = Mat::zeros(descriptors.rows, 903, CV_32F);
-
-    std::bitset<1024>* ptr = (std::bitset<1024>*) (descriptors.data+(descriptors.rows-1)*descriptors.step[0]);
-    for( int m = descriptors.rows; m--; )
-    {
-        for( int n = 903; n--; )
-        {
-            if( ptr->test(n) == true )
-                descriptorsFloat.at<float>(m,n)=1.0f;
-        }
-        --ptr;
-    }
-
-    std::vector<PairStat> pairStat;
-    for( int n = 903; n--; )
-    {
-        // the higher the variance, the better --> mean = 0.5
-        PairStat tmp = { fabs( mean(descriptorsFloat.col(n))[0]-0.5 ) ,n};
-        pairStat.push_back(tmp);
-    }
-
-    std::sort( pairStat.begin(),pairStat.end(), sortMean() );
-
-    std::vector<PairStat> bestPairs;
-    for( int m = 0; m < 903; ++m )
-    {
-        if( verbose )
-            std::cout << m << ":" << bestPairs.size() << " " << std::flush;
-        double corrMax(0);
-
-        for( size_t n = 0; n < bestPairs.size(); ++n )
-        {
-            int idxA = bestPairs[n].idx;
-            int idxB = pairStat[m].idx;
-            double corr(0);
-            // compute correlation between 2 pairs
-            corr = fabs(compareHist(descriptorsFloat.col(idxA), descriptorsFloat.col(idxB), HISTCMP_CORREL));
-
-            if( corr > corrMax )
-            {
-                corrMax = corr;
-                if( corrMax >= corrTresh )
-                    break;
-            }
-        }
-
-        if( corrMax < corrTresh/*0.7*/ )
-            bestPairs.push_back(pairStat[m]);
-
-        if( bestPairs.size() >= 512 )
-        {
-            if( verbose )
-                std::cout << m << std::endl;
-            break;
-        }
-    }
-
-    std::vector<int> idxBestPairs;
-    if( (int)bestPairs.size() >= FREAK_NB_PAIRS )
-    {
-        for( int i = 0; i < FREAK_NB_PAIRS; ++i )
-            idxBestPairs.push_back(bestPairs[i].idx);
-    }
-    else
-    {
-        if( verbose )
-            std::cout << "correlation threshold too small (restrictive)" << std::endl;
-        CV_Error(Error::StsError, "correlation threshold too small (restrictive)");
-    }
-    extAll = false;
-    return idxBestPairs;
-}
-
-
-/*
-// create an image showing the brisk pattern
-void FREAKImpl::drawPattern()
-{
-    Mat pattern = Mat::zeros(1000, 1000, CV_8UC3) + Scalar(255,255,255);
-    int sFac = 500 / patternScale;
-    for( int n = 0; n < kNB_POINTS; ++n )
-    {
-        PatternPoint& pt = patternLookup[n];
-        circle(pattern, Point( pt.x*sFac,pt.y*sFac)+Point(500,500), pt.sigma*sFac, Scalar(0,0,255),2);
-        // rectangle(pattern, Point( (pt.x-pt.sigma)*sFac,(pt.y-pt.sigma)*sFac)+Point(500,500), Point( (pt.x+pt.sigma)*sFac,(pt.y+pt.sigma)*sFac)+Point(500,500), Scalar(0,0,255),2);
-
-        circle(pattern, Point( pt.x*sFac,pt.y*sFac)+Point(500,500), 1, Scalar(0,0,0),3);
-        std::ostringstream oss;
-        oss << n;
-        putText( pattern, oss.str(), Point( pt.x*sFac,pt.y*sFac)+Point(500,500), FONT_HERSHEY_SIMPLEX,0.5, Scalar(0,0,0), 1);
-    }
-    imshow( "FreakDescriptorExtractor pattern", pattern );
-    waitKey(0);
-}
-*/
-
-// -------------------------------------------------
-/* FREAK interface implementation */
-FREAK::FREAK( bool _orientationNormalized, bool _scaleNormalized
-            , float _patternScale, int _nOctaves, const std::vector<int>& _selectedPairs )
-    : orientationNormalized(_orientationNormalized), scaleNormalized(_scaleNormalized),
-    patternScale(_patternScale), nOctaves(_nOctaves), extAll(false), nOctaves0(0), selectedPairs0(_selectedPairs)
-{
-}
-
-FREAK::~FREAK()
-{
-}
-
-int FREAK::descriptorSize() const
-{
-    return FREAK_NB_PAIRS / 8; // descriptor length in bytes
-}
-
-int FREAK::descriptorType() const
-{
-    return CV_8U;
-}
-
-int FREAK::defaultNorm() const
-{
-    return NORM_HAMMING;
-}
-
-} // END NAMESPACE CV
diff --git a/modules/features2d/src/generated_16.i b/modules/features2d/src/generated_16.i
deleted file mode 100644 (file)
index b85bf06..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-// Code generated with '$ scripts/generate_code.py src/test_pairs.txt 16'
-#define SMOOTHED(y,x) smoothedSum(sum, pt, y, x)
-    desc[0] = (uchar)(((SMOOTHED(-2, -1) < SMOOTHED(7, -1)) << 7) + ((SMOOTHED(-14, -1) < SMOOTHED(-3, 3)) << 6) + ((SMOOTHED(1, -2) < SMOOTHED(11, 2)) << 5) + ((SMOOTHED(1, 6) < SMOOTHED(-10, -7)) << 4) + ((SMOOTHED(13, 2) < SMOOTHED(-1, 0)) << 3) + ((SMOOTHED(-14, 5) < SMOOTHED(5, -3)) << 2) + ((SMOOTHED(-2, 8) < SMOOTHED(2, 4)) << 1) + ((SMOOTHED(-11, 8) < SMOOTHED(-15, 5)) << 0));
-    desc[1] = (uchar)(((SMOOTHED(-6, -23) < SMOOTHED(8, -9)) << 7) + ((SMOOTHED(-12, 6) < SMOOTHED(-10, 8)) << 6) + ((SMOOTHED(-3, -1) < SMOOTHED(8, 1)) << 5) + ((SMOOTHED(3, 6) < SMOOTHED(5, 6)) << 4) + ((SMOOTHED(-7, -6) < SMOOTHED(5, -5)) << 3) + ((SMOOTHED(22, -2) < SMOOTHED(-11, -8)) << 2) + ((SMOOTHED(14, 7) < SMOOTHED(8, 5)) << 1) + ((SMOOTHED(-1, 14) < SMOOTHED(-5, -14)) << 0));
-    desc[2] = (uchar)(((SMOOTHED(-14, 9) < SMOOTHED(2, 0)) << 7) + ((SMOOTHED(7, -3) < SMOOTHED(22, 6)) << 6) + ((SMOOTHED(-6, 6) < SMOOTHED(-8, -5)) << 5) + ((SMOOTHED(-5, 9) < SMOOTHED(7, -1)) << 4) + ((SMOOTHED(-3, -7) < SMOOTHED(-10, -18)) << 3) + ((SMOOTHED(4, -5) < SMOOTHED(0, 11)) << 2) + ((SMOOTHED(2, 3) < SMOOTHED(9, 10)) << 1) + ((SMOOTHED(-10, 3) < SMOOTHED(4, 9)) << 0));
-    desc[3] = (uchar)(((SMOOTHED(0, 12) < SMOOTHED(-3, 19)) << 7) + ((SMOOTHED(1, 15) < SMOOTHED(-11, -5)) << 6) + ((SMOOTHED(14, -1) < SMOOTHED(7, 8)) << 5) + ((SMOOTHED(7, -23) < SMOOTHED(-5, 5)) << 4) + ((SMOOTHED(0, -6) < SMOOTHED(-10, 17)) << 3) + ((SMOOTHED(13, -4) < SMOOTHED(-3, -4)) << 2) + ((SMOOTHED(-12, 1) < SMOOTHED(-12, 2)) << 1) + ((SMOOTHED(0, 8) < SMOOTHED(3, 22)) << 0));
-    desc[4] = (uchar)(((SMOOTHED(-13, 13) < SMOOTHED(3, -1)) << 7) + ((SMOOTHED(-16, 17) < SMOOTHED(6, 10)) << 6) + ((SMOOTHED(7, 15) < SMOOTHED(-5, 0)) << 5) + ((SMOOTHED(2, -12) < SMOOTHED(19, -2)) << 4) + ((SMOOTHED(3, -6) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(8, 3) < SMOOTHED(0, 14)) << 2) + ((SMOOTHED(4, -11) < SMOOTHED(5, 5)) << 1) + ((SMOOTHED(11, -7) < SMOOTHED(7, 1)) << 0));
-    desc[5] = (uchar)(((SMOOTHED(6, 12) < SMOOTHED(21, 3)) << 7) + ((SMOOTHED(-3, 2) < SMOOTHED(14, 1)) << 6) + ((SMOOTHED(5, 1) < SMOOTHED(-5, 11)) << 5) + ((SMOOTHED(3, -17) < SMOOTHED(-6, 2)) << 4) + ((SMOOTHED(6, 8) < SMOOTHED(5, -10)) << 3) + ((SMOOTHED(-14, -2) < SMOOTHED(0, 4)) << 2) + ((SMOOTHED(5, -7) < SMOOTHED(-6, 5)) << 1) + ((SMOOTHED(10, 4) < SMOOTHED(4, -7)) << 0));
-    desc[6] = (uchar)(((SMOOTHED(22, 0) < SMOOTHED(7, -18)) << 7) + ((SMOOTHED(-1, -3) < SMOOTHED(0, 18)) << 6) + ((SMOOTHED(-4, 22) < SMOOTHED(-5, 3)) << 5) + ((SMOOTHED(1, -7) < SMOOTHED(2, -3)) << 4) + ((SMOOTHED(19, -20) < SMOOTHED(17, -2)) << 3) + ((SMOOTHED(3, -10) < SMOOTHED(-8, 24)) << 2) + ((SMOOTHED(-5, -14) < SMOOTHED(7, 5)) << 1) + ((SMOOTHED(-2, 12) < SMOOTHED(-4, -15)) << 0));
-    desc[7] = (uchar)(((SMOOTHED(4, 12) < SMOOTHED(0, -19)) << 7) + ((SMOOTHED(20, 13) < SMOOTHED(3, 5)) << 6) + ((SMOOTHED(-8, -12) < SMOOTHED(5, 0)) << 5) + ((SMOOTHED(-5, 6) < SMOOTHED(-7, -11)) << 4) + ((SMOOTHED(6, -11) < SMOOTHED(-3, -22)) << 3) + ((SMOOTHED(15, 4) < SMOOTHED(10, 1)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(15, -6)) << 1) + ((SMOOTHED(5, 10) < SMOOTHED(0, 24)) << 0));
-    desc[8] = (uchar)(((SMOOTHED(3, 6) < SMOOTHED(22, -2)) << 7) + ((SMOOTHED(-13, 14) < SMOOTHED(4, -4)) << 6) + ((SMOOTHED(-13, 8) < SMOOTHED(-18, -22)) << 5) + ((SMOOTHED(-1, -1) < SMOOTHED(-7, 3)) << 4) + ((SMOOTHED(-19, -12) < SMOOTHED(4, 3)) << 3) + ((SMOOTHED(8, 10) < SMOOTHED(13, -2)) << 2) + ((SMOOTHED(-6, -1) < SMOOTHED(-6, -5)) << 1) + ((SMOOTHED(2, -21) < SMOOTHED(-3, 2)) << 0));
-    desc[9] = (uchar)(((SMOOTHED(4, -7) < SMOOTHED(0, 16)) << 7) + ((SMOOTHED(-6, -5) < SMOOTHED(-12, -1)) << 6) + ((SMOOTHED(1, -1) < SMOOTHED(9, 18)) << 5) + ((SMOOTHED(-7, 10) < SMOOTHED(-11, 6)) << 4) + ((SMOOTHED(4, 3) < SMOOTHED(19, -7)) << 3) + ((SMOOTHED(-18, 5) < SMOOTHED(-4, 5)) << 2) + ((SMOOTHED(4, 0) < SMOOTHED(-20, 4)) << 1) + ((SMOOTHED(7, -11) < SMOOTHED(18, 12)) << 0));
-    desc[10] = (uchar)(((SMOOTHED(-20, 17) < SMOOTHED(-18, 7)) << 7) + ((SMOOTHED(2, 15) < SMOOTHED(19, -11)) << 6) + ((SMOOTHED(-18, 6) < SMOOTHED(-7, 3)) << 5) + ((SMOOTHED(-4, 1) < SMOOTHED(-14, 13)) << 4) + ((SMOOTHED(17, 3) < SMOOTHED(2, -8)) << 3) + ((SMOOTHED(-7, 2) < SMOOTHED(1, 6)) << 2) + ((SMOOTHED(17, -9) < SMOOTHED(-2, 8)) << 1) + ((SMOOTHED(-8, -6) < SMOOTHED(-1, 12)) << 0));
-    desc[11] = (uchar)(((SMOOTHED(-2, 4) < SMOOTHED(-1, 6)) << 7) + ((SMOOTHED(-2, 7) < SMOOTHED(6, 8)) << 6) + ((SMOOTHED(-8, -1) < SMOOTHED(-7, -9)) << 5) + ((SMOOTHED(8, -9) < SMOOTHED(15, 0)) << 4) + ((SMOOTHED(0, 22) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(-14, -1) < SMOOTHED(3, -2)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(17, -7)) << 1) + ((SMOOTHED(-8, -2) < SMOOTHED(9, -4)) << 0));
-    desc[12] = (uchar)(((SMOOTHED(5, -7) < SMOOTHED(7, 7)) << 7) + ((SMOOTHED(-5, 13) < SMOOTHED(-8, 11)) << 6) + ((SMOOTHED(11, -4) < SMOOTHED(0, 8)) << 5) + ((SMOOTHED(5, -11) < SMOOTHED(-9, -6)) << 4) + ((SMOOTHED(2, -6) < SMOOTHED(3, -20)) << 3) + ((SMOOTHED(-6, 2) < SMOOTHED(6, 10)) << 2) + ((SMOOTHED(-6, -6) < SMOOTHED(-15, 7)) << 1) + ((SMOOTHED(-6, -3) < SMOOTHED(2, 1)) << 0));
-    desc[13] = (uchar)(((SMOOTHED(11, 0) < SMOOTHED(-3, 2)) << 7) + ((SMOOTHED(7, -12) < SMOOTHED(14, 5)) << 6) + ((SMOOTHED(0, -7) < SMOOTHED(-1, -1)) << 5) + ((SMOOTHED(-16, 0) < SMOOTHED(6, 8)) << 4) + ((SMOOTHED(22, 11) < SMOOTHED(0, -3)) << 3) + ((SMOOTHED(19, 0) < SMOOTHED(5, -17)) << 2) + ((SMOOTHED(-23, -14) < SMOOTHED(-13, -19)) << 1) + ((SMOOTHED(-8, 10) < SMOOTHED(-11, -2)) << 0));
-    desc[14] = (uchar)(((SMOOTHED(-11, 6) < SMOOTHED(-10, 13)) << 7) + ((SMOOTHED(1, -7) < SMOOTHED(14, 0)) << 6) + ((SMOOTHED(-12, 1) < SMOOTHED(-5, -5)) << 5) + ((SMOOTHED(4, 7) < SMOOTHED(8, -1)) << 4) + ((SMOOTHED(-1, -5) < SMOOTHED(15, 2)) << 3) + ((SMOOTHED(-3, -1) < SMOOTHED(7, -10)) << 2) + ((SMOOTHED(3, -6) < SMOOTHED(10, -18)) << 1) + ((SMOOTHED(-7, -13) < SMOOTHED(-13, 10)) << 0));
-    desc[15] = (uchar)(((SMOOTHED(1, -1) < SMOOTHED(13, -10)) << 7) + ((SMOOTHED(-19, 14) < SMOOTHED(8, -14)) << 6) + ((SMOOTHED(-4, -13) < SMOOTHED(7, 1)) << 5) + ((SMOOTHED(1, -2) < SMOOTHED(12, -7)) << 4) + ((SMOOTHED(3, -5) < SMOOTHED(1, -5)) << 3) + ((SMOOTHED(-2, -2) < SMOOTHED(8, -10)) << 2) + ((SMOOTHED(2, 14) < SMOOTHED(8, 7)) << 1) + ((SMOOTHED(3, 9) < SMOOTHED(8, 2)) << 0));
-#undef SMOOTHED
diff --git a/modules/features2d/src/generated_32.i b/modules/features2d/src/generated_32.i
deleted file mode 100644 (file)
index 19952d2..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-// Code generated with '$ scripts/generate_code.py src/test_pairs.txt 32'
-#define SMOOTHED(y,x) smoothedSum(sum, pt, y, x)
-    desc[0] = (uchar)(((SMOOTHED(-2, -1) < SMOOTHED(7, -1)) << 7) + ((SMOOTHED(-14, -1) < SMOOTHED(-3, 3)) << 6) + ((SMOOTHED(1, -2) < SMOOTHED(11, 2)) << 5) + ((SMOOTHED(1, 6) < SMOOTHED(-10, -7)) << 4) + ((SMOOTHED(13, 2) < SMOOTHED(-1, 0)) << 3) + ((SMOOTHED(-14, 5) < SMOOTHED(5, -3)) << 2) + ((SMOOTHED(-2, 8) < SMOOTHED(2, 4)) << 1) + ((SMOOTHED(-11, 8) < SMOOTHED(-15, 5)) << 0));
-    desc[1] = (uchar)(((SMOOTHED(-6, -23) < SMOOTHED(8, -9)) << 7) + ((SMOOTHED(-12, 6) < SMOOTHED(-10, 8)) << 6) + ((SMOOTHED(-3, -1) < SMOOTHED(8, 1)) << 5) + ((SMOOTHED(3, 6) < SMOOTHED(5, 6)) << 4) + ((SMOOTHED(-7, -6) < SMOOTHED(5, -5)) << 3) + ((SMOOTHED(22, -2) < SMOOTHED(-11, -8)) << 2) + ((SMOOTHED(14, 7) < SMOOTHED(8, 5)) << 1) + ((SMOOTHED(-1, 14) < SMOOTHED(-5, -14)) << 0));
-    desc[2] = (uchar)(((SMOOTHED(-14, 9) < SMOOTHED(2, 0)) << 7) + ((SMOOTHED(7, -3) < SMOOTHED(22, 6)) << 6) + ((SMOOTHED(-6, 6) < SMOOTHED(-8, -5)) << 5) + ((SMOOTHED(-5, 9) < SMOOTHED(7, -1)) << 4) + ((SMOOTHED(-3, -7) < SMOOTHED(-10, -18)) << 3) + ((SMOOTHED(4, -5) < SMOOTHED(0, 11)) << 2) + ((SMOOTHED(2, 3) < SMOOTHED(9, 10)) << 1) + ((SMOOTHED(-10, 3) < SMOOTHED(4, 9)) << 0));
-    desc[3] = (uchar)(((SMOOTHED(0, 12) < SMOOTHED(-3, 19)) << 7) + ((SMOOTHED(1, 15) < SMOOTHED(-11, -5)) << 6) + ((SMOOTHED(14, -1) < SMOOTHED(7, 8)) << 5) + ((SMOOTHED(7, -23) < SMOOTHED(-5, 5)) << 4) + ((SMOOTHED(0, -6) < SMOOTHED(-10, 17)) << 3) + ((SMOOTHED(13, -4) < SMOOTHED(-3, -4)) << 2) + ((SMOOTHED(-12, 1) < SMOOTHED(-12, 2)) << 1) + ((SMOOTHED(0, 8) < SMOOTHED(3, 22)) << 0));
-    desc[4] = (uchar)(((SMOOTHED(-13, 13) < SMOOTHED(3, -1)) << 7) + ((SMOOTHED(-16, 17) < SMOOTHED(6, 10)) << 6) + ((SMOOTHED(7, 15) < SMOOTHED(-5, 0)) << 5) + ((SMOOTHED(2, -12) < SMOOTHED(19, -2)) << 4) + ((SMOOTHED(3, -6) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(8, 3) < SMOOTHED(0, 14)) << 2) + ((SMOOTHED(4, -11) < SMOOTHED(5, 5)) << 1) + ((SMOOTHED(11, -7) < SMOOTHED(7, 1)) << 0));
-    desc[5] = (uchar)(((SMOOTHED(6, 12) < SMOOTHED(21, 3)) << 7) + ((SMOOTHED(-3, 2) < SMOOTHED(14, 1)) << 6) + ((SMOOTHED(5, 1) < SMOOTHED(-5, 11)) << 5) + ((SMOOTHED(3, -17) < SMOOTHED(-6, 2)) << 4) + ((SMOOTHED(6, 8) < SMOOTHED(5, -10)) << 3) + ((SMOOTHED(-14, -2) < SMOOTHED(0, 4)) << 2) + ((SMOOTHED(5, -7) < SMOOTHED(-6, 5)) << 1) + ((SMOOTHED(10, 4) < SMOOTHED(4, -7)) << 0));
-    desc[6] = (uchar)(((SMOOTHED(22, 0) < SMOOTHED(7, -18)) << 7) + ((SMOOTHED(-1, -3) < SMOOTHED(0, 18)) << 6) + ((SMOOTHED(-4, 22) < SMOOTHED(-5, 3)) << 5) + ((SMOOTHED(1, -7) < SMOOTHED(2, -3)) << 4) + ((SMOOTHED(19, -20) < SMOOTHED(17, -2)) << 3) + ((SMOOTHED(3, -10) < SMOOTHED(-8, 24)) << 2) + ((SMOOTHED(-5, -14) < SMOOTHED(7, 5)) << 1) + ((SMOOTHED(-2, 12) < SMOOTHED(-4, -15)) << 0));
-    desc[7] = (uchar)(((SMOOTHED(4, 12) < SMOOTHED(0, -19)) << 7) + ((SMOOTHED(20, 13) < SMOOTHED(3, 5)) << 6) + ((SMOOTHED(-8, -12) < SMOOTHED(5, 0)) << 5) + ((SMOOTHED(-5, 6) < SMOOTHED(-7, -11)) << 4) + ((SMOOTHED(6, -11) < SMOOTHED(-3, -22)) << 3) + ((SMOOTHED(15, 4) < SMOOTHED(10, 1)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(15, -6)) << 1) + ((SMOOTHED(5, 10) < SMOOTHED(0, 24)) << 0));
-    desc[8] = (uchar)(((SMOOTHED(3, 6) < SMOOTHED(22, -2)) << 7) + ((SMOOTHED(-13, 14) < SMOOTHED(4, -4)) << 6) + ((SMOOTHED(-13, 8) < SMOOTHED(-18, -22)) << 5) + ((SMOOTHED(-1, -1) < SMOOTHED(-7, 3)) << 4) + ((SMOOTHED(-19, -12) < SMOOTHED(4, 3)) << 3) + ((SMOOTHED(8, 10) < SMOOTHED(13, -2)) << 2) + ((SMOOTHED(-6, -1) < SMOOTHED(-6, -5)) << 1) + ((SMOOTHED(2, -21) < SMOOTHED(-3, 2)) << 0));
-    desc[9] = (uchar)(((SMOOTHED(4, -7) < SMOOTHED(0, 16)) << 7) + ((SMOOTHED(-6, -5) < SMOOTHED(-12, -1)) << 6) + ((SMOOTHED(1, -1) < SMOOTHED(9, 18)) << 5) + ((SMOOTHED(-7, 10) < SMOOTHED(-11, 6)) << 4) + ((SMOOTHED(4, 3) < SMOOTHED(19, -7)) << 3) + ((SMOOTHED(-18, 5) < SMOOTHED(-4, 5)) << 2) + ((SMOOTHED(4, 0) < SMOOTHED(-20, 4)) << 1) + ((SMOOTHED(7, -11) < SMOOTHED(18, 12)) << 0));
-    desc[10] = (uchar)(((SMOOTHED(-20, 17) < SMOOTHED(-18, 7)) << 7) + ((SMOOTHED(2, 15) < SMOOTHED(19, -11)) << 6) + ((SMOOTHED(-18, 6) < SMOOTHED(-7, 3)) << 5) + ((SMOOTHED(-4, 1) < SMOOTHED(-14, 13)) << 4) + ((SMOOTHED(17, 3) < SMOOTHED(2, -8)) << 3) + ((SMOOTHED(-7, 2) < SMOOTHED(1, 6)) << 2) + ((SMOOTHED(17, -9) < SMOOTHED(-2, 8)) << 1) + ((SMOOTHED(-8, -6) < SMOOTHED(-1, 12)) << 0));
-    desc[11] = (uchar)(((SMOOTHED(-2, 4) < SMOOTHED(-1, 6)) << 7) + ((SMOOTHED(-2, 7) < SMOOTHED(6, 8)) << 6) + ((SMOOTHED(-8, -1) < SMOOTHED(-7, -9)) << 5) + ((SMOOTHED(8, -9) < SMOOTHED(15, 0)) << 4) + ((SMOOTHED(0, 22) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(-14, -1) < SMOOTHED(3, -2)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(17, -7)) << 1) + ((SMOOTHED(-8, -2) < SMOOTHED(9, -4)) << 0));
-    desc[12] = (uchar)(((SMOOTHED(5, -7) < SMOOTHED(7, 7)) << 7) + ((SMOOTHED(-5, 13) < SMOOTHED(-8, 11)) << 6) + ((SMOOTHED(11, -4) < SMOOTHED(0, 8)) << 5) + ((SMOOTHED(5, -11) < SMOOTHED(-9, -6)) << 4) + ((SMOOTHED(2, -6) < SMOOTHED(3, -20)) << 3) + ((SMOOTHED(-6, 2) < SMOOTHED(6, 10)) << 2) + ((SMOOTHED(-6, -6) < SMOOTHED(-15, 7)) << 1) + ((SMOOTHED(-6, -3) < SMOOTHED(2, 1)) << 0));
-    desc[13] = (uchar)(((SMOOTHED(11, 0) < SMOOTHED(-3, 2)) << 7) + ((SMOOTHED(7, -12) < SMOOTHED(14, 5)) << 6) + ((SMOOTHED(0, -7) < SMOOTHED(-1, -1)) << 5) + ((SMOOTHED(-16, 0) < SMOOTHED(6, 8)) << 4) + ((SMOOTHED(22, 11) < SMOOTHED(0, -3)) << 3) + ((SMOOTHED(19, 0) < SMOOTHED(5, -17)) << 2) + ((SMOOTHED(-23, -14) < SMOOTHED(-13, -19)) << 1) + ((SMOOTHED(-8, 10) < SMOOTHED(-11, -2)) << 0));
-    desc[14] = (uchar)(((SMOOTHED(-11, 6) < SMOOTHED(-10, 13)) << 7) + ((SMOOTHED(1, -7) < SMOOTHED(14, 0)) << 6) + ((SMOOTHED(-12, 1) < SMOOTHED(-5, -5)) << 5) + ((SMOOTHED(4, 7) < SMOOTHED(8, -1)) << 4) + ((SMOOTHED(-1, -5) < SMOOTHED(15, 2)) << 3) + ((SMOOTHED(-3, -1) < SMOOTHED(7, -10)) << 2) + ((SMOOTHED(3, -6) < SMOOTHED(10, -18)) << 1) + ((SMOOTHED(-7, -13) < SMOOTHED(-13, 10)) << 0));
-    desc[15] = (uchar)(((SMOOTHED(1, -1) < SMOOTHED(13, -10)) << 7) + ((SMOOTHED(-19, 14) < SMOOTHED(8, -14)) << 6) + ((SMOOTHED(-4, -13) < SMOOTHED(7, 1)) << 5) + ((SMOOTHED(1, -2) < SMOOTHED(12, -7)) << 4) + ((SMOOTHED(3, -5) < SMOOTHED(1, -5)) << 3) + ((SMOOTHED(-2, -2) < SMOOTHED(8, -10)) << 2) + ((SMOOTHED(2, 14) < SMOOTHED(8, 7)) << 1) + ((SMOOTHED(3, 9) < SMOOTHED(8, 2)) << 0));
-    desc[16] = (uchar)(((SMOOTHED(-9, 1) < SMOOTHED(-18, 0)) << 7) + ((SMOOTHED(4, 0) < SMOOTHED(1, 12)) << 6) + ((SMOOTHED(0, 9) < SMOOTHED(-14, -10)) << 5) + ((SMOOTHED(-13, -9) < SMOOTHED(-2, 6)) << 4) + ((SMOOTHED(1, 5) < SMOOTHED(10, 10)) << 3) + ((SMOOTHED(-3, -6) < SMOOTHED(-16, -5)) << 2) + ((SMOOTHED(11, 6) < SMOOTHED(-5, 0)) << 1) + ((SMOOTHED(-23, 10) < SMOOTHED(1, 2)) << 0));
-    desc[17] = (uchar)(((SMOOTHED(13, -5) < SMOOTHED(-3, 9)) << 7) + ((SMOOTHED(-4, -1) < SMOOTHED(-13, -5)) << 6) + ((SMOOTHED(10, 13) < SMOOTHED(-11, 8)) << 5) + ((SMOOTHED(19, 20) < SMOOTHED(-9, 2)) << 4) + ((SMOOTHED(4, -8) < SMOOTHED(0, -9)) << 3) + ((SMOOTHED(-14, 10) < SMOOTHED(15, 19)) << 2) + ((SMOOTHED(-14, -12) < SMOOTHED(-10, -3)) << 1) + ((SMOOTHED(-23, -3) < SMOOTHED(17, -2)) << 0));
-    desc[18] = (uchar)(((SMOOTHED(-3, -11) < SMOOTHED(6, -14)) << 7) + ((SMOOTHED(19, -2) < SMOOTHED(-4, 2)) << 6) + ((SMOOTHED(-5, 5) < SMOOTHED(3, -13)) << 5) + ((SMOOTHED(2, -2) < SMOOTHED(-5, 4)) << 4) + ((SMOOTHED(17, 4) < SMOOTHED(17, -11)) << 3) + ((SMOOTHED(-7, -2) < SMOOTHED(1, 23)) << 2) + ((SMOOTHED(8, 13) < SMOOTHED(1, -16)) << 1) + ((SMOOTHED(-13, -5) < SMOOTHED(1, -17)) << 0));
-    desc[19] = (uchar)(((SMOOTHED(4, 6) < SMOOTHED(-8, -3)) << 7) + ((SMOOTHED(-5, -9) < SMOOTHED(-2, -10)) << 6) + ((SMOOTHED(-9, 0) < SMOOTHED(-7, -2)) << 5) + ((SMOOTHED(5, 0) < SMOOTHED(5, 2)) << 4) + ((SMOOTHED(-4, -16) < SMOOTHED(6, 3)) << 3) + ((SMOOTHED(2, -15) < SMOOTHED(-2, 12)) << 2) + ((SMOOTHED(4, -1) < SMOOTHED(6, 2)) << 1) + ((SMOOTHED(1, 1) < SMOOTHED(-2, -8)) << 0));
-    desc[20] = (uchar)(((SMOOTHED(-2, 12) < SMOOTHED(-5, -2)) << 7) + ((SMOOTHED(-8, 8) < SMOOTHED(-9, 9)) << 6) + ((SMOOTHED(2, -10) < SMOOTHED(3, 1)) << 5) + ((SMOOTHED(-4, 10) < SMOOTHED(-9, 4)) << 4) + ((SMOOTHED(6, 12) < SMOOTHED(2, 5)) << 3) + ((SMOOTHED(-3, -8) < SMOOTHED(0, 5)) << 2) + ((SMOOTHED(-13, 1) < SMOOTHED(-7, 2)) << 1) + ((SMOOTHED(-1, -10) < SMOOTHED(7, -18)) << 0));
-    desc[21] = (uchar)(((SMOOTHED(-1, 8) < SMOOTHED(-9, -10)) << 7) + ((SMOOTHED(-23, -1) < SMOOTHED(6, 2)) << 6) + ((SMOOTHED(-5, -3) < SMOOTHED(3, 2)) << 5) + ((SMOOTHED(0, 11) < SMOOTHED(-4, -7)) << 4) + ((SMOOTHED(15, 2) < SMOOTHED(-10, -3)) << 3) + ((SMOOTHED(-20, -8) < SMOOTHED(-13, 3)) << 2) + ((SMOOTHED(-19, -12) < SMOOTHED(5, -11)) << 1) + ((SMOOTHED(-17, -13) < SMOOTHED(-3, 2)) << 0));
-    desc[22] = (uchar)(((SMOOTHED(7, 4) < SMOOTHED(-12, 0)) << 7) + ((SMOOTHED(5, -1) < SMOOTHED(-14, -6)) << 6) + ((SMOOTHED(-4, 11) < SMOOTHED(0, -4)) << 5) + ((SMOOTHED(3, 10) < SMOOTHED(7, -3)) << 4) + ((SMOOTHED(13, 21) < SMOOTHED(-11, 6)) << 3) + ((SMOOTHED(-12, 24) < SMOOTHED(-7, -4)) << 2) + ((SMOOTHED(4, 16) < SMOOTHED(3, -14)) << 1) + ((SMOOTHED(-3, 5) < SMOOTHED(-7, -12)) << 0));
-    desc[23] = (uchar)(((SMOOTHED(0, -4) < SMOOTHED(7, -5)) << 7) + ((SMOOTHED(-17, -9) < SMOOTHED(13, -7)) << 6) + ((SMOOTHED(22, -6) < SMOOTHED(-11, 5)) << 5) + ((SMOOTHED(2, -8) < SMOOTHED(23, -11)) << 4) + ((SMOOTHED(7, -10) < SMOOTHED(-1, 14)) << 3) + ((SMOOTHED(-3, -10) < SMOOTHED(8, 3)) << 2) + ((SMOOTHED(-13, 1) < SMOOTHED(-6, 0)) << 1) + ((SMOOTHED(-7, -21) < SMOOTHED(6, -14)) << 0));
-    desc[24] = (uchar)(((SMOOTHED(18, 19) < SMOOTHED(-4, -6)) << 7) + ((SMOOTHED(10, 7) < SMOOTHED(-1, -4)) << 6) + ((SMOOTHED(-1, 21) < SMOOTHED(1, -5)) << 5) + ((SMOOTHED(-10, 6) < SMOOTHED(-11, -2)) << 4) + ((SMOOTHED(18, -3) < SMOOTHED(-1, 7)) << 3) + ((SMOOTHED(-3, -9) < SMOOTHED(-5, 10)) << 2) + ((SMOOTHED(-13, 14) < SMOOTHED(17, -3)) << 1) + ((SMOOTHED(11, -19) < SMOOTHED(-1, -18)) << 0));
-    desc[25] = (uchar)(((SMOOTHED(8, -2) < SMOOTHED(-18, -23)) << 7) + ((SMOOTHED(0, -5) < SMOOTHED(-2, -9)) << 6) + ((SMOOTHED(-4, -11) < SMOOTHED(2, -8)) << 5) + ((SMOOTHED(14, 6) < SMOOTHED(-3, -6)) << 4) + ((SMOOTHED(-3, 0) < SMOOTHED(-15, 0)) << 3) + ((SMOOTHED(-9, 4) < SMOOTHED(-15, -9)) << 2) + ((SMOOTHED(-1, 11) < SMOOTHED(3, 11)) << 1) + ((SMOOTHED(-10, -16) < SMOOTHED(-7, 7)) << 0));
-    desc[26] = (uchar)(((SMOOTHED(-2, -10) < SMOOTHED(-10, -2)) << 7) + ((SMOOTHED(-5, -3) < SMOOTHED(5, -23)) << 6) + ((SMOOTHED(13, -8) < SMOOTHED(-15, -11)) << 5) + ((SMOOTHED(-15, 11) < SMOOTHED(6, -6)) << 4) + ((SMOOTHED(-16, -3) < SMOOTHED(-2, 2)) << 3) + ((SMOOTHED(6, 12) < SMOOTHED(-16, 24)) << 2) + ((SMOOTHED(-10, 0) < SMOOTHED(8, 11)) << 1) + ((SMOOTHED(-7, 7) < SMOOTHED(-19, -7)) << 0));
-    desc[27] = (uchar)(((SMOOTHED(5, 16) < SMOOTHED(9, -3)) << 7) + ((SMOOTHED(9, 7) < SMOOTHED(-7, -16)) << 6) + ((SMOOTHED(3, 2) < SMOOTHED(-10, 9)) << 5) + ((SMOOTHED(21, 1) < SMOOTHED(8, 7)) << 4) + ((SMOOTHED(7, 0) < SMOOTHED(1, 17)) << 3) + ((SMOOTHED(-8, 12) < SMOOTHED(9, 6)) << 2) + ((SMOOTHED(11, -7) < SMOOTHED(-8, -6)) << 1) + ((SMOOTHED(19, 0) < SMOOTHED(9, 3)) << 0));
-    desc[28] = (uchar)(((SMOOTHED(1, -7) < SMOOTHED(-5, -11)) << 7) + ((SMOOTHED(0, 8) < SMOOTHED(-2, 14)) << 6) + ((SMOOTHED(12, -2) < SMOOTHED(-15, -6)) << 5) + ((SMOOTHED(4, 12) < SMOOTHED(0, -21)) << 4) + ((SMOOTHED(17, -4) < SMOOTHED(-6, -7)) << 3) + ((SMOOTHED(-10, -9) < SMOOTHED(-14, -7)) << 2) + ((SMOOTHED(-15, -10) < SMOOTHED(-15, -14)) << 1) + ((SMOOTHED(-7, -5) < SMOOTHED(5, -12)) << 0));
-    desc[29] = (uchar)(((SMOOTHED(-4, 0) < SMOOTHED(15, -4)) << 7) + ((SMOOTHED(5, 2) < SMOOTHED(-6, -23)) << 6) + ((SMOOTHED(-4, -21) < SMOOTHED(-6, 4)) << 5) + ((SMOOTHED(-10, 5) < SMOOTHED(-15, 6)) << 4) + ((SMOOTHED(4, -3) < SMOOTHED(-1, 5)) << 3) + ((SMOOTHED(-4, 19) < SMOOTHED(-23, -4)) << 2) + ((SMOOTHED(-4, 17) < SMOOTHED(13, -11)) << 1) + ((SMOOTHED(1, 12) < SMOOTHED(4, -14)) << 0));
-    desc[30] = (uchar)(((SMOOTHED(-11, -6) < SMOOTHED(-20, 10)) << 7) + ((SMOOTHED(4, 5) < SMOOTHED(3, 20)) << 6) + ((SMOOTHED(-8, -20) < SMOOTHED(3, 1)) << 5) + ((SMOOTHED(-19, 9) < SMOOTHED(9, -3)) << 4) + ((SMOOTHED(18, 15) < SMOOTHED(11, -4)) << 3) + ((SMOOTHED(12, 16) < SMOOTHED(8, 7)) << 2) + ((SMOOTHED(-14, -8) < SMOOTHED(-3, 9)) << 1) + ((SMOOTHED(-6, 0) < SMOOTHED(2, -4)) << 0));
-    desc[31] = (uchar)(((SMOOTHED(1, -10) < SMOOTHED(-1, 2)) << 7) + ((SMOOTHED(8, -7) < SMOOTHED(-6, 18)) << 6) + ((SMOOTHED(9, 12) < SMOOTHED(-7, -23)) << 5) + ((SMOOTHED(8, -6) < SMOOTHED(5, 2)) << 4) + ((SMOOTHED(-9, 6) < SMOOTHED(-12, -7)) << 3) + ((SMOOTHED(-1, -2) < SMOOTHED(-7, 2)) << 2) + ((SMOOTHED(9, 9) < SMOOTHED(7, 15)) << 1) + ((SMOOTHED(6, 2) < SMOOTHED(-6, 6)) << 0));
-#undef SMOOTHED
diff --git a/modules/features2d/src/generated_64.i b/modules/features2d/src/generated_64.i
deleted file mode 100644 (file)
index 2262e2d..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-// Code generated with '$ scripts/generate_code.py src/test_pairs.txt 64'
-#define SMOOTHED(y,x) smoothedSum(sum, pt, y, x)
-    desc[0] = (uchar)(((SMOOTHED(-2, -1) < SMOOTHED(7, -1)) << 7) + ((SMOOTHED(-14, -1) < SMOOTHED(-3, 3)) << 6) + ((SMOOTHED(1, -2) < SMOOTHED(11, 2)) << 5) + ((SMOOTHED(1, 6) < SMOOTHED(-10, -7)) << 4) + ((SMOOTHED(13, 2) < SMOOTHED(-1, 0)) << 3) + ((SMOOTHED(-14, 5) < SMOOTHED(5, -3)) << 2) + ((SMOOTHED(-2, 8) < SMOOTHED(2, 4)) << 1) + ((SMOOTHED(-11, 8) < SMOOTHED(-15, 5)) << 0));
-    desc[1] = (uchar)(((SMOOTHED(-6, -23) < SMOOTHED(8, -9)) << 7) + ((SMOOTHED(-12, 6) < SMOOTHED(-10, 8)) << 6) + ((SMOOTHED(-3, -1) < SMOOTHED(8, 1)) << 5) + ((SMOOTHED(3, 6) < SMOOTHED(5, 6)) << 4) + ((SMOOTHED(-7, -6) < SMOOTHED(5, -5)) << 3) + ((SMOOTHED(22, -2) < SMOOTHED(-11, -8)) << 2) + ((SMOOTHED(14, 7) < SMOOTHED(8, 5)) << 1) + ((SMOOTHED(-1, 14) < SMOOTHED(-5, -14)) << 0));
-    desc[2] = (uchar)(((SMOOTHED(-14, 9) < SMOOTHED(2, 0)) << 7) + ((SMOOTHED(7, -3) < SMOOTHED(22, 6)) << 6) + ((SMOOTHED(-6, 6) < SMOOTHED(-8, -5)) << 5) + ((SMOOTHED(-5, 9) < SMOOTHED(7, -1)) << 4) + ((SMOOTHED(-3, -7) < SMOOTHED(-10, -18)) << 3) + ((SMOOTHED(4, -5) < SMOOTHED(0, 11)) << 2) + ((SMOOTHED(2, 3) < SMOOTHED(9, 10)) << 1) + ((SMOOTHED(-10, 3) < SMOOTHED(4, 9)) << 0));
-    desc[3] = (uchar)(((SMOOTHED(0, 12) < SMOOTHED(-3, 19)) << 7) + ((SMOOTHED(1, 15) < SMOOTHED(-11, -5)) << 6) + ((SMOOTHED(14, -1) < SMOOTHED(7, 8)) << 5) + ((SMOOTHED(7, -23) < SMOOTHED(-5, 5)) << 4) + ((SMOOTHED(0, -6) < SMOOTHED(-10, 17)) << 3) + ((SMOOTHED(13, -4) < SMOOTHED(-3, -4)) << 2) + ((SMOOTHED(-12, 1) < SMOOTHED(-12, 2)) << 1) + ((SMOOTHED(0, 8) < SMOOTHED(3, 22)) << 0));
-    desc[4] = (uchar)(((SMOOTHED(-13, 13) < SMOOTHED(3, -1)) << 7) + ((SMOOTHED(-16, 17) < SMOOTHED(6, 10)) << 6) + ((SMOOTHED(7, 15) < SMOOTHED(-5, 0)) << 5) + ((SMOOTHED(2, -12) < SMOOTHED(19, -2)) << 4) + ((SMOOTHED(3, -6) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(8, 3) < SMOOTHED(0, 14)) << 2) + ((SMOOTHED(4, -11) < SMOOTHED(5, 5)) << 1) + ((SMOOTHED(11, -7) < SMOOTHED(7, 1)) << 0));
-    desc[5] = (uchar)(((SMOOTHED(6, 12) < SMOOTHED(21, 3)) << 7) + ((SMOOTHED(-3, 2) < SMOOTHED(14, 1)) << 6) + ((SMOOTHED(5, 1) < SMOOTHED(-5, 11)) << 5) + ((SMOOTHED(3, -17) < SMOOTHED(-6, 2)) << 4) + ((SMOOTHED(6, 8) < SMOOTHED(5, -10)) << 3) + ((SMOOTHED(-14, -2) < SMOOTHED(0, 4)) << 2) + ((SMOOTHED(5, -7) < SMOOTHED(-6, 5)) << 1) + ((SMOOTHED(10, 4) < SMOOTHED(4, -7)) << 0));
-    desc[6] = (uchar)(((SMOOTHED(22, 0) < SMOOTHED(7, -18)) << 7) + ((SMOOTHED(-1, -3) < SMOOTHED(0, 18)) << 6) + ((SMOOTHED(-4, 22) < SMOOTHED(-5, 3)) << 5) + ((SMOOTHED(1, -7) < SMOOTHED(2, -3)) << 4) + ((SMOOTHED(19, -20) < SMOOTHED(17, -2)) << 3) + ((SMOOTHED(3, -10) < SMOOTHED(-8, 24)) << 2) + ((SMOOTHED(-5, -14) < SMOOTHED(7, 5)) << 1) + ((SMOOTHED(-2, 12) < SMOOTHED(-4, -15)) << 0));
-    desc[7] = (uchar)(((SMOOTHED(4, 12) < SMOOTHED(0, -19)) << 7) + ((SMOOTHED(20, 13) < SMOOTHED(3, 5)) << 6) + ((SMOOTHED(-8, -12) < SMOOTHED(5, 0)) << 5) + ((SMOOTHED(-5, 6) < SMOOTHED(-7, -11)) << 4) + ((SMOOTHED(6, -11) < SMOOTHED(-3, -22)) << 3) + ((SMOOTHED(15, 4) < SMOOTHED(10, 1)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(15, -6)) << 1) + ((SMOOTHED(5, 10) < SMOOTHED(0, 24)) << 0));
-    desc[8] = (uchar)(((SMOOTHED(3, 6) < SMOOTHED(22, -2)) << 7) + ((SMOOTHED(-13, 14) < SMOOTHED(4, -4)) << 6) + ((SMOOTHED(-13, 8) < SMOOTHED(-18, -22)) << 5) + ((SMOOTHED(-1, -1) < SMOOTHED(-7, 3)) << 4) + ((SMOOTHED(-19, -12) < SMOOTHED(4, 3)) << 3) + ((SMOOTHED(8, 10) < SMOOTHED(13, -2)) << 2) + ((SMOOTHED(-6, -1) < SMOOTHED(-6, -5)) << 1) + ((SMOOTHED(2, -21) < SMOOTHED(-3, 2)) << 0));
-    desc[9] = (uchar)(((SMOOTHED(4, -7) < SMOOTHED(0, 16)) << 7) + ((SMOOTHED(-6, -5) < SMOOTHED(-12, -1)) << 6) + ((SMOOTHED(1, -1) < SMOOTHED(9, 18)) << 5) + ((SMOOTHED(-7, 10) < SMOOTHED(-11, 6)) << 4) + ((SMOOTHED(4, 3) < SMOOTHED(19, -7)) << 3) + ((SMOOTHED(-18, 5) < SMOOTHED(-4, 5)) << 2) + ((SMOOTHED(4, 0) < SMOOTHED(-20, 4)) << 1) + ((SMOOTHED(7, -11) < SMOOTHED(18, 12)) << 0));
-    desc[10] = (uchar)(((SMOOTHED(-20, 17) < SMOOTHED(-18, 7)) << 7) + ((SMOOTHED(2, 15) < SMOOTHED(19, -11)) << 6) + ((SMOOTHED(-18, 6) < SMOOTHED(-7, 3)) << 5) + ((SMOOTHED(-4, 1) < SMOOTHED(-14, 13)) << 4) + ((SMOOTHED(17, 3) < SMOOTHED(2, -8)) << 3) + ((SMOOTHED(-7, 2) < SMOOTHED(1, 6)) << 2) + ((SMOOTHED(17, -9) < SMOOTHED(-2, 8)) << 1) + ((SMOOTHED(-8, -6) < SMOOTHED(-1, 12)) << 0));
-    desc[11] = (uchar)(((SMOOTHED(-2, 4) < SMOOTHED(-1, 6)) << 7) + ((SMOOTHED(-2, 7) < SMOOTHED(6, 8)) << 6) + ((SMOOTHED(-8, -1) < SMOOTHED(-7, -9)) << 5) + ((SMOOTHED(8, -9) < SMOOTHED(15, 0)) << 4) + ((SMOOTHED(0, 22) < SMOOTHED(-4, -15)) << 3) + ((SMOOTHED(-14, -1) < SMOOTHED(3, -2)) << 2) + ((SMOOTHED(-7, -4) < SMOOTHED(17, -7)) << 1) + ((SMOOTHED(-8, -2) < SMOOTHED(9, -4)) << 0));
-    desc[12] = (uchar)(((SMOOTHED(5, -7) < SMOOTHED(7, 7)) << 7) + ((SMOOTHED(-5, 13) < SMOOTHED(-8, 11)) << 6) + ((SMOOTHED(11, -4) < SMOOTHED(0, 8)) << 5) + ((SMOOTHED(5, -11) < SMOOTHED(-9, -6)) << 4) + ((SMOOTHED(2, -6) < SMOOTHED(3, -20)) << 3) + ((SMOOTHED(-6, 2) < SMOOTHED(6, 10)) << 2) + ((SMOOTHED(-6, -6) < SMOOTHED(-15, 7)) << 1) + ((SMOOTHED(-6, -3) < SMOOTHED(2, 1)) << 0));
-    desc[13] = (uchar)(((SMOOTHED(11, 0) < SMOOTHED(-3, 2)) << 7) + ((SMOOTHED(7, -12) < SMOOTHED(14, 5)) << 6) + ((SMOOTHED(0, -7) < SMOOTHED(-1, -1)) << 5) + ((SMOOTHED(-16, 0) < SMOOTHED(6, 8)) << 4) + ((SMOOTHED(22, 11) < SMOOTHED(0, -3)) << 3) + ((SMOOTHED(19, 0) < SMOOTHED(5, -17)) << 2) + ((SMOOTHED(-23, -14) < SMOOTHED(-13, -19)) << 1) + ((SMOOTHED(-8, 10) < SMOOTHED(-11, -2)) << 0));
-    desc[14] = (uchar)(((SMOOTHED(-11, 6) < SMOOTHED(-10, 13)) << 7) + ((SMOOTHED(1, -7) < SMOOTHED(14, 0)) << 6) + ((SMOOTHED(-12, 1) < SMOOTHED(-5, -5)) << 5) + ((SMOOTHED(4, 7) < SMOOTHED(8, -1)) << 4) + ((SMOOTHED(-1, -5) < SMOOTHED(15, 2)) << 3) + ((SMOOTHED(-3, -1) < SMOOTHED(7, -10)) << 2) + ((SMOOTHED(3, -6) < SMOOTHED(10, -18)) << 1) + ((SMOOTHED(-7, -13) < SMOOTHED(-13, 10)) << 0));
-    desc[15] = (uchar)(((SMOOTHED(1, -1) < SMOOTHED(13, -10)) << 7) + ((SMOOTHED(-19, 14) < SMOOTHED(8, -14)) << 6) + ((SMOOTHED(-4, -13) < SMOOTHED(7, 1)) << 5) + ((SMOOTHED(1, -2) < SMOOTHED(12, -7)) << 4) + ((SMOOTHED(3, -5) < SMOOTHED(1, -5)) << 3) + ((SMOOTHED(-2, -2) < SMOOTHED(8, -10)) << 2) + ((SMOOTHED(2, 14) < SMOOTHED(8, 7)) << 1) + ((SMOOTHED(3, 9) < SMOOTHED(8, 2)) << 0));
-    desc[16] = (uchar)(((SMOOTHED(-9, 1) < SMOOTHED(-18, 0)) << 7) + ((SMOOTHED(4, 0) < SMOOTHED(1, 12)) << 6) + ((SMOOTHED(0, 9) < SMOOTHED(-14, -10)) << 5) + ((SMOOTHED(-13, -9) < SMOOTHED(-2, 6)) << 4) + ((SMOOTHED(1, 5) < SMOOTHED(10, 10)) << 3) + ((SMOOTHED(-3, -6) < SMOOTHED(-16, -5)) << 2) + ((SMOOTHED(11, 6) < SMOOTHED(-5, 0)) << 1) + ((SMOOTHED(-23, 10) < SMOOTHED(1, 2)) << 0));
-    desc[17] = (uchar)(((SMOOTHED(13, -5) < SMOOTHED(-3, 9)) << 7) + ((SMOOTHED(-4, -1) < SMOOTHED(-13, -5)) << 6) + ((SMOOTHED(10, 13) < SMOOTHED(-11, 8)) << 5) + ((SMOOTHED(19, 20) < SMOOTHED(-9, 2)) << 4) + ((SMOOTHED(4, -8) < SMOOTHED(0, -9)) << 3) + ((SMOOTHED(-14, 10) < SMOOTHED(15, 19)) << 2) + ((SMOOTHED(-14, -12) < SMOOTHED(-10, -3)) << 1) + ((SMOOTHED(-23, -3) < SMOOTHED(17, -2)) << 0));
-    desc[18] = (uchar)(((SMOOTHED(-3, -11) < SMOOTHED(6, -14)) << 7) + ((SMOOTHED(19, -2) < SMOOTHED(-4, 2)) << 6) + ((SMOOTHED(-5, 5) < SMOOTHED(3, -13)) << 5) + ((SMOOTHED(2, -2) < SMOOTHED(-5, 4)) << 4) + ((SMOOTHED(17, 4) < SMOOTHED(17, -11)) << 3) + ((SMOOTHED(-7, -2) < SMOOTHED(1, 23)) << 2) + ((SMOOTHED(8, 13) < SMOOTHED(1, -16)) << 1) + ((SMOOTHED(-13, -5) < SMOOTHED(1, -17)) << 0));
-    desc[19] = (uchar)(((SMOOTHED(4, 6) < SMOOTHED(-8, -3)) << 7) + ((SMOOTHED(-5, -9) < SMOOTHED(-2, -10)) << 6) + ((SMOOTHED(-9, 0) < SMOOTHED(-7, -2)) << 5) + ((SMOOTHED(5, 0) < SMOOTHED(5, 2)) << 4) + ((SMOOTHED(-4, -16) < SMOOTHED(6, 3)) << 3) + ((SMOOTHED(2, -15) < SMOOTHED(-2, 12)) << 2) + ((SMOOTHED(4, -1) < SMOOTHED(6, 2)) << 1) + ((SMOOTHED(1, 1) < SMOOTHED(-2, -8)) << 0));
-    desc[20] = (uchar)(((SMOOTHED(-2, 12) < SMOOTHED(-5, -2)) << 7) + ((SMOOTHED(-8, 8) < SMOOTHED(-9, 9)) << 6) + ((SMOOTHED(2, -10) < SMOOTHED(3, 1)) << 5) + ((SMOOTHED(-4, 10) < SMOOTHED(-9, 4)) << 4) + ((SMOOTHED(6, 12) < SMOOTHED(2, 5)) << 3) + ((SMOOTHED(-3, -8) < SMOOTHED(0, 5)) << 2) + ((SMOOTHED(-13, 1) < SMOOTHED(-7, 2)) << 1) + ((SMOOTHED(-1, -10) < SMOOTHED(7, -18)) << 0));
-    desc[21] = (uchar)(((SMOOTHED(-1, 8) < SMOOTHED(-9, -10)) << 7) + ((SMOOTHED(-23, -1) < SMOOTHED(6, 2)) << 6) + ((SMOOTHED(-5, -3) < SMOOTHED(3, 2)) << 5) + ((SMOOTHED(0, 11) < SMOOTHED(-4, -7)) << 4) + ((SMOOTHED(15, 2) < SMOOTHED(-10, -3)) << 3) + ((SMOOTHED(-20, -8) < SMOOTHED(-13, 3)) << 2) + ((SMOOTHED(-19, -12) < SMOOTHED(5, -11)) << 1) + ((SMOOTHED(-17, -13) < SMOOTHED(-3, 2)) << 0));
-    desc[22] = (uchar)(((SMOOTHED(7, 4) < SMOOTHED(-12, 0)) << 7) + ((SMOOTHED(5, -1) < SMOOTHED(-14, -6)) << 6) + ((SMOOTHED(-4, 11) < SMOOTHED(0, -4)) << 5) + ((SMOOTHED(3, 10) < SMOOTHED(7, -3)) << 4) + ((SMOOTHED(13, 21) < SMOOTHED(-11, 6)) << 3) + ((SMOOTHED(-12, 24) < SMOOTHED(-7, -4)) << 2) + ((SMOOTHED(4, 16) < SMOOTHED(3, -14)) << 1) + ((SMOOTHED(-3, 5) < SMOOTHED(-7, -12)) << 0));
-    desc[23] = (uchar)(((SMOOTHED(0, -4) < SMOOTHED(7, -5)) << 7) + ((SMOOTHED(-17, -9) < SMOOTHED(13, -7)) << 6) + ((SMOOTHED(22, -6) < SMOOTHED(-11, 5)) << 5) + ((SMOOTHED(2, -8) < SMOOTHED(23, -11)) << 4) + ((SMOOTHED(7, -10) < SMOOTHED(-1, 14)) << 3) + ((SMOOTHED(-3, -10) < SMOOTHED(8, 3)) << 2) + ((SMOOTHED(-13, 1) < SMOOTHED(-6, 0)) << 1) + ((SMOOTHED(-7, -21) < SMOOTHED(6, -14)) << 0));
-    desc[24] = (uchar)(((SMOOTHED(18, 19) < SMOOTHED(-4, -6)) << 7) + ((SMOOTHED(10, 7) < SMOOTHED(-1, -4)) << 6) + ((SMOOTHED(-1, 21) < SMOOTHED(1, -5)) << 5) + ((SMOOTHED(-10, 6) < SMOOTHED(-11, -2)) << 4) + ((SMOOTHED(18, -3) < SMOOTHED(-1, 7)) << 3) + ((SMOOTHED(-3, -9) < SMOOTHED(-5, 10)) << 2) + ((SMOOTHED(-13, 14) < SMOOTHED(17, -3)) << 1) + ((SMOOTHED(11, -19) < SMOOTHED(-1, -18)) << 0));
-    desc[25] = (uchar)(((SMOOTHED(8, -2) < SMOOTHED(-18, -23)) << 7) + ((SMOOTHED(0, -5) < SMOOTHED(-2, -9)) << 6) + ((SMOOTHED(-4, -11) < SMOOTHED(2, -8)) << 5) + ((SMOOTHED(14, 6) < SMOOTHED(-3, -6)) << 4) + ((SMOOTHED(-3, 0) < SMOOTHED(-15, 0)) << 3) + ((SMOOTHED(-9, 4) < SMOOTHED(-15, -9)) << 2) + ((SMOOTHED(-1, 11) < SMOOTHED(3, 11)) << 1) + ((SMOOTHED(-10, -16) < SMOOTHED(-7, 7)) << 0));
-    desc[26] = (uchar)(((SMOOTHED(-2, -10) < SMOOTHED(-10, -2)) << 7) + ((SMOOTHED(-5, -3) < SMOOTHED(5, -23)) << 6) + ((SMOOTHED(13, -8) < SMOOTHED(-15, -11)) << 5) + ((SMOOTHED(-15, 11) < SMOOTHED(6, -6)) << 4) + ((SMOOTHED(-16, -3) < SMOOTHED(-2, 2)) << 3) + ((SMOOTHED(6, 12) < SMOOTHED(-16, 24)) << 2) + ((SMOOTHED(-10, 0) < SMOOTHED(8, 11)) << 1) + ((SMOOTHED(-7, 7) < SMOOTHED(-19, -7)) << 0));
-    desc[27] = (uchar)(((SMOOTHED(5, 16) < SMOOTHED(9, -3)) << 7) + ((SMOOTHED(9, 7) < SMOOTHED(-7, -16)) << 6) + ((SMOOTHED(3, 2) < SMOOTHED(-10, 9)) << 5) + ((SMOOTHED(21, 1) < SMOOTHED(8, 7)) << 4) + ((SMOOTHED(7, 0) < SMOOTHED(1, 17)) << 3) + ((SMOOTHED(-8, 12) < SMOOTHED(9, 6)) << 2) + ((SMOOTHED(11, -7) < SMOOTHED(-8, -6)) << 1) + ((SMOOTHED(19, 0) < SMOOTHED(9, 3)) << 0));
-    desc[28] = (uchar)(((SMOOTHED(1, -7) < SMOOTHED(-5, -11)) << 7) + ((SMOOTHED(0, 8) < SMOOTHED(-2, 14)) << 6) + ((SMOOTHED(12, -2) < SMOOTHED(-15, -6)) << 5) + ((SMOOTHED(4, 12) < SMOOTHED(0, -21)) << 4) + ((SMOOTHED(17, -4) < SMOOTHED(-6, -7)) << 3) + ((SMOOTHED(-10, -9) < SMOOTHED(-14, -7)) << 2) + ((SMOOTHED(-15, -10) < SMOOTHED(-15, -14)) << 1) + ((SMOOTHED(-7, -5) < SMOOTHED(5, -12)) << 0));
-    desc[29] = (uchar)(((SMOOTHED(-4, 0) < SMOOTHED(15, -4)) << 7) + ((SMOOTHED(5, 2) < SMOOTHED(-6, -23)) << 6) + ((SMOOTHED(-4, -21) < SMOOTHED(-6, 4)) << 5) + ((SMOOTHED(-10, 5) < SMOOTHED(-15, 6)) << 4) + ((SMOOTHED(4, -3) < SMOOTHED(-1, 5)) << 3) + ((SMOOTHED(-4, 19) < SMOOTHED(-23, -4)) << 2) + ((SMOOTHED(-4, 17) < SMOOTHED(13, -11)) << 1) + ((SMOOTHED(1, 12) < SMOOTHED(4, -14)) << 0));
-    desc[30] = (uchar)(((SMOOTHED(-11, -6) < SMOOTHED(-20, 10)) << 7) + ((SMOOTHED(4, 5) < SMOOTHED(3, 20)) << 6) + ((SMOOTHED(-8, -20) < SMOOTHED(3, 1)) << 5) + ((SMOOTHED(-19, 9) < SMOOTHED(9, -3)) << 4) + ((SMOOTHED(18, 15) < SMOOTHED(11, -4)) << 3) + ((SMOOTHED(12, 16) < SMOOTHED(8, 7)) << 2) + ((SMOOTHED(-14, -8) < SMOOTHED(-3, 9)) << 1) + ((SMOOTHED(-6, 0) < SMOOTHED(2, -4)) << 0));
-    desc[31] = (uchar)(((SMOOTHED(1, -10) < SMOOTHED(-1, 2)) << 7) + ((SMOOTHED(8, -7) < SMOOTHED(-6, 18)) << 6) + ((SMOOTHED(9, 12) < SMOOTHED(-7, -23)) << 5) + ((SMOOTHED(8, -6) < SMOOTHED(5, 2)) << 4) + ((SMOOTHED(-9, 6) < SMOOTHED(-12, -7)) << 3) + ((SMOOTHED(-1, -2) < SMOOTHED(-7, 2)) << 2) + ((SMOOTHED(9, 9) < SMOOTHED(7, 15)) << 1) + ((SMOOTHED(6, 2) < SMOOTHED(-6, 6)) << 0));
-    desc[32] = (uchar)(((SMOOTHED(16, 12) < SMOOTHED(0, 19)) << 7) + ((SMOOTHED(4, 3) < SMOOTHED(6, 0)) << 6) + ((SMOOTHED(-2, -1) < SMOOTHED(2, 17)) << 5) + ((SMOOTHED(8, 1) < SMOOTHED(3, 1)) << 4) + ((SMOOTHED(-12, -1) < SMOOTHED(-11, 0)) << 3) + ((SMOOTHED(-11, 2) < SMOOTHED(7, 9)) << 2) + ((SMOOTHED(-1, 3) < SMOOTHED(-19, 4)) << 1) + ((SMOOTHED(-1, -11) < SMOOTHED(-1, 3)) << 0));
-    desc[33] = (uchar)(((SMOOTHED(1, -10) < SMOOTHED(-10, -4)) << 7) + ((SMOOTHED(-2, 3) < SMOOTHED(6, 11)) << 6) + ((SMOOTHED(3, 7) < SMOOTHED(-9, -8)) << 5) + ((SMOOTHED(24, -14) < SMOOTHED(-2, -10)) << 4) + ((SMOOTHED(-3, -3) < SMOOTHED(-18, -6)) << 3) + ((SMOOTHED(-13, -10) < SMOOTHED(-7, -1)) << 2) + ((SMOOTHED(2, -7) < SMOOTHED(9, -6)) << 1) + ((SMOOTHED(2, -4) < SMOOTHED(6, -13)) << 0));
-    desc[34] = (uchar)(((SMOOTHED(4, -4) < SMOOTHED(-2, 3)) << 7) + ((SMOOTHED(-4, 2) < SMOOTHED(9, 13)) << 6) + ((SMOOTHED(-11, 5) < SMOOTHED(-6, -11)) << 5) + ((SMOOTHED(4, -2) < SMOOTHED(11, -9)) << 4) + ((SMOOTHED(-19, 0) < SMOOTHED(-23, -5)) << 3) + ((SMOOTHED(-5, -7) < SMOOTHED(-3, -6)) << 2) + ((SMOOTHED(-6, -4) < SMOOTHED(12, 14)) << 1) + ((SMOOTHED(12, -11) < SMOOTHED(-8, -16)) << 0));
-    desc[35] = (uchar)(((SMOOTHED(-21, 15) < SMOOTHED(-12, 6)) << 7) + ((SMOOTHED(-2, -1) < SMOOTHED(-8, 16)) << 6) + ((SMOOTHED(6, -1) < SMOOTHED(-8, -2)) << 5) + ((SMOOTHED(1, -1) < SMOOTHED(-9, 8)) << 4) + ((SMOOTHED(3, -4) < SMOOTHED(-2, -2)) << 3) + ((SMOOTHED(-7, 0) < SMOOTHED(4, -8)) << 2) + ((SMOOTHED(11, -11) < SMOOTHED(-12, 2)) << 1) + ((SMOOTHED(2, 3) < SMOOTHED(11, 7)) << 0));
-    desc[36] = (uchar)(((SMOOTHED(-7, -4) < SMOOTHED(-9, -6)) << 7) + ((SMOOTHED(3, -7) < SMOOTHED(-5, 0)) << 6) + ((SMOOTHED(3, -7) < SMOOTHED(-10, -5)) << 5) + ((SMOOTHED(-3, -1) < SMOOTHED(8, -10)) << 4) + ((SMOOTHED(0, 8) < SMOOTHED(5, 1)) << 3) + ((SMOOTHED(9, 0) < SMOOTHED(1, 16)) << 2) + ((SMOOTHED(8, 4) < SMOOTHED(-11, -3)) << 1) + ((SMOOTHED(-15, 9) < SMOOTHED(8, 17)) << 0));
-    desc[37] = (uchar)(((SMOOTHED(0, 2) < SMOOTHED(-9, 17)) << 7) + ((SMOOTHED(-6, -11) < SMOOTHED(-10, -3)) << 6) + ((SMOOTHED(1, 1) < SMOOTHED(15, -8)) << 5) + ((SMOOTHED(-12, -13) < SMOOTHED(-2, 4)) << 4) + ((SMOOTHED(-6, 4) < SMOOTHED(-6, -10)) << 3) + ((SMOOTHED(5, -7) < SMOOTHED(7, -5)) << 2) + ((SMOOTHED(10, 6) < SMOOTHED(8, 9)) << 1) + ((SMOOTHED(-5, 7) < SMOOTHED(-18, -3)) << 0));
-    desc[38] = (uchar)(((SMOOTHED(-6, 3) < SMOOTHED(5, 4)) << 7) + ((SMOOTHED(-10, -13) < SMOOTHED(-5, -3)) << 6) + ((SMOOTHED(-11, 2) < SMOOTHED(-16, 0)) << 5) + ((SMOOTHED(7, -21) < SMOOTHED(-5, -13)) << 4) + ((SMOOTHED(-14, -14) < SMOOTHED(-4, -4)) << 3) + ((SMOOTHED(4, 9) < SMOOTHED(7, -3)) << 2) + ((SMOOTHED(4, 11) < SMOOTHED(10, -4)) << 1) + ((SMOOTHED(6, 17) < SMOOTHED(9, 17)) << 0));
-    desc[39] = (uchar)(((SMOOTHED(-10, 8) < SMOOTHED(0, -11)) << 7) + ((SMOOTHED(-6, -16) < SMOOTHED(-6, 8)) << 6) + ((SMOOTHED(-13, 5) < SMOOTHED(10, -5)) << 5) + ((SMOOTHED(3, 2) < SMOOTHED(12, 16)) << 4) + ((SMOOTHED(13, -8) < SMOOTHED(0, -6)) << 3) + ((SMOOTHED(10, 0) < SMOOTHED(4, -11)) << 2) + ((SMOOTHED(8, 5) < SMOOTHED(10, -2)) << 1) + ((SMOOTHED(11, -7) < SMOOTHED(-13, 3)) << 0));
-    desc[40] = (uchar)(((SMOOTHED(2, 4) < SMOOTHED(-7, -3)) << 7) + ((SMOOTHED(-14, -2) < SMOOTHED(-11, 16)) << 6) + ((SMOOTHED(11, -6) < SMOOTHED(7, 6)) << 5) + ((SMOOTHED(-3, 15) < SMOOTHED(8, -10)) << 4) + ((SMOOTHED(-3, 8) < SMOOTHED(12, -12)) << 3) + ((SMOOTHED(-13, 6) < SMOOTHED(-14, 7)) << 2) + ((SMOOTHED(-11, -5) < SMOOTHED(-8, -6)) << 1) + ((SMOOTHED(7, -6) < SMOOTHED(6, 3)) << 0));
-    desc[41] = (uchar)(((SMOOTHED(-4, 10) < SMOOTHED(5, 1)) << 7) + ((SMOOTHED(9, 16) < SMOOTHED(10, 13)) << 6) + ((SMOOTHED(-17, 10) < SMOOTHED(2, 8)) << 5) + ((SMOOTHED(-5, 1) < SMOOTHED(4, -4)) << 4) + ((SMOOTHED(-14, 8) < SMOOTHED(-5, 2)) << 3) + ((SMOOTHED(4, -9) < SMOOTHED(-6, -3)) << 2) + ((SMOOTHED(3, -7) < SMOOTHED(-10, 0)) << 1) + ((SMOOTHED(-2, -8) < SMOOTHED(-10, 4)) << 0));
-    desc[42] = (uchar)(((SMOOTHED(-8, 5) < SMOOTHED(-9, 24)) << 7) + ((SMOOTHED(2, -8) < SMOOTHED(8, -9)) << 6) + ((SMOOTHED(-4, 17) < SMOOTHED(-5, 2)) << 5) + ((SMOOTHED(14, 0) < SMOOTHED(-9, 9)) << 4) + ((SMOOTHED(11, 15) < SMOOTHED(-6, 5)) << 3) + ((SMOOTHED(-8, 1) < SMOOTHED(-3, 4)) << 2) + ((SMOOTHED(9, -21) < SMOOTHED(10, 2)) << 1) + ((SMOOTHED(2, -1) < SMOOTHED(4, 11)) << 0));
-    desc[43] = (uchar)(((SMOOTHED(24, 3) < SMOOTHED(2, -2)) << 7) + ((SMOOTHED(-8, 17) < SMOOTHED(-14, -10)) << 6) + ((SMOOTHED(6, 5) < SMOOTHED(-13, 7)) << 5) + ((SMOOTHED(11, 10) < SMOOTHED(0, -1)) << 4) + ((SMOOTHED(4, 6) < SMOOTHED(-10, 6)) << 3) + ((SMOOTHED(-12, -2) < SMOOTHED(5, 6)) << 2) + ((SMOOTHED(3, -1) < SMOOTHED(8, -15)) << 1) + ((SMOOTHED(1, -4) < SMOOTHED(-7, 11)) << 0));
-    desc[44] = (uchar)(((SMOOTHED(1, 11) < SMOOTHED(5, 0)) << 7) + ((SMOOTHED(6, -12) < SMOOTHED(10, 1)) << 6) + ((SMOOTHED(-3, -2) < SMOOTHED(-1, 4)) << 5) + ((SMOOTHED(-2, -11) < SMOOTHED(-1, 12)) << 4) + ((SMOOTHED(7, -8) < SMOOTHED(-20, -18)) << 3) + ((SMOOTHED(2, 0) < SMOOTHED(-9, 2)) << 2) + ((SMOOTHED(-13, -1) < SMOOTHED(-16, 2)) << 1) + ((SMOOTHED(3, -1) < SMOOTHED(-5, -17)) << 0));
-    desc[45] = (uchar)(((SMOOTHED(15, 8) < SMOOTHED(3, -14)) << 7) + ((SMOOTHED(-13, -12) < SMOOTHED(6, 15)) << 6) + ((SMOOTHED(2, -8) < SMOOTHED(2, 6)) << 5) + ((SMOOTHED(6, 22) < SMOOTHED(-3, -23)) << 4) + ((SMOOTHED(-2, -7) < SMOOTHED(-6, 0)) << 3) + ((SMOOTHED(13, -10) < SMOOTHED(-6, 6)) << 2) + ((SMOOTHED(6, 7) < SMOOTHED(-10, 12)) << 1) + ((SMOOTHED(-6, 7) < SMOOTHED(-2, 11)) << 0));
-    desc[46] = (uchar)(((SMOOTHED(0, -22) < SMOOTHED(-2, -17)) << 7) + ((SMOOTHED(-4, -1) < SMOOTHED(-11, -14)) << 6) + ((SMOOTHED(-2, -8) < SMOOTHED(7, 12)) << 5) + ((SMOOTHED(12, -5) < SMOOTHED(7, -13)) << 4) + ((SMOOTHED(2, -2) < SMOOTHED(-7, 6)) << 3) + ((SMOOTHED(0, 8) < SMOOTHED(-3, 23)) << 2) + ((SMOOTHED(6, 12) < SMOOTHED(13, -11)) << 1) + ((SMOOTHED(-21, -10) < SMOOTHED(10, 8)) << 0));
-    desc[47] = (uchar)(((SMOOTHED(-3, 0) < SMOOTHED(7, 15)) << 7) + ((SMOOTHED(7, -6) < SMOOTHED(-5, -12)) << 6) + ((SMOOTHED(-21, -10) < SMOOTHED(12, -11)) << 5) + ((SMOOTHED(-5, -11) < SMOOTHED(8, -11)) << 4) + ((SMOOTHED(5, 0) < SMOOTHED(-11, -1)) << 3) + ((SMOOTHED(8, -9) < SMOOTHED(7, -1)) << 2) + ((SMOOTHED(11, -23) < SMOOTHED(21, -5)) << 1) + ((SMOOTHED(0, -5) < SMOOTHED(-8, 6)) << 0));
-    desc[48] = (uchar)(((SMOOTHED(-6, 8) < SMOOTHED(8, 12)) << 7) + ((SMOOTHED(-7, 5) < SMOOTHED(3, -2)) << 6) + ((SMOOTHED(-5, -20) < SMOOTHED(-12, 9)) << 5) + ((SMOOTHED(-6, 12) < SMOOTHED(-11, 3)) << 4) + ((SMOOTHED(4, 5) < SMOOTHED(13, 11)) << 3) + ((SMOOTHED(2, 12) < SMOOTHED(13, -12)) << 2) + ((SMOOTHED(-4, -13) < SMOOTHED(4, 7)) << 1) + ((SMOOTHED(0, 15) < SMOOTHED(-3, -16)) << 0));
-    desc[49] = (uchar)(((SMOOTHED(-3, 2) < SMOOTHED(-2, 14)) << 7) + ((SMOOTHED(4, -14) < SMOOTHED(16, -11)) << 6) + ((SMOOTHED(-13, 3) < SMOOTHED(23, 10)) << 5) + ((SMOOTHED(9, -19) < SMOOTHED(2, 5)) << 4) + ((SMOOTHED(5, 3) < SMOOTHED(14, -7)) << 3) + ((SMOOTHED(19, -13) < SMOOTHED(-11, 15)) << 2) + ((SMOOTHED(14, 0) < SMOOTHED(-2, -5)) << 1) + ((SMOOTHED(11, -4) < SMOOTHED(0, -6)) << 0));
-    desc[50] = (uchar)(((SMOOTHED(-2, 5) < SMOOTHED(-13, -8)) << 7) + ((SMOOTHED(-11, -15) < SMOOTHED(-7, -17)) << 6) + ((SMOOTHED(1, 3) < SMOOTHED(-10, -8)) << 5) + ((SMOOTHED(-13, -10) < SMOOTHED(7, -12)) << 4) + ((SMOOTHED(0, -13) < SMOOTHED(23, -6)) << 3) + ((SMOOTHED(2, -17) < SMOOTHED(-7, -3)) << 2) + ((SMOOTHED(1, 3) < SMOOTHED(4, -10)) << 1) + ((SMOOTHED(13, 4) < SMOOTHED(14, -6)) << 0));
-    desc[51] = (uchar)(((SMOOTHED(-19, -2) < SMOOTHED(-1, 5)) << 7) + ((SMOOTHED(9, -8) < SMOOTHED(10, -5)) << 6) + ((SMOOTHED(7, -1) < SMOOTHED(5, 7)) << 5) + ((SMOOTHED(9, -10) < SMOOTHED(19, 0)) << 4) + ((SMOOTHED(7, 5) < SMOOTHED(-4, -7)) << 3) + ((SMOOTHED(-11, 1) < SMOOTHED(-1, -11)) << 2) + ((SMOOTHED(2, -1) < SMOOTHED(-4, 11)) << 1) + ((SMOOTHED(-1, 7) < SMOOTHED(2, -2)) << 0));
-    desc[52] = (uchar)(((SMOOTHED(1, -20) < SMOOTHED(-9, -6)) << 7) + ((SMOOTHED(-4, -18) < SMOOTHED(8, -18)) << 6) + ((SMOOTHED(-16, -2) < SMOOTHED(7, -6)) << 5) + ((SMOOTHED(-3, -6) < SMOOTHED(-1, -4)) << 4) + ((SMOOTHED(0, -16) < SMOOTHED(24, -5)) << 3) + ((SMOOTHED(-4, -2) < SMOOTHED(-1, 9)) << 2) + ((SMOOTHED(-8, 2) < SMOOTHED(-6, 15)) << 1) + ((SMOOTHED(11, 4) < SMOOTHED(0, -3)) << 0));
-    desc[53] = (uchar)(((SMOOTHED(7, 6) < SMOOTHED(2, -10)) << 7) + ((SMOOTHED(-7, -9) < SMOOTHED(12, -6)) << 6) + ((SMOOTHED(24, 15) < SMOOTHED(-8, -1)) << 5) + ((SMOOTHED(15, -9) < SMOOTHED(-3, -15)) << 4) + ((SMOOTHED(17, -5) < SMOOTHED(11, -10)) << 3) + ((SMOOTHED(-2, 13) < SMOOTHED(-15, 4)) << 2) + ((SMOOTHED(-2, -1) < SMOOTHED(4, -23)) << 1) + ((SMOOTHED(-16, 3) < SMOOTHED(-7, -14)) << 0));
-    desc[54] = (uchar)(((SMOOTHED(-3, -5) < SMOOTHED(-10, -9)) << 7) + ((SMOOTHED(-5, 3) < SMOOTHED(-2, -1)) << 6) + ((SMOOTHED(-1, 4) < SMOOTHED(1, 8)) << 5) + ((SMOOTHED(12, 9) < SMOOTHED(9, -14)) << 4) + ((SMOOTHED(-9, 17) < SMOOTHED(-3, 0)) << 3) + ((SMOOTHED(5, 4) < SMOOTHED(13, -6)) << 2) + ((SMOOTHED(-1, -8) < SMOOTHED(19, 10)) << 1) + ((SMOOTHED(8, -5) < SMOOTHED(-15, 2)) << 0));
-    desc[55] = (uchar)(((SMOOTHED(-12, -9) < SMOOTHED(-4, -5)) << 7) + ((SMOOTHED(12, 0) < SMOOTHED(24, 4)) << 6) + ((SMOOTHED(8, -2) < SMOOTHED(14, 4)) << 5) + ((SMOOTHED(8, -4) < SMOOTHED(-7, 16)) << 4) + ((SMOOTHED(5, -1) < SMOOTHED(-8, -4)) << 3) + ((SMOOTHED(-2, 18) < SMOOTHED(-5, 17)) << 2) + ((SMOOTHED(8, -2) < SMOOTHED(-9, -2)) << 1) + ((SMOOTHED(3, -7) < SMOOTHED(1, -6)) << 0));
-    desc[56] = (uchar)(((SMOOTHED(-5, -22) < SMOOTHED(-5, -2)) << 7) + ((SMOOTHED(-8, -10) < SMOOTHED(14, 1)) << 6) + ((SMOOTHED(-3, -13) < SMOOTHED(3, 9)) << 5) + ((SMOOTHED(-4, -1) < SMOOTHED(-1, 0)) << 4) + ((SMOOTHED(-7, -21) < SMOOTHED(12, -19)) << 3) + ((SMOOTHED(-8, 8) < SMOOTHED(24, 8)) << 2) + ((SMOOTHED(12, -6) < SMOOTHED(-2, 3)) << 1) + ((SMOOTHED(-5, -11) < SMOOTHED(-22, -4)) << 0));
-    desc[57] = (uchar)(((SMOOTHED(-3, 5) < SMOOTHED(-4, 4)) << 7) + ((SMOOTHED(-16, 24) < SMOOTHED(7, -9)) << 6) + ((SMOOTHED(-10, 23) < SMOOTHED(-9, 18)) << 5) + ((SMOOTHED(1, 12) < SMOOTHED(17, 21)) << 4) + ((SMOOTHED(24, -6) < SMOOTHED(-3, -11)) << 3) + ((SMOOTHED(-7, 17) < SMOOTHED(1, -6)) << 2) + ((SMOOTHED(4, 4) < SMOOTHED(2, -7)) << 1) + ((SMOOTHED(14, 6) < SMOOTHED(-12, 3)) << 0));
-    desc[58] = (uchar)(((SMOOTHED(-6, 0) < SMOOTHED(-16, 13)) << 7) + ((SMOOTHED(-10, 5) < SMOOTHED(7, 12)) << 6) + ((SMOOTHED(5, 2) < SMOOTHED(6, -3)) << 5) + ((SMOOTHED(7, 0) < SMOOTHED(-23, 1)) << 4) + ((SMOOTHED(15, -5) < SMOOTHED(1, 14)) << 3) + ((SMOOTHED(-3, -1) < SMOOTHED(6, 6)) << 2) + ((SMOOTHED(6, -9) < SMOOTHED(-9, 12)) << 1) + ((SMOOTHED(4, -2) < SMOOTHED(-4, 7)) << 0));
-    desc[59] = (uchar)(((SMOOTHED(-4, -5) < SMOOTHED(4, 4)) << 7) + ((SMOOTHED(-13, 0) < SMOOTHED(6, -10)) << 6) + ((SMOOTHED(2, -12) < SMOOTHED(-6, -3)) << 5) + ((SMOOTHED(16, 0) < SMOOTHED(-3, 3)) << 4) + ((SMOOTHED(5, -14) < SMOOTHED(6, 11)) << 3) + ((SMOOTHED(5, 11) < SMOOTHED(0, -13)) << 2) + ((SMOOTHED(7, 5) < SMOOTHED(-1, -5)) << 1) + ((SMOOTHED(12, 4) < SMOOTHED(6, 10)) << 0));
-    desc[60] = (uchar)(((SMOOTHED(-10, 4) < SMOOTHED(-1, -11)) << 7) + ((SMOOTHED(4, 10) < SMOOTHED(-14, 5)) << 6) + ((SMOOTHED(11, -14) < SMOOTHED(-13, 0)) << 5) + ((SMOOTHED(2, 8) < SMOOTHED(12, 24)) << 4) + ((SMOOTHED(-1, 3) < SMOOTHED(-1, 2)) << 3) + ((SMOOTHED(9, -14) < SMOOTHED(-23, 3)) << 2) + ((SMOOTHED(-8, -6) < SMOOTHED(0, 9)) << 1) + ((SMOOTHED(-15, 14) < SMOOTHED(10, -10)) << 0));
-    desc[61] = (uchar)(((SMOOTHED(-10, -6) < SMOOTHED(-7, -5)) << 7) + ((SMOOTHED(11, 5) < SMOOTHED(-3, -15)) << 6) + ((SMOOTHED(1, 0) < SMOOTHED(1, 8)) << 5) + ((SMOOTHED(-11, -6) < SMOOTHED(-4, -18)) << 4) + ((SMOOTHED(9, 0) < SMOOTHED(22, -4)) << 3) + ((SMOOTHED(-5, -1) < SMOOTHED(-9, 4)) << 2) + ((SMOOTHED(-20, 2) < SMOOTHED(1, 6)) << 1) + ((SMOOTHED(1, 2) < SMOOTHED(-9, -12)) << 0));
-    desc[62] = (uchar)(((SMOOTHED(5, 15) < SMOOTHED(4, -6)) << 7) + ((SMOOTHED(19, 4) < SMOOTHED(4, 11)) << 6) + ((SMOOTHED(17, -4) < SMOOTHED(-8, -1)) << 5) + ((SMOOTHED(-8, -12) < SMOOTHED(7, -3)) << 4) + ((SMOOTHED(11, 9) < SMOOTHED(8, 1)) << 3) + ((SMOOTHED(9, 22) < SMOOTHED(-15, 15)) << 2) + ((SMOOTHED(-7, -7) < SMOOTHED(1, -23)) << 1) + ((SMOOTHED(-5, 13) < SMOOTHED(-8, 2)) << 0));
-    desc[63] = (uchar)(((SMOOTHED(3, -5) < SMOOTHED(11, -11)) << 7) + ((SMOOTHED(3, -18) < SMOOTHED(14, -5)) << 6) + ((SMOOTHED(-20, 7) < SMOOTHED(-10, -23)) << 5) + ((SMOOTHED(-2, -5) < SMOOTHED(6, 0)) << 4) + ((SMOOTHED(-17, -13) < SMOOTHED(-3, 2)) << 3) + ((SMOOTHED(-6, -1) < SMOOTHED(14, -2)) << 2) + ((SMOOTHED(-12, -16) < SMOOTHED(15, 6)) << 1) + ((SMOOTHED(-12, -2) < SMOOTHED(3, -19)) << 0));
-#undef SMOOTHED
index 1ae1340..363bea3 100644 (file)
 namespace cv
 {
 
-Mat windowedMatchingMask( const std::vector<KeyPoint>& keypoints1, const std::vector<KeyPoint>& keypoints2,
-                          float maxDeltaX, float maxDeltaY )
-{
-    if( keypoints1.empty() || keypoints2.empty() )
-        return Mat();
-
-    int n1 = (int)keypoints1.size(), n2 = (int)keypoints2.size();
-    Mat mask( n1, n2, CV_8UC1 );
-    for( int i = 0; i < n1; i++ )
-    {
-        for( int j = 0; j < n2; j++ )
-        {
-            Point2f diff = keypoints2[j].pt - keypoints1[i].pt;
-            mask.at<uchar>(i, j) = std::abs(diff.x) < maxDeltaX && std::abs(diff.y) < maxDeltaY;
-        }
-    }
-    return mask;
-}
-
-//////////////////////////////////////////////////////////////////ocl functions for BFMatcher ///////////////////////////////////////////////////////////////
+/////////////////////// ocl functions for BFMatcher ///////////////////////////
 
 static void ensureSizeIsEnough(int rows, int cols, int type, UMat &m)
 {
@@ -1507,382 +1488,4 @@ void FlannBasedMatcher::radiusMatchImpl( InputArray _queryDescriptors, std::vect
     convertToDMatches( mergedDescriptors, indices, dists, matches );
 }
 
-/****************************************************************************************\
-*                                GenericDescriptorMatcher                                *
-\****************************************************************************************/
-/*
- * KeyPointCollection
- */
-GenericDescriptorMatcher::KeyPointCollection::KeyPointCollection() : pointCount(0)
-{}
-
-GenericDescriptorMatcher::KeyPointCollection::KeyPointCollection( const KeyPointCollection& collection )
-{
-    pointCount = collection.pointCount;
-
-    std::transform( collection.images.begin(), collection.images.end(), images.begin(), clone_op );
-
-    keypoints.resize( collection.keypoints.size() );
-    for( size_t i = 0; i < keypoints.size(); i++ )
-        std::copy( collection.keypoints[i].begin(), collection.keypoints[i].end(), keypoints[i].begin() );
-
-    std::copy( collection.startIndices.begin(), collection.startIndices.end(), startIndices.begin() );
-}
-
-void GenericDescriptorMatcher::KeyPointCollection::add( const std::vector<Mat>& _images,
-                                                        const std::vector<std::vector<KeyPoint> >& _points )
-{
-    CV_Assert( !_images.empty() );
-    CV_Assert( _images.size() == _points.size() );
-
-    images.insert( images.end(), _images.begin(), _images.end() );
-    keypoints.insert( keypoints.end(), _points.begin(), _points.end() );
-    for( size_t i = 0; i < _points.size(); i++ )
-        pointCount += (int)_points[i].size();
-
-    size_t prevSize = startIndices.size(), addSize = _images.size();
-    startIndices.resize( prevSize + addSize );
-
-    if( prevSize == 0 )
-        startIndices[prevSize] = 0; //first
-    else
-        startIndices[prevSize] = (int)(startIndices[prevSize-1] + keypoints[prevSize-1].size());
-
-    for( size_t i = prevSize + 1; i < prevSize + addSize; i++ )
-    {
-        startIndices[i] = (int)(startIndices[i - 1] + keypoints[i - 1].size());
-    }
-}
-
-void GenericDescriptorMatcher::KeyPointCollection::clear()
-{
-    pointCount = 0;
-
-    images.clear();
-    keypoints.clear();
-    startIndices.clear();
-}
-
-size_t GenericDescriptorMatcher::KeyPointCollection::keypointCount() const
-{
-    return pointCount;
-}
-
-size_t GenericDescriptorMatcher::KeyPointCollection::imageCount() const
-{
-    return images.size();
-}
-
-const std::vector<std::vector<KeyPoint> >& GenericDescriptorMatcher::KeyPointCollection::getKeypoints() const
-{
-    return keypoints;
-}
-
-const std::vector<KeyPoint>& GenericDescriptorMatcher::KeyPointCollection::getKeypoints( int imgIdx ) const
-{
-    CV_Assert( imgIdx < (int)imageCount() );
-    return keypoints[imgIdx];
-}
-
-const KeyPoint& GenericDescriptorMatcher::KeyPointCollection::getKeyPoint( int imgIdx, int localPointIdx ) const
-{
-    CV_Assert( imgIdx < (int)images.size() );
-    CV_Assert( localPointIdx < (int)keypoints[imgIdx].size() );
-    return keypoints[imgIdx][localPointIdx];
-}
-
-const KeyPoint& GenericDescriptorMatcher::KeyPointCollection::getKeyPoint( int globalPointIdx ) const
-{
-    int imgIdx, localPointIdx;
-    getLocalIdx( globalPointIdx, imgIdx, localPointIdx );
-    return keypoints[imgIdx][localPointIdx];
-}
-
-void GenericDescriptorMatcher::KeyPointCollection::getLocalIdx( int globalPointIdx, int& imgIdx, int& localPointIdx ) const
-{
-    imgIdx = -1;
-    CV_Assert( globalPointIdx < (int)keypointCount() );
-    for( size_t i = 1; i < startIndices.size(); i++ )
-    {
-        if( globalPointIdx < startIndices[i] )
-        {
-            imgIdx = (int)(i - 1);
-            break;
-        }
-    }
-    imgIdx = imgIdx == -1 ? (int)(startIndices.size() - 1) : imgIdx;
-    localPointIdx = globalPointIdx - startIndices[imgIdx];
-}
-
-const std::vector<Mat>& GenericDescriptorMatcher::KeyPointCollection::getImages() const
-{
-    return images;
-}
-
-const Mat& GenericDescriptorMatcher::KeyPointCollection::getImage( int imgIdx ) const
-{
-    CV_Assert( imgIdx < (int)imageCount() );
-    return images[imgIdx];
-}
-
-/*
- * GenericDescriptorMatcher
- */
-GenericDescriptorMatcher::GenericDescriptorMatcher()
-{}
-
-GenericDescriptorMatcher::~GenericDescriptorMatcher()
-{}
-
-void GenericDescriptorMatcher::add( InputArrayOfArrays _images,
-                                    std::vector<std::vector<KeyPoint> >& keypoints )
-{
-    std::vector<Mat> images;
-    _images.getMatVector(images);
-    CV_Assert( !images.empty() );
-    CV_Assert( images.size() == keypoints.size() );
-
-    for( size_t i = 0; i < images.size(); i++ )
-    {
-        CV_Assert( !images[i].empty() );
-        KeyPointsFilter::runByImageBorder( keypoints[i], images[i].size(), 0 );
-        KeyPointsFilter::runByKeypointSize( keypoints[i], std::numeric_limits<float>::epsilon() );
-    }
-
-    trainPointCollection.add( images, keypoints );
-}
-
-const std::vector<Mat>& GenericDescriptorMatcher::getTrainImages() const
-{
-    return trainPointCollection.getImages();
-}
-
-const std::vector<std::vector<KeyPoint> >& GenericDescriptorMatcher::getTrainKeypoints() const
-{
-    return trainPointCollection.getKeypoints();
-}
-
-void GenericDescriptorMatcher::clear()
-{
-    trainPointCollection.clear();
-}
-
-void GenericDescriptorMatcher::train()
-{}
-
-void GenericDescriptorMatcher::classify( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                                         InputArray trainImage, std::vector<KeyPoint>& trainKeypoints ) const
-{
-    std::vector<DMatch> matches;
-    match( queryImage, queryKeypoints, trainImage, trainKeypoints, matches );
-
-    // remap keypoint indices to descriptors
-    for( size_t i = 0; i < matches.size(); i++ )
-        queryKeypoints[matches[i].queryIdx].class_id = trainKeypoints[matches[i].trainIdx].class_id;
-}
-
-void GenericDescriptorMatcher::classify( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints )
-{
-    std::vector<DMatch> matches;
-    match( queryImage, queryKeypoints, matches );
-
-    // remap keypoint indices to descriptors
-    for( size_t i = 0; i < matches.size(); i++ )
-        queryKeypoints[matches[i].queryIdx].class_id = trainPointCollection.getKeyPoint( matches[i].trainIdx, matches[i].trainIdx ).class_id;
-}
-
-void GenericDescriptorMatcher::match( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                                      InputArray _trainImage, std::vector<KeyPoint>& trainKeypoints,
-                                      std::vector<DMatch>& matches, InputArray mask ) const
-{
-    Mat trainImage = _trainImage.getMat();
-    Ptr<GenericDescriptorMatcher> tempMatcher = clone( true );
-    std::vector<std::vector<KeyPoint> > vecTrainPoints(1, trainKeypoints);
-    tempMatcher->add( std::vector<Mat>(1, trainImage), vecTrainPoints );
-    tempMatcher->match( queryImage, queryKeypoints, matches, std::vector<Mat>(1, mask.getMat()) );
-    vecTrainPoints[0].swap( trainKeypoints );
-}
-
-void GenericDescriptorMatcher::knnMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                                         InputArray _trainImage, std::vector<KeyPoint>& trainKeypoints,
-                                         std::vector<std::vector<DMatch> >& matches, int knn, InputArray mask, bool compactResult ) const
-{
-    Mat trainImage = _trainImage.getMat();
-    Ptr<GenericDescriptorMatcher> tempMatcher = clone( true );
-    std::vector<std::vector<KeyPoint> > vecTrainPoints(1, trainKeypoints);
-    tempMatcher->add( std::vector<Mat>(1, trainImage), vecTrainPoints );
-    tempMatcher->knnMatch( queryImage, queryKeypoints, matches, knn, std::vector<Mat>(1, mask.getMat()), compactResult );
-    vecTrainPoints[0].swap( trainKeypoints );
-}
-
-void GenericDescriptorMatcher::radiusMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                                            InputArray _trainImage, std::vector<KeyPoint>& trainKeypoints,
-                                            std::vector<std::vector<DMatch> >& matches, float maxDistance,
-                                            InputArray mask, bool compactResult ) const
-{
-    Mat trainImage = _trainImage.getMat();
-    Ptr<GenericDescriptorMatcher> tempMatcher = clone( true );
-    std::vector<std::vector<KeyPoint> > vecTrainPoints(1, trainKeypoints);
-    tempMatcher->add( std::vector<Mat>(1, trainImage), vecTrainPoints );
-    tempMatcher->radiusMatch( queryImage, queryKeypoints, matches, maxDistance, std::vector<Mat>(1, mask.getMat()), compactResult );
-    vecTrainPoints[0].swap( trainKeypoints );
-}
-
-void GenericDescriptorMatcher::match( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                                      std::vector<DMatch>& matches, InputArrayOfArrays masks )
-{
-    std::vector<std::vector<DMatch> > knnMatches;
-    knnMatch( queryImage, queryKeypoints, knnMatches, 1, masks, false );
-    convertMatches( knnMatches, matches );
-}
-
-void GenericDescriptorMatcher::knnMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                                         std::vector<std::vector<DMatch> >& matches, int knn,
-                                         InputArrayOfArrays masks, bool compactResult )
-{
-    matches.clear();
-
-    if( queryImage.empty() || queryKeypoints.empty() )
-        return;
-
-    KeyPointsFilter::runByImageBorder( queryKeypoints, queryImage.size(), 0 );
-    KeyPointsFilter::runByKeypointSize( queryKeypoints, std::numeric_limits<float>::epsilon() );
-
-    train();
-    knnMatchImpl( queryImage, queryKeypoints, matches, knn, masks, compactResult );
-}
-
-void GenericDescriptorMatcher::radiusMatch( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                                            std::vector<std::vector<DMatch> >& matches, float maxDistance,
-                                            InputArrayOfArrays masks, bool compactResult )
-{
-    matches.clear();
-
-    if( queryImage.empty() || queryKeypoints.empty() )
-        return;
-
-    KeyPointsFilter::runByImageBorder( queryKeypoints, queryImage.size(), 0 );
-    KeyPointsFilter::runByKeypointSize( queryKeypoints, std::numeric_limits<float>::epsilon() );
-
-    train();
-    radiusMatchImpl( queryImage, queryKeypoints, matches, maxDistance, masks, compactResult );
-}
-
-void GenericDescriptorMatcher::read( const FileNode& )
-{}
-
-void GenericDescriptorMatcher::write( FileStorage& ) const
-{}
-
-bool GenericDescriptorMatcher::empty() const
-{
-    return true;
-}
-
-/*
- * Factory function for GenericDescriptorMatch creating
- */
-Ptr<GenericDescriptorMatcher> GenericDescriptorMatcher::create( const String& genericDescritptorMatcherType,
-                                                                const String &paramsFilename )
-{
-    Ptr<GenericDescriptorMatcher> descriptorMatcher =
-        Algorithm::create<GenericDescriptorMatcher>("DescriptorMatcher." + genericDescritptorMatcherType);
-
-    if( !paramsFilename.empty() && descriptorMatcher )
-    {
-        FileStorage fs = FileStorage( paramsFilename, FileStorage::READ );
-        if( fs.isOpened() )
-        {
-            descriptorMatcher->read( fs.root() );
-            fs.release();
-        }
-    }
-    return descriptorMatcher;
-}
-
-
-/****************************************************************************************\
-*                                  VectorDescriptorMatcher                               *
-\****************************************************************************************/
-VectorDescriptorMatcher::VectorDescriptorMatcher( const Ptr<DescriptorExtractor>& _extractor,
-                                                  const Ptr<DescriptorMatcher>& _matcher )
-                                : extractor( _extractor ), matcher( _matcher )
-{
-    CV_Assert( extractor && matcher );
-}
-
-VectorDescriptorMatcher::~VectorDescriptorMatcher()
-{}
-
-void VectorDescriptorMatcher::add( InputArrayOfArrays _imgCollection,
-                                   std::vector<std::vector<KeyPoint> >& pointCollection )
-{
-    std::vector<Mat> imgCollection, descriptors;
-    _imgCollection.getMatVector(imgCollection);
-    extractor->compute( imgCollection, pointCollection, descriptors );
-
-    matcher->add( descriptors );
-
-    trainPointCollection.add( imgCollection, pointCollection );
-}
-
-void VectorDescriptorMatcher::clear()
-{
-    //extractor->clear();
-    matcher->clear();
-    GenericDescriptorMatcher::clear();
-}
-
-void VectorDescriptorMatcher::train()
-{
-    matcher->train();
-}
-
-bool VectorDescriptorMatcher::isMaskSupported()
-{
-    return matcher->isMaskSupported();
-}
-
-void VectorDescriptorMatcher::knnMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                                            std::vector<std::vector<DMatch> >& matches, int knn,
-                                            InputArrayOfArrays masks, bool compactResult )
-{
-    Mat queryDescriptors;
-    extractor->compute( queryImage, queryKeypoints, queryDescriptors );
-    matcher->knnMatch( queryDescriptors, matches, knn, masks, compactResult );
-}
-
-void VectorDescriptorMatcher::radiusMatchImpl( InputArray queryImage, std::vector<KeyPoint>& queryKeypoints,
-                                               std::vector<std::vector<DMatch> >& matches, float maxDistance,
-                                               InputArrayOfArrays masks, bool compactResult )
-{
-    Mat queryDescriptors;
-    extractor->compute( queryImage, queryKeypoints, queryDescriptors );
-    matcher->radiusMatch( queryDescriptors, matches, maxDistance, masks, compactResult );
-}
-
-void VectorDescriptorMatcher::read( const FileNode& fn )
-{
-    GenericDescriptorMatcher::read(fn);
-    extractor->read(fn);
-}
-
-void VectorDescriptorMatcher::write (FileStorage& fs) const
-{
-    GenericDescriptorMatcher::write(fs);
-    extractor->write (fs);
-}
-
-bool VectorDescriptorMatcher::empty() const
-{
-    return !extractor || extractor->empty() ||
-           !matcher || matcher->empty();
-}
-
-Ptr<GenericDescriptorMatcher> VectorDescriptorMatcher::clone( bool emptyTrainData ) const
-{
-    // TODO clone extractor
-    return makePtr<VectorDescriptorMatcher>( extractor, matcher->clone(emptyTrainData) );
-}
-
 }
diff --git a/modules/features2d/src/stardetector.cpp b/modules/features2d/src/stardetector.cpp
deleted file mode 100644 (file)
index 0b09b86..0000000
+++ /dev/null
@@ -1,472 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of Intel Corporation may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "precomp.hpp"
-
-namespace cv
-{
-
-template <typename inMatType, typename outMatType> static void
-computeIntegralImages( const Mat& matI, Mat& matS, Mat& matT, Mat& _FT,
-                       int iiType )
-{
-    int x, y, rows = matI.rows, cols = matI.cols;
-
-    matS.create(rows + 1, cols + 1, iiType );
-    matT.create(rows + 1, cols + 1, iiType );
-    _FT.create(rows + 1, cols + 1, iiType );
-
-    const inMatType* I = matI.ptr<inMatType>();
-
-    outMatType *S = matS.ptr<outMatType>();
-    outMatType *T = matT.ptr<outMatType>();
-    outMatType *FT = _FT.ptr<outMatType>();
-
-    int istep = (int)(matI.step/matI.elemSize());
-    int step = (int)(matS.step/matS.elemSize());
-
-    for( x = 0; x <= cols; x++ )
-        S[x] = T[x] = FT[x] = 0;
-
-    S += step; T += step; FT += step;
-    S[0] = T[0] = 0;
-    FT[0] = I[0];
-    for( x = 1; x < cols; x++ )
-    {
-        S[x] = S[x-1] + I[x-1];
-        T[x] = I[x-1];
-        FT[x] = I[x] + I[x-1];
-    }
-    S[cols] = S[cols-1] + I[cols-1];
-    T[cols] = FT[cols] = I[cols-1];
-
-    for( y = 2; y <= rows; y++ )
-    {
-        I += istep, S += step, T += step, FT += step;
-
-        S[0] = S[-step]; S[1] = S[-step+1] + I[0];
-        T[0] = T[-step + 1];
-        T[1] = FT[0] = T[-step + 2] + I[-istep] + I[0];
-        FT[1] = FT[-step + 2] + I[-istep] + I[1] + I[0];
-
-        for( x = 2; x < cols; x++ )
-        {
-            S[x] = S[x - 1] + S[-step + x] - S[-step + x - 1] + I[x - 1];
-            T[x] = T[-step + x - 1] + T[-step + x + 1] - T[-step*2 + x] + I[-istep + x - 1] + I[x - 1];
-            FT[x] = FT[-step + x - 1] + FT[-step + x + 1] - FT[-step*2 + x] + I[x] + I[x-1];
-        }
-
-        S[cols] = S[cols - 1] + S[-step + cols] - S[-step + cols - 1] + I[cols - 1];
-        T[cols] = FT[cols] = T[-step + cols - 1] + I[-istep + cols - 1] + I[cols - 1];
-    }
-}
-
-template <typename iiMatType> static int
-StarDetectorComputeResponses( const Mat& img, Mat& responses, Mat& sizes,
-                              int maxSize, int iiType )
-{
-    const int MAX_PATTERN = 17;
-    static const int sizes0[] = {1, 2, 3, 4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128, -1};
-    static const int pairs[][2] = {{1, 0}, {3, 1}, {4, 2}, {5, 3}, {7, 4}, {8, 5}, {9, 6},
-                                   {11, 8}, {13, 10}, {14, 11}, {15, 12}, {16, 14}, {-1, -1}};
-    float invSizes[MAX_PATTERN][2];
-    int sizes1[MAX_PATTERN];
-
-#if CV_SSE2
-    __m128 invSizes4[MAX_PATTERN][2];
-    __m128 sizes1_4[MAX_PATTERN];
-    union { int i; float f; } absmask;
-    absmask.i = 0x7fffffff;
-    volatile bool useSIMD = cv::checkHardwareSupport(CV_CPU_SSE2) && iiType == CV_32S;
-#endif
-
-    struct StarFeature
-    {
-        int area;
-        iiMatType* p[8];
-    };
-
-    StarFeature f[MAX_PATTERN];
-
-    Mat sum, tilted, flatTilted;
-    int y, rows = img.rows, cols = img.cols;
-    int border, npatterns=0, maxIdx=0;
-
-    responses.create( img.size(), CV_32F );
-    sizes.create( img.size(), CV_16S );
-
-    while( pairs[npatterns][0] >= 0 && !
-          ( sizes0[pairs[npatterns][0]] >= maxSize
-           || sizes0[pairs[npatterns+1][0]] + sizes0[pairs[npatterns+1][0]]/2 >= std::min(rows, cols) ) )
-    {
-        ++npatterns;
-    }
-
-    npatterns += (pairs[npatterns-1][0] >= 0);
-    maxIdx = pairs[npatterns-1][0];
-
-    // Create the integral image appropriate for our type & usage
-    if ( img.type() == CV_8U )
-        computeIntegralImages<uchar, iiMatType>( img, sum, tilted, flatTilted, iiType );
-    else if ( img.type() == CV_8S )
-        computeIntegralImages<char, iiMatType>( img, sum, tilted, flatTilted, iiType );
-    else if ( img.type() == CV_16U )
-        computeIntegralImages<ushort, iiMatType>( img, sum, tilted, flatTilted, iiType );
-    else if ( img.type() == CV_16S )
-        computeIntegralImages<short, iiMatType>( img, sum, tilted, flatTilted, iiType );
-    else
-        CV_Error( Error::StsUnsupportedFormat, "" );
-
-    int step = (int)(sum.step/sum.elemSize());
-
-    for(int i = 0; i <= maxIdx; i++ )
-    {
-        int ur_size = sizes0[i], t_size = sizes0[i] + sizes0[i]/2;
-        int ur_area = (2*ur_size + 1)*(2*ur_size + 1);
-        int t_area = t_size*t_size + (t_size + 1)*(t_size + 1);
-
-        f[i].p[0] = sum.ptr<iiMatType>() + (ur_size + 1)*step + ur_size + 1;
-        f[i].p[1] = sum.ptr<iiMatType>() - ur_size*step + ur_size + 1;
-        f[i].p[2] = sum.ptr<iiMatType>() + (ur_size + 1)*step - ur_size;
-        f[i].p[3] = sum.ptr<iiMatType>() - ur_size*step - ur_size;
-
-        f[i].p[4] = tilted.ptr<iiMatType>() + (t_size + 1)*step + 1;
-        f[i].p[5] = flatTilted.ptr<iiMatType>() - t_size;
-        f[i].p[6] = flatTilted.ptr<iiMatType>() + t_size + 1;
-        f[i].p[7] = tilted.ptr<iiMatType>() - t_size*step + 1;
-
-        f[i].area = ur_area + t_area;
-        sizes1[i] = sizes0[i];
-    }
-    // negate end points of the size range
-    // for a faster rejection of very small or very large features in non-maxima suppression.
-    sizes1[0] = -sizes1[0];
-    sizes1[1] = -sizes1[1];
-    sizes1[maxIdx] = -sizes1[maxIdx];
-    border = sizes0[maxIdx] + sizes0[maxIdx]/2;
-
-    for(int i = 0; i < npatterns; i++ )
-    {
-        int innerArea = f[pairs[i][1]].area;
-        int outerArea = f[pairs[i][0]].area - innerArea;
-        invSizes[i][0] = 1.f/outerArea;
-        invSizes[i][1] = 1.f/innerArea;
-    }
-
-#if CV_SSE2
-    if( useSIMD )
-    {
-        for(int i = 0; i < npatterns; i++ )
-        {
-            _mm_store_ps((float*)&invSizes4[i][0], _mm_set1_ps(invSizes[i][0]));
-            _mm_store_ps((float*)&invSizes4[i][1], _mm_set1_ps(invSizes[i][1]));
-        }
-
-        for(int i = 0; i <= maxIdx; i++ )
-            _mm_store_ps((float*)&sizes1_4[i], _mm_set1_ps((float)sizes1[i]));
-    }
-#endif
-
-    for( y = 0; y < border; y++ )
-    {
-        float* r_ptr = responses.ptr<float>(y);
-        float* r_ptr2 = responses.ptr<float>(rows - 1 - y);
-        short* s_ptr = sizes.ptr<short>(y);
-        short* s_ptr2 = sizes.ptr<short>(rows - 1 - y);
-
-        memset( r_ptr, 0, cols*sizeof(r_ptr[0]));
-        memset( r_ptr2, 0, cols*sizeof(r_ptr2[0]));
-        memset( s_ptr, 0, cols*sizeof(s_ptr[0]));
-        memset( s_ptr2, 0, cols*sizeof(s_ptr2[0]));
-    }
-
-    for( y = border; y < rows - border; y++ )
-    {
-        int x = border;
-        float* r_ptr = responses.ptr<float>(y);
-        short* s_ptr = sizes.ptr<short>(y);
-
-        memset( r_ptr, 0, border*sizeof(r_ptr[0]));
-        memset( s_ptr, 0, border*sizeof(s_ptr[0]));
-        memset( r_ptr + cols - border, 0, border*sizeof(r_ptr[0]));
-        memset( s_ptr + cols - border, 0, border*sizeof(s_ptr[0]));
-
-#if CV_SSE2
-        if( useSIMD )
-        {
-            __m128 absmask4 = _mm_set1_ps(absmask.f);
-            for( ; x <= cols - border - 4; x += 4 )
-            {
-                int ofs = y*step + x;
-                __m128 vals[MAX_PATTERN];
-                __m128 bestResponse = _mm_setzero_ps();
-                __m128 bestSize = _mm_setzero_ps();
-
-                for(int i = 0; i <= maxIdx; i++ )
-                {
-                    const iiMatType** p = (const iiMatType**)&f[i].p[0];
-                    __m128i r0 = _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(p[0]+ofs)),
-                                               _mm_loadu_si128((const __m128i*)(p[1]+ofs)));
-                    __m128i r1 = _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(p[3]+ofs)),
-                                               _mm_loadu_si128((const __m128i*)(p[2]+ofs)));
-                    __m128i r2 = _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(p[4]+ofs)),
-                                               _mm_loadu_si128((const __m128i*)(p[5]+ofs)));
-                    __m128i r3 = _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(p[7]+ofs)),
-                                               _mm_loadu_si128((const __m128i*)(p[6]+ofs)));
-                    r0 = _mm_add_epi32(_mm_add_epi32(r0,r1), _mm_add_epi32(r2,r3));
-                    _mm_store_ps((float*)&vals[i], _mm_cvtepi32_ps(r0));
-                }
-
-                for(int i = 0; i < npatterns; i++ )
-                {
-                    __m128 inner_sum = vals[pairs[i][1]];
-                    __m128 outer_sum = _mm_sub_ps(vals[pairs[i][0]], inner_sum);
-                    __m128 response = _mm_sub_ps(_mm_mul_ps(inner_sum, invSizes4[i][1]),
-                        _mm_mul_ps(outer_sum, invSizes4[i][0]));
-                    __m128 swapmask = _mm_cmpgt_ps(_mm_and_ps(response,absmask4),
-                        _mm_and_ps(bestResponse,absmask4));
-                    bestResponse = _mm_xor_ps(bestResponse,
-                        _mm_and_ps(_mm_xor_ps(response,bestResponse), swapmask));
-                    bestSize = _mm_xor_ps(bestSize,
-                        _mm_and_ps(_mm_xor_ps(sizes1_4[pairs[i][0]], bestSize), swapmask));
-                }
-
-                _mm_storeu_ps(r_ptr + x, bestResponse);
-                _mm_storel_epi64((__m128i*)(s_ptr + x),
-                    _mm_packs_epi32(_mm_cvtps_epi32(bestSize),_mm_setzero_si128()));
-            }
-        }
-#endif
-        for( ; x < cols - border; x++ )
-        {
-            int ofs = y*step + x;
-            int vals[MAX_PATTERN];
-            float bestResponse = 0;
-            int bestSize = 0;
-
-            for(int i = 0; i <= maxIdx; i++ )
-            {
-                const iiMatType** p = (const iiMatType**)&f[i].p[0];
-                vals[i] = (int)(p[0][ofs] - p[1][ofs] - p[2][ofs] + p[3][ofs] +
-                    p[4][ofs] - p[5][ofs] - p[6][ofs] + p[7][ofs]);
-            }
-            for(int i = 0; i < npatterns; i++ )
-            {
-                int inner_sum = vals[pairs[i][1]];
-                int outer_sum = vals[pairs[i][0]] - inner_sum;
-                float response = inner_sum*invSizes[i][1] - outer_sum*invSizes[i][0];
-                if( fabs(response) > fabs(bestResponse) )
-                {
-                    bestResponse = response;
-                    bestSize = sizes1[pairs[i][0]];
-                }
-            }
-
-            r_ptr[x] = bestResponse;
-            s_ptr[x] = (short)bestSize;
-        }
-    }
-
-    return border;
-}
-
-
-static bool StarDetectorSuppressLines( const Mat& responses, const Mat& sizes, Point pt,
-                                       int lineThresholdProjected, int lineThresholdBinarized )
-{
-    const float* r_ptr = responses.ptr<float>();
-    int rstep = (int)(responses.step/sizeof(r_ptr[0]));
-    const short* s_ptr = sizes.ptr<short>();
-    int sstep = (int)(sizes.step/sizeof(s_ptr[0]));
-    int sz = s_ptr[pt.y*sstep + pt.x];
-    int x, y, delta = sz/4, radius = delta*4;
-    float Lxx = 0, Lyy = 0, Lxy = 0;
-    int Lxxb = 0, Lyyb = 0, Lxyb = 0;
-
-    for( y = pt.y - radius; y <= pt.y + radius; y += delta )
-        for( x = pt.x - radius; x <= pt.x + radius; x += delta )
-        {
-            float Lx = r_ptr[y*rstep + x + 1] - r_ptr[y*rstep + x - 1];
-            float Ly = r_ptr[(y+1)*rstep + x] - r_ptr[(y-1)*rstep + x];
-            Lxx += Lx*Lx; Lyy += Ly*Ly; Lxy += Lx*Ly;
-        }
-
-    if( (Lxx + Lyy)*(Lxx + Lyy) >= lineThresholdProjected*(Lxx*Lyy - Lxy*Lxy) )
-        return true;
-
-    for( y = pt.y - radius; y <= pt.y + radius; y += delta )
-        for( x = pt.x - radius; x <= pt.x + radius; x += delta )
-        {
-            int Lxb = (s_ptr[y*sstep + x + 1] == sz) - (s_ptr[y*sstep + x - 1] == sz);
-            int Lyb = (s_ptr[(y+1)*sstep + x] == sz) - (s_ptr[(y-1)*sstep + x] == sz);
-            Lxxb += Lxb * Lxb; Lyyb += Lyb * Lyb; Lxyb += Lxb * Lyb;
-        }
-
-    if( (Lxxb + Lyyb)*(Lxxb + Lyyb) >= lineThresholdBinarized*(Lxxb*Lyyb - Lxyb*Lxyb) )
-        return true;
-
-    return false;
-}
-
-
-static void
-StarDetectorSuppressNonmax( const Mat& responses, const Mat& sizes,
-                            std::vector<KeyPoint>& keypoints, int border,
-                            int responseThreshold,
-                            int lineThresholdProjected,
-                            int lineThresholdBinarized,
-                            int suppressNonmaxSize )
-{
-    int x, y, x1, y1, delta = suppressNonmaxSize/2;
-    int rows = responses.rows, cols = responses.cols;
-    const float* r_ptr = responses.ptr<float>();
-    int rstep = (int)(responses.step/sizeof(r_ptr[0]));
-    const short* s_ptr = sizes.ptr<short>();
-    int sstep = (int)(sizes.step/sizeof(s_ptr[0]));
-    short featureSize = 0;
-
-    for( y = border; y < rows - border; y += delta+1 )
-        for( x = border; x < cols - border; x += delta+1 )
-        {
-            float maxResponse = (float)responseThreshold;
-            float minResponse = (float)-responseThreshold;
-            Point maxPt(-1, -1), minPt(-1, -1);
-            int tileEndY = MIN(y + delta, rows - border - 1);
-            int tileEndX = MIN(x + delta, cols - border - 1);
-
-            for( y1 = y; y1 <= tileEndY; y1++ )
-                for( x1 = x; x1 <= tileEndX; x1++ )
-                {
-                    float val = r_ptr[y1*rstep + x1];
-                    if( maxResponse < val )
-                    {
-                        maxResponse = val;
-                        maxPt = Point(x1, y1);
-                    }
-                    else if( minResponse > val )
-                    {
-                        minResponse = val;
-                        minPt = Point(x1, y1);
-                    }
-                }
-
-            if( maxPt.x >= 0 )
-            {
-                for( y1 = maxPt.y - delta; y1 <= maxPt.y + delta; y1++ )
-                    for( x1 = maxPt.x - delta; x1 <= maxPt.x + delta; x1++ )
-                    {
-                        float val = r_ptr[y1*rstep + x1];
-                        if( val >= maxResponse && (y1 != maxPt.y || x1 != maxPt.x))
-                            goto skip_max;
-                    }
-
-                if( (featureSize = s_ptr[maxPt.y*sstep + maxPt.x]) >= 4 &&
-                    !StarDetectorSuppressLines( responses, sizes, maxPt, lineThresholdProjected,
-                                                lineThresholdBinarized ))
-                {
-                    KeyPoint kpt((float)maxPt.x, (float)maxPt.y, featureSize, -1, maxResponse);
-                    keypoints.push_back(kpt);
-                }
-            }
-        skip_max:
-            if( minPt.x >= 0 )
-            {
-                for( y1 = minPt.y - delta; y1 <= minPt.y + delta; y1++ )
-                    for( x1 = minPt.x - delta; x1 <= minPt.x + delta; x1++ )
-                    {
-                        float val = r_ptr[y1*rstep + x1];
-                        if( val <= minResponse && (y1 != minPt.y || x1 != minPt.x))
-                            goto skip_min;
-                    }
-
-                if( (featureSize = s_ptr[minPt.y*sstep + minPt.x]) >= 4 &&
-                    !StarDetectorSuppressLines( responses, sizes, minPt,
-                                               lineThresholdProjected, lineThresholdBinarized))
-                {
-                    KeyPoint kpt((float)minPt.x, (float)minPt.y, featureSize, -1, maxResponse);
-                    keypoints.push_back(kpt);
-                }
-            }
-        skip_min:
-            ;
-        }
-}
-
-StarDetector::StarDetector(int _maxSize, int _responseThreshold,
-                           int _lineThresholdProjected,
-                           int _lineThresholdBinarized,
-                           int _suppressNonmaxSize)
-: maxSize(_maxSize), responseThreshold(_responseThreshold),
-    lineThresholdProjected(_lineThresholdProjected),
-    lineThresholdBinarized(_lineThresholdBinarized),
-    suppressNonmaxSize(_suppressNonmaxSize)
-{}
-
-
-void StarDetector::detectImpl( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask ) const
-{
-    Mat image = _image.getMat(), mask = _mask.getMat(), grayImage = image;
-    if( image.channels() > 1 ) cvtColor( image, grayImage, COLOR_BGR2GRAY );
-
-    (*this)(grayImage, keypoints);
-    KeyPointsFilter::runByPixelsMask( keypoints, mask );
-}
-
-void StarDetector::operator()(const Mat& img, std::vector<KeyPoint>& keypoints) const
-{
-    Mat responses, sizes;
-    int border;
-
-    // Use 32-bit integers if we won't overflow in the integral image
-    if ((img.depth() == CV_8U || img.depth() == CV_8S) &&
-        (img.rows * img.cols) < 8388608 ) // 8388608 = 2 ^ (32 - 8(bit depth) - 1(sign bit))
-        border = StarDetectorComputeResponses<int>( img, responses, sizes, maxSize, CV_32S );
-    else
-        border = StarDetectorComputeResponses<double>( img, responses, sizes, maxSize, CV_64F );
-
-    keypoints.clear();
-    if( border >= 0 )
-        StarDetectorSuppressNonmax( responses, sizes, keypoints, border,
-                                    responseThreshold, lineThresholdProjected,
-                                    lineThresholdBinarized, suppressNonmaxSize );
-}
-
-}
index 78114ae..dba9fe3 100755 (executable)
@@ -2,7 +2,7 @@
 
 from __future__ import print_function
 import os, sys, re, string, fnmatch
-allmodules = ["core", "flann", "imgproc", "imgcodecs", "videoio", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "nonfree", "videostab", "softcascade", "superres"]
+allmodules = ["core", "flann", "imgproc", "imgcodecs", "videoio", "highgui", "video", "features2d", "calib3d", "objdetect", "legacy", "contrib", "cuda", "androidcamera", "java", "python", "stitching", "ts", "photo", "videostab", "softcascade", "superres"]
 verbose = False
 show_warnings = True
 show_errors = True
@@ -12,7 +12,6 @@ params_blacklist = {
     "fromarray" : ("object", "allowND"), # python only function
     "reprojectImageTo3D" : ("ddepth"),   # python only argument
     "composeRT" : ("d*d*"),              # wildchards in parameter names are not supported by this parser
-    "CvSVM::train_auto" : ("\\*Grid"),   # wildchards in parameter names are not supported by this parser
     "error" : "args", # parameter of supporting macro
     "getConvertElem" : ("from", "cn", "to", "beta", "alpha"), # arguments of returned functions
     "gpu::swapChannels" : ("dstOrder") # parameter is not parsed correctly by the hdr_parser
index a4ac0d5..c8475d0 100644 (file)
@@ -2,10 +2,6 @@
 
 #include "opencv2/opencv_modules.hpp"
 
-#ifdef HAVE_OPENCV_NONFREE
-#  include "opencv2/nonfree.hpp"
-#endif
-
 #ifdef HAVE_OPENCV_FEATURES2D
 #  include "opencv2/features2d.hpp"
 #endif
@@ -28,9 +24,6 @@ JNI_OnLoad(JavaVM* vm, void* )
         return -1;
 
     bool init = true;
-#ifdef HAVE_OPENCV_NONFREE
-    init &= cv::initModule_nonfree();
-#endif
 #ifdef HAVE_OPENCV_FEATURES2D
     init &= cv::initModule_features2d();
 #endif
diff --git a/modules/nonfree/CMakeLists.txt b/modules/nonfree/CMakeLists.txt
deleted file mode 100644 (file)
index 851646f..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-if(BUILD_ANDROID_PACKAGE)
-  ocv_module_disable(nonfree)
-endif()
-
-set(the_description "Functionality with possible limitations on the use")
-ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wshadow)
-ocv_define_module(nonfree opencv_imgproc opencv_features2d opencv_calib3d OPTIONAL opencv_cudaarithm)
diff --git a/modules/nonfree/doc/feature_detection.rst b/modules/nonfree/doc/feature_detection.rst
deleted file mode 100644 (file)
index b61b85c..0000000
+++ /dev/null
@@ -1,250 +0,0 @@
-Feature Detection and Description
-=================================
-
-SIFT
-----
-.. ocv:class:: SIFT : public Feature2D
-
-Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform (SIFT) algorithm by D. Lowe [Lowe04]_.
-
-.. [Lowe04] Lowe, D. G., “Distinctive Image Features from Scale-Invariant Keypoints”, International Journal of Computer Vision, 60, 2, pp. 91-110, 2004.
-
-
-SIFT::SIFT
-----------
-The SIFT constructors.
-
-.. ocv:function:: SIFT::SIFT( int nfeatures=0, int nOctaveLayers=3, double contrastThreshold=0.04, double edgeThreshold=10, double sigma=1.6)
-
-.. ocv:pyfunction:: cv2.SIFT([, nfeatures[, nOctaveLayers[, contrastThreshold[, edgeThreshold[, sigma]]]]]) -> <SIFT object>
-
-    :param nfeatures: The number of best features to retain. The features are ranked by their scores (measured in SIFT algorithm as the local contrast)
-
-    :param nOctaveLayers: The number of layers in each octave. 3 is the value used in D. Lowe paper. The number of octaves is computed automatically from the image resolution.
-
-    :param contrastThreshold: The contrast threshold used to filter out weak features in semi-uniform (low-contrast) regions. The larger the threshold, the less features are produced by the detector.
-
-    :param edgeThreshold: The threshold used to filter out edge-like features. Note that the its meaning is different from the contrastThreshold, i.e. the larger the ``edgeThreshold``, the less features are filtered out (more features are retained).
-
-    :param sigma: The sigma of the Gaussian applied to the input image at the octave #0. If your image is captured with a weak camera with soft lenses, you might want to reduce the number.
-
-
-SIFT::operator ()
------------------
-Extract features and computes their descriptors using SIFT algorithm
-
-.. ocv:function:: void SIFT::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
-
-.. ocv:pyfunction:: cv2.SIFT.detect(image[, mask]) -> keypoints
-
-.. ocv:pyfunction:: cv2.SIFT.compute(image, keypoints[, descriptors]) -> keypoints, descriptors
-
-.. ocv:pyfunction:: cv2.SIFT.detectAndCompute(image, mask[, descriptors[, useProvidedKeypoints]]) -> keypoints, descriptors
-
-    :param img: Input 8-bit grayscale image
-
-    :param mask: Optional input mask that marks the regions where we should detect features.
-
-    :param keypoints: The input/output vector of keypoints
-
-    :param descriptors: The output matrix of descriptors. Pass ``cv::noArray()`` if you do not need them.
-
-    :param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.
-
-.. note:: Python API provides three functions. First one finds keypoints only. Second function computes the descriptors based on the keypoints we provide. Third function detects the keypoints and computes their descriptors. If you want both keypoints and descriptors, directly use third function as ``kp, des = cv2.SIFT.detectAndCompute(image, None)``
-
-SURF
-----
-.. ocv:class:: SURF : public Feature2D
-
-  Class for extracting Speeded Up Robust Features from an image [Bay06]_. The class is derived from ``CvSURFParams`` structure, which specifies the algorithm parameters:
-
-  .. ocv:member:: int extended
-
-     * 0 means that the basic descriptors (64 elements each) shall be computed
-     * 1 means that the extended descriptors (128 elements each) shall be computed
-
-  .. ocv:member:: int upright
-
-     * 0 means that detector computes orientation of each feature.
-     * 1 means that the orientation is not computed (which is much, much faster). For example, if you match images from a stereo pair, or do image stitching, the matched features likely have very similar angles, and you can speed up feature extraction by setting ``upright=1``.
-
-  .. ocv:member:: double hessianThreshold
-
-     Threshold for the keypoint detector. Only features, whose hessian is larger than ``hessianThreshold`` are retained by the detector. Therefore, the larger the value, the less keypoints you will get. A good default value could be from 300 to 500, depending from the image contrast.
-
-  .. ocv:member:: int nOctaves
-
-     The number of a gaussian pyramid octaves that the detector uses. It is set to 4 by default. If you want to get very large features, use the larger value. If you want just small features, decrease it.
-
-  .. ocv:member:: int nOctaveLayers
-
-     The number of images within each octave of a gaussian pyramid. It is set to 2 by default.
-
-
-.. [Bay06] Bay, H. and Tuytelaars, T. and Van Gool, L. "SURF: Speeded Up Robust Features", 9th European Conference on Computer Vision, 2006
-
-.. note::
-
-   * An example using the SURF feature detector can be found at opencv_source_code/samples/cpp/generic_descriptor_match.cpp
-   * Another example using the SURF feature detector, extractor and matcher can be found at opencv_source_code/samples/cpp/matcher_simple.cpp
-
-SURF::SURF
-----------
-The SURF extractor constructors.
-
-.. ocv:function:: SURF::SURF()
-
-.. ocv:function:: SURF::SURF( double hessianThreshold, int nOctaves=4, int nOctaveLayers=2, bool extended=true, bool upright=false )
-
-.. ocv:pyfunction:: cv2.SURF([hessianThreshold[, nOctaves[, nOctaveLayers[, extended[, upright]]]]]) -> <SURF object>
-
-    :param hessianThreshold: Threshold for hessian keypoint detector used in SURF.
-
-    :param nOctaves: Number of pyramid octaves the keypoint detector will use.
-
-    :param nOctaveLayers: Number of octave layers within each octave.
-
-    :param extended: Extended descriptor flag (true - use extended 128-element descriptors; false - use 64-element descriptors).
-
-    :param upright: Up-right or rotated features flag (true - do not compute orientation of features; false - compute orientation).
-
-
-SURF::operator()
-----------------
-Detects keypoints and computes SURF descriptors for them.
-
-.. ocv:function:: void SURF::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints) const
-.. ocv:function:: void SURF::operator()(InputArray img, InputArray mask, vector<KeyPoint>& keypoints, OutputArray descriptors, bool useProvidedKeypoints=false)
-
-.. ocv:pyfunction:: cv2.SURF.detect(image[, mask]) -> keypoints
-.. ocv:pyfunction:: cv2.SURF.compute(image, keypoints[, descriptors]) -> keypoints, descriptors
-.. ocv:pyfunction:: cv2.SURF.detectAndCompute(image, mask[, descriptors[, useProvidedKeypoints]]) -> keypoints, descriptors
-
-.. ocv:pyfunction:: cv2.SURF.detectAndCompute(image[, mask]) -> keypoints, descriptors
-
-.. ocv:cfunction:: void cvExtractSURF( const CvArr* image, const CvArr* mask, CvSeq** keypoints, CvSeq** descriptors, CvMemStorage* storage, CvSURFParams params )
-
-    :param image: Input 8-bit grayscale image
-
-    :param mask: Optional input mask that marks the regions where we should detect features.
-
-    :param keypoints: The input/output vector of keypoints
-
-    :param descriptors: The output matrix of descriptors. Pass ``cv::noArray()`` if you do not need them.
-
-    :param useProvidedKeypoints: Boolean flag. If it is true, the keypoint detector is not run. Instead, the provided vector of keypoints is used and the algorithm just computes their descriptors.
-
-    :param storage: Memory storage for the output keypoints and descriptors in OpenCV 1.x API.
-
-    :param params: SURF algorithm parameters in OpenCV 1.x API.
-
-The function is parallelized with the TBB library.
-
-If you are using the C version, make sure you call ``cv::initModule_nonfree()`` from ``nonfree/nonfree.hpp``.
-
-
-cuda::SURF_CUDA
----------------
-.. ocv:class:: cuda::SURF_CUDA
-
-Class used for extracting Speeded Up Robust Features (SURF) from an image. ::
-
-    class SURF_CUDA
-    {
-    public:
-        enum KeypointLayout
-        {
-            X_ROW = 0,
-            Y_ROW,
-            LAPLACIAN_ROW,
-            OCTAVE_ROW,
-            SIZE_ROW,
-            ANGLE_ROW,
-            HESSIAN_ROW,
-            ROWS_COUNT
-        };
-
-        //! the default constructor
-        SURF_CUDA();
-        //! the full constructor taking all the necessary parameters
-        explicit SURF_CUDA(double _hessianThreshold, int _nOctaves=4,
-             int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f);
-
-        //! returns the descriptor size in float's (64 or 128)
-        int descriptorSize() const;
-
-        //! upload host keypoints to device memory
-        void uploadKeypoints(const vector<KeyPoint>& keypoints,
-            GpuMat& keypointsGPU);
-        //! download keypoints from device to host memory
-        void downloadKeypoints(const GpuMat& keypointsGPU,
-            vector<KeyPoint>& keypoints);
-
-        //! download descriptors from device to host memory
-        void downloadDescriptors(const GpuMat& descriptorsGPU,
-            vector<float>& descriptors);
-
-        void operator()(const GpuMat& img, const GpuMat& mask,
-            GpuMat& keypoints);
-
-        void operator()(const GpuMat& img, const GpuMat& mask,
-            GpuMat& keypoints, GpuMat& descriptors,
-            bool useProvidedKeypoints = false,
-            bool calcOrientation = true);
-
-        void operator()(const GpuMat& img, const GpuMat& mask,
-            std::vector<KeyPoint>& keypoints);
-
-        void operator()(const GpuMat& img, const GpuMat& mask,
-            std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
-            bool useProvidedKeypoints = false,
-            bool calcOrientation = true);
-
-        void operator()(const GpuMat& img, const GpuMat& mask,
-            std::vector<KeyPoint>& keypoints,
-            std::vector<float>& descriptors,
-            bool useProvidedKeypoints = false,
-            bool calcOrientation = true);
-
-        void releaseMemory();
-
-        // SURF parameters
-        double hessianThreshold;
-        int nOctaves;
-        int nOctaveLayers;
-        bool extended;
-        bool upright;
-
-        //! max keypoints = keypointsRatio * img.size().area()
-        float keypointsRatio;
-
-        GpuMat sum, mask1, maskSum, intBuffer;
-
-        GpuMat det, trace;
-
-        GpuMat maxPosBuffer;
-    };
-
-
-The class ``SURF_CUDA`` implements Speeded Up Robust Features descriptor. There is a fast multi-scale Hessian keypoint detector that can be used to find the keypoints (which is the default option). But the descriptors can also be computed for the user-specified keypoints. Only 8-bit grayscale images are supported.
-
-The class ``SURF_CUDA`` can store results in the GPU and CPU memory. It provides functions to convert results between CPU and GPU version ( ``uploadKeypoints``, ``downloadKeypoints``, ``downloadDescriptors`` ). The format of CPU results is the same as ``SURF`` results. GPU results are stored in ``GpuMat``. The ``keypoints`` matrix is :math:`\texttt{nFeatures} \times 7` matrix with the ``CV_32FC1`` type.
-
-* ``keypoints.ptr<float>(X_ROW)[i]`` contains x coordinate of the i-th feature.
-* ``keypoints.ptr<float>(Y_ROW)[i]`` contains y coordinate of the i-th feature.
-* ``keypoints.ptr<float>(LAPLACIAN_ROW)[i]``  contains the laplacian sign of the i-th feature.
-* ``keypoints.ptr<float>(OCTAVE_ROW)[i]`` contains the octave of the i-th feature.
-* ``keypoints.ptr<float>(SIZE_ROW)[i]`` contains the size of the i-th feature.
-* ``keypoints.ptr<float>(ANGLE_ROW)[i]`` contain orientation of the i-th feature.
-* ``keypoints.ptr<float>(HESSIAN_ROW)[i]`` contains the response of the i-th feature.
-
-The ``descriptors`` matrix is :math:`\texttt{nFeatures} \times \texttt{descriptorSize}` matrix with the ``CV_32FC1`` type.
-
-The class ``SURF_CUDA`` uses some buffers and provides access to it. All buffers can be safely released between function calls.
-
-.. seealso:: :ocv:class:`SURF`
-
-.. note::
-
-   * An example for using the SURF keypoint matcher on GPU can be found at opencv_source_code/samples/gpu/surf_keypoint_matcher.cpp
diff --git a/modules/nonfree/doc/nonfree.rst b/modules/nonfree/doc/nonfree.rst
deleted file mode 100644 (file)
index e524ea8..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-********************************
-nonfree. Non-free functionality
-********************************
-
-The module contains algorithms that may be patented in some countries or have some other limitations on the use.
-
-.. toctree::
-    :maxdepth: 2
-
-    feature_detection
diff --git a/modules/nonfree/include/opencv2/nonfree.hpp b/modules/nonfree/include/opencv2/nonfree.hpp
deleted file mode 100644 (file)
index da8e535..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009-2012, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef __OPENCV_NONFREE_HPP__
-#define __OPENCV_NONFREE_HPP__
-
-#include "opencv2/nonfree/features2d.hpp"
-
-namespace cv
-{
-
-CV_EXPORTS bool initModule_nonfree();
-
-}
-
-#endif
-
-/* End of file. */
diff --git a/modules/nonfree/include/opencv2/nonfree/cuda.hpp b/modules/nonfree/include/opencv2/nonfree/cuda.hpp
deleted file mode 100644 (file)
index 0e1f8e5..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef __OPENCV_NONFREE_CUDA_HPP__
-#define __OPENCV_NONFREE_CUDA_HPP__
-
-#include "opencv2/core/cuda.hpp"
-
-namespace cv { namespace cuda {
-
-class CV_EXPORTS SURF_CUDA
-{
-public:
-    enum KeypointLayout
-    {
-        X_ROW = 0,
-        Y_ROW,
-        LAPLACIAN_ROW,
-        OCTAVE_ROW,
-        SIZE_ROW,
-        ANGLE_ROW,
-        HESSIAN_ROW,
-        ROWS_COUNT
-    };
-
-    //! the default constructor
-    SURF_CUDA();
-    //! the full constructor taking all the necessary parameters
-    explicit SURF_CUDA(double _hessianThreshold, int _nOctaves=4,
-         int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false);
-
-    //! returns the descriptor size in float's (64 or 128)
-    int descriptorSize() const;
-    //! returns the default norm type
-    int defaultNorm() const;
-
-    //! upload host keypoints to device memory
-    void uploadKeypoints(const std::vector<KeyPoint>& keypoints, GpuMat& keypointsGPU);
-    //! download keypoints from device to host memory
-    void downloadKeypoints(const GpuMat& keypointsGPU, std::vector<KeyPoint>& keypoints);
-
-    //! download descriptors from device to host memory
-    void downloadDescriptors(const GpuMat& descriptorsGPU, std::vector<float>& descriptors);
-
-    //! finds the keypoints using fast hessian detector used in SURF
-    //! supports CV_8UC1 images
-    //! keypoints will have nFeature cols and 6 rows
-    //! keypoints.ptr<float>(X_ROW)[i] will contain x coordinate of i'th feature
-    //! keypoints.ptr<float>(Y_ROW)[i] will contain y coordinate of i'th feature
-    //! keypoints.ptr<float>(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature
-    //! keypoints.ptr<float>(OCTAVE_ROW)[i] will contain octave of i'th feature
-    //! keypoints.ptr<float>(SIZE_ROW)[i] will contain size of i'th feature
-    //! keypoints.ptr<float>(ANGLE_ROW)[i] will contain orientation of i'th feature
-    //! keypoints.ptr<float>(HESSIAN_ROW)[i] will contain response of i'th feature
-    void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints);
-    //! finds the keypoints and computes their descriptors.
-    //! Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction
-    void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,
-        bool useProvidedKeypoints = false);
-
-    void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints);
-    void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
-        bool useProvidedKeypoints = false);
-
-    void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, std::vector<float>& descriptors,
-        bool useProvidedKeypoints = false);
-
-    void releaseMemory();
-
-    // SURF parameters
-    double hessianThreshold;
-    int nOctaves;
-    int nOctaveLayers;
-    bool extended;
-    bool upright;
-
-    //! max keypoints = min(keypointsRatio * img.size().area(), 65535)
-    float keypointsRatio;
-
-    GpuMat sum, mask1, maskSum, intBuffer;
-
-    GpuMat det, trace;
-
-    GpuMat maxPosBuffer;
-};
-
-}} // namespace cv { namespace cuda {
-
-#endif // __OPENCV_NONFREE_CUDA_HPP__
diff --git a/modules/nonfree/include/opencv2/nonfree/features2d.hpp b/modules/nonfree/include/opencv2/nonfree/features2d.hpp
deleted file mode 100644 (file)
index 6a75e99..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef __OPENCV_NONFREE_FEATURES_2D_HPP__
-#define __OPENCV_NONFREE_FEATURES_2D_HPP__
-
-#include "opencv2/features2d.hpp"
-
-namespace cv
-{
-
-/*!
- SIFT implementation.
-
- The class implements SIFT algorithm by D. Lowe.
-*/
-class CV_EXPORTS_W SIFT : public Feature2D
-{
-public:
-    CV_WRAP explicit SIFT( int nfeatures = 0, int nOctaveLayers = 3,
-          double contrastThreshold = 0.04, double edgeThreshold = 10,
-          double sigma = 1.6);
-
-    //! returns the descriptor size in floats (128)
-    CV_WRAP int descriptorSize() const;
-
-    //! returns the descriptor type
-    CV_WRAP int descriptorType() const;
-
-    //! returns the default norm type
-    CV_WRAP int defaultNorm() const;
-
-    //! finds the keypoints using SIFT algorithm
-    void operator()(InputArray img, InputArray mask,
-                    std::vector<KeyPoint>& keypoints) const;
-    //! finds the keypoints and computes descriptors for them using SIFT algorithm.
-    //! Optionally it can compute descriptors for the user-provided keypoints
-    void operator()(InputArray img, InputArray mask,
-                    std::vector<KeyPoint>& keypoints,
-                    OutputArray descriptors,
-                    bool useProvidedKeypoints = false) const;
-
-    AlgorithmInfo* info() const;
-
-    void buildGaussianPyramid( const Mat& base, std::vector<Mat>& pyr, int nOctaves ) const;
-    void buildDoGPyramid( const std::vector<Mat>& pyr, std::vector<Mat>& dogpyr ) const;
-    void findScaleSpaceExtrema( const std::vector<Mat>& gauss_pyr, const std::vector<Mat>& dog_pyr,
-                                std::vector<KeyPoint>& keypoints ) const;
-
-protected:
-    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask = noArray() ) const;
-    void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
-
-    CV_PROP_RW int nfeatures;
-    CV_PROP_RW int nOctaveLayers;
-    CV_PROP_RW double contrastThreshold;
-    CV_PROP_RW double edgeThreshold;
-    CV_PROP_RW double sigma;
-};
-
-typedef SIFT SiftFeatureDetector;
-typedef SIFT SiftDescriptorExtractor;
-
-/*!
- SURF implementation.
-
- The class implements SURF algorithm by H. Bay et al.
- */
-class CV_EXPORTS_W SURF : public Feature2D
-{
-public:
-    //! the default constructor
-    CV_WRAP SURF();
-    //! the full constructor taking all the necessary parameters
-    explicit CV_WRAP SURF(double hessianThreshold,
-                  int nOctaves = 4, int nOctaveLayers = 2,
-                  bool extended = true, bool upright = false);
-
-    //! returns the descriptor size in float's (64 or 128)
-    CV_WRAP int descriptorSize() const;
-
-    //! returns the descriptor type
-    CV_WRAP int descriptorType() const;
-
-    //! returns the descriptor type
-    CV_WRAP int defaultNorm() const;
-
-    //! finds the keypoints using fast hessian detector used in SURF
-    void operator()(InputArray img, InputArray mask,
-                    CV_OUT std::vector<KeyPoint>& keypoints) const;
-    //! finds the keypoints and computes their descriptors. Optionally it can compute descriptors for the user-provided keypoints
-    void operator()(InputArray img, InputArray mask,
-                    CV_OUT std::vector<KeyPoint>& keypoints,
-                    OutputArray descriptors,
-                    bool useProvidedKeypoints = false) const;
-
-    AlgorithmInfo* info() const;
-
-    CV_PROP_RW double hessianThreshold;
-    CV_PROP_RW int nOctaves;
-    CV_PROP_RW int nOctaveLayers;
-    CV_PROP_RW bool extended;
-    CV_PROP_RW bool upright;
-
-protected:
-    void detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask = noArray() ) const;
-    void computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const;
-};
-
-typedef SURF SurfFeatureDetector;
-typedef SURF SurfDescriptorExtractor;
-
-} /* namespace cv */
-
-#endif
diff --git a/modules/nonfree/include/opencv2/nonfree/nonfree.hpp b/modules/nonfree/include/opencv2/nonfree/nonfree.hpp
deleted file mode 100644 (file)
index c1bb651..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                          License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifdef __OPENCV_BUILD
-#error this is a compatibility header which should not be used inside the OpenCV library
-#endif
-
-#include "opencv2/nonfree.hpp"
diff --git a/modules/nonfree/perf/perf_main.cpp b/modules/nonfree/perf/perf_main.cpp
deleted file mode 100644 (file)
index a5a76af..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#include "perf_precomp.hpp"
-#include "opencv2/ts/cuda_perf.hpp"
-
-static const char * impls[] = {
-#ifdef HAVE_CUDA
-    "cuda",
-#endif
-    "plain"
-};
-
-CV_PERF_TEST_MAIN_WITH_IMPLS(nonfree, impls, perf::printCudaInfo())
diff --git a/modules/nonfree/perf/perf_precomp.hpp b/modules/nonfree/perf/perf_precomp.hpp
deleted file mode 100644 (file)
index ed84be8..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifdef __GNUC__
-#  pragma GCC diagnostic ignored "-Wmissing-declarations"
-#  if defined __clang__ || defined __APPLE__
-#    pragma GCC diagnostic ignored "-Wmissing-prototypes"
-#    pragma GCC diagnostic ignored "-Wextra"
-#  endif
-#endif
-
-#ifndef __OPENCV_PERF_PRECOMP_HPP__
-#define __OPENCV_PERF_PRECOMP_HPP__
-
-#include "cvconfig.h"
-
-#include "opencv2/ts.hpp"
-#include "opencv2/nonfree.hpp"
-#include "opencv2/imgcodecs.hpp"
-
-#include "opencv2/opencv_modules.hpp"
-
-#ifdef HAVE_OPENCV_OCL
-#  include "opencv2/nonfree/ocl.hpp"
-#endif
-
-#ifdef HAVE_CUDA
-#  include "opencv2/nonfree/cuda.hpp"
-#endif
-
-#ifdef GTEST_CREATE_SHARED_LIBRARY
-#error no modules except ts should have GTEST_CREATE_SHARED_LIBRARY defined
-#endif
-
-#endif
diff --git a/modules/nonfree/perf/perf_surf.cpp b/modules/nonfree/perf/perf_surf.cpp
deleted file mode 100644 (file)
index 09de523..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-#include "perf_precomp.hpp"
-
-using namespace std;
-using namespace cv;
-using namespace perf;
-using std::tr1::make_tuple;
-using std::tr1::get;
-
-typedef perf::TestBaseWithParam<std::string> surf;
-
-#define SURF_IMAGES \
-    "cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
-    "stitching/a3.png"
-
-PERF_TEST_P(surf, detect, testing::Values(SURF_IMAGES))
-{
-    string filename = getDataPath(GetParam());
-    Mat frame = imread(filename, IMREAD_GRAYSCALE);
-    ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
-
-    Mat mask;
-    declare.in(frame).time(90);
-    SURF detector;
-    vector<KeyPoint> points;
-
-    TEST_CYCLE() detector(frame, mask, points);
-
-    SANITY_CHECK_KEYPOINTS(points, 1e-3);
-}
-
-PERF_TEST_P(surf, extract, testing::Values(SURF_IMAGES))
-{
-    string filename = getDataPath(GetParam());
-    Mat frame = imread(filename, IMREAD_GRAYSCALE);
-    ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
-
-    Mat mask;
-    declare.in(frame).time(90);
-
-    SURF detector;
-    vector<KeyPoint> points;
-    vector<float> descriptors;
-    detector(frame, mask, points);
-
-    TEST_CYCLE() detector(frame, mask, points, descriptors, true);
-
-    SANITY_CHECK(descriptors, 1e-4);
-}
-
-PERF_TEST_P(surf, full, testing::Values(SURF_IMAGES))
-{
-    string filename = getDataPath(GetParam());
-    Mat frame = imread(filename, IMREAD_GRAYSCALE);
-    ASSERT_FALSE(frame.empty()) << "Unable to load source image " << filename;
-
-    Mat mask;
-    declare.in(frame).time(90);
-    SURF detector;
-    vector<KeyPoint> points;
-    vector<float> descriptors;
-
-    TEST_CYCLE() detector(frame, mask, points, descriptors, false);
-
-    SANITY_CHECK_KEYPOINTS(points, 1e-3);
-    SANITY_CHECK(descriptors, 1e-4);
-}
diff --git a/modules/nonfree/perf/perf_surf.cuda.cpp b/modules/nonfree/perf/perf_surf.cuda.cpp
deleted file mode 100644 (file)
index 4cdbea9..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "perf_precomp.hpp"
-
-#ifdef HAVE_CUDA
-
-#include "opencv2/ts/cuda_perf.hpp"
-
-using namespace std;
-using namespace testing;
-using namespace perf;
-
-//////////////////////////////////////////////////////////////////////
-// SURF
-
-#ifdef HAVE_OPENCV_CUDAARITHM
-
-DEF_PARAM_TEST_1(Image, string);
-
-PERF_TEST_P(Image, CUDA_SURF,
-            Values<std::string>("gpu/perf/aloe.png"))
-{
-    declare.time(50.0);
-
-    const cv::Mat img = readImage(GetParam(), cv::IMREAD_GRAYSCALE);
-    ASSERT_FALSE(img.empty());
-
-    if (PERF_RUN_CUDA())
-    {
-        cv::cuda::SURF_CUDA d_surf;
-
-        const cv::cuda::GpuMat d_img(img);
-        cv::cuda::GpuMat d_keypoints, d_descriptors;
-
-        TEST_CYCLE() d_surf(d_img, cv::cuda::GpuMat(), d_keypoints, d_descriptors);
-
-        std::vector<cv::KeyPoint> gpu_keypoints;
-        d_surf.downloadKeypoints(d_keypoints, gpu_keypoints);
-
-        cv::Mat gpu_descriptors(d_descriptors);
-
-        sortKeyPoints(gpu_keypoints, gpu_descriptors);
-
-        SANITY_CHECK_KEYPOINTS(gpu_keypoints);
-        SANITY_CHECK(gpu_descriptors, 1e-3);
-    }
-    else
-    {
-        cv::SURF surf;
-
-        std::vector<cv::KeyPoint> cpu_keypoints;
-        cv::Mat cpu_descriptors;
-
-        TEST_CYCLE() surf(img, cv::noArray(), cpu_keypoints, cpu_descriptors);
-
-        SANITY_CHECK_KEYPOINTS(cpu_keypoints);
-        SANITY_CHECK(cpu_descriptors);
-    }
-}
-
-#endif // HAVE_OPENCV_CUDAARITHM
-
-#endif // HAVE_CUDA
diff --git a/modules/nonfree/perf/perf_surf.ocl.cpp b/modules/nonfree/perf/perf_surf.ocl.cpp
deleted file mode 100644 (file)
index cc48aa2..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
-// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// @Authors
-//    Peng Xiao, pengxiao@multicorewareinc.com
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors as is and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "perf_precomp.hpp"
-
-#ifdef HAVE_OPENCV_OCL
-
-using namespace cv;
-using namespace cv::ocl;
-using namespace std;
-
-typedef perf::TestBaseWithParam<std::string> OCL_SURF;
-
-#define SURF_IMAGES \
-    "cv/detectors_descriptors_evaluation/images_datasets/leuven/img1.png",\
-    "stitching/a3.png"
-
-PERF_TEST_P(OCL_SURF, DISABLED_with_data_transfer, testing::Values(SURF_IMAGES))
-{
-    string filename = getDataPath(GetParam());
-    Mat img = imread(filename, IMREAD_GRAYSCALE);
-    ASSERT_FALSE(img.empty());
-
-    SURF_OCL d_surf;
-    oclMat d_keypoints;
-    oclMat d_descriptors;
-    Mat cpu_kp;
-    Mat cpu_dp;
-
-    declare.time(60);
-
-    TEST_CYCLE()
-    {
-        oclMat d_src(img);
-
-        d_surf(d_src, oclMat(), d_keypoints, d_descriptors);
-
-        d_keypoints.download(cpu_kp);
-        d_descriptors.download(cpu_dp);
-    }
-
-    SANITY_CHECK(cpu_kp, 1);
-    SANITY_CHECK(cpu_dp, 1);
-}
-
-PERF_TEST_P(OCL_SURF, DISABLED_without_data_transfer, testing::Values(SURF_IMAGES))
-{
-    string filename = getDataPath(GetParam());
-    Mat img = imread(filename, IMREAD_GRAYSCALE);
-    ASSERT_FALSE(img.empty());
-
-    SURF_OCL d_surf;
-    oclMat d_keypoints;
-    oclMat d_descriptors;
-    oclMat d_src(img);
-
-    declare.time(60);
-
-    TEST_CYCLE() d_surf(d_src, oclMat(), d_keypoints, d_descriptors);
-
-    Mat cpu_kp;
-    Mat cpu_dp;
-    d_keypoints.download(cpu_kp);
-    d_descriptors.download(cpu_dp);
-    SANITY_CHECK(cpu_kp, 1);
-    SANITY_CHECK(cpu_dp, 1);
-}
-
-#endif // HAVE_OPENCV_OCL
diff --git a/modules/nonfree/src/cuda/surf.cu b/modules/nonfree/src/cuda/surf.cu
deleted file mode 100644 (file)
index 9a141ab..0000000
+++ /dev/null
@@ -1,960 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "opencv2/opencv_modules.hpp"
-
-#ifdef HAVE_OPENCV_CUDAARITHM
-
-#include "opencv2/core/cuda/common.hpp"
-#include "opencv2/core/cuda/limits.hpp"
-#include "opencv2/core/cuda/saturate_cast.hpp"
-#include "opencv2/core/cuda/reduce.hpp"
-#include "opencv2/core/cuda/utility.hpp"
-#include "opencv2/core/cuda/functional.hpp"
-#include "opencv2/core/cuda/filters.hpp"
-
-namespace cv { namespace cuda { namespace device
-{
-    namespace surf
-    {
-        void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold);
-        void loadOctaveConstants(int octave, int layer_rows, int layer_cols);
-
-        void bindImgTex(PtrStepSzb img);
-        size_t bindSumTex(PtrStepSz<unsigned int> sum);
-        size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum);
-
-        void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
-            int octave, int nOctaveLayer);
-
-        void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
-            int img_rows, int img_cols, int octave, bool use_mask, int nLayers);
-
-        void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
-            float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
-            unsigned int* featureCounter);
-
-        void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures);
-
-        void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
-    }
-}}}
-
-namespace cv { namespace cuda { namespace device
-{
-    namespace surf
-    {
-        ////////////////////////////////////////////////////////////////////////
-        // Global parameters
-
-        // The maximum number of features (before subpixel interpolation) that memory is reserved for.
-        __constant__ int c_max_candidates;
-        // The maximum number of features that memory is reserved for.
-        __constant__ int c_max_features;
-        // The image size.
-        __constant__ int c_img_rows;
-        __constant__ int c_img_cols;
-        // The number of layers.
-        __constant__ int c_nOctaveLayers;
-        // The hessian threshold.
-        __constant__ float c_hessianThreshold;
-
-        // The current octave.
-        __constant__ int c_octave;
-        // The current layer size.
-        __constant__ int c_layer_rows;
-        __constant__ int c_layer_cols;
-
-        void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold)
-        {
-            cudaSafeCall( cudaMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) );
-            cudaSafeCall( cudaMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) );
-            cudaSafeCall( cudaMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) );
-            cudaSafeCall( cudaMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) );
-            cudaSafeCall( cudaMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) );
-            cudaSafeCall( cudaMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) );
-        }
-
-        void loadOctaveConstants(int octave, int layer_rows, int layer_cols)
-        {
-            cudaSafeCall( cudaMemcpyToSymbol(c_octave, &octave, sizeof(octave)) );
-            cudaSafeCall( cudaMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) );
-            cudaSafeCall( cudaMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) );
-        }
-
-        ////////////////////////////////////////////////////////////////////////
-        // Integral image texture
-
-        texture<unsigned char, 2, cudaReadModeElementType> imgTex(0, cudaFilterModePoint, cudaAddressModeClamp);
-        texture<unsigned int, 2, cudaReadModeElementType> sumTex(0, cudaFilterModePoint, cudaAddressModeClamp);
-        texture<unsigned int, 2, cudaReadModeElementType> maskSumTex(0, cudaFilterModePoint, cudaAddressModeClamp);
-
-        void bindImgTex(PtrStepSzb img)
-        {
-            bindTexture(&imgTex, img);
-        }
-
-        size_t bindSumTex(PtrStepSz<uint> sum)
-        {
-            size_t offset;
-            cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>();
-            cudaSafeCall( cudaBindTexture2D(&offset, sumTex, sum.data, desc_sum, sum.cols, sum.rows, sum.step));
-            return offset / sizeof(uint);
-        }
-        size_t bindMaskSumTex(PtrStepSz<uint> maskSum)
-        {
-            size_t offset;
-            cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>();
-            cudaSafeCall( cudaBindTexture2D(&offset, maskSumTex, maskSum.data, desc_sum, maskSum.cols, maskSum.rows, maskSum.step));
-            return offset / sizeof(uint);
-        }
-
-        template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x)
-        {
-        #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200
-            typedef double real_t;
-        #else
-            typedef float  real_t;
-        #endif
-
-            float ratio = (float)newSize / oldSize;
-
-            real_t d = 0;
-
-            #pragma unroll
-            for (int k = 0; k < N; ++k)
-            {
-                int dx1 = __float2int_rn(ratio * src[k][0]);
-                int dy1 = __float2int_rn(ratio * src[k][1]);
-                int dx2 = __float2int_rn(ratio * src[k][2]);
-                int dy2 = __float2int_rn(ratio * src[k][3]);
-
-                real_t t = 0;
-                t += tex2D(sumTex, x + dx1, y + dy1);
-                t -= tex2D(sumTex, x + dx1, y + dy2);
-                t -= tex2D(sumTex, x + dx2, y + dy1);
-                t += tex2D(sumTex, x + dx2, y + dy2);
-
-                d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1));
-            }
-
-            return (float)d;
-        }
-
-        ////////////////////////////////////////////////////////////////////////
-        // Hessian
-
-        __constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };
-        __constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
-        __constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };
-
-        __host__ __device__ __forceinline__ int calcSize(int octave, int layer)
-        {
-            /* Wavelet size at first layer of first octave. */
-            const int HAAR_SIZE0 = 9;
-
-            /* Wavelet size increment between layers. This should be an even number,
-             such that the wavelet sizes in an octave are either all even or all odd.
-             This ensures that when looking for the neighbours of a sample, the layers
-             above and below are aligned correctly. */
-            const int HAAR_SIZE_INC = 6;
-
-            return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
-        }
-
-        __global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace)
-        {
-            // Determine the indices
-            const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2);
-            const int blockIdx_y = blockIdx.y % gridDim_y;
-            const int blockIdx_z = blockIdx.y / gridDim_y;
-
-            const int j = threadIdx.x + blockIdx.x * blockDim.x;
-            const int i = threadIdx.y + blockIdx_y * blockDim.y;
-            const int layer = blockIdx_z;
-
-            const int size = calcSize(c_octave, layer);
-
-            const int samples_i = 1 + ((c_img_rows - size) >> c_octave);
-            const int samples_j = 1 + ((c_img_cols - size) >> c_octave);
-
-            // Ignore pixels where some of the kernel is outside the image
-            const int margin = (size >> 1) >> c_octave;
-
-            if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j)
-            {
-                const float dx  = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), (j << c_octave));
-                const float dy  = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), (j << c_octave));
-                const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), (j << c_octave));
-
-                det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy;
-                trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy;
-            }
-        }
-
-        void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
-            int octave, int nOctaveLayers)
-        {
-            const int min_size = calcSize(octave, 0);
-            const int max_samples_i = 1 + ((img_rows - min_size) >> octave);
-            const int max_samples_j = 1 + ((img_cols - min_size) >> octave);
-
-            dim3 threads(16, 16);
-
-            dim3 grid;
-            grid.x = divUp(max_samples_j, threads.x);
-            grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2);
-
-            icvCalcLayerDetAndTrace<<<grid, threads>>>(det, trace);
-            cudaSafeCall( cudaGetLastError() );
-
-            cudaSafeCall( cudaDeviceSynchronize() );
-        }
-
-        ////////////////////////////////////////////////////////////////////////
-        // NONMAX
-
-        // Mask test pattern: one {x1, y1, x2, y2, weight} box on a 9x9 base
-        // grid, scaled to the current filter size before sampling.
-        __constant__ float c_DM[5] = {0, 0, 9, 9, 1};
-
-        // Mask policy for icvFindMaximaInLayer: a candidate is kept only if
-        // the mask is set (on average) over the filter's support region.
-        struct WithMask
-        {
-            static __device__ bool check(int sum_i, int sum_j, int size)
-            {
-                // Scale factor from the 9x9 base pattern to the actual size.
-                float ratio = (float)size / 9.0f;
-
-                float d = 0;
-
-                int dx1 = __float2int_rn(ratio * c_DM[0]);
-                int dy1 = __float2int_rn(ratio * c_DM[1]);
-                int dx2 = __float2int_rn(ratio * c_DM[2]);
-                int dy2 = __float2int_rn(ratio * c_DM[3]);
-
-                // Box sum via the four corner reads (+ - - +); maskSumTex
-                // presumably holds the integral image of the mask --
-                // NOTE(review): confirm at the texture binding site.
-                float t = 0;
-                t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1);
-                t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2);
-                t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1);
-                t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2);
-
-                // Normalize to the mean mask value over the box.
-                d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1));
-
-                // Accept when at least half of the support is masked in.
-                return (d >= 0.5f);
-            }
-        };
-
-        // Finds keypoint candidates: strict local maxima of the hessian
-        // determinant over a 3x3x3 (x, y, scale-layer) neighbourhood that
-        // exceed c_hessianThreshold and pass the Mask policy
-        // (WithMask / WithOutMask). Accepted candidates are appended to
-        // maxPosBuffer with an atomic counter.
-        template <typename Mask>
-        __global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer,
-            unsigned int* maxCounter)
-        {
-            #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
-
-            // Shared cache of det values: three planes (layer-1, layer,
-            // layer+1), blockDim.x * blockDim.y floats each.
-            extern __shared__ float N9[];
-
-            // The hidx variables are the indices to the hessian buffer.
-            // grid.y packs (row block, layer); unpack it here.
-            const int gridDim_y = gridDim.y / c_nOctaveLayers;
-            const int blockIdx_y = blockIdx.y % gridDim_y;
-            const int blockIdx_z = blockIdx.y / gridDim_y;
-
-            // +1: layer 0 has no layer below it, so maxima start at layer 1.
-            const int layer = blockIdx_z + 1;
-
-            const int size = calcSize(c_octave, layer);
-
-            // Ignore pixels without a 3x3x3 neighbourhood in the layer above
-            const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1;
-
-            // Blocks overlap by 2 pixels so every interior thread has a full
-            // halo in shared memory (matches the launcher's blockDim - 2).
-            const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1;
-            const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1;
-
-            // Is this thread within the hessian buffer?
-            const int zoff = blockDim.x * blockDim.y;
-            const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff;
-            // Loads are clamped to the buffer bounds so edge threads still
-            // read valid memory (their results are discarded below).
-            N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
-            N9[localLin       ] = det.ptr(c_layer_rows * (layer    ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
-            N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
-            __syncthreads();
-
-            // Only interior threads (full halo available) evaluate a pixel.
-            if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1)
-            {
-                float val0 = N9[localLin];
-
-                if (val0 > c_hessianThreshold)
-                {
-                    // Coordinates for the start of the wavelet in the sum image. There
-                    // is some integer division involved, so don't try to simplify this
-                    // (cancel out sampleStep) without checking the result is the same
-                    const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave;
-                    const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave;
-
-                    if (Mask::check(sum_i, sum_j, size))
-                    {
-                        // Check to see if we have a max (in its 26 neighbours)
-                        const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff]
-                        &&                   val0 > N9[localLin     - blockDim.x - zoff]
-                        &&                   val0 > N9[localLin + 1 - blockDim.x - zoff]
-                        &&                   val0 > N9[localLin - 1              - zoff]
-                        &&                   val0 > N9[localLin                  - zoff]
-                        &&                   val0 > N9[localLin + 1              - zoff]
-                        &&                   val0 > N9[localLin - 1 + blockDim.x - zoff]
-                        &&                   val0 > N9[localLin     + blockDim.x - zoff]
-                        &&                   val0 > N9[localLin + 1 + blockDim.x - zoff]
-
-                        &&                   val0 > N9[localLin - 1 - blockDim.x]
-                        &&                   val0 > N9[localLin     - blockDim.x]
-                        &&                   val0 > N9[localLin + 1 - blockDim.x]
-                        &&                   val0 > N9[localLin - 1             ]
-                        &&                   val0 > N9[localLin + 1             ]
-                        &&                   val0 > N9[localLin - 1 + blockDim.x]
-                        &&                   val0 > N9[localLin     + blockDim.x]
-                        &&                   val0 > N9[localLin + 1 + blockDim.x]
-
-                        &&                   val0 > N9[localLin - 1 - blockDim.x + zoff]
-                        &&                   val0 > N9[localLin     - blockDim.x + zoff]
-                        &&                   val0 > N9[localLin + 1 - blockDim.x + zoff]
-                        &&                   val0 > N9[localLin - 1              + zoff]
-                        &&                   val0 > N9[localLin                  + zoff]
-                        &&                   val0 > N9[localLin + 1              + zoff]
-                        &&                   val0 > N9[localLin - 1 + blockDim.x + zoff]
-                        &&                   val0 > N9[localLin     + blockDim.x + zoff]
-                        &&                   val0 > N9[localLin + 1 + blockDim.x + zoff]
-                        ;
-
-                        if(condmax)
-                        {
-                            // Append the candidate; ind may exceed the buffer,
-                            // in which case the candidate is silently dropped.
-                            unsigned int ind = atomicInc(maxCounter,(unsigned int) -1);
-
-                            if (ind < c_max_candidates)
-                            {
-                                // Store the sign of the trace as the laplacian flag.
-                                const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]);
-
-                                maxPosBuffer[ind] = make_int4(j, i, layer, laplacian);
-                            }
-                        }
-                    }
-                }
-            }
-
-            #endif
-        }
-
-        // Host launcher for icvFindMaximaInLayer. Blocks overlap by 2 in x
-        // and y (the kernel advances by blockDim - 2) so each block carries a
-        // one-pixel halo; shared memory holds three det planes per block.
-        void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
-            int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers)
-        {
-            const int layer_rows = img_rows >> octave;
-            const int layer_cols = img_cols >> octave;
-
-            // Smallest margin over all layers (layer 1 uses calcSize(octave, 2)
-            // for its layer-above check) -- sizes the grid for the worst case.
-            const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1;
-
-            dim3 threads(16, 16);
-
-            dim3 grid;
-            grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2);
-            // grid.y packs the row blocks of every maxima layer.
-            grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers;
-
-            // Three blockDim.x * blockDim.y float planes (layer-1, layer, layer+1).
-            const size_t smem_size = threads.x * threads.y * 3 * sizeof(float);
-
-            if (use_mask)
-                icvFindMaximaInLayer<WithMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter);
-            else
-                icvFindMaximaInLayer<WithOutMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter);
-
-            cudaSafeCall( cudaGetLastError() );
-
-            cudaSafeCall( cudaDeviceSynchronize() );
-        }
-
-        ////////////////////////////////////////////////////////////////////////
-        // INTERPOLATION
-
-        // Refines each candidate maximum to subpixel/subscale accuracy by
-        // fitting a quadratic to the 3x3x3 det neighbourhood (one block per
-        // candidate, 3x3x3 threads). Accepted keypoints are appended to the
-        // parallel feature arrays with an atomic counter.
-        __global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer,
-            float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
-            unsigned int* featureCounter)
-        {
-            #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
-
-            const int4 maxPos = maxPosBuffer[blockIdx.x];
-
-            // Each thread loads one cell of the 3x3x3 neighbourhood around
-            // the candidate (x = maxPos.x, y = maxPos.y, layer = maxPos.z).
-            const int j = maxPos.x - 1 + threadIdx.x;
-            const int i = maxPos.y - 1 + threadIdx.y;
-            const int layer = maxPos.z - 1 + threadIdx.z;
-
-            __shared__ float N9[3][3][3];
-
-            N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j];
-            __syncthreads();
-
-            // The remaining work is serial; thread (0,0,0) does it alone.
-            if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
-            {
-                // Negated first derivatives of det (right-hand side of H*x = dD).
-                __shared__ float dD[3];
-
-                //dx
-                dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]);
-                //dy
-                dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]);
-                //ds
-                dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]);
-
-                // Hessian of det from central differences.
-                __shared__ float H[3][3];
-
-                //dxx
-                H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2];
-                //dxy
-                H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]);
-                //dxs
-                H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]);
-                //dyx = dxy
-                H[1][0] = H[0][1];
-                //dyy
-                H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1];
-                //dys
-                H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]);
-                //dsx = dxs
-                H[2][0] = H[0][2];
-                //dsy = dys
-                H[2][1] = H[1][2];
-                //dss
-                H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1];
-
-                // Subpixel offset (x, y, scale) of the fitted extremum.
-                __shared__ float x[3];
-
-                if (solve3x3(H, dD, x))
-                {
-                    if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f)
-                    {
-                        // if the step is within the interpolation region, perform it
-
-                        const int size = calcSize(c_octave, maxPos.z);
-
-                        // Top-left of the filter support in the full-resolution image.
-                        const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave;
-                        const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave;
-
-                        const float center_i = sum_i + (float)(size - 1) / 2;
-                        const float center_j = sum_j + (float)(size - 1) / 2;
-
-                        // Apply the subpixel offset, scaled by the octave's sampling step.
-                        const float px = center_j + x[0] * (1 << c_octave);
-                        const float py = center_i + x[1] * (1 << c_octave);
-
-                        // Interpolate the filter size between adjacent layers.
-                        const int ds = size - calcSize(c_octave, maxPos.z - 1);
-                        const float psize = roundf(size + x[2] * ds);
-
-                        /* The sampling intervals and wavelet sized for selecting an orientation
-                         and building the keypoint descriptor are defined relative to 's' */
-                        const float s = psize * 1.2f / 9.0f;
-
-                        /* To find the dominant orientation, the gradients in x and y are
-                         sampled in a circle of radius 6s using wavelets of size 4s.
-                         We ensure the gradient wavelet size is even to ensure the
-                         wavelet pattern is balanced and symmetric around its center */
-                        const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
-
-                        // check when grad_wav_size is too big
-                        if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size)
-                        {
-                            // Get a new feature index.
-                            unsigned int ind = atomicInc(featureCounter, (unsigned int)-1);
-
-                            if (ind < c_max_features)
-                            {
-                                featureX[ind] = px;
-                                featureY[ind] = py;
-                                featureLaplacian[ind] = maxPos.w;
-                                featureOctave[ind] = c_octave;
-                                featureSize[ind] = psize;
-                                // Response strength = det value at the candidate centre.
-                                featureHessian[ind] = N9[1][1][1];
-                            }
-                        } // grad_wav_size check
-                    } // If the subpixel interpolation worked
-                }
-            } // If this is thread 0.
-
-            #endif
-        }
-
-        // Host launcher for icvInterpolateKeypoint: one 3x3x3 block per
-        // detected candidate (maxCounter of them).
-        void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
-            float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
-            unsigned int* featureCounter)
-        {
-            dim3 threads;
-            threads.x = 3;
-            threads.y = 3;
-            threads.z = 3;
-
-            dim3 grid;
-            grid.x = maxCounter;
-
-            icvInterpolateKeypoint<<<grid, threads>>>(det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter);
-            cudaSafeCall( cudaGetLastError() );
-
-            cudaSafeCall( cudaDeviceSynchronize() );
-        }
-
-        ////////////////////////////////////////////////////////////////////////
-        // Orientation
-
-        #define ORI_SEARCH_INC 5   // step between tested orientations (degrees)
-        #define ORI_WIN        60  // width of the sliding orientation window (degrees)
-        #define ORI_SAMPLES    113 // sample points in the circular pattern (c_aptX/Y/W)
-
-        __constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6};
-        __constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0};
-        __constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 
0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f};
-
-        // Haar wavelet patterns for the x/y gradient responses; each row is
-        // presumably {x1, y1, x2, y2, weight} on a 4x4 base grid, scaled to
-        // grad_wav_size by icvCalcHaarPatternSum (cf. c_DM's use in WithMask).
-        __constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
-        __constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
-
-        // Computes the dominant orientation of each keypoint (one 32x4 block
-        // per feature): Haar gradient responses are sampled at ORI_SAMPLES
-        // points around the keypoint, then a 60-degree window is slid over
-        // 72 directions (5-degree steps) and the direction with the largest
-        // summed gradient vector wins.
-        __global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir)
-        {
-            // Per-sample Haar responses and their angles (ORI_SAMPLES <= 128).
-            __shared__ float s_X[128];
-            __shared__ float s_Y[128];
-            __shared__ float s_angle[128];
-
-            // Per-row reduction scratch (32 lanes x 4 rows).
-            __shared__ float s_sumx[32 * 4];
-            __shared__ float s_sumy[32 * 4];
-
-            /* The sampling intervals and wavelet sized for selecting an orientation
-             and building the keypoint descriptor are defined relative to 's' */
-            const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
-
-            /* To find the dominant orientation, the gradients in x and y are
-             sampled in a circle of radius 6s using wavelets of size 4s.
-             We ensure the gradient wavelet size is even to ensure the
-             wavelet pattern is balanced and symmetric around its center */
-            const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
-
-            // check when grad_wav_size is too big
-            if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size)
-                return;
-
-            // Calc X, Y, angle and store it to shared memory
-            const int tid = threadIdx.y * blockDim.x + threadIdx.x;
-
-            float X = 0.0f, Y = 0.0f, angle = 0.0f;
-
-            if (tid < ORI_SAMPLES)
-            {
-                const float margin = (float)(grad_wav_size - 1) / 2.0f;
-                const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin);
-                const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin);
-
-                // Skip samples whose wavelet support falls outside the image;
-                // they contribute zero response.
-                if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size &&
-                    x >= 0 && x < (c_img_cols + 1) - grad_wav_size)
-                {
-                    // Gaussian-weighted (c_aptW) Haar responses in x and y.
-                    X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x);
-                    Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x);
-
-                    // Sample angle in degrees, [0, 360).
-                    angle = atan2f(Y, X);
-                    if (angle < 0)
-                        angle += 2.0f * CV_PI_F;
-                    angle *= 180.0f / CV_PI_F;
-                }
-            }
-            s_X[tid] = X;
-            s_Y[tid] = Y;
-            s_angle[tid] = angle;
-            __syncthreads();
-
-            float bestx = 0, besty = 0, best_mod = 0;
-
-        #if __CUDA_ARCH__ >= 200
-            #pragma unroll
-        #endif
-            // 18 iterations x 4 rows (threadIdx.y) = 72 candidate directions.
-            for (int i = 0; i < 18; ++i)
-            {
-                const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC;
-
-                // Each of the 32 lanes accumulates up to 4 samples
-                // (tid, tid+32, tid+64, tid+96) whose angle lies within the
-                // ORI_WIN-degree window centred at 'dir' (with wrap-around).
-                float sumx = 0.0f, sumy = 0.0f;
-                int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir);
-                if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
-                {
-                    sumx = s_X[threadIdx.x];
-                    sumy = s_Y[threadIdx.x];
-                }
-                d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir);
-                if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
-                {
-                    sumx += s_X[threadIdx.x + 32];
-                    sumy += s_Y[threadIdx.x + 32];
-                }
-                d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir);
-                if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
-                {
-                    sumx += s_X[threadIdx.x + 64];
-                    sumy += s_Y[threadIdx.x + 64];
-                }
-                d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir);
-                if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
-                {
-                    sumx += s_X[threadIdx.x + 96];
-                    sumy += s_Y[threadIdx.x + 96];
-                }
-
-                // Sum (sumx, sumy) across the 32 lanes of this row.
-                plus<float> op;
-                device::reduce<32>(smem_tuple(s_sumx + threadIdx.y * 32, s_sumy + threadIdx.y * 32),
-                                   thrust::tie(sumx, sumy), threadIdx.x, thrust::make_tuple(op, op));
-
-                // Track the window with the largest squared gradient magnitude.
-                const float temp_mod = sumx * sumx + sumy * sumy;
-                if (temp_mod > best_mod)
-                {
-                    best_mod = temp_mod;
-                    bestx = sumx;
-                    besty = sumy;
-                }
-
-                __syncthreads();
-            }
-
-            // Lane 0 of each row publishes its row's best; s_X/s_Y/s_angle
-            // are reused here (s_angle now holds the squared magnitude).
-            if (threadIdx.x == 0)
-            {
-                s_X[threadIdx.y] = bestx;
-                s_Y[threadIdx.y] = besty;
-                s_angle[threadIdx.y] = best_mod;
-            }
-            __syncthreads();
-
-            // Thread (0,0) picks the best of the 4 rows and writes the result.
-            if (threadIdx.x == 0 && threadIdx.y == 0)
-            {
-                int bestIdx = 0;
-
-                if (s_angle[1] > s_angle[bestIdx])
-                    bestIdx = 1;
-                if (s_angle[2] > s_angle[bestIdx])
-                    bestIdx = 2;
-                if (s_angle[3] > s_angle[bestIdx])
-                    bestIdx = 3;
-
-                float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]);
-                if (kp_dir < 0)
-                    kp_dir += 2.0f * CV_PI_F;
-                kp_dir *= 180.0f / CV_PI_F;
-
-                // Flip to the opposite angular convention; snap ~360 to 0.
-                kp_dir = 360.0f - kp_dir;
-                if (::fabsf(kp_dir - 360.f) < numeric_limits<float>::epsilon())
-                    kp_dir = 0.f;
-
-                featureDir[blockIdx.x] = kp_dir;
-            }
-        }
-
-        #undef ORI_SEARCH_INC
-        #undef ORI_WIN
-        #undef ORI_SAMPLES
-
-        // Host launcher for icvCalcOrientation: one 32x4 block per feature.
-        void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures)
-        {
-            dim3 threads;
-            threads.x = 32;
-            threads.y = 4;
-
-            dim3 grid;
-            grid.x = nFeatures;
-
-            icvCalcOrientation<<<grid, threads>>>(featureX, featureY, featureSize, featureDir);
-            cudaSafeCall( cudaGetLastError() );
-
-            cudaSafeCall( cudaDeviceSynchronize() );
-        }
-
-        ////////////////////////////////////////////////////////////////////////
-        // Descriptors
-
-        #define PATCH_SZ 20
-
-        __constant__ float c_DW[PATCH_SZ * PATCH_SZ] =
-        {
-            3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f,
-            8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
-            1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
-            3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
-            5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
-            9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
-            0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
-            0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
-            0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
-            0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
-            0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
-            0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
-            0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
-            0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
-            9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
-            5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
-            3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
-            1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
-            8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
-            3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f
-        };
-
-        struct WinReader
-        {
-            typedef uchar elem_type;
-
-            __device__ __forceinline__ uchar operator ()(int i, int j) const
-            {
-                float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir;
-                float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir;
-
-                return tex2D(imgTex, pixel_x, pixel_y);
-            }
-
-            float centerX;
-            float centerY;
-            float win_offset;
-            float cos_dir;
-            float sin_dir;
-            int width;
-            int height;
-        };
-
-        __device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
-                                   float& dx, float& dy);
-
-        __device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
-                                   float& dx, float& dy)
-        {
-            __shared__ float s_PATCH[PATCH_SZ + 1][PATCH_SZ + 1];
-
-            dx = dy = 0.0f;
-
-            WinReader win;
-
-            win.centerX = featureX[blockIdx.x];
-            win.centerY = featureY[blockIdx.x];
-
-            // The sampling intervals and wavelet sized for selecting an orientation
-            // and building the keypoint descriptor are defined relative to 's'
-            const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
-
-            // Extract a window of pixels around the keypoint of size 20s
-            const int win_size = (int)((PATCH_SZ + 1) * s);
-
-            win.width = win.height = win_size;
-
-            // Nearest neighbour version (faster)
-            win.win_offset = -(win_size - 1.0f) / 2.0f;
-
-            float descriptor_dir = 360.0f - featureDir[blockIdx.x];
-            if (::fabsf(descriptor_dir - 360.f) < numeric_limits<float>::epsilon())
-                descriptor_dir = 0.f;
-            descriptor_dir *= CV_PI_F / 180.0f;
-            sincosf(descriptor_dir, &win.sin_dir, &win.cos_dir);
-
-            const int tid = threadIdx.y * blockDim.x + threadIdx.x;
-
-            const int xLoadInd = tid % (PATCH_SZ + 1);
-            const int yLoadInd = tid / (PATCH_SZ + 1);
-
-            if (yLoadInd < (PATCH_SZ + 1))
-            {
-                if (s > 1)
-                {
-                    AreaFilter<WinReader> filter(win, s, s);
-                    s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd, xLoadInd);
-                }
-                else
-                {
-                    LinearFilter<WinReader> filter(win);
-                    s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd * s, xLoadInd * s);
-                }
-            }
-
-            __syncthreads();
-
-            const int xPatchInd = threadIdx.x % 5;
-            const int yPatchInd = threadIdx.x / 5;
-
-            if (yPatchInd < 5)
-            {
-                const int xBlockInd = threadIdx.y % 4;
-                const int yBlockInd = threadIdx.y / 4;
-
-                const int xInd = xBlockInd * 5 + xPatchInd;
-                const int yInd = yBlockInd * 5 + yPatchInd;
-
-                const float dw = c_DW[yInd * PATCH_SZ + xInd];
-
-                dx = (s_PATCH[yInd    ][xInd + 1] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd + 1][xInd    ]) * dw;
-                dy = (s_PATCH[yInd + 1][xInd    ] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd    ][xInd + 1]) * dw;
-            }
-        }
-
-        __global__ void compute_descriptors_64(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
-        {
-            __shared__ float smem[32 * 16];
-
-            float* sRow = smem + threadIdx.y * 32;
-
-            float dx, dy;
-            calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
-
-            float dxabs = ::fabsf(dx);
-            float dyabs = ::fabsf(dy);
-
-            plus<float> op;
-
-            reduce<32>(sRow, dx, threadIdx.x, op);
-            reduce<32>(sRow, dy, threadIdx.x, op);
-            reduce<32>(sRow, dxabs, threadIdx.x, op);
-            reduce<32>(sRow, dyabs, threadIdx.x, op);
-
-            float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y;
-
-            // write dx, dy, |dx|, |dy|
-            if (threadIdx.x == 0)
-                *descriptors_block = make_float4(dx, dy, dxabs, dyabs);
-        }
-
-        __global__ void compute_descriptors_128(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
-        {
-            __shared__ float smem[32 * 16];
-
-            float* sRow = smem + threadIdx.y * 32;
-
-            float dx, dy;
-            calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
-
-            float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y * 2;
-
-            plus<float> op;
-
-            float d1 = 0.0f;
-            float d2 = 0.0f;
-            float abs1 = 0.0f;
-            float abs2 = 0.0f;
-
-            if (dy >= 0)
-            {
-                d1 = dx;
-                abs1 = ::fabsf(dx);
-            }
-            else
-            {
-                d2 = dx;
-                abs2 = ::fabsf(dx);
-            }
-
-            reduce<32>(sRow, d1, threadIdx.x, op);
-            reduce<32>(sRow, d2, threadIdx.x, op);
-            reduce<32>(sRow, abs1, threadIdx.x, op);
-            reduce<32>(sRow, abs2, threadIdx.x, op);
-
-            // write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0)
-            if (threadIdx.x == 0)
-                descriptors_block[0] = make_float4(d1, abs1, d2, abs2);
-
-            if (dx >= 0)
-            {
-                d1 = dy;
-                abs1 = ::fabsf(dy);
-                d2 = 0.0f;
-                abs2 = 0.0f;
-            }
-            else
-            {
-                d1 = 0.0f;
-                abs1 = 0.0f;
-                d2 = dy;
-                abs2 = ::fabsf(dy);
-            }
-
-            reduce<32>(sRow, d1, threadIdx.x, op);
-            reduce<32>(sRow, d2, threadIdx.x, op);
-            reduce<32>(sRow, abs1, threadIdx.x, op);
-            reduce<32>(sRow, abs2, threadIdx.x, op);
-
-            // write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0)
-            if (threadIdx.x == 0)
-                descriptors_block[1] = make_float4(d1, abs1, d2, abs2);
-        }
-
-        template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors)
-        {
-            __shared__ float smem[BLOCK_DIM_X];
-            __shared__ float s_len;
-
-            // no need for thread ID
-            float* descriptor_base = descriptors.ptr(blockIdx.x);
-
-            // read in the unnormalized descriptor values (squared)
-            const float val = descriptor_base[threadIdx.x];
-
-            float len = val * val;
-            reduce<BLOCK_DIM_X>(smem, len, threadIdx.x, plus<float>());
-
-            if (threadIdx.x == 0)
-                s_len = ::sqrtf(len);
-
-            __syncthreads();
-
-            // normalize and store in output
-            descriptor_base[threadIdx.x] = val / s_len;
-        }
-
-        void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures)
-        {
-            // compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D
-
-            if (descriptors.cols == 64)
-            {
-                compute_descriptors_64<<<nFeatures, dim3(32, 16)>>>(descriptors, featureX, featureY, featureSize, featureDir);
-                cudaSafeCall( cudaGetLastError() );
-
-                cudaSafeCall( cudaDeviceSynchronize() );
-
-                normalize_descriptors<64><<<nFeatures, 64>>>((PtrStepSzf) descriptors);
-                cudaSafeCall( cudaGetLastError() );
-
-                cudaSafeCall( cudaDeviceSynchronize() );
-            }
-            else
-            {
-                compute_descriptors_128<<<nFeatures, dim3(32, 16)>>>(descriptors, featureX, featureY, featureSize, featureDir);
-                cudaSafeCall( cudaGetLastError() );
-
-                cudaSafeCall( cudaDeviceSynchronize() );
-
-                normalize_descriptors<128><<<nFeatures, 128>>>((PtrStepSzf) descriptors);
-                cudaSafeCall( cudaGetLastError() );
-
-                cudaSafeCall( cudaDeviceSynchronize() );
-            }
-        }
-    } // namespace surf
-}}} // namespace cv { namespace cuda { namespace cudev
-
-#endif // HAVE_OPENCV_CUDAARITHM
diff --git a/modules/nonfree/src/nonfree_init.cpp b/modules/nonfree/src/nonfree_init.cpp
deleted file mode 100644 (file)
index c59e735..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                          License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "precomp.hpp"
-
-namespace cv
-{
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-CV_INIT_ALGORITHM(SURF, "Feature2D.SURF",
-                  obj.info()->addParam(obj, "hessianThreshold", obj.hessianThreshold);
-                  obj.info()->addParam(obj, "nOctaves", obj.nOctaves);
-                  obj.info()->addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
-                  obj.info()->addParam(obj, "extended", obj.extended);
-                  obj.info()->addParam(obj, "upright", obj.upright))
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-CV_INIT_ALGORITHM(SIFT, "Feature2D.SIFT",
-                  obj.info()->addParam(obj, "nFeatures", obj.nfeatures);
-                  obj.info()->addParam(obj, "nOctaveLayers", obj.nOctaveLayers);
-                  obj.info()->addParam(obj, "contrastThreshold", obj.contrastThreshold);
-                  obj.info()->addParam(obj, "edgeThreshold", obj.edgeThreshold);
-                  obj.info()->addParam(obj, "sigma", obj.sigma))
-
-///////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-bool initModule_nonfree(void)
-{
-    Ptr<Algorithm> sift = createSIFT_ptr_hidden(), surf = createSURF_ptr_hidden();
-    return sift->info() != 0 && surf->info() != 0;
-}
-
-}
diff --git a/modules/nonfree/src/opencl/surf.cl b/modules/nonfree/src/opencl/surf.cl
deleted file mode 100644 (file)
index 608a677..0000000
+++ /dev/null
@@ -1,1347 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
-// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
-// Copyright (C) 2013, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// @Authors
-//    Peng Xiao, pengxiao@multicorewareinc.com
-//    Sen Liu, swjtuls1987@126.com
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors as is and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-// The number of degrees between orientation samples in calcOrientation
-#define ORI_SEARCH_INC  5
-
-// The local size of the calcOrientation kernel
-#define ORI_LOCAL_SIZE  (360 / ORI_SEARCH_INC)
-
-// specialized for non-image2d_t supported platform, intel HD4000, for example
-#ifndef HAVE_IMAGE2D
-__inline uint read_sumTex_(__global uint* sumTex, int sum_step, int img_rows, int img_cols, int2 coord)
-{
-    int x = clamp(coord.x, 0, img_cols);
-    int y = clamp(coord.y, 0, img_rows);
-    return sumTex[sum_step * y + x];
-}
-
-__inline uchar read_imgTex_(__global uchar* imgTex, int img_step, int img_rows, int img_cols, float2 coord)
-{
-    int x = clamp(convert_int_rte(coord.x), 0, img_cols-1);
-    int y = clamp(convert_int_rte(coord.y), 0, img_rows-1);
-    return imgTex[img_step * y + x];
-}
-
-#define read_sumTex(coord) read_sumTex_(sumTex, sum_step, img_rows, img_cols, coord)
-#define read_imgTex(coord) read_imgTex_(imgTex, img_step, img_rows, img_cols, coord)
-
-#define __PARAM_sumTex__ __global uint* sumTex, int sum_step, int sum_offset
-#define __PARAM_imgTex__ __global uchar* imgTex, int img_step, int img_offset
-
-#define __PASS_sumTex__ sumTex, sum_step, sum_offset
-#define __PASS_imgTex__ imgTex, img_step, img_offset
-
-#else
-__inline uint read_sumTex_(image2d_t sumTex, sampler_t sam, int2 coord)
-{
-    return read_imageui(sumTex, sam, coord).x;
-}
-
-__inline uchar read_imgTex_(image2d_t imgTex, sampler_t sam, float2 coord)
-{
-    return (uchar)read_imageui(imgTex, sam, coord).x;
-}
-
-#define read_sumTex(coord) read_sumTex_(sumTex, sampler, coord)
-#define read_imgTex(coord) read_imgTex_(imgTex, sampler, coord)
-
-#define __PARAM_sumTex__ image2d_t sumTex
-#define __PARAM_imgTex__ image2d_t imgTex
-
-#define __PASS_sumTex__ sumTex
-#define __PASS_imgTex__ imgTex
-
-#endif
-
-// dynamically change the precision used for floating type
-
-#if defined (DOUBLE_SUPPORT)
-#ifdef cl_khr_fp64
-#pragma OPENCL EXTENSION cl_khr_fp64:enable
-#elif defined (cl_amd_fp64)
-#pragma OPENCL EXTENSION cl_amd_fp64:enable
-#endif
-#define F double
-#else
-#define F float
-#endif
-
-// Image read mode
-__constant sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_NEAREST;
-
-#ifndef FLT_EPSILON
-#define FLT_EPSILON (1e-15)
-#endif
-
-#ifndef CV_PI_F
-#define CV_PI_F 3.14159265f
-#endif
-
-////////////////////////////////////////////////////////////////////////
-// Hessian
-
-__inline int calcSize(int octave, int layer)
-{
-    /* Wavelet size at first layer of first octave. */
-    const int HAAR_SIZE0 = 9;
-
-    /* Wavelet size increment between layers. This should be an even number,
-    such that the wavelet sizes in an octave are either all even or all odd.
-    This ensures that when looking for the neighbours of a sample, the layers
-    above and below are aligned correctly. */
-    const int HAAR_SIZE_INC = 6;
-
-    return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
-}
-
-// Calculate a derivative in an axis-aligned direction (x or y).  The "plus1"
-// boxes contribute 1 * (area), and the "minus2" box contributes -2 * (area).
-// So the final computation is plus1a + plus1b - 2 * minus2.  The corners are
-// labeled A, B, C, and D, with A being the top left, B being top right, C
-// being bottom left, and D being bottom right.
-F calcAxisAlignedDerivative(
-        int plus1a_A, int plus1a_B, int plus1a_C, int plus1a_D, F plus1a_scale,
-        int plus1b_A, int plus1b_B, int plus1b_C, int plus1b_D, F plus1b_scale,
-        int minus2_A, int minus2_B, int minus2_C, int minus2_D, F minus2_scale)
-{
-    F plus1a = plus1a_A - plus1a_B - plus1a_C + plus1a_D;
-    F plus1b = plus1b_A - plus1b_B - plus1b_C + plus1b_D;
-    F minus2 = minus2_A - minus2_B - minus2_C + minus2_D;
-
-    return (plus1a / plus1a_scale -
-            2.0f * minus2 / minus2_scale +
-            plus1b / plus1b_scale);
-}
-
-//calculate targeted layer per-pixel determinant and trace with an integral image
-__kernel void SURF_calcLayerDetAndTrace(
-    __PARAM_sumTex__, // input integral image
-    int img_rows, int img_cols,
-    int c_nOctaveLayers, int c_octave, int c_layer_rows,
-
-    __global float * det,      // output determinant
-    int det_step, int det_offset,
-    __global float * trace,    // output trace
-    int trace_step, int trace_offset)
-{
-    det_step   /= sizeof(*det);
-    trace_step /= sizeof(*trace);
-    #ifndef HAVE_IMAGE2D
-    sum_step/= sizeof(uint);
-    #endif
-    // Determine the indices
-    const int gridDim_y  = get_num_groups(1) / (c_nOctaveLayers + 2);
-    const int blockIdx_y = get_group_id(1) % gridDim_y;
-    const int blockIdx_z = get_group_id(1) / gridDim_y;
-
-    const int j = get_local_id(0) + get_group_id(0) * get_local_size(0);
-    const int i = get_local_id(1) + blockIdx_y * get_local_size(1);
-    const int layer = blockIdx_z;
-
-    const int size = calcSize(c_octave, layer);
-
-    const int samples_i = 1 + ((img_rows - size) >> c_octave);
-    const int samples_j = 1 + ((img_cols - size) >> c_octave);
-
-    // Ignore pixels where some of the kernel is outside the image
-    const int margin = (size >> 1) >> c_octave;
-
-    if (size <= img_rows && size <= img_cols && i < samples_i && j < samples_j)
-    {
-        int x = j << c_octave;
-        int y = i << c_octave;
-
-        float ratio = (float)size / 9;
-
-        // Precompute some commonly used values, which are used to offset
-        // texture coordinates in the integral image.
-        int r1 = round(ratio);
-        int r2 = round(ratio * 2.0f);
-        int r3 = round(ratio * 3.0f);
-        int r4 = round(ratio * 4.0f);
-        int r5 = round(ratio * 5.0f);
-        int r6 = round(ratio * 6.0f);
-        int r7 = round(ratio * 7.0f);
-        int r8 = round(ratio * 8.0f);
-        int r9 = round(ratio * 9.0f);
-
-        // Calculate the approximated derivative in the x-direction
-        F d = 0;
-        {
-            // Some of the pixels needed to compute the derivative are
-            // repeated, so we only don't duplicate the fetch here.
-            int t02 = read_sumTex( (int2)(x, y + r2));
-            int t07 = read_sumTex( (int2)(x, y + r7));
-            int t32 = read_sumTex( (int2)(x + r3, y + r2));
-            int t37 = read_sumTex( (int2)(x + r3, y + r7));
-            int t62 = read_sumTex( (int2)(x + r6, y + r2));
-            int t67 = read_sumTex( (int2)(x + r6, y + r7));
-            int t92 = read_sumTex( (int2)(x + r9, y + r2));
-            int t97 = read_sumTex( (int2)(x + r9, y + r7));
-
-            d = calcAxisAlignedDerivative(t02, t07, t32, t37, (r3) * (r7 - r2),
-                                          t62, t67, t92, t97, (r9 - r6) * (r7 - r2),
-                                          t32, t37, t62, t67, (r6 - r3) * (r7 - r2));
-        }
-        const float dx  = (float)d;
-
-        // Calculate the approximated derivative in the y-direction
-        d = 0;
-        {
-            // Some of the pixels needed to compute the derivative are
-            // repeated, so we only don't duplicate the fetch here.
-            int t20 = read_sumTex( (int2)(x + r2, y) );
-            int t23 = read_sumTex( (int2)(x + r2, y + r3) );
-            int t70 = read_sumTex( (int2)(x + r7, y) );
-            int t73 = read_sumTex( (int2)(x + r7, y + r3) );
-            int t26 = read_sumTex( (int2)(x + r2, y + r6) );
-            int t76 = read_sumTex( (int2)(x + r7, y + r6) );
-            int t29 = read_sumTex( (int2)(x + r2, y + r9) );
-            int t79 = read_sumTex( (int2)(x + r7, y + r9) );
-
-            d = calcAxisAlignedDerivative(t20, t23, t70, t73, (r7 - r2) * (r3),
-                                          t26, t29, t76, t79, (r7 - r2) * (r9 - r6),
-                                          t23, t26, t73, t76, (r7 - r2) * (r6 - r3));
-        }
-        const float dy  = (float)d;
-
-        // Calculate the approximated derivative in the xy-direction
-        d = 0;
-        {
-            // There's no saving us here, we just have to get all of the pixels in
-            // separate fetches
-            F t = 0;
-            t += read_sumTex( (int2)(x + r1, y + r1) );
-            t -= read_sumTex( (int2)(x + r1, y + r4) );
-            t -= read_sumTex( (int2)(x + r4, y + r1) );
-            t += read_sumTex( (int2)(x + r4, y + r4) );
-            d += t / ((r4 - r1) * (r4 - r1));
-
-            t = 0;
-            t += read_sumTex( (int2)(x + r5, y + r1) );
-            t -= read_sumTex( (int2)(x + r5, y + r4) );
-            t -= read_sumTex( (int2)(x + r8, y + r1) );
-            t += read_sumTex( (int2)(x + r8, y + r4) );
-            d -= t / ((r8 - r5) * (r4 - r1));
-
-            t = 0;
-            t += read_sumTex( (int2)(x + r1, y + r5) );
-            t -= read_sumTex( (int2)(x + r1, y + r8) );
-            t -= read_sumTex( (int2)(x + r4, y + r5) );
-            t += read_sumTex( (int2)(x + r4, y + r8) );
-            d -= t / ((r4 - r1) * (r8 - r5));
-
-            t = 0;
-            t += read_sumTex( (int2)(x + r5, y + r5) );
-            t -= read_sumTex( (int2)(x + r5, y + r8) );
-            t -= read_sumTex( (int2)(x + r8, y + r5) );
-            t += read_sumTex( (int2)(x + r8, y + r8) );
-            d += t / ((r8 - r5) * (r8 - r5));
-        }
-        const float dxy = (float)d;
-
-        det  [j + margin + det_step   * (layer * c_layer_rows + i + margin)] = dx * dy - 0.81f * dxy * dxy;
-        trace[j + margin + trace_step * (layer * c_layer_rows + i + margin)] = dx + dy;
-    }
-}
-
-////////////////////////////////////////////////////////////////////////
-// NONMAX
-
-__kernel
-void SURF_findMaximaInLayer(
-    __global float * det,
-    int det_step, int det_offset,
-    __global float * trace,
-    int trace_step, int trace_offset,
-    __global int4 * maxPosBuffer,
-    volatile __global  int* maxCounter,
-    int counter_offset,
-    int img_rows,
-    int img_cols,
-    int c_nOctaveLayers,
-    int c_octave,
-    int c_layer_rows,
-    int c_layer_cols,
-    int c_max_candidates,
-    float c_hessianThreshold
-)
-{
-    volatile __local  float N9[768]; // threads.x * threads.y * 3
-
-    det_step   /= sizeof(float);
-    trace_step /= sizeof(float);
-    maxCounter += counter_offset;
-
-    // Determine the indices
-    const int gridDim_y  = get_num_groups(1) / c_nOctaveLayers;
-    const int blockIdx_y = get_group_id(1)   % gridDim_y;
-    const int blockIdx_z = get_group_id(1)   / gridDim_y;
-
-    const int layer = blockIdx_z + 1;
-
-    const int size = calcSize(c_octave, layer);
-
-    // Ignore pixels without a 3x3x3 neighbourhood in the layer above
-    const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1;
-
-    const int j = get_local_id(0) + get_group_id(0) * (get_local_size(0) - 2) + margin - 1;
-    const int i = get_local_id(1) + blockIdx_y      * (get_local_size(1) - 2) + margin - 1;
-
-    // Is this thread within the hessian buffer?
-    const int zoff     = get_local_size(0) * get_local_size(1);
-    const int localLin = get_local_id(0) + get_local_id(1) * get_local_size(0) + zoff;
-
-    int l_x = min(max(j, 0), img_cols - 1);
-    int l_y = c_layer_rows * layer + min(max(i, 0), img_rows - 1);
-
-    N9[localLin - zoff] =
-        det[det_step * (l_y - c_layer_rows) + l_x];
-    N9[localLin       ] =
-        det[det_step * (l_y               ) + l_x];
-    N9[localLin + zoff] =
-        det[det_step * (l_y + c_layer_rows) + l_x];
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    if (i < c_layer_rows - margin
-            && j < c_layer_cols - margin
-            && get_local_id(0) > 0
-            && get_local_id(0) < get_local_size(0) - 1
-            && get_local_id(1) > 0
-            && get_local_id(1) < get_local_size(1) - 1 // these are unnecessary conditions ported from CUDA
-       )
-    {
-        float val0 = N9[localLin];
-        if (val0 > c_hessianThreshold)
-        {
-            // Coordinates for the start of the wavelet in the sum image. There
-            // is some integer division involved, so don't try to simplify this
-            // (cancel out sampleStep) without checking the result is the same
-
-            // Check to see if we have a max (in its 26 neighbours)
-            const bool condmax = val0 > N9[localLin - 1 - get_local_size(0) - zoff]
-                                 &&                   val0 > N9[localLin     - get_local_size(0) - zoff]
-                                 &&                   val0 > N9[localLin + 1 - get_local_size(0) - zoff]
-                                 &&                   val0 > N9[localLin - 1                     - zoff]
-                                 &&                   val0 > N9[localLin                         - zoff]
-                                 &&                   val0 > N9[localLin + 1                     - zoff]
-                                 &&                   val0 > N9[localLin - 1 + get_local_size(0) - zoff]
-                                 &&                   val0 > N9[localLin     + get_local_size(0) - zoff]
-                                 &&                   val0 > N9[localLin + 1 + get_local_size(0) - zoff]
-
-                                 &&                   val0 > N9[localLin - 1 - get_local_size(0)]
-                                 &&                   val0 > N9[localLin     - get_local_size(0)]
-                                 &&                   val0 > N9[localLin + 1 - get_local_size(0)]
-                                 &&                   val0 > N9[localLin - 1                    ]
-                                 &&                   val0 > N9[localLin + 1                    ]
-                                 &&                   val0 > N9[localLin - 1 + get_local_size(0)]
-                                 &&                   val0 > N9[localLin     + get_local_size(0)]
-                                 &&                   val0 > N9[localLin + 1 + get_local_size(0)]
-
-                                 &&                   val0 > N9[localLin - 1 - get_local_size(0) + zoff]
-                                 &&                   val0 > N9[localLin     - get_local_size(0) + zoff]
-                                 &&                   val0 > N9[localLin + 1 - get_local_size(0) + zoff]
-                                 &&                   val0 > N9[localLin - 1                     + zoff]
-                                 &&                   val0 > N9[localLin                         + zoff]
-                                 &&                   val0 > N9[localLin + 1                     + zoff]
-                                 &&                   val0 > N9[localLin - 1 + get_local_size(0) + zoff]
-                                 &&                   val0 > N9[localLin     + get_local_size(0) + zoff]
-                                 &&                   val0 > N9[localLin + 1 + get_local_size(0) + zoff]
-                                 ;
-
-            if(condmax)
-            {
-                int ind = atomic_inc(maxCounter);
-
-                if (ind < c_max_candidates)
-                {
-                    const int laplacian = (int) copysign(1.0f, trace[trace_step* (layer * c_layer_rows + i) + j]);
-
-                    maxPosBuffer[ind] = (int4)(j, i, layer, laplacian);
-                }
-            }
-        }
-    }
-}
-
-// solve 3x3 linear system Ax=b for floating point input
-inline bool solve3x3_float(const float4 *A, const float *b, float *x)
-{
-    float det = A[0].x * (A[1].y * A[2].z - A[1].z * A[2].y)
-                - A[0].y * (A[1].x * A[2].z - A[1].z * A[2].x)
-                + A[0].z * (A[1].x * A[2].y - A[1].y * A[2].x);
-
-    if (det != 0)
-    {
-        F invdet = 1.0f / det;
-
-        x[0] = invdet *
-               (b[0]    * (A[1].y * A[2].z - A[1].z * A[2].y) -
-                A[0].y * (b[1]    * A[2].z - A[1].z * b[2]   ) +
-                A[0].z * (b[1]    * A[2].y - A[1].y * b[2]   ));
-
-        x[1] = invdet *
-               (A[0].x * (b[1]    * A[2].z - A[1].z * b[2]   ) -
-                b[0]    * (A[1].x * A[2].z - A[1].z * A[2].x) +
-                A[0].z * (A[1].x * b[2]    - b[1]    * A[2].x));
-
-        x[2] = invdet *
-               (A[0].x * (A[1].y * b[2]    - b[1]    * A[2].y) -
-                A[0].y * (A[1].x * b[2]    - b[1]    * A[2].x) +
-                b[0]    * (A[1].x * A[2].y - A[1].y * A[2].x));
-
-        return true;
-    }
-    return false;
-}
-
-#define X_ROW          0
-#define Y_ROW          1
-#define LAPLACIAN_ROW  2
-#define OCTAVE_ROW     3
-#define SIZE_ROW       4
-#define ANGLE_ROW      5
-#define HESSIAN_ROW    6
-#define ROWS_COUNT     7
-
-////////////////////////////////////////////////////////////////////////
-// INTERPOLATION
-__kernel
-void SURF_interpolateKeypoint(
-    __global const float * det,
-    int det_step, int det_offset,
-    __global const int4 * maxPosBuffer,
-    __global float * keypoints,
-    int keypoints_step, int keypoints_offset,
-    volatile __global int* featureCounter,
-    int img_rows,
-    int img_cols,
-    int c_octave,
-    int c_layer_rows,
-    int c_max_features
-)
-{
-    det_step /= sizeof(*det);
-    keypoints_step /= sizeof(*keypoints);
-    __global float * featureX       = keypoints + X_ROW * keypoints_step;
-    __global float * featureY       = keypoints + Y_ROW * keypoints_step;
-    __global int * featureLaplacian = (__global int *)keypoints + LAPLACIAN_ROW * keypoints_step;
-    __global int * featureOctave    = (__global int *)keypoints + OCTAVE_ROW * keypoints_step;
-    __global float * featureSize    = keypoints + SIZE_ROW * keypoints_step;
-    __global float * featureHessian = keypoints + HESSIAN_ROW * keypoints_step;
-
-    const int4 maxPos = maxPosBuffer[get_group_id(0)];
-
-    const int j = maxPos.x - 1 + get_local_id(0);
-    const int i = maxPos.y - 1 + get_local_id(1);
-    const int layer = maxPos.z - 1 + get_local_id(2);
-
-    volatile __local  float N9[3][3][3];
-
-    N9[get_local_id(2)][get_local_id(1)][get_local_id(0)] =
-        det[det_step * (c_layer_rows * layer + i) + j];
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    if (get_local_id(0) == 0 && get_local_id(1) == 0 && get_local_id(2) == 0)
-    {
-        float dD[3];
-
-        //dx
-        dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]);
-        //dy
-        dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]);
-        //ds
-        dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]);
-
-        float4 H[3];
-
-        //dxx
-        H[0].x = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2];
-        //dxy
-        H[0].y= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]);
-        //dxs
-        H[0].z= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]);
-        //dyx = dxy
-        H[1].x = H[0].y;
-        //dyy
-        H[1].y = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1];
-        //dys
-        H[1].z= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]);
-        //dsx = dxs
-        H[2].x = H[0].z;
-        //dsy = dys
-        H[2].y = H[1].z;
-        //dss
-        H[2].z = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1];
-
-        float x[3];
-
-        if (solve3x3_float(H, dD, x))
-        {
-            if (fabs(x[0]) <= 1.f && fabs(x[1]) <= 1.f && fabs(x[2]) <= 1.f)
-            {
-                // if the step is within the interpolation region, perform it
-
-                const int size = calcSize(c_octave, maxPos.z);
-
-                const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave;
-                const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave;
-
-                const float center_i = sum_i + (float)(size - 1) / 2;
-                const float center_j = sum_j + (float)(size - 1) / 2;
-
-                const float px = center_j + x[0] * (1 << c_octave);
-                const float py = center_i + x[1] * (1 << c_octave);
-
-                const int ds = size - calcSize(c_octave, maxPos.z - 1);
-                const float psize = round(size + x[2] * ds);
-
-                /* The sampling intervals and wavelet sized for selecting an orientation
-                and building the keypoint descriptor are defined relative to 's' */
-                const float s = psize * 1.2f / 9.0f;
-
-                /* To find the dominant orientation, the gradients in x and y are
-                sampled in a circle of radius 6s using wavelets of size 4s.
-                We ensure the gradient wavelet size is even to ensure the
-                wavelet pattern is balanced and symmetric around its center */
-                const int grad_wav_size = 2 * round(2.0f * s);
-
-                // check when grad_wav_size is too big
-                if ((img_rows + 1) >= grad_wav_size && (img_cols + 1) >= grad_wav_size)
-                {
-                    // Get a new feature index.
-                    int ind = atomic_inc(featureCounter);
-
-                    if (ind < c_max_features)
-                    {
-                        featureX[ind] = px;
-                        featureY[ind] = py;
-                        featureLaplacian[ind] = maxPos.w;
-                        featureOctave[ind] = c_octave;
-                        featureSize[ind] = psize;
-                        featureHessian[ind] = N9[1][1][1];
-                    }
-                } // grad_wav_size check
-            } // If the subpixel interpolation worked
-        }
-    } // If this is thread 0.
-}
-
-////////////////////////////////////////////////////////////////////////
-// Orientation
-
-#define ORI_WIN                         60
-#define ORI_SAMPLES             113
-
-// The distance between samples in the beginning of the the reduction
-#define ORI_RESPONSE_REDUCTION_WIDTH            48
-#define ORI_RESPONSE_ARRAY_SIZE                             (ORI_RESPONSE_REDUCTION_WIDTH * 2)
-
-__constant float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6};
-__constant float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0};
-__constant float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f,
-                                        0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f,
-                                        0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f,
-                                        0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f,
-                                        0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f,
-                                        0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f,
-                                        0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f,
-                                        0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f,
-                                        0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f,
-                                        0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f,
-                                        0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f,
-                                        0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f,
-                                        0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f,
-                                        0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f,
-                                        0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f,
-                                        0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f,
-                                        0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f,
-                                        0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f,
-                                        0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f,
-                                        0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f,
-                                        0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f,
-                                        0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f,
-                                        0.001707611023448408f, 0.001455130288377404f
-                                       };
-
-__constant float2 c_NX[5] = { (float2)(0, 2), (float2)(0, 0), (float2)(2, 4), (float2)(4, 4), (float2)(-1, 1) };
-__constant float2 c_NY[5] = { (float2)(0, 0), (float2)(0, 2), (float2)(4, 4), (float2)(2, 4), (float2)(1, -1) };
-
-void reduce_32_sum(volatile __local  float * data, volatile float* partial_reduction, int tid)
-{
-#define op(A, B) (*A)+(B)
-    data[tid] = *partial_reduction;
-    barrier(CLK_LOCAL_MEM_FENCE);
-#ifndef WAVE_SIZE
-#define WAVE_SIZE 1
-#endif
-    if (tid < 16)
-    {
-        data[tid] = *partial_reduction = op(partial_reduction, data[tid + 16]);
-#if WAVE_SIZE < 16
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 8)
-    {
-#endif
-        data[tid] = *partial_reduction = op(partial_reduction, data[tid + 8]);
-#if WAVE_SIZE < 8
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 4)
-    {
-#endif
-        data[tid] = *partial_reduction = op(partial_reduction, data[tid + 4]);
-#if WAVE_SIZE < 4
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 2)
-    {
-#endif
-        data[tid] = *partial_reduction = op(partial_reduction, data[tid + 2 ]);
-#if WAVE_SIZE < 2
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 1)
-    {
-#endif
-        data[tid] = *partial_reduction = op(partial_reduction, data[tid + 1 ]);
-    }
-#undef WAVE_SIZE
-#undef op
-}
-
-__kernel
-void SURF_calcOrientation(
-    __PARAM_sumTex__, int img_rows, int img_cols,
-    __global float * keypoints, int keypoints_step, int keypoints_offset )
-{
-    keypoints_step /= sizeof(*keypoints);
-    #ifndef HAVE_IMAGE2D
-    sum_step       /= sizeof(uint);
-    #endif
-    __global float* featureX    = keypoints + X_ROW * keypoints_step;
-    __global float* featureY    = keypoints + Y_ROW * keypoints_step;
-    __global float* featureSize = keypoints + SIZE_ROW * keypoints_step;
-    __global float* featureDir  = keypoints + ANGLE_ROW * keypoints_step;
-
-    __local  float s_X[ORI_SAMPLES];
-    __local  float s_Y[ORI_SAMPLES];
-    __local  float s_angle[ORI_SAMPLES];
-
-    // Need to allocate enough to make the reduction work without accessing
-    // past the end of the array.
-    __local  float s_sumx[ORI_RESPONSE_ARRAY_SIZE];
-    __local  float s_sumy[ORI_RESPONSE_ARRAY_SIZE];
-    __local  float s_mod[ORI_RESPONSE_ARRAY_SIZE];
-
-    /* The sampling intervals and wavelet sized for selecting an orientation
-    and building the keypoint descriptor are defined relative to 's' */
-    const float s = featureSize[get_group_id(0)] * 1.2f / 9.0f;
-
-    /* To find the dominant orientation, the gradients in x and y are
-    sampled in a circle of radius 6s using wavelets of size 4s.
-    We ensure the gradient wavelet size is even to ensure the
-    wavelet pattern is balanced and symmetric around its center */
-    const int grad_wav_size = 2 * round(2.0f * s);
-
-    // check when grad_wav_size is too big
-    if ((img_rows + 1) < grad_wav_size || (img_cols + 1) < grad_wav_size)
-        return;
-
-    // Calc X, Y, angle and store it to shared memory
-    const int tid = get_local_id(0);
-    // Initialize values that are only used as part of the reduction later.
-    if (tid < ORI_RESPONSE_ARRAY_SIZE - ORI_LOCAL_SIZE) {
-        s_mod[tid + ORI_LOCAL_SIZE] = 0.0f;
-    }
-
-    float ratio = (float)grad_wav_size / 4;
-
-    int r2 = round(ratio * 2.0f);
-    int r4 = round(ratio * 4.0f);
-    for (int i = tid; i < ORI_SAMPLES; i += ORI_LOCAL_SIZE )
-    {
-        float X = 0.0f, Y = 0.0f, angle = 0.0f;
-        const float margin = (float)(grad_wav_size - 1) / 2.0f;
-        const int x = round(featureX[get_group_id(0)] + c_aptX[i] * s - margin);
-        const int y = round(featureY[get_group_id(0)] + c_aptY[i] * s - margin);
-
-        if (y >= 0 && y < (img_rows + 1) - grad_wav_size &&
-            x >= 0 && x < (img_cols + 1) - grad_wav_size)
-        {
-            float apt = c_aptW[i];
-
-            // Compute the haar sum without fetching duplicate pixels.
-            float t00 = read_sumTex( (int2)(x, y));
-            float t02 = read_sumTex( (int2)(x, y + r2));
-            float t04 = read_sumTex( (int2)(x, y + r4));
-            float t20 = read_sumTex( (int2)(x + r2, y));
-            float t24 = read_sumTex( (int2)(x + r2, y + r4));
-            float t40 = read_sumTex( (int2)(x + r4, y));
-            float t42 = read_sumTex( (int2)(x + r4, y + r2));
-            float t44 = read_sumTex( (int2)(x + r4, y + r4));
-
-            F t = t00 - t04 - t20 + t24;
-            X -= t / ((r2) * (r4));
-
-            t = t20 - t24 - t40 + t44;
-            X += t / ((r4 - r2) * (r4));
-
-            t = t00 - t02 - t40 + t42;
-            Y += t / ((r2) * (r4));
-
-            t = t02 - t04 - t42 + t44;
-            Y -= t  / ((r4) * (r4 - r2));
-
-            X = apt*X;
-            Y = apt*Y;
-
-            angle = atan2(Y, X);
-
-            if (angle < 0)
-                angle += 2.0f * CV_PI_F;
-            angle *= 180.0f / CV_PI_F;
-
-        }
-
-        s_X[i] = X;
-        s_Y[i] = Y;
-        s_angle[i] = angle;
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    float bestx = 0, besty = 0, best_mod = 0;
-    float sumx = 0.0f, sumy = 0.0f;
-    const int dir = tid * ORI_SEARCH_INC;
-    #pragma unroll
-    for (int i = 0; i < ORI_SAMPLES; ++i) {
-        int angle = round(s_angle[i]);
-
-        int d = abs(angle - dir);
-        if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
-        {
-            sumx += s_X[i];
-            sumy += s_Y[i];
-        }
-    }
-    s_sumx[tid] = sumx;
-    s_sumy[tid] = sumy;
-    s_mod[tid] = sumx*sumx + sumy*sumy;
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    // This reduction searches for the longest wavelet response vector.  The first
-    // step uses all of the work items in the workgroup to narrow the search
-    // down to the three candidates.  It requires s_mod to have a few more
-    // elements allocated past the work-group size, which are pre-initialized to
-    // 0.0f above.
-    for(int t = ORI_RESPONSE_REDUCTION_WIDTH; t >= 3; t /= 2) {
-        if (tid < t) {
-            if (s_mod[tid] < s_mod[tid + t]) {
-                s_mod[tid] = s_mod[tid + t];
-                s_sumx[tid] = s_sumx[tid + t];
-                s_sumy[tid] = s_sumy[tid + t];
-            }
-        }
-        barrier(CLK_LOCAL_MEM_FENCE);
-    }
-
-    // Do the final reduction and write out the result.
-    if (tid == 0)
-    {
-        int bestIdx = 0;
-
-        // The loop above narrowed the search of the longest vector to three
-        // possibilities.  Pick the best here.
-        if (s_mod[1] > s_mod[bestIdx])
-            bestIdx = 1;
-        if (s_mod[2] > s_mod[bestIdx])
-            bestIdx = 2;
-
-        float kp_dir = atan2(s_sumy[bestIdx], s_sumx[bestIdx]);
-        if (kp_dir < 0)
-            kp_dir += 2.0f * CV_PI_F;
-        kp_dir *= 180.0f / CV_PI_F;
-
-        kp_dir = 360.0f - kp_dir;
-        if (fabs(kp_dir - 360.f) < FLT_EPSILON)
-            kp_dir = 0.f;
-
-        featureDir[get_group_id(0)] = kp_dir;
-    }
-}
-
-__kernel
-void SURF_setUpRight(
-    __global float * keypoints,
-    int keypoints_step, int keypoints_offset,
-    int rows, int cols )
-{
-    int i = get_global_id(0);
-    keypoints_step /= sizeof(*keypoints);
-
-    if(i < cols)
-    {
-        keypoints[mad24(keypoints_step, ANGLE_ROW, i)] = 270.f;
-    }
-}
-
-
-#undef ORI_SEARCH_INC
-#undef ORI_WIN
-#undef ORI_SAMPLES
-
-////////////////////////////////////////////////////////////////////////
-// Descriptors
-
-#define PATCH_SZ 20
-
-__constant float c_DW[PATCH_SZ * PATCH_SZ] =
-{
-    3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f,
-    8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
-    1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
-    3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
-    5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
-    9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
-    0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
-    0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
-    0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
-    0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
-    0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
-    0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
-    0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
-    0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
-    9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
-    5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
-    3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
-    1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
-    8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
-    3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f
-};
-
-// utility for linear filter
-#define readerGet(centerX, centerY, win_offset, cos_dir, sin_dir, i, j) \
-    read_imgTex((float2)(centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir, \
-                         centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir))
-
-inline float linearFilter(
-    __PARAM_imgTex__, int img_rows, int img_cols,
-    float centerX, float centerY, float win_offset,
-    float cos_dir, float sin_dir, float y, float x )
-{
-    x -= 0.5f;
-    y -= 0.5f;
-
-    float out = 0.0f;
-
-    const int x1 = round(x);
-    const int y1 = round(y);
-    const int x2 = x1 + 1;
-    const int y2 = y1 + 1;
-
-    uchar src_reg = readerGet(centerX, centerY, win_offset, cos_dir, sin_dir, y1, x1);
-    out = out + src_reg * ((x2 - x) * (y2 - y));
-
-    src_reg = readerGet(centerX, centerY, win_offset, cos_dir, sin_dir, y1, x2);
-    out = out + src_reg * ((x - x1) * (y2 - y));
-
-    src_reg = readerGet(centerX, centerY, win_offset, cos_dir, sin_dir, y2, x1);
-    out = out + src_reg * ((x2 - x) * (y - y1));
-
-    src_reg = readerGet(centerX, centerY, win_offset, cos_dir, sin_dir, y2, x2);
-    out = out + src_reg * ((x - x1) * (y - y1));
-
-    return out;
-}
-
-void calc_dx_dy(
-    __PARAM_imgTex__,
-    int img_rows, int img_cols,
-    volatile __local  float *s_dx_bin,
-    volatile __local  float *s_dy_bin,
-    volatile __local  float *s_PATCH,
-    __global const float* featureX,
-    __global const float* featureY,
-    __global const float* featureSize,
-    __global const float* featureDir )
-{
-    const float centerX = featureX[get_group_id(0)];
-    const float centerY = featureY[get_group_id(0)];
-    const float size = featureSize[get_group_id(0)];
-    float descriptor_dir = 360.0f - featureDir[get_group_id(0)];
-    if(fabs(descriptor_dir - 360.0f) < FLT_EPSILON)
-    {
-        descriptor_dir = 0.0f;
-    }
-
-    descriptor_dir *= (float)(CV_PI_F / 180.0f);
-
-    /* The sampling intervals and wavelet sized for selecting an orientation
-    and building the keypoint descriptor are defined relative to 's' */
-    const float s = size * 1.2f / 9.0f;
-
-    /* Extract a window of pixels around the keypoint of size 20s */
-    const int win_size = (int)((PATCH_SZ + 1) * s);
-
-    float sin_dir;
-    float cos_dir;
-    sin_dir = sincos(descriptor_dir, &cos_dir);
-
-    /* Nearest neighbour version (faster) */
-    const float win_offset = -(float)(win_size - 1) / 2;
-
-    // Compute sampling points
-    // since grids are 2D, need to compute xBlock and yBlock indices
-    const int xBlock = (get_group_id(1) & 3);  // get_group_id(1) % 4
-    const int yBlock = (get_group_id(1) >> 2); // floor(get_group_id(1)/4)
-    const int xIndex = xBlock * 5 + get_local_id(0);
-    const int yIndex = yBlock * 5 + get_local_id(1);
-
-    const float icoo = ((float)yIndex / (PATCH_SZ + 1)) * win_size;
-    const float jcoo = ((float)xIndex / (PATCH_SZ + 1)) * win_size;
-
-    s_PATCH[get_local_id(1) * 6 + get_local_id(0)] =
-        linearFilter(__PASS_imgTex__, img_rows, img_cols, centerX, centerY,
-                     win_offset, cos_dir, sin_dir, icoo, jcoo);
-
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    if (get_local_id(0) < 5 && get_local_id(1) < 5)
-    {
-        const int tid = get_local_id(1) * 5 + get_local_id(0);
-
-        const float dw = c_DW[yIndex * PATCH_SZ + xIndex];
-
-        const float vx = (
-                             s_PATCH[      get_local_id(1) * 6 + get_local_id(0) + 1] -
-                             s_PATCH[      get_local_id(1) * 6 + get_local_id(0)    ] +
-                             s_PATCH[(get_local_id(1) + 1) * 6 + get_local_id(0) + 1] -
-                             s_PATCH[(get_local_id(1) + 1) * 6 + get_local_id(0)    ])
-                         * dw;
-        const float vy = (
-                             s_PATCH[(get_local_id(1) + 1) * 6 + get_local_id(0)    ] -
-                             s_PATCH[      get_local_id(1) * 6 + get_local_id(0)    ] +
-                             s_PATCH[(get_local_id(1) + 1) * 6 + get_local_id(0) + 1] -
-                             s_PATCH[      get_local_id(1) * 6 + get_local_id(0) + 1])
-                         * dw;
-        s_dx_bin[tid] = vx;
-        s_dy_bin[tid] = vy;
-    }
-}
-
-void reduce_sum25(
-    volatile __local  float* sdata1,
-    volatile __local  float* sdata2,
-    volatile __local  float* sdata3,
-    volatile __local  float* sdata4,
-    int tid
-)
-{
-#ifndef WAVE_SIZE
-#define WAVE_SIZE 1
-#endif
-    // first step is to reduce from 25 to 16
-    if (tid < 9)
-    {
-        sdata1[tid] += sdata1[tid + 16];
-        sdata2[tid] += sdata2[tid + 16];
-        sdata3[tid] += sdata3[tid + 16];
-        sdata4[tid] += sdata4[tid + 16];
-#if WAVE_SIZE < 16
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 8)
-    {
-#endif
-        sdata1[tid] += sdata1[tid + 8];
-        sdata2[tid] += sdata2[tid + 8];
-        sdata3[tid] += sdata3[tid + 8];
-        sdata4[tid] += sdata4[tid + 8];
-#if WAVE_SIZE < 8
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 4)
-    {
-#endif
-        sdata1[tid] += sdata1[tid + 4];
-        sdata2[tid] += sdata2[tid + 4];
-        sdata3[tid] += sdata3[tid + 4];
-        sdata4[tid] += sdata4[tid + 4];
-#if WAVE_SIZE < 4
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 2)
-    {
-#endif
-        sdata1[tid] += sdata1[tid + 2];
-        sdata2[tid] += sdata2[tid + 2];
-        sdata3[tid] += sdata3[tid + 2];
-        sdata4[tid] += sdata4[tid + 2];
-#if WAVE_SIZE < 2
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 1)
-    {
-#endif
-        sdata1[tid] += sdata1[tid + 1];
-        sdata2[tid] += sdata2[tid + 1];
-        sdata3[tid] += sdata3[tid + 1];
-        sdata4[tid] += sdata4[tid + 1];
-    }
-#undef WAVE_SIZE
-}
-
-__kernel
-void SURF_computeDescriptors64(
-    __PARAM_imgTex__,
-    int img_rows, int img_cols,
-    __global const float* keypoints,
-    int keypoints_step, int keypoints_offset,
-    __global float * descriptors,
-    int descriptors_step, int descriptors_offset)
-{
-    descriptors_step /= sizeof(float);
-    keypoints_step   /= sizeof(float);
-    __global const float * featureX    = keypoints + X_ROW * keypoints_step;
-    __global const float * featureY    = keypoints + Y_ROW * keypoints_step;
-    __global const float * featureSize = keypoints + SIZE_ROW * keypoints_step;
-    __global const float * featureDir  = keypoints + ANGLE_ROW * keypoints_step;
-
-    // 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region)
-    volatile __local  float sdx[25];
-    volatile __local  float sdy[25];
-    volatile __local  float sdxabs[25];
-    volatile __local  float sdyabs[25];
-    volatile __local  float s_PATCH[6*6];
-
-    calc_dx_dy(__PASS_imgTex__, img_rows, img_cols, sdx, sdy, s_PATCH, featureX, featureY, featureSize, featureDir);
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    const int tid = get_local_id(1) * get_local_size(0) + get_local_id(0);
-
-    if (tid < 25)
-    {
-        sdxabs[tid] = fabs(sdx[tid]); // |dx| array
-        sdyabs[tid] = fabs(sdy[tid]); // |dy| array
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    reduce_sum25(sdx, sdy, sdxabs, sdyabs, tid);
-
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 25)
-    {
-        __global float* descriptors_block = descriptors + descriptors_step * get_group_id(0) + (get_group_id(1) << 2);
-
-        // write dx, dy, |dx|, |dy|
-        if (tid == 0)
-        {
-            descriptors_block[0] = sdx[0];
-            descriptors_block[1] = sdy[0];
-            descriptors_block[2] = sdxabs[0];
-            descriptors_block[3] = sdyabs[0];
-        }
-    }
-}
-
-__kernel
-void SURF_computeDescriptors128(
-    __PARAM_imgTex__,
-    int img_rows, int img_cols,
-    __global const float* keypoints,
-    int keypoints_step, int keypoints_offset,
-    __global float* descriptors,
-    int descriptors_step, int descriptors_offset)
-{
-    descriptors_step /= sizeof(*descriptors);
-    keypoints_step   /= sizeof(*keypoints);
-
-    __global float * featureX   = keypoints + X_ROW * keypoints_step;
-    __global float * featureY   = keypoints + Y_ROW * keypoints_step;
-    __global float* featureSize = keypoints + SIZE_ROW * keypoints_step;
-    __global float* featureDir  = keypoints + ANGLE_ROW * keypoints_step;
-
-    // 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region)
-    volatile __local  float sdx[25];
-    volatile __local  float sdy[25];
-
-    // sum (reduce) 5x5 area response
-    volatile __local  float sd1[25];
-    volatile __local  float sd2[25];
-    volatile __local  float sdabs1[25];
-    volatile __local  float sdabs2[25];
-    volatile __local  float s_PATCH[6*6];
-
-    calc_dx_dy(__PASS_imgTex__, img_rows, img_cols, sdx, sdy, s_PATCH, featureX, featureY, featureSize, featureDir);
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    const int tid = get_local_id(1) * get_local_size(0) + get_local_id(0);
-
-    if (tid < 25)
-    {
-        if (sdy[tid] >= 0)
-        {
-            sd1[tid] = sdx[tid];
-            sdabs1[tid] = fabs(sdx[tid]);
-            sd2[tid] = 0;
-            sdabs2[tid] = 0;
-        }
-        else
-        {
-            sd1[tid] = 0;
-            sdabs1[tid] = 0;
-            sd2[tid] = sdx[tid];
-            sdabs2[tid] = fabs(sdx[tid]);
-        }
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid);
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    __global float* descriptors_block = descriptors + descriptors_step * get_group_id(0) + (get_group_id(1) << 3);
-    if (tid < 25)
-    {
-        // write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0)
-        if (tid == 0)
-        {
-            descriptors_block[0] = sd1[0];
-            descriptors_block[1] = sdabs1[0];
-            descriptors_block[2] = sd2[0];
-            descriptors_block[3] = sdabs2[0];
-        }
-
-        if (sdx[tid] >= 0)
-        {
-            sd1[tid] = sdy[tid];
-            sdabs1[tid] = fabs(sdy[tid]);
-            sd2[tid] = 0;
-            sdabs2[tid] = 0;
-        }
-        else
-        {
-            sd1[tid] = 0;
-            sdabs1[tid] = 0;
-            sd2[tid] = sdy[tid];
-            sdabs2[tid] = fabs(sdy[tid]);
-        }
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid);
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    if (tid < 25)
-    {
-        // write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0)
-        if (tid == 0)
-        {
-            descriptors_block[4] = sd1[0];
-            descriptors_block[5] = sdabs1[0];
-            descriptors_block[6] = sd2[0];
-            descriptors_block[7] = sdabs2[0];
-        }
-    }
-}
-
-void reduce_sum128(volatile __local  float* smem, int tid)
-{
-#ifndef WAVE_SIZE
-#define WAVE_SIZE 1
-#endif
-
-    if (tid < 64)
-    {
-        smem[tid] += smem[tid + 64];
-#if WAVE_SIZE < 64
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 32)
-    {
-#endif
-        smem[tid] += smem[tid + 32];
-#if WAVE_SIZE < 32
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 16)
-    {
-#endif
-        smem[tid] += smem[tid + 16];
-#if WAVE_SIZE < 16
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 8)
-    {
-#endif
-        smem[tid] += smem[tid + 8];
-#if WAVE_SIZE < 8
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 4)
-    {
-#endif
-        smem[tid] += smem[tid + 4];
-#if WAVE_SIZE < 4
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 2)
-    {
-#endif
-        smem[tid] += smem[tid + 2];
-#if WAVE_SIZE < 2
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 1)
-    {
-#endif
-        smem[tid] += smem[tid + 1];
-    }
-}
-
-
-void reduce_sum64(volatile __local  float* smem, int tid)
-{
-#ifndef WAVE_SIZE
-#define WAVE_SIZE 1
-#endif
-    if (tid < 32)
-    {
-        smem[tid] += smem[tid + 32];
-#if WAVE_SIZE < 32
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 16)
-    {
-#endif
-        smem[tid] += smem[tid + 16];
-#if WAVE_SIZE < 16
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 8)
-    {
-#endif
-        smem[tid] += smem[tid + 8];
-#if WAVE_SIZE < 8
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 4)
-    {
-#endif
-        smem[tid] += smem[tid + 4];
-#if WAVE_SIZE < 4
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 2)
-    {
-#endif
-        smem[tid] += smem[tid + 2];
-#if WAVE_SIZE < 2
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-    if (tid < 1)
-    {
-#endif
-        smem[tid] += smem[tid + 1];
-    }
-}
-
-__kernel
-void SURF_normalizeDescriptors128(__global float * descriptors, int descriptors_step, int descriptors_offset)
-{
-    descriptors_step /= sizeof(*descriptors);
-    // no need for thread ID
-    __global float* descriptor_base = descriptors + descriptors_step * get_group_id(0);
-
-    // read in the unnormalized descriptor values (squared)
-    volatile __local  float sqDesc[128];
-    const float lookup = descriptor_base[get_local_id(0)];
-    sqDesc[get_local_id(0)] = lookup * lookup;
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    reduce_sum128(sqDesc, get_local_id(0));
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    // compute length (square root)
-    volatile __local  float len;
-    if (get_local_id(0) == 0)
-    {
-        len = sqrt(sqDesc[0]);
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    // normalize and store in output
-    descriptor_base[get_local_id(0)] = lookup / len;
-}
-
-__kernel
-void SURF_normalizeDescriptors64(__global float * descriptors, int descriptors_step, int descriptors_offset)
-{
-    descriptors_step /= sizeof(*descriptors);
-    // no need for thread ID
-    __global float* descriptor_base = descriptors + descriptors_step * get_group_id(0);
-
-    // read in the unnormalized descriptor values (squared)
-    volatile __local  float sqDesc[64];
-    const float lookup = descriptor_base[get_local_id(0)];
-    sqDesc[get_local_id(0)] = lookup * lookup;
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    reduce_sum64(sqDesc, get_local_id(0));
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    // compute length (square root)
-    volatile __local  float len;
-    if (get_local_id(0) == 0)
-    {
-        len = sqrt(sqDesc[0]);
-    }
-    barrier(CLK_LOCAL_MEM_FENCE);
-
-    // normalize and store in output
-    descriptor_base[get_local_id(0)] = lookup / len;
-}
diff --git a/modules/nonfree/src/precomp.hpp b/modules/nonfree/src/precomp.hpp
deleted file mode 100644 (file)
index 001b500..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                          License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#ifndef __OPENCV_PRECOMP_H__
-#define __OPENCV_PRECOMP_H__
-
-#include "opencv2/nonfree.hpp"
-#include "opencv2/imgproc.hpp"
-
-#include "opencv2/core/utility.hpp"
-#include "opencv2/core/private.hpp"
-
-#include "opencv2/nonfree/cuda.hpp"
-#include "opencv2/core/private.cuda.hpp"
-
-#include "opencv2/core/ocl.hpp"
-
-#include "opencv2/opencv_modules.hpp"
-
-#ifdef HAVE_OPENCV_CUDAARITHM
-#  include "opencv2/cudaarithm.hpp"
-#endif
-
-#include "opencv2/core/private.hpp"
-
-#endif
diff --git a/modules/nonfree/src/sift.cpp b/modules/nonfree/src/sift.cpp
deleted file mode 100644 (file)
index 2112971..0000000
+++ /dev/null
@@ -1,816 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                          License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-/**********************************************************************************************\
- Implementation of SIFT is based on the code from http://blogs.oregonstate.edu/hess/code/sift/
- Below is the original copyright.
-
-//    Copyright (c) 2006-2010, Rob Hess <hess@eecs.oregonstate.edu>
-//    All rights reserved.
-
-//    The following patent has been issued for methods embodied in this
-//    software: "Method and apparatus for identifying scale invariant features
-//    in an image and use of same for locating an object in an image," David
-//    G. Lowe, US Patent 6,711,293 (March 23, 2004). Provisional application
-//    filed March 8, 1999. Asignee: The University of British Columbia. For
-//    further details, contact David Lowe (lowe@cs.ubc.ca) or the
-//    University-Industry Liaison Office of the University of British
-//    Columbia.
-
-//    Note that restrictions imposed by this patent (and possibly others)
-//    exist independently of and may be in conflict with the freedoms granted
-//    in this license, which refers to copyright of the program, not patents
-//    for any methods that it implements.  Both copyright and patent law must
-//    be obeyed to legally use and redistribute this program and it is not the
-//    purpose of this license to induce you to infringe any patents or other
-//    property right claims or to contest validity of any such claims.  If you
-//    redistribute or use the program, then this license merely protects you
-//    from committing copyright infringement.  It does not protect you from
-//    committing patent infringement.  So, before you do anything with this
-//    program, make sure that you have permission to do so not merely in terms
-//    of copyright, but also in terms of patent law.
-
-//    Please note that this license is not to be understood as a guarantee
-//    either.  If you use the program according to this license, but in
-//    conflict with patent law, it does not mean that the licensor will refund
-//    you for any losses that you incur if you are sued for your patent
-//    infringement.
-
-//    Redistribution and use in source and binary forms, with or without
-//    modification, are permitted provided that the following conditions are
-//    met:
-//        * Redistributions of source code must retain the above copyright and
-//          patent notices, this list of conditions and the following
-//          disclaimer.
-//        * Redistributions in binary form must reproduce the above copyright
-//          notice, this list of conditions and the following disclaimer in
-//          the documentation and/or other materials provided with the
-//          distribution.
-//        * Neither the name of Oregon State University nor the names of its
-//          contributors may be used to endorse or promote products derived
-//          from this software without specific prior written permission.
-
-//    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-//    IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-//    TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-//    PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-//    HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-//    EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-//    PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-//    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-//    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-//    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-//    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-\**********************************************************************************************/
-
-#include "precomp.hpp"
-#include <iostream>
-#include <stdarg.h>
-
-namespace cv
-{
-
-/******************************* Defs and macros *****************************/
-
-// default width of descriptor histogram array
-static const int SIFT_DESCR_WIDTH = 4;
-
-// default number of bins per histogram in descriptor array
-static const int SIFT_DESCR_HIST_BINS = 8;
-
-// assumed gaussian blur for input image
-static const float SIFT_INIT_SIGMA = 0.5f;
-
-// width of border in which to ignore keypoints
-static const int SIFT_IMG_BORDER = 5;
-
-// maximum steps of keypoint interpolation before failure
-static const int SIFT_MAX_INTERP_STEPS = 5;
-
-// default number of bins in histogram for orientation assignment
-static const int SIFT_ORI_HIST_BINS = 36;
-
-// determines gaussian sigma for orientation assignment
-static const float SIFT_ORI_SIG_FCTR = 1.5f;
-
-// determines the radius of the region used in orientation assignment
-static const float SIFT_ORI_RADIUS = 3 * SIFT_ORI_SIG_FCTR;
-
-// orientation magnitude relative to max that results in new feature
-static const float SIFT_ORI_PEAK_RATIO = 0.8f;
-
-// determines the size of a single descriptor orientation histogram
-static const float SIFT_DESCR_SCL_FCTR = 3.f;
-
-// threshold on magnitude of elements of descriptor vector
-static const float SIFT_DESCR_MAG_THR = 0.2f;
-
-// factor used to convert floating-point descriptor to unsigned char
-static const float SIFT_INT_DESCR_FCTR = 512.f;
-
-#if 0
-// intermediate type used for DoG pyramids
-typedef short sift_wt;
-static const int SIFT_FIXPT_SCALE = 48;
-#else
-// intermediate type used for DoG pyramids
-typedef float sift_wt;
-static const int SIFT_FIXPT_SCALE = 1;
-#endif
-
-static inline void
-unpackOctave(const KeyPoint& kpt, int& octave, int& layer, float& scale)
-{
-    octave = kpt.octave & 255;
-    layer = (kpt.octave >> 8) & 255;
-    octave = octave < 128 ? octave : (-128 | octave);
-    scale = octave >= 0 ? 1.f/(1 << octave) : (float)(1 << -octave);
-}
-
-static Mat createInitialImage( const Mat& img, bool doubleImageSize, float sigma )
-{
-    Mat gray, gray_fpt;
-    if( img.channels() == 3 || img.channels() == 4 )
-        cvtColor(img, gray, COLOR_BGR2GRAY);
-    else
-        img.copyTo(gray);
-    gray.convertTo(gray_fpt, DataType<sift_wt>::type, SIFT_FIXPT_SCALE, 0);
-
-    float sig_diff;
-
-    if( doubleImageSize )
-    {
-        sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA * 4, 0.01f) );
-        Mat dbl;
-        resize(gray_fpt, dbl, Size(gray.cols*2, gray.rows*2), 0, 0, INTER_LINEAR);
-        GaussianBlur(dbl, dbl, Size(), sig_diff, sig_diff);
-        return dbl;
-    }
-    else
-    {
-        sig_diff = sqrtf( std::max(sigma * sigma - SIFT_INIT_SIGMA * SIFT_INIT_SIGMA, 0.01f) );
-        GaussianBlur(gray_fpt, gray_fpt, Size(), sig_diff, sig_diff);
-        return gray_fpt;
-    }
-}
-
-
-void SIFT::buildGaussianPyramid( const Mat& base, std::vector<Mat>& pyr, int nOctaves ) const
-{
-    std::vector<double> sig(nOctaveLayers + 3);
-    pyr.resize(nOctaves*(nOctaveLayers + 3));
-
-    // precompute Gaussian sigmas using the following formula:
-    //  \sigma_{total}^2 = \sigma_{i}^2 + \sigma_{i-1}^2
-    sig[0] = sigma;
-    double k = std::pow( 2., 1. / nOctaveLayers );
-    for( int i = 1; i < nOctaveLayers + 3; i++ )
-    {
-        double sig_prev = std::pow(k, (double)(i-1))*sigma;
-        double sig_total = sig_prev*k;
-        sig[i] = std::sqrt(sig_total*sig_total - sig_prev*sig_prev);
-    }
-
-    for( int o = 0; o < nOctaves; o++ )
-    {
-        for( int i = 0; i < nOctaveLayers + 3; i++ )
-        {
-            Mat& dst = pyr[o*(nOctaveLayers + 3) + i];
-            if( o == 0  &&  i == 0 )
-                dst = base;
-            // base of new octave is halved image from end of previous octave
-            else if( i == 0 )
-            {
-                const Mat& src = pyr[(o-1)*(nOctaveLayers + 3) + nOctaveLayers];
-                resize(src, dst, Size(src.cols/2, src.rows/2),
-                       0, 0, INTER_NEAREST);
-            }
-            else
-            {
-                const Mat& src = pyr[o*(nOctaveLayers + 3) + i-1];
-                GaussianBlur(src, dst, Size(), sig[i], sig[i]);
-            }
-        }
-    }
-}
-
-
-void SIFT::buildDoGPyramid( const std::vector<Mat>& gpyr, std::vector<Mat>& dogpyr ) const
-{
-    int nOctaves = (int)gpyr.size()/(nOctaveLayers + 3);
-    dogpyr.resize( nOctaves*(nOctaveLayers + 2) );
-
-    for( int o = 0; o < nOctaves; o++ )
-    {
-        for( int i = 0; i < nOctaveLayers + 2; i++ )
-        {
-            const Mat& src1 = gpyr[o*(nOctaveLayers + 3) + i];
-            const Mat& src2 = gpyr[o*(nOctaveLayers + 3) + i + 1];
-            Mat& dst = dogpyr[o*(nOctaveLayers + 2) + i];
-            subtract(src2, src1, dst, noArray(), DataType<sift_wt>::type);
-        }
-    }
-}
-
-
-// Computes a gradient orientation histogram at a specified pixel
-static float calcOrientationHist( const Mat& img, Point pt, int radius,
-                                  float sigma, float* hist, int n )
-{
-    int i, j, k, len = (radius*2+1)*(radius*2+1);
-
-    float expf_scale = -1.f/(2.f * sigma * sigma);
-    AutoBuffer<float> buf(len*4 + n+4);
-    float *X = buf, *Y = X + len, *Mag = X, *Ori = Y + len, *W = Ori + len;
-    float* temphist = W + len + 2;
-
-    for( i = 0; i < n; i++ )
-        temphist[i] = 0.f;
-
-    for( i = -radius, k = 0; i <= radius; i++ )
-    {
-        int y = pt.y + i;
-        if( y <= 0 || y >= img.rows - 1 )
-            continue;
-        for( j = -radius; j <= radius; j++ )
-        {
-            int x = pt.x + j;
-            if( x <= 0 || x >= img.cols - 1 )
-                continue;
-
-            float dx = (float)(img.at<sift_wt>(y, x+1) - img.at<sift_wt>(y, x-1));
-            float dy = (float)(img.at<sift_wt>(y-1, x) - img.at<sift_wt>(y+1, x));
-
-            X[k] = dx; Y[k] = dy; W[k] = (i*i + j*j)*expf_scale;
-            k++;
-        }
-    }
-
-    len = k;
-
-    // compute gradient values, orientations and the weights over the pixel neighborhood
-    exp(W, W, len);
-    fastAtan2(Y, X, Ori, len, true);
-    magnitude(X, Y, Mag, len);
-
-    for( k = 0; k < len; k++ )
-    {
-        int bin = cvRound((n/360.f)*Ori[k]);
-        if( bin >= n )
-            bin -= n;
-        if( bin < 0 )
-            bin += n;
-        temphist[bin] += W[k]*Mag[k];
-    }
-
-    // smooth the histogram
-    temphist[-1] = temphist[n-1];
-    temphist[-2] = temphist[n-2];
-    temphist[n] = temphist[0];
-    temphist[n+1] = temphist[1];
-    for( i = 0; i < n; i++ )
-    {
-        hist[i] = (temphist[i-2] + temphist[i+2])*(1.f/16.f) +
-            (temphist[i-1] + temphist[i+1])*(4.f/16.f) +
-            temphist[i]*(6.f/16.f);
-    }
-
-    float maxval = hist[0];
-    for( i = 1; i < n; i++ )
-        maxval = std::max(maxval, hist[i]);
-
-    return maxval;
-}
-
-
-//
-// Interpolates a scale-space extremum's location and scale to subpixel
-// accuracy to form an image feature. Rejects features with low contrast.
-// Based on Section 4 of Lowe's paper.
-static bool adjustLocalExtrema( const std::vector<Mat>& dog_pyr, KeyPoint& kpt, int octv,
-                                int& layer, int& r, int& c, int nOctaveLayers,
-                                float contrastThreshold, float edgeThreshold, float sigma )
-{
-    const float img_scale = 1.f/(255*SIFT_FIXPT_SCALE);
-    const float deriv_scale = img_scale*0.5f;
-    const float second_deriv_scale = img_scale;
-    const float cross_deriv_scale = img_scale*0.25f;
-
-    float xi=0, xr=0, xc=0, contr=0;
-    int i = 0;
-
-    for( ; i < SIFT_MAX_INTERP_STEPS; i++ )
-    {
-        int idx = octv*(nOctaveLayers+2) + layer;
-        const Mat& img = dog_pyr[idx];
-        const Mat& prev = dog_pyr[idx-1];
-        const Mat& next = dog_pyr[idx+1];
-
-        Vec3f dD((img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1))*deriv_scale,
-                 (img.at<sift_wt>(r+1, c) - img.at<sift_wt>(r-1, c))*deriv_scale,
-                 (next.at<sift_wt>(r, c) - prev.at<sift_wt>(r, c))*deriv_scale);
-
-        float v2 = (float)img.at<sift_wt>(r, c)*2;
-        float dxx = (img.at<sift_wt>(r, c+1) + img.at<sift_wt>(r, c-1) - v2)*second_deriv_scale;
-        float dyy = (img.at<sift_wt>(r+1, c) + img.at<sift_wt>(r-1, c) - v2)*second_deriv_scale;
-        float dss = (next.at<sift_wt>(r, c) + prev.at<sift_wt>(r, c) - v2)*second_deriv_scale;
-        float dxy = (img.at<sift_wt>(r+1, c+1) - img.at<sift_wt>(r+1, c-1) -
-                     img.at<sift_wt>(r-1, c+1) + img.at<sift_wt>(r-1, c-1))*cross_deriv_scale;
-        float dxs = (next.at<sift_wt>(r, c+1) - next.at<sift_wt>(r, c-1) -
-                     prev.at<sift_wt>(r, c+1) + prev.at<sift_wt>(r, c-1))*cross_deriv_scale;
-        float dys = (next.at<sift_wt>(r+1, c) - next.at<sift_wt>(r-1, c) -
-                     prev.at<sift_wt>(r+1, c) + prev.at<sift_wt>(r-1, c))*cross_deriv_scale;
-
-        Matx33f H(dxx, dxy, dxs,
-                  dxy, dyy, dys,
-                  dxs, dys, dss);
-
-        Vec3f X = H.solve(dD, DECOMP_LU);
-
-        xi = -X[2];
-        xr = -X[1];
-        xc = -X[0];
-
-        if( std::abs(xi) < 0.5f && std::abs(xr) < 0.5f && std::abs(xc) < 0.5f )
-            break;
-
-        if( std::abs(xi) > (float)(INT_MAX/3) ||
-            std::abs(xr) > (float)(INT_MAX/3) ||
-            std::abs(xc) > (float)(INT_MAX/3) )
-            return false;
-
-        c += cvRound(xc);
-        r += cvRound(xr);
-        layer += cvRound(xi);
-
-        if( layer < 1 || layer > nOctaveLayers ||
-            c < SIFT_IMG_BORDER || c >= img.cols - SIFT_IMG_BORDER  ||
-            r < SIFT_IMG_BORDER || r >= img.rows - SIFT_IMG_BORDER )
-            return false;
-    }
-
-    // ensure convergence of interpolation
-    if( i >= SIFT_MAX_INTERP_STEPS )
-        return false;
-
-    {
-        int idx = octv*(nOctaveLayers+2) + layer;
-        const Mat& img = dog_pyr[idx];
-        const Mat& prev = dog_pyr[idx-1];
-        const Mat& next = dog_pyr[idx+1];
-        Matx31f dD((img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1))*deriv_scale,
-                   (img.at<sift_wt>(r+1, c) - img.at<sift_wt>(r-1, c))*deriv_scale,
-                   (next.at<sift_wt>(r, c) - prev.at<sift_wt>(r, c))*deriv_scale);
-        float t = dD.dot(Matx31f(xc, xr, xi));
-
-        contr = img.at<sift_wt>(r, c)*img_scale + t * 0.5f;
-        if( std::abs( contr ) * nOctaveLayers < contrastThreshold )
-            return false;
-
-        // principal curvatures are computed using the trace and det of Hessian
-        float v2 = img.at<sift_wt>(r, c)*2.f;
-        float dxx = (img.at<sift_wt>(r, c+1) + img.at<sift_wt>(r, c-1) - v2)*second_deriv_scale;
-        float dyy = (img.at<sift_wt>(r+1, c) + img.at<sift_wt>(r-1, c) - v2)*second_deriv_scale;
-        float dxy = (img.at<sift_wt>(r+1, c+1) - img.at<sift_wt>(r+1, c-1) -
-                     img.at<sift_wt>(r-1, c+1) + img.at<sift_wt>(r-1, c-1)) * cross_deriv_scale;
-        float tr = dxx + dyy;
-        float det = dxx * dyy - dxy * dxy;
-
-        if( det <= 0 || tr*tr*edgeThreshold >= (edgeThreshold + 1)*(edgeThreshold + 1)*det )
-            return false;
-    }
-
-    kpt.pt.x = (c + xc) * (1 << octv);
-    kpt.pt.y = (r + xr) * (1 << octv);
-    kpt.octave = octv + (layer << 8) + (cvRound((xi + 0.5)*255) << 16);
-    kpt.size = sigma*powf(2.f, (layer + xi) / nOctaveLayers)*(1 << octv)*2;
-    kpt.response = std::abs(contr);
-
-    return true;
-}
-
-
-//
-// Detects features at extrema in DoG scale space.  Bad features are discarded
-// based on contrast and ratio of principal curvatures.
-void SIFT::findScaleSpaceExtrema( const std::vector<Mat>& gauss_pyr, const std::vector<Mat>& dog_pyr,
-                                  std::vector<KeyPoint>& keypoints ) const
-{
-    int nOctaves = (int)gauss_pyr.size()/(nOctaveLayers + 3);
-    int threshold = cvFloor(0.5 * contrastThreshold / nOctaveLayers * 255 * SIFT_FIXPT_SCALE);
-    const int n = SIFT_ORI_HIST_BINS;
-    float hist[n];
-    KeyPoint kpt;
-
-    keypoints.clear();
-
-    for( int o = 0; o < nOctaves; o++ )
-        for( int i = 1; i <= nOctaveLayers; i++ )
-        {
-            int idx = o*(nOctaveLayers+2)+i;
-            const Mat& img = dog_pyr[idx];
-            const Mat& prev = dog_pyr[idx-1];
-            const Mat& next = dog_pyr[idx+1];
-            int step = (int)img.step1();
-            int rows = img.rows, cols = img.cols;
-
-            for( int r = SIFT_IMG_BORDER; r < rows-SIFT_IMG_BORDER; r++)
-            {
-                const sift_wt* currptr = img.ptr<sift_wt>(r);
-                const sift_wt* prevptr = prev.ptr<sift_wt>(r);
-                const sift_wt* nextptr = next.ptr<sift_wt>(r);
-
-                for( int c = SIFT_IMG_BORDER; c < cols-SIFT_IMG_BORDER; c++)
-                {
-                    sift_wt val = currptr[c];
-
-                    // find local extrema with pixel accuracy
-                    if( std::abs(val) > threshold &&
-                       ((val > 0 && val >= currptr[c-1] && val >= currptr[c+1] &&
-                         val >= currptr[c-step-1] && val >= currptr[c-step] && val >= currptr[c-step+1] &&
-                         val >= currptr[c+step-1] && val >= currptr[c+step] && val >= currptr[c+step+1] &&
-                         val >= nextptr[c] && val >= nextptr[c-1] && val >= nextptr[c+1] &&
-                         val >= nextptr[c-step-1] && val >= nextptr[c-step] && val >= nextptr[c-step+1] &&
-                         val >= nextptr[c+step-1] && val >= nextptr[c+step] && val >= nextptr[c+step+1] &&
-                         val >= prevptr[c] && val >= prevptr[c-1] && val >= prevptr[c+1] &&
-                         val >= prevptr[c-step-1] && val >= prevptr[c-step] && val >= prevptr[c-step+1] &&
-                         val >= prevptr[c+step-1] && val >= prevptr[c+step] && val >= prevptr[c+step+1]) ||
-                        (val < 0 && val <= currptr[c-1] && val <= currptr[c+1] &&
-                         val <= currptr[c-step-1] && val <= currptr[c-step] && val <= currptr[c-step+1] &&
-                         val <= currptr[c+step-1] && val <= currptr[c+step] && val <= currptr[c+step+1] &&
-                         val <= nextptr[c] && val <= nextptr[c-1] && val <= nextptr[c+1] &&
-                         val <= nextptr[c-step-1] && val <= nextptr[c-step] && val <= nextptr[c-step+1] &&
-                         val <= nextptr[c+step-1] && val <= nextptr[c+step] && val <= nextptr[c+step+1] &&
-                         val <= prevptr[c] && val <= prevptr[c-1] && val <= prevptr[c+1] &&
-                         val <= prevptr[c-step-1] && val <= prevptr[c-step] && val <= prevptr[c-step+1] &&
-                         val <= prevptr[c+step-1] && val <= prevptr[c+step] && val <= prevptr[c+step+1])))
-                    {
-                        int r1 = r, c1 = c, layer = i;
-                        if( !adjustLocalExtrema(dog_pyr, kpt, o, layer, r1, c1,
-                                                nOctaveLayers, (float)contrastThreshold,
-                                                (float)edgeThreshold, (float)sigma) )
-                            continue;
-                        float scl_octv = kpt.size*0.5f/(1 << o);
-                        float omax = calcOrientationHist(gauss_pyr[o*(nOctaveLayers+3) + layer],
-                                                         Point(c1, r1),
-                                                         cvRound(SIFT_ORI_RADIUS * scl_octv),
-                                                         SIFT_ORI_SIG_FCTR * scl_octv,
-                                                         hist, n);
-                        float mag_thr = (float)(omax * SIFT_ORI_PEAK_RATIO);
-                        for( int j = 0; j < n; j++ )
-                        {
-                            int l = j > 0 ? j - 1 : n - 1;
-                            int r2 = j < n-1 ? j + 1 : 0;
-
-                            if( hist[j] > hist[l]  &&  hist[j] > hist[r2]  &&  hist[j] >= mag_thr )
-                            {
-                                float bin = j + 0.5f * (hist[l]-hist[r2]) / (hist[l] - 2*hist[j] + hist[r2]);
-                                bin = bin < 0 ? n + bin : bin >= n ? bin - n : bin;
-                                kpt.angle = 360.f - (float)((360.f/n) * bin);
-                                if(std::abs(kpt.angle - 360.f) < FLT_EPSILON)
-                                    kpt.angle = 0.f;
-                                keypoints.push_back(kpt);
-                            }
-                        }
-                    }
-                }
-            }
-        }
-}
-
-
-static void calcSIFTDescriptor( const Mat& img, Point2f ptf, float ori, float scl,
-                               int d, int n, float* dst )
-{
-    Point pt(cvRound(ptf.x), cvRound(ptf.y));
-    float cos_t = cosf(ori*(float)(CV_PI/180));
-    float sin_t = sinf(ori*(float)(CV_PI/180));
-    float bins_per_rad = n / 360.f;
-    float exp_scale = -1.f/(d * d * 0.5f);
-    float hist_width = SIFT_DESCR_SCL_FCTR * scl;
-    int radius = cvRound(hist_width * 1.4142135623730951f * (d + 1) * 0.5f);
-    // Clip the radius to the diagonal of the image to avoid autobuffer too large exception
-    radius = std::min(radius, (int) sqrt((double) img.cols*img.cols + img.rows*img.rows));
-    cos_t /= hist_width;
-    sin_t /= hist_width;
-
-    int i, j, k, len = (radius*2+1)*(radius*2+1), histlen = (d+2)*(d+2)*(n+2);
-    int rows = img.rows, cols = img.cols;
-
-    AutoBuffer<float> buf(len*6 + histlen);
-    float *X = buf, *Y = X + len, *Mag = Y, *Ori = Mag + len, *W = Ori + len;
-    float *RBin = W + len, *CBin = RBin + len, *hist = CBin + len;
-
-    for( i = 0; i < d+2; i++ )
-    {
-        for( j = 0; j < d+2; j++ )
-            for( k = 0; k < n+2; k++ )
-                hist[(i*(d+2) + j)*(n+2) + k] = 0.;
-    }
-
-    for( i = -radius, k = 0; i <= radius; i++ )
-        for( j = -radius; j <= radius; j++ )
-        {
-            // Calculate sample's histogram array coords rotated relative to ori.
-            // Subtract 0.5 so samples that fall e.g. in the center of row 1 (i.e.
-            // r_rot = 1.5) have full weight placed in row 1 after interpolation.
-            float c_rot = j * cos_t - i * sin_t;
-            float r_rot = j * sin_t + i * cos_t;
-            float rbin = r_rot + d/2 - 0.5f;
-            float cbin = c_rot + d/2 - 0.5f;
-            int r = pt.y + i, c = pt.x + j;
-
-            if( rbin > -1 && rbin < d && cbin > -1 && cbin < d &&
-                r > 0 && r < rows - 1 && c > 0 && c < cols - 1 )
-            {
-                float dx = (float)(img.at<sift_wt>(r, c+1) - img.at<sift_wt>(r, c-1));
-                float dy = (float)(img.at<sift_wt>(r-1, c) - img.at<sift_wt>(r+1, c));
-                X[k] = dx; Y[k] = dy; RBin[k] = rbin; CBin[k] = cbin;
-                W[k] = (c_rot * c_rot + r_rot * r_rot)*exp_scale;
-                k++;
-            }
-        }
-
-    len = k;
-    fastAtan2(Y, X, Ori, len, true);
-    magnitude(X, Y, Mag, len);
-    exp(W, W, len);
-
-    for( k = 0; k < len; k++ )
-    {
-        float rbin = RBin[k], cbin = CBin[k];
-        float obin = (Ori[k] - ori)*bins_per_rad;
-        float mag = Mag[k]*W[k];
-
-        int r0 = cvFloor( rbin );
-        int c0 = cvFloor( cbin );
-        int o0 = cvFloor( obin );
-        rbin -= r0;
-        cbin -= c0;
-        obin -= o0;
-
-        if( o0 < 0 )
-            o0 += n;
-        if( o0 >= n )
-            o0 -= n;
-
-        // histogram update using tri-linear interpolation
-        float v_r1 = mag*rbin, v_r0 = mag - v_r1;
-        float v_rc11 = v_r1*cbin, v_rc10 = v_r1 - v_rc11;
-        float v_rc01 = v_r0*cbin, v_rc00 = v_r0 - v_rc01;
-        float v_rco111 = v_rc11*obin, v_rco110 = v_rc11 - v_rco111;
-        float v_rco101 = v_rc10*obin, v_rco100 = v_rc10 - v_rco101;
-        float v_rco011 = v_rc01*obin, v_rco010 = v_rc01 - v_rco011;
-        float v_rco001 = v_rc00*obin, v_rco000 = v_rc00 - v_rco001;
-
-        int idx = ((r0+1)*(d+2) + c0+1)*(n+2) + o0;
-        hist[idx] += v_rco000;
-        hist[idx+1] += v_rco001;
-        hist[idx+(n+2)] += v_rco010;
-        hist[idx+(n+3)] += v_rco011;
-        hist[idx+(d+2)*(n+2)] += v_rco100;
-        hist[idx+(d+2)*(n+2)+1] += v_rco101;
-        hist[idx+(d+3)*(n+2)] += v_rco110;
-        hist[idx+(d+3)*(n+2)+1] += v_rco111;
-    }
-
-    // finalize histogram, since the orientation histograms are circular
-    for( i = 0; i < d; i++ )
-        for( j = 0; j < d; j++ )
-        {
-            int idx = ((i+1)*(d+2) + (j+1))*(n+2);
-            hist[idx] += hist[idx+n];
-            hist[idx+1] += hist[idx+n+1];
-            for( k = 0; k < n; k++ )
-                dst[(i*d + j)*n + k] = hist[idx+k];
-        }
-    // copy histogram to the descriptor,
-    // apply hysteresis thresholding
-    // and scale the result, so that it can be easily converted
-    // to byte array
-    float nrm2 = 0;
-    len = d*d*n;
-    for( k = 0; k < len; k++ )
-        nrm2 += dst[k]*dst[k];
-    float thr = std::sqrt(nrm2)*SIFT_DESCR_MAG_THR;
-    for( i = 0, nrm2 = 0; i < k; i++ )
-    {
-        float val = std::min(dst[i], thr);
-        dst[i] = val;
-        nrm2 += val*val;
-    }
-    nrm2 = SIFT_INT_DESCR_FCTR/std::max(std::sqrt(nrm2), FLT_EPSILON);
-
-#if 1
-    for( k = 0; k < len; k++ )
-    {
-        dst[k] = saturate_cast<uchar>(dst[k]*nrm2);
-    }
-#else
-    float nrm1 = 0;
-    for( k = 0; k < len; k++ )
-    {
-        dst[k] *= nrm2;
-        nrm1 += dst[k];
-    }
-    nrm1 = 1.f/std::max(nrm1, FLT_EPSILON);
-    for( k = 0; k < len; k++ )
-    {
-        dst[k] = std::sqrt(dst[k] * nrm1);//saturate_cast<uchar>(std::sqrt(dst[k] * nrm1)*SIFT_INT_DESCR_FCTR);
-    }
-#endif
-}
-
-static void calcDescriptors(const std::vector<Mat>& gpyr, const std::vector<KeyPoint>& keypoints,
-                            Mat& descriptors, int nOctaveLayers, int firstOctave )
-{
-    int d = SIFT_DESCR_WIDTH, n = SIFT_DESCR_HIST_BINS;
-
-    for( size_t i = 0; i < keypoints.size(); i++ )
-    {
-        KeyPoint kpt = keypoints[i];
-        int octave, layer;
-        float scale;
-        unpackOctave(kpt, octave, layer, scale);
-        CV_Assert(octave >= firstOctave && layer <= nOctaveLayers+2);
-        float size=kpt.size*scale;
-        Point2f ptf(kpt.pt.x*scale, kpt.pt.y*scale);
-        const Mat& img = gpyr[(octave - firstOctave)*(nOctaveLayers + 3) + layer];
-
-        float angle = 360.f - kpt.angle;
-        if(std::abs(angle - 360.f) < FLT_EPSILON)
-            angle = 0.f;
-        calcSIFTDescriptor(img, ptf, angle, size*0.5f, d, n, descriptors.ptr<float>((int)i));
-    }
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////
-
-SIFT::SIFT( int _nfeatures, int _nOctaveLayers,
-           double _contrastThreshold, double _edgeThreshold, double _sigma )
-    : nfeatures(_nfeatures), nOctaveLayers(_nOctaveLayers),
-    contrastThreshold(_contrastThreshold), edgeThreshold(_edgeThreshold), sigma(_sigma)
-{
-}
-
-int SIFT::descriptorSize() const
-{
-    return SIFT_DESCR_WIDTH*SIFT_DESCR_WIDTH*SIFT_DESCR_HIST_BINS;
-}
-
-int SIFT::descriptorType() const
-{
-    return CV_32F;
-}
-
-int SIFT::defaultNorm() const
-{
-    return NORM_L2;
-}
-
-
-void SIFT::operator()(InputArray _image, InputArray _mask,
-                      std::vector<KeyPoint>& keypoints) const
-{
-    (*this)(_image, _mask, keypoints, noArray());
-}
-
-
-void SIFT::operator()(InputArray _image, InputArray _mask,
-                      std::vector<KeyPoint>& keypoints,
-                      OutputArray _descriptors,
-                      bool useProvidedKeypoints) const
-{
-    int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
-    Mat image = _image.getMat(), mask = _mask.getMat();
-
-    if( image.empty() || image.depth() != CV_8U )
-        CV_Error( Error::StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );
-
-    if( !mask.empty() && mask.type() != CV_8UC1 )
-        CV_Error( Error::StsBadArg, "mask has incorrect type (!=CV_8UC1)" );
-
-    if( useProvidedKeypoints )
-    {
-        firstOctave = 0;
-        int maxOctave = INT_MIN;
-        for( size_t i = 0; i < keypoints.size(); i++ )
-        {
-            int octave, layer;
-            float scale;
-            unpackOctave(keypoints[i], octave, layer, scale);
-            firstOctave = std::min(firstOctave, octave);
-            maxOctave = std::max(maxOctave, octave);
-            actualNLayers = std::max(actualNLayers, layer-2);
-        }
-
-        firstOctave = std::min(firstOctave, 0);
-        CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers );
-        actualNOctaves = maxOctave - firstOctave + 1;
-    }
-
-    Mat base = createInitialImage(image, firstOctave < 0, (float)sigma);
-    std::vector<Mat> gpyr, dogpyr;
-    int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(std::log( (double)std::min( base.cols, base.rows ) ) / std::log(2.) - 2) - firstOctave;
-
-    //double t, tf = getTickFrequency();
-    //t = (double)getTickCount();
-    buildGaussianPyramid(base, gpyr, nOctaves);
-    buildDoGPyramid(gpyr, dogpyr);
-
-    //t = (double)getTickCount() - t;
-    //printf("pyramid construction time: %g\n", t*1000./tf);
-
-    if( !useProvidedKeypoints )
-    {
-        //t = (double)getTickCount();
-        findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
-        KeyPointsFilter::removeDuplicated( keypoints );
-
-        if( nfeatures > 0 )
-            KeyPointsFilter::retainBest(keypoints, nfeatures);
-        //t = (double)getTickCount() - t;
-        //printf("keypoint detection time: %g\n", t*1000./tf);
-
-        if( firstOctave < 0 )
-            for( size_t i = 0; i < keypoints.size(); i++ )
-            {
-                KeyPoint& kpt = keypoints[i];
-                float scale = 1.f/(float)(1 << -firstOctave);
-                kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
-                kpt.pt *= scale;
-                kpt.size *= scale;
-            }
-
-        if( !mask.empty() )
-            KeyPointsFilter::runByPixelsMask( keypoints, mask );
-    }
-    else
-    {
-        // filter keypoints by mask
-        //KeyPointsFilter::runByPixelsMask( keypoints, mask );
-    }
-
-    if( _descriptors.needed() )
-    {
-        //t = (double)getTickCount();
-        int dsize = descriptorSize();
-        _descriptors.create((int)keypoints.size(), dsize, CV_32F);
-        Mat descriptors = _descriptors.getMat();
-
-        calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave);
-        //t = (double)getTickCount() - t;
-        //printf("descriptor extraction time: %g\n", t*1000./tf);
-    }
-}
-
-void SIFT::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
-{
-    (*this)(image.getMat(), mask.getMat(), keypoints, noArray());
-}
-
-void SIFT::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
-{
-    (*this)(image, Mat(), keypoints, descriptors, true);
-}
-
-}
diff --git a/modules/nonfree/src/surf.cpp b/modules/nonfree/src/surf.cpp
deleted file mode 100644 (file)
index 05978e4..0000000
+++ /dev/null
@@ -1,1024 +0,0 @@
-/* Original code has been submitted by Liu Liu. Here is the copyright.
-----------------------------------------------------------------------------------
- * An OpenCV Implementation of SURF
- * Further Information Refer to "SURF: Speed-Up Robust Feature"
- * Author: Liu Liu
- * liuliu.1987+opencv@gmail.com
- *
- * There are still serveral lacks for this experimental implementation:
- * 1.The interpolation of sub-pixel mentioned in article was not implemented yet;
- * 2.A comparision with original libSurf.so shows that the hessian detector is not a 100% match to their implementation;
- * 3.Due to above reasons, I recommanded the original one for study and reuse;
- *
- * However, the speed of this implementation is something comparable to original one.
- *
- * Copyright© 2008, Liu Liu All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *  Redistributions of source code must retain the above
- *  copyright notice, this list of conditions and the following
- *  disclaimer.
- *  Redistributions in binary form must reproduce the above
- *  copyright notice, this list of conditions and the following
- *  disclaimer in the documentation and/or other materials
- *  provided with the distribution.
- *  The name of Contributor may not be used to endorse or
- *  promote products derived from this software without
- *  specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
- * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
- * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
- * OF SUCH DAMAGE.
- */
-
-/*
-   The following changes have been made, comparing to the original contribution:
-   1. A lot of small optimizations, less memory allocations, got rid of global buffers
-   2. Reversed order of cvGetQuadrangleSubPix and cvResize calls; probably less accurate, but much faster
-   3. The descriptor computing part (which is most expensive) is threaded using OpenMP
-   (subpixel-accurate keypoint localization and scale estimation are still TBD)
-*/
-
-/*
-KeyPoint position and scale interpolation has been implemented as described in
-the Brown and Lowe paper cited by the SURF paper.
-
-The sampling step along the x and y axes of the image for the determinant of the
-Hessian is now the same for each layer in an octave. While this increases the
-computation time, it ensures that a true 3x3x3 neighbourhood exists, with
-samples calculated at the same position in the layers above and below. This
-results in improved maxima detection and non-maxima suppression, and I think it
-is consistent with the description in the SURF paper.
-
-The wavelet size sampling interval has also been made consistent. The wavelet
-size at the first layer of the first octave is now 9 instead of 7. Along with
-regular position sampling steps, this makes location and scale interpolation
-easy. I think this is consistent with the SURF paper and original
-implementation.
-
-The scaling of the wavelet parameters has been fixed to ensure that the patterns
-are symmetric around the centre. Previously the truncation caused by integer
-division in the scaling ratio caused a bias towards the top left of the wavelet,
-resulting in inconsistent keypoint positions.
-
-The matrices for the determinant and trace of the Hessian are now reused in each
-octave.
-
-The extraction of the patch of pixels surrounding a keypoint used to build a
-descriptor has been simplified.
-
-KeyPoint descriptor normalisation has been changed from normalising each 4x4
-cell (resulting in a descriptor of magnitude 16) to normalising the entire
-descriptor to magnitude 1.
-
-The default number of octaves has been increased from 3 to 4 to match the
-original SURF binary default. The increase in computation time is minimal since
-the higher octaves are sampled sparsely.
-
-The default number of layers per octave has been reduced from 3 to 2, to prevent
-redundant calculation of similar sizes in consecutive octaves.  This decreases
-computation time. The number of features extracted may be less, however the
-additional features were mostly redundant.
-
-The radius of the circle of gradient samples used to assign an orientation has
-been increased from 4 to 6 to match the description in the SURF paper. This is
-now defined by ORI_RADIUS, and could be made into a parameter.
-
-The size of the sliding window used in orientation assignment has been reduced
-from 120 to 60 degrees to match the description in the SURF paper. This is now
-defined by ORI_WIN, and could be made into a parameter.
-
-Other options like  HAAR_SIZE0, HAAR_SIZE_INC, SAMPLE_STEP0, ORI_SEARCH_INC,
-ORI_SIGMA and DESC_SIGMA have been separated from the code and documented.
-These could also be made into parameters.
-
-Modifications by Ian Mahon
-
-*/
-#include "precomp.hpp"
-#include "surf.hpp"
-
-namespace cv
-{
-
-static const int   SURF_ORI_SEARCH_INC = 5;
-static const float SURF_ORI_SIGMA      = 2.5f;
-static const float SURF_DESC_SIGMA     = 3.3f;
-
-// Wavelet size at first layer of first octave.
-static const int SURF_HAAR_SIZE0 = 9;
-
-// Wavelet size increment between layers. This should be an even number,
-// such that the wavelet sizes in an octave are either all even or all odd.
-// This ensures that when looking for the neighbours of a sample, the layers
-// above and below are aligned correctly.
-static const int SURF_HAAR_SIZE_INC = 6;
-
-
-struct SurfHF
-{
-    int p0, p1, p2, p3;
-    float w;
-
-    SurfHF(): p0(0), p1(0), p2(0), p3(0), w(0) {}
-};
-
-inline float calcHaarPattern( const int* origin, const SurfHF* f, int n )
-{
-    double d = 0;
-    for( int k = 0; k < n; k++ )
-        d += (origin[f[k].p0] + origin[f[k].p3] - origin[f[k].p1] - origin[f[k].p2])*f[k].w;
-    return (float)d;
-}
-
-static void
-resizeHaarPattern( const int src[][5], SurfHF* dst, int n, int oldSize, int newSize, int widthStep )
-{
-    float ratio = (float)newSize/oldSize;
-    for( int k = 0; k < n; k++ )
-    {
-        int dx1 = cvRound( ratio*src[k][0] );
-        int dy1 = cvRound( ratio*src[k][1] );
-        int dx2 = cvRound( ratio*src[k][2] );
-        int dy2 = cvRound( ratio*src[k][3] );
-        dst[k].p0 = dy1*widthStep + dx1;
-        dst[k].p1 = dy2*widthStep + dx1;
-        dst[k].p2 = dy1*widthStep + dx2;
-        dst[k].p3 = dy2*widthStep + dx2;
-        dst[k].w = src[k][4]/((float)(dx2-dx1)*(dy2-dy1));
-    }
-}
-
-/*
- * Calculate the determinant and trace of the Hessian for a layer of the
- * scale-space pyramid
- */
-static void calcLayerDetAndTrace( const Mat& sum, int size, int sampleStep,
-                                  Mat& det, Mat& trace )
-{
-    const int NX=3, NY=3, NXY=4;
-    const int dx_s[NX][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };
-    const int dy_s[NY][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
-    const int dxy_s[NXY][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };
-
-    SurfHF Dx[NX], Dy[NY], Dxy[NXY];
-
-    if( size > sum.rows-1 || size > sum.cols-1 )
-       return;
-
-    resizeHaarPattern( dx_s , Dx , NX , 9, size, sum.cols );
-    resizeHaarPattern( dy_s , Dy , NY , 9, size, sum.cols );
-    resizeHaarPattern( dxy_s, Dxy, NXY, 9, size, sum.cols );
-
-    /* The integral image 'sum' is one pixel bigger than the source image */
-    int samples_i = 1+(sum.rows-1-size)/sampleStep;
-    int samples_j = 1+(sum.cols-1-size)/sampleStep;
-
-    /* Ignore pixels where some of the kernel is outside the image */
-    int margin = (size/2)/sampleStep;
-
-    for( int i = 0; i < samples_i; i++ )
-    {
-        const int* sum_ptr = sum.ptr<int>(i*sampleStep);
-        float* det_ptr = &det.at<float>(i+margin, margin);
-        float* trace_ptr = &trace.at<float>(i+margin, margin);
-        for( int j = 0; j < samples_j; j++ )
-        {
-            float dx  = calcHaarPattern( sum_ptr, Dx , 3 );
-            float dy  = calcHaarPattern( sum_ptr, Dy , 3 );
-            float dxy = calcHaarPattern( sum_ptr, Dxy, 4 );
-            sum_ptr += sampleStep;
-            det_ptr[j] = dx*dy - 0.81f*dxy*dxy;
-            trace_ptr[j] = dx + dy;
-        }
-    }
-}
-
-
-/*
- * Maxima location interpolation as described in "Invariant Features from
- * Interest Point Groups" by Matthew Brown and David Lowe. This is performed by
- * fitting a 3D quadratic to a set of neighbouring samples.
- *
- * The gradient vector and Hessian matrix at the initial keypoint location are
- * approximated using central differences. The linear system Ax = b is then
- * solved, where A is the Hessian, b is the negative gradient, and x is the
- * offset of the interpolated maxima coordinates from the initial estimate.
- * This is equivalent to an iteration of Netwon's optimisation algorithm.
- *
- * N9 contains the samples in the 3x3x3 neighbourhood of the maxima
- * dx is the sampling step in x
- * dy is the sampling step in y
- * ds is the sampling step in size
- * point contains the keypoint coordinates and scale to be modified
- *
- * Return value is 1 if interpolation was successful, 0 on failure.
- */
-static int
-interpolateKeypoint( float N9[3][9], int dx, int dy, int ds, KeyPoint& kpt )
-{
-    Vec3f b(-(N9[1][5]-N9[1][3])/2,  // Negative 1st deriv with respect to x
-            -(N9[1][7]-N9[1][1])/2,  // Negative 1st deriv with respect to y
-            -(N9[2][4]-N9[0][4])/2); // Negative 1st deriv with respect to s
-
-    Matx33f A(
-        N9[1][3]-2*N9[1][4]+N9[1][5],            // 2nd deriv x, x
-        (N9[1][8]-N9[1][6]-N9[1][2]+N9[1][0])/4, // 2nd deriv x, y
-        (N9[2][5]-N9[2][3]-N9[0][5]+N9[0][3])/4, // 2nd deriv x, s
-        (N9[1][8]-N9[1][6]-N9[1][2]+N9[1][0])/4, // 2nd deriv x, y
-        N9[1][1]-2*N9[1][4]+N9[1][7],            // 2nd deriv y, y
-        (N9[2][7]-N9[2][1]-N9[0][7]+N9[0][1])/4, // 2nd deriv y, s
-        (N9[2][5]-N9[2][3]-N9[0][5]+N9[0][3])/4, // 2nd deriv x, s
-        (N9[2][7]-N9[2][1]-N9[0][7]+N9[0][1])/4, // 2nd deriv y, s
-        N9[0][4]-2*N9[1][4]+N9[2][4]);           // 2nd deriv s, s
-
-    Vec3f x = A.solve(b, DECOMP_LU);
-
-    bool ok = (x[0] != 0 || x[1] != 0 || x[2] != 0) &&
-        std::abs(x[0]) <= 1 && std::abs(x[1]) <= 1 && std::abs(x[2]) <= 1;
-
-    if( ok )
-    {
-        kpt.pt.x += x[0]*dx;
-        kpt.pt.y += x[1]*dy;
-        kpt.size = (float)cvRound( kpt.size + x[2]*ds );
-    }
-    return ok;
-}
-
-// Multi-threaded construction of the scale-space pyramid
-struct SURFBuildInvoker : ParallelLoopBody
-{
-    SURFBuildInvoker( const Mat& _sum, const std::vector<int>& _sizes,
-                      const std::vector<int>& _sampleSteps,
-                      std::vector<Mat>& _dets, std::vector<Mat>& _traces )
-    {
-        sum = &_sum;
-        sizes = &_sizes;
-        sampleSteps = &_sampleSteps;
-        dets = &_dets;
-        traces = &_traces;
-    }
-
-    void operator()(const Range& range) const
-    {
-        for( int i=range.start; i<range.end; i++ )
-            calcLayerDetAndTrace( *sum, (*sizes)[i], (*sampleSteps)[i], (*dets)[i], (*traces)[i] );
-    }
-
-    const Mat *sum;
-    const std::vector<int> *sizes;
-    const std::vector<int> *sampleSteps;
-    std::vector<Mat>* dets;
-    std::vector<Mat>* traces;
-};
-
-// Multi-threaded search of the scale-space pyramid for keypoints
-struct SURFFindInvoker : ParallelLoopBody
-{
-    SURFFindInvoker( const Mat& _sum, const Mat& _mask_sum,
-                     const std::vector<Mat>& _dets, const std::vector<Mat>& _traces,
-                     const std::vector<int>& _sizes, const std::vector<int>& _sampleSteps,
-                     const std::vector<int>& _middleIndices, std::vector<KeyPoint>& _keypoints,
-                     int _nOctaveLayers, float _hessianThreshold )
-    {
-        sum = &_sum;
-        mask_sum = &_mask_sum;
-        dets = &_dets;
-        traces = &_traces;
-        sizes = &_sizes;
-        sampleSteps = &_sampleSteps;
-        middleIndices = &_middleIndices;
-        keypoints = &_keypoints;
-        nOctaveLayers = _nOctaveLayers;
-        hessianThreshold = _hessianThreshold;
-    }
-
-    static void findMaximaInLayer( const Mat& sum, const Mat& mask_sum,
-                   const std::vector<Mat>& dets, const std::vector<Mat>& traces,
-                   const std::vector<int>& sizes, std::vector<KeyPoint>& keypoints,
-                   int octave, int layer, float hessianThreshold, int sampleStep );
-
-    void operator()(const Range& range) const
-    {
-        for( int i=range.start; i<range.end; i++ )
-        {
-            int layer = (*middleIndices)[i];
-            int octave = i / nOctaveLayers;
-            findMaximaInLayer( *sum, *mask_sum, *dets, *traces, *sizes,
-                               *keypoints, octave, layer, hessianThreshold,
-                               (*sampleSteps)[layer] );
-        }
-    }
-
-    const Mat *sum;
-    const Mat *mask_sum;
-    const std::vector<Mat>* dets;
-    const std::vector<Mat>* traces;
-    const std::vector<int>* sizes;
-    const std::vector<int>* sampleSteps;
-    const std::vector<int>* middleIndices;
-    std::vector<KeyPoint>* keypoints;
-    int nOctaveLayers;
-    float hessianThreshold;
-
-    static Mutex findMaximaInLayer_m;
-};
-
-Mutex SURFFindInvoker::findMaximaInLayer_m;
-
-
-/*
- * Find the maxima in the determinant of the Hessian in a layer of the
- * scale-space pyramid
- */
-void SURFFindInvoker::findMaximaInLayer( const Mat& sum, const Mat& mask_sum,
-                   const std::vector<Mat>& dets, const std::vector<Mat>& traces,
-                   const std::vector<int>& sizes, std::vector<KeyPoint>& keypoints,
-                   int octave, int layer, float hessianThreshold, int sampleStep )
-{
-    // Wavelet Data
-    const int NM=1;
-    const int dm[NM][5] = { {0, 0, 9, 9, 1} };
-    SurfHF Dm;
-
-    int size = sizes[layer];
-
-    // The integral image 'sum' is one pixel bigger than the source image
-    int layer_rows = (sum.rows-1)/sampleStep;
-    int layer_cols = (sum.cols-1)/sampleStep;
-
-    // Ignore pixels without a 3x3x3 neighbourhood in the layer above
-    int margin = (sizes[layer+1]/2)/sampleStep+1;
-
-    if( !mask_sum.empty() )
-       resizeHaarPattern( dm, &Dm, NM, 9, size, mask_sum.cols );
-
-    int step = (int)(dets[layer].step/dets[layer].elemSize());
-
-    for( int i = margin; i < layer_rows - margin; i++ )
-    {
-        const float* det_ptr = dets[layer].ptr<float>(i);
-        const float* trace_ptr = traces[layer].ptr<float>(i);
-        for( int j = margin; j < layer_cols-margin; j++ )
-        {
-            float val0 = det_ptr[j];
-            if( val0 > hessianThreshold )
-            {
-                /* Coordinates for the start of the wavelet in the sum image. There
-                   is some integer division involved, so don't try to simplify this
-                   (cancel out sampleStep) without checking the result is the same */
-                int sum_i = sampleStep*(i-(size/2)/sampleStep);
-                int sum_j = sampleStep*(j-(size/2)/sampleStep);
-
-                /* The 3x3x3 neighbouring samples around the maxima.
-                   The maxima is included at N9[1][4] */
-
-                const float *det1 = &dets[layer-1].at<float>(i, j);
-                const float *det2 = &dets[layer].at<float>(i, j);
-                const float *det3 = &dets[layer+1].at<float>(i, j);
-                float N9[3][9] = { { det1[-step-1], det1[-step], det1[-step+1],
-                                     det1[-1]  , det1[0] , det1[1],
-                                     det1[step-1] , det1[step] , det1[step+1]  },
-                                   { det2[-step-1], det2[-step], det2[-step+1],
-                                     det2[-1]  , det2[0] , det2[1],
-                                     det2[step-1] , det2[step] , det2[step+1]  },
-                                   { det3[-step-1], det3[-step], det3[-step+1],
-                                     det3[-1]  , det3[0] , det3[1],
-                                     det3[step-1] , det3[step] , det3[step+1]  } };
-
-                /* Check the mask - why not just check the mask at the center of the wavelet? */
-                if( !mask_sum.empty() )
-                {
-                    const int* mask_ptr = &mask_sum.at<int>(sum_i, sum_j);
-                    float mval = calcHaarPattern( mask_ptr, &Dm, 1 );
-                    if( mval < 0.5 )
-                        continue;
-                }
-
-                /* Non-maxima suppression. val0 is at N9[1][4]*/
-                if( val0 > N9[0][0] && val0 > N9[0][1] && val0 > N9[0][2] &&
-                    val0 > N9[0][3] && val0 > N9[0][4] && val0 > N9[0][5] &&
-                    val0 > N9[0][6] && val0 > N9[0][7] && val0 > N9[0][8] &&
-                    val0 > N9[1][0] && val0 > N9[1][1] && val0 > N9[1][2] &&
-                    val0 > N9[1][3]                    && val0 > N9[1][5] &&
-                    val0 > N9[1][6] && val0 > N9[1][7] && val0 > N9[1][8] &&
-                    val0 > N9[2][0] && val0 > N9[2][1] && val0 > N9[2][2] &&
-                    val0 > N9[2][3] && val0 > N9[2][4] && val0 > N9[2][5] &&
-                    val0 > N9[2][6] && val0 > N9[2][7] && val0 > N9[2][8] )
-                {
-                    /* Calculate the wavelet center coordinates for the maxima */
-                    float center_i = sum_i + (size-1)*0.5f;
-                    float center_j = sum_j + (size-1)*0.5f;
-
-                    KeyPoint kpt( center_j, center_i, (float)sizes[layer],
-                                  -1, val0, octave, (trace_ptr[j] > 0) - (trace_ptr[j] < 0) );
-
-                    /* Interpolate maxima location within the 3x3x3 neighbourhood  */
-                    int ds = size - sizes[layer-1];
-                    int interp_ok = interpolateKeypoint( N9, sampleStep, sampleStep, ds, kpt );
-
-                    /* Sometimes the interpolation step gives a negative size etc. */
-                    if( interp_ok  )
-                    {
-                        /*printf( "KeyPoint %f %f %d\n", point.pt.x, point.pt.y, point.size );*/
-                        cv::AutoLock lock(findMaximaInLayer_m);
-                        keypoints.push_back(kpt);
-                    }
-                }
-            }
-        }
-    }
-}
-
-struct KeypointGreater
-{
-    inline bool operator()(const KeyPoint& kp1, const KeyPoint& kp2) const
-    {
-        if(kp1.response > kp2.response) return true;
-        if(kp1.response < kp2.response) return false;
-        if(kp1.size > kp2.size) return true;
-        if(kp1.size < kp2.size) return false;
-        if(kp1.octave > kp2.octave) return true;
-        if(kp1.octave < kp2.octave) return false;
-        if(kp1.pt.y < kp2.pt.y) return false;
-        if(kp1.pt.y > kp2.pt.y) return true;
-        return kp1.pt.x < kp2.pt.x;
-    }
-};
-
-
-static void fastHessianDetector( const Mat& sum, const Mat& mask_sum, std::vector<KeyPoint>& keypoints,
-                                 int nOctaves, int nOctaveLayers, float hessianThreshold )
-{
-    /* Sampling step along image x and y axes at first octave. This is doubled
-       for each additional octave. WARNING: Increasing this improves speed,
-       however keypoint extraction becomes unreliable. */
-    const int SAMPLE_STEP0 = 1;
-
-    int nTotalLayers = (nOctaveLayers+2)*nOctaves;
-    int nMiddleLayers = nOctaveLayers*nOctaves;
-
-    std::vector<Mat> dets(nTotalLayers);
-    std::vector<Mat> traces(nTotalLayers);
-    std::vector<int> sizes(nTotalLayers);
-    std::vector<int> sampleSteps(nTotalLayers);
-    std::vector<int> middleIndices(nMiddleLayers);
-
-    keypoints.clear();
-
-    // Allocate space and calculate properties of each layer
-    int index = 0, middleIndex = 0, step = SAMPLE_STEP0;
-
-    for( int octave = 0; octave < nOctaves; octave++ )
-    {
-        for( int layer = 0; layer < nOctaveLayers+2; layer++ )
-        {
-            /* The integral image sum is one pixel bigger than the source image*/
-            dets[index].create( (sum.rows-1)/step, (sum.cols-1)/step, CV_32F );
-            traces[index].create( (sum.rows-1)/step, (sum.cols-1)/step, CV_32F );
-            sizes[index] = (SURF_HAAR_SIZE0 + SURF_HAAR_SIZE_INC*layer) << octave;
-            sampleSteps[index] = step;
-
-            if( 0 < layer && layer <= nOctaveLayers )
-                middleIndices[middleIndex++] = index;
-            index++;
-        }
-        step *= 2;
-    }
-
-    // Calculate hessian determinant and trace samples in each layer
-    parallel_for_( Range(0, nTotalLayers),
-                   SURFBuildInvoker(sum, sizes, sampleSteps, dets, traces) );
-
-    // Find maxima in the determinant of the hessian
-    parallel_for_( Range(0, nMiddleLayers),
-                   SURFFindInvoker(sum, mask_sum, dets, traces, sizes,
-                                   sampleSteps, middleIndices, keypoints,
-                                   nOctaveLayers, hessianThreshold) );
-
-    std::sort(keypoints.begin(), keypoints.end(), KeypointGreater());
-}
-
-
-struct SURFInvoker : ParallelLoopBody
-{
-    enum { ORI_RADIUS = 6, ORI_WIN = 60, PATCH_SZ = 20 };
-
-    SURFInvoker( const Mat& _img, const Mat& _sum,
-                 std::vector<KeyPoint>& _keypoints, Mat& _descriptors,
-                 bool _extended, bool _upright )
-    {
-        keypoints = &_keypoints;
-        descriptors = &_descriptors;
-        img = &_img;
-        sum = &_sum;
-        extended = _extended;
-        upright = _upright;
-
-        // Simple bound for number of grid points in circle of radius ORI_RADIUS
-        const int nOriSampleBound = (2*ORI_RADIUS+1)*(2*ORI_RADIUS+1);
-
-        // Allocate arrays
-        apt.resize(nOriSampleBound);
-        aptw.resize(nOriSampleBound);
-        DW.resize(PATCH_SZ*PATCH_SZ);
-
-        /* Coordinates and weights of samples used to calculate orientation */
-        Mat G_ori = getGaussianKernel( 2*ORI_RADIUS+1, SURF_ORI_SIGMA, CV_32F );
-        nOriSamples = 0;
-        for( int i = -ORI_RADIUS; i <= ORI_RADIUS; i++ )
-        {
-            for( int j = -ORI_RADIUS; j <= ORI_RADIUS; j++ )
-            {
-                if( i*i + j*j <= ORI_RADIUS*ORI_RADIUS )
-                {
-                    apt[nOriSamples] = Point(i,j);
-                    aptw[nOriSamples++] = G_ori.at<float>(i+ORI_RADIUS,0) * G_ori.at<float>(j+ORI_RADIUS,0);
-                }
-            }
-        }
-        CV_Assert( nOriSamples <= nOriSampleBound );
-
-        /* Gaussian used to weight descriptor samples */
-        Mat G_desc = getGaussianKernel( PATCH_SZ, SURF_DESC_SIGMA, CV_32F );
-        for( int i = 0; i < PATCH_SZ; i++ )
-        {
-            for( int j = 0; j < PATCH_SZ; j++ )
-                DW[i*PATCH_SZ+j] = G_desc.at<float>(i,0) * G_desc.at<float>(j,0);
-        }
-    }
-
-    void operator()(const Range& range) const
-    {
-        /* X and Y gradient wavelet data */
-        const int NX=2, NY=2;
-        const int dx_s[NX][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
-        const int dy_s[NY][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
-
-        // Optimisation is better using nOriSampleBound than nOriSamples for
-        // array lengths.  Maybe because it is a constant known at compile time
-        const int nOriSampleBound =(2*ORI_RADIUS+1)*(2*ORI_RADIUS+1);
-
-        float X[nOriSampleBound], Y[nOriSampleBound], angle[nOriSampleBound];
-        uchar PATCH[PATCH_SZ+1][PATCH_SZ+1];
-        float DX[PATCH_SZ][PATCH_SZ], DY[PATCH_SZ][PATCH_SZ];
-        Mat _patch(PATCH_SZ+1, PATCH_SZ+1, CV_8U, PATCH);
-
-        int dsize = extended ? 128 : 64;
-
-        int k, k1 = range.start, k2 = range.end;
-        float maxSize = 0;
-        for( k = k1; k < k2; k++ )
-        {
-            maxSize = std::max(maxSize, (*keypoints)[k].size);
-        }
-        int imaxSize = std::max(cvCeil((PATCH_SZ+1)*maxSize*1.2f/9.0f), 1);
-        cv::AutoBuffer<uchar> winbuf(imaxSize*imaxSize);
-
-        for( k = k1; k < k2; k++ )
-        {
-            int i, j, kk, nangle;
-            float* vec;
-            SurfHF dx_t[NX], dy_t[NY];
-            KeyPoint& kp = (*keypoints)[k];
-            float size = kp.size;
-            Point2f center = kp.pt;
-            /* The sampling intervals and wavelet sized for selecting an orientation
-             and building the keypoint descriptor are defined relative to 's' */
-            float s = size*1.2f/9.0f;
-            /* To find the dominant orientation, the gradients in x and y are
-             sampled in a circle of radius 6s using wavelets of size 4s.
-             We ensure the gradient wavelet size is even to ensure the
-             wavelet pattern is balanced and symmetric around its center */
-            int grad_wav_size = 2*cvRound( 2*s );
-            if( sum->rows < grad_wav_size || sum->cols < grad_wav_size )
-            {
-                /* when grad_wav_size is too big,
-                 * the sampling of gradient will be meaningless
-                 * mark keypoint for deletion. */
-                kp.size = -1;
-                continue;
-            }
-
-            float descriptor_dir = 360.f - 90.f;
-            if (upright == 0)
-            {
-                resizeHaarPattern( dx_s, dx_t, NX, 4, grad_wav_size, sum->cols );
-                resizeHaarPattern( dy_s, dy_t, NY, 4, grad_wav_size, sum->cols );
-                for( kk = 0, nangle = 0; kk < nOriSamples; kk++ )
-                {
-                    int x = cvRound( center.x + apt[kk].x*s - (float)(grad_wav_size-1)/2 );
-                    int y = cvRound( center.y + apt[kk].y*s - (float)(grad_wav_size-1)/2 );
-                    if( y < 0 || y >= sum->rows - grad_wav_size ||
-                        x < 0 || x >= sum->cols - grad_wav_size )
-                        continue;
-                    const int* ptr = &sum->at<int>(y, x);
-                    float vx = calcHaarPattern( ptr, dx_t, 2 );
-                    float vy = calcHaarPattern( ptr, dy_t, 2 );
-                    X[nangle] = vx*aptw[kk];
-                    Y[nangle] = vy*aptw[kk];
-                    nangle++;
-                }
-                if( nangle == 0 )
-                {
-                    // No gradient could be sampled because the keypoint is too
-                    // near too one or more of the sides of the image. As we
-                    // therefore cannot find a dominant direction, we skip this
-                    // keypoint and mark it for later deletion from the sequence.
-                    kp.size = -1;
-                    continue;
-                }
-
-                phase( Mat(1, nangle, CV_32F, X), Mat(1, nangle, CV_32F, Y), Mat(1, nangle, CV_32F, angle), true );
-
-                float bestx = 0, besty = 0, descriptor_mod = 0;
-                for( i = 0; i < 360; i += SURF_ORI_SEARCH_INC )
-                {
-                    float sumx = 0, sumy = 0, temp_mod;
-                    for( j = 0; j < nangle; j++ )
-                    {
-                        int d = std::abs(cvRound(angle[j]) - i);
-                        if( d < ORI_WIN/2 || d > 360-ORI_WIN/2 )
-                        {
-                            sumx += X[j];
-                            sumy += Y[j];
-                        }
-                    }
-                    temp_mod = sumx*sumx + sumy*sumy;
-                    if( temp_mod > descriptor_mod )
-                    {
-                        descriptor_mod = temp_mod;
-                        bestx = sumx;
-                        besty = sumy;
-                    }
-                }
-                descriptor_dir = fastAtan2( -besty, bestx );
-            }
-            kp.angle = descriptor_dir;
-            if( !descriptors || !descriptors->data )
-                continue;
-
-            /* Extract a window of pixels around the keypoint of size 20s */
-            int win_size = (int)((PATCH_SZ+1)*s);
-            CV_Assert( imaxSize >= win_size );
-            Mat win(win_size, win_size, CV_8U, winbuf);
-
-            if( !upright )
-            {
-                descriptor_dir *= (float)(CV_PI/180);
-                float sin_dir = -std::sin(descriptor_dir);
-                float cos_dir =  std::cos(descriptor_dir);
-
-                /* Subpixel interpolation version (slower). Subpixel not required since
-                the pixels will all get averaged when we scale down to 20 pixels */
-                /*
-                float w[] = { cos_dir, sin_dir, center.x,
-                -sin_dir, cos_dir , center.y };
-                CvMat W = cvMat(2, 3, CV_32F, w);
-                cvGetQuadrangleSubPix( img, &win, &W );
-                */
-
-                float win_offset = -(float)(win_size-1)/2;
-                float start_x = center.x + win_offset*cos_dir + win_offset*sin_dir;
-                float start_y = center.y - win_offset*sin_dir + win_offset*cos_dir;
-                uchar* WIN = win.data;
-#if 0
-                // Nearest neighbour version (faster)
-                for( i = 0; i < win_size; i++, start_x += sin_dir, start_y += cos_dir )
-                {
-                    float pixel_x = start_x;
-                    float pixel_y = start_y;
-                    for( j = 0; j < win_size; j++, pixel_x += cos_dir, pixel_y -= sin_dir )
-                    {
-                        int x = std::min(std::max(cvRound(pixel_x), 0), img->cols-1);
-                        int y = std::min(std::max(cvRound(pixel_y), 0), img->rows-1);
-                        WIN[i*win_size + j] = img->at<uchar>(y, x);
-                    }
-                }
-#else
-                int ncols1 = img->cols-1, nrows1 = img->rows-1;
-                size_t imgstep = img->step;
-                for( i = 0; i < win_size; i++, start_x += sin_dir, start_y += cos_dir )
-                {
-                    double pixel_x = start_x;
-                    double pixel_y = start_y;
-                    for( j = 0; j < win_size; j++, pixel_x += cos_dir, pixel_y -= sin_dir )
-                    {
-                        int ix = cvFloor(pixel_x), iy = cvFloor(pixel_y);
-                        if( (unsigned)ix < (unsigned)ncols1 &&
-                            (unsigned)iy < (unsigned)nrows1 )
-                        {
-                            float a = (float)(pixel_x - ix), b = (float)(pixel_y - iy);
-                            const uchar* imgptr = &img->at<uchar>(iy, ix);
-                            WIN[i*win_size + j] = (uchar)
-                                cvRound(imgptr[0]*(1.f - a)*(1.f - b) +
-                                        imgptr[1]*a*(1.f - b) +
-                                        imgptr[imgstep]*(1.f - a)*b +
-                                        imgptr[imgstep+1]*a*b);
-                        }
-                        else
-                        {
-                            int x = std::min(std::max(cvRound(pixel_x), 0), ncols1);
-                            int y = std::min(std::max(cvRound(pixel_y), 0), nrows1);
-                            WIN[i*win_size + j] = img->at<uchar>(y, x);
-                        }
-                    }
-                }
-#endif
-            }
-            else
-            {
-                // extract rect - slightly optimized version of the code above
-                // TODO: find faster code, as this is simply an extract rect operation,
-                //       e.g. by using cvGetSubRect, problem is the border processing
-                // descriptor_dir == 90 grad
-                // sin_dir == 1
-                // cos_dir == 0
-
-                float win_offset = -(float)(win_size-1)/2;
-                int start_x = cvRound(center.x + win_offset);
-                int start_y = cvRound(center.y - win_offset);
-                uchar* WIN = win.data;
-                for( i = 0; i < win_size; i++, start_x++ )
-                {
-                    int pixel_x = start_x;
-                    int pixel_y = start_y;
-                    for( j = 0; j < win_size; j++, pixel_y-- )
-                    {
-                        int x = MAX( pixel_x, 0 );
-                        int y = MAX( pixel_y, 0 );
-                        x = MIN( x, img->cols-1 );
-                        y = MIN( y, img->rows-1 );
-                        WIN[i*win_size + j] = img->at<uchar>(y, x);
-                    }
-                }
-            }
-            // Scale the window to size PATCH_SZ so each pixel's size is s. This
-            // makes calculating the gradients with wavelets of size 2s easy
-            resize(win, _patch, _patch.size(), 0, 0, INTER_AREA);
-
-            // Calculate gradients in x and y with wavelets of size 2s
-            for( i = 0; i < PATCH_SZ; i++ )
-                for( j = 0; j < PATCH_SZ; j++ )
-                {
-                    float dw = DW[i*PATCH_SZ + j];
-                    float vx = (PATCH[i][j+1] - PATCH[i][j] + PATCH[i+1][j+1] - PATCH[i+1][j])*dw;
-                    float vy = (PATCH[i+1][j] - PATCH[i][j] + PATCH[i+1][j+1] - PATCH[i][j+1])*dw;
-                    DX[i][j] = vx;
-                    DY[i][j] = vy;
-                }
-
-            // Construct the descriptor
-            vec = descriptors->ptr<float>(k);
-            for( kk = 0; kk < dsize; kk++ )
-                vec[kk] = 0;
-            double square_mag = 0;
-            if( extended )
-            {
-                // 128-bin descriptor
-                for( i = 0; i < 4; i++ )
-                    for( j = 0; j < 4; j++ )
-                    {
-                        for(int y = i*5; y < i*5+5; y++ )
-                        {
-                            for(int x = j*5; x < j*5+5; x++ )
-                            {
-                                float tx = DX[y][x], ty = DY[y][x];
-                                if( ty >= 0 )
-                                {
-                                    vec[0] += tx;
-                                    vec[1] += (float)fabs(tx);
-                                } else {
-                                    vec[2] += tx;
-                                    vec[3] += (float)fabs(tx);
-                                }
-                                if ( tx >= 0 )
-                                {
-                                    vec[4] += ty;
-                                    vec[5] += (float)fabs(ty);
-                                } else {
-                                    vec[6] += ty;
-                                    vec[7] += (float)fabs(ty);
-                                }
-                            }
-                        }
-                        for( kk = 0; kk < 8; kk++ )
-                            square_mag += vec[kk]*vec[kk];
-                        vec += 8;
-                    }
-            }
-            else
-            {
-                // 64-bin descriptor
-                for( i = 0; i < 4; i++ )
-                    for( j = 0; j < 4; j++ )
-                    {
-                        for(int y = i*5; y < i*5+5; y++ )
-                        {
-                            for(int x = j*5; x < j*5+5; x++ )
-                            {
-                                float tx = DX[y][x], ty = DY[y][x];
-                                vec[0] += tx; vec[1] += ty;
-                                vec[2] += (float)fabs(tx); vec[3] += (float)fabs(ty);
-                            }
-                        }
-                        for( kk = 0; kk < 4; kk++ )
-                            square_mag += vec[kk]*vec[kk];
-                        vec+=4;
-                    }
-            }
-
-            // unit vector is essential for contrast invariance
-            vec = descriptors->ptr<float>(k);
-            float scale = (float)(1./(std::sqrt(square_mag) + DBL_EPSILON));
-            for( kk = 0; kk < dsize; kk++ )
-                vec[kk] *= scale;
-        }
-    }
-
-    // Parameters
-    const Mat* img;
-    const Mat* sum;
-    std::vector<KeyPoint>* keypoints;
-    Mat* descriptors;
-    bool extended;
-    bool upright;
-
-    // Pre-calculated values
-    int nOriSamples;
-    std::vector<Point> apt;
-    std::vector<float> aptw;
-    std::vector<float> DW;
-};
-
-
-SURF::SURF()
-{
-    hessianThreshold = 100;
-    extended = false;
-    upright = false;
-    nOctaves = 4;
-    nOctaveLayers = 3;
-}
-
-SURF::SURF(double _threshold, int _nOctaves, int _nOctaveLayers, bool _extended, bool _upright)
-{
-    hessianThreshold = _threshold;
-    extended = _extended;
-    upright = _upright;
-    nOctaves = _nOctaves;
-    nOctaveLayers = _nOctaveLayers;
-}
-
-int SURF::descriptorSize() const { return extended ? 128 : 64; }
-int SURF::descriptorType() const { return CV_32F; }
-int SURF::defaultNorm() const { return NORM_L2; }
-
-void SURF::operator()(InputArray imgarg, InputArray maskarg,
-                      CV_OUT std::vector<KeyPoint>& keypoints) const
-{
-    (*this)(imgarg, maskarg, keypoints, noArray(), false);
-}
-
-void SURF::operator()(InputArray _img, InputArray _mask,
-                      CV_OUT std::vector<KeyPoint>& keypoints,
-                      OutputArray _descriptors,
-                      bool useProvidedKeypoints) const
-{
-    int imgtype = _img.type(), imgcn = CV_MAT_CN(imgtype);
-    bool doDescriptors = _descriptors.needed();
-
-    CV_Assert(!_img.empty() && CV_MAT_DEPTH(imgtype) == CV_8U && (imgcn == 1 || imgcn == 3 || imgcn == 4));
-    CV_Assert(_descriptors.needed() || !useProvidedKeypoints);
-
-    if( ocl::useOpenCL() )
-    {
-        SURF_OCL ocl_surf;
-        UMat gpu_kpt;
-        bool ok = ocl_surf.init(this);
-
-        if( ok )
-        {
-            if( !_descriptors.needed() )
-            {
-                ok = ocl_surf.detect(_img, _mask, gpu_kpt);
-            }
-            else
-            {
-                if(useProvidedKeypoints)
-                    ocl_surf.uploadKeypoints(keypoints, gpu_kpt);
-                ok = ocl_surf.detectAndCompute(_img, _mask, gpu_kpt, _descriptors, useProvidedKeypoints);
-            }
-        }
-        if( ok )
-        {
-            if(!useProvidedKeypoints)
-                ocl_surf.downloadKeypoints(gpu_kpt, keypoints);
-            return;
-        }
-    }
-
-    Mat img = _img.getMat(), mask = _mask.getMat(), mask1, sum, msum;
-
-    if( imgcn > 1 )
-        cvtColor(img, img, COLOR_BGR2GRAY);
-
-    CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.size() == img.size()));
-    CV_Assert(hessianThreshold >= 0);
-    CV_Assert(nOctaves > 0);
-    CV_Assert(nOctaveLayers > 0);
-
-    integral(img, sum, CV_32S);
-
-    // Compute keypoints only if we are not asked for evaluating the descriptors are some given locations:
-    if( !useProvidedKeypoints )
-    {
-        if( !mask.empty() )
-        {
-            cv::min(mask, 1, mask1);
-            integral(mask1, msum, CV_32S);
-        }
-        fastHessianDetector( sum, msum, keypoints, nOctaves, nOctaveLayers, (float)hessianThreshold );
-    }
-
-    int i, j, N = (int)keypoints.size();
-    if( N > 0 )
-    {
-        Mat descriptors;
-        bool _1d = false;
-        int dcols = extended ? 128 : 64;
-        size_t dsize = dcols*sizeof(float);
-
-        if( doDescriptors )
-        {
-            _1d = _descriptors.kind() == _InputArray::STD_VECTOR && _descriptors.type() == CV_32F;
-            if( _1d )
-            {
-                _descriptors.create(N*dcols, 1, CV_32F);
-                descriptors = _descriptors.getMat().reshape(1, N);
-            }
-            else
-            {
-                _descriptors.create(N, dcols, CV_32F);
-                descriptors = _descriptors.getMat();
-            }
-        }
-
-        // we call SURFInvoker in any case, even if we do not need descriptors,
-        // since it computes orientation of each feature.
-        parallel_for_(Range(0, N), SURFInvoker(img, sum, keypoints, descriptors, extended, upright) );
-
-        // remove keypoints that were marked for deletion
-        for( i = j = 0; i < N; i++ )
-        {
-            if( keypoints[i].size > 0 )
-            {
-                if( i > j )
-                {
-                    keypoints[j] = keypoints[i];
-                    if( doDescriptors )
-                        memcpy( descriptors.ptr(j), descriptors.ptr(i), dsize);
-                }
-                j++;
-            }
-        }
-        if( N > j )
-        {
-            N = j;
-            keypoints.resize(N);
-            if( doDescriptors )
-            {
-                Mat d = descriptors.rowRange(0, N);
-                if( _1d )
-                    d = d.reshape(1, N*dcols);
-                d.copyTo(_descriptors);
-            }
-        }
-    }
-}
-
-
-void SURF::detectImpl( InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
-{
-    (*this)(image.getMat(), mask.getMat(), keypoints, noArray(), false);
-}
-
-void SURF::computeImpl( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
-{
-    (*this)(image, Mat(), keypoints, descriptors, true);
-}
-
-}
diff --git a/modules/nonfree/src/surf.cuda.cpp b/modules/nonfree/src/surf.cuda.cpp
deleted file mode 100644 (file)
index 461ba0f..0000000
+++ /dev/null
@@ -1,432 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "precomp.hpp"
-
-using namespace cv;
-using namespace cv::cuda;
-
-#if !defined (HAVE_CUDA) || !defined (HAVE_OPENCV_CUDAARITHM)
-
-cv::cuda::SURF_CUDA::SURF_CUDA() { throw_no_cuda(); }
-cv::cuda::SURF_CUDA::SURF_CUDA(double, int, int, bool, float, bool) { throw_no_cuda(); }
-int cv::cuda::SURF_CUDA::descriptorSize() const { throw_no_cuda(); return 0;}
-void cv::cuda::SURF_CUDA::uploadKeypoints(const std::vector<KeyPoint>&, GpuMat&) { throw_no_cuda(); }
-void cv::cuda::SURF_CUDA::downloadKeypoints(const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
-void cv::cuda::SURF_CUDA::downloadDescriptors(const GpuMat&, std::vector<float>&) { throw_no_cuda(); }
-void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, GpuMat&) { throw_no_cuda(); }
-void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool) { throw_no_cuda(); }
-void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&) { throw_no_cuda(); }
-void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&, GpuMat&, bool) { throw_no_cuda(); }
-void cv::cuda::SURF_CUDA::operator()(const GpuMat&, const GpuMat&, std::vector<KeyPoint>&, std::vector<float>&, bool) { throw_no_cuda(); }
-void cv::cuda::SURF_CUDA::releaseMemory() { throw_no_cuda(); }
-
-#else // !defined (HAVE_CUDA)
-
-namespace cv { namespace cuda { namespace device
-{
-    namespace surf
-    {
-        void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold);
-        void loadOctaveConstants(int octave, int layer_rows, int layer_cols);
-
-        void bindImgTex(PtrStepSzb img);
-        size_t bindSumTex(PtrStepSz<unsigned int> sum);
-        size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum);
-
-        void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
-            int octave, int nOctaveLayer);
-
-        void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
-            int img_rows, int img_cols, int octave, bool use_mask, int nLayers);
-
-        void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
-            float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
-            unsigned int* featureCounter);
-
-        void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures);
-
-        void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
-    }
-}}}
-
-using namespace ::cv::cuda::device::surf;
-
-namespace
-{
-    Mutex mtx;
-
-    int calcSize(int octave, int layer)
-    {
-        /* Wavelet size at first layer of first octave. */
-        const int HAAR_SIZE0 = 9;
-
-        /* Wavelet size increment between layers. This should be an even number,
-         such that the wavelet sizes in an octave are either all even or all odd.
-         This ensures that when looking for the neighbours of a sample, the layers
-
-         above and below are aligned correctly. */
-        const int HAAR_SIZE_INC = 6;
-
-        return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
-    }
-
-    class SURF_CUDA_Invoker
-    {
-    public:
-        SURF_CUDA_Invoker(SURF_CUDA& surf, const GpuMat& img, const GpuMat& mask) :
-            surf_(surf),
-            img_cols(img.cols), img_rows(img.rows),
-            use_mask(!mask.empty())
-        {
-            CV_Assert(!img.empty() && img.type() == CV_8UC1);
-            CV_Assert(mask.empty() || (mask.size() == img.size() && mask.type() == CV_8UC1));
-            CV_Assert(surf_.nOctaves > 0 && surf_.nOctaveLayers > 0);
-
-            const int min_size = calcSize(surf_.nOctaves - 1, 0);
-            CV_Assert(img_rows - min_size >= 0);
-            CV_Assert(img_cols - min_size >= 0);
-
-            const int layer_rows = img_rows >> (surf_.nOctaves - 1);
-            const int layer_cols = img_cols >> (surf_.nOctaves - 1);
-            const int min_margin = ((calcSize((surf_.nOctaves - 1), 2) >> 1) >> (surf_.nOctaves - 1)) + 1;
-            CV_Assert(layer_rows - 2 * min_margin > 0);
-            CV_Assert(layer_cols - 2 * min_margin > 0);
-
-            maxFeatures = std::min(static_cast<int>(img.size().area() * surf.keypointsRatio), 65535);
-            maxCandidates = std::min(static_cast<int>(1.5 * maxFeatures), 65535);
-
-            CV_Assert(maxFeatures > 0);
-
-            counters.create(1, surf_.nOctaves + 1, CV_32SC1);
-            counters.setTo(Scalar::all(0));
-
-            loadGlobalConstants(maxCandidates, maxFeatures, img_rows, img_cols, surf_.nOctaveLayers, static_cast<float>(surf_.hessianThreshold));
-
-            bindImgTex(img);
-
-            cuda::integral(img, surf_.sum, surf_.intBuffer);
-            sumOffset = bindSumTex(surf_.sum);
-
-            if (use_mask)
-            {
-                cuda::min(mask, 1.0, surf_.mask1);
-                cuda::integral(surf_.mask1, surf_.maskSum, surf_.intBuffer);
-                maskOffset = bindMaskSumTex(surf_.maskSum);
-            }
-        }
-
-        void detectKeypoints(GpuMat& keypoints)
-        {
-            ensureSizeIsEnough(img_rows * (surf_.nOctaveLayers + 2), img_cols, CV_32FC1, surf_.det);
-            ensureSizeIsEnough(img_rows * (surf_.nOctaveLayers + 2), img_cols, CV_32FC1, surf_.trace);
-
-            ensureSizeIsEnough(1, maxCandidates, CV_32SC4, surf_.maxPosBuffer);
-            ensureSizeIsEnough(SURF_CUDA::ROWS_COUNT, maxFeatures, CV_32FC1, keypoints);
-            keypoints.setTo(Scalar::all(0));
-
-            for (int octave = 0; octave < surf_.nOctaves; ++octave)
-            {
-                const int layer_rows = img_rows >> octave;
-                const int layer_cols = img_cols >> octave;
-                loadOctaveConstants(octave, layer_rows, layer_cols);
-
-                icvCalcLayerDetAndTrace_gpu(surf_.det, surf_.trace, img_rows, img_cols, octave, surf_.nOctaveLayers);
-
-                icvFindMaximaInLayer_gpu(surf_.det, surf_.trace, surf_.maxPosBuffer.ptr<int4>(), counters.ptr<unsigned int>() + 1 + octave,
-                    img_rows, img_cols, octave, use_mask, surf_.nOctaveLayers);
-
-                unsigned int maxCounter;
-                cudaSafeCall( cudaMemcpy(&maxCounter, counters.ptr<unsigned int>() + 1 + octave, sizeof(unsigned int), cudaMemcpyDeviceToHost) );
-                maxCounter = std::min(maxCounter, static_cast<unsigned int>(maxCandidates));
-
-                if (maxCounter > 0)
-                {
-                    icvInterpolateKeypoint_gpu(surf_.det, surf_.maxPosBuffer.ptr<int4>(), maxCounter,
-                        keypoints.ptr<float>(SURF_CUDA::X_ROW), keypoints.ptr<float>(SURF_CUDA::Y_ROW),
-                        keypoints.ptr<int>(SURF_CUDA::LAPLACIAN_ROW), keypoints.ptr<int>(SURF_CUDA::OCTAVE_ROW),
-                        keypoints.ptr<float>(SURF_CUDA::SIZE_ROW), keypoints.ptr<float>(SURF_CUDA::HESSIAN_ROW),
-                        counters.ptr<unsigned int>());
-                }
-            }
-            unsigned int featureCounter;
-            cudaSafeCall( cudaMemcpy(&featureCounter, counters.ptr<unsigned int>(), sizeof(unsigned int), cudaMemcpyDeviceToHost) );
-            featureCounter = std::min(featureCounter, static_cast<unsigned int>(maxFeatures));
-
-            keypoints.cols = featureCounter;
-
-            if (surf_.upright)
-                keypoints.row(SURF_CUDA::ANGLE_ROW).setTo(Scalar::all(360.0 - 90.0));
-            else
-                findOrientation(keypoints);
-        }
-
-        void findOrientation(GpuMat& keypoints)
-        {
-            const int nFeatures = keypoints.cols;
-            if (nFeatures > 0)
-            {
-                icvCalcOrientation_gpu(keypoints.ptr<float>(SURF_CUDA::X_ROW), keypoints.ptr<float>(SURF_CUDA::Y_ROW),
-                    keypoints.ptr<float>(SURF_CUDA::SIZE_ROW), keypoints.ptr<float>(SURF_CUDA::ANGLE_ROW), nFeatures);
-            }
-        }
-
-        void computeDescriptors(const GpuMat& keypoints, GpuMat& descriptors, int descriptorSize)
-        {
-            const int nFeatures = keypoints.cols;
-            if (nFeatures > 0)
-            {
-                ensureSizeIsEnough(nFeatures, descriptorSize, CV_32F, descriptors);
-                compute_descriptors_gpu(descriptors, keypoints.ptr<float>(SURF_CUDA::X_ROW), keypoints.ptr<float>(SURF_CUDA::Y_ROW),
-                    keypoints.ptr<float>(SURF_CUDA::SIZE_ROW), keypoints.ptr<float>(SURF_CUDA::ANGLE_ROW), nFeatures);
-            }
-        }
-
-    private:
-        SURF_CUDA_Invoker(const SURF_CUDA_Invoker&);
-        SURF_CUDA_Invoker& operator =(const SURF_CUDA_Invoker&);
-
-        SURF_CUDA& surf_;
-
-        int img_cols, img_rows;
-
-        bool use_mask;
-
-        int maxCandidates;
-        int maxFeatures;
-
-        size_t maskOffset;
-        size_t sumOffset;
-
-        GpuMat counters;
-    };
-}
-
-cv::cuda::SURF_CUDA::SURF_CUDA()
-{
-    hessianThreshold = 100;
-    extended = true;
-    nOctaves = 4;
-    nOctaveLayers = 2;
-    keypointsRatio = 0.01f;
-    upright = false;
-}
-
-cv::cuda::SURF_CUDA::SURF_CUDA(double _threshold, int _nOctaves, int _nOctaveLayers, bool _extended, float _keypointsRatio, bool _upright)
-{
-    hessianThreshold = _threshold;
-    extended = _extended;
-    nOctaves = _nOctaves;
-    nOctaveLayers = _nOctaveLayers;
-    keypointsRatio = _keypointsRatio;
-    upright = _upright;
-}
-
-int cv::cuda::SURF_CUDA::descriptorSize() const
-{
-    return extended ? 128 : 64;
-}
-
-int cv::cuda::SURF_CUDA::defaultNorm() const
-{
-    return NORM_L2;
-}
-
-void cv::cuda::SURF_CUDA::uploadKeypoints(const std::vector<KeyPoint>& keypoints, GpuMat& keypointsGPU)
-{
-    if (keypoints.empty())
-        keypointsGPU.release();
-    else
-    {
-        Mat keypointsCPU(SURF_CUDA::ROWS_COUNT, static_cast<int>(keypoints.size()), CV_32FC1);
-
-        float* kp_x = keypointsCPU.ptr<float>(SURF_CUDA::X_ROW);
-        float* kp_y = keypointsCPU.ptr<float>(SURF_CUDA::Y_ROW);
-        int* kp_laplacian = keypointsCPU.ptr<int>(SURF_CUDA::LAPLACIAN_ROW);
-        int* kp_octave = keypointsCPU.ptr<int>(SURF_CUDA::OCTAVE_ROW);
-        float* kp_size = keypointsCPU.ptr<float>(SURF_CUDA::SIZE_ROW);
-        float* kp_dir = keypointsCPU.ptr<float>(SURF_CUDA::ANGLE_ROW);
-        float* kp_hessian = keypointsCPU.ptr<float>(SURF_CUDA::HESSIAN_ROW);
-
-        for (size_t i = 0, size = keypoints.size(); i < size; ++i)
-        {
-            const KeyPoint& kp = keypoints[i];
-            kp_x[i] = kp.pt.x;
-            kp_y[i] = kp.pt.y;
-            kp_octave[i] = kp.octave;
-            kp_size[i] = kp.size;
-            kp_dir[i] = kp.angle;
-            kp_hessian[i] = kp.response;
-            kp_laplacian[i] = 1;
-        }
-
-        keypointsGPU.upload(keypointsCPU);
-    }
-}
-
-void cv::cuda::SURF_CUDA::downloadKeypoints(const GpuMat& keypointsGPU, std::vector<KeyPoint>& keypoints)
-{
-    const int nFeatures = keypointsGPU.cols;
-
-    if (nFeatures == 0)
-        keypoints.clear();
-    else
-    {
-        CV_Assert(keypointsGPU.type() == CV_32FC1 && keypointsGPU.rows == ROWS_COUNT);
-
-        Mat keypointsCPU(keypointsGPU);
-
-        keypoints.resize(nFeatures);
-
-        float* kp_x = keypointsCPU.ptr<float>(SURF_CUDA::X_ROW);
-        float* kp_y = keypointsCPU.ptr<float>(SURF_CUDA::Y_ROW);
-        int* kp_laplacian = keypointsCPU.ptr<int>(SURF_CUDA::LAPLACIAN_ROW);
-        int* kp_octave = keypointsCPU.ptr<int>(SURF_CUDA::OCTAVE_ROW);
-        float* kp_size = keypointsCPU.ptr<float>(SURF_CUDA::SIZE_ROW);
-        float* kp_dir = keypointsCPU.ptr<float>(SURF_CUDA::ANGLE_ROW);
-        float* kp_hessian = keypointsCPU.ptr<float>(SURF_CUDA::HESSIAN_ROW);
-
-        for (int i = 0; i < nFeatures; ++i)
-        {
-            KeyPoint& kp = keypoints[i];
-            kp.pt.x = kp_x[i];
-            kp.pt.y = kp_y[i];
-            kp.class_id = kp_laplacian[i];
-            kp.octave = kp_octave[i];
-            kp.size = kp_size[i];
-            kp.angle = kp_dir[i];
-            kp.response = kp_hessian[i];
-        }
-    }
-}
-
-void cv::cuda::SURF_CUDA::downloadDescriptors(const GpuMat& descriptorsGPU, std::vector<float>& descriptors)
-{
-    if (descriptorsGPU.empty())
-        descriptors.clear();
-    else
-    {
-        CV_Assert(descriptorsGPU.type() == CV_32F);
-
-        descriptors.resize(descriptorsGPU.rows * descriptorsGPU.cols);
-        Mat descriptorsCPU(descriptorsGPU.size(), CV_32F, &descriptors[0]);
-        descriptorsGPU.download(descriptorsCPU);
-    }
-}
-
-void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints)
-{
-    AutoLock lock(mtx);
-    if (!img.empty())
-    {
-        SURF_CUDA_Invoker surf(*this, img, mask);
-
-        surf.detectKeypoints(keypoints);
-    }
-}
-
-void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,
-                                   bool useProvidedKeypoints)
-{
-    AutoLock lock(mtx);
-    if (!img.empty())
-    {
-        SURF_CUDA_Invoker surf(*this, img, mask);
-
-        if (!useProvidedKeypoints)
-            surf.detectKeypoints(keypoints);
-        else if (!upright)
-        {
-            surf.findOrientation(keypoints);
-        }
-
-        surf.computeDescriptors(keypoints, descriptors, descriptorSize());
-    }
-}
-
-void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints)
-{
-    AutoLock lock(mtx);
-    GpuMat keypointsGPU;
-
-    (*this)(img, mask, keypointsGPU);
-
-    downloadKeypoints(keypointsGPU, keypoints);
-}
-
-void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints,
-    GpuMat& descriptors, bool useProvidedKeypoints)
-{
-    AutoLock lock(mtx);
-    GpuMat keypointsGPU;
-
-    if (useProvidedKeypoints)
-        uploadKeypoints(keypoints, keypointsGPU);
-
-    (*this)(img, mask, keypointsGPU, descriptors, useProvidedKeypoints);
-
-    downloadKeypoints(keypointsGPU, keypoints);
-}
-
-void cv::cuda::SURF_CUDA::operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints,
-    std::vector<float>& descriptors, bool useProvidedKeypoints)
-{
-    AutoLock lock(mtx);
-    GpuMat descriptorsGPU;
-
-    (*this)(img, mask, keypoints, descriptorsGPU, useProvidedKeypoints);
-
-    downloadDescriptors(descriptorsGPU, descriptors);
-}
-
-void cv::cuda::SURF_CUDA::releaseMemory()
-{
-    sum.release();
-    mask1.release();
-    maskSum.release();
-    intBuffer.release();
-    det.release();
-    trace.release();
-    maxPosBuffer.release();
-}
-
-#endif // !defined (HAVE_CUDA)
diff --git a/modules/nonfree/src/surf.hpp b/modules/nonfree/src/surf.hpp
deleted file mode 100644 (file)
index ee56fb6..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-///////////// see LICENSE.txt in the OpenCV root directory //////////////
-
-#ifndef __OPENCV_NONFREE_SURF_HPP__
-#define __OPENCV_NONFREE_SURF_HPP__
-
-namespace cv
-{
-//! Speeded up robust features, port from CUDA module.
-////////////////////////////////// SURF //////////////////////////////////////////
-
-class SURF_OCL
-{
-public:
-    enum KeypointLayout
-    {
-        X_ROW = 0,
-        Y_ROW,
-        LAPLACIAN_ROW,
-        OCTAVE_ROW,
-        SIZE_ROW,
-        ANGLE_ROW,
-        HESSIAN_ROW,
-        ROWS_COUNT
-    };
-
-    //! the full constructor taking all the necessary parameters
-    SURF_OCL();
-
-    bool init(const SURF* params);
-
-    //! returns the descriptor size in float's (64 or 128)
-    int descriptorSize() const { return params->extended ? 128 : 64; }
-
-    void uploadKeypoints(const std::vector<KeyPoint> &keypoints, UMat &keypointsGPU);
-    void downloadKeypoints(const UMat &keypointsGPU, std::vector<KeyPoint> &keypoints);
-
-    //! finds the keypoints using fast hessian detector used in SURF
-    //! supports CV_8UC1 images
-    //! keypoints will have nFeature cols and 6 rows
-    //! keypoints.ptr<float>(X_ROW)[i] will contain x coordinate of i'th feature
-    //! keypoints.ptr<float>(Y_ROW)[i] will contain y coordinate of i'th feature
-    //! keypoints.ptr<float>(LAPLACIAN_ROW)[i] will contain laplacian sign of i'th feature
-    //! keypoints.ptr<float>(OCTAVE_ROW)[i] will contain octave of i'th feature
-    //! keypoints.ptr<float>(SIZE_ROW)[i] will contain size of i'th feature
-    //! keypoints.ptr<float>(ANGLE_ROW)[i] will contain orientation of i'th feature
-    //! keypoints.ptr<float>(HESSIAN_ROW)[i] will contain response of i'th feature
-    bool detect(InputArray img, InputArray mask, UMat& keypoints);
-    //! finds the keypoints and computes their descriptors.
-    //! Optionally it can compute descriptors for the user-provided keypoints and recompute keypoints direction
-    bool detectAndCompute(InputArray img, InputArray mask, UMat& keypoints,
-                          OutputArray descriptors, bool useProvidedKeypoints = false);
-
-protected:
-    bool setImage(InputArray img, InputArray mask);
-
-    // kernel callers declarations
-    bool calcLayerDetAndTrace(int octave, int layer_rows);
-
-    bool findMaximaInLayer(int counterOffset, int octave, int layer_rows, int layer_cols);
-
-    bool interpolateKeypoint(int maxCounter, UMat &keypoints, int octave, int layer_rows, int maxFeatures);
-
-    bool calcOrientation(UMat &keypoints);
-
-    bool setUpRight(UMat &keypoints);
-
-    bool computeDescriptors(const UMat &keypoints, OutputArray descriptors);
-
-    bool detectKeypoints(UMat &keypoints);
-
-    const SURF* params;
-
-    //! max keypoints = min(keypointsRatio * img.size().area(), 65535)
-    UMat sum, intBuffer;
-    UMat det, trace;
-    UMat maxPosBuffer;
-
-    int img_cols, img_rows;
-
-    int maxCandidates;
-    int maxFeatures;
-
-    UMat img, counters;
-
-    // texture buffers
-    ocl::Image2D imgTex, sumTex;
-    bool haveImageSupport;
-    String kerOpts;
-
-    int status;
-};
-
-/*
-template<typename _Tp> void copyVectorToUMat(const std::vector<_Tp>& v, UMat& um)
-{
-    if(v.empty())
-        um.release();
-    else
-        Mat(1, (int)(v.size()*sizeof(v[0])), CV_8U, (void*)&v[0]).copyTo(um);
-}
-
-template<typename _Tp> void copyUMatToVector(const UMat& um, std::vector<_Tp>& v)
-{
-    if(um.empty())
-        v.clear();
-    else
-    {
-        size_t sz = um.total()*um.elemSize();
-        CV_Assert(um.isContinuous() && (sz % sizeof(_Tp) == 0));
-        v.resize(sz/sizeof(_Tp));
-        Mat m(um.size(), um.type(), &v[0]);
-        um.copyTo(m);
-    }
-}*/
-
-}
-
-#endif
diff --git a/modules/nonfree/src/surf.ocl.cpp b/modules/nonfree/src/surf.ocl.cpp
deleted file mode 100644 (file)
index f46fc50..0000000
+++ /dev/null
@@ -1,459 +0,0 @@
-/*M/////////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
-// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// @Authors
-//    Peng Xiao, pengxiao@multicorewareinc.com
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors as is and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-#include "precomp.hpp"
-#include "surf.hpp"
-
-#include <cstdio>
-#include <sstream>
-#include "opencl_kernels_nonfree.hpp"
-
-namespace cv
-{
-
-enum { ORI_SEARCH_INC=5, ORI_LOCAL_SIZE=(360 / ORI_SEARCH_INC) };
-
-static inline int calcSize(int octave, int layer)
-{
-    /* Wavelet size at first layer of first octave. */
-    const int HAAR_SIZE0 = 9;
-
-    /* Wavelet size increment between layers. This should be an even number,
-    such that the wavelet sizes in an octave are either all even or all odd.
-    This ensures that when looking for the neighbors of a sample, the layers
-
-    above and below are aligned correctly. */
-    const int HAAR_SIZE_INC = 6;
-
-    return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
-}
-
-
-SURF_OCL::SURF_OCL()
-{
-    img_cols = img_rows = maxCandidates = maxFeatures = 0;
-    haveImageSupport = false;
-    status = -1;
-}
-
-bool SURF_OCL::init(const SURF* p)
-{
-    params = p;
-    if(status < 0)
-    {
-        status = 0;
-        if(ocl::haveOpenCL())
-        {
-            const ocl::Device& dev = ocl::Device::getDefault();
-            if( dev.type() == ocl::Device::TYPE_CPU || dev.doubleFPConfig() == 0 )
-                return false;
-            haveImageSupport = false;//dev.imageSupport();
-            kerOpts = haveImageSupport ? "-D HAVE_IMAGE2D -D DOUBLE_SUPPORT" : "";
-//            status = 1;
-        }
-    }
-    return status > 0;
-}
-
-
-bool SURF_OCL::setImage(InputArray _img, InputArray _mask)
-{
-    if( status <= 0 )
-        return false;
-    if( !_mask.empty())
-        return false;
-    int imgtype = _img.type();
-    CV_Assert(!_img.empty());
-    CV_Assert(params && params->nOctaves > 0 && params->nOctaveLayers > 0);
-
-    int min_size = calcSize(params->nOctaves - 1, 0);
-    Size sz = _img.size();
-    img_cols = sz.width;
-    img_rows = sz.height;
-    CV_Assert(img_rows >= min_size && img_cols >= min_size);
-
-    const int layer_rows = img_rows >> (params->nOctaves - 1);
-    const int layer_cols = img_cols >> (params->nOctaves - 1);
-    const int min_margin = ((calcSize((params->nOctaves - 1), 2) >> 1) >> (params->nOctaves - 1)) + 1;
-    CV_Assert(layer_rows - 2 * min_margin > 0);
-    CV_Assert(layer_cols - 2 * min_margin > 0);
-
-    maxFeatures   = std::min(static_cast<int>(img_cols*img_rows * 0.01f), 65535);
-    maxCandidates = std::min(static_cast<int>(1.5 * maxFeatures), 65535);
-
-    CV_Assert(maxFeatures > 0);
-
-    counters.create(1, params->nOctaves + 1, CV_32SC1);
-    counters.setTo(Scalar::all(0));
-
-    img.release();
-    if(_img.isUMat() && imgtype == CV_8UC1)
-        img = _img.getUMat();
-    else if( imgtype == CV_8UC1 )
-        _img.copyTo(img);
-    else
-        cvtColor(_img, img, COLOR_BGR2GRAY);
-
-    integral(img, sum);
-
-    if(haveImageSupport)
-    {
-        imgTex = ocl::Image2D(img);
-        sumTex = ocl::Image2D(sum);
-    }
-
-    return true;
-}
-
-
-bool SURF_OCL::detectKeypoints(UMat &keypoints)
-{
-    // create image pyramid buffers
-    // different layers have same sized buffers, but they are sampled from Gaussian kernel.
-    det.create(img_rows * (params->nOctaveLayers + 2), img_cols, CV_32F);
-    trace.create(img_rows * (params->nOctaveLayers + 2), img_cols, CV_32FC1);
-
-    maxPosBuffer.create(1, maxCandidates, CV_32SC4);
-    keypoints.create(SURF_OCL::ROWS_COUNT, maxFeatures, CV_32F);
-    keypoints.setTo(Scalar::all(0));
-    Mat cpuCounters;
-
-    for (int octave = 0; octave < params->nOctaves; ++octave)
-    {
-        const int layer_rows = img_rows >> octave;
-        const int layer_cols = img_cols >> octave;
-
-        if(!calcLayerDetAndTrace(octave, layer_rows))
-            return false;
-
-        if(!findMaximaInLayer(1 + octave, octave, layer_rows, layer_cols))
-            return false;
-
-        cpuCounters = counters.getMat(ACCESS_READ);
-        int maxCounter = cpuCounters.at<int>(1 + octave);
-        maxCounter = std::min(maxCounter, maxCandidates);
-        cpuCounters.release();
-
-        if (maxCounter > 0)
-        {
-            if(!interpolateKeypoint(maxCounter, keypoints, octave, layer_rows, maxFeatures))
-                return false;
-        }
-    }
-
-    cpuCounters = counters.getMat(ACCESS_READ);
-    int featureCounter = cpuCounters.at<int>(0);
-    featureCounter = std::min(featureCounter, maxFeatures);
-    cpuCounters.release();
-
-    keypoints = UMat(keypoints, Rect(0, 0, featureCounter, keypoints.rows));
-
-    if (params->upright)
-        return setUpRight(keypoints);
-    else
-        return calcOrientation(keypoints);
-}
-
-
-bool SURF_OCL::setUpRight(UMat &keypoints)
-{
-    int nFeatures = keypoints.cols;
-    if( nFeatures == 0 )
-        return true;
-
-    size_t globalThreads[3] = {nFeatures, 1};
-    ocl::Kernel kerUpRight("SURF_setUpRight", ocl::nonfree::surf_oclsrc, kerOpts);
-    return kerUpRight.args(ocl::KernelArg::ReadWrite(keypoints)).run(2, globalThreads, 0, true);
-}
-
-bool SURF_OCL::computeDescriptors(const UMat &keypoints, OutputArray _descriptors)
-{
-    int dsize = params->descriptorSize();
-    int nFeatures = keypoints.cols;
-    if (nFeatures == 0)
-    {
-        _descriptors.release();
-        return true;
-    }
-    _descriptors.create(nFeatures, dsize, CV_32F);
-    UMat descriptors;
-    if( _descriptors.isUMat() )
-        descriptors = _descriptors.getUMat();
-    else
-        descriptors.create(nFeatures, dsize, CV_32F);
-
-    ocl::Kernel kerCalcDesc, kerNormDesc;
-
-    if( dsize == 64 )
-    {
-        kerCalcDesc.create("SURF_computeDescriptors64", ocl::nonfree::surf_oclsrc, kerOpts);
-        kerNormDesc.create("SURF_normalizeDescriptors64", ocl::nonfree::surf_oclsrc, kerOpts);
-    }
-    else
-    {
-        CV_Assert(dsize == 128);
-        kerCalcDesc.create("SURF_computeDescriptors128", ocl::nonfree::surf_oclsrc, kerOpts);
-        kerNormDesc.create("SURF_normalizeDescriptors128", ocl::nonfree::surf_oclsrc, kerOpts);
-    }
-
-    size_t localThreads[] = {6, 6};
-    size_t globalThreads[] = {nFeatures*localThreads[0], localThreads[1]};
-
-    if(haveImageSupport)
-    {
-        kerCalcDesc.args(imgTex,
-                         img_rows, img_cols,
-                         ocl::KernelArg::ReadOnlyNoSize(keypoints),
-                         ocl::KernelArg::WriteOnlyNoSize(descriptors));
-    }
-    else
-    {
-        kerCalcDesc.args(ocl::KernelArg::ReadOnlyNoSize(img),
-                         img_rows, img_cols,
-                         ocl::KernelArg::ReadOnlyNoSize(keypoints),
-                         ocl::KernelArg::WriteOnlyNoSize(descriptors));
-    }
-
-    if(!kerCalcDesc.run(2, globalThreads, localThreads, true))
-        return false;
-
-    size_t localThreads_n[] = {dsize, 1};
-    size_t globalThreads_n[] = {nFeatures*localThreads_n[0], localThreads_n[1]};
-
-    globalThreads[0] = nFeatures * localThreads[0];
-    globalThreads[1] = localThreads[1];
-    bool ok = kerNormDesc.args(ocl::KernelArg::ReadWriteNoSize(descriptors)).
-                        run(2, globalThreads_n, localThreads_n, true);
-    if(ok && !_descriptors.isUMat())
-        descriptors.copyTo(_descriptors);
-    return ok;
-}
-
-
-void SURF_OCL::uploadKeypoints(const std::vector<KeyPoint> &keypoints, UMat &keypointsGPU)
-{
-    if (keypoints.empty())
-        keypointsGPU.release();
-    else
-    {
-        Mat keypointsCPU(SURF_OCL::ROWS_COUNT, static_cast<int>(keypoints.size()), CV_32FC1);
-
-        float *kp_x = keypointsCPU.ptr<float>(SURF_OCL::X_ROW);
-        float *kp_y = keypointsCPU.ptr<float>(SURF_OCL::Y_ROW);
-        int *kp_laplacian = keypointsCPU.ptr<int>(SURF_OCL::LAPLACIAN_ROW);
-        int *kp_octave = keypointsCPU.ptr<int>(SURF_OCL::OCTAVE_ROW);
-        float *kp_size = keypointsCPU.ptr<float>(SURF_OCL::SIZE_ROW);
-        float *kp_dir = keypointsCPU.ptr<float>(SURF_OCL::ANGLE_ROW);
-        float *kp_hessian = keypointsCPU.ptr<float>(SURF_OCL::HESSIAN_ROW);
-
-        for (size_t i = 0, size = keypoints.size(); i < size; ++i)
-        {
-            const KeyPoint &kp = keypoints[i];
-            kp_x[i] = kp.pt.x;
-            kp_y[i] = kp.pt.y;
-            kp_octave[i] = kp.octave;
-            kp_size[i] = kp.size;
-            kp_dir[i] = kp.angle;
-            kp_hessian[i] = kp.response;
-            kp_laplacian[i] = 1;
-        }
-
-        keypointsCPU.copyTo(keypointsGPU);
-    }
-}
-
-void SURF_OCL::downloadKeypoints(const UMat &keypointsGPU, std::vector<KeyPoint> &keypoints)
-{
-    const int nFeatures = keypointsGPU.cols;
-
-    if (nFeatures == 0)
-        keypoints.clear();
-    else
-    {
-        CV_Assert(keypointsGPU.type() == CV_32FC1 && keypointsGPU.rows == ROWS_COUNT);
-
-        Mat keypointsCPU = keypointsGPU.getMat(ACCESS_READ);
-        keypoints.resize(nFeatures);
-
-        float *kp_x = keypointsCPU.ptr<float>(SURF_OCL::X_ROW);
-        float *kp_y = keypointsCPU.ptr<float>(SURF_OCL::Y_ROW);
-        int *kp_laplacian = keypointsCPU.ptr<int>(SURF_OCL::LAPLACIAN_ROW);
-        int *kp_octave = keypointsCPU.ptr<int>(SURF_OCL::OCTAVE_ROW);
-        float *kp_size = keypointsCPU.ptr<float>(SURF_OCL::SIZE_ROW);
-        float *kp_dir = keypointsCPU.ptr<float>(SURF_OCL::ANGLE_ROW);
-        float *kp_hessian = keypointsCPU.ptr<float>(SURF_OCL::HESSIAN_ROW);
-
-        for (int i = 0; i < nFeatures; ++i)
-        {
-            KeyPoint &kp = keypoints[i];
-            kp.pt.x = kp_x[i];
-            kp.pt.y = kp_y[i];
-            kp.class_id = kp_laplacian[i];
-            kp.octave = kp_octave[i];
-            kp.size = kp_size[i];
-            kp.angle = kp_dir[i];
-            kp.response = kp_hessian[i];
-        }
-    }
-}
-
-bool SURF_OCL::detect(InputArray _img, InputArray _mask, UMat& keypoints)
-{
-    if( !setImage(_img, _mask) )
-        return false;
-
-    return detectKeypoints(keypoints);
-}
-
-
-bool SURF_OCL::detectAndCompute(InputArray _img, InputArray _mask, UMat& keypoints,
-                                OutputArray _descriptors, bool useProvidedKeypoints )
-{
-    if( !setImage(_img, _mask) )
-        return false;
-
-    if( !useProvidedKeypoints && !detectKeypoints(keypoints) )
-        return false;
-
-    return computeDescriptors(keypoints, _descriptors);
-}
-
-inline int divUp(int a, int b) { return (a + b-1)/b; }
-
-////////////////////////////
-// kernel caller definitions
-bool SURF_OCL::calcLayerDetAndTrace(int octave, int c_layer_rows)
-{
-    int nOctaveLayers = params->nOctaveLayers;
-    const int min_size = calcSize(octave, 0);
-    const int max_samples_i = 1 + ((img_rows - min_size) >> octave);
-    const int max_samples_j = 1 + ((img_cols - min_size) >> octave);
-
-    size_t localThreads[]  = {16, 16};
-    size_t globalThreads[] =
-    {
-        divUp(max_samples_j, (int)localThreads[0]) * localThreads[0],
-        divUp(max_samples_i, (int)localThreads[1]) * localThreads[1] * (nOctaveLayers + 2)
-    };
-    ocl::Kernel kerCalcDetTrace("SURF_calcLayerDetAndTrace", ocl::nonfree::surf_oclsrc, kerOpts);
-    if(haveImageSupport)
-    {
-        kerCalcDetTrace.args(sumTex,
-                             img_rows, img_cols, nOctaveLayers,
-                             octave, c_layer_rows,
-                             ocl::KernelArg::WriteOnlyNoSize(det),
-                             ocl::KernelArg::WriteOnlyNoSize(trace));
-    }
-    else
-    {
-        kerCalcDetTrace.args(ocl::KernelArg::ReadOnlyNoSize(sum),
-                             img_rows, img_cols, nOctaveLayers,
-                             octave, c_layer_rows,
-                             ocl::KernelArg::WriteOnlyNoSize(det),
-                             ocl::KernelArg::WriteOnlyNoSize(trace));
-    }
-    return kerCalcDetTrace.run(2, globalThreads, localThreads, true);
-}
-
-bool SURF_OCL::findMaximaInLayer(int counterOffset, int octave,
-                                 int layer_rows, int layer_cols)
-{
-    const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1;
-    int nOctaveLayers = params->nOctaveLayers;
-
-    size_t localThreads[3]  = {16, 16};
-    size_t globalThreads[3] =
-    {
-        divUp(layer_cols - 2 * min_margin, (int)localThreads[0] - 2) * localThreads[0],
-        divUp(layer_rows - 2 * min_margin, (int)localThreads[1] - 2) * nOctaveLayers * localThreads[1]
-    };
-
-    ocl::Kernel kerFindMaxima("SURF_findMaximaInLayer", ocl::nonfree::surf_oclsrc, kerOpts);
-    return kerFindMaxima.args(ocl::KernelArg::ReadOnlyNoSize(det),
-                              ocl::KernelArg::ReadOnlyNoSize(trace),
-                              ocl::KernelArg::PtrReadWrite(maxPosBuffer),
-                              ocl::KernelArg::PtrReadWrite(counters),
-                              counterOffset, img_rows, img_cols,
-                              octave, nOctaveLayers,
-                              layer_rows, layer_cols,
-                              maxCandidates,
-                              (float)params->hessianThreshold).run(2, globalThreads, localThreads, true);
-}
-
-bool SURF_OCL::interpolateKeypoint(int maxCounter, UMat &keypoints, int octave, int layer_rows, int max_features)
-{
-    size_t localThreads[3]  = {3, 3, 3};
-    size_t globalThreads[3] = {maxCounter*localThreads[0], localThreads[1], 3};
-
-    ocl::Kernel kerInterp("SURF_interpolateKeypoint", ocl::nonfree::surf_oclsrc, kerOpts);
-
-    return kerInterp.args(ocl::KernelArg::ReadOnlyNoSize(det),
-                   ocl::KernelArg::PtrReadOnly(maxPosBuffer),
-                   ocl::KernelArg::ReadWriteNoSize(keypoints),
-                   ocl::KernelArg::PtrReadWrite(counters),
-                   img_rows, img_cols, octave, layer_rows, max_features).
-        run(3, globalThreads, localThreads, true);
-}
-
-bool SURF_OCL::calcOrientation(UMat &keypoints)
-{
-    int nFeatures = keypoints.cols;
-    if( nFeatures == 0 )
-        return true;
-    ocl::Kernel kerOri("SURF_calcOrientation", ocl::nonfree::surf_oclsrc, kerOpts);
-
-    if( haveImageSupport )
-        kerOri.args(sumTex, img_rows, img_cols,
-                    ocl::KernelArg::ReadWriteNoSize(keypoints));
-    else
-        kerOri.args(ocl::KernelArg::ReadOnlyNoSize(sum),
-                    img_rows, img_cols,
-                    ocl::KernelArg::ReadWriteNoSize(keypoints));
-
-    size_t localThreads[3]  = {ORI_LOCAL_SIZE, 1};
-    size_t globalThreads[3] = {nFeatures * localThreads[0], 1};
-    return kerOri.run(2, globalThreads, localThreads, true);
-}
-
-}
diff --git a/modules/nonfree/test/test_detectors.cpp b/modules/nonfree/test/test_detectors.cpp
deleted file mode 100644 (file)
index 5854207..0000000
+++ /dev/null
@@ -1,314 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "test_precomp.hpp"
-
-#include <string>
-#include <iostream>
-#include <iterator>
-#include <fstream>
-#include <numeric>
-#include <algorithm>
-#include <iterator>
-
-using namespace cv;
-using namespace std;
-
-class CV_DetectorsTest : public cvtest::BaseTest
-{
-public:
-    CV_DetectorsTest();
-    ~CV_DetectorsTest();
-protected:
-    void run(int);
-    template <class T> bool testDetector(const Mat& img, const T& detector, vector<KeyPoint>& expected);
-
-    void LoadExpected(const string& file, vector<KeyPoint>& out);
-};
-
-CV_DetectorsTest::CV_DetectorsTest()
-{
-}
-CV_DetectorsTest::~CV_DetectorsTest() {}
-
-void getRotation(const Mat& img, Mat& aff, Mat& out)
-{
-    Point center(img.cols/2, img.rows/2);
-    aff = getRotationMatrix2D(center, 30, 1);
-    warpAffine( img, out, aff, img.size());
-}
-
-void getZoom(const Mat& img, Mat& aff, Mat& out)
-{
-    const double mult = 1.2;
-
-    aff.create(2, 3, CV_64F);
-    double *data = aff.ptr<double>();
-    data[0] = mult; data[1] =    0; data[2] = 0;
-    data[3] =    0; data[4] = mult; data[5] = 0;
-
-    warpAffine( img, out, aff, img.size());
-}
-
-void getBlur(const Mat& img, Mat& aff, Mat& out)
-{
-    aff.create(2, 3, CV_64F);
-    double *data = aff.ptr<double>();
-    data[0] = 1; data[1] = 0; data[2] = 0;
-    data[3] = 0; data[4] = 1; data[5] = 0;
-
-    GaussianBlur(img, out, Size(5, 5), 2);
-}
-
-void getBrightness(const Mat& img, Mat& aff, Mat& out)
-{
-    aff.create(2, 3, CV_64F);
-    double *data = aff.ptr<double>();
-    data[0] = 1; data[1] = 0; data[2] = 0;
-    data[3] = 0; data[4] = 1; data[5] = 0;
-
-    add(img, Mat(img.size(), img.type(), Scalar(15)), out);
-}
-
-void showOrig(const Mat& img, const vector<KeyPoint>& orig_pts)
-{
-
-    Mat img_color;
-    cvtColor(img, img_color, COLOR_GRAY2BGR);
-
-    for(size_t i = 0; i < orig_pts.size(); ++i)
-        circle(img_color, orig_pts[i].pt, (int)orig_pts[i].size/2, Scalar(0, 255, 0));
-
-    namedWindow("O"); imshow("O", img_color);
-}
-
-void show(const string& name, const Mat& new_img, const vector<KeyPoint>& new_pts, const vector<KeyPoint>& transf_pts)
-{
-
-    Mat new_img_color;
-    cvtColor(new_img, new_img_color, COLOR_GRAY2BGR);
-
-    for(size_t i = 0; i < transf_pts.size(); ++i)
-        circle(new_img_color, transf_pts[i].pt, (int)transf_pts[i].size/2, Scalar(255, 0, 0));
-
-    for(size_t i = 0; i < new_pts.size(); ++i)
-        circle(new_img_color, new_pts[i].pt, (int)new_pts[i].size/2, Scalar(0, 0, 255));
-
-    namedWindow(name + "_T"); imshow(name + "_T", new_img_color);
-}
-
-struct WrapPoint
-{
-    const double* R;
-    WrapPoint(const Mat& rmat) : R(rmat.ptr<double>()) { };
-
-    KeyPoint operator()(const KeyPoint& kp) const
-    {
-        KeyPoint res = kp;
-        res.pt.x = static_cast<float>(kp.pt.x * R[0] + kp.pt.y * R[1] + R[2]);
-        res.pt.y = static_cast<float>(kp.pt.x * R[3] + kp.pt.y * R[4] + R[5]);
-        return res;
-    }
-};
-
-struct sortByR { bool operator()(const KeyPoint& kp1, const KeyPoint& kp2) { return norm(kp1.pt) < norm(kp2.pt); } };
-
-template <class T> bool CV_DetectorsTest::testDetector(const Mat& img, const T& detector, vector<KeyPoint>& exp)
-{
-    vector<KeyPoint> orig_kpts;
-    detector(img, orig_kpts);
-
-    typedef void (*TransfFunc )(const Mat&, Mat&, Mat& FransfFunc);
-    const TransfFunc transfFunc[] = { getRotation, getZoom, getBlur, getBrightness };
-    //const string names[] =  { "Rotation", "Zoom", "Blur", "Brightness" };
-    const size_t case_num = sizeof(transfFunc)/sizeof(transfFunc[0]);
-
-    vector<Mat> affs(case_num);
-    vector<Mat> new_imgs(case_num);
-
-    vector< vector<KeyPoint> > new_kpts(case_num);
-    vector< vector<KeyPoint> > transf_kpts(case_num);
-
-    //showOrig(img, orig_kpts);
-    for(size_t i = 0; i < case_num; ++i)
-    {
-        transfFunc[i](img, affs[i], new_imgs[i]);
-        detector(new_imgs[i], new_kpts[i]);
-        transform(orig_kpts.begin(), orig_kpts.end(), back_inserter(transf_kpts[i]), WrapPoint(affs[i]));
-        //show(names[i], new_imgs[i], new_kpts[i], transf_kpts[i]);
-    }
-
-    const float thres = 3;
-    const float nthres = 3;
-
-    vector<KeyPoint> result;
-    for(size_t i = 0; i < orig_kpts.size(); ++i)
-    {
-        const KeyPoint& okp = orig_kpts[i];
-        int foundCounter = 0;
-        for(size_t j = 0; j < case_num; ++j)
-        {
-            const KeyPoint& tkp = transf_kpts[j][i];
-
-            size_t k = 0;
-
-            for(; k < new_kpts[j].size(); ++k)
-                if (norm(new_kpts[j][k].pt - tkp.pt) < nthres && fabs(new_kpts[j][k].size - tkp.size) < thres)
-                    break;
-
-            if (k != new_kpts[j].size())
-                ++foundCounter;
-
-        }
-        if (foundCounter == (int)case_num)
-            result.push_back(okp);
-    }
-
-    sort(result.begin(), result.end(), sortByR());
-    sort(exp.begin(), exp.end(), sortByR());
-
-    if (result.size() != exp.size())
-    {
-      ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
-      return false;
-    }
-
-    int foundCounter1 = 0;
-    for(size_t i = 0; i < exp.size(); ++i)
-    {
-        const KeyPoint& e = exp[i];
-        size_t j = 0;
-        for(; j < result.size(); ++j)
-        {
-            const KeyPoint& r = result[i];
-            if (norm(r.pt-e.pt) < nthres && fabs(r.size - e.size) < thres)
-                break;
-        }
-        if (j != result.size())
-            ++foundCounter1;
-    }
-
-    int foundCounter2 = 0;
-    for(size_t i = 0; i < result.size(); ++i)
-    {
-        const KeyPoint& r = result[i];
-        size_t j = 0;
-        for(; j < exp.size(); ++j)
-        {
-            const KeyPoint& e = exp[i];
-            if (norm(r.pt-e.pt) < nthres && fabs(r.size - e.size) < thres)
-                break;
-        }
-        if (j != exp.size())
-            ++foundCounter2;
-    }
-    //showOrig(img, result); waitKey();
-
-    const float errorRate = 0.9f;
-    if (float(foundCounter1)/exp.size() < errorRate || float(foundCounter2)/result.size() < errorRate)
-    {
-        ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH);
-        return false;
-    }
-    return true;
-}
-
-struct SurfNoMaskWrap
-{
-    const SURF& detector;
-    SurfNoMaskWrap(const SURF& surf) : detector(surf) {}
-    SurfNoMaskWrap& operator=(const SurfNoMaskWrap&);
-    void operator()(const Mat& img, vector<KeyPoint>& kpts) const { detector(img, Mat(), kpts); }
-};
-
-void CV_DetectorsTest::LoadExpected(const string& file, vector<KeyPoint>& out)
-{
-    Mat mat_exp;
-    FileStorage fs(file, FileStorage::READ);
-    if (fs.isOpened())
-    {
-        read( fs["ResultVectorData"], mat_exp, Mat() );
-        out.resize(mat_exp.cols / sizeof(KeyPoint));
-        copy(mat_exp.ptr<KeyPoint>(), mat_exp.ptr<KeyPoint>() + out.size(), out.begin());
-    }
-    else
-    {
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA);
-        out.clear();
-    }
-}
-
-void CV_DetectorsTest::run( int /*start_from*/ )
-{
-    Mat img = imread(string(ts->get_data_path()) + "shared/graffiti.png", 0);
-
-    if (img.empty())
-    {
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-        return;
-    }
-
-    Mat to_test(img.size() * 2, img.type(), Scalar(0));
-    Mat roi = to_test(Rect(img.rows/2, img.cols/2, img.cols, img.rows));
-    img.copyTo(roi);
-    GaussianBlur(to_test, to_test, Size(3, 3), 1.5);
-
-    vector<KeyPoint> exp;
-    LoadExpected(string(ts->get_data_path()) + "detectors/surf.xml", exp);
-    if (exp.empty())
-        return;
-
-    if (!testDetector(to_test, SurfNoMaskWrap(SURF(1536+512+512, 2)), exp))
-        return;
-
-    LoadExpected(string(ts->get_data_path()) + "detectors/star.xml", exp);
-    if (exp.empty())
-        return;
-
-    if (!testDetector(to_test, StarDetector(45, 30, 10, 8, 5), exp))
-        return;
-
-    ts->set_failed_test_info( cvtest::TS::OK);
-}
-
-
-TEST(Features2d_Detectors, regression) { CV_DetectorsTest test; test.safe_run(); }
diff --git a/modules/nonfree/test/test_features2d.cpp b/modules/nonfree/test/test_features2d.cpp
deleted file mode 100644 (file)
index 3cc5b47..0000000
+++ /dev/null
@@ -1,1224 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                        Intel License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of Intel Corporation may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "test_precomp.hpp"
-#include "opencv2/calib3d.hpp"
-
-using namespace std;
-using namespace cv;
-
-const string FEATURES2D_DIR = "features2d";
-const string DETECTOR_DIR = FEATURES2D_DIR + "/feature_detectors";
-const string DESCRIPTOR_DIR = FEATURES2D_DIR + "/descriptor_extractors";
-const string IMAGE_FILENAME = "tsukuba.png";
-
-/****************************************************************************************\
-*            Regression tests for feature detectors comparing keypoints.                 *
-\****************************************************************************************/
-
-class CV_FeatureDetectorTest : public cvtest::BaseTest
-{
-public:
-    CV_FeatureDetectorTest( const string& _name, const Ptr<FeatureDetector>& _fdetector ) :
-        name(_name), fdetector(_fdetector) {}
-
-protected:
-    bool isSimilarKeypoints( const KeyPoint& p1, const KeyPoint& p2 );
-    void compareKeypointSets( const vector<KeyPoint>& validKeypoints, const vector<KeyPoint>& calcKeypoints );
-
-    void emptyDataTest();
-    void regressionTest(); // TODO test of detect() with mask
-
-    virtual void run( int );
-
-    string name;
-    Ptr<FeatureDetector> fdetector;
-};
-
-void CV_FeatureDetectorTest::emptyDataTest()
-{
-    // One image.
-    Mat image;
-    vector<KeyPoint> keypoints;
-    try
-    {
-        fdetector->detect( image, keypoints );
-    }
-    catch(...)
-    {
-        ts->printf( cvtest::TS::LOG, "detect() on empty image must not generate exception (1).\n" );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-    }
-
-    if( !keypoints.empty() )
-    {
-        ts->printf( cvtest::TS::LOG, "detect() on empty image must return empty keypoints vector (1).\n" );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-        return;
-    }
-
-    // Several images.
-    vector<Mat> images;
-    vector<vector<KeyPoint> > keypointCollection;
-    try
-    {
-        fdetector->detect( images, keypointCollection );
-    }
-    catch(...)
-    {
-        ts->printf( cvtest::TS::LOG, "detect() on empty image vector must not generate exception (2).\n" );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-    }
-}
-
-bool CV_FeatureDetectorTest::isSimilarKeypoints( const KeyPoint& p1, const KeyPoint& p2 )
-{
-    const float maxPtDif = 1.f;
-    const float maxSizeDif = 1.f;
-    const float maxAngleDif = 2.f;
-    const float maxResponseDif = 0.1f;
-
-    float dist = (float)norm( p1.pt - p2.pt );
-    return (dist < maxPtDif &&
-            fabs(p1.size - p2.size) < maxSizeDif &&
-            abs(p1.angle - p2.angle) < maxAngleDif &&
-            abs(p1.response - p2.response) < maxResponseDif &&
-            p1.octave == p2.octave &&
-            p1.class_id == p2.class_id );
-}
-
-void CV_FeatureDetectorTest::compareKeypointSets( const vector<KeyPoint>& validKeypoints, const vector<KeyPoint>& calcKeypoints )
-{
-    const float maxCountRatioDif = 0.01f;
-
-    // Compare counts of validation and calculated keypoints.
-    float countRatio = (float)validKeypoints.size() / (float)calcKeypoints.size();
-    if( countRatio < 1 - maxCountRatioDif || countRatio > 1.f + maxCountRatioDif )
-    {
-        ts->printf( cvtest::TS::LOG, "Bad keypoints count ratio (validCount = %d, calcCount = %d).\n",
-                    validKeypoints.size(), calcKeypoints.size() );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-        return;
-    }
-
-    int progress = 0, progressCount = (int)(validKeypoints.size() * calcKeypoints.size());
-    int badPointCount = 0, commonPointCount = max((int)validKeypoints.size(), (int)calcKeypoints.size());
-    for( size_t v = 0; v < validKeypoints.size(); v++ )
-    {
-        int nearestIdx = -1;
-        float minDist = std::numeric_limits<float>::max();
-
-        for( size_t c = 0; c < calcKeypoints.size(); c++ )
-        {
-            progress = update_progress( progress, (int)(v*calcKeypoints.size() + c), progressCount, 0 );
-            float curDist = (float)norm( calcKeypoints[c].pt - validKeypoints[v].pt );
-            if( curDist < minDist )
-            {
-                minDist = curDist;
-                nearestIdx = (int)c;
-            }
-        }
-
-        assert( minDist >= 0 );
-        if( !isSimilarKeypoints( validKeypoints[v], calcKeypoints[nearestIdx] ) )
-            badPointCount++;
-    }
-    ts->printf( cvtest::TS::LOG, "badPointCount = %d; validPointCount = %d; calcPointCount = %d\n",
-                badPointCount, validKeypoints.size(), calcKeypoints.size() );
-    if( badPointCount > 0.9 * commonPointCount )
-    {
-        ts->printf( cvtest::TS::LOG, " - Bad accuracy!\n" );
-        ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
-        return;
-    }
-    ts->printf( cvtest::TS::LOG, " - OK\n" );
-}
-
-void CV_FeatureDetectorTest::regressionTest()
-{
-    assert( !fdetector.empty() );
-    string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
-    string resFilename = string(ts->get_data_path()) + DETECTOR_DIR + "/" + string(name) + ".xml.gz";
-
-    // Read the test image.
-    Mat image = imread( imgFilename );
-    if( image.empty() )
-    {
-        ts->printf( cvtest::TS::LOG, "Image %s can not be read.\n", imgFilename.c_str() );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-        return;
-    }
-
-    FileStorage fs( resFilename, FileStorage::READ );
-
-    // Compute keypoints.
-    vector<KeyPoint> calcKeypoints;
-    fdetector->detect( image, calcKeypoints );
-
-    if( fs.isOpened() ) // Compare computed and valid keypoints.
-    {
-        // TODO compare saved feature detector params with current ones
-
-        // Read validation keypoints set.
-        vector<KeyPoint> validKeypoints;
-        read( fs["keypoints"], validKeypoints );
-        if( validKeypoints.empty() )
-        {
-            ts->printf( cvtest::TS::LOG, "Keypoints can not be read.\n" );
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-            return;
-        }
-
-        compareKeypointSets( validKeypoints, calcKeypoints );
-    }
-    else // Write detector parameters and computed keypoints as validation data.
-    {
-        fs.open( resFilename, FileStorage::WRITE );
-        if( !fs.isOpened() )
-        {
-            ts->printf( cvtest::TS::LOG, "File %s can not be opened to write.\n", resFilename.c_str() );
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-            return;
-        }
-        else
-        {
-            fs << "detector_params" << "{";
-            fdetector->write( fs );
-            fs << "}";
-
-            write( fs, "keypoints", calcKeypoints );
-        }
-    }
-}
-
-void CV_FeatureDetectorTest::run( int /*start_from*/ )
-{
-    if( !fdetector )
-    {
-        ts->printf( cvtest::TS::LOG, "Feature detector is empty.\n" );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-        return;
-    }
-
-    emptyDataTest();
-    regressionTest();
-
-    ts->set_failed_test_info( cvtest::TS::OK );
-}
-
-/****************************************************************************************\
-*                     Regression tests for descriptor extractors.                        *
-\****************************************************************************************/
-static void writeMatInBin( const Mat& mat, const string& filename )
-{
-    FILE* f = fopen( filename.c_str(), "wb");
-    if( f )
-    {
-        int type = mat.type();
-        fwrite( (void*)&mat.rows, sizeof(int), 1, f );
-        fwrite( (void*)&mat.cols, sizeof(int), 1, f );
-        fwrite( (void*)&type, sizeof(int), 1, f );
-        int dataSize = (int)(mat.step * mat.rows * mat.channels());
-        fwrite( (void*)&dataSize, sizeof(int), 1, f );
-        fwrite( (void*)mat.data, 1, dataSize, f );
-        fclose(f);
-    }
-}
-
-static Mat readMatFromBin( const string& filename )
-{
-    FILE* f = fopen( filename.c_str(), "rb" );
-    if( f )
-    {
-        int rows, cols, type, dataSize;
-        size_t elements_read1 = fread( (void*)&rows, sizeof(int), 1, f );
-        size_t elements_read2 = fread( (void*)&cols, sizeof(int), 1, f );
-        size_t elements_read3 = fread( (void*)&type, sizeof(int), 1, f );
-        size_t elements_read4 = fread( (void*)&dataSize, sizeof(int), 1, f );
-        CV_Assert(elements_read1 == 1 && elements_read2 == 1 && elements_read3 == 1 && elements_read4 == 1);
-
-        int step = dataSize / rows / CV_ELEM_SIZE(type);
-        CV_Assert(step >= cols);
-
-        Mat m = Mat( rows, step, type).colRange(0, cols);
-
-        size_t elements_read = fread( m.ptr(), 1, dataSize, f );
-        CV_Assert(elements_read == (size_t)(dataSize));
-        fclose(f);
-
-        return m;
-    }
-    return Mat();
-}
-
-template<class Distance>
-class CV_DescriptorExtractorTest : public cvtest::BaseTest
-{
-public:
-    typedef typename Distance::ValueType ValueType;
-    typedef typename Distance::ResultType DistanceType;
-
-    CV_DescriptorExtractorTest( const string _name, DistanceType _maxDist, const Ptr<DescriptorExtractor>& _dextractor,
-                                Distance d = Distance() ):
-            name(_name), maxDist(_maxDist), dextractor(_dextractor), distance(d) {}
-protected:
-    virtual void createDescriptorExtractor() {}
-
-    void compareDescriptors( const Mat& validDescriptors, const Mat& calcDescriptors )
-    {
-        if( validDescriptors.size != calcDescriptors.size || validDescriptors.type() != calcDescriptors.type() )
-        {
-            ts->printf(cvtest::TS::LOG, "Valid and computed descriptors matrices must have the same size and type.\n");
-            ts->printf(cvtest::TS::LOG, "Valid size is (%d x %d) actual size is (%d x %d).\n", validDescriptors.rows, validDescriptors.cols, calcDescriptors.rows, calcDescriptors.cols);
-            ts->printf(cvtest::TS::LOG, "Valid type is %d  actual type is %d.\n", validDescriptors.type(), calcDescriptors.type());
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-            return;
-        }
-
-        CV_Assert( DataType<ValueType>::type == validDescriptors.type() );
-
-        int dimension = validDescriptors.cols;
-        DistanceType curMaxDist = std::numeric_limits<DistanceType>::min();
-        for( int y = 0; y < validDescriptors.rows; y++ )
-        {
-            DistanceType dist = distance( validDescriptors.ptr<ValueType>(y), calcDescriptors.ptr<ValueType>(y), dimension );
-            if( dist > curMaxDist )
-                curMaxDist = dist;
-        }
-
-        stringstream ss;
-        ss << "Max distance between valid and computed descriptors " << curMaxDist;
-        if( curMaxDist < maxDist )
-            ss << "." << endl;
-        else
-        {
-            ss << ">" << maxDist  << " - bad accuracy!"<< endl;
-            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
-        }
-        ts->printf(cvtest::TS::LOG,  ss.str().c_str() );
-    }
-
-    void emptyDataTest()
-    {
-        assert( !dextractor.empty() );
-
-        // One image.
-        Mat image;
-        vector<KeyPoint> keypoints;
-        Mat descriptors;
-
-        try
-        {
-            dextractor->compute( image, keypoints, descriptors );
-        }
-        catch(...)
-        {
-            ts->printf( cvtest::TS::LOG, "compute() on empty image and empty keypoints must not generate exception (1).\n");
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-        }
-
-        image.create( 50, 50, CV_8UC3 );
-        try
-        {
-            dextractor->compute( image, keypoints, descriptors );
-        }
-        catch(...)
-        {
-            ts->printf( cvtest::TS::LOG, "compute() on nonempty image and empty keypoints must not generate exception (1).\n");
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-        }
-
-        // Several images.
-        vector<Mat> images;
-        vector<vector<KeyPoint> > keypointsCollection;
-        vector<Mat> descriptorsCollection;
-        try
-        {
-            dextractor->compute( images, keypointsCollection, descriptorsCollection );
-        }
-        catch(...)
-        {
-            ts->printf( cvtest::TS::LOG, "compute() on empty images and empty keypoints collection must not generate exception (2).\n");
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-        }
-    }
-
-    void regressionTest()
-    {
-        assert( !dextractor.empty() );
-
-        // Read the test image.
-        string imgFilename =  string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
-
-        Mat img = imread( imgFilename );
-        if( img.empty() )
-        {
-            ts->printf( cvtest::TS::LOG, "Image %s can not be read.\n", imgFilename.c_str() );
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-            return;
-        }
-
-        vector<KeyPoint> keypoints;
-        FileStorage fs( string(ts->get_data_path()) + FEATURES2D_DIR + "/keypoints.xml.gz", FileStorage::READ );
-        if( fs.isOpened() )
-        {
-            read( fs.getFirstTopLevelNode(), keypoints );
-
-            Mat calcDescriptors;
-            double t = (double)getTickCount();
-            dextractor->compute( img, keypoints, calcDescriptors );
-            t = getTickCount() - t;
-            ts->printf(cvtest::TS::LOG, "\nAverage time of computing one descriptor = %g ms.\n", t/((double)getTickFrequency()*1000.)/calcDescriptors.rows );
-
-            if( calcDescriptors.rows != (int)keypoints.size() )
-            {
-                ts->printf( cvtest::TS::LOG, "Count of computed descriptors and keypoints count must be equal.\n" );
-                ts->printf( cvtest::TS::LOG, "Count of keypoints is            %d.\n", (int)keypoints.size() );
-                ts->printf( cvtest::TS::LOG, "Count of computed descriptors is %d.\n", calcDescriptors.rows );
-                ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-                return;
-            }
-
-            if( calcDescriptors.cols != dextractor->descriptorSize() || calcDescriptors.type() != dextractor->descriptorType() )
-            {
-                ts->printf( cvtest::TS::LOG, "Incorrect descriptor size or descriptor type.\n" );
-                ts->printf( cvtest::TS::LOG, "Expected size is   %d.\n", dextractor->descriptorSize() );
-                ts->printf( cvtest::TS::LOG, "Calculated size is %d.\n", calcDescriptors.cols );
-                ts->printf( cvtest::TS::LOG, "Expected type is   %d.\n", dextractor->descriptorType() );
-                ts->printf( cvtest::TS::LOG, "Calculated type is %d.\n", calcDescriptors.type() );
-                ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-                return;
-            }
-
-            // TODO read and write descriptor extractor parameters and check them
-            Mat validDescriptors = readDescriptors();
-            if( !validDescriptors.empty() )
-                compareDescriptors( validDescriptors, calcDescriptors );
-            else
-            {
-                if( !writeDescriptors( calcDescriptors ) )
-                {
-                    ts->printf( cvtest::TS::LOG, "Descriptors can not be written.\n" );
-                    ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-                    return;
-                }
-            }
-        }
-        else
-        {
-            ts->printf( cvtest::TS::LOG, "Compute and write keypoints.\n" );
-            fs.open( string(ts->get_data_path()) + FEATURES2D_DIR + "/keypoints.xml.gz", FileStorage::WRITE );
-            if( fs.isOpened() )
-            {
-                SurfFeatureDetector fd;
-                fd.detect(img, keypoints);
-                write( fs, "keypoints", keypoints );
-            }
-            else
-            {
-                ts->printf(cvtest::TS::LOG, "File for writting keypoints can not be opened.\n");
-                ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-                return;
-            }
-        }
-    }
-
-    void run(int)
-    {
-        createDescriptorExtractor();
-        if( !dextractor )
-        {
-            ts->printf(cvtest::TS::LOG, "Descriptor extractor is empty.\n");
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-            return;
-        }
-
-        emptyDataTest();
-        regressionTest();
-
-        ts->set_failed_test_info( cvtest::TS::OK );
-    }
-
-    virtual Mat readDescriptors()
-    {
-        Mat res = readMatFromBin( string(ts->get_data_path()) + DESCRIPTOR_DIR + "/" + string(name) );
-        return res;
-    }
-
-    virtual bool writeDescriptors( Mat& descs )
-    {
-        writeMatInBin( descs,  string(ts->get_data_path()) + DESCRIPTOR_DIR + "/" + string(name) );
-        return true;
-    }
-
-    string name;
-    const DistanceType maxDist;
-    Ptr<DescriptorExtractor> dextractor;
-    Distance distance;
-
-private:
-    CV_DescriptorExtractorTest& operator=(const CV_DescriptorExtractorTest&) { return *this; }
-};
-
-/*template<typename T, typename Distance>
-class CV_CalonderDescriptorExtractorTest : public CV_DescriptorExtractorTest<Distance>
-{
-public:
-    CV_CalonderDescriptorExtractorTest( const char* testName, float _normDif, float _prevTime ) :
-            CV_DescriptorExtractorTest<Distance>( testName, _normDif, Ptr<DescriptorExtractor>(), _prevTime )
-    {}
-
-protected:
-    virtual void createDescriptorExtractor()
-    {
-        CV_DescriptorExtractorTest<Distance>::dextractor =
-                new CalonderDescriptorExtractor<T>( string(CV_DescriptorExtractorTest<Distance>::ts->get_data_path()) +
-                                                    FEATURES2D_DIR + "/calonder_classifier.rtc");
-    }
-};*/
-
-/****************************************************************************************\
-*                       Algorithmic tests for descriptor matchers                        *
-\****************************************************************************************/
-class CV_DescriptorMatcherTest : public cvtest::BaseTest
-{
-public:
-    CV_DescriptorMatcherTest( const string& _name, const Ptr<DescriptorMatcher>& _dmatcher, float _badPart ) :
-        badPart(_badPart), name(_name), dmatcher(_dmatcher)
-        {}
-protected:
-    static const int dim = 500;
-    static const int queryDescCount = 300; // must be even number because we split train data in some cases in two
-    static const int countFactor = 4; // do not change it
-    const float badPart;
-
-    virtual void run( int );
-    void generateData( Mat& query, Mat& train );
-
-    void emptyDataTest();
-    void matchTest( const Mat& query, const Mat& train );
-    void knnMatchTest( const Mat& query, const Mat& train );
-    void radiusMatchTest( const Mat& query, const Mat& train );
-
-    string name;
-    Ptr<DescriptorMatcher> dmatcher;
-
-private:
-    CV_DescriptorMatcherTest& operator=(const CV_DescriptorMatcherTest&) { return *this; }
-};
-
-void CV_DescriptorMatcherTest::emptyDataTest()
-{
-    assert( !dmatcher.empty() );
-    Mat queryDescriptors, trainDescriptors, mask;
-    vector<Mat> trainDescriptorCollection, masks;
-    vector<DMatch> matches;
-    vector<vector<DMatch> > vmatches;
-
-    try
-    {
-        dmatcher->match( queryDescriptors, trainDescriptors, matches, mask );
-    }
-    catch(...)
-    {
-        ts->printf( cvtest::TS::LOG, "match() on empty descriptors must not generate exception (1).\n" );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-    }
-
-    try
-    {
-        dmatcher->knnMatch( queryDescriptors, trainDescriptors, vmatches, 2, mask );
-    }
-    catch(...)
-    {
-        ts->printf( cvtest::TS::LOG, "knnMatch() on empty descriptors must not generate exception (1).\n" );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-    }
-
-    try
-    {
-        dmatcher->radiusMatch( queryDescriptors, trainDescriptors, vmatches, 10.f, mask );
-    }
-    catch(...)
-    {
-        ts->printf( cvtest::TS::LOG, "radiusMatch() on empty descriptors must not generate exception (1).\n" );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-    }
-
-    try
-    {
-        dmatcher->add( trainDescriptorCollection );
-    }
-    catch(...)
-    {
-        ts->printf( cvtest::TS::LOG, "add() on empty descriptors must not generate exception.\n" );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-    }
-
-    try
-    {
-        dmatcher->match( queryDescriptors, matches, masks );
-    }
-    catch(...)
-    {
-        ts->printf( cvtest::TS::LOG, "match() on empty descriptors must not generate exception (2).\n" );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-    }
-
-    try
-    {
-        dmatcher->knnMatch( queryDescriptors, vmatches, 2, masks );
-    }
-    catch(...)
-    {
-        ts->printf( cvtest::TS::LOG, "knnMatch() on empty descriptors must not generate exception (2).\n" );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-    }
-
-    try
-    {
-        dmatcher->radiusMatch( queryDescriptors, vmatches, 10.f, masks );
-    }
-    catch(...)
-    {
-        ts->printf( cvtest::TS::LOG, "radiusMatch() on empty descriptors must not generate exception (2).\n" );
-        ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-    }
-
-}
-
-void CV_DescriptorMatcherTest::generateData( Mat& query, Mat& train )
-{
-    RNG& rng = theRNG();
-
-    // Generate query descriptors randomly.
-    // Descriptor vector elements are integer values.
-    Mat buf( queryDescCount, dim, CV_32SC1 );
-    rng.fill( buf, RNG::UNIFORM, Scalar::all(0), Scalar(3) );
-    buf.convertTo( query, CV_32FC1 );
-
-    // Generate train decriptors as follows:
-    // copy each query descriptor to train set countFactor times
-    // and perturb some one element of the copied descriptors in
-    // in ascending order. General boundaries of the perturbation
-    // are (0.f, 1.f).
-    train.create( query.rows*countFactor, query.cols, CV_32FC1 );
-    float step = 1.f / countFactor;
-    for( int qIdx = 0; qIdx < query.rows; qIdx++ )
-    {
-        Mat queryDescriptor = query.row(qIdx);
-        for( int c = 0; c < countFactor; c++ )
-        {
-            int tIdx = qIdx * countFactor + c;
-            Mat trainDescriptor = train.row(tIdx);
-            queryDescriptor.copyTo( trainDescriptor );
-            int elem = rng(dim);
-            float diff = rng.uniform( step*c, step*(c+1) );
-            trainDescriptor.at<float>(0, elem) += diff;
-        }
-    }
-}
-
-void CV_DescriptorMatcherTest::matchTest( const Mat& query, const Mat& train )
-{
-    dmatcher->clear();
-
-    // test const version of match()
-    {
-        vector<DMatch> matches;
-        dmatcher->match( query, train, matches );
-
-        if( (int)matches.size() != queryDescCount )
-        {
-            ts->printf(cvtest::TS::LOG, "Incorrect matches count while test match() function (1).\n");
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-        }
-        else
-        {
-            int badCount = 0;
-            for( size_t i = 0; i < matches.size(); i++ )
-            {
-                DMatch match = matches[i];
-                if( (match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor) || (match.imgIdx != 0) )
-                    badCount++;
-            }
-            if( (float)badCount > (float)queryDescCount*badPart )
-            {
-                ts->printf( cvtest::TS::LOG, "%f - too large bad matches part while test match() function (1).\n",
-                            (float)badCount/(float)queryDescCount );
-                ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-            }
-        }
-    }
-
-    // test version of match() with add()
-    {
-        vector<DMatch> matches;
-        // make add() twice to test such case
-        dmatcher->add( vector<Mat>(1,train.rowRange(0, train.rows/2)) );
-        dmatcher->add( vector<Mat>(1,train.rowRange(train.rows/2, train.rows)) );
-        // prepare masks (make first nearest match illegal)
-        vector<Mat> masks(2);
-        for(int mi = 0; mi < 2; mi++ )
-        {
-            masks[mi] = Mat(query.rows, train.rows/2, CV_8UC1, Scalar::all(1));
-            for( int di = 0; di < queryDescCount/2; di++ )
-                masks[mi].col(di*countFactor).setTo(Scalar::all(0));
-        }
-
-        dmatcher->match( query, matches, masks );
-
-        if( (int)matches.size() != queryDescCount )
-        {
-            ts->printf(cvtest::TS::LOG, "Incorrect matches count while test match() function (2).\n");
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-        }
-        else
-        {
-            int badCount = 0;
-            for( size_t i = 0; i < matches.size(); i++ )
-            {
-                DMatch match = matches[i];
-                int shift = dmatcher->isMaskSupported() ? 1 : 0;
-                {
-                    if( i < queryDescCount/2 )
-                    {
-                        if( (match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor + shift) || (match.imgIdx != 0) )
-                            badCount++;
-                    }
-                    else
-                    {
-                        if( (match.queryIdx != (int)i) || (match.trainIdx != ((int)i-queryDescCount/2)*countFactor + shift) || (match.imgIdx != 1) )
-                            badCount++;
-                    }
-                }
-            }
-            if( (float)badCount > (float)queryDescCount*badPart )
-            {
-                ts->printf( cvtest::TS::LOG, "%f - too large bad matches part while test match() function (2).\n",
-                            (float)badCount/(float)queryDescCount );
-                ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
-            }
-        }
-    }
-}
-
-void CV_DescriptorMatcherTest::knnMatchTest( const Mat& query, const Mat& train )
-{
-    dmatcher->clear();
-
-    // test const version of knnMatch()
-    {
-        const int knn = 3;
-
-        vector<vector<DMatch> > matches;
-        dmatcher->knnMatch( query, train, matches, knn );
-
-        if( (int)matches.size() != queryDescCount )
-        {
-            ts->printf(cvtest::TS::LOG, "Incorrect matches count while test knnMatch() function (1).\n");
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-        }
-        else
-        {
-            int badCount = 0;
-            for( size_t i = 0; i < matches.size(); i++ )
-            {
-                if( (int)matches[i].size() != knn )
-                    badCount++;
-                else
-                {
-                    int localBadCount = 0;
-                    for( int k = 0; k < knn; k++ )
-                    {
-                        DMatch match = matches[i][k];
-                        if( (match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor+k) || (match.imgIdx != 0) )
-                            localBadCount++;
-                    }
-                    badCount += localBadCount > 0 ? 1 : 0;
-                }
-            }
-            if( (float)badCount > (float)queryDescCount*badPart )
-            {
-                ts->printf( cvtest::TS::LOG, "%f - too large bad matches part while test knnMatch() function (1).\n",
-                            (float)badCount/(float)queryDescCount );
-                ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-            }
-        }
-    }
-
-    // test version of knnMatch() with add()
-    {
-        const int knn = 2;
-        vector<vector<DMatch> > matches;
-        // make add() twice to test such case
-        dmatcher->add( vector<Mat>(1,train.rowRange(0, train.rows/2)) );
-        dmatcher->add( vector<Mat>(1,train.rowRange(train.rows/2, train.rows)) );
-        // prepare masks (make first nearest match illegal)
-        vector<Mat> masks(2);
-        for(int mi = 0; mi < 2; mi++ )
-        {
-            masks[mi] = Mat(query.rows, train.rows/2, CV_8UC1, Scalar::all(1));
-            for( int di = 0; di < queryDescCount/2; di++ )
-                masks[mi].col(di*countFactor).setTo(Scalar::all(0));
-        }
-
-        dmatcher->knnMatch( query, matches, knn, masks );
-
-        if( (int)matches.size() != queryDescCount )
-        {
-            ts->printf(cvtest::TS::LOG, "Incorrect matches count while test knnMatch() function (2).\n");
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-        }
-        else
-        {
-            int badCount = 0;
-            int shift = dmatcher->isMaskSupported() ? 1 : 0;
-            for( size_t i = 0; i < matches.size(); i++ )
-            {
-                if( (int)matches[i].size() != knn )
-                    badCount++;
-                else
-                {
-                    int localBadCount = 0;
-                    for( int k = 0; k < knn; k++ )
-                    {
-                        DMatch match = matches[i][k];
-                        {
-                            if( i < queryDescCount/2 )
-                            {
-                                if( (match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor + k + shift) ||
-                                    (match.imgIdx != 0) )
-                                    localBadCount++;
-                            }
-                            else
-                            {
-                                if( (match.queryIdx != (int)i) || (match.trainIdx != ((int)i-queryDescCount/2)*countFactor + k + shift) ||
-                                    (match.imgIdx != 1) )
-                                    localBadCount++;
-                            }
-                        }
-                    }
-                    badCount += localBadCount > 0 ? 1 : 0;
-                }
-            }
-            if( (float)badCount > (float)queryDescCount*badPart )
-            {
-                ts->printf( cvtest::TS::LOG, "%f - too large bad matches part while test knnMatch() function (2).\n",
-                            (float)badCount/(float)queryDescCount );
-                ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
-            }
-        }
-    }
-}
-
-void CV_DescriptorMatcherTest::radiusMatchTest( const Mat& query, const Mat& train )
-{
-    dmatcher->clear();
-    // test const version of match()
-    {
-        const float radius = 1.f/countFactor;
-        vector<vector<DMatch> > matches;
-        dmatcher->radiusMatch( query, train, matches, radius );
-
-        if( (int)matches.size() != queryDescCount )
-        {
-            ts->printf(cvtest::TS::LOG, "Incorrect matches count while test radiusMatch() function (1).\n");
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-        }
-        else
-        {
-            int badCount = 0;
-            for( size_t i = 0; i < matches.size(); i++ )
-            {
-                if( (int)matches[i].size() != 1 )
-                    badCount++;
-                else
-                {
-                    DMatch match = matches[i][0];
-                    if( (match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor) || (match.imgIdx != 0) )
-                        badCount++;
-                }
-            }
-            if( (float)badCount > (float)queryDescCount*badPart )
-            {
-                ts->printf( cvtest::TS::LOG, "%f - too large bad matches part while test radiusMatch() function (1).\n",
-                            (float)badCount/(float)queryDescCount );
-                ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-            }
-        }
-    }
-
-    // test version of match() with add()
-    {
-        int n = 3;
-        const float radius = 1.f/countFactor * n;
-        vector<vector<DMatch> > matches;
-        // make add() twice to test such case
-        dmatcher->add( vector<Mat>(1,train.rowRange(0, train.rows/2)) );
-        dmatcher->add( vector<Mat>(1,train.rowRange(train.rows/2, train.rows)) );
-        // prepare masks (make first nearest match illegal)
-        vector<Mat> masks(2);
-        for(int mi = 0; mi < 2; mi++ )
-        {
-            masks[mi] = Mat(query.rows, train.rows/2, CV_8UC1, Scalar::all(1));
-            for( int di = 0; di < queryDescCount/2; di++ )
-                masks[mi].col(di*countFactor).setTo(Scalar::all(0));
-        }
-
-        dmatcher->radiusMatch( query, matches, radius, masks );
-
-        //int curRes = cvtest::TS::OK;
-        if( (int)matches.size() != queryDescCount )
-        {
-            ts->printf(cvtest::TS::LOG, "Incorrect matches count while test radiusMatch() function (1).\n");
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
-        }
-
-        int badCount = 0;
-        int shift = dmatcher->isMaskSupported() ? 1 : 0;
-        int needMatchCount = dmatcher->isMaskSupported() ? n-1 : n;
-        for( size_t i = 0; i < matches.size(); i++ )
-        {
-            if( (int)matches[i].size() != needMatchCount )
-                badCount++;
-            else
-            {
-                int localBadCount = 0;
-                for( int k = 0; k < needMatchCount; k++ )
-                {
-                    DMatch match = matches[i][k];
-                    {
-                        if( i < queryDescCount/2 )
-                        {
-                            if( (match.queryIdx != (int)i) || (match.trainIdx != (int)i*countFactor + k + shift) ||
-                                (match.imgIdx != 0) )
-                                localBadCount++;
-                        }
-                        else
-                        {
-                            if( (match.queryIdx != (int)i) || (match.trainIdx != ((int)i-queryDescCount/2)*countFactor + k + shift) ||
-                                (match.imgIdx != 1) )
-                                localBadCount++;
-                        }
-                    }
-                }
-                badCount += localBadCount > 0 ? 1 : 0;
-            }
-        }
-        if( (float)badCount > (float)queryDescCount*badPart )
-        {
-            ts->printf( cvtest::TS::LOG, "%f - too large bad matches part while test radiusMatch() function (2).\n",
-                        (float)badCount/(float)queryDescCount );
-            ts->set_failed_test_info( cvtest::TS::FAIL_BAD_ACCURACY );
-        }
-    }
-}
-
-void CV_DescriptorMatcherTest::run( int )
-{
-    Mat query, train;
-    generateData( query, train );
-
-    matchTest( query, train );
-
-    knnMatchTest( query, train );
-
-    radiusMatchTest( query, train );
-}
-
-/****************************************************************************************\
-*                                Tests registrations                                     *
-\****************************************************************************************/
-
-/*
- * Detectors
- */
-
-
-TEST( Features2d_Detector_SIFT, regression )
-{
-    CV_FeatureDetectorTest test( "detector-sift", FeatureDetector::create("SIFT") );
-    test.safe_run();
-}
-
-TEST( Features2d_Detector_SURF, regression )
-{
-    CV_FeatureDetectorTest test( "detector-surf", FeatureDetector::create("SURF") );
-    test.safe_run();
-}
-
-/*
- * Descriptors
- */
-TEST( Features2d_DescriptorExtractor_SIFT, regression )
-{
-    CV_DescriptorExtractorTest<L2<float> > test( "descriptor-sift", 0.03f,
-                                                  DescriptorExtractor::create("SIFT") );
-    test.safe_run();
-}
-
-TEST( Features2d_DescriptorExtractor_SURF, regression )
-{
-    CV_DescriptorExtractorTest<L2<float> > test( "descriptor-surf",  0.05f,
-                                                 DescriptorExtractor::create("SURF") );
-    test.safe_run();
-}
-
-TEST( Features2d_DescriptorExtractor_OpponentSIFT, regression )
-{
-    CV_DescriptorExtractorTest<L2<float> > test( "descriptor-opponent-sift", 0.18f,
-                                                 DescriptorExtractor::create("OpponentSIFT") );
-    test.safe_run();
-}
-
-TEST( Features2d_DescriptorExtractor_OpponentSURF, regression )
-{
-    CV_DescriptorExtractorTest<L2<float> > test( "descriptor-opponent-surf",  0.3f,
-                                                 DescriptorExtractor::create("OpponentSURF") );
-    test.safe_run();
-}
-
-/*#if CV_SSE2
-TEST( Features2d_DescriptorExtractor_Calonder_uchar, regression )
-{
-    CV_CalonderDescriptorExtractorTest<uchar, L2<uchar> > test( "descriptor-calonder-uchar",
-                                                                std::numeric_limits<float>::epsilon() + 1,
-                                                                0.0132175f );
-    test.safe_run();
-}
-
-TEST( Features2d_DescriptorExtractor_Calonder_float, regression )
-{
-    CV_CalonderDescriptorExtractorTest<float, L2<float> > test( "descriptor-calonder-float",
-                                                                std::numeric_limits<float>::epsilon(),
-                                                                0.0221308f );
-    test.safe_run();
-}
-#endif*/ // CV_SSE2
-
-TEST(Features2d_BruteForceDescriptorMatcher_knnMatch, regression)
-{
-    const int sz = 100;
-    const int k = 3;
-
-    Ptr<DescriptorExtractor> ext = DescriptorExtractor::create("SURF");
-    ASSERT_TRUE(ext != NULL);
-
-    Ptr<FeatureDetector> det = FeatureDetector::create("SURF");
-    //"%YAML:1.0\nhessianThreshold: 8000.\noctaves: 3\noctaveLayers: 4\nupright: 0\n"
-    ASSERT_TRUE(det != NULL);
-
-    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
-    ASSERT_TRUE(matcher != NULL);
-
-    Mat imgT(sz, sz, CV_8U, Scalar(255));
-    line(imgT, Point(20, sz/2), Point(sz-21, sz/2), Scalar(100), 2);
-    line(imgT, Point(sz/2, 20), Point(sz/2, sz-21), Scalar(100), 2);
-    vector<KeyPoint> kpT;
-    kpT.push_back( KeyPoint(50, 50, 16, 0, 20000, 1, -1) );
-    kpT.push_back( KeyPoint(42, 42, 16, 160, 10000, 1, -1) );
-    Mat descT;
-    ext->compute(imgT, kpT, descT);
-
-    Mat imgQ(sz, sz, CV_8U, Scalar(255));
-    line(imgQ, Point(30, sz/2), Point(sz-31, sz/2), Scalar(100), 3);
-    line(imgQ, Point(sz/2, 30), Point(sz/2, sz-31), Scalar(100), 3);
-    vector<KeyPoint> kpQ;
-    det->detect(imgQ, kpQ);
-    Mat descQ;
-    ext->compute(imgQ, kpQ, descQ);
-
-    vector<vector<DMatch> > matches;
-
-    matcher->knnMatch(descQ, descT, matches, k);
-
-    //cout << "\nBest " << k << " matches to " << descT.rows << " train desc-s." << endl;
-    ASSERT_EQ(descQ.rows, static_cast<int>(matches.size()));
-    for(size_t i = 0; i<matches.size(); i++)
-    {
-        //cout << "\nmatches[" << i << "].size()==" << matches[i].size() << endl;
-        ASSERT_GE(min(k, descT.rows), static_cast<int>(matches[i].size()));
-        for(size_t j = 0; j<matches[i].size(); j++)
-        {
-            //cout << "\t" << matches[i][j].queryIdx << " -> " << matches[i][j].trainIdx << endl;
-            ASSERT_EQ(matches[i][j].queryIdx, static_cast<int>(i));
-        }
-    }
-}
-
-/*TEST(Features2d_DescriptorExtractorParamTest, regression)
-{
-    Ptr<DescriptorExtractor> s = DescriptorExtractor::create("SURF");
-    ASSERT_STREQ(s->paramHelp("extended").c_str(), "");
-}
-*/
-
-class CV_DetectPlanarTest : public cvtest::BaseTest
-{
-public:
-    CV_DetectPlanarTest(const string& _fname, int _min_ninliers) : fname(_fname), min_ninliers(_min_ninliers) {}
-
-protected:
-    void run(int)
-    {
-        Ptr<Feature2D> f = Algorithm::create<Feature2D>("Feature2D." + fname);
-        if(!f)
-            return;
-        string path = string(ts->get_data_path()) + "detectors_descriptors_evaluation/planar/";
-        string imgname1 = path + "box.png";
-        string imgname2 = path + "box_in_scene.png";
-        Mat img1 = imread(imgname1, 0);
-        Mat img2 = imread(imgname2, 0);
-        if( img1.empty() || img2.empty() )
-        {
-            ts->printf( cvtest::TS::LOG, "missing %s and/or %s\n", imgname1.c_str(), imgname2.c_str());
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-            return;
-        }
-        vector<KeyPoint> kpt1, kpt2;
-        Mat d1, d2;
-        f->operator()(img1, Mat(), kpt1, d1);
-        f->operator()(img1, Mat(), kpt2, d2);
-        for( size_t i = 0; i < kpt1.size(); i++ )
-            CV_Assert(kpt1[i].response > 0 );
-        for( size_t i = 0; i < kpt2.size(); i++ )
-            CV_Assert(kpt2[i].response > 0 );
-
-        vector<DMatch> matches;
-        BFMatcher(f->defaultNorm(), true).match(d1, d2, matches);
-
-        vector<Point2f> pt1, pt2;
-        for( size_t i = 0; i < matches.size(); i++ ) {
-            pt1.push_back(kpt1[matches[i].queryIdx].pt);
-            pt2.push_back(kpt2[matches[i].trainIdx].pt);
-        }
-
-        Mat inliers, H = findHomography(pt1, pt2, RANSAC, 10, inliers);
-        int ninliers = countNonZero(inliers);
-
-        if( ninliers < min_ninliers )
-        {
-            ts->printf( cvtest::TS::LOG, "too little inliers (%d) vs expected %d\n", ninliers, min_ninliers);
-            ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
-            return;
-        }
-    }
-
-    string fname;
-    int min_ninliers;
-};
-
-TEST(Features2d_SIFTHomographyTest, regression) { CV_DetectPlanarTest test("SIFT", 80); test.safe_run(); }
-TEST(Features2d_SURFHomographyTest, regression) { CV_DetectPlanarTest test("SURF", 80); test.safe_run(); }
-
-class FeatureDetectorUsingMaskTest : public cvtest::BaseTest
-{
-public:
-    FeatureDetectorUsingMaskTest(const Ptr<FeatureDetector>& featureDetector) :
-        featureDetector_(featureDetector)
-    {
-        CV_Assert(featureDetector_);
-    }
-
-protected:
-
-    void run(int)
-    {
-        const int nStepX = 2;
-        const int nStepY = 2;
-
-        const string imageFilename = string(ts->get_data_path()) + "/features2d/tsukuba.png";
-
-        Mat image = imread(imageFilename);
-        if(image.empty())
-        {
-            ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
-            ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
-            return;
-        }
-
-        Mat mask(image.size(), CV_8U);
-
-        const int stepX = image.size().width / nStepX;
-        const int stepY = image.size().height / nStepY;
-
-        vector<KeyPoint> keyPoints;
-        vector<Point2f> points;
-        for(int i=0; i<nStepX; ++i)
-            for(int j=0; j<nStepY; ++j)
-            {
-
-                mask.setTo(0);
-                Rect whiteArea(i * stepX, j * stepY, stepX, stepY);
-                mask(whiteArea).setTo(255);
-
-                featureDetector_->detect(image, keyPoints, mask);
-                KeyPoint::convert(keyPoints, points);
-
-                for(size_t k=0; k<points.size(); ++k)
-                {
-                    if ( !whiteArea.contains(points[k]) )
-                    {
-                        ts->printf(cvtest::TS::LOG, "The feature point is outside of the mask.");
-                        ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
-                        return;
-                    }
-                }
-            }
-
-        ts->set_failed_test_info( cvtest::TS::OK );
-    }
-
-    Ptr<FeatureDetector> featureDetector_;
-};
-
-TEST(Features2d_SIFT_using_mask, regression)
-{
-    FeatureDetectorUsingMaskTest test(Algorithm::create<FeatureDetector>("Feature2D.SIFT"));
-    test.safe_run();
-}
-
-TEST(DISABLED_Features2d_SURF_using_mask, regression)
-{
-    FeatureDetectorUsingMaskTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"));
-    test.safe_run();
-}
diff --git a/modules/nonfree/test/test_keypoints.cpp b/modules/nonfree/test/test_keypoints.cpp
deleted file mode 100644 (file)
index b046d75..0000000
+++ /dev/null
@@ -1,132 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                        Intel License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of Intel Corporation may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "test_precomp.hpp"
-#include "opencv2/highgui.hpp"
-
-using namespace std;
-using namespace cv;
-
-const string FEATURES2D_DIR = "features2d";
-const string IMAGE_FILENAME = "tsukuba.png";
-
-/****************************************************************************************\
-*                                     Test for KeyPoint                                  *
-\****************************************************************************************/
-
-class CV_FeatureDetectorKeypointsTest : public cvtest::BaseTest
-{
-public:
-    CV_FeatureDetectorKeypointsTest(const Ptr<FeatureDetector>& _detector) :
-        detector(_detector) {}
-
-protected:
-    virtual void run(int)
-    {
-        cv::initModule_features2d();
-        CV_Assert(detector);
-        string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
-
-        // Read the test image.
-        Mat image = imread(imgFilename);
-        if(image.empty())
-        {
-            ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imgFilename.c_str());
-            ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
-            return;
-        }
-
-        vector<KeyPoint> keypoints;
-        detector->detect(image, keypoints);
-
-        if(keypoints.empty())
-        {
-            ts->printf(cvtest::TS::LOG, "Detector can't find keypoints in image.\n");
-            ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
-            return;
-        }
-
-        Rect r(0, 0, image.cols, image.rows);
-        for(size_t i = 0; i < keypoints.size(); i++)
-        {
-            const KeyPoint& kp = keypoints[i];
-
-            if(!r.contains(kp.pt))
-            {
-                ts->printf(cvtest::TS::LOG, "KeyPoint::pt is out of image (x=%f, y=%f).\n", kp.pt.x, kp.pt.y);
-                ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
-                return;
-            }
-
-            if(kp.size <= 0.f)
-            {
-                ts->printf(cvtest::TS::LOG, "KeyPoint::size is not positive (%f).\n", kp.size);
-                ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
-                return;
-            }
-
-            if((kp.angle < 0.f && kp.angle != -1.f) || kp.angle >= 360.f)
-            {
-                ts->printf(cvtest::TS::LOG, "KeyPoint::angle is out of range [0, 360). It's %f.\n", kp.angle);
-                ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
-                return;
-            }
-        }
-        ts->set_failed_test_info(cvtest::TS::OK);
-    }
-
-    Ptr<FeatureDetector> detector;
-};
-
-
-// Registration of tests
-
-TEST(Features2d_Detector_Keypoints_SURF, validation)
-{
-    CV_FeatureDetectorKeypointsTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"));
-    test.safe_run();
-}
-
-TEST(Features2d_Detector_Keypoints_SIFT, validation)
-{
-    CV_FeatureDetectorKeypointsTest test(FeatureDetector::create("SIFT"));
-    test.safe_run();
-}
diff --git a/modules/nonfree/test/test_main.cpp b/modules/nonfree/test/test_main.cpp
deleted file mode 100644 (file)
index 6b24993..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "test_precomp.hpp"
-
-CV_TEST_MAIN("cv")
diff --git a/modules/nonfree/test/test_precomp.hpp b/modules/nonfree/test/test_precomp.hpp
deleted file mode 100644 (file)
index ba289b1..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifdef __GNUC__
-#  pragma GCC diagnostic ignored "-Wmissing-declarations"
-#  if defined __clang__ || defined __APPLE__
-#    pragma GCC diagnostic ignored "-Wmissing-prototypes"
-#    pragma GCC diagnostic ignored "-Wextra"
-#  endif
-#endif
-
-#ifndef __OPENCV_TEST_PRECOMP_HPP__
-#define __OPENCV_TEST_PRECOMP_HPP__
-
-#include <iostream>
-#include "opencv2/ts.hpp"
-#include "opencv2/ts/cuda_test.hpp"
-#include "opencv2/imgproc.hpp"
-#include "opencv2/highgui.hpp"
-#include "opencv2/nonfree.hpp"
-
-#include "opencv2/ts/cuda_test.hpp"
-
-#include "opencv2/opencv_modules.hpp"
-#include "cvconfig.h"
-
-#ifdef HAVE_OPENCV_OCL
-#  include "opencv2/nonfree/ocl.hpp"
-#endif
-
-#ifdef HAVE_CUDA
-#  include "opencv2/nonfree/cuda.hpp"
-#endif
-
-#endif
diff --git a/modules/nonfree/test/test_rotation_and_scale_invariance.cpp b/modules/nonfree/test/test_rotation_and_scale_invariance.cpp
deleted file mode 100644 (file)
index 47efc60..0000000
+++ /dev/null
@@ -1,710 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                        Intel License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000, Intel Corporation, all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of Intel Corporation may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "test_precomp.hpp"
-#include "opencv2/highgui.hpp"
-
-using namespace std;
-using namespace cv;
-
-const string IMAGE_TSUKUBA = "/features2d/tsukuba.png";
-const string IMAGE_BIKES = "/detectors_descriptors_evaluation/images_datasets/bikes/img1.png";
-
-#define SHOW_DEBUG_LOG 0
-
-static
-Mat generateHomography(float angle)
-{
-    // angle - rotation around Oz in degrees
-    float angleRadian = static_cast<float>(angle * CV_PI / 180);
-    Mat H = Mat::eye(3, 3, CV_32FC1);
-    H.at<float>(0,0) = H.at<float>(1,1) = std::cos(angleRadian);
-    H.at<float>(0,1) = -std::sin(angleRadian);
-    H.at<float>(1,0) =  std::sin(angleRadian);
-
-    return H;
-}
-
-static
-Mat rotateImage(const Mat& srcImage, float angle, Mat& dstImage, Mat& dstMask)
-{
-    // angle - rotation around Oz in degrees
-    float diag = std::sqrt(static_cast<float>(srcImage.cols * srcImage.cols + srcImage.rows * srcImage.rows));
-    Mat LUShift = Mat::eye(3, 3, CV_32FC1); // left up
-    LUShift.at<float>(0,2) = static_cast<float>(-srcImage.cols/2);
-    LUShift.at<float>(1,2) = static_cast<float>(-srcImage.rows/2);
-    Mat RDShift = Mat::eye(3, 3, CV_32FC1); // right down
-    RDShift.at<float>(0,2) = diag/2;
-    RDShift.at<float>(1,2) = diag/2;
-    Size sz(cvRound(diag), cvRound(diag));
-
-    Mat srcMask(srcImage.size(), CV_8UC1, Scalar(255));
-
-    Mat H = RDShift * generateHomography(angle) * LUShift;
-    warpPerspective(srcImage, dstImage, H, sz);
-    warpPerspective(srcMask, dstMask, H, sz);
-
-    return H;
-}
-
-void rotateKeyPoints(const vector<KeyPoint>& src, const Mat& H, float angle, vector<KeyPoint>& dst)
-{
-    // suppose that H is rotation given from rotateImage() and angle has value passed to rotateImage()
-    vector<Point2f> srcCenters, dstCenters;
-    KeyPoint::convert(src, srcCenters);
-
-    perspectiveTransform(srcCenters, dstCenters, H);
-
-    dst = src;
-    for(size_t i = 0; i < dst.size(); i++)
-    {
-        dst[i].pt = dstCenters[i];
-        float dstAngle = src[i].angle + angle;
-        if(dstAngle >= 360.f)
-            dstAngle -= 360.f;
-        dst[i].angle = dstAngle;
-    }
-}
-
-void scaleKeyPoints(const vector<KeyPoint>& src, vector<KeyPoint>& dst, float scale)
-{
-    dst.resize(src.size());
-    for(size_t i = 0; i < src.size(); i++)
-        dst[i] = KeyPoint(src[i].pt.x * scale, src[i].pt.y * scale, src[i].size * scale, src[i].angle);
-}
-
-static
-float calcCirclesIntersectArea(const Point2f& p0, float r0, const Point2f& p1, float r1)
-{
-    float c = static_cast<float>(norm(p0 - p1)), sqr_c = c * c;
-
-    float sqr_r0 = r0 * r0;
-    float sqr_r1 = r1 * r1;
-
-    if(r0 + r1 <= c)
-       return 0;
-
-    float minR = std::min(r0, r1);
-    float maxR = std::max(r0, r1);
-    if(c + minR <= maxR)
-        return static_cast<float>(CV_PI * minR * minR);
-
-    float cos_halfA0 = (sqr_r0 + sqr_c - sqr_r1) / (2 * r0 * c);
-    float cos_halfA1 = (sqr_r1 + sqr_c - sqr_r0) / (2 * r1 * c);
-
-    float A0 = 2 * acos(cos_halfA0);
-    float A1 = 2 * acos(cos_halfA1);
-
-    return  0.5f * sqr_r0 * (A0 - sin(A0)) +
-            0.5f * sqr_r1 * (A1 - sin(A1));
-}
-
-static
-float calcIntersectRatio(const Point2f& p0, float r0, const Point2f& p1, float r1)
-{
-    float intersectArea = calcCirclesIntersectArea(p0, r0, p1, r1);
-    float unionArea = static_cast<float>(CV_PI) * (r0 * r0 + r1 * r1) - intersectArea;
-    return intersectArea / unionArea;
-}
-
-static
-void matchKeyPoints(const vector<KeyPoint>& keypoints0, const Mat& H,
-                    const vector<KeyPoint>& keypoints1,
-                    vector<DMatch>& matches)
-{
-    vector<Point2f> points0;
-    KeyPoint::convert(keypoints0, points0);
-    Mat points0t;
-    if(H.empty())
-        points0t = Mat(points0);
-    else
-        perspectiveTransform(Mat(points0), points0t, H);
-
-    matches.clear();
-    vector<uchar> usedMask(keypoints1.size(), 0);
-    for(int i0 = 0; i0 < static_cast<int>(keypoints0.size()); i0++)
-    {
-        int nearestPointIndex = -1;
-        float maxIntersectRatio = 0.f;
-        const float r0 =  0.5f * keypoints0[i0].size;
-        for(size_t i1 = 0; i1 < keypoints1.size(); i1++)
-        {
-            if(nearestPointIndex >= 0 && usedMask[i1])
-                continue;
-
-            float r1 = 0.5f * keypoints1[i1].size;
-            float intersectRatio = calcIntersectRatio(points0t.at<Point2f>(i0), r0,
-                                                      keypoints1[i1].pt, r1);
-            if(intersectRatio > maxIntersectRatio)
-            {
-                maxIntersectRatio = intersectRatio;
-                nearestPointIndex = static_cast<int>(i1);
-            }
-        }
-
-        matches.push_back(DMatch(i0, nearestPointIndex, maxIntersectRatio));
-        if(nearestPointIndex >= 0)
-            usedMask[nearestPointIndex] = 1;
-    }
-}
-
-static void removeVerySmallKeypoints(vector<KeyPoint>& keypoints)
-{
-    size_t i, j = 0, n = keypoints.size();
-    for( i = 0; i < n; i++ )
-    {
-        if( (keypoints[i].octave & 128) != 0 )
-            ;
-        else
-            keypoints[j++] = keypoints[i];
-    }
-    keypoints.resize(j);
-}
-
-
-class DetectorRotationInvarianceTest : public cvtest::BaseTest
-{
-public:
-    DetectorRotationInvarianceTest(const Ptr<FeatureDetector>& _featureDetector,
-                                     float _minKeyPointMatchesRatio,
-                                     float _minAngleInliersRatio) :
-        featureDetector(_featureDetector),
-        minKeyPointMatchesRatio(_minKeyPointMatchesRatio),
-        minAngleInliersRatio(_minAngleInliersRatio)
-    {
-        CV_Assert(featureDetector);
-    }
-
-protected:
-
-    void run(int)
-    {
-        const string imageFilename = string(ts->get_data_path()) + IMAGE_TSUKUBA;
-
-        // Read test data
-        Mat image0 = imread(imageFilename), image1, mask1;
-        if(image0.empty())
-        {
-            ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
-            ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
-            return;
-        }
-
-        vector<KeyPoint> keypoints0;
-        featureDetector->detect(image0, keypoints0);
-        removeVerySmallKeypoints(keypoints0);
-        if(keypoints0.size() < 15)
-            CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
-
-        const int maxAngle = 360, angleStep = 15;
-        for(int angle = 0; angle < maxAngle; angle += angleStep)
-        {
-            Mat H = rotateImage(image0, static_cast<float>(angle), image1, mask1);
-
-            vector<KeyPoint> keypoints1;
-            featureDetector->detect(image1, keypoints1, mask1);
-            removeVerySmallKeypoints(keypoints1);
-
-            vector<DMatch> matches;
-            matchKeyPoints(keypoints0, H, keypoints1, matches);
-
-            int angleInliersCount = 0;
-
-            const float minIntersectRatio = 0.5f;
-            int keyPointMatchesCount = 0;
-            for(size_t m = 0; m < matches.size(); m++)
-            {
-                if(matches[m].distance < minIntersectRatio)
-                    continue;
-
-                keyPointMatchesCount++;
-
-                // Check does this inlier have consistent angles
-                const float maxAngleDiff = 15.f; // grad
-                float angle0 = keypoints0[matches[m].queryIdx].angle;
-                float angle1 = keypoints1[matches[m].trainIdx].angle;
-                if(angle0 == -1 || angle1 == -1)
-                    CV_Error(Error::StsBadArg, "Given FeatureDetector is not rotation invariant, it can not be tested here.\n");
-                CV_Assert(angle0 >= 0.f && angle0 < 360.f);
-                CV_Assert(angle1 >= 0.f && angle1 < 360.f);
-
-                float rotAngle0 = angle0 + angle;
-                if(rotAngle0 >= 360.f)
-                    rotAngle0 -= 360.f;
-
-                float angleDiff = std::max(rotAngle0, angle1) - std::min(rotAngle0, angle1);
-                angleDiff = std::min(angleDiff, static_cast<float>(360.f - angleDiff));
-                CV_Assert(angleDiff >= 0.f);
-                bool isAngleCorrect = angleDiff < maxAngleDiff;
-                if(isAngleCorrect)
-                    angleInliersCount++;
-            }
-
-            float keyPointMatchesRatio = static_cast<float>(keyPointMatchesCount) / keypoints0.size();
-            if(keyPointMatchesRatio < minKeyPointMatchesRatio)
-            {
-                ts->printf(cvtest::TS::LOG, "Incorrect keyPointMatchesRatio: curr = %f, min = %f.\n",
-                           keyPointMatchesRatio, minKeyPointMatchesRatio);
-                ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
-                return;
-            }
-
-            if(keyPointMatchesCount)
-            {
-                float angleInliersRatio = static_cast<float>(angleInliersCount) / keyPointMatchesCount;
-                if(angleInliersRatio < minAngleInliersRatio)
-                {
-                    ts->printf(cvtest::TS::LOG, "Incorrect angleInliersRatio: curr = %f, min = %f.\n",
-                               angleInliersRatio, minAngleInliersRatio);
-                    ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
-                    return;
-                }
-            }
-#if SHOW_DEBUG_LOG
-            std::cout << "keyPointMatchesRatio - " << keyPointMatchesRatio
-                << " - angleInliersRatio " << static_cast<float>(angleInliersCount) / keyPointMatchesCount << std::endl;
-#endif
-        }
-        ts->set_failed_test_info( cvtest::TS::OK );
-    }
-
-    Ptr<FeatureDetector> featureDetector;
-    float minKeyPointMatchesRatio;
-    float minAngleInliersRatio;
-};
-
-class DescriptorRotationInvarianceTest : public cvtest::BaseTest
-{
-public:
-    DescriptorRotationInvarianceTest(const Ptr<FeatureDetector>& _featureDetector,
-                                     const Ptr<DescriptorExtractor>& _descriptorExtractor,
-                                     int _normType,
-                                     float _minDescInliersRatio) :
-        featureDetector(_featureDetector),
-        descriptorExtractor(_descriptorExtractor),
-        normType(_normType),
-        minDescInliersRatio(_minDescInliersRatio)
-    {
-        CV_Assert(featureDetector);
-        CV_Assert(descriptorExtractor);
-    }
-
-protected:
-
-    void run(int)
-    {
-        const string imageFilename = string(ts->get_data_path()) + IMAGE_TSUKUBA;
-
-        // Read test data
-        Mat image0 = imread(imageFilename), image1, mask1;
-        if(image0.empty())
-        {
-            ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
-            ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
-            return;
-        }
-
-        vector<KeyPoint> keypoints0;
-        Mat descriptors0;
-        featureDetector->detect(image0, keypoints0);
-        removeVerySmallKeypoints(keypoints0);
-        if(keypoints0.size() < 15)
-            CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
-        descriptorExtractor->compute(image0, keypoints0, descriptors0);
-
-        BFMatcher bfmatcher(normType);
-
-        const float minIntersectRatio = 0.5f;
-        const int maxAngle = 360, angleStep = 15;
-        for(int angle = 0; angle < maxAngle; angle += angleStep)
-        {
-            Mat H = rotateImage(image0, static_cast<float>(angle), image1, mask1);
-
-            vector<KeyPoint> keypoints1;
-            rotateKeyPoints(keypoints0, H, static_cast<float>(angle), keypoints1);
-            Mat descriptors1;
-            descriptorExtractor->compute(image1, keypoints1, descriptors1);
-
-            vector<DMatch> descMatches;
-            bfmatcher.match(descriptors0, descriptors1, descMatches);
-
-            int descInliersCount = 0;
-            for(size_t m = 0; m < descMatches.size(); m++)
-            {
-                const KeyPoint& transformed_p0 = keypoints1[descMatches[m].queryIdx];
-                const KeyPoint& p1 = keypoints1[descMatches[m].trainIdx];
-                if(calcIntersectRatio(transformed_p0.pt, 0.5f * transformed_p0.size,
-                                      p1.pt, 0.5f * p1.size) >= minIntersectRatio)
-                {
-                    descInliersCount++;
-                }
-            }
-
-            float descInliersRatio = static_cast<float>(descInliersCount) / keypoints0.size();
-            if(descInliersRatio < minDescInliersRatio)
-            {
-                ts->printf(cvtest::TS::LOG, "Incorrect descInliersRatio: curr = %f, min = %f.\n",
-                           descInliersRatio, minDescInliersRatio);
-                ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
-                return;
-            }
-#if SHOW_DEBUG_LOG
-            std::cout << "descInliersRatio " << static_cast<float>(descInliersCount) / keypoints0.size() << std::endl;
-#endif
-        }
-        ts->set_failed_test_info( cvtest::TS::OK );
-    }
-
-    Ptr<FeatureDetector> featureDetector;
-    Ptr<DescriptorExtractor> descriptorExtractor;
-    int normType;
-    float minDescInliersRatio;
-};
-
-
-class DetectorScaleInvarianceTest : public cvtest::BaseTest
-{
-public:
-    DetectorScaleInvarianceTest(const Ptr<FeatureDetector>& _featureDetector,
-                                float _minKeyPointMatchesRatio,
-                                float _minScaleInliersRatio) :
-        featureDetector(_featureDetector),
-        minKeyPointMatchesRatio(_minKeyPointMatchesRatio),
-        minScaleInliersRatio(_minScaleInliersRatio)
-    {
-        CV_Assert(featureDetector);
-    }
-
-protected:
-
-    void run(int)
-    {
-        const string imageFilename = string(ts->get_data_path()) + IMAGE_BIKES;
-
-        // Read test data
-        Mat image0 = imread(imageFilename);
-        if(image0.empty())
-        {
-            ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
-            ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
-            return;
-        }
-
-        vector<KeyPoint> keypoints0;
-        featureDetector->detect(image0, keypoints0);
-        removeVerySmallKeypoints(keypoints0);
-        if(keypoints0.size() < 15)
-            CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
-
-        for(int scaleIdx = 1; scaleIdx <= 3; scaleIdx++)
-        {
-            float scale = 1.f + scaleIdx * 0.5f;
-            Mat image1;
-            resize(image0, image1, Size(), 1./scale, 1./scale);
-
-            vector<KeyPoint> keypoints1, osiKeypoints1; // osi - original size image
-            featureDetector->detect(image1, keypoints1);
-            removeVerySmallKeypoints(keypoints1);
-            if(keypoints1.size() < 15)
-                CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
-
-            if(keypoints1.size() > keypoints0.size())
-            {
-                ts->printf(cvtest::TS::LOG, "Strange behavior of the detector. "
-                    "It gives more points count in an image of the smaller size.\n"
-                    "original size (%d, %d), keypoints count = %d\n"
-                    "reduced size (%d, %d), keypoints count = %d\n",
-                    image0.cols, image0.rows, keypoints0.size(),
-                    image1.cols, image1.rows, keypoints1.size());
-                ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_OUTPUT);
-                return;
-            }
-
-            scaleKeyPoints(keypoints1, osiKeypoints1, scale);
-
-            vector<DMatch> matches;
-            // image1 is query image (it's reduced image0)
-            // image0 is train image
-            matchKeyPoints(osiKeypoints1, Mat(), keypoints0, matches);
-
-            const float minIntersectRatio = 0.5f;
-            int keyPointMatchesCount = 0;
-            int scaleInliersCount = 0;
-
-            for(size_t m = 0; m < matches.size(); m++)
-            {
-                if(matches[m].distance < minIntersectRatio)
-                    continue;
-
-                keyPointMatchesCount++;
-
-                // Check does this inlier have consistent sizes
-                const float maxSizeDiff = 0.8f;//0.9f; // grad
-                float size0 = keypoints0[matches[m].trainIdx].size;
-                float size1 = osiKeypoints1[matches[m].queryIdx].size;
-                CV_Assert(size0 > 0 && size1 > 0);
-                if(std::min(size0, size1) > maxSizeDiff * std::max(size0, size1))
-                    scaleInliersCount++;
-            }
-
-            float keyPointMatchesRatio = static_cast<float>(keyPointMatchesCount) / keypoints1.size();
-            if(keyPointMatchesRatio < minKeyPointMatchesRatio)
-            {
-                ts->printf(cvtest::TS::LOG, "Incorrect keyPointMatchesRatio: curr = %f, min = %f.\n",
-                           keyPointMatchesRatio, minKeyPointMatchesRatio);
-                ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
-                return;
-            }
-
-            if(keyPointMatchesCount)
-            {
-                float scaleInliersRatio = static_cast<float>(scaleInliersCount) / keyPointMatchesCount;
-                if(scaleInliersRatio < minScaleInliersRatio)
-                {
-                    ts->printf(cvtest::TS::LOG, "Incorrect scaleInliersRatio: curr = %f, min = %f.\n",
-                               scaleInliersRatio, minScaleInliersRatio);
-                    ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
-                    return;
-                }
-            }
-#if SHOW_DEBUG_LOG
-            std::cout << "keyPointMatchesRatio - " << keyPointMatchesRatio
-                << " - scaleInliersRatio " << static_cast<float>(scaleInliersCount) / keyPointMatchesCount << std::endl;
-#endif
-        }
-        ts->set_failed_test_info( cvtest::TS::OK );
-    }
-
-    Ptr<FeatureDetector> featureDetector;
-    float minKeyPointMatchesRatio;
-    float minScaleInliersRatio;
-};
-
-class DescriptorScaleInvarianceTest : public cvtest::BaseTest
-{
-public:
-    DescriptorScaleInvarianceTest(const Ptr<FeatureDetector>& _featureDetector,
-                                const Ptr<DescriptorExtractor>& _descriptorExtractor,
-                                int _normType,
-                                float _minDescInliersRatio) :
-        featureDetector(_featureDetector),
-        descriptorExtractor(_descriptorExtractor),
-        normType(_normType),
-        minDescInliersRatio(_minDescInliersRatio)
-    {
-        CV_Assert(featureDetector);
-        CV_Assert(descriptorExtractor);
-    }
-
-protected:
-
-    void run(int)
-    {
-        const string imageFilename = string(ts->get_data_path()) + IMAGE_BIKES;
-
-        // Read test data
-        Mat image0 = imread(imageFilename);
-        if(image0.empty())
-        {
-            ts->printf(cvtest::TS::LOG, "Image %s can not be read.\n", imageFilename.c_str());
-            ts->set_failed_test_info(cvtest::TS::FAIL_INVALID_TEST_DATA);
-            return;
-        }
-
-        vector<KeyPoint> keypoints0;
-        featureDetector->detect(image0, keypoints0);
-        removeVerySmallKeypoints(keypoints0);
-        if(keypoints0.size() < 15)
-            CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
-        Mat descriptors0;
-        descriptorExtractor->compute(image0, keypoints0, descriptors0);
-
-        BFMatcher bfmatcher(normType);
-        for(int scaleIdx = 1; scaleIdx <= 3; scaleIdx++)
-        {
-            float scale = 1.f + scaleIdx * 0.5f;
-
-            Mat image1;
-            resize(image0, image1, Size(), 1./scale, 1./scale);
-
-            vector<KeyPoint> keypoints1;
-            scaleKeyPoints(keypoints0, keypoints1, 1.0f/scale);
-            Mat descriptors1;
-            descriptorExtractor->compute(image1, keypoints1, descriptors1);
-
-            vector<DMatch> descMatches;
-            bfmatcher.match(descriptors0, descriptors1, descMatches);
-
-            const float minIntersectRatio = 0.5f;
-            int descInliersCount = 0;
-            for(size_t m = 0; m < descMatches.size(); m++)
-            {
-                const KeyPoint& transformed_p0 = keypoints0[descMatches[m].queryIdx];
-                const KeyPoint& p1 = keypoints0[descMatches[m].trainIdx];
-                if(calcIntersectRatio(transformed_p0.pt, 0.5f * transformed_p0.size,
-                                      p1.pt, 0.5f * p1.size) >= minIntersectRatio)
-                {
-                    descInliersCount++;
-                }
-            }
-
-            float descInliersRatio = static_cast<float>(descInliersCount) / keypoints0.size();
-            if(descInliersRatio < minDescInliersRatio)
-            {
-                ts->printf(cvtest::TS::LOG, "Incorrect descInliersRatio: curr = %f, min = %f.\n",
-                           descInliersRatio, minDescInliersRatio);
-                ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
-                return;
-            }
-#if SHOW_DEBUG_LOG
-            std::cout << "descInliersRatio " << static_cast<float>(descInliersCount) / keypoints0.size() << std::endl;
-#endif
-        }
-        ts->set_failed_test_info( cvtest::TS::OK );
-    }
-
-    Ptr<FeatureDetector> featureDetector;
-    Ptr<DescriptorExtractor> descriptorExtractor;
-    int normType;
-    float minKeyPointMatchesRatio;
-    float minDescInliersRatio;
-};
-
-// Tests registration
-
-/*
- * Detector's rotation invariance check
- */
-TEST(Features2d_RotationInvariance_Detector_SURF, regression)
-{
-    DetectorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
-                                        0.44f,
-                                        0.76f);
-    test.safe_run();
-}
-
-TEST(Features2d_RotationInvariance_Detector_SIFT, DISABLED_regression)
-{
-    DetectorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SIFT"),
-                                        0.45f,
-                                        0.70f);
-    test.safe_run();
-}
-
-/*
- * Descriptors's rotation invariance check
- */
-TEST(Features2d_RotationInvariance_Descriptor_SURF, regression)
-{
-    DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
-                                          Algorithm::create<DescriptorExtractor>("Feature2D.SURF"),
-                                          NORM_L1,
-                                          0.83f);
-    test.safe_run();
-}
-
-TEST(Features2d_RotationInvariance_Descriptor_SIFT, regression)
-{
-    DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SIFT"),
-                                          Algorithm::create<DescriptorExtractor>("Feature2D.SIFT"),
-                                          NORM_L1,
-                                          0.98f);
-    test.safe_run();
-}
-
-/*
- * Detector's scale invariance check
- */
-TEST(Features2d_ScaleInvariance_Detector_SURF, regression)
-{
-    DetectorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
-                                     0.64f,
-                                     0.84f);
-    test.safe_run();
-}
-
-TEST(Features2d_ScaleInvariance_Detector_SIFT, regression)
-{
-    DetectorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SIFT"),
-                                     0.69f,
-                                     0.99f);
-    test.safe_run();
-}
-
-/*
- * Descriptor's scale invariance check
- */
-TEST(Features2d_ScaleInvariance_Descriptor_SURF, regression)
-{
-    DescriptorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SURF"),
-                                       Algorithm::create<DescriptorExtractor>("Feature2D.SURF"),
-                                       NORM_L1,
-                                       0.61f);
-    test.safe_run();
-}
-
-TEST(Features2d_ScaleInvariance_Descriptor_SIFT, regression)
-{
-    DescriptorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.SIFT"),
-                                       Algorithm::create<DescriptorExtractor>("Feature2D.SIFT"),
-                                       NORM_L1,
-                                       0.78f);
-    test.safe_run();
-}
-
-
-TEST(Features2d_RotationInvariance2_Detector_SURF, regression)
-{
-    Mat cross(100, 100, CV_8UC1, Scalar(255));
-    line(cross, Point(30, 50), Point(69, 50), Scalar(100), 3);
-    line(cross, Point(50, 30), Point(50, 69), Scalar(100), 3);
-
-    SURF surf(8000., 3, 4, true, false);
-
-    vector<KeyPoint> keypoints;
-
-    surf(cross, noArray(), keypoints);
-
-    ASSERT_EQ(keypoints.size(), (vector<KeyPoint>::size_type) 5);
-    ASSERT_LT( fabs(keypoints[1].response - keypoints[2].response), 1e-6);
-    ASSERT_LT( fabs(keypoints[1].response - keypoints[3].response), 1e-6);
-    ASSERT_LT( fabs(keypoints[1].response - keypoints[4].response), 1e-6);
-}
diff --git a/modules/nonfree/test/test_surf.cuda.cpp b/modules/nonfree/test/test_surf.cuda.cpp
deleted file mode 100644 (file)
index 4011f3c..0000000
+++ /dev/null
@@ -1,198 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "test_precomp.hpp"
-
-#ifdef HAVE_CUDA
-
-using namespace cvtest;
-
-/////////////////////////////////////////////////////////////////////////////////////////////////
-// SURF
-
-#ifdef HAVE_OPENCV_CUDAARITHM
-
-namespace
-{
-    IMPLEMENT_PARAM_CLASS(SURF_HessianThreshold, double)
-    IMPLEMENT_PARAM_CLASS(SURF_Octaves, int)
-    IMPLEMENT_PARAM_CLASS(SURF_OctaveLayers, int)
-    IMPLEMENT_PARAM_CLASS(SURF_Extended, bool)
-    IMPLEMENT_PARAM_CLASS(SURF_Upright, bool)
-}
-
-PARAM_TEST_CASE(SURF, SURF_HessianThreshold, SURF_Octaves, SURF_OctaveLayers, SURF_Extended, SURF_Upright)
-{
-    double hessianThreshold;
-    int nOctaves;
-    int nOctaveLayers;
-    bool extended;
-    bool upright;
-
-    virtual void SetUp()
-    {
-        hessianThreshold = GET_PARAM(0);
-        nOctaves = GET_PARAM(1);
-        nOctaveLayers = GET_PARAM(2);
-        extended = GET_PARAM(3);
-        upright = GET_PARAM(4);
-    }
-};
-
-CUDA_TEST_P(SURF, Detector)
-{
-    cv::Mat image = readImage("../gpu/features2d/aloe.png", cv::IMREAD_GRAYSCALE);
-    ASSERT_FALSE(image.empty());
-
-    cv::cuda::SURF_CUDA surf;
-    surf.hessianThreshold = hessianThreshold;
-    surf.nOctaves = nOctaves;
-    surf.nOctaveLayers = nOctaveLayers;
-    surf.extended = extended;
-    surf.upright = upright;
-    surf.keypointsRatio = 0.05f;
-
-    std::vector<cv::KeyPoint> keypoints;
-    surf(loadMat(image), cv::cuda::GpuMat(), keypoints);
-
-    cv::SURF surf_gold;
-    surf_gold.hessianThreshold = hessianThreshold;
-    surf_gold.nOctaves = nOctaves;
-    surf_gold.nOctaveLayers = nOctaveLayers;
-    surf_gold.extended = extended;
-    surf_gold.upright = upright;
-
-    std::vector<cv::KeyPoint> keypoints_gold;
-    surf_gold(image, cv::noArray(), keypoints_gold);
-
-    ASSERT_EQ(keypoints_gold.size(), keypoints.size());
-    int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints);
-    double matchedRatio = static_cast<double>(matchedCount) / keypoints_gold.size();
-
-    EXPECT_GT(matchedRatio, 0.95);
-}
-
-CUDA_TEST_P(SURF, Detector_Masked)
-{
-    cv::Mat image = readImage("../gpu/features2d/aloe.png", cv::IMREAD_GRAYSCALE);
-    ASSERT_FALSE(image.empty());
-
-    cv::Mat mask(image.size(), CV_8UC1, cv::Scalar::all(1));
-    mask(cv::Range(0, image.rows / 2), cv::Range(0, image.cols / 2)).setTo(cv::Scalar::all(0));
-
-    cv::cuda::SURF_CUDA surf;
-    surf.hessianThreshold = hessianThreshold;
-    surf.nOctaves = nOctaves;
-    surf.nOctaveLayers = nOctaveLayers;
-    surf.extended = extended;
-    surf.upright = upright;
-    surf.keypointsRatio = 0.05f;
-
-    std::vector<cv::KeyPoint> keypoints;
-    surf(loadMat(image), loadMat(mask), keypoints);
-
-    cv::SURF surf_gold;
-    surf_gold.hessianThreshold = hessianThreshold;
-    surf_gold.nOctaves = nOctaves;
-    surf_gold.nOctaveLayers = nOctaveLayers;
-    surf_gold.extended = extended;
-    surf_gold.upright = upright;
-
-    std::vector<cv::KeyPoint> keypoints_gold;
-    surf_gold(image, mask, keypoints_gold);
-
-    ASSERT_EQ(keypoints_gold.size(), keypoints.size());
-    int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints);
-    double matchedRatio = static_cast<double>(matchedCount) / keypoints_gold.size();
-
-    EXPECT_GT(matchedRatio, 0.95);
-}
-
-CUDA_TEST_P(SURF, Descriptor)
-{
-    cv::Mat image = readImage("../gpu/features2d/aloe.png", cv::IMREAD_GRAYSCALE);
-    ASSERT_FALSE(image.empty());
-
-    cv::cuda::SURF_CUDA surf;
-    surf.hessianThreshold = hessianThreshold;
-    surf.nOctaves = nOctaves;
-    surf.nOctaveLayers = nOctaveLayers;
-    surf.extended = extended;
-    surf.upright = upright;
-    surf.keypointsRatio = 0.05f;
-
-    cv::SURF surf_gold;
-    surf_gold.hessianThreshold = hessianThreshold;
-    surf_gold.nOctaves = nOctaves;
-    surf_gold.nOctaveLayers = nOctaveLayers;
-    surf_gold.extended = extended;
-    surf_gold.upright = upright;
-
-    std::vector<cv::KeyPoint> keypoints;
-    surf_gold(image, cv::noArray(), keypoints);
-
-    cv::cuda::GpuMat descriptors;
-    surf(loadMat(image), cv::cuda::GpuMat(), keypoints, descriptors, true);
-
-    cv::Mat descriptors_gold;
-    surf_gold(image, cv::noArray(), keypoints, descriptors_gold, true);
-
-    cv::BFMatcher matcher(surf.defaultNorm());
-    std::vector<cv::DMatch> matches;
-    matcher.match(descriptors_gold, cv::Mat(descriptors), matches);
-
-    int matchedCount = getMatchedPointsCount(keypoints, keypoints, matches);
-    double matchedRatio = static_cast<double>(matchedCount) / keypoints.size();
-
-    EXPECT_GT(matchedRatio, 0.6);
-}
-
-INSTANTIATE_TEST_CASE_P(CUDA_Features2D, SURF, testing::Combine(
-    testing::Values(SURF_HessianThreshold(100.0), SURF_HessianThreshold(500.0), SURF_HessianThreshold(1000.0)),
-    testing::Values(SURF_Octaves(3), SURF_Octaves(4)),
-    testing::Values(SURF_OctaveLayers(2), SURF_OctaveLayers(3)),
-    testing::Values(SURF_Extended(false), SURF_Extended(true)),
-    testing::Values(SURF_Upright(false), SURF_Upright(true))));
-
-#endif // HAVE_OPENCV_CUDAARITHM
-
-#endif // HAVE_CUDA
diff --git a/modules/nonfree/test/test_surf.ocl.cpp b/modules/nonfree/test/test_surf.ocl.cpp
deleted file mode 100644 (file)
index 217460a..0000000
+++ /dev/null
@@ -1,215 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-//  By downloading, copying, installing or using the software you agree to this license.
-//  If you do not agree to this license, do not download, install,
-//  copy or use the software.
-//
-//
-//                           License Agreement
-//                For Open Source Computer Vision Library
-//
-// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
-// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
-// Third party copyrights are property of their respective owners.
-//
-// @Authors
-//    Peng Xiao, pengxiao@multicorewareinc.com
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-//   * Redistribution's of source code must retain the above copyright notice,
-//     this list of conditions and the following disclaimer.
-//
-//   * Redistribution's in binary form must reproduce the above copyright notice,
-//     this list of conditions and the following disclaimer in the documentation
-//     and/or other materials provided with the distribution.
-//
-//   * The name of the copyright holders may not be used to endorse or promote products
-//     derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors as is and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-#include "test_precomp.hpp"
-
-#ifdef HAVE_OPENCV_OCL
-
-using namespace std;
-using std::tr1::get;
-
-static bool keyPointsEquals(const cv::KeyPoint& p1, const cv::KeyPoint& p2)
-{
-    const double maxPtDif = 0.1;
-    const double maxSizeDif = 0.1;
-    const double maxAngleDif = 0.1;
-    const double maxResponseDif = 0.01;
-
-    double dist = cv::norm(p1.pt - p2.pt);
-
-    if (dist < maxPtDif &&
-        fabs(p1.size - p2.size) < maxSizeDif &&
-        abs(p1.angle - p2.angle) < maxAngleDif &&
-        abs(p1.response - p2.response) < maxResponseDif &&
-        p1.octave == p2.octave &&
-        p1.class_id == p2.class_id)
-    {
-        return true;
-    }
-
-    return false;
-}
-
-static int getMatchedPointsCount(std::vector<cv::KeyPoint>& gold, std::vector<cv::KeyPoint>& actual)
-{
-    std::sort(actual.begin(), actual.end(), perf::comparators::KeypointGreater());
-    std::sort(gold.begin(), gold.end(), perf::comparators::KeypointGreater());
-
-    int validCount = 0;
-
-    for (size_t i = 0; i < gold.size(); ++i)
-    {
-        const cv::KeyPoint& p1 = gold[i];
-        const cv::KeyPoint& p2 = actual[i];
-
-        if (keyPointsEquals(p1, p2))
-            ++validCount;
-    }
-
-    return validCount;
-}
-
-static int getMatchedPointsCount(const std::vector<cv::KeyPoint>& keypoints1, const std::vector<cv::KeyPoint>& keypoints2, const std::vector<cv::DMatch>& matches)
-{
-    int validCount = 0;
-
-    for (size_t i = 0; i < matches.size(); ++i)
-    {
-        const cv::DMatch& m = matches[i];
-
-        const cv::KeyPoint& p1 = keypoints1[m.queryIdx];
-        const cv::KeyPoint& p2 = keypoints2[m.trainIdx];
-
-        if (keyPointsEquals(p1, p2))
-            ++validCount;
-    }
-
-    return validCount;
-}
-
-IMPLEMENT_PARAM_CLASS(HessianThreshold, double)
-IMPLEMENT_PARAM_CLASS(Octaves, int)
-IMPLEMENT_PARAM_CLASS(OctaveLayers, int)
-IMPLEMENT_PARAM_CLASS(Extended, bool)
-IMPLEMENT_PARAM_CLASS(Upright, bool)
-
-PARAM_TEST_CASE(SURF, HessianThreshold, Octaves, OctaveLayers, Extended, Upright)
-{
-    double hessianThreshold;
-    int nOctaves;
-    int nOctaveLayers;
-    bool extended;
-    bool upright;
-
-    virtual void SetUp()
-    {
-        hessianThreshold = get<0>(GetParam());
-        nOctaves = get<1>(GetParam());
-        nOctaveLayers = get<2>(GetParam());
-        extended = get<3>(GetParam());
-        upright = get<4>(GetParam());
-    }
-};
-
-TEST_P(SURF, DISABLED_Detector)
-{
-    cv::Mat image  = cv::imread(string(cvtest::TS::ptr()->get_data_path()) + "shared/fruits.png", cv::IMREAD_GRAYSCALE);
-    ASSERT_FALSE(image.empty());
-
-    cv::ocl::SURF_OCL surf;
-    surf.hessianThreshold = static_cast<float>(hessianThreshold);
-    surf.nOctaves = nOctaves;
-    surf.nOctaveLayers = nOctaveLayers;
-    surf.extended = extended;
-    surf.upright = upright;
-    surf.keypointsRatio = 0.05f;
-
-    std::vector<cv::KeyPoint> keypoints;
-    surf(cv::ocl::oclMat(image), cv::ocl::oclMat(), keypoints);
-
-    cv::SURF surf_gold;
-    surf_gold.hessianThreshold = hessianThreshold;
-    surf_gold.nOctaves = nOctaves;
-    surf_gold.nOctaveLayers = nOctaveLayers;
-    surf_gold.extended = extended;
-    surf_gold.upright = upright;
-
-    std::vector<cv::KeyPoint> keypoints_gold;
-    surf_gold(image, cv::noArray(), keypoints_gold);
-
-    ASSERT_EQ(keypoints_gold.size(), keypoints.size());
-    int matchedCount = getMatchedPointsCount(keypoints_gold, keypoints);
-    double matchedRatio = static_cast<double>(matchedCount) / keypoints_gold.size();
-
-    EXPECT_GT(matchedRatio, 0.99);
-}
-
-TEST_P(SURF, DISABLED_Descriptor)
-{
-    cv::Mat image  = cv::imread(string(cvtest::TS::ptr()->get_data_path()) + "shared/fruits.png", cv::IMREAD_GRAYSCALE);
-    ASSERT_FALSE(image.empty());
-
-    cv::ocl::SURF_OCL surf;
-    surf.hessianThreshold = static_cast<float>(hessianThreshold);
-    surf.nOctaves = nOctaves;
-    surf.nOctaveLayers = nOctaveLayers;
-    surf.extended = extended;
-    surf.upright = upright;
-    surf.keypointsRatio = 0.05f;
-
-    cv::SURF surf_gold;
-    surf_gold.hessianThreshold = hessianThreshold;
-    surf_gold.nOctaves = nOctaves;
-    surf_gold.nOctaveLayers = nOctaveLayers;
-    surf_gold.extended = extended;
-    surf_gold.upright = upright;
-
-    std::vector<cv::KeyPoint> keypoints;
-    surf_gold(image, cv::noArray(), keypoints);
-
-    cv::ocl::oclMat descriptors;
-    surf(cv::ocl::oclMat(image), cv::ocl::oclMat(), keypoints, descriptors, true);
-
-    cv::Mat descriptors_gold;
-    surf_gold(image, cv::noArray(), keypoints, descriptors_gold, true);
-
-    cv::BFMatcher matcher(surf.defaultNorm());
-    std::vector<cv::DMatch> matches;
-    matcher.match(descriptors_gold, cv::Mat(descriptors), matches);
-
-    int matchedCount = getMatchedPointsCount(keypoints, keypoints, matches);
-    double matchedRatio = static_cast<double>(matchedCount) / keypoints.size();
-
-    EXPECT_GT(matchedRatio, 0.35);
-}
-
-INSTANTIATE_TEST_CASE_P(OCL_Features2D, SURF, testing::Combine(
-    testing::Values(HessianThreshold(500.0), HessianThreshold(1000.0)),
-    testing::Values(Octaves(3), Octaves(4)),
-    testing::Values(OctaveLayers(2), OctaveLayers(3)),
-    testing::Values(Extended(false), Extended(true)),
-    testing::Values(Upright(false), Upright(true))));
-
-#endif // HAVE_OPENCV_OCL
index b751298..164515c 100644 (file)
@@ -20,6 +20,7 @@ ocv_list_filterout(candidate_deps "^opencv_matlab$")
 ocv_list_filterout(candidate_deps "^opencv_tracking$")
 ocv_list_filterout(candidate_deps "^opencv_optflow$")
 ocv_list_filterout(candidate_deps "^opencv_bgsegm$")
+ocv_list_filterout(candidate_deps "^opencv_xfeatures2d$")
 
 ocv_add_module(${MODULE_NAME} BINDINGS OPTIONAL ${candidate_deps})
 
index 2aeafb2..e1a61b8 100644 (file)
@@ -1,3 +1,3 @@
 set(the_description "Images stitching")
 ocv_define_module(stitching opencv_imgproc opencv_features2d opencv_calib3d opencv_objdetect
-                  OPTIONAL opencv_cuda opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d opencv_nonfree)
+                  OPTIONAL opencv_cuda opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d)
index 26acad1..a163d90 100644 (file)
@@ -5,7 +5,7 @@
 
 SET(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_flann
     opencv_imgcodecs opencv_videoio opencv_highgui opencv_ml opencv_video
-    opencv_objdetect opencv_photo opencv_nonfree opencv_features2d opencv_calib3d
+    opencv_objdetect opencv_photo opencv_features2d opencv_calib3d
     opencv_stitching opencv_videostab opencv_shape)
 
 ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})
diff --git a/samples/cpp/bagofwords_classification.cpp b/samples/cpp/bagofwords_classification.cpp
deleted file mode 100644 (file)
index 1c50a0e..0000000
+++ /dev/null
@@ -1,2611 +0,0 @@
-#include "opencv2/opencv_modules.hpp"
-#include "opencv2/imgcodecs.hpp"
-#include "opencv2/highgui/highgui.hpp"
-#include "opencv2/imgproc/imgproc.hpp"
-#include "opencv2/features2d/features2d.hpp"
-#include "opencv2/nonfree/nonfree.hpp"
-#include "opencv2/ml/ml.hpp"
-
-#include <fstream>
-#include <iostream>
-#include <memory>
-#include <functional>
-
-#if defined WIN32 || defined _WIN32
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#undef min
-#undef max
-#include "sys/types.h"
-#endif
-#include <sys/stat.h>
-
-#define DEBUG_DESC_PROGRESS
-
-using namespace cv;
-using namespace cv::ml;
-using namespace std;
-
-const string paramsFile = "params.xml";
-const string vocabularyFile = "vocabulary.xml.gz";
-const string bowImageDescriptorsDir = "/bowImageDescriptors";
-const string svmsDir = "/svms";
-const string plotsDir = "/plots";
-
-static void help(char** argv)
-{
-    cout << "\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. \n"
-     << "It shows how to use detectors, descriptors and recognition methods \n"
-        "Using OpenCV version %s\n" << CV_VERSION << "\n"
-     << "Call: \n"
-    << "Format:\n ./" << argv[0] << " [VOC path] [result directory]  \n"
-    << "       or:  \n"
-    << " ./" << argv[0] << " [VOC path] [result directory] [feature detector] [descriptor extractor] [descriptor matcher] \n"
-    << "\n"
-    << "Input parameters: \n"
-    << "[VOC path]             Path to Pascal VOC data (e.g. /home/my/VOCdevkit/VOC2010). Note: VOC2007-VOC2010 are supported. \n"
-    << "[result directory]     Path to result diractory. Following folders will be created in [result directory]: \n"
-    << "                         bowImageDescriptors - to store image descriptors, \n"
-    << "                         svms - to store trained svms, \n"
-    << "                         plots - to store files for plots creating. \n"
-    << "[feature detector]     Feature detector name (e.g. SURF, FAST...) - see createFeatureDetector() function in detectors.cpp \n"
-    << "                         Currently 12/2010, this is FAST, STAR, SIFT, SURF, MSER, GFTT, HARRIS \n"
-    << "[descriptor extractor] Descriptor extractor name (e.g. SURF, SIFT) - see createDescriptorExtractor() function in descriptors.cpp \n"
-    << "                         Currently 12/2010, this is SURF, OpponentSIFT, SIFT, OpponentSURF, BRIEF \n"
-    << "[descriptor matcher]   Descriptor matcher name (e.g. BruteForce) - see createDescriptorMatcher() function in matchers.cpp \n"
-    << "                         Currently 12/2010, this is BruteForce, BruteForce-L1, FlannBased, BruteForce-Hamming, BruteForce-HammingLUT \n"
-    << "\n";
-}
-
-static void makeDir( const string& dir )
-{
-#if defined WIN32 || defined _WIN32
-    CreateDirectory( dir.c_str(), 0 );
-#else
-    mkdir( dir.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH );
-#endif
-}
-
-static void makeUsedDirs( const string& rootPath )
-{
-    makeDir(rootPath + bowImageDescriptorsDir);
-    makeDir(rootPath + svmsDir);
-    makeDir(rootPath + plotsDir);
-}
-
-/****************************************************************************************\
-*                    Classes to work with PASCAL VOC dataset                             *
-\****************************************************************************************/
-//
-// TODO: refactor this part of the code
-//
-
-
-//used to specify the (sub-)dataset over which operations are performed
-enum ObdDatasetType {CV_OBD_TRAIN, CV_OBD_TEST};
-
-class ObdObject
-{
-public:
-    string object_class;
-    Rect boundingBox;
-};
-
-//extended object data specific to VOC
-enum VocPose {CV_VOC_POSE_UNSPECIFIED, CV_VOC_POSE_FRONTAL, CV_VOC_POSE_REAR, CV_VOC_POSE_LEFT, CV_VOC_POSE_RIGHT};
-class VocObjectData
-{
-public:
-    bool difficult;
-    bool occluded;
-    bool truncated;
-    VocPose pose;
-};
-//enum VocDataset {CV_VOC2007, CV_VOC2008, CV_VOC2009, CV_VOC2010};
-enum VocPlotType {CV_VOC_PLOT_SCREEN, CV_VOC_PLOT_PNG};
-enum VocGT {CV_VOC_GT_NONE, CV_VOC_GT_DIFFICULT, CV_VOC_GT_PRESENT};
-enum VocConfCond {CV_VOC_CCOND_RECALL, CV_VOC_CCOND_SCORETHRESH};
-enum VocTask {CV_VOC_TASK_CLASSIFICATION, CV_VOC_TASK_DETECTION};
-
-class ObdImage
-{
-public:
-    ObdImage(string p_id, string p_path) : id(p_id), path(p_path) {}
-    string id;
-    string path;
-};
-
-//used by getDetectorGroundTruth to sort a two dimensional list of floats in descending order
-class ObdScoreIndexSorter
-{
-public:
-    float score;
-    int image_idx;
-    int obj_idx;
-    bool operator < (const ObdScoreIndexSorter& compare) const {return (score < compare.score);}
-};
-
-class VocData
-{
-public:
-    VocData( const string& vocPath, bool useTestDataset )
-        { initVoc( vocPath, useTestDataset ); }
-    ~VocData(){}
-    /* functions for returning classification/object data for multiple images given an object class */
-    void getClassImages(const string& obj_class, const ObdDatasetType dataset, vector<ObdImage>& images, vector<char>& object_present);
-    void getClassObjects(const string& obj_class, const ObdDatasetType dataset, vector<ObdImage>& images, vector<vector<ObdObject> >& objects);
-    void getClassObjects(const string& obj_class, const ObdDatasetType dataset, vector<ObdImage>& images, vector<vector<ObdObject> >& objects, vector<vector<VocObjectData> >& object_data, vector<VocGT>& ground_truth);
-    /* functions for returning object data for a single image given an image id */
-    ObdImage getObjects(const string& id, vector<ObdObject>& objects);
-    ObdImage getObjects(const string& id, vector<ObdObject>& objects, vector<VocObjectData>& object_data);
-    ObdImage getObjects(const string& obj_class, const string& id, vector<ObdObject>& objects, vector<VocObjectData>& object_data, VocGT& ground_truth);
-    /* functions for returning the ground truth (present/absent) for groups of images */
-    void getClassifierGroundTruth(const string& obj_class, const vector<ObdImage>& images, vector<char>& ground_truth);
-    void getClassifierGroundTruth(const string& obj_class, const vector<string>& images, vector<char>& ground_truth);
-    int getDetectorGroundTruth(const string& obj_class, const ObdDatasetType dataset, const vector<ObdImage>& images, const vector<vector<Rect> >& bounding_boxes, const vector<vector<float> >& scores, vector<vector<char> >& ground_truth, vector<vector<char> >& detection_difficult, bool ignore_difficult = true);
-    /* functions for writing VOC-compatible results files */
-    void writeClassifierResultsFile(const string& out_dir, const string& obj_class, const ObdDatasetType dataset, const vector<ObdImage>& images, const vector<float>& scores, const int competition = 1, const bool overwrite_ifexists = false);
-    /* functions for calculating metrics from a set of classification/detection results */
-    string getResultsFilename(const string& obj_class, const VocTask task, const ObdDatasetType dataset, const int competition = -1, const int number = -1);
-    void calcClassifierPrecRecall(const string& obj_class, const vector<ObdImage>& images, const vector<float>& scores, vector<float>& precision, vector<float>& recall, float& ap, vector<size_t>& ranking);
-    void calcClassifierPrecRecall(const string& obj_class, const vector<ObdImage>& images, const vector<float>& scores, vector<float>& precision, vector<float>& recall, float& ap);
-    void calcClassifierPrecRecall(const string& input_file, vector<float>& precision, vector<float>& recall, float& ap, bool outputRankingFile = false);
-    /* functions for calculating confusion matrices */
-    void calcClassifierConfMatRow(const string& obj_class, const vector<ObdImage>& images, const vector<float>& scores, const VocConfCond cond, const float threshold, vector<string>& output_headers, vector<float>& output_values);
-    void calcDetectorConfMatRow(const string& obj_class, const ObdDatasetType dataset, const vector<ObdImage>& images, const vector<vector<float> >& scores, const vector<vector<Rect> >& bounding_boxes, const VocConfCond cond, const float threshold, vector<string>& output_headers, vector<float>& output_values, bool ignore_difficult = true);
-    /* functions for outputting gnuplot output files */
-    void savePrecRecallToGnuplot(const string& output_file, const vector<float>& precision, const vector<float>& recall, const float ap, const string title = string(), const VocPlotType plot_type = CV_VOC_PLOT_SCREEN);
-    /* functions for reading in result/ground truth files */
-    void readClassifierGroundTruth(const string& obj_class, const ObdDatasetType dataset, vector<ObdImage>& images, vector<char>& object_present);
-    void readClassifierResultsFile(const std:: string& input_file, vector<ObdImage>& images, vector<float>& scores);
-    void readDetectorResultsFile(const string& input_file, vector<ObdImage>& images, vector<vector<float> >& scores, vector<vector<Rect> >& bounding_boxes);
-    /* functions for getting dataset info */
-    const vector<string>& getObjectClasses();
-    string getResultsDirectory();
-protected:
-    void initVoc( const string& vocPath, const bool useTestDataset );
-    void initVoc2007to2010( const string& vocPath, const bool useTestDataset);
-    void readClassifierGroundTruth(const string& filename, vector<string>& image_codes, vector<char>& object_present);
-    void readClassifierResultsFile(const string& input_file, vector<string>& image_codes, vector<float>& scores);
-    void readDetectorResultsFile(const string& input_file, vector<string>& image_codes, vector<vector<float> >& scores, vector<vector<Rect> >& bounding_boxes);
-    void extractVocObjects(const string filename, vector<ObdObject>& objects, vector<VocObjectData>& object_data);
-    string getImagePath(const string& input_str);
-
-    void getClassImages_impl(const string& obj_class, const string& dataset_str, vector<ObdImage>& images, vector<char>& object_present);
-    void calcPrecRecall_impl(const vector<char>& ground_truth, const vector<float>& scores, vector<float>& precision, vector<float>& recall, float& ap, vector<size_t>& ranking, int recall_normalization = -1);
-
-    //test two bounding boxes to see if they meet the overlap criteria defined in the VOC documentation
-    float testBoundingBoxesForOverlap(const Rect detection, const Rect ground_truth);
-    //extract class and dataset name from a VOC-standard classification/detection results filename
-    void extractDataFromResultsFilename(const string& input_file, string& class_name, string& dataset_name);
-    //get classifier ground truth for a single image
-    bool getClassifierGroundTruthImage(const string& obj_class, const string& id);
-
-    //utility functions
-    void getSortOrder(const vector<float>& values, vector<size_t>& order, bool descending = true);
-    int stringToInteger(const string input_str);
-    void readFileToString(const string filename, string& file_contents);
-    string integerToString(const int input_int);
-    string checkFilenamePathsep(const string filename, bool add_trailing_slash = false);
-    void convertImageCodesToObdImages(const vector<string>& image_codes, vector<ObdImage>& images);
-    int extractXMLBlock(const string src, const string tag, const int searchpos, string& tag_contents);
-    //utility sorter
-    struct orderingSorter
-    {
-        bool operator ()(std::pair<size_t, vector<float>::const_iterator> const& a, std::pair<size_t, vector<float>::const_iterator> const& b)
-        {
-            return (*a.second) > (*b.second);
-        }
-    };
-    //data members
-    string m_vocPath;
-    string m_vocName;
-    //string m_resPath;
-
-    string m_annotation_path;
-    string m_image_path;
-    string m_imageset_path;
-    string m_class_imageset_path;
-
-    vector<string> m_classifier_gt_all_ids;
-    vector<char> m_classifier_gt_all_present;
-    string m_classifier_gt_class;
-
-    //data members
-    string m_train_set;
-    string m_test_set;
-
-    vector<string> m_object_classes;
-
-
-    float m_min_overlap;
-    bool m_sampled_ap;
-};
-
-
-//Return the classification ground truth data for all images of a given VOC object class
-//--------------------------------------------------------------------------------------
-//INPUTS:
-// - obj_class          The VOC object class identifier string
-// - dataset            Specifies whether to extract images from the training or test set
-//OUTPUTS:
-// - images             An array of ObdImage containing info of all images extracted from the ground truth file
-// - object_present     An array of bools specifying whether the object defined by 'obj_class' is present in each image or not
-//NOTES:
-// This function is primarily useful for the classification task, where only
-// whether a given object is present or not in an image is required, and not each object instance's
-// position etc.
-void VocData::getClassImages(const string& obj_class, const ObdDatasetType dataset, vector<ObdImage>& images, vector<char>& object_present)
-{
-    string dataset_str;
-    //generate the filename of the classification ground-truth textfile for the object class
-    if (dataset == CV_OBD_TRAIN)
-    {
-        dataset_str = m_train_set;
-    } else {
-        dataset_str = m_test_set;
-    }
-
-    getClassImages_impl(obj_class, dataset_str, images, object_present);
-}
-
-void VocData::getClassImages_impl(const string& obj_class, const string& dataset_str, vector<ObdImage>& images, vector<char>& object_present)
-{
-    //generate the filename of the classification ground-truth textfile for the object class
-    string gtFilename = m_class_imageset_path;
-    gtFilename.replace(gtFilename.find("%s"),2,obj_class);
-    gtFilename.replace(gtFilename.find("%s"),2,dataset_str);
-
-    //parse the ground truth file, storing in two separate vectors
-    //for the image code and the ground truth value
-    vector<string> image_codes;
-    readClassifierGroundTruth(gtFilename, image_codes, object_present);
-
-    //prepare output arrays
-    images.clear();
-
-    convertImageCodesToObdImages(image_codes, images);
-}
-
-//Return the object data for all images of a given VOC object class
-//-----------------------------------------------------------------
-//INPUTS:
-// - obj_class          The VOC object class identifier string
-// - dataset            Specifies whether to extract images from the training or test set
-//OUTPUTS:
-// - images             An array of ObdImage containing info of all images in chosen dataset (tag, path etc.)
-// - objects            Contains the extended object info (bounding box etc.) for each object instance in each image
-// - object_data        Contains VOC-specific extended object info (marked difficult etc.)
-// - ground_truth       Specifies whether there are any difficult/non-difficult instances of the current
-//                          object class within each image
-//NOTES:
-// This function returns extended object information in addition to the absent/present
-// classification data returned by getClassImages. The objects returned for each image in the 'objects'
-// array are of all object classes present in the image, and not just the class defined by 'obj_class'.
-// 'ground_truth' can be used to determine quickly whether an object instance of the given class is present
-// in an image or not.
-void VocData::getClassObjects(const string& obj_class, const ObdDatasetType dataset, vector<ObdImage>& images, vector<vector<ObdObject> >& objects)
-{
-    vector<vector<VocObjectData> > object_data;
-    vector<VocGT> ground_truth;
-
-    getClassObjects(obj_class,dataset,images,objects,object_data,ground_truth);
-}
-
-void VocData::getClassObjects(const string& obj_class, const ObdDatasetType dataset, vector<ObdImage>& images, vector<vector<ObdObject> >& objects, vector<vector<VocObjectData> >& object_data, vector<VocGT>& ground_truth)
-{
-    //generate the filename of the classification ground-truth textfile for the object class
-    string gtFilename = m_class_imageset_path;
-    gtFilename.replace(gtFilename.find("%s"),2,obj_class);
-    if (dataset == CV_OBD_TRAIN)
-    {
-        gtFilename.replace(gtFilename.find("%s"),2,m_train_set);
-    } else {
-        gtFilename.replace(gtFilename.find("%s"),2,m_test_set);
-    }
-
-    //parse the ground truth file, storing in two separate vectors
-    //for the image code and the ground truth value
-    vector<string> image_codes;
-    vector<char> object_present;
-    readClassifierGroundTruth(gtFilename, image_codes, object_present);
-
-    //prepare output arrays
-    images.clear();
-    objects.clear();
-    object_data.clear();
-    ground_truth.clear();
-
-    string annotationFilename;
-    vector<ObdObject> image_objects;
-    vector<VocObjectData> image_object_data;
-    VocGT image_gt;
-
-    //transfer to output arrays and read in object data for each image
-    for (size_t i = 0; i < image_codes.size(); ++i)
-    {
-        ObdImage image = getObjects(obj_class, image_codes[i], image_objects, image_object_data, image_gt);
-
-        images.push_back(image);
-        objects.push_back(image_objects);
-        object_data.push_back(image_object_data);
-        ground_truth.push_back(image_gt);
-    }
-}
-
-//Return ground truth data for the objects present in an image with a given UID
-//-----------------------------------------------------------------------------
-//INPUTS:
-// - id                 VOC Dataset unique identifier (string code in form YYYY_XXXXXX where YYYY is the year)
-//OUTPUTS:
-// - obj_class (*3)     Specifies the object class to use to resolve 'ground_truth'
-// - objects            Contains the extended object info (bounding box etc.) for each object in the image
-// - object_data (*2,3) Contains VOC-specific extended object info (marked difficult etc.)
-// - ground_truth (*3)  Specifies whether there are any difficult/non-difficult instances of the current
-//                          object class within the image
-//RETURN VALUE:
-// ObdImage containing path and other details of image file with given code
-//NOTES:
-// There are three versions of this function
-//  * One returns a simple array of objects given an id [1]
-//  * One returns the same as (1) plus VOC specific object data [2]
-//  * One returns the same as (2) plus the ground_truth flag. This also requires an extra input obj_class [3]
-ObdImage VocData::getObjects(const string& id, vector<ObdObject>& objects)
-{
-    vector<VocObjectData> object_data;
-    ObdImage image = getObjects(id, objects, object_data);
-
-    return image;
-}
-
-ObdImage VocData::getObjects(const string& id, vector<ObdObject>& objects, vector<VocObjectData>& object_data)
-{
-    //first generate the filename of the annotation file
-    string annotationFilename = m_annotation_path;
-
-    annotationFilename.replace(annotationFilename.find("%s"),2,id);
-
-    //extract objects contained in the current image from the xml
-    extractVocObjects(annotationFilename,objects,object_data);
-
-    //generate image path from extracted string code
-    string path = getImagePath(id);
-
-    ObdImage image(id, path);
-    return image;
-}
-
-ObdImage VocData::getObjects(const string& obj_class, const string& id, vector<ObdObject>& objects, vector<VocObjectData>& object_data, VocGT& ground_truth)
-{
-
-    //extract object data (except for ground truth flag)
-    ObdImage image = getObjects(id,objects,object_data);
-
-    //pregenerate a flag to indicate whether the current class is present or not in the image
-    ground_truth = CV_VOC_GT_NONE;
-    //iterate through all objects in current image
-    for (size_t j = 0; j < objects.size(); ++j)
-    {
-        if (objects[j].object_class == obj_class)
-        {
-            if (object_data[j].difficult == false)
-            {
-                //if at least one non-difficult example is present, this flag is always set to CV_VOC_GT_PRESENT
-                ground_truth = CV_VOC_GT_PRESENT;
-                break;
-            } else {
-                //set if at least one object instance is present, but it is marked difficult
-                ground_truth = CV_VOC_GT_DIFFICULT;
-            }
-        }
-    }
-
-    return image;
-}
-
-//Return ground truth data for the presence/absence of a given object class in an arbitrary array of images
-//---------------------------------------------------------------------------------------------------------
-//INPUTS:
-// - obj_class          The VOC object class identifier string
-// - images             An array of ObdImage OR strings containing the images for which ground truth
-//                          will be computed
-//OUTPUTS:
-// - ground_truth       An output array indicating the presence/absence of obj_class within each image
-void VocData::getClassifierGroundTruth(const string& obj_class, const vector<ObdImage>& images, vector<char>& ground_truth)
-{
-    vector<char>(images.size()).swap(ground_truth);
-
-    vector<ObdObject> objects;
-    vector<VocObjectData> object_data;
-    vector<char>::iterator gt_it = ground_truth.begin();
-    for (vector<ObdImage>::const_iterator it = images.begin(); it != images.end(); ++it, ++gt_it)
-    {
-        //getObjects(obj_class, it->id, objects, object_data, voc_ground_truth);
-        (*gt_it) = (getClassifierGroundTruthImage(obj_class, it->id));
-    }
-}
-
-void VocData::getClassifierGroundTruth(const string& obj_class, const vector<string>& images, vector<char>& ground_truth)
-{
-    vector<char>(images.size()).swap(ground_truth);
-
-    vector<ObdObject> objects;
-    vector<VocObjectData> object_data;
-    vector<char>::iterator gt_it = ground_truth.begin();
-    for (vector<string>::const_iterator it = images.begin(); it != images.end(); ++it, ++gt_it)
-    {
-        //getObjects(obj_class, (*it), objects, object_data, voc_ground_truth);
-        (*gt_it) = (getClassifierGroundTruthImage(obj_class, (*it)));
-    }
-}
-
-//Return ground truth data for the accuracy of detection results
-//--------------------------------------------------------------
-//INPUTS:
-// - obj_class          The VOC object class identifier string
-// - images             An array of ObdImage containing the images for which ground truth
-//                          will be computed
-// - bounding_boxes     A 2D input array containing the bounding box rects of the objects of
-//                          obj_class which were detected in each image
-//OUTPUTS:
-// - ground_truth       A 2D output array indicating whether each object detection was accurate
-//                          or not
-// - detection_difficult A 2D output array indicating whether the detection fired on an object
-//                          marked as 'difficult'. This allows it to be ignored if necessary
-//                          (the voc documentation specifies objects marked as difficult
-//                          have no effects on the results and are effectively ignored)
-// - (ignore_difficult) If set to true, objects marked as difficult will be ignored when returning
-//                          the number of hits for p-r normalization (default = true)
-//RETURN VALUE:
-//                      Returns the number of object hits in total in the gt to allow proper normalization
-//                          of a p-r curve
-//NOTES:
-// As stated in the VOC documentation, multiple detections of the same object in an image are
-// considered FALSE detections e.g. 5 detections of a single object is counted as 1 correct
-// detection and 4 false detections - it is the responsibility of the participant's system
-// to filter multiple detections from its output
-int VocData::getDetectorGroundTruth(const string& obj_class, const ObdDatasetType dataset, const vector<ObdImage>& images, const vector<vector<Rect> >& bounding_boxes, const vector<vector<float> >& scores, vector<vector<char> >& ground_truth, vector<vector<char> >& detection_difficult, bool ignore_difficult)
-{
-    int recall_normalization = 0;
-
-    /* first create a list of indices referring to the elements of bounding_boxes and scores in
-     * descending order of scores */
-    vector<ObdScoreIndexSorter> sorted_ids;
-    {
-        /* first count how many objects to allow preallocation */
-        size_t obj_count = 0;
-        CV_Assert(images.size() == bounding_boxes.size());
-        CV_Assert(scores.size() == bounding_boxes.size());
-        for (size_t im_idx = 0; im_idx < scores.size(); ++im_idx)
-        {
-            CV_Assert(scores[im_idx].size() == bounding_boxes[im_idx].size());
-            obj_count += scores[im_idx].size();
-        }
-        /* preallocate id vector */
-        sorted_ids.resize(obj_count);
-        /* now copy across scores and indexes to preallocated vector */
-        int flat_pos = 0;
-        for (size_t im_idx = 0; im_idx < scores.size(); ++im_idx)
-        {
-            for (size_t ob_idx = 0; ob_idx < scores[im_idx].size(); ++ob_idx)
-            {
-                sorted_ids[flat_pos].score = scores[im_idx][ob_idx];
-                sorted_ids[flat_pos].image_idx = (int)im_idx;
-                sorted_ids[flat_pos].obj_idx = (int)ob_idx;
-                ++flat_pos;
-            }
-        }
-        /* and sort the vector in descending order of score */
-        std::sort(sorted_ids.begin(),sorted_ids.end());
-        std::reverse(sorted_ids.begin(),sorted_ids.end());
-    }
-
-    /* prepare ground truth + difficult vector (1st dimension) */
-    vector<vector<char> >(images.size()).swap(ground_truth);
-    vector<vector<char> >(images.size()).swap(detection_difficult);
-    vector<vector<char> > detected(images.size());
-
-    vector<vector<ObdObject> > img_objects(images.size());
-    vector<vector<VocObjectData> > img_object_data(images.size());
-    /* preload object ground truth bounding box data */
-    {
-        vector<vector<ObdObject> > img_objects_all(images.size());
-        vector<vector<VocObjectData> > img_object_data_all(images.size());
-        for (size_t image_idx = 0; image_idx < images.size(); ++image_idx)
-        {
-            /* prepopulate ground truth bounding boxes */
-            getObjects(images[image_idx].id, img_objects_all[image_idx], img_object_data_all[image_idx]);
-            /* meanwhile, also set length of target ground truth + difficult vector to same as number of object detections (2nd dimension) */
-            ground_truth[image_idx].resize(bounding_boxes[image_idx].size());
-            detection_difficult[image_idx].resize(bounding_boxes[image_idx].size());
-        }
-
-        /* save only instances of the object class concerned */
-        for (size_t image_idx = 0; image_idx < images.size(); ++image_idx)
-        {
-            for (size_t obj_idx = 0; obj_idx < img_objects_all[image_idx].size(); ++obj_idx)
-            {
-                if (img_objects_all[image_idx][obj_idx].object_class == obj_class)
-                {
-                    img_objects[image_idx].push_back(img_objects_all[image_idx][obj_idx]);
-                    img_object_data[image_idx].push_back(img_object_data_all[image_idx][obj_idx]);
-                }
-            }
-            detected[image_idx].resize(img_objects[image_idx].size(), false);
-        }
-    }
-
-    /* calculate the total number of objects in the ground truth for the current dataset */
-    {
-        vector<ObdImage> gt_images;
-        vector<char> gt_object_present;
-        getClassImages(obj_class, dataset, gt_images, gt_object_present);
-
-        for (size_t image_idx = 0; image_idx < gt_images.size(); ++image_idx)
-        {
-            vector<ObdObject> gt_img_objects;
-            vector<VocObjectData> gt_img_object_data;
-            getObjects(gt_images[image_idx].id, gt_img_objects, gt_img_object_data);
-            for (size_t obj_idx = 0; obj_idx < gt_img_objects.size(); ++obj_idx)
-            {
-                if (gt_img_objects[obj_idx].object_class == obj_class)
-                {
-                    if ((gt_img_object_data[obj_idx].difficult == false) || (ignore_difficult == false))
-                        ++recall_normalization;
-                }
-            }
-        }
-    }
-
-#ifdef PR_DEBUG
-    int printed_count = 0;
-#endif
-    /* now iterate through detections in descending order of score, assigning to ground truth bounding boxes if possible */
-    for (size_t detect_idx = 0; detect_idx < sorted_ids.size(); ++detect_idx)
-    {
-        //read in indexes to make following code easier to read
-        int im_idx = sorted_ids[detect_idx].image_idx;
-        int ob_idx = sorted_ids[detect_idx].obj_idx;
-        //set ground truth for the current object to false by default
-        ground_truth[im_idx][ob_idx] = false;
-        detection_difficult[im_idx][ob_idx] = false;
-        float maxov = -1.0;
-        bool max_is_difficult = false;
-        int max_gt_obj_idx = -1;
-        //-- for each detected object iterate through objects present in the bounding box ground truth --
-        for (size_t gt_obj_idx = 0; gt_obj_idx < img_objects[im_idx].size(); ++gt_obj_idx)
-        {
-            if (detected[im_idx][gt_obj_idx] == false)
-            {
-                //check if the detected object and ground truth object overlap by a sufficient margin
-                float ov = testBoundingBoxesForOverlap(bounding_boxes[im_idx][ob_idx], img_objects[im_idx][gt_obj_idx].boundingBox);
-                if (ov != -1.0)
-                {
-                    //if all conditions are met store the overlap score and index (as objects are assigned to the highest scoring match)
-                    if (ov > maxov)
-                    {
-                        maxov = ov;
-                        max_gt_obj_idx = (int)gt_obj_idx;
-                        //store whether the maximum detection is marked as difficult or not
-                        max_is_difficult = (img_object_data[im_idx][gt_obj_idx].difficult);
-                    }
-                }
-            }
-        }
-        //-- if a match was found, set the ground truth of the current object to true --
-        if (maxov != -1.0)
-        {
-            CV_Assert(max_gt_obj_idx != -1);
-            ground_truth[im_idx][ob_idx] = true;
-            //store whether the maximum detection was marked as 'difficult' or not
-            detection_difficult[im_idx][ob_idx] = max_is_difficult;
-            //remove the ground truth object so it doesn't match with subsequent detected objects
-            //** this is the behaviour defined by the voc documentation **
-            detected[im_idx][max_gt_obj_idx] = true;
-        }
-#ifdef PR_DEBUG
-        if (printed_count < 10)
-        {
-            cout << printed_count << ": id=" << images[im_idx].id << ", score=" << scores[im_idx][ob_idx] << " (" << ob_idx << ") [" << bounding_boxes[im_idx][ob_idx].x << "," <<
-                    bounding_boxes[im_idx][ob_idx].y << "," << bounding_boxes[im_idx][ob_idx].width + bounding_boxes[im_idx][ob_idx].x <<
-                    "," << bounding_boxes[im_idx][ob_idx].height + bounding_boxes[im_idx][ob_idx].y << "] detected=" << ground_truth[im_idx][ob_idx] <<
-                    ", difficult=" << detection_difficult[im_idx][ob_idx] << endl;
-            ++printed_count;
-            /* print ground truth */
-            for (int gt_obj_idx = 0; gt_obj_idx < img_objects[im_idx].size(); ++gt_obj_idx)
-            {
-                cout << "    GT: [" << img_objects[im_idx][gt_obj_idx].boundingBox.x << "," <<
-                        img_objects[im_idx][gt_obj_idx].boundingBox.y << "," << img_objects[im_idx][gt_obj_idx].boundingBox.width + img_objects[im_idx][gt_obj_idx].boundingBox.x <<
-                        "," << img_objects[im_idx][gt_obj_idx].boundingBox.height + img_objects[im_idx][gt_obj_idx].boundingBox.y << "]";
-                if (gt_obj_idx == max_gt_obj_idx) cout << " <--- (" << maxov << " overlap)";
-                cout << endl;
-            }
-        }
-#endif
-    }
-
-    return recall_normalization;
-}
-
-//Write VOC-compliant classifier results file
-//-------------------------------------------
-//INPUTS:
-// - obj_class          The VOC object class identifier string
-// - dataset            Specifies whether working with the training or test set
-// - images             An array of ObdImage containing the images for which data will be saved to the result file
-// - scores             A corresponding array of confidence scores given a query
-// - (competition)      If specified, defines which competition the results are for (see VOC documentation - default 1)
-//NOTES:
-// The result file path and filename are determined automatically using m_results_directory as a base
-void VocData::writeClassifierResultsFile( const string& out_dir, const string& obj_class, const ObdDatasetType dataset, const vector<ObdImage>& images, const vector<float>& scores, const int competition, const bool overwrite_ifexists)
-{
-    CV_Assert(images.size() == scores.size());
-
-    string output_file_base, output_file;
-    if (dataset == CV_OBD_TRAIN)
-    {
-        output_file_base = out_dir + "/comp" + integerToString(competition) + "_cls_" + m_train_set + "_" + obj_class;
-    } else {
-        output_file_base = out_dir + "/comp" + integerToString(competition) + "_cls_" + m_test_set + "_" + obj_class;
-    }
-    output_file = output_file_base + ".txt";
-
-    //check if file exists, and if so create a numbered new file instead
-    if (overwrite_ifexists == false)
-    {
-        struct stat stFileInfo;
-        if (stat(output_file.c_str(),&stFileInfo) == 0)
-        {
-            string output_file_new;
-            int filenum = 0;
-            do
-            {
-                ++filenum;
-                output_file_new = output_file_base + "_" + integerToString(filenum);
-                output_file = output_file_new + ".txt";
-            } while (stat(output_file.c_str(),&stFileInfo) == 0);
-        }
-    }
-
-    //output data to file
-    std::ofstream result_file(output_file.c_str());
-    if (result_file.is_open())
-    {
-        for (size_t i = 0; i < images.size(); ++i)
-        {
-            result_file << images[i].id << " " << scores[i] << endl;
-        }
-        result_file.close();
-    } else {
-        string err_msg = "could not open classifier results file '" + output_file + "' for writing. Before running for the first time, a 'results' subdirectory should be created within the VOC dataset base directory. e.g. if the VOC data is stored in /VOC/VOC2010 then the path /VOC/results must be created.";
-        CV_Error(Error::StsError,err_msg.c_str());
-    }
-}
-
-//---------------------------------------
-//CALCULATE METRICS FROM VOC RESULTS DATA
-//---------------------------------------
-
-//Utility function to construct a VOC-standard classification results filename
-//----------------------------------------------------------------------------
-//INPUTS:
-// - obj_class          The VOC object class identifier string
-// - task               Specifies whether to generate a filename for the classification or detection task
-// - dataset            Specifies whether working with the training or test set
-// - (competition)      If specified, defines which competition the results are for (see VOC documentation
-//                      default of -1 means this is set to 1 for the classification task and 3 for the detection task)
-// - (number)           If specified and above 0, defines which of a number of duplicate results file produced for a given set of
-//                      of settings should be used (this number will be added as a postfix to the filename)
-//NOTES:
-// This is primarily useful for returning the filename of a classification file previously computed using writeClassifierResultsFile
-// for example when calling calcClassifierPrecRecall
-string VocData::getResultsFilename(const string& obj_class, const VocTask task, const ObdDatasetType dataset, const int competition, const int number)
-{
-    if ((competition < 1) && (competition != -1))
-        CV_Error(Error::StsBadArg,"competition argument should be a positive non-zero number or -1 to accept the default");
-    if ((number < 1) && (number != -1))
-        CV_Error(Error::StsBadArg,"number argument should be a positive non-zero number or -1 to accept the default");
-
-    string dset, task_type;
-
-    if (dataset == CV_OBD_TRAIN)
-    {
-        dset = m_train_set;
-    } else {
-        dset = m_test_set;
-    }
-
-    int comp = competition;
-    if (task == CV_VOC_TASK_CLASSIFICATION)
-    {
-        task_type = "cls";
-        if (comp == -1) comp = 1;
-    } else {
-        task_type = "det";
-        if (comp == -1) comp = 3;
-    }
-
-    stringstream ss;
-    if (number < 1)
-    {
-        ss << "comp" << comp << "_" << task_type << "_" << dset << "_" << obj_class << ".txt";
-    } else {
-        ss << "comp" << comp << "_" << task_type << "_" << dset << "_" << obj_class << "_" << number << ".txt";
-    }
-
-    string filename = ss.str();
-    return filename;
-}
-
-//Calculate metrics for classification results
-//--------------------------------------------
-//INPUTS:
-// - ground_truth       A vector of booleans determining whether the currently tested class is present in each input image
-// - scores             A vector containing the similarity score for each input image (higher is more similar)
-//OUTPUTS:
-// - precision          A vector containing the precision calculated at each datapoint of a p-r curve generated from the result set
-// - recall             A vector containing the recall calculated at each datapoint of a p-r curve generated from the result set
-// - ap                The ap metric calculated from the result set
-// - (ranking)          A vector of the same length as 'ground_truth' and 'scores' containing the order of the indices in both of
-//                      these arrays when sorting by the ranking score in descending order
-//NOTES:
-// The result file path and filename are determined automatically using m_results_directory as a base
-void VocData::calcClassifierPrecRecall(const string& obj_class, const vector<ObdImage>& images, const vector<float>& scores, vector<float>& precision, vector<float>& recall, float& ap, vector<size_t>& ranking)
-{
-    vector<char> res_ground_truth;
-    getClassifierGroundTruth(obj_class, images, res_ground_truth);
-
-    calcPrecRecall_impl(res_ground_truth, scores, precision, recall, ap, ranking);
-}
-
-void VocData::calcClassifierPrecRecall(const string& obj_class, const vector<ObdImage>& images, const vector<float>& scores, vector<float>& precision, vector<float>& recall, float& ap)
-{
-    vector<char> res_ground_truth;
-    getClassifierGroundTruth(obj_class, images, res_ground_truth);
-
-    vector<size_t> ranking;
-    calcPrecRecall_impl(res_ground_truth, scores, precision, recall, ap, ranking);
-}
-
-//< Overloaded version which accepts VOC classification result file input instead of array of scores/ground truth >
-//INPUTS:
-// - input_file         The path to the VOC standard results file to use for calculating precision/recall
-//                      If a full path is not specified, it is assumed this file is in the VOC standard results directory
-//                      A VOC standard filename can be retrieved (as used by writeClassifierResultsFile) by calling  getClassifierResultsFilename
-
-void VocData::calcClassifierPrecRecall(const string& input_file, vector<float>& precision, vector<float>& recall, float& ap, bool outputRankingFile)
-{
-    //read in classification results file
-    vector<string> res_image_codes;
-    vector<float> res_scores;
-
-    string input_file_std = checkFilenamePathsep(input_file);
-    readClassifierResultsFile(input_file_std, res_image_codes, res_scores);
-
-    //extract the object class and dataset from the results file filename
-    string class_name, dataset_name;
-    extractDataFromResultsFilename(input_file_std, class_name, dataset_name);
-
-    //generate the ground truth for the images extracted from the results file
-    vector<char> res_ground_truth;
-
-    getClassifierGroundTruth(class_name, res_image_codes, res_ground_truth);
-
-    if (outputRankingFile)
-    {
-        /* 1. store sorting order by score (descending) in 'order' */
-        vector<std::pair<size_t, vector<float>::const_iterator> > order(res_scores.size());
-
-        size_t n = 0;
-        for (vector<float>::const_iterator it = res_scores.begin(); it != res_scores.end(); ++it, ++n)
-            order[n] = make_pair(n, it);
-
-        std::sort(order.begin(),order.end(),orderingSorter());
-
-        /* 2. save ranking results to text file */
-        string input_file_std1 = checkFilenamePathsep(input_file);
-        size_t fnamestart = input_file_std1.rfind("/");
-        string scoregt_file_str = input_file_std1.substr(0,fnamestart+1) + "scoregt_" + class_name + ".txt";
-        std::ofstream scoregt_file(scoregt_file_str.c_str());
-        if (scoregt_file.is_open())
-        {
-            for (size_t i = 0; i < res_scores.size(); ++i)
-            {
-                scoregt_file << res_image_codes[order[i].first] << " " << res_scores[order[i].first] << " " << res_ground_truth[order[i].first] << endl;
-            }
-            scoregt_file.close();
-        } else {
-            string err_msg = "could not open scoregt file '" + scoregt_file_str + "' for writing.";
-            CV_Error(Error::StsError,err_msg.c_str());
-        }
-    }
-
-    //finally, calculate precision+recall+ap
-    vector<size_t> ranking;
-    calcPrecRecall_impl(res_ground_truth,res_scores,precision,recall,ap,ranking);
-}
-
-//< Protected implementation of Precision-Recall calculation used by both calcClassifierPrecRecall and calcDetectorPrecRecall >
-
-void VocData::calcPrecRecall_impl(const vector<char>& ground_truth, const vector<float>& scores, vector<float>& precision, vector<float>& recall, float& ap, vector<size_t>& ranking, int recall_normalization)
-{
-    CV_Assert(ground_truth.size() == scores.size());
-
-    //add extra element for p-r at 0 recall (in case that first retrieved is positive)
-    vector<float>(scores.size()+1).swap(precision);
-    vector<float>(scores.size()+1).swap(recall);
-
-    // SORT RESULTS BY THEIR SCORE
-    /* 1. store sorting order in 'order' */
-    VocData::getSortOrder(scores, ranking);
-
-#ifdef PR_DEBUG
-    std::ofstream scoregt_file("D:/pr.txt");
-    if (scoregt_file.is_open())
-    {
-       for (int i = 0; i < scores.size(); ++i)
-       {
-           scoregt_file << scores[ranking[i]] << " " << ground_truth[ranking[i]] << endl;
-       }
-       scoregt_file.close();
-    }
-#endif
-
-    // CALCULATE PRECISION+RECALL
-
-    int retrieved_hits = 0;
-
-    int recall_norm;
-    if (recall_normalization != -1)
-    {
-        recall_norm = recall_normalization;
-    } else {
-        recall_norm = (int)std::count_if(ground_truth.begin(),ground_truth.end(),std::bind2nd(std::equal_to<char>(),(char)1));
-    }
-
-    ap = 0;
-    recall[0] = 0;
-    for (size_t idx = 0; idx < ground_truth.size(); ++idx)
-    {
-        if (ground_truth[ranking[idx]] != 0) ++retrieved_hits;
-
-        precision[idx+1] = static_cast<float>(retrieved_hits)/static_cast<float>(idx+1);
-        recall[idx+1] = static_cast<float>(retrieved_hits)/static_cast<float>(recall_norm);
-
-        if (idx == 0)
-        {
-            //add further point at 0 recall with the same precision value as the first computed point
-            precision[idx] = precision[idx+1];
-        }
-        if (recall[idx+1] == 1.0)
-        {
-            //if recall = 1, then end early as all positive images have been found
-            recall.resize(idx+2);
-            precision.resize(idx+2);
-            break;
-        }
-    }
-
-    /* ap calculation */
-    if (m_sampled_ap == false)
-    {
-        // FOR VOC2010+ AP IS CALCULATED FROM ALL DATAPOINTS
-        /* make precision monotonically decreasing for purposes of calculating ap */
-        vector<float> precision_monot(precision.size());
-        vector<float>::iterator prec_m_it = precision_monot.begin();
-        for (vector<float>::iterator prec_it = precision.begin(); prec_it != precision.end(); ++prec_it, ++prec_m_it)
-        {
-            vector<float>::iterator max_elem;
-            max_elem = std::max_element(prec_it,precision.end());
-            (*prec_m_it) = (*max_elem);
-        }
-        /* calculate ap */
-        for (size_t idx = 0; idx < (recall.size()-1); ++idx)
-        {
-            ap += (recall[idx+1] - recall[idx])*precision_monot[idx+1] +   //no need to take min of prec - is monotonically decreasing
-                    0.5f*(recall[idx+1] - recall[idx])*std::abs(precision_monot[idx+1] - precision_monot[idx]);
-        }
-    } else {
-        // FOR BEFORE VOC2010 AP IS CALCULATED BY SAMPLING PRECISION AT RECALL 0.0,0.1,..,1.0
-
-        for (float recall_pos = 0.f; recall_pos <= 1.f; recall_pos += 0.1f)
-        {
-            //find iterator of the precision corresponding to the first recall >= recall_pos
-            vector<float>::iterator recall_it = recall.begin();
-            vector<float>::iterator prec_it = precision.begin();
-
-            while ((*recall_it) < recall_pos)
-            {
-                ++recall_it;
-                ++prec_it;
-                if (recall_it == recall.end()) break;
-            }
-
-            /* if no recall >= recall_pos found, this level of recall is never reached so stop adding to ap */
-            if (recall_it == recall.end()) break;
-
-            /* if the prec_it is valid, compute the max precision at this level of recall or higher */
-            vector<float>::iterator max_prec = std::max_element(prec_it,precision.end());
-
-            ap += (*max_prec)/11;
-        }
-    }
-}
-
-/* functions for calculating confusion matrix rows */
-
-//Calculate rows of a confusion matrix
-//------------------------------------
-//INPUTS:
-// - obj_class          The VOC object class identifier string for the confusion matrix row to compute
-// - images             An array of ObdImage containing the images to use for the computation
-// - scores             A corresponding array of confidence scores for the presence of obj_class in each image
-// - cond               Defines whether to use a cut off point based on recall (CV_VOC_CCOND_RECALL) or score
-//                      (CV_VOC_CCOND_SCORETHRESH) the latter is useful for classifier detections where positive
-//                      values are positive detections and negative values are negative detections
-// - threshold          Threshold value for cond. In case of CV_VOC_CCOND_RECALL, is proportion recall (e.g. 0.5).
-//                      In the case of CV_VOC_CCOND_SCORETHRESH is the value above which to count results.
-//OUTPUTS:
-// - output_headers     An output vector of object class headers for the confusion matrix row
-// - output_values      An output vector of values for the confusion matrix row corresponding to the classes
-//                      defined in output_headers
-//NOTES:
-// The methodology used by the classifier version of this function is that true positives have a single unit
-// added to the obj_class column in the confusion matrix row, whereas false positives have a single unit
-// distributed in proportion between all the columns in the confusion matrix row corresponding to the objects
-// present in the image.
-void VocData::calcClassifierConfMatRow(const string& obj_class, const vector<ObdImage>& images, const vector<float>& scores, const VocConfCond cond, const float threshold, vector<string>& output_headers, vector<float>& output_values)
-{
-    CV_Assert(images.size() == scores.size());
-
-    // SORT RESULTS BY THEIR SCORE
-    /* 1. store sorting order in 'ranking' */
-    vector<size_t> ranking;
-    VocData::getSortOrder(scores, ranking);
-
-    // CALCULATE CONFUSION MATRIX ENTRIES
-    /* prepare object category headers */
-    output_headers = m_object_classes;
-    vector<float>(output_headers.size(),0.0).swap(output_values);
-    /* find the index of the target object class in the headers for later use */
-    int target_idx;
-    {
-        vector<string>::iterator target_idx_it = std::find(output_headers.begin(),output_headers.end(),obj_class);
-        /* if the target class can not be found, raise an exception */
-        if (target_idx_it == output_headers.end())
-        {
-            string err_msg = "could not find the target object class '" + obj_class + "' in list of valid classes.";
-            CV_Error(Error::StsError,err_msg.c_str());
-        }
-        /* convert iterator to index */
-        target_idx = (int)std::distance(output_headers.begin(),target_idx_it);
-    }
-
-    /* prepare variables related to calculating recall if using the recall threshold */
-    int retrieved_hits = 0;
-    int total_relevant = 0;
-    if (cond == CV_VOC_CCOND_RECALL)
-    {
-        vector<char> ground_truth;
-        /* in order to calculate the total number of relevant images for normalization of recall
-            it's necessary to extract the ground truth for the images under consideration */
-        getClassifierGroundTruth(obj_class, images, ground_truth);
-        total_relevant = (int)std::count_if(ground_truth.begin(),ground_truth.end(),std::bind2nd(std::equal_to<char>(),(char)1));
-    }
-
-    /* iterate through images */
-    vector<ObdObject> img_objects;
-    vector<VocObjectData> img_object_data;
-    int total_images = 0;
-    for (size_t image_idx = 0; image_idx < images.size(); ++image_idx)
-    {
-        /* if using the score as the break condition, check for it now */
-        if (cond == CV_VOC_CCOND_SCORETHRESH)
-        {
-            if (scores[ranking[image_idx]] <= threshold) break;
-        }
-        /* if continuing for this iteration, increment the image counter for later normalization */
-        ++total_images;
-        /* for each image retrieve the objects contained */
-        getObjects(images[ranking[image_idx]].id, img_objects, img_object_data);
-        //check if the tested for object class is present
-        if (getClassifierGroundTruthImage(obj_class, images[ranking[image_idx]].id))
-        {
-            //if the target class is present, assign fully to the target class element in the confusion matrix row
-            output_values[target_idx] += 1.0;
-            if (cond == CV_VOC_CCOND_RECALL) ++retrieved_hits;
-        } else {
-            //first delete all objects marked as difficult
-            for (size_t obj_idx = 0; obj_idx < img_objects.size(); ++obj_idx)
-            {
-                if (img_object_data[obj_idx].difficult == true)
-                {
-                    vector<ObdObject>::iterator it1 = img_objects.begin();
-                    std::advance(it1,obj_idx);
-                    img_objects.erase(it1);
-                    vector<VocObjectData>::iterator it2 = img_object_data.begin();
-                    std::advance(it2,obj_idx);
-                    img_object_data.erase(it2);
-                    --obj_idx;
-                }
-            }
-            //if the target class is not present, add values to the confusion matrix row in equal proportions to all objects present in the image
-            for (size_t obj_idx = 0; obj_idx < img_objects.size(); ++obj_idx)
-            {
-                //find the index of the currently considered object
-                vector<string>::iterator class_idx_it = std::find(output_headers.begin(),output_headers.end(),img_objects[obj_idx].object_class);
-                //if the class name extracted from the ground truth file could not be found in the list of available classes, raise an exception
-                if (class_idx_it == output_headers.end())
-                {
-                    string err_msg = "could not find object class '" + img_objects[obj_idx].object_class + "' specified in the ground truth file of '" + images[ranking[image_idx]].id +"'in list of valid classes.";
-                    CV_Error(Error::StsError,err_msg.c_str());
-                }
-                /* convert iterator to index */
-                int class_idx = (int)std::distance(output_headers.begin(),class_idx_it);
-                //add to confusion matrix row in proportion
-                output_values[class_idx] += 1.f/static_cast<float>(img_objects.size());
-            }
-        }
-        //check break conditions if breaking on certain level of recall
-        if (cond == CV_VOC_CCOND_RECALL)
-        {
-            if(static_cast<float>(retrieved_hits)/static_cast<float>(total_relevant) >= threshold) break;
-        }
-    }
-    /* finally, normalize confusion matrix row */
-    for (vector<float>::iterator it = output_values.begin(); it < output_values.end(); ++it)
-    {
-        (*it) /= static_cast<float>(total_images);
-    }
-}
-
-// NOTE: doesn't ignore repeated detections
-void VocData::calcDetectorConfMatRow(const string& obj_class, const ObdDatasetType dataset, const vector<ObdImage>& images, const vector<vector<float> >& scores, const vector<vector<Rect> >& bounding_boxes, const VocConfCond cond, const float threshold, vector<string>& output_headers, vector<float>& output_values, bool ignore_difficult)
-{
-    CV_Assert(images.size() == scores.size());
-    CV_Assert(images.size() == bounding_boxes.size());
-
-    //collapse scores and ground_truth vectors into 1D vectors to allow ranking
-    /* define final flat vectors */
-    vector<string> images_flat;
-    vector<float> scores_flat;
-    vector<Rect> bounding_boxes_flat;
-    {
-        /* first count how many objects to allow preallocation */
-        int obj_count = 0;
-        CV_Assert(scores.size() == bounding_boxes.size());
-        for (size_t img_idx = 0; img_idx < scores.size(); ++img_idx)
-        {
-            CV_Assert(scores[img_idx].size() == bounding_boxes[img_idx].size());
-            for (size_t obj_idx = 0; obj_idx < scores[img_idx].size(); ++obj_idx)
-            {
-                ++obj_count;
-            }
-        }
-        /* preallocate vectors */
-        images_flat.resize(obj_count);
-        scores_flat.resize(obj_count);
-        bounding_boxes_flat.resize(obj_count);
-        /* now copy across to preallocated vectors */
-        int flat_pos = 0;
-        for (size_t img_idx = 0; img_idx < scores.size(); ++img_idx)
-        {
-            for (size_t obj_idx = 0; obj_idx < scores[img_idx].size(); ++obj_idx)
-            {
-                images_flat[flat_pos] = images[img_idx].id;
-                scores_flat[flat_pos] = scores[img_idx][obj_idx];
-                bounding_boxes_flat[flat_pos] = bounding_boxes[img_idx][obj_idx];
-                ++flat_pos;
-            }
-        }
-    }
-
-    // SORT RESULTS BY THEIR SCORE
-    /* 1. store sorting order in 'ranking' */
-    vector<size_t> ranking;
-    VocData::getSortOrder(scores_flat, ranking);
-
-    // CALCULATE CONFUSION MATRIX ENTRIES
-    /* prepare object category headers */
-    output_headers = m_object_classes;
-    output_headers.push_back("background");
-    vector<float>(output_headers.size(),0.0).swap(output_values);
-
-    /* prepare variables related to calculating recall if using the recall threshold */
-    int retrieved_hits = 0;
-    int total_relevant = 0;
-    if (cond == CV_VOC_CCOND_RECALL)
-    {
-//        vector<char> ground_truth;
-//        /* in order to calculate the total number of relevant images for normalization of recall
-//            it's necessary to extract the ground truth for the images under consideration */
-//        getClassifierGroundTruth(obj_class, images, ground_truth);
-//        total_relevant = std::count_if(ground_truth.begin(),ground_truth.end(),std::bind2nd(std::equal_to<bool>(),true));
-        /* calculate the total number of objects in the ground truth for the current dataset */
-        vector<ObdImage> gt_images;
-        vector<char> gt_object_present;
-        getClassImages(obj_class, dataset, gt_images, gt_object_present);
-
-        for (size_t image_idx = 0; image_idx < gt_images.size(); ++image_idx)
-        {
-            vector<ObdObject> gt_img_objects;
-            vector<VocObjectData> gt_img_object_data;
-            getObjects(gt_images[image_idx].id, gt_img_objects, gt_img_object_data);
-            for (size_t obj_idx = 0; obj_idx < gt_img_objects.size(); ++obj_idx)
-            {
-                if (gt_img_objects[obj_idx].object_class == obj_class)
-                {
-                    if ((gt_img_object_data[obj_idx].difficult == false) || (ignore_difficult == false))
-                        ++total_relevant;
-                }
-            }
-        }
-    }
-
-    /* iterate through objects */
-    vector<ObdObject> img_objects;
-    vector<VocObjectData> img_object_data;
-    int total_objects = 0;
-    for (size_t image_idx = 0; image_idx < images.size(); ++image_idx)
-    {
-        /* if using the score as the break condition, check for it now */
-        if (cond == CV_VOC_CCOND_SCORETHRESH)
-        {
-            if (scores_flat[ranking[image_idx]] <= threshold) break;
-        }
-        /* increment the image counter for later normalization */
-        ++total_objects;
-        /* for each image retrieve the objects contained */
-        getObjects(images[ranking[image_idx]].id, img_objects, img_object_data);
-
-        //find the ground truth object which has the highest overlap score with the detected object
-        float maxov = -1.0;
-        int max_gt_obj_idx = -1;
-        //-- for each detected object iterate through objects present in ground truth --
-        for (size_t gt_obj_idx = 0; gt_obj_idx < img_objects.size(); ++gt_obj_idx)
-        {
-            //check difficulty flag
-            if (ignore_difficult || (img_object_data[gt_obj_idx].difficult == false))
-            {
-                //if the class matches, then check if the detected object and ground truth object overlap by a sufficient margin
-                float ov = testBoundingBoxesForOverlap(bounding_boxes_flat[ranking[image_idx]], img_objects[gt_obj_idx].boundingBox);
-                if (ov != -1.f)
-                {
-                    //if all conditions are met store the overlap score and index (as objects are assigned to the highest scoring match)
-                    if (ov > maxov)
-                    {
-                        maxov = ov;
-                        max_gt_obj_idx = (int)gt_obj_idx;
-                    }
-                }
-            }
-        }
-
-        //assign to appropriate object class if an object was detected
-        if (maxov != -1.0)
-        {
-            //find the index of the currently considered object
-            vector<string>::iterator class_idx_it = std::find(output_headers.begin(),output_headers.end(),img_objects[max_gt_obj_idx].object_class);
-            //if the class name extracted from the ground truth file could not be found in the list of available classes, raise an exception
-            if (class_idx_it == output_headers.end())
-            {
-                string err_msg = "could not find object class '" + img_objects[max_gt_obj_idx].object_class + "' specified in the ground truth file of '" + images[ranking[image_idx]].id +"'in list of valid classes.";
-                CV_Error(Error::StsError,err_msg.c_str());
-            }
-            /* convert iterator to index */
-            int class_idx = (int)std::distance(output_headers.begin(),class_idx_it);
-            //add to confusion matrix row in proportion
-            output_values[class_idx] += 1.0;
-        } else {
-            //otherwise assign to background class
-            output_values[output_values.size()-1] += 1.0;
-        }
-
-        //check break conditions if breaking on certain level of recall
-        if (cond == CV_VOC_CCOND_RECALL)
-        {
-            if(static_cast<float>(retrieved_hits)/static_cast<float>(total_relevant) >= threshold) break;
-        }
-    }
-
-    /* finally, normalize confusion matrix row */
-    for (vector<float>::iterator it = output_values.begin(); it < output_values.end(); ++it)
-    {
-        (*it) /= static_cast<float>(total_objects);
-    }
-}
-
-//Save Precision-Recall results to a p-r curve in GNUPlot format
-//--------------------------------------------------------------
-//INPUTS:
-// - output_file        The file to which to save the GNUPlot data file. If only a filename is specified, the data
-//                      file is saved to the standard VOC results directory.
-// - precision          Vector of precisions as returned from calcClassifier/DetectorPrecRecall
-// - recall             Vector of recalls as returned from calcClassifier/DetectorPrecRecall
-// - ap                ap as returned from calcClassifier/DetectorPrecRecall
-// - (title)            Title to use for the plot (if not specified, just the ap is printed as the title)
-//                      This also specifies the filename of the output file if printing to pdf
-// - (plot_type)        Specifies whether to instruct GNUPlot to save to a PDF file (CV_VOC_PLOT_PDF) or directly
-//                      to screen (CV_VOC_PLOT_SCREEN) in the datafile
-//NOTES:
-// The GNUPlot data file can be executed using GNUPlot from the commandline in the following way:
-//      >> GNUPlot <output_file>
-// This will then display the p-r curve on the screen or save it to a pdf file depending on plot_type
-
-void VocData::savePrecRecallToGnuplot(const string& output_file, const vector<float>& precision, const vector<float>& recall, const float ap, const string title, const VocPlotType plot_type)
-{
-    string output_file_std = checkFilenamePathsep(output_file);
-
-    //if no directory is specified, by default save the output file in the results directory
-//    if (output_file_std.find("/") == output_file_std.npos)
-//    {
-//        output_file_std = m_results_directory + output_file_std;
-//    }
-
-    std::ofstream plot_file(output_file_std.c_str());
-
-    if (plot_file.is_open())
-    {
-        plot_file << "set xrange [0:1]" << endl;
-        plot_file << "set yrange [0:1]" << endl;
-        plot_file << "set size square" << endl;
-        string title_text = title;
-        if (title_text.size() == 0) title_text = "Precision-Recall Curve";
-        plot_file << "set title \"" << title_text << " (ap: " << ap << ")\"" << endl;
-        plot_file << "set xlabel \"Recall\"" << endl;
-        plot_file << "set ylabel \"Precision\"" << endl;
-        plot_file << "set style data lines" << endl;
-        plot_file << "set nokey" << endl;
-        if (plot_type == CV_VOC_PLOT_PNG)
-        {
-            plot_file << "set terminal png" << endl;
-            string pdf_filename;
-            if (title.size() != 0)
-            {
-                pdf_filename = title;
-            } else {
-                pdf_filename = "prcurve";
-            }
-            plot_file << "set out \"" << title << ".png\"" << endl;
-        }
-        plot_file << "plot \"-\" using 1:2" << endl;
-        plot_file << "# X Y" << endl;
-        CV_Assert(precision.size() == recall.size());
-        for (size_t i = 0; i < precision.size(); ++i)
-        {
-            plot_file << "  " << recall[i] << " " << precision[i] << endl;
-        }
-        plot_file << "end" << endl;
-        if (plot_type == CV_VOC_PLOT_SCREEN)
-        {
-            plot_file << "pause -1" << endl;
-        }
-        plot_file.close();
-    } else {
-        string err_msg = "could not open plot file '" + output_file_std + "' for writing.";
-        CV_Error(Error::StsError,err_msg.c_str());
-    }
-}
-
-void VocData::readClassifierGroundTruth(const string& obj_class, const ObdDatasetType dataset, vector<ObdImage>& images, vector<char>& object_present)
-{
-    images.clear();
-
-    string gtFilename = m_class_imageset_path;
-    gtFilename.replace(gtFilename.find("%s"),2,obj_class);
-    if (dataset == CV_OBD_TRAIN)
-    {
-        gtFilename.replace(gtFilename.find("%s"),2,m_train_set);
-    } else {
-        gtFilename.replace(gtFilename.find("%s"),2,m_test_set);
-    }
-
-    vector<string> image_codes;
-    readClassifierGroundTruth(gtFilename, image_codes, object_present);
-
-    convertImageCodesToObdImages(image_codes, images);
-}
-
-void VocData::readClassifierResultsFile(const std:: string& input_file, vector<ObdImage>& images, vector<float>& scores)
-{
-    images.clear();
-
-    string input_file_std = checkFilenamePathsep(input_file);
-
-    //if no directory is specified, by default search for the input file in the results directory
-//    if (input_file_std.find("/") == input_file_std.npos)
-//    {
-//        input_file_std = m_results_directory + input_file_std;
-//    }
-
-    vector<string> image_codes;
-    readClassifierResultsFile(input_file_std, image_codes, scores);
-
-    convertImageCodesToObdImages(image_codes, images);
-}
-
-void VocData::readDetectorResultsFile(const string& input_file, vector<ObdImage>& images, vector<vector<float> >& scores, vector<vector<Rect> >& bounding_boxes)
-{
-    images.clear();
-
-    string input_file_std = checkFilenamePathsep(input_file);
-
-    //if no directory is specified, by default search for the input file in the results directory
-//    if (input_file_std.find("/") == input_file_std.npos)
-//    {
-//        input_file_std = m_results_directory + input_file_std;
-//    }
-
-    vector<string> image_codes;
-    readDetectorResultsFile(input_file_std, image_codes, scores, bounding_boxes);
-
-    convertImageCodesToObdImages(image_codes, images);
-}
-
-const vector<string>& VocData::getObjectClasses()
-{
-    return m_object_classes;
-}
-
-//string VocData::getResultsDirectory()
-//{
-//    return m_results_directory;
-//}
-
-//---------------------------------------------------------
-// Protected Functions ------------------------------------
-//---------------------------------------------------------
-
-static string getVocName( const string& vocPath )
-{
-    size_t found = vocPath.rfind( '/' );
-    if( found == string::npos )
-    {
-        found = vocPath.rfind( '\\' );
-        if( found == string::npos )
-            return vocPath;
-    }
-    return vocPath.substr(found + 1, vocPath.size() - found);
-}
-
-void VocData::initVoc( const string& vocPath, const bool useTestDataset )
-{
-    initVoc2007to2010( vocPath, useTestDataset );
-}
-
-//Initialize file paths and settings for the VOC 2010 dataset
-//-----------------------------------------------------------
-void VocData::initVoc2007to2010( const string& vocPath, const bool useTestDataset )
-{
-    //check format of root directory and modify if necessary
-
-    m_vocName = getVocName( vocPath );
-
-    CV_Assert( !m_vocName.compare("VOC2007") || !m_vocName.compare("VOC2008") ||
-               !m_vocName.compare("VOC2009") || !m_vocName.compare("VOC2010") );
-
-    m_vocPath = checkFilenamePathsep( vocPath, true );
-
-    if (useTestDataset)
-    {
-        m_train_set = "trainval";
-        m_test_set = "test";
-    } else {
-        m_train_set = "train";
-        m_test_set = "val";
-    }
-
-    // initialize main classification/detection challenge paths
-    m_annotation_path = m_vocPath + "/Annotations/%s.xml";
-    m_image_path = m_vocPath + "/JPEGImages/%s.jpg";
-    m_imageset_path = m_vocPath + "/ImageSets/Main/%s.txt";
-    m_class_imageset_path = m_vocPath + "/ImageSets/Main/%s_%s.txt";
-
-    //define available object_classes for VOC2010 dataset
-    m_object_classes.push_back("aeroplane");
-    m_object_classes.push_back("bicycle");
-    m_object_classes.push_back("bird");
-    m_object_classes.push_back("boat");
-    m_object_classes.push_back("bottle");
-    m_object_classes.push_back("bus");
-    m_object_classes.push_back("car");
-    m_object_classes.push_back("cat");
-    m_object_classes.push_back("chair");
-    m_object_classes.push_back("cow");
-    m_object_classes.push_back("diningtable");
-    m_object_classes.push_back("dog");
-    m_object_classes.push_back("horse");
-    m_object_classes.push_back("motorbike");
-    m_object_classes.push_back("person");
-    m_object_classes.push_back("pottedplant");
-    m_object_classes.push_back("sheep");
-    m_object_classes.push_back("sofa");
-    m_object_classes.push_back("train");
-    m_object_classes.push_back("tvmonitor");
-
-    m_min_overlap = 0.5;
-
-    //up until VOC 2010, ap was calculated by sampling p-r curve, not taking complete curve
-    m_sampled_ap = ((m_vocName == "VOC2007") || (m_vocName == "VOC2008") || (m_vocName == "VOC2009"));
-}
-
-//Read a VOC classification ground truth text file for a given object class and dataset
-//-------------------------------------------------------------------------------------
-//INPUTS:
-// - filename           The path of the text file to read
-//OUTPUTS:
-// - image_codes        VOC image codes extracted from the GT file in the form 20XX_XXXXXX where the first four
-//                          digits specify the year of the dataset, and the last group specifies a unique ID
-// - object_present     For each image in the 'image_codes' array, specifies whether the object class described
-//                          in the loaded GT file is present or not
-void VocData::readClassifierGroundTruth(const string& filename, vector<string>& image_codes, vector<char>& object_present)
-{
-    image_codes.clear();
-    object_present.clear();
-
-    std::ifstream gtfile(filename.c_str());
-    if (!gtfile.is_open())
-    {
-        string err_msg = "could not open VOC ground truth textfile '" + filename + "'.";
-        CV_Error(Error::StsError,err_msg.c_str());
-    }
-
-    string line;
-    string image;
-    int obj_present = 0;
-    while (!gtfile.eof())
-    {
-        std::getline(gtfile,line);
-        std::istringstream iss(line);
-        iss >> image >> obj_present;
-        if (!iss.fail())
-        {
-            image_codes.push_back(image);
-            object_present.push_back(obj_present == 1);
-        } else {
-            if (!gtfile.eof()) CV_Error(Error::StsParseError,"error parsing VOC ground truth textfile.");
-        }
-    }
-    gtfile.close();
-}
-
-void VocData::readClassifierResultsFile(const string& input_file, vector<string>& image_codes, vector<float>& scores)
-{
-    //check if results file exists
-    std::ifstream result_file(input_file.c_str());
-    if (result_file.is_open())
-    {
-        string line;
-        string image;
-        float score;
-        //read in the results file
-        while (!result_file.eof())
-        {
-            std::getline(result_file,line);
-            std::istringstream iss(line);
-            iss >> image >> score;
-            if (!iss.fail())
-            {
-                image_codes.push_back(image);
-                scores.push_back(score);
-            } else {
-                if(!result_file.eof()) CV_Error(Error::StsParseError,"error parsing VOC classifier results file.");
-            }
-        }
-        result_file.close();
-    } else {
-        string err_msg = "could not open classifier results file '" + input_file + "' for reading.";
-        CV_Error(Error::StsError,err_msg.c_str());
-    }
-}
-
-void VocData::readDetectorResultsFile(const string& input_file, vector<string>& image_codes, vector<vector<float> >& scores, vector<vector<Rect> >& bounding_boxes)
-{
-    image_codes.clear();
-    scores.clear();
-    bounding_boxes.clear();
-
-    //check if results file exists
-    std::ifstream result_file(input_file.c_str());
-    if (result_file.is_open())
-    {
-        string line;
-        string image;
-        Rect bounding_box;
-        float score;
-        //read in the results file
-        while (!result_file.eof())
-        {
-            std::getline(result_file,line);
-            std::istringstream iss(line);
-            iss >> image >> score >> bounding_box.x >> bounding_box.y >> bounding_box.width >> bounding_box.height;
-            if (!iss.fail())
-            {
-                //convert right and bottom positions to width and height
-                bounding_box.width -= bounding_box.x;
-                bounding_box.height -= bounding_box.y;
-                //convert to 0-indexing
-                bounding_box.x -= 1;
-                bounding_box.y -= 1;
-                //store in output vectors
-                /* first check if the current image code has been seen before */
-                vector<string>::iterator image_codes_it = std::find(image_codes.begin(),image_codes.end(),image);
-                if (image_codes_it == image_codes.end())
-                {
-                    image_codes.push_back(image);
-                    vector<float> score_vect(1);
-                    score_vect[0] = score;
-                    scores.push_back(score_vect);
-                    vector<Rect> bounding_box_vect(1);
-                    bounding_box_vect[0] = bounding_box;
-                    bounding_boxes.push_back(bounding_box_vect);
-                } else {
-                    /* if the image index has been seen before, add the current object below it in the 2D arrays */
-                    int image_idx = (int)std::distance(image_codes.begin(),image_codes_it);
-                    scores[image_idx].push_back(score);
-                    bounding_boxes[image_idx].push_back(bounding_box);
-                }
-            } else {
-                if(!result_file.eof()) CV_Error(Error::StsParseError,"error parsing VOC detector results file.");
-            }
-        }
-        result_file.close();
-    } else {
-        string err_msg = "could not open detector results file '" + input_file + "' for reading.";
-        CV_Error(Error::StsError,err_msg.c_str());
-    }
-}
-
-
-//Read a VOC annotation xml file for a given image
-//------------------------------------------------
-//INPUTS:
-// - filename           The path of the xml file to read
-//OUTPUTS:
-// - objects            Array of VocObject describing all object instances present in the given image
-void VocData::extractVocObjects(const string filename, vector<ObdObject>& objects, vector<VocObjectData>& object_data)
-{
-#ifdef PR_DEBUG
-    int block = 1;
-    cout << "SAMPLE VOC OBJECT EXTRACTION for " << filename << ":" << endl;
-#endif
-    objects.clear();
-    object_data.clear();
-
-    string contents, object_contents, tag_contents;
-
-    readFileToString(filename, contents);
-
-    //keep on extracting 'object' blocks until no more can be found
-    if (extractXMLBlock(contents, "annotation", 0, contents) != -1)
-    {
-        int searchpos = 0;
-        searchpos = extractXMLBlock(contents, "object", searchpos, object_contents);
-        while (searchpos != -1)
-        {
-#ifdef PR_DEBUG
-            cout << "SEARCHPOS:" << searchpos << endl;
-            cout << "start block " << block << " ---------" << endl;
-            cout << object_contents << endl;
-            cout << "end block " << block << " -----------" << endl;
-            ++block;
-#endif
-
-            ObdObject object;
-            VocObjectData object_d;
-
-            //object class -------------
-
-            if (extractXMLBlock(object_contents, "name", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <name> tag in object definition of '" + filename + "'");
-            object.object_class.swap(tag_contents);
-
-            //object bounding box -------------
-
-            int xmax, xmin, ymax, ymin;
-
-            if (extractXMLBlock(object_contents, "xmax", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <xmax> tag in object definition of '" + filename + "'");
-            xmax = stringToInteger(tag_contents);
-
-            if (extractXMLBlock(object_contents, "xmin", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <xmin> tag in object definition of '" + filename + "'");
-            xmin = stringToInteger(tag_contents);
-
-            if (extractXMLBlock(object_contents, "ymax", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <ymax> tag in object definition of '" + filename + "'");
-            ymax = stringToInteger(tag_contents);
-
-            if (extractXMLBlock(object_contents, "ymin", 0, tag_contents) == -1) CV_Error(Error::StsError,"missing <ymin> tag in object definition of '" + filename + "'");
-            ymin = stringToInteger(tag_contents);
-
-            object.boundingBox.x = xmin-1;      //convert to 0-based indexing
-            object.boundingBox.width = xmax - xmin;
-            object.boundingBox.y = ymin-1;
-            object.boundingBox.height = ymax - ymin;
-
-            CV_Assert(xmin != 0);
-            CV_Assert(xmax > xmin);
-            CV_Assert(ymin != 0);
-            CV_Assert(ymax > ymin);
-
-
-            //object tags -------------
-
-            if (extractXMLBlock(object_contents, "difficult", 0, tag_contents) != -1)
-            {
-                object_d.difficult = (tag_contents == "1");
-            } else object_d.difficult = false;
-            if (extractXMLBlock(object_contents, "occluded", 0, tag_contents) != -1)
-            {
-                object_d.occluded = (tag_contents == "1");
-            } else object_d.occluded = false;
-            if (extractXMLBlock(object_contents, "truncated", 0, tag_contents) != -1)
-            {
-                object_d.truncated = (tag_contents == "1");
-            } else object_d.truncated = false;
-            if (extractXMLBlock(object_contents, "pose", 0, tag_contents) != -1)
-            {
-                if (tag_contents == "Frontal") object_d.pose = CV_VOC_POSE_FRONTAL;
-                if (tag_contents == "Rear") object_d.pose = CV_VOC_POSE_REAR;
-                if (tag_contents == "Left") object_d.pose = CV_VOC_POSE_LEFT;
-                if (tag_contents == "Right") object_d.pose = CV_VOC_POSE_RIGHT;
-            }
-
-            //add to array of objects
-            objects.push_back(object);
-            object_data.push_back(object_d);
-
-            //extract next 'object' block from file if it exists
-            searchpos = extractXMLBlock(contents, "object", searchpos, object_contents);
-        }
-    }
-}
-
-//Converts an image identifier string in the format YYYY_XXXXXX to a single index integer of form XXXXXXYYYY
-//where Y represents a year and returns the image path
-//----------------------------------------------------------------------------------------------------------
-string VocData::getImagePath(const string& input_str)
-{
-    string path = m_image_path;
-    path.replace(path.find("%s"),2,input_str);
-    return path;
-}
-
-//Tests two boundary boxes for overlap (using the intersection over union metric) and returns the overlap if the objects
-//defined by the two bounding boxes are considered to be matched according to the criterion outlined in
-//the VOC documentation [namely intersection/union > some threshold] otherwise returns -1.0 (no match)
-//----------------------------------------------------------------------------------------------------------
-float VocData::testBoundingBoxesForOverlap(const Rect detection, const Rect ground_truth)
-{
-    int detection_x2 = detection.x + detection.width;
-    int detection_y2 = detection.y + detection.height;
-    int ground_truth_x2 = ground_truth.x + ground_truth.width;
-    int ground_truth_y2 = ground_truth.y + ground_truth.height;
-    //first calculate the boundaries of the intersection of the rectangles
-    int intersection_x = std::max(detection.x, ground_truth.x); //rightmost left
-    int intersection_y = std::max(detection.y, ground_truth.y); //bottommost top
-    int intersection_x2 = std::min(detection_x2, ground_truth_x2); //leftmost right
-    int intersection_y2 = std::min(detection_y2, ground_truth_y2); //topmost bottom
-    //then calculate the width and height of the intersection rect
-    int intersection_width = intersection_x2 - intersection_x + 1;
-    int intersection_height = intersection_y2 - intersection_y + 1;
-    //if there is no overlap then return false straight away
-    if ((intersection_width <= 0) || (intersection_height <= 0)) return -1.0;
-    //otherwise calculate the intersection
-    int intersection_area = intersection_width*intersection_height;
-
-    //now calculate the union
-    int union_area = (detection.width+1)*(detection.height+1) + (ground_truth.width+1)*(ground_truth.height+1) - intersection_area;
-
-    //calculate the intersection over union and use as threshold as per VOC documentation
-    float overlap = static_cast<float>(intersection_area)/static_cast<float>(union_area);
-    if (overlap > m_min_overlap)
-    {
-        return overlap;
-    } else {
-        return -1.0;
-    }
-}
-
-//Extracts the object class and dataset from the filename of a VOC standard results text file, which takes
-//the format 'comp<n>_{cls/det}_<dataset>_<objclass>.txt'
-//----------------------------------------------------------------------------------------------------------
-void VocData::extractDataFromResultsFilename(const string& input_file, string& class_name, string& dataset_name)
-{
-    string input_file_std = checkFilenamePathsep(input_file);
-
-    size_t fnamestart = input_file_std.rfind("/");
-    size_t fnameend = input_file_std.rfind(".txt");
-
-    if ((fnamestart == input_file_std.npos) || (fnameend == input_file_std.npos))
-        CV_Error(Error::StsError,"Could not extract filename of results file.");
-
-    ++fnamestart;
-    if (fnamestart >= fnameend)
-        CV_Error(Error::StsError,"Could not extract filename of results file.");
-
-    //extract dataset and class names, triggering exception if the filename format is not correct
-    string filename = input_file_std.substr(fnamestart, fnameend-fnamestart);
-    size_t datasetstart = filename.find("_");
-    datasetstart = filename.find("_",datasetstart+1);
-    size_t classstart = filename.find("_",datasetstart+1);
-    //allow for appended index after a further '_' by discarding this part if it exists
-    size_t classend = filename.find("_",classstart+1);
-    if (classend == filename.npos) classend = filename.size();
-    if ((datasetstart == filename.npos) || (classstart == filename.npos))
-        CV_Error(Error::StsError,"Error parsing results filename. Is it in standard format of 'comp<n>_{cls/det}_<dataset>_<objclass>.txt'?");
-    ++datasetstart;
-    ++classstart;
-    if (((datasetstart-classstart) < 1) || ((classend-datasetstart) < 1))
-        CV_Error(Error::StsError,"Error parsing results filename. Is it in standard format of 'comp<n>_{cls/det}_<dataset>_<objclass>.txt'?");
-
-    dataset_name = filename.substr(datasetstart,classstart-datasetstart-1);
-    class_name = filename.substr(classstart,classend-classstart);
-}
-
-bool VocData::getClassifierGroundTruthImage(const string& obj_class, const string& id)
-{
-    /* if the classifier ground truth data for all images of the current class has not been loaded yet, load it now */
-    if (m_classifier_gt_all_ids.empty() || (m_classifier_gt_class != obj_class))
-    {
-        m_classifier_gt_all_ids.clear();
-        m_classifier_gt_all_present.clear();
-        m_classifier_gt_class = obj_class;
-        for (int i=0; i<2; ++i) //run twice (once over test set and once over training set)
-        {
-            //generate the filename of the classification ground-truth textfile for the object class
-            string gtFilename = m_class_imageset_path;
-            gtFilename.replace(gtFilename.find("%s"),2,obj_class);
-            if (i == 0)
-            {
-                gtFilename.replace(gtFilename.find("%s"),2,m_train_set);
-            } else {
-                gtFilename.replace(gtFilename.find("%s"),2,m_test_set);
-            }
-
-            //parse the ground truth file, storing in two separate vectors
-            //for the image code and the ground truth value
-            vector<string> image_codes;
-            vector<char> object_present;
-            readClassifierGroundTruth(gtFilename, image_codes, object_present);
-
-            m_classifier_gt_all_ids.insert(m_classifier_gt_all_ids.end(),image_codes.begin(),image_codes.end());
-            m_classifier_gt_all_present.insert(m_classifier_gt_all_present.end(),object_present.begin(),object_present.end());
-
-            CV_Assert(m_classifier_gt_all_ids.size() == m_classifier_gt_all_present.size());
-        }
-    }
-
-
-    //search for the image code
-    vector<string>::iterator it = find (m_classifier_gt_all_ids.begin(), m_classifier_gt_all_ids.end(), id);
-    if (it != m_classifier_gt_all_ids.end())
-    {
-        //image found, so return corresponding ground truth
-        return m_classifier_gt_all_present[std::distance(m_classifier_gt_all_ids.begin(),it)] != 0;
-    } else {
-        string err_msg = "could not find classifier ground truth for image '" + id + "' and class '" + obj_class + "'";
-        CV_Error(Error::StsError,err_msg.c_str());
-    }
-
-    return true;
-}
-
-//-------------------------------------------------------------------
-// Protected Functions (utility) ------------------------------------
-//-------------------------------------------------------------------
-
-//returns a vector containing indexes of the input vector in sorted ascending/descending order
-void VocData::getSortOrder(const vector<float>& values, vector<size_t>& order, bool descending)
-{
-    /* 1. store sorting order in 'order_pair' */
-    vector<std::pair<size_t, vector<float>::const_iterator> > order_pair(values.size());
-
-    size_t n = 0;
-    for (vector<float>::const_iterator it = values.begin(); it != values.end(); ++it, ++n)
-        order_pair[n] = make_pair(n, it);
-
-    std::sort(order_pair.begin(),order_pair.end(),orderingSorter());
-    if (descending == false) std::reverse(order_pair.begin(),order_pair.end());
-
-    vector<size_t>(order_pair.size()).swap(order);
-    for (size_t i = 0; i < order_pair.size(); ++i)
-    {
-        order[i] = order_pair[i].first;
-    }
-}
-
-void VocData::readFileToString(const string filename, string& file_contents)
-{
-    std::ifstream ifs(filename.c_str());
-    if (!ifs.is_open()) CV_Error(Error::StsError,"could not open text file");
-
-    stringstream oss;
-    oss << ifs.rdbuf();
-
-    file_contents = oss.str();
-}
-
-int VocData::stringToInteger(const string input_str)
-{
-    int result = 0;
-
-    stringstream ss(input_str);
-    if ((ss >> result).fail())
-    {
-        CV_Error(Error::StsBadArg,"could not perform string to integer conversion");
-    }
-    return result;
-}
-
-string VocData::integerToString(const int input_int)
-{
-    string result;
-
-    stringstream ss;
-    if ((ss << input_int).fail())
-    {
-        CV_Error(Error::StsBadArg,"could not perform integer to string conversion");
-    }
-    result = ss.str();
-    return result;
-}
-
-string VocData::checkFilenamePathsep( const string filename, bool add_trailing_slash )
-{
-    string filename_new = filename;
-
-    size_t pos = filename_new.find("\\\\");
-    while (pos != filename_new.npos)
-    {
-        filename_new.replace(pos,2,"/");
-        pos = filename_new.find("\\\\", pos);
-    }
-    pos = filename_new.find("\\");
-    while (pos != filename_new.npos)
-    {
-        filename_new.replace(pos,1,"/");
-        pos = filename_new.find("\\", pos);
-    }
-    if (add_trailing_slash)
-    {
-        //add training slash if this is missing
-        if (filename_new.rfind("/") != filename_new.length()-1) filename_new += "/";
-    }
-
-    return filename_new;
-}
-
-void VocData::convertImageCodesToObdImages(const vector<string>& image_codes, vector<ObdImage>& images)
-{
-    images.clear();
-    images.reserve(image_codes.size());
-
-    string path;
-    //transfer to output arrays
-    for (size_t i = 0; i < image_codes.size(); ++i)
-    {
-        //generate image path and indices from extracted string code
-        path = getImagePath(image_codes[i]);
-        images.push_back(ObdImage(image_codes[i], path));
-    }
-}
-
-//Extract text from within a given tag from an XML file
-//-----------------------------------------------------
-//INPUTS:
-// - src            XML source file
-// - tag            XML tag delimiting block to extract
-// - searchpos      position within src at which to start search
-//OUTPUTS:
-// - tag_contents   text extracted between <tag> and </tag> tags
-//RETURN VALUE:
-// - the position of the final character extracted in tag_contents within src
-//      (can be used to call extractXMLBlock recursively to extract multiple blocks)
-//      returns -1 if the tag could not be found
-int VocData::extractXMLBlock(const string src, const string tag, const int searchpos, string& tag_contents)
-{
-    size_t startpos, next_startpos, endpos;
-    int embed_count = 1;
-
-    //find position of opening tag
-    startpos = src.find("<" + tag + ">", searchpos);
-    if (startpos == string::npos) return -1;
-
-    //initialize endpos -
-    // start searching for end tag anywhere after opening tag
-    endpos = startpos;
-
-    //find position of next opening tag
-    next_startpos = src.find("<" + tag + ">", startpos+1);
-
-    //match opening tags with closing tags, and only
-    //accept final closing tag of same level as original
-    //opening tag
-    while (embed_count > 0)
-    {
-        endpos = src.find("</" + tag + ">", endpos+1);
-        if (endpos == string::npos) return -1;
-
-        //the next code is only executed if there are embedded tags with the same name
-        if (next_startpos != string::npos)
-        {
-            while (next_startpos<endpos)
-            {
-                //counting embedded start tags
-                ++embed_count;
-                next_startpos = src.find("<" + tag + ">", next_startpos+1);
-                if (next_startpos == string::npos) break;
-            }
-        }
-        //passing end tag so decrement nesting level
-        --embed_count;
-    }
-
-    //finally, extract the tag region
-    startpos += tag.length() + 2;
-    if (startpos > src.length()) return -1;
-    if (endpos > src.length()) return -1;
-    tag_contents = src.substr(startpos,endpos-startpos);
-    return static_cast<int>(endpos);
-}
-
-/****************************************************************************************\
-*                            Sample on image classification                             *
-\****************************************************************************************/
-//
-// This part of the code was a little refactor
-//
-struct DDMParams
-{
-    DDMParams() : detectorType("SURF"), descriptorType("SURF"), matcherType("BruteForce") {}
-    DDMParams( const string _detectorType, const string _descriptorType, const string& _matcherType ) :
-        detectorType(_detectorType), descriptorType(_descriptorType), matcherType(_matcherType){}
-    void read( const FileNode& fn )
-    {
-        fn["detectorType"] >> detectorType;
-        fn["descriptorType"] >> descriptorType;
-        fn["matcherType"] >> matcherType;
-    }
-    void write( FileStorage& fs ) const
-    {
-        fs << "detectorType" << detectorType;
-        fs << "descriptorType" << descriptorType;
-        fs << "matcherType" << matcherType;
-    }
-    void print() const
-    {
-        cout << "detectorType: " << detectorType << endl;
-        cout << "descriptorType: " << descriptorType << endl;
-        cout << "matcherType: " << matcherType << endl;
-    }
-
-    string detectorType;
-    string descriptorType;
-    string matcherType;
-};
-
-struct VocabTrainParams
-{
-    VocabTrainParams() : trainObjClass("chair"), vocabSize(1000), memoryUse(200), descProportion(0.3f) {}
-    VocabTrainParams( const string _trainObjClass, size_t _vocabSize, size_t _memoryUse, float _descProportion ) :
-            trainObjClass(_trainObjClass), vocabSize((int)_vocabSize), memoryUse((int)_memoryUse), descProportion(_descProportion) {}
-    void read( const FileNode& fn )
-    {
-        fn["trainObjClass"] >> trainObjClass;
-        fn["vocabSize"] >> vocabSize;
-        fn["memoryUse"] >> memoryUse;
-        fn["descProportion"] >> descProportion;
-    }
-    void write( FileStorage& fs ) const
-    {
-        fs << "trainObjClass" << trainObjClass;
-        fs << "vocabSize" << vocabSize;
-        fs << "memoryUse" << memoryUse;
-        fs << "descProportion" << descProportion;
-    }
-    void print() const
-    {
-        cout << "trainObjClass: " << trainObjClass << endl;
-        cout << "vocabSize: " << vocabSize << endl;
-        cout << "memoryUse: " << memoryUse << endl;
-        cout << "descProportion: " << descProportion << endl;
-    }
-
-
-    string trainObjClass; // Object class used for training visual vocabulary.
-                          // It shouldn't matter which object class is specified here - visual vocab will still be the same.
-    int vocabSize; //number of visual words in vocabulary to train
-    int memoryUse; // Memory to preallocate (in MB) when training vocab.
-                   // Change this depending on the size of the dataset/available memory.
-    float descProportion; // Specifies the number of descriptors to use from each image as a proportion of the total num descs.
-};
-
-struct SVMTrainParamsExt
-{
-    SVMTrainParamsExt() : descPercent(0.5f), targetRatio(0.4f), balanceClasses(true) {}
-    SVMTrainParamsExt( float _descPercent, float _targetRatio, bool _balanceClasses ) :
-            descPercent(_descPercent), targetRatio(_targetRatio), balanceClasses(_balanceClasses) {}
-    void read( const FileNode& fn )
-    {
-        fn["descPercent"] >> descPercent;
-        fn["targetRatio"] >> targetRatio;
-        fn["balanceClasses"] >> balanceClasses;
-    }
-    void write( FileStorage& fs ) const
-    {
-        fs << "descPercent" << descPercent;
-        fs << "targetRatio" << targetRatio;
-        fs << "balanceClasses" << balanceClasses;
-    }
-    void print() const
-    {
-        cout << "descPercent: " << descPercent << endl;
-        cout << "targetRatio: " << targetRatio << endl;
-        cout << "balanceClasses: " << balanceClasses << endl;
-    }
-
-    float descPercent; // Percentage of extracted descriptors to use for training.
-    float targetRatio; // Try to get this ratio of positive to negative samples (minimum).
-    bool balanceClasses;    // Balance class weights by number of samples in each (if true cSvmTrainTargetRatio is ignored).
-};
-
-static void readUsedParams( const FileNode& fn, string& vocName, DDMParams& ddmParams, VocabTrainParams& vocabTrainParams, SVMTrainParamsExt& svmTrainParamsExt )
-{
-    fn["vocName"] >> vocName;
-
-    FileNode currFn = fn;
-
-    currFn = fn["ddmParams"];
-    ddmParams.read( currFn );
-
-    currFn = fn["vocabTrainParams"];
-    vocabTrainParams.read( currFn );
-
-    currFn = fn["svmTrainParamsExt"];
-    svmTrainParamsExt.read( currFn );
-}
-
-static void writeUsedParams( FileStorage& fs, const string& vocName, const DDMParams& ddmParams, const VocabTrainParams& vocabTrainParams, const SVMTrainParamsExt& svmTrainParamsExt )
-{
-    fs << "vocName" << vocName;
-
-    fs << "ddmParams" << "{";
-    ddmParams.write(fs);
-    fs << "}";
-
-    fs << "vocabTrainParams" << "{";
-    vocabTrainParams.write(fs);
-    fs << "}";
-
-    fs << "svmTrainParamsExt" << "{";
-    svmTrainParamsExt.write(fs);
-    fs << "}";
-}
-
-static void printUsedParams( const string& vocPath, const string& resDir,
-                      const DDMParams& ddmParams, const VocabTrainParams& vocabTrainParams,
-                      const SVMTrainParamsExt& svmTrainParamsExt )
-{
-    cout << "CURRENT CONFIGURATION" << endl;
-    cout << "----------------------------------------------------------------" << endl;
-    cout << "vocPath: " << vocPath << endl;
-    cout << "resDir: " << resDir << endl;
-    cout << endl; ddmParams.print();
-    cout << endl; vocabTrainParams.print();
-    cout << endl; svmTrainParamsExt.print();
-    cout << "----------------------------------------------------------------" << endl << endl;
-}
-
-static bool readVocabulary( const string& filename, Mat& vocabulary )
-{
-    cout << "Reading vocabulary...";
-    FileStorage fs( filename, FileStorage::READ );
-    if( fs.isOpened() )
-    {
-        fs["vocabulary"] >> vocabulary;
-        cout << "done" << endl;
-        return true;
-    }
-    return false;
-}
-
-static bool writeVocabulary( const string& filename, const Mat& vocabulary )
-{
-    cout << "Saving vocabulary..." << endl;
-    FileStorage fs( filename, FileStorage::WRITE );
-    if( fs.isOpened() )
-    {
-        fs << "vocabulary" << vocabulary;
-        return true;
-    }
-    return false;
-}
-
-static Mat trainVocabulary( const string& filename, VocData& vocData, const VocabTrainParams& trainParams,
-                     const Ptr<FeatureDetector>& fdetector, const Ptr<DescriptorExtractor>& dextractor )
-{
-    Mat vocabulary;
-    if( !readVocabulary( filename, vocabulary) )
-    {
-        CV_Assert( dextractor->descriptorType() == CV_32FC1 );
-        const int elemSize = CV_ELEM_SIZE(dextractor->descriptorType());
-        const int descByteSize = dextractor->descriptorSize() * elemSize;
-        const int bytesInMB = 1048576;
-        const int maxDescCount = (trainParams.memoryUse * bytesInMB) / descByteSize; // Total number of descs to use for training.
-
-        cout << "Extracting VOC data..." << endl;
-        vector<ObdImage> images;
-        vector<char> objectPresent;
-        vocData.getClassImages( trainParams.trainObjClass, CV_OBD_TRAIN, images, objectPresent );
-
-        cout << "Computing descriptors..." << endl;
-        RNG& rng = theRNG();
-        TermCriteria terminate_criterion;
-        terminate_criterion.epsilon = FLT_EPSILON;
-        BOWKMeansTrainer bowTrainer( trainParams.vocabSize, terminate_criterion, 3, KMEANS_PP_CENTERS );
-
-        while( images.size() > 0 )
-        {
-            if( bowTrainer.descriptorsCount() > maxDescCount )
-            {
-#ifdef DEBUG_DESC_PROGRESS
-                cout << "Breaking due to full memory ( descriptors count = " << bowTrainer.descriptorsCount()
-                        << "; descriptor size in bytes = " << descByteSize << "; all used memory = "
-                        << bowTrainer.descriptorsCount()*descByteSize << endl;
-#endif
-                break;
-            }
-
-            // Randomly pick an image from the dataset which hasn't yet been seen
-            // and compute the descriptors from that image.
-            int randImgIdx = rng( (unsigned)images.size() );
-            Mat colorImage = imread( images[randImgIdx].path );
-            vector<KeyPoint> imageKeypoints;
-            fdetector->detect( colorImage, imageKeypoints );
-            Mat imageDescriptors;
-            dextractor->compute( colorImage, imageKeypoints, imageDescriptors );
-
-            //check that there were descriptors calculated for the current image
-            if( !imageDescriptors.empty() )
-            {
-                int descCount = imageDescriptors.rows;
-                // Extract trainParams.descProportion descriptors from the image, breaking if the 'allDescriptors' matrix becomes full
-                int descsToExtract = static_cast<int>(trainParams.descProportion * static_cast<float>(descCount));
-                // Fill mask of used descriptors
-                vector<char> usedMask( descCount, false );
-                fill( usedMask.begin(), usedMask.begin() + descsToExtract, true );
-                for( int i = 0; i < descCount; i++ )
-                {
-                    int i1 = rng(descCount), i2 = rng(descCount);
-                    char tmp = usedMask[i1]; usedMask[i1] = usedMask[i2]; usedMask[i2] = tmp;
-                }
-
-                for( int i = 0; i < descCount; i++ )
-                {
-                    if( usedMask[i] && bowTrainer.descriptorsCount() < maxDescCount )
-                        bowTrainer.add( imageDescriptors.row(i) );
-                }
-            }
-
-#ifdef DEBUG_DESC_PROGRESS
-            cout << images.size() << " images left, " << images[randImgIdx].id << " processed - "
-                    <</* descs_extracted << "/" << image_descriptors.rows << " extracted - " << */
-                    cvRound((static_cast<double>(bowTrainer.descriptorsCount())/static_cast<double>(maxDescCount))*100.0)
-                    << " % memory used" << ( imageDescriptors.empty() ? " -> no descriptors extracted, skipping" : "") << endl;
-#endif
-
-            // Delete the current element from images so it is not added again
-            images.erase( images.begin() + randImgIdx );
-        }
-
-        cout << "Maximum allowed descriptor count: " << maxDescCount << ", Actual descriptor count: " << bowTrainer.descriptorsCount() << endl;
-
-        cout << "Training vocabulary..." << endl;
-        vocabulary = bowTrainer.cluster();
-
-        if( !writeVocabulary(filename, vocabulary) )
-        {
-            cout << "Error: file " << filename << " can not be opened to write" << endl;
-            exit(-1);
-        }
-    }
-    return vocabulary;
-}
-
-static bool readBowImageDescriptor( const string& file, Mat& bowImageDescriptor )
-{
-    FileStorage fs( file, FileStorage::READ );
-    if( fs.isOpened() )
-    {
-        fs["imageDescriptor"] >> bowImageDescriptor;
-        return true;
-    }
-    return false;
-}
-
-static bool writeBowImageDescriptor( const string& file, const Mat& bowImageDescriptor )
-{
-    FileStorage fs( file, FileStorage::WRITE );
-    if( fs.isOpened() )
-    {
-        fs << "imageDescriptor" << bowImageDescriptor;
-        return true;
-    }
-    return false;
-}
-
-// Load in the bag of words vectors for a set of images, from file if possible
-static void calculateImageDescriptors( const vector<ObdImage>& images, vector<Mat>& imageDescriptors,
-                                Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
-                                const string& resPath )
-{
-    CV_Assert( !bowExtractor->getVocabulary().empty() );
-    imageDescriptors.resize( images.size() );
-
-    for( size_t i = 0; i < images.size(); i++ )
-    {
-        string filename = resPath + bowImageDescriptorsDir + "/" + images[i].id + ".xml.gz";
-        if( readBowImageDescriptor( filename, imageDescriptors[i] ) )
-        {
-#ifdef DEBUG_DESC_PROGRESS
-            cout << "Loaded bag of word vector for image " << i+1 << " of " << images.size() << " (" << images[i].id << ")" << endl;
-#endif
-        }
-        else
-        {
-            Mat colorImage = imread( images[i].path );
-#ifdef DEBUG_DESC_PROGRESS
-            cout << "Computing descriptors for image " << i+1 << " of " << images.size() << " (" << images[i].id << ")" << flush;
-#endif
-            vector<KeyPoint> keypoints;
-            fdetector->detect( colorImage, keypoints );
-#ifdef DEBUG_DESC_PROGRESS
-                cout << " + generating BoW vector" << std::flush;
-#endif
-            bowExtractor->compute( colorImage, keypoints, imageDescriptors[i] );
-#ifdef DEBUG_DESC_PROGRESS
-            cout << " ...DONE " << static_cast<int>(static_cast<float>(i+1)/static_cast<float>(images.size())*100.0)
-                 << " % complete" << endl;
-#endif
-            if( !imageDescriptors[i].empty() )
-            {
-                if( !writeBowImageDescriptor( filename, imageDescriptors[i] ) )
-                {
-                    cout << "Error: file " << filename << "can not be opened to write bow image descriptor" << endl;
-                    exit(-1);
-                }
-            }
-        }
-    }
-}
-
-static void removeEmptyBowImageDescriptors( vector<ObdImage>& images, vector<Mat>& bowImageDescriptors,
-                                     vector<char>& objectPresent )
-{
-    CV_Assert( !images.empty() );
-    for( int i = (int)images.size() - 1; i >= 0; i-- )
-    {
-        bool res = bowImageDescriptors[i].empty();
-        if( res )
-        {
-            cout << "Removing image " << images[i].id << " due to no descriptors..." << endl;
-            images.erase( images.begin() + i );
-            bowImageDescriptors.erase( bowImageDescriptors.begin() + i );
-            objectPresent.erase( objectPresent.begin() + i );
-        }
-    }
-}
-
-static void removeBowImageDescriptorsByCount( vector<ObdImage>& images, vector<Mat> bowImageDescriptors, vector<char> objectPresent,
-                                       const SVMTrainParamsExt& svmParamsExt, int descsToDelete )
-{
-    RNG& rng = theRNG();
-    int pos_ex = (int)std::count( objectPresent.begin(), objectPresent.end(), (char)1 );
-    int neg_ex = (int)std::count( objectPresent.begin(), objectPresent.end(), (char)0 );
-
-    while( descsToDelete != 0 )
-    {
-        int randIdx = rng((unsigned)images.size());
-
-        // Prefer positive training examples according to svmParamsExt.targetRatio if required
-        if( objectPresent[randIdx] )
-        {
-            if( (static_cast<float>(pos_ex)/static_cast<float>(neg_ex+pos_ex)  < svmParamsExt.targetRatio) &&
-                (neg_ex > 0) && (svmParamsExt.balanceClasses == false) )
-            { continue; }
-            else
-            { pos_ex--; }
-        }
-        else
-        { neg_ex--; }
-
-        images.erase( images.begin() + randIdx );
-        bowImageDescriptors.erase( bowImageDescriptors.begin() + randIdx );
-        objectPresent.erase( objectPresent.begin() + randIdx );
-
-        descsToDelete--;
-    }
-    CV_Assert( bowImageDescriptors.size() == objectPresent.size() );
-}
-
-static void setSVMParams( SVM::Params& svmParams, Mat& class_wts_cv, const Mat& responses, bool balanceClasses )
-{
-    int pos_ex = countNonZero(responses == 1);
-    int neg_ex = countNonZero(responses == -1);
-    cout << pos_ex << " positive training samples; " << neg_ex << " negative training samples" << endl;
-
-    svmParams.svmType = SVM::C_SVC;
-    svmParams.kernelType = SVM::RBF;
-    if( balanceClasses )
-    {
-        Mat class_wts( 2, 1, CV_32FC1 );
-        // The first training sample determines the '+1' class internally, even if it is negative,
-        // so store whether this is the case so that the class weights can be reversed accordingly.
-        bool reversed_classes = (responses.at<float>(0) < 0.f);
-        if( reversed_classes == false )
-        {
-            class_wts.at<float>(0) = static_cast<float>(pos_ex)/static_cast<float>(pos_ex+neg_ex); // weighting for costs of positive class + 1 (i.e. cost of false positive - larger gives greater cost)
-            class_wts.at<float>(1) = static_cast<float>(neg_ex)/static_cast<float>(pos_ex+neg_ex); // weighting for costs of negative class - 1 (i.e. cost of false negative)
-        }
-        else
-        {
-            class_wts.at<float>(0) = static_cast<float>(neg_ex)/static_cast<float>(pos_ex+neg_ex);
-            class_wts.at<float>(1) = static_cast<float>(pos_ex)/static_cast<float>(pos_ex+neg_ex);
-        }
-        class_wts_cv = class_wts;
-        svmParams.classWeights = class_wts_cv;
-    }
-}
-
-static void setSVMTrainAutoParams( ParamGrid& c_grid, ParamGrid& gamma_grid,
-                            ParamGrid& p_grid, ParamGrid& nu_grid,
-                            ParamGrid& coef_grid, ParamGrid& degree_grid )
-{
-    c_grid = SVM::getDefaultGrid(SVM::C);
-
-    gamma_grid = SVM::getDefaultGrid(SVM::GAMMA);
-
-    p_grid = SVM::getDefaultGrid(SVM::P);
-    p_grid.logStep = 0;
-
-    nu_grid = SVM::getDefaultGrid(SVM::NU);
-    nu_grid.logStep = 0;
-
-    coef_grid = SVM::getDefaultGrid(SVM::COEF);
-    coef_grid.logStep = 0;
-
-    degree_grid = SVM::getDefaultGrid(SVM::DEGREE);
-    degree_grid.logStep = 0;
-}
-
-static Ptr<SVM> trainSVMClassifier( const SVMTrainParamsExt& svmParamsExt, const string& objClassName, VocData& vocData,
-                         Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
-                         const string& resPath )
-{
-    /* first check if a previously trained svm for the current class has been saved to file */
-    string svmFilename = resPath + svmsDir + "/" + objClassName + ".xml.gz";
-    Ptr<SVM> svm;
-
-    FileStorage fs( svmFilename, FileStorage::READ);
-    if( fs.isOpened() )
-    {
-        cout << "*** LOADING SVM CLASSIFIER FOR CLASS " << objClassName << " ***" << endl;
-        svm = StatModel::load<SVM>( svmFilename );
-    }
-    else
-    {
-        cout << "*** TRAINING CLASSIFIER FOR CLASS " << objClassName << " ***" << endl;
-        cout << "CALCULATING BOW VECTORS FOR TRAINING SET OF " << objClassName << "..." << endl;
-
-        // Get classification ground truth for images in the training set
-        vector<ObdImage> images;
-        vector<Mat> bowImageDescriptors;
-        vector<char> objectPresent;
-        vocData.getClassImages( objClassName, CV_OBD_TRAIN, images, objectPresent );
-
-        // Compute the bag of words vector for each image in the training set.
-        calculateImageDescriptors( images, bowImageDescriptors, bowExtractor, fdetector, resPath );
-
-        // Remove any images for which descriptors could not be calculated
-        removeEmptyBowImageDescriptors( images, bowImageDescriptors, objectPresent );
-
-        CV_Assert( svmParamsExt.descPercent > 0.f && svmParamsExt.descPercent <= 1.f );
-        if( svmParamsExt.descPercent < 1.f )
-        {
-            int descsToDelete = static_cast<int>(static_cast<float>(images.size())*(1.0-svmParamsExt.descPercent));
-
-            cout << "Using " << (images.size() - descsToDelete) << " of " << images.size() <<
-                    " descriptors for training (" << svmParamsExt.descPercent*100.0 << " %)" << endl;
-            removeBowImageDescriptorsByCount( images, bowImageDescriptors, objectPresent, svmParamsExt, descsToDelete );
-        }
-
-        // Prepare the input matrices for SVM training.
-        Mat trainData( (int)images.size(), bowExtractor->getVocabulary().rows, CV_32FC1 );
-        Mat responses( (int)images.size(), 1, CV_32SC1 );
-
-        // Transfer bag of words vectors and responses across to the training data matrices
-        for( size_t imageIdx = 0; imageIdx < images.size(); imageIdx++ )
-        {
-            // Transfer image descriptor (bag of words vector) to training data matrix
-            Mat submat = trainData.row((int)imageIdx);
-            if( bowImageDescriptors[imageIdx].cols != bowExtractor->descriptorSize() )
-            {
-                cout << "Error: computed bow image descriptor size " << bowImageDescriptors[imageIdx].cols
-                     << " differs from vocabulary size" << bowExtractor->getVocabulary().cols << endl;
-                exit(-1);
-            }
-            bowImageDescriptors[imageIdx].copyTo( submat );
-
-            // Set response value
-            responses.at<int>((int)imageIdx) = objectPresent[imageIdx] ? 1 : -1;
-        }
-
-        cout << "TRAINING SVM FOR CLASS ..." << objClassName << "..." << endl;
-        SVM::Params svmParams;
-        Mat class_wts_cv;
-        setSVMParams( svmParams, class_wts_cv, responses, svmParamsExt.balanceClasses );
-        svm = SVM::create(svmParams);
-        ParamGrid c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid;
-        setSVMTrainAutoParams( c_grid, gamma_grid,  p_grid, nu_grid, coef_grid, degree_grid );
-
-        svm->trainAuto(TrainData::create(trainData, ROW_SAMPLE, responses), 10,
-                       c_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid);
-        cout << "SVM TRAINING FOR CLASS " << objClassName << " COMPLETED" << endl;
-
-        svm->save( svmFilename );
-        cout << "SAVED CLASSIFIER TO FILE" << endl;
-    }
-    return svm;
-}
-
-static void computeConfidences( const Ptr<SVM>& svm, const string& objClassName, VocData& vocData,
-                         Ptr<BOWImgDescriptorExtractor>& bowExtractor, const Ptr<FeatureDetector>& fdetector,
-                         const string& resPath )
-{
-    cout << "*** CALCULATING CONFIDENCES FOR CLASS " << objClassName << " ***" << endl;
-    cout << "CALCULATING BOW VECTORS FOR TEST SET OF " << objClassName << "..." << endl;
-    // Get classification ground truth for images in the test set
-    vector<ObdImage> images;
-    vector<Mat> bowImageDescriptors;
-    vector<char> objectPresent;
-    vocData.getClassImages( objClassName, CV_OBD_TEST, images, objectPresent );
-
-    // Compute the bag of words vector for each image in the test set
-    calculateImageDescriptors( images, bowImageDescriptors, bowExtractor, fdetector, resPath );
-    // Remove any images for which descriptors could not be calculated
-    removeEmptyBowImageDescriptors( images, bowImageDescriptors, objectPresent);
-
-    // Use the bag of words vectors to calculate classifier output for each image in test set
-    cout << "CALCULATING CONFIDENCE SCORES FOR CLASS " << objClassName << "..." << endl;
-    vector<float> confidences( images.size() );
-    float signMul = 1.f;
-    for( size_t imageIdx = 0; imageIdx < images.size(); imageIdx++ )
-    {
-        if( imageIdx == 0 )
-        {
-            // In the first iteration, determine the sign of the positive class
-            float classVal = confidences[imageIdx] = svm->predict( bowImageDescriptors[imageIdx], noArray(), 0 );
-            float scoreVal = confidences[imageIdx] = svm->predict( bowImageDescriptors[imageIdx], noArray(), StatModel::RAW_OUTPUT );
-            signMul = (classVal < 0) == (scoreVal < 0) ? 1.f : -1.f;
-        }
-        // svm output of decision function
-        confidences[imageIdx] = signMul * svm->predict( bowImageDescriptors[imageIdx], noArray(), StatModel::RAW_OUTPUT );
-    }
-
-    cout << "WRITING QUERY RESULTS TO VOC RESULTS FILE FOR CLASS " << objClassName << "..." << endl;
-    vocData.writeClassifierResultsFile( resPath + plotsDir, objClassName, CV_OBD_TEST, images, confidences, 1, true );
-
-    cout << "DONE - " << objClassName << endl;
-    cout << "---------------------------------------------------------------" << endl;
-}
-
-static void computeGnuPlotOutput( const string& resPath, const string& objClassName, VocData& vocData )
-{
-    vector<float> precision, recall;
-    float ap;
-
-    const string resultFile = vocData.getResultsFilename( objClassName, CV_VOC_TASK_CLASSIFICATION, CV_OBD_TEST);
-    const string plotFile = resultFile.substr(0, resultFile.size()-4) + ".plt";
-
-    cout << "Calculating precision recall curve for class '" <<objClassName << "'" << endl;
-    vocData.calcClassifierPrecRecall( resPath + plotsDir + "/" + resultFile, precision, recall, ap, true );
-    cout << "Outputting to GNUPlot file..." << endl;
-    vocData.savePrecRecallToGnuplot( resPath + plotsDir + "/" + plotFile, precision, recall, ap, objClassName, CV_VOC_PLOT_PNG );
-}
-
-
-
-
-int main(int argc, char** argv)
-{
-    if( argc != 3 && argc != 6 )
-    {
-        help(argv);
-        return -1;
-    }
-
-    cv::initModule_nonfree();
-
-    const string vocPath = argv[1], resPath = argv[2];
-
-    // Read or set default parameters
-    string vocName;
-    DDMParams ddmParams;
-    VocabTrainParams vocabTrainParams;
-    SVMTrainParamsExt svmTrainParamsExt;
-
-    makeUsedDirs( resPath );
-
-    FileStorage paramsFS( resPath + "/" + paramsFile, FileStorage::READ );
-    if( paramsFS.isOpened() )
-    {
-       readUsedParams( paramsFS.root(), vocName, ddmParams, vocabTrainParams, svmTrainParamsExt );
-       CV_Assert( vocName == getVocName(vocPath) );
-    }
-    else
-    {
-        vocName = getVocName(vocPath);
-        if( argc!= 6 )
-        {
-            cout << "Feature detector, descriptor extractor, descriptor matcher must be set" << endl;
-            return -1;
-        }
-        ddmParams = DDMParams( argv[3], argv[4], argv[5] ); // from command line
-        // vocabTrainParams and svmTrainParamsExt is set by defaults
-        paramsFS.open( resPath + "/" + paramsFile, FileStorage::WRITE );
-        if( paramsFS.isOpened() )
-        {
-            writeUsedParams( paramsFS, vocName, ddmParams, vocabTrainParams, svmTrainParamsExt );
-            paramsFS.release();
-        }
-        else
-        {
-            cout << "File " << (resPath + "/" + paramsFile) << "can not be opened to write" << endl;
-            return -1;
-        }
-    }
-
-    // Create detector, descriptor, matcher.
-    Ptr<FeatureDetector> featureDetector = FeatureDetector::create( ddmParams.detectorType );
-    Ptr<DescriptorExtractor> descExtractor = DescriptorExtractor::create( ddmParams.descriptorType );
-    Ptr<BOWImgDescriptorExtractor> bowExtractor;
-    if( !featureDetector || !descExtractor )
-    {
-        cout << "featureDetector or descExtractor was not created" << endl;
-        return -1;
-    }
-    {
-        Ptr<DescriptorMatcher> descMatcher = DescriptorMatcher::create( ddmParams.matcherType );
-        if( !featureDetector || !descExtractor || !descMatcher )
-        {
-            cout << "descMatcher was not created" << endl;
-            return -1;
-        }
-        bowExtractor = makePtr<BOWImgDescriptorExtractor>( descExtractor, descMatcher );
-    }
-
-    // Print configuration to screen
-    printUsedParams( vocPath, resPath, ddmParams, vocabTrainParams, svmTrainParamsExt );
-    // Create object to work with VOC
-    VocData vocData( vocPath, false );
-
-    // 1. Train visual word vocabulary if a pre-calculated vocabulary file doesn't already exist from previous run
-    Mat vocabulary = trainVocabulary( resPath + "/" + vocabularyFile, vocData, vocabTrainParams,
-                                      featureDetector, descExtractor );
-    bowExtractor->setVocabulary( vocabulary );
-
-    // 2. Train a classifier and run a sample query for each object class
-    const vector<string>& objClasses = vocData.getObjectClasses(); // object class list
-    for( size_t classIdx = 0; classIdx < objClasses.size(); ++classIdx )
-    {
-        // Train a classifier on train dataset
-        Ptr<SVM> svm = trainSVMClassifier( svmTrainParamsExt, objClasses[classIdx], vocData,
-                                           bowExtractor, featureDetector, resPath );
-
-        // Now use the classifier over all images on the test dataset and rank according to score order
-        // also calculating precision-recall etc.
-        computeConfidences( svm, objClasses[classIdx], vocData,
-                            bowExtractor, featureDetector, resPath );
-        // Calculate precision/recall/ap and use GNUPlot to output to a pdf file
-        computeGnuPlotOutput( resPath, objClasses[classIdx], vocData );
-    }
-    return 0;
-}
diff --git a/samples/cpp/descriptor_extractor_matcher.cpp b/samples/cpp/descriptor_extractor_matcher.cpp
deleted file mode 100644 (file)
index 6f25ac4..0000000
+++ /dev/null
@@ -1,305 +0,0 @@
-#include "opencv2/imgcodecs.hpp"
-#include "opencv2/highgui/highgui.hpp"
-#include "opencv2/calib3d/calib3d.hpp"
-#include "opencv2/imgproc/imgproc.hpp"
-#include "opencv2/features2d/features2d.hpp"
-#include "opencv2/nonfree/nonfree.hpp"
-
-#include <iostream>
-
-using namespace cv;
-using namespace std;
-
-static void help(char** argv)
-{
-    cout << "\nThis program demonstrats keypoint finding and matching between 2 images using features2d framework.\n"
-     << "   In one case, the 2nd image is synthesized by homography from the first, in the second case, there are 2 images\n"
-     << "\n"
-     << "Case1: second image is obtained from the first (given) image using random generated homography matrix\n"
-     << argv[0] << " [detectorType] [descriptorType] [matcherType] [matcherFilterType] [image] [evaluate(0 or 1)]\n"
-     << "Example of case1:\n"
-     << "./descriptor_extractor_matcher SURF SURF FlannBased NoneFilter cola.jpg 0\n"
-     << "\n"
-     << "Case2: both images are given. If ransacReprojThreshold>=0 then homography matrix are calculated\n"
-     << argv[0] << " [detectorType] [descriptorType] [matcherType] [matcherFilterType] [image1] [image2] [ransacReprojThreshold]\n"
-     << "\n"
-     << "Matches are filtered using homography matrix in case1 and case2 (if ransacReprojThreshold>=0)\n"
-     << "Example of case2:\n"
-     << "./descriptor_extractor_matcher SURF SURF BruteForce CrossCheckFilter cola1.jpg cola2.jpg 3\n"
-     << "\n"
-     << "Possible detectorType values: see in documentation on createFeatureDetector().\n"
-     << "Possible descriptorType values: see in documentation on createDescriptorExtractor().\n"
-     << "Possible matcherType values: see in documentation on createDescriptorMatcher().\n"
-     << "Possible matcherFilterType values: NoneFilter, CrossCheckFilter." << endl;
-}
-
-#define DRAW_RICH_KEYPOINTS_MODE     0
-#define DRAW_OUTLIERS_MODE           0
-
-const string winName = "correspondences";
-
-enum { NONE_FILTER = 0, CROSS_CHECK_FILTER = 1 };
-
-static int getMatcherFilterType( const string& str )
-{
-    if( str == "NoneFilter" )
-        return NONE_FILTER;
-    if( str == "CrossCheckFilter" )
-        return CROSS_CHECK_FILTER;
-    CV_Error(Error::StsBadArg, "Invalid filter name");
-    return -1;
-}
-
-static void simpleMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
-                     const Mat& descriptors1, const Mat& descriptors2,
-                     vector<DMatch>& matches12 )
-{
-    vector<DMatch> matches;
-    descriptorMatcher->match( descriptors1, descriptors2, matches12 );
-}
-
-static void crossCheckMatching( Ptr<DescriptorMatcher>& descriptorMatcher,
-                         const Mat& descriptors1, const Mat& descriptors2,
-                         vector<DMatch>& filteredMatches12, int knn=1 )
-{
-    filteredMatches12.clear();
-    vector<vector<DMatch> > matches12, matches21;
-    descriptorMatcher->knnMatch( descriptors1, descriptors2, matches12, knn );
-    descriptorMatcher->knnMatch( descriptors2, descriptors1, matches21, knn );
-    for( size_t m = 0; m < matches12.size(); m++ )
-    {
-        bool findCrossCheck = false;
-        for( size_t fk = 0; fk < matches12[m].size(); fk++ )
-        {
-            DMatch forward = matches12[m][fk];
-
-            for( size_t bk = 0; bk < matches21[forward.trainIdx].size(); bk++ )
-            {
-                DMatch backward = matches21[forward.trainIdx][bk];
-                if( backward.trainIdx == forward.queryIdx )
-                {
-                    filteredMatches12.push_back(forward);
-                    findCrossCheck = true;
-                    break;
-                }
-            }
-            if( findCrossCheck ) break;
-        }
-    }
-}
-
-static void warpPerspectiveRand( const Mat& src, Mat& dst, Mat& H, RNG& rng )
-{
-    H.create(3, 3, CV_32FC1);
-    H.at<float>(0,0) = rng.uniform( 0.8f, 1.2f);
-    H.at<float>(0,1) = rng.uniform(-0.1f, 0.1f);
-    H.at<float>(0,2) = rng.uniform(-0.1f, 0.1f)*src.cols;
-    H.at<float>(1,0) = rng.uniform(-0.1f, 0.1f);
-    H.at<float>(1,1) = rng.uniform( 0.8f, 1.2f);
-    H.at<float>(1,2) = rng.uniform(-0.1f, 0.1f)*src.rows;
-    H.at<float>(2,0) = rng.uniform( -1e-4f, 1e-4f);
-    H.at<float>(2,1) = rng.uniform( -1e-4f, 1e-4f);
-    H.at<float>(2,2) = rng.uniform( 0.8f, 1.2f);
-
-    warpPerspective( src, dst, H, src.size() );
-}
-
-static void doIteration( const Mat& img1, Mat& img2, bool isWarpPerspective,
-                  vector<KeyPoint>& keypoints1, const Mat& descriptors1,
-                  Ptr<FeatureDetector>& detector, Ptr<DescriptorExtractor>& descriptorExtractor,
-                  Ptr<DescriptorMatcher>& descriptorMatcher, int matcherFilter, bool eval,
-                  double ransacReprojThreshold, RNG& rng )
-{
-    CV_Assert( !img1.empty() );
-    Mat H12;
-    if( isWarpPerspective )
-        warpPerspectiveRand(img1, img2, H12, rng );
-    else
-        CV_Assert( !img2.empty()/* && img2.cols==img1.cols && img2.rows==img1.rows*/ );
-
-    cout << endl << "< Extracting keypoints from second image..." << endl;
-    vector<KeyPoint> keypoints2;
-    detector->detect( img2, keypoints2 );
-    cout << keypoints2.size() << " points" << endl << ">" << endl;
-
-    if( !H12.empty() && eval )
-    {
-        cout << "< Evaluate feature detector..." << endl;
-        float repeatability;
-        int correspCount;
-        evaluateFeatureDetector( img1, img2, H12, &keypoints1, &keypoints2, repeatability, correspCount );
-        cout << "repeatability = " << repeatability << endl;
-        cout << "correspCount = " << correspCount << endl;
-        cout << ">" << endl;
-    }
-
-    cout << "< Computing descriptors for keypoints from second image..." << endl;
-    Mat descriptors2;
-    descriptorExtractor->compute( img2, keypoints2, descriptors2 );
-    cout << ">" << endl;
-
-    cout << "< Matching descriptors..." << endl;
-    vector<DMatch> filteredMatches;
-    switch( matcherFilter )
-    {
-    case CROSS_CHECK_FILTER :
-        crossCheckMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches, 1 );
-        break;
-    default :
-        simpleMatching( descriptorMatcher, descriptors1, descriptors2, filteredMatches );
-    }
-    cout << ">" << endl;
-
-    if( !H12.empty() && eval )
-    {
-        cout << "< Evaluate descriptor matcher..." << endl;
-        vector<Point2f> curve;
-        Ptr<GenericDescriptorMatcher> gdm = makePtr<VectorDescriptorMatcher>( descriptorExtractor, descriptorMatcher );
-        evaluateGenericDescriptorMatcher( img1, img2, H12, keypoints1, keypoints2, 0, 0, curve, gdm );
-
-        Point2f firstPoint = *curve.begin();
-        Point2f lastPoint = *curve.rbegin();
-        int prevPointIndex = -1;
-        cout << "1-precision = " << firstPoint.x << "; recall = " << firstPoint.y << endl;
-        for( float l_p = 0; l_p <= 1 + FLT_EPSILON; l_p+=0.05f )
-        {
-            int nearest = getNearestPoint( curve, l_p );
-            if( nearest >= 0 )
-            {
-                Point2f curPoint = curve[nearest];
-                if( curPoint.x > firstPoint.x && curPoint.x < lastPoint.x && nearest != prevPointIndex )
-                {
-                    cout << "1-precision = " << curPoint.x << "; recall = " << curPoint.y << endl;
-                    prevPointIndex = nearest;
-                }
-            }
-        }
-        cout << "1-precision = " << lastPoint.x << "; recall = " << lastPoint.y << endl;
-        cout << ">" << endl;
-    }
-
-    vector<int> queryIdxs( filteredMatches.size() ), trainIdxs( filteredMatches.size() );
-    for( size_t i = 0; i < filteredMatches.size(); i++ )
-    {
-        queryIdxs[i] = filteredMatches[i].queryIdx;
-        trainIdxs[i] = filteredMatches[i].trainIdx;
-    }
-
-    if( !isWarpPerspective && ransacReprojThreshold >= 0 )
-    {
-        cout << "< Computing homography (RANSAC)..." << endl;
-        vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
-        vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
-        H12 = findHomography( Mat(points1), Mat(points2), RANSAC, ransacReprojThreshold );
-        cout << ">" << endl;
-    }
-
-    Mat drawImg;
-    if( !H12.empty() ) // filter outliers
-    {
-        vector<char> matchesMask( filteredMatches.size(), 0 );
-        vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
-        vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
-        Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
-
-        double maxInlierDist = ransacReprojThreshold < 0 ? 3 : ransacReprojThreshold;
-        for( size_t i1 = 0; i1 < points1.size(); i1++ )
-        {
-            if( norm(points2[i1] - points1t.at<Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
-                matchesMask[i1] = 1;
-        }
-        // draw inliers
-        drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(0, 255, 0), Scalar(255, 0, 0), matchesMask
-#if DRAW_RICH_KEYPOINTS_MODE
-                     , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
-#endif
-                   );
-
-#if DRAW_OUTLIERS_MODE
-        // draw outliers
-        for( size_t i1 = 0; i1 < matchesMask.size(); i1++ )
-            matchesMask[i1] = !matchesMask[i1];
-        drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, Scalar(255, 0, 0), Scalar(0, 0, 255), matchesMask,
-                     DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
-#endif
-
-        cout << "Number of inliers: " << countNonZero(matchesMask) << endl;
-    }
-    else
-        drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg );
-
-    imshow( winName, drawImg );
-}
-
-
-int main(int argc, char** argv)
-{
-    if( argc != 7 && argc != 8 )
-    {
-        help(argv);
-        return -1;
-    }
-
-    cv::initModule_nonfree();
-
-    bool isWarpPerspective = argc == 7;
-    double ransacReprojThreshold = -1;
-    if( !isWarpPerspective )
-        ransacReprojThreshold = atof(argv[7]);
-
-    cout << "< Creating detector, descriptor extractor and descriptor matcher ..." << endl;
-    Ptr<FeatureDetector> detector = FeatureDetector::create( argv[1] );
-    Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create( argv[2] );
-    Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( argv[3] );
-    int mactherFilterType = getMatcherFilterType( argv[4] );
-    bool eval = !isWarpPerspective ? false : (atoi(argv[6]) == 0 ? false : true);
-    cout << ">" << endl;
-    if( !detector || !descriptorExtractor || !descriptorMatcher )
-    {
-        cout << "Can not create detector or descriptor exstractor or descriptor matcher of given types" << endl;
-        return -1;
-    }
-
-    cout << "< Reading the images..." << endl;
-    Mat img1 = imread( argv[5] ), img2;
-    if( !isWarpPerspective )
-        img2 = imread( argv[6] );
-    cout << ">" << endl;
-    if( img1.empty() || (!isWarpPerspective && img2.empty()) )
-    {
-        cout << "Can not read images" << endl;
-        return -1;
-    }
-
-    cout << endl << "< Extracting keypoints from first image..." << endl;
-    vector<KeyPoint> keypoints1;
-    detector->detect( img1, keypoints1 );
-    cout << keypoints1.size() << " points" << endl << ">" << endl;
-
-    cout << "< Computing descriptors for keypoints from first image..." << endl;
-    Mat descriptors1;
-    descriptorExtractor->compute( img1, keypoints1, descriptors1 );
-    cout << ">" << endl;
-
-    namedWindow(winName, 1);
-    RNG rng = theRNG();
-    doIteration( img1, img2, isWarpPerspective, keypoints1, descriptors1,
-                 detector, descriptorExtractor, descriptorMatcher, mactherFilterType, eval,
-                 ransacReprojThreshold, rng );
-    for(;;)
-    {
-        char c = (char)waitKey(0);
-        if( c == '\x1b' ) // esc
-        {
-            cout << "Exiting ..." << endl;
-            break;
-        }
-        else if( isWarpPerspective )
-        {
-            doIteration( img1, img2, isWarpPerspective, keypoints1, descriptors1,
-                         detector, descriptorExtractor, descriptorMatcher, mactherFilterType, eval,
-                         ransacReprojThreshold, rng );
-        }
-    }
-    return 0;
-}
diff --git a/samples/cpp/shape_transformation.cpp b/samples/cpp/shape_transformation.cpp
deleted file mode 100644 (file)
index 62e5554..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * shape_context.cpp -- Shape context demo for shape matching
- */
-
-#include "opencv2/shape.hpp"
-#include "opencv2/imgcodecs.hpp"
-#include "opencv2/highgui.hpp"
-#include "opencv2/imgproc.hpp"
-#include "opencv2/features2d/features2d.hpp"
-#include "opencv2/nonfree/nonfree.hpp"
-#include <opencv2/core/utility.hpp>
-#include <iostream>
-#include <string>
-
-using namespace std;
-using namespace cv;
-
-static void help()
-{
-    printf("\nThis program demonstrates how to use common interface for shape transformers\n"
-           "Call\n"
-           "shape_transformation [image1] [image2]\n");
-}
-
-int main(int argc, char** argv)
-{
-    help();
-    Mat img1 = imread(argv[1], IMREAD_GRAYSCALE);
-    Mat img2 = imread(argv[2], IMREAD_GRAYSCALE);
-    if(img1.empty() || img2.empty() || argc<2)
-    {
-        printf("Can't read one of the images\n");
-        return -1;
-    }
-
-    // detecting keypoints
-    SurfFeatureDetector detector(5000);
-    vector<KeyPoint> keypoints1, keypoints2;
-    detector.detect(img1, keypoints1);
-    detector.detect(img2, keypoints2);
-
-    // computing descriptors
-    SurfDescriptorExtractor extractor;
-    Mat descriptors1, descriptors2;
-    extractor.compute(img1, keypoints1, descriptors1);
-    extractor.compute(img2, keypoints2, descriptors2);
-
-    // matching descriptors
-    BFMatcher matcher(extractor.defaultNorm());
-    vector<DMatch> matches;
-    matcher.match(descriptors1, descriptors2, matches);
-
-    // drawing the results
-    namedWindow("matches", 1);
-    Mat img_matches;
-    drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
-    imshow("matches", img_matches);
-
-    // extract points
-    vector<Point2f> pts1, pts2;
-    for (size_t ii=0; ii<keypoints1.size(); ii++)
-        pts1.push_back( keypoints1[ii].pt );
-    for (size_t ii=0; ii<keypoints2.size(); ii++)
-        pts2.push_back( keypoints2[ii].pt );
-
-    // Apply TPS
-    Ptr<ThinPlateSplineShapeTransformer> mytps = createThinPlateSplineShapeTransformer(25000); //TPS with a relaxed constraint
-    mytps->estimateTransformation(pts1, pts2, matches);
-    mytps->warpImage(img2, img2);
-
-    imshow("Tranformed", img2);
-    waitKey(0);
-
-    return 0;
-}
diff --git a/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp b/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp
deleted file mode 100644 (file)
index e861a5f..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/**
- * @file SURF_FlannMatcher
- * @brief SURF detector + descriptor + FLANN Matcher
- * @author A. Huaman
- */
-
-#include <stdio.h>
-#include <iostream>
-#include "opencv2/core/core.hpp"
-#include "opencv2/features2d/features2d.hpp"
-#include "opencv2/imgcodecs.hpp"
-#include "opencv2/highgui/highgui.hpp"
-#include "opencv2/nonfree/features2d.hpp"
-
-using namespace std;
-using namespace cv;
-
-void readme();
-
-/**
- * @function main
- * @brief Main function
- */
-int main( int argc, char** argv )
-{
-  if( argc != 3 )
-  { readme(); return -1; }
-
-  Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
-  Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
-
-  if( !img_1.data || !img_2.data )
-  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }
-
-  //-- Step 1: Detect the keypoints using SURF Detector
-  int minHessian = 400;
-
-  SurfFeatureDetector detector( minHessian );
-
-  std::vector<KeyPoint> keypoints_1, keypoints_2;
-
-  detector.detect( img_1, keypoints_1 );
-  detector.detect( img_2, keypoints_2 );
-
-  //-- Step 2: Calculate descriptors (feature vectors)
-  SurfDescriptorExtractor extractor;
-
-  Mat descriptors_1, descriptors_2;
-
-  extractor.compute( img_1, keypoints_1, descriptors_1 );
-  extractor.compute( img_2, keypoints_2, descriptors_2 );
-
-  //-- Step 3: Matching descriptor vectors using FLANN matcher
-  FlannBasedMatcher matcher;
-  std::vector< DMatch > matches;
-  matcher.match( descriptors_1, descriptors_2, matches );
-
-  double max_dist = 0; double min_dist = 100;
-
-  //-- Quick calculation of max and min distances between keypoints
-  for( int i = 0; i < descriptors_1.rows; i++ )
-  { double dist = matches[i].distance;
-    if( dist < min_dist ) min_dist = dist;
-    if( dist > max_dist ) max_dist = dist;
-  }
-
-  printf("-- Max dist : %f \n", max_dist );
-  printf("-- Min dist : %f \n", min_dist );
-
-  //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
-  //-- or a small arbitary value ( 0.02 ) in the event that min_dist is very
-  //-- small)
-  //-- PS.- radiusMatch can also be used here.
-  std::vector< DMatch > good_matches;
-
-  for( int i = 0; i < descriptors_1.rows; i++ )
-  { if( matches[i].distance <= max(2*min_dist, 0.02) )
-    { good_matches.push_back( matches[i]); }
-  }
-
-  //-- Draw only "good" matches
-  Mat img_matches;
-  drawMatches( img_1, keypoints_1, img_2, keypoints_2,
-               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
-               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
-
-  //-- Show detected matches
-  imshow( "Good Matches", img_matches );
-
-  for( int i = 0; i < (int)good_matches.size(); i++ )
-  { printf( "-- Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
-
-  waitKey(0);
-
-  return 0;
-}
-
-/**
- * @function readme
- */
-void readme()
-{ std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl; }
diff --git a/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp b/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp
deleted file mode 100644 (file)
index f3d4df8..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * @file SURF_Homography
- * @brief SURF detector + descriptor + FLANN Matcher + FindHomography
- * @author A. Huaman
- */
-
-#include <stdio.h>
-#include <iostream>
-#include "opencv2/core/core.hpp"
-#include "opencv2/features2d/features2d.hpp"
-#include "opencv2/imgcodecs.hpp"
-#include "opencv2/highgui/highgui.hpp"
-#include "opencv2/calib3d/calib3d.hpp"
-#include "opencv2/nonfree/features2d.hpp"
-
-using namespace std;
-using namespace cv;
-
-void readme();
-
-/**
- * @function main
- * @brief Main function
- */
-int main( int argc, char** argv )
-{
-  if( argc != 3 )
-  { readme(); return -1; }
-
-  Mat img_object = imread( argv[1], IMREAD_GRAYSCALE );
-  Mat img_scene = imread( argv[2], IMREAD_GRAYSCALE );
-
-  if( !img_object.data || !img_scene.data )
-  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }
-
-  //-- Step 1: Detect the keypoints using SURF Detector
-  int minHessian = 400;
-
-  SurfFeatureDetector detector( minHessian );
-
-  std::vector<KeyPoint> keypoints_object, keypoints_scene;
-
-  detector.detect( img_object, keypoints_object );
-  detector.detect( img_scene, keypoints_scene );
-
-  //-- Step 2: Calculate descriptors (feature vectors)
-  SurfDescriptorExtractor extractor;
-
-  Mat descriptors_object, descriptors_scene;
-
-  extractor.compute( img_object, keypoints_object, descriptors_object );
-  extractor.compute( img_scene, keypoints_scene, descriptors_scene );
-
-  //-- Step 3: Matching descriptor vectors using FLANN matcher
-  FlannBasedMatcher matcher;
-  std::vector< DMatch > matches;
-  matcher.match( descriptors_object, descriptors_scene, matches );
-
-  double max_dist = 0; double min_dist = 100;
-
-  //-- Quick calculation of max and min distances between keypoints
-  for( int i = 0; i < descriptors_object.rows; i++ )
-  { double dist = matches[i].distance;
-    if( dist < min_dist ) min_dist = dist;
-    if( dist > max_dist ) max_dist = dist;
-  }
-
-  printf("-- Max dist : %f \n", max_dist );
-  printf("-- Min dist : %f \n", min_dist );
-
-  //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
-  std::vector< DMatch > good_matches;
-
-  for( int i = 0; i < descriptors_object.rows; i++ )
-  { if( matches[i].distance < 3*min_dist )
-    { good_matches.push_back( matches[i]); }
-  }
-
-  Mat img_matches;
-  drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
-               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
-               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
-
-
-  //-- Localize the object from img_1 in img_2
-  std::vector<Point2f> obj;
-  std::vector<Point2f> scene;
-
-  for( size_t i = 0; i < good_matches.size(); i++ )
-  {
-    //-- Get the keypoints from the good matches
-    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
-    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
-  }
-
-  Mat H = findHomography( obj, scene, RANSAC );
-
-  //-- Get the corners from the image_1 ( the object to be "detected" )
-  std::vector<Point2f> obj_corners(4);
-  obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_object.cols, 0 );
-  obj_corners[2] = Point( img_object.cols, img_object.rows ); obj_corners[3] = Point( 0, img_object.rows );
-  std::vector<Point2f> scene_corners(4);
-
-  perspectiveTransform( obj_corners, scene_corners, H);
-
-
-  //-- Draw lines between the corners (the mapped object in the scene - image_2 )
-  Point2f offset( (float)img_object.cols, 0);
-  line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
-  line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
-  line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
-  line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );
-
-  //-- Show detected matches
-  imshow( "Good Matches & Object detection", img_matches );
-
-  waitKey(0);
-
-  return 0;
-}
-
-/**
- * @function readme
- */
-void readme()
-{ std::cout << " Usage: ./SURF_Homography <img1> <img2>" << std::endl; }
diff --git a/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp b/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp
deleted file mode 100644 (file)
index 140136d..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * @file SURF_descriptor
- * @brief SURF detector + descritpor + BruteForce Matcher + drawing matches with OpenCV functions
- * @author A. Huaman
- */
-
-#include <stdio.h>
-#include <iostream>
-#include "opencv2/core/core.hpp"
-#include "opencv2/features2d/features2d.hpp"
-#include "opencv2/imgcodecs.hpp"
-#include "opencv2/highgui/highgui.hpp"
-#include "opencv2/nonfree/features2d.hpp"
-
-using namespace cv;
-
-void readme();
-
-/**
- * @function main
- * @brief Main function
- */
-int main( int argc, char** argv )
-{
-  if( argc != 3 )
-  { return -1; }
-
-  Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
-  Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
-
-  if( !img_1.data || !img_2.data )
-  { return -1; }
-
-  //-- Step 1: Detect the keypoints using SURF Detector
-  int minHessian = 400;
-
-  SurfFeatureDetector detector( minHessian );
-
-  std::vector<KeyPoint> keypoints_1, keypoints_2;
-
-  detector.detect( img_1, keypoints_1 );
-  detector.detect( img_2, keypoints_2 );
-
-  //-- Step 2: Calculate descriptors (feature vectors)
-  SurfDescriptorExtractor extractor;
-
-  Mat descriptors_1, descriptors_2;
-
-  extractor.compute( img_1, keypoints_1, descriptors_1 );
-  extractor.compute( img_2, keypoints_2, descriptors_2 );
-
-  //-- Step 3: Matching descriptor vectors with a brute force matcher
-  BFMatcher matcher(extractor.defaultNorm());
-  std::vector< DMatch > matches;
-  matcher.match( descriptors_1, descriptors_2, matches );
-
-  //-- Draw matches
-  Mat img_matches;
-  drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );
-
-  //-- Show detected matches
-  imshow("Matches", img_matches );
-
-  waitKey(0);
-
-  return 0;
-}
-
-/**
- * @function readme
- */
-void readme()
-{ std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }
diff --git a/samples/cpp/tutorial_code/features2D/SURF_detector.cpp b/samples/cpp/tutorial_code/features2D/SURF_detector.cpp
deleted file mode 100644 (file)
index 2a14bdc..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * @file SURF_detector
- * @brief SURF keypoint detection + keypoint drawing with OpenCV functions
- * @author A. Huaman
- */
-
-#include <stdio.h>
-#include <iostream>
-#include "opencv2/core/core.hpp"
-#include "opencv2/features2d/features2d.hpp"
-#include "opencv2/imgcodecs.hpp"
-#include "opencv2/highgui/highgui.hpp"
-#include "opencv2/nonfree/features2d.hpp"
-
-using namespace cv;
-
-void readme();
-
-/**
- * @function main
- * @brief Main function
- */
-int main( int argc, char** argv )
-{
-  if( argc != 3 )
-  { readme(); return -1; }
-
-  Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
-  Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
-
-  if( !img_1.data || !img_2.data )
-  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }
-
-  //-- Step 1: Detect the keypoints using SURF Detector
-  int minHessian = 400;
-
-  SurfFeatureDetector detector( minHessian );
-
-  std::vector<KeyPoint> keypoints_1, keypoints_2;
-
-  detector.detect( img_1, keypoints_1 );
-  detector.detect( img_2, keypoints_2 );
-
-  //-- Draw keypoints
-  Mat img_keypoints_1; Mat img_keypoints_2;
-
-  drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
-  drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
-
-  //-- Show detected (drawn) keypoints
-  imshow("Keypoints 1", img_keypoints_1 );
-  imshow("Keypoints 2", img_keypoints_2 );
-
-  waitKey(0);
-
-  return 0;
-}
-
-/**
- * @function readme
- */
-void readme()
-{ std::cout << " Usage: ./SURF_detector <img1> <img2>" << std::endl; }
diff --git a/samples/cpp/video_homography.cpp b/samples/cpp/video_homography.cpp
deleted file mode 100644 (file)
index 1b12fa0..0000000
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
-* video_homography.cpp
-*
-*  Created on: Oct 18, 2010
-*      Author: erublee
-*/
-
-#include "opencv2/calib3d/calib3d.hpp"
-#include "opencv2/videoio/videoio.hpp"
-#include "opencv2/highgui/highgui.hpp"
-#include "opencv2/imgproc/imgproc.hpp"
-#include "opencv2/features2d/features2d.hpp"
-#include <iostream>
-#include <list>
-#include <vector>
-
-using namespace std;
-using namespace cv;
-
-static void help(char **av)
-{
-    cout << "\nThis program demonstrated the use of features2d with the Fast corner detector and brief descriptors\n"
-        << "to track planar objects by computing their homography from the key (training) image to the query (test) image\n\n" << endl;
-    cout << "usage: " << av[0] << " <video device number>\n" << endl;
-    cout << "The following keys do stuff:" << endl;
-    cout << "  t : grabs a reference frame to match against" << endl;
-    cout << "  l : makes the reference frame new every frame" << endl;
-    cout << "  q or escape: quit" << endl;
-}
-
-namespace
-{
-    void drawMatchesRelative(const vector<KeyPoint>& train, const vector<KeyPoint>& query,
-        std::vector<cv::DMatch>& matches, Mat& img, const vector<unsigned char>& mask = vector<
-        unsigned char> ())
-    {
-        for (int i = 0; i < (int)matches.size(); i++)
-        {
-            if (mask.empty() || mask[i])
-            {
-                Point2f pt_new = query[matches[i].queryIdx].pt;
-                Point2f pt_old = train[matches[i].trainIdx].pt;
-
-                cv::line(img, pt_new, pt_old, Scalar(125, 255, 125), 1);
-                cv::circle(img, pt_new, 2, Scalar(255, 0, 125), 1);
-
-            }
-        }
-    }
-
-    //Takes a descriptor and turns it into an xy point
-    void keypoints2points(const vector<KeyPoint>& in, vector<Point2f>& out)
-    {
-        out.clear();
-        out.reserve(in.size());
-        for (size_t i = 0; i < in.size(); ++i)
-        {
-            out.push_back(in[i].pt);
-        }
-    }
-
-    //Takes an xy point and appends that to a keypoint structure
-    void points2keypoints(const vector<Point2f>& in, vector<KeyPoint>& out)
-    {
-        out.clear();
-        out.reserve(in.size());
-        for (size_t i = 0; i < in.size(); ++i)
-        {
-            out.push_back(KeyPoint(in[i], 1));
-        }
-    }
-
-    //Uses computed homography H to warp original input points to new planar position
-    void warpKeypoints(const Mat& H, const vector<KeyPoint>& in, vector<KeyPoint>& out)
-    {
-        vector<Point2f> pts;
-        keypoints2points(in, pts);
-        vector<Point2f> pts_w(pts.size());
-        Mat m_pts_w(pts_w);
-        perspectiveTransform(Mat(pts), m_pts_w, H);
-        points2keypoints(pts_w, out);
-    }
-
-    //Converts matching indices to xy points
-    void matches2points(const vector<KeyPoint>& train, const vector<KeyPoint>& query,
-        const std::vector<cv::DMatch>& matches, std::vector<cv::Point2f>& pts_train,
-        std::vector<Point2f>& pts_query)
-    {
-
-        pts_train.clear();
-        pts_query.clear();
-        pts_train.reserve(matches.size());
-        pts_query.reserve(matches.size());
-
-        size_t i = 0;
-
-        for (; i < matches.size(); i++)
-        {
-
-            const DMatch & dmatch = matches[i];
-
-            pts_query.push_back(query[dmatch.queryIdx].pt);
-            pts_train.push_back(train[dmatch.trainIdx].pt);
-
-        }
-
-    }
-
-    void resetH(Mat&H)
-    {
-        H = Mat::eye(3, 3, CV_32FC1);
-    }
-}
-
-int main(int ac, char ** av)
-{
-
-    if (ac != 2)
-    {
-        help(av);
-        return 1;
-    }
-
-    BriefDescriptorExtractor brief(32);
-
-    VideoCapture capture;
-    capture.open(atoi(av[1]));
-    if (!capture.isOpened())
-    {
-        help(av);
-        cout << "capture device " << atoi(av[1]) << " failed to open!" << endl;
-        return 1;
-    }
-
-    cout << "following keys do stuff:" << endl;
-    cout << "t : grabs a reference frame to match against" << endl;
-    cout << "l : makes the reference frame new every frame" << endl;
-    cout << "q or escape: quit" << endl;
-
-    Mat frame;
-
-    vector<DMatch> matches;
-
-    BFMatcher desc_matcher(brief.defaultNorm());
-
-    vector<Point2f> train_pts, query_pts;
-    vector<KeyPoint> train_kpts, query_kpts;
-    vector<unsigned char> match_mask;
-
-    Mat gray;
-
-    bool ref_live = true;
-
-    Mat train_desc, query_desc;
-    const int DESIRED_FTRS = 500;
-    GridAdaptedFeatureDetector detector(makePtr<FastFeatureDetector>(10, true), DESIRED_FTRS, 4, 4);
-
-    Mat H_prev = Mat::eye(3, 3, CV_32FC1);
-    for (;;)
-    {
-        capture >> frame;
-        if (frame.empty())
-            break;
-
-        cvtColor(frame, gray, COLOR_RGB2GRAY);
-
-        detector.detect(gray, query_kpts); //Find interest points
-
-        brief.compute(gray, query_kpts, query_desc); //Compute brief descriptors at each keypoint location
-
-        if (!train_kpts.empty())
-        {
-
-            vector<KeyPoint> test_kpts;
-            warpKeypoints(H_prev.inv(), query_kpts, test_kpts);
-
-            Mat mask = windowedMatchingMask(test_kpts, train_kpts, 25, 25);
-            desc_matcher.match(query_desc, train_desc, matches, mask);
-            drawKeypoints(frame, test_kpts, frame, Scalar(255, 0, 0), DrawMatchesFlags::DRAW_OVER_OUTIMG);
-
-            matches2points(train_kpts, query_kpts, matches, train_pts, query_pts);
-
-            if (matches.size() > 5)
-            {
-                Mat H = findHomography(train_pts, query_pts, RANSAC, 4, match_mask);
-                if (countNonZero(Mat(match_mask)) > 15)
-                {
-                    H_prev = H;
-                }
-                else
-                    resetH(H_prev);
-                drawMatchesRelative(train_kpts, query_kpts, matches, frame, match_mask);
-            }
-            else
-                resetH(H_prev);
-
-        }
-        else
-        {
-            H_prev = Mat::eye(3, 3, CV_32FC1);
-            Mat out;
-            drawKeypoints(gray, query_kpts, out);
-            frame = out;
-        }
-
-        imshow("frame", frame);
-
-        if (ref_live)
-        {
-            train_kpts = query_kpts;
-            query_desc.copyTo(train_desc);
-        }
-        char key = (char)waitKey(2);
-        switch (key)
-        {
-        case 'l':
-            ref_live = true;
-            resetH(H_prev);
-            break;
-        case 't':
-            ref_live = false;
-            train_kpts = query_kpts;
-            query_desc.copyTo(train_desc);
-            resetH(H_prev);
-            break;
-        case 27:
-        case 'q':
-            return 0;
-            break;
-        }
-
-    }
-    return 0;
-}
index 01bd694..0995295 100644 (file)
@@ -1,7 +1,6 @@
 SET(OPENCV_CUDA_SAMPLES_REQUIRED_DEPS opencv_core opencv_flann opencv_imgproc opencv_imgcodecs opencv_videoio opencv_highgui
                                      opencv_ml opencv_video opencv_objdetect opencv_features2d
-                                     opencv_calib3d opencv_legacy opencv_contrib opencv_cuda
-                                     opencv_nonfree opencv_softcascade opencv_superres
+                                     opencv_calib3d opencv_cuda opencv_superres
                                      opencv_cudaarithm opencv_cudafilters opencv_cudawarping opencv_cudaimgproc
                                      opencv_cudafeatures2d opencv_cudaoptflow opencv_cudabgsegm
                                      opencv_cudastereo opencv_cudalegacy)
index 83fd726..9c69ab0 100644 (file)
@@ -1,4 +1,4 @@
-SET(OPENCV_TAPI_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_video opencv_imgcodecs opencv_videoio opencv_highgui opencv_objdetect opencv_features2d opencv_calib3d opencv_nonfree opencv_flann)
+SET(OPENCV_TAPI_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_video opencv_imgcodecs opencv_videoio opencv_highgui opencv_objdetect opencv_features2d opencv_calib3d opencv_flann)
 
 ocv_check_dependencies(${OPENCV_TAPI_SAMPLES_REQUIRED_DEPS})
 
diff --git a/samples/tapi/surf_matcher.cpp b/samples/tapi/surf_matcher.cpp
deleted file mode 100644 (file)
index 2aca96f..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-#include <iostream>
-#include <stdio.h>
-#include "opencv2/core/core.hpp"
-#include "opencv2/core/utility.hpp"
-#include "opencv2/core/ocl.hpp"
-#include "opencv2/imgcodecs.hpp"
-#include "opencv2/highgui.hpp"
-#include "opencv2/features2d.hpp"
-#include "opencv2/calib3d.hpp"
-#include "opencv2/imgproc.hpp"
-#include "opencv2/nonfree.hpp"
-
-using namespace cv;
-
-const int LOOP_NUM = 10;
-const int GOOD_PTS_MAX = 50;
-const float GOOD_PORTION = 0.15f;
-
-int64 work_begin = 0;
-int64 work_end = 0;
-
-static void workBegin()
-{
-    work_begin = getTickCount();
-}
-
-static void workEnd()
-{
-    work_end = getTickCount() - work_begin;
-}
-
-static double getTime()
-{
-    return work_end /((double)getTickFrequency() )* 1000.;
-}
-
-template<class KPDetector>
-struct SURFDetector
-{
-    KPDetector surf;
-    SURFDetector(double hessian = 800.0)
-        :surf(hessian)
-    {
-    }
-    template<class T>
-    void operator()(const T& in, const T& mask, std::vector<cv::KeyPoint>& pts, T& descriptors, bool useProvided = false)
-    {
-        surf(in, mask, pts, descriptors, useProvided);
-    }
-};
-
-template<class KPMatcher>
-struct SURFMatcher
-{
-    KPMatcher matcher;
-    template<class T>
-    void match(const T& in1, const T& in2, std::vector<cv::DMatch>& matches)
-    {
-        matcher.match(in1, in2, matches);
-    }
-};
-
-static Mat drawGoodMatches(
-    const Mat& img1,
-    const Mat& img2,
-    const std::vector<KeyPoint>& keypoints1,
-    const std::vector<KeyPoint>& keypoints2,
-    std::vector<DMatch>& matches,
-    std::vector<Point2f>& scene_corners_
-    )
-{
-    //-- Sort matches and preserve top 10% matches
-    std::sort(matches.begin(), matches.end());
-    std::vector< DMatch > good_matches;
-    double minDist = matches.front().distance;
-    double maxDist = matches.back().distance;
-
-    const int ptsPairs = std::min(GOOD_PTS_MAX, (int)(matches.size() * GOOD_PORTION));
-    for( int i = 0; i < ptsPairs; i++ )
-    {
-        good_matches.push_back( matches[i] );
-    }
-    std::cout << "\nMax distance: " << maxDist << std::endl;
-    std::cout << "Min distance: " << minDist << std::endl;
-
-    std::cout << "Calculating homography using " << ptsPairs << " point pairs." << std::endl;
-
-    // drawing the results
-    Mat img_matches;
-
-    drawMatches( img1, keypoints1, img2, keypoints2,
-                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
-                 std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS  );
-
-    //-- Localize the object
-    std::vector<Point2f> obj;
-    std::vector<Point2f> scene;
-
-    for( size_t i = 0; i < good_matches.size(); i++ )
-    {
-        //-- Get the keypoints from the good matches
-        obj.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
-        scene.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
-    }
-    //-- Get the corners from the image_1 ( the object to be "detected" )
-    std::vector<Point2f> obj_corners(4);
-    obj_corners[0] = Point(0,0);
-    obj_corners[1] = Point( img1.cols, 0 );
-    obj_corners[2] = Point( img1.cols, img1.rows );
-    obj_corners[3] = Point( 0, img1.rows );
-    std::vector<Point2f> scene_corners(4);
-
-    Mat H = findHomography( obj, scene, RANSAC );
-    perspectiveTransform( obj_corners, scene_corners, H);
-
-    scene_corners_ = scene_corners;
-
-    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
-    line( img_matches,
-          scene_corners[0] + Point2f( (float)img1.cols, 0), scene_corners[1] + Point2f( (float)img1.cols, 0),
-          Scalar( 0, 255, 0), 2, LINE_AA );
-    line( img_matches,
-          scene_corners[1] + Point2f( (float)img1.cols, 0), scene_corners[2] + Point2f( (float)img1.cols, 0),
-          Scalar( 0, 255, 0), 2, LINE_AA );
-    line( img_matches,
-          scene_corners[2] + Point2f( (float)img1.cols, 0), scene_corners[3] + Point2f( (float)img1.cols, 0),
-          Scalar( 0, 255, 0), 2, LINE_AA );
-    line( img_matches,
-          scene_corners[3] + Point2f( (float)img1.cols, 0), scene_corners[0] + Point2f( (float)img1.cols, 0),
-          Scalar( 0, 255, 0), 2, LINE_AA );
-    return img_matches;
-}
-
-////////////////////////////////////////////////////
-// This program demonstrates the usage of SURF_OCL.
-// use cpu findHomography interface to calculate the transformation matrix
-int main(int argc, char* argv[])
-{
-    const char* keys =
-        "{ h help     | false            | print help message  }"
-        "{ l left     | box.png          | specify left image  }"
-        "{ r right    | box_in_scene.png | specify right image }"
-        "{ o output   | SURF_output.jpg  | specify output save path }"
-        "{ m cpu_mode | false            | run without OpenCL }";
-
-    CommandLineParser cmd(argc, argv, keys);
-    if (cmd.has("help"))
-    {
-        std::cout << "Usage: surf_matcher [options]" << std::endl;
-        std::cout << "Available options:" << std::endl;
-        cmd.printMessage();
-        return EXIT_SUCCESS;
-    }
-    if (cmd.has("cpu_mode"))
-    {
-        ocl::setUseOpenCL(false);
-        std::cout << "OpenCL was disabled" << std::endl;
-    }
-
-    UMat img1, img2;
-
-    std::string outpath = cmd.get<std::string>("o");
-
-    std::string leftName = cmd.get<std::string>("l");
-    imread(leftName, IMREAD_GRAYSCALE).copyTo(img1);
-    if(img1.empty())
-    {
-        std::cout << "Couldn't load " << leftName << std::endl;
-        cmd.printMessage();
-        return EXIT_FAILURE;
-    }
-
-    std::string rightName = cmd.get<std::string>("r");
-    imread(rightName, IMREAD_GRAYSCALE).copyTo(img2);
-    if(img2.empty())
-    {
-        std::cout << "Couldn't load " << rightName << std::endl;
-        cmd.printMessage();
-        return EXIT_FAILURE;
-    }
-
-    double surf_time = 0.;
-
-    //declare input/output
-    std::vector<KeyPoint> keypoints1, keypoints2;
-    std::vector<DMatch> matches;
-
-    UMat _descriptors1, _descriptors2;
-    Mat descriptors1 = _descriptors1.getMat(ACCESS_RW),
-        descriptors2 = _descriptors2.getMat(ACCESS_RW);
-
-    //instantiate detectors/matchers
-    SURFDetector<SURF> surf;
-
-    SURFMatcher<BFMatcher> matcher;
-
-    //-- start of timing section
-
-    for (int i = 0; i <= LOOP_NUM; i++)
-    {
-        if(i == 1) workBegin();
-        surf(img1.getMat(ACCESS_READ), Mat(), keypoints1, descriptors1);
-        surf(img2.getMat(ACCESS_READ), Mat(), keypoints2, descriptors2);
-        matcher.match(descriptors1, descriptors2, matches);
-    }
-    workEnd();
-    std::cout << "FOUND " << keypoints1.size() << " keypoints on first image" << std::endl;
-    std::cout << "FOUND " << keypoints2.size() << " keypoints on second image" << std::endl;
-
-    surf_time = getTime();
-    std::cout << "SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl<<"\n";
-
-
-    std::vector<Point2f> corner;
-    Mat img_matches = drawGoodMatches(img1.getMat(ACCESS_READ), img2.getMat(ACCESS_READ), keypoints1, keypoints2, matches, corner);
-
-    //-- Show detected matches
-
-    namedWindow("surf matches", 0);
-    imshow("surf matches", img_matches);
-    imwrite(outpath, img_matches);
-
-    waitKey(0);
-    return EXIT_SUCCESS;
-}