Add Java and Python code for the following features2d tutorials: Harris corner detect...
author catree <catree.catreus@outlook.com>
Sun, 27 May 2018 23:33:56 +0000 (01:33 +0200)
committer catree <catree.catreus@outlook.com>
Tue, 29 May 2018 08:35:57 +0000 (10:35 +0200)
43 files changed:
doc/opencv.bib
doc/tutorials/features2d/feature_description/feature_description.markdown
doc/tutorials/features2d/feature_detection/feature_detection.markdown
doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.markdown
doc/tutorials/features2d/feature_flann_matcher/images/Feature_FlannMatcher_Lowe_ratio_test.png [new file with mode: 0644]
doc/tutorials/features2d/feature_flann_matcher/images/Feature_FlannMatcher_Result_ratio_test.jpg [new file with mode: 0644]
doc/tutorials/features2d/feature_homography/feature_homography.markdown
doc/tutorials/features2d/feature_homography/images/Feature_Homography_Result.jpg
doc/tutorials/features2d/table_of_content_features2d.markdown
doc/tutorials/features2d/trackingmotion/corner_subpixeles/corner_subpixeles.markdown [deleted file]
doc/tutorials/features2d/trackingmotion/corner_subpixels/corner_subpixels.markdown [new file with mode: 0644]
doc/tutorials/features2d/trackingmotion/corner_subpixels/images/Corner_Subpixels_Original_Image.jpg [moved from doc/tutorials/features2d/trackingmotion/corner_subpixeles/images/Corner_Subpixeles_Original_Image.jpg with 100% similarity]
doc/tutorials/features2d/trackingmotion/corner_subpixels/images/Corner_Subpixels_Result.jpg [moved from doc/tutorials/features2d/trackingmotion/corner_subpixeles/images/Corner_Subpixeles_Result.jpg with 100% similarity]
doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.markdown
doc/tutorials/features2d/trackingmotion/good_features_to_track/good_features_to_track.markdown
doc/tutorials/features2d/trackingmotion/good_features_to_track/images/Feature_Detection_Result_a.jpg [deleted file]
doc/tutorials/features2d/trackingmotion/good_features_to_track/images/Feature_Detection_Result_b.jpg [deleted file]
doc/tutorials/features2d/trackingmotion/good_features_to_track/images/good_features_to_track_Shi_Tomasi.jpg [new file with mode: 0644]
doc/tutorials/features2d/trackingmotion/harris_detector/harris_detector.markdown
samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp
samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp
samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp
samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp
samples/cpp/tutorial_code/features2D/feature_description/SURF_matching_Demo.cpp [new file with mode: 0755]
samples/cpp/tutorial_code/features2D/feature_detection/SURF_detection_Demo.cpp [new file with mode: 0755]
samples/cpp/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.cpp [new file with mode: 0755]
samples/cpp/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.cpp [new file with mode: 0755]
samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java [new file with mode: 0644]
samples/java/tutorial_code/TrackingMotion/generic_corner_detector/CornerDetectorDemo.java [new file with mode: 0644]
samples/java/tutorial_code/TrackingMotion/good_features_to_track/GoodFeaturesToTrackDemo.java [new file with mode: 0644]
samples/java/tutorial_code/TrackingMotion/harris_detector/CornerHarrisDemo.java [new file with mode: 0644]
samples/java/tutorial_code/features2D/feature_description/SURFMatchingDemo.java [new file with mode: 0644]
samples/java/tutorial_code/features2D/feature_detection/SURFDetectionDemo.java [new file with mode: 0644]
samples/java/tutorial_code/features2D/feature_flann_matcher/SURFFLANNMatchingDemo.java [new file with mode: 0644]
samples/java/tutorial_code/features2D/feature_homography/SURFFLANNMatchingHomographyDemo.java [new file with mode: 0644]
samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py [new file with mode: 0644]
samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py [new file with mode: 0644]
samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py [new file with mode: 0644]
samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py [new file with mode: 0644]
samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py [new file with mode: 0644]
samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py [new file with mode: 0644]
samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py [new file with mode: 0644]
samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py [new file with mode: 0644]

index 1cb3d0b..edb7033 100644 (file)
   volume = {34},
   number = {7}
 }
+@INPROCEEDINGS{Arandjelovic:2012:TTE:2354409.2355123,
+ author = {Arandjelovic, Relja and Zisserman, Andrew},
+ title = {Three Things Everyone Should Know to Improve Object Retrieval},
+ booktitle = {Proceedings of the 2012 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
+ series = {CVPR '12},
+ year = {2012},
+ isbn = {978-1-4673-1226-4},
+ pages = {2911--2918},
+ numpages = {8},
+ url = {http://dl.acm.org/citation.cfm?id=2354409.2355123},
+ acmid = {2355123},
+ publisher = {IEEE Computer Society},
+ address = {Washington, DC, USA},
+ keywords = {Vectors,Visualization,Kernel,Standards,Support vector machines,Indexes,Euclidean distance},
+}
 @ARTICLE{BA83,
   author = {Burt, Peter J and Adelson, Edward H},
   title = {A multiresolution spline with application to image mosaics},
   volume = {1},
   organization = {IEEE}
 }
+@ARTICLE{Lowe:2004:DIF:993451.996342,
+ author = {Lowe, David G.},
+ title = {Distinctive Image Features from Scale-Invariant Keypoints},
+ journal = {Int. J. Comput. Vision},
+ issue_date = {November 2004},
+ volume = {60},
+ number = {2},
+ month = nov,
+ year = {2004},
+ issn = {0920-5691},
+ pages = {91--110},
+ numpages = {20},
+ url = {https://doi.org/10.1023/B:VISI.0000029664.99615.94},
+ doi = {10.1023/B:VISI.0000029664.99615.94},
+ acmid = {996342},
+ publisher = {Kluwer Academic Publishers},
+ address = {Hingham, MA, USA},
+ keywords = {image matching, invariant features, object recognition, scale invariance},
+}
 @INPROCEEDINGS{Lucas81,
   author = {Lucas, Bruce D and Kanade, Takeo and others},
   title = {An iterative image registration technique with an application to stereo vision.},
index eea5a29..ec3cd0e 100644 (file)
@@ -10,74 +10,35 @@ In this tutorial you will learn how to:
     to the keypoints. Specifically:
     -   Use cv::xfeatures2d::SURF and its function cv::xfeatures2d::SURF::compute to perform the
         required calculations.
-    -   Use a @ref cv::BFMatcher to match the features vector
+    -   Use a @ref cv::DescriptorMatcher to match the features vector
     -   Use the function @ref cv::drawMatches to draw the detected matches.
 
+\warning You need the <a href="https://github.com/opencv/opencv_contrib">OpenCV contrib modules</a> to be able to use the SURF features
+(alternatives are ORB, KAZE, ... features).
+
 Theory
 ------
 
 Code
 ----
 
-This tutorial code's is shown lines below.
-@code{.cpp}
-#include <stdio.h>
-#include <iostream>
-#include "opencv2/core.hpp"
-#include "opencv2/features2d.hpp"
-#include "opencv2/highgui.hpp"
-#include "opencv2/xfeatures2d.hpp"
-
-using namespace cv;
-using namespace cv::xfeatures2d;
-
-void readme();
-
-/* @function main */
-int main( int argc, char** argv )
-{
-  if( argc != 3 )
-   { return -1; }
-
-  Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
-  Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
-
-  if( !img_1.data || !img_2.data )
-   { return -1; }
-
-  //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
-  int minHessian = 400;
-
-  Ptr<SURF> detector = SURF::create();
-  detector->setHessianThreshold(minHessian);
-
-  std::vector<KeyPoint> keypoints_1, keypoints_2;
-  Mat descriptors_1, descriptors_2;
-
-  detector->detectAndCompute( img_1, Mat(), keypoints_1, descriptors_1 );
-  detector->detectAndCompute( img_2, Mat(), keypoints_2, descriptors_2 );
-
-  //-- Step 2: Matching descriptor vectors with a brute force matcher
-  BFMatcher matcher(NORM_L2);
-  std::vector< DMatch > matches;
-  matcher.match( descriptors_1, descriptors_2, matches );
-
-  //-- Draw matches
-  Mat img_matches;
-  drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );
-
-  //-- Show detected matches
-  imshow("Matches", img_matches );
-
-  waitKey(0);
-
-  return 0;
-  }
-
- /* @function readme */
- void readme()
- { std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }
-@endcode
+@add_toggle_cpp
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/feature_description/SURF_matching_Demo.cpp)
+@include samples/cpp/tutorial_code/features2D/feature_description/SURF_matching_Demo.cpp
+@end_toggle
+
+@add_toggle_java
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/feature_description/SURFMatchingDemo.java)
+@include samples/java/tutorial_code/features2D/feature_description/SURFMatchingDemo.java
+@end_toggle
+
+@add_toggle_python
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py)
+@include samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py
+@end_toggle
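In outline, the new samples perform the same steps in each language; the following is a minimal Python sketch of the pipeline (assuming `opencv-contrib-python` is installed so that `cv2.xfeatures2d` is available, and using the `box.png`/`box_in_scene.png` sample images), not the full demo:

@code{.py}
import cv2 as cv

img1 = cv.imread('box.png', cv.IMREAD_GRAYSCALE)
img2 = cv.imread('box_in_scene.png', cv.IMREAD_GRAYSCALE)

# SURF lives in the contrib xfeatures2d module
surf = cv.xfeatures2d.SURF_create(hessianThreshold=400)
kp1, des1 = surf.detectAndCompute(img1, None)
kp2, des2 = surf.detectAndCompute(img2, None)

# Brute-force descriptor matching with the L2 norm, suitable for SURF
matcher = cv.BFMatcher(cv.NORM_L2)
matches = matcher.match(des1, des2)

out = cv.drawMatches(img1, kp1, img2, kp2, matches, None)
cv.imshow('Matches', out)
cv.waitKey()
@endcode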
 
 Explanation
 -----------
index 8b2f423..d099651 100644 (file)
@@ -11,67 +11,32 @@ In this tutorial you will learn how to:
         detection process
     -   Use the function @ref cv::drawKeypoints to draw the detected keypoints
 
+\warning You need the <a href="https://github.com/opencv/opencv_contrib">OpenCV contrib modules</a> to be able to use the SURF features
+(alternatives are ORB, KAZE, ... features).
+
 Theory
 ------
 
 Code
 ----
 
-This tutorial code's is shown lines below.
-@code{.cpp}
-#include <stdio.h>
-#include <iostream>
-#include "opencv2/core.hpp"
-#include "opencv2/features2d.hpp"
-#include "opencv2/xfeatures2d.hpp"
-#include "opencv2/highgui.hpp"
-
-using namespace cv;
-using namespace cv::xfeatures2d;
-
-void readme();
-
-/* @function main */
-int main( int argc, char** argv )
-{
-  if( argc != 3 )
-  { readme(); return -1; }
-
-  Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
-  Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
-
-  if( !img_1.data || !img_2.data )
-  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }
-
-  //-- Step 1: Detect the keypoints using SURF Detector
-  int minHessian = 400;
-
-  Ptr<SURF> detector = SURF::create( minHessian );
-
-  std::vector<KeyPoint> keypoints_1, keypoints_2;
-
-  detector->detect( img_1, keypoints_1 );
-  detector->detect( img_2, keypoints_2 );
-
-  //-- Draw keypoints
-  Mat img_keypoints_1; Mat img_keypoints_2;
-
-  drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
-  drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
-
-  //-- Show detected (drawn) keypoints
-  imshow("Keypoints 1", img_keypoints_1 );
-  imshow("Keypoints 2", img_keypoints_2 );
-
-  waitKey(0);
+@add_toggle_cpp
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/feature_detection/SURF_detection_Demo.cpp)
+@include samples/cpp/tutorial_code/features2D/feature_detection/SURF_detection_Demo.cpp
+@end_toggle
 
-  return 0;
-  }
+@add_toggle_java
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/feature_detection/SURFDetectionDemo.java)
+@include samples/java/tutorial_code/features2D/feature_detection/SURFDetectionDemo.java
+@end_toggle
 
-  /* @function readme */
-  void readme()
-  { std::cout << " Usage: ./SURF_detector <img1> <img2>" << std::endl; }
-@endcode
+@add_toggle_python
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py)
+@include samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py
+@end_toggle
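The detection step alone is even shorter in Python; a sketch under the same contrib-modules assumption:

@code{.py}
import cv2 as cv

img = cv.imread('box.png', cv.IMREAD_GRAYSCALE)

surf = cv.xfeatures2d.SURF_create(hessianThreshold=400)
keypoints = surf.detect(img, None)

# The default arguments draw each keypoint in a random color,
# as Scalar::all(-1) does in the C++ version
out = cv.drawKeypoints(img, keypoints, None)
cv.imshow('Keypoints', out)
cv.waitKey()
@endcode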
 
 Explanation
 -----------
@@ -79,10 +44,10 @@ Explanation
 Result
 ------
 
--#  Here is the result of the feature detection applied to the first image:
+-#  Here is the result of the feature detection applied to the `box.png` image:
 
     ![](images/Feature_Detection_Result_a.jpg)
 
--#  And here is the result for the second image:
+-#  And here is the result for the `box_in_scene.png` image:
 
     ![](images/Feature_Detection_Result_b.jpg)
index 8243b43..e7f865c 100644 (file)
@@ -9,114 +9,57 @@ In this tutorial you will learn how to:
 -   Use the @ref cv::FlannBasedMatcher interface in order to perform a quick and efficient matching
     by using the @ref flann module
 
+\warning You need the <a href="https://github.com/opencv/opencv_contrib">OpenCV contrib modules</a> to be able to use the SURF features
+(alternatives are ORB, KAZE, ... features).
+
 Theory
 ------
 
+Classical feature descriptors (SIFT, SURF, ...) are usually compared and matched using the Euclidean distance (or L2-norm).
+Since SIFT and SURF descriptors represent the histogram of oriented gradients (of the Haar wavelet responses in the case of SURF)
+in a neighborhood, alternatives to the Euclidean distance are histogram-based metrics (\f$ \chi^{2} \f$, Earth Mover’s Distance (EMD), ...).
+
+Arandjelovic et al. proposed in @cite Arandjelovic:2012:TTE:2354409.2355123 the RootSIFT descriptor, showing that using
+> a square root (Hellinger) kernel instead of the standard Euclidean distance to measure the similarity between SIFT descriptors
+> leads to a dramatic performance boost in all stages of the pipeline.
+
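The RootSIFT mapping itself is a two-line transform on a matrix of SIFT descriptors; a sketch (the helper name is illustrative, and this transform is not part of the tutorial samples):

@code{.py}
import numpy as np

def root_sift(descriptors, eps=1e-7):
    # L1-normalize each descriptor row, then take the element-wise square root:
    # the Euclidean distance between the results corresponds to the Hellinger
    # kernel on the original descriptors.
    descriptors = descriptors / (descriptors.sum(axis=1, keepdims=True) + eps)
    return np.sqrt(descriptors)
@endcode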
+Binary descriptors (ORB, BRISK, ...) are matched using the <a href="https://en.wikipedia.org/wiki/Hamming_distance">Hamming distance</a>.
+This distance is equivalent to counting the number of differing elements between two binary strings (the population count after applying a XOR operation):
+\f[ d_{hamming} \left ( a,b \right ) = \sum_{i=0}^{n-1} \left ( a_i \oplus b_i \right ) \f]
+
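For packed 8-bit binary descriptors such as ORB's, this can be computed directly with a XOR and a population count; a small NumPy illustration (OpenCV's matchers do the equivalent internally when `NORM_HAMMING` is selected):

@code{.py}
import numpy as np

def hamming_distance(a, b):
    # a, b: uint8 arrays, each holding one packed binary descriptor
    return int(np.unpackbits(np.bitwise_xor(a, b)).sum())
@endcode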
+To filter the matches, Lowe proposed in @cite Lowe:2004:DIF:993451.996342 a distance ratio test to try to eliminate false matches.
+The distance ratio between the two nearest matches of a considered keypoint is computed, and the match is kept only when this ratio is below
+a threshold. Indeed, this ratio helps to discriminate between ambiguous matches (whose distance ratio between the two nearest neighbors is
+close to one) and well-discriminated matches. The figure below, from the SIFT paper, illustrates the probability that a match is correct
+based on the nearest-neighbor distance ratio test.
+
+![](images/Feature_FlannMatcher_Lowe_ratio_test.png)
+
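With the OpenCV matching API, the ratio test only requires asking for the two nearest neighbors of each descriptor; a minimal Python sketch (the 0.7 threshold is a commonly used value, not one mandated by the tutorial):

@code{.py}
import cv2 as cv

img1 = cv.imread('box.png', cv.IMREAD_GRAYSCALE)
img2 = cv.imread('box_in_scene.png', cv.IMREAD_GRAYSCALE)

surf = cv.xfeatures2d.SURF_create(hessianThreshold=400)  # needs the contrib modules
kp1, des1 = surf.detectAndCompute(img1, None)
kp2, des2 = surf.detectAndCompute(img2, None)

matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(des1, des2, 2)  # the two nearest neighbors per query

ratio_thresh = 0.7
good_matches = [m for m, n in knn_matches if m.distance < ratio_thresh * n.distance]
@endcode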
+Alternative or additional filtering tests are:
+-   cross check test (good match \f$ \left( f_a, f_b \right) \f$ if feature \f$ f_b \f$ is the best match for \f$ f_a \f$ in \f$ I_b \f$
+    and feature \f$ f_a \f$ is the best match for \f$ f_b \f$ in \f$ I_a \f$)
+-   geometric test (eliminate matches that do not fit to a geometric model, e.g. RANSAC or robust homography for planar objects)
+
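The cross check, for instance, is built into the brute-force matcher; a one-call sketch (`des1`/`des2` as in the ratio-test sketch above):

@code{.py}
import cv2 as cv

# crossCheck=True keeps a pair (f_a, f_b) only if f_b is the best match for f_a
# AND f_a is the best match for f_b, mimicking the test described above.
matcher = cv.BFMatcher(cv.NORM_L2, crossCheck=True)
matches = matcher.match(des1, des2)
@endcode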
 Code
 ----
 
-This tutorial code's is shown lines below.
-@code{.cpp}
-/*
- * @file SURF_FlannMatcher
- * @brief SURF detector + descriptor + FLANN Matcher
- * @author A. Huaman
- */
-
-#include <stdio.h>
-#include <iostream>
-#include <stdio.h>
-#include <iostream>
-#include "opencv2/core.hpp"
-#include "opencv2/features2d.hpp"
-#include "opencv2/imgcodecs.hpp"
-#include "opencv2/highgui.hpp"
-#include "opencv2/xfeatures2d.hpp"
-
-using namespace std;
-using namespace cv;
-using namespace cv::xfeatures2d;
-
-void readme();
-
-/*
- * @function main
- * @brief Main function
- */
-int main( int argc, char** argv )
-{
-  if( argc != 3 )
-  { readme(); return -1; }
-
-  Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
-  Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
-
-  if( !img_1.data || !img_2.data )
-  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }
-
-  //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
-  int minHessian = 400;
-
-  Ptr<SURF> detector = SURF::create();
-  detector->setHessianThreshold(minHessian);
-
-  std::vector<KeyPoint> keypoints_1, keypoints_2;
-  Mat descriptors_1, descriptors_2;
-
-  detector->detectAndCompute( img_1, Mat(), keypoints_1, descriptors_1 );
-  detector->detectAndCompute( img_2, Mat(), keypoints_2, descriptors_2 );
-
-  //-- Step 2: Matching descriptor vectors using FLANN matcher
-  FlannBasedMatcher matcher;
-  std::vector< DMatch > matches;
-  matcher.match( descriptors_1, descriptors_2, matches );
-
-  double max_dist = 0; double min_dist = 100;
-
-  //-- Quick calculation of max and min distances between keypoints
-  for( int i = 0; i < descriptors_1.rows; i++ )
-  { double dist = matches[i].distance;
-    if( dist < min_dist ) min_dist = dist;
-    if( dist > max_dist ) max_dist = dist;
-  }
-
-  printf("-- Max dist : %f \n", max_dist );
-  printf("-- Min dist : %f \n", min_dist );
-
-  //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
-  //-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
-  //-- small)
-  //-- PS.- radiusMatch can also be used here.
-  std::vector< DMatch > good_matches;
-
-  for( int i = 0; i < descriptors_1.rows; i++ )
-  { if( matches[i].distance <= max(2*min_dist, 0.02) )
-    { good_matches.push_back( matches[i]); }
-  }
-
-  //-- Draw only "good" matches
-  Mat img_matches;
-  drawMatches( img_1, keypoints_1, img_2, keypoints_2,
-               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
-               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
-
-  //-- Show detected matches
-  imshow( "Good Matches", img_matches );
-
-  for( int i = 0; i < (int)good_matches.size(); i++ )
-  { printf( "-- Good Match [%d] Keypoint 1: %d  -- Keypoint 2: %d  \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
-
-  waitKey(0);
-
-  return 0;
-}
-
-/*
- * @function readme
- */
-void readme()
-{ std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl; }
-@endcode
+@add_toggle_cpp
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.cpp)
+@include samples/cpp/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.cpp
+@end_toggle
+
+@add_toggle_java
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/feature_flann_matcher/SURFFLANNMatchingDemo.java)
+@include samples/java/tutorial_code/features2D/feature_flann_matcher/SURFFLANNMatchingDemo.java
+@end_toggle
+
+@add_toggle_python
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py)
+@include samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py
+@end_toggle
 
 Explanation
 -----------
@@ -124,10 +67,6 @@ Explanation
 Result
 ------
 
--#  Here is the result of the feature detection applied to the first image:
-
-    ![](images/Featur_FlannMatcher_Result.jpg)
-
--#  Additionally, we get as console output the keypoints filtered:
+-   Here is the result of the SURF feature matching using the distance ratio test:
 
-    ![](images/Feature_FlannMatcher_Keypoints_Result.jpg)
+    ![](images/Feature_FlannMatcher_Result_ratio_test.jpg)
diff --git a/doc/tutorials/features2d/feature_flann_matcher/images/Feature_FlannMatcher_Lowe_ratio_test.png b/doc/tutorials/features2d/feature_flann_matcher/images/Feature_FlannMatcher_Lowe_ratio_test.png
new file mode 100644 (file)
index 0000000..f56a640
Binary files /dev/null and b/doc/tutorials/features2d/feature_flann_matcher/images/Feature_FlannMatcher_Lowe_ratio_test.png differ
diff --git a/doc/tutorials/features2d/feature_flann_matcher/images/Feature_FlannMatcher_Result_ratio_test.jpg b/doc/tutorials/features2d/feature_flann_matcher/images/Feature_FlannMatcher_Result_ratio_test.jpg
new file mode 100644 (file)
index 0000000..e4a88d0
Binary files /dev/null and b/doc/tutorials/features2d/feature_flann_matcher/images/Feature_FlannMatcher_Result_ratio_test.jpg differ
index ec7913c..c4f0c00 100644 (file)
@@ -9,125 +9,40 @@ In this tutorial you will learn how to:
 -   Use the function @ref cv::findHomography to find the transform between matched keypoints.
 -   Use the function @ref cv::perspectiveTransform to map the points.
 
+\warning You need the <a href="https://github.com/opencv/opencv_contrib">OpenCV contrib modules</a> to be able to use the SURF features
+(alternatives are ORB, KAZE, ... features).
+
 Theory
 ------
 
 Code
 ----
 
-This tutorial code's is shown lines below.
-@code{.cpp}
-#include <stdio.h>
-#include <iostream>
-#include "opencv2/core.hpp"
-#include "opencv2/imgproc.hpp"
-#include "opencv2/features2d.hpp"
-#include "opencv2/highgui.hpp"
-#include "opencv2/calib3d.hpp"
-#include "opencv2/xfeatures2d.hpp"
-
-using namespace cv;
-using namespace cv::xfeatures2d;
-
-void readme();
-
-/* @function main */
-int main( int argc, char** argv )
-{
-  if( argc != 3 )
-  { readme(); return -1; }
-
-  Mat img_object = imread( argv[1], IMREAD_GRAYSCALE );
-  Mat img_scene = imread( argv[2], IMREAD_GRAYSCALE );
-
-  if( !img_object.data || !img_scene.data )
-  { std::cout<< " --(!) Error reading images " << std::endl; return -1; }
-
-  //-- Step 1: Detect the keypoints and extract descriptors using SURF
-  int minHessian = 400;
-
-  Ptr<SURF> detector = SURF::create( minHessian );
-
-  std::vector<KeyPoint> keypoints_object, keypoints_scene;
-  Mat descriptors_object, descriptors_scene;
-
-  detector->detectAndCompute( img_object, Mat(), keypoints_object, descriptors_object );
-  detector->detectAndCompute( img_scene, Mat(), keypoints_scene, descriptors_scene );
-
-  //-- Step 2: Matching descriptor vectors using FLANN matcher
-  FlannBasedMatcher matcher;
-  std::vector< DMatch > matches;
-  matcher.match( descriptors_object, descriptors_scene, matches );
-
-  double max_dist = 0; double min_dist = 100;
-
-  //-- Quick calculation of max and min distances between keypoints
-  for( int i = 0; i < descriptors_object.rows; i++ )
-  { double dist = matches[i].distance;
-    if( dist < min_dist ) min_dist = dist;
-    if( dist > max_dist ) max_dist = dist;
-  }
-
-  printf("-- Max dist : %f \n", max_dist );
-  printf("-- Min dist : %f \n", min_dist );
-
-  //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
-  std::vector< DMatch > good_matches;
-
-  for( int i = 0; i < descriptors_object.rows; i++ )
-  { if( matches[i].distance <= 3*min_dist )
-     { good_matches.push_back( matches[i]); }
-  }
-
-  Mat img_matches;
-  drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
-               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
-               std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
-
-  //-- Localize the object
-  std::vector<Point2f> obj;
-  std::vector<Point2f> scene;
-
-  for( size_t i = 0; i < good_matches.size(); i++ )
-  {
-    //-- Get the keypoints from the good matches
-    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
-    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
-  }
-
-  Mat H = findHomography( obj, scene, RANSAC );
-
-  //-- Get the corners from the image_1 ( the object to be "detected" )
-  std::vector<Point2f> obj_corners(4);
-  obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
-  obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
-  std::vector<Point2f> scene_corners(4);
-
-  perspectiveTransform( obj_corners, scene_corners, H);
-
-  //-- Draw lines between the corners (the mapped object in the scene - image_2 )
-  line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
-  line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
-  line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
-  line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
+@add_toggle_cpp
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.cpp)
+@include samples/cpp/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.cpp
+@end_toggle
 
-  //-- Show detected matches
-  imshow( "Good Matches & Object detection", img_matches );
+@add_toggle_java
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/feature_homography/SURFFLANNMatchingHomographyDemo.java)
+@include samples/java/tutorial_code/features2D/feature_homography/SURFFLANNMatchingHomographyDemo.java
+@end_toggle
 
-  waitKey(0);
-  return 0;
-  }
+@add_toggle_python
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py)
+@include samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py
+@end_toggle
 
-  /* @function readme */
-  void readme()
-  { std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }
-@endcode
 Explanation
 -----------
 
 Result
 ------
 
--#  And here is the result for the detected object (highlighted in green)
+-   And here is the result for the detected object (highlighted in green). Note that since the homography is estimated with a RANSAC approach,
+    detected false matches will not impact the homography calculation.
 
     ![](images/Feature_Homography_Result.jpg)
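A sketch of the corresponding robust estimation step in Python (`kp1`, `kp2` and `good_matches` as in the ratio-test sketch earlier; the 3-pixel reprojection threshold is an assumed value):

@code{.py}
import cv2 as cv
import numpy as np

# Coordinates of the good matches in the object and the scene image
obj_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
scene_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

# RANSAC ignores outlier correspondences; mask flags the inliers it kept
H, mask = cv.findHomography(obj_pts, scene_pts, cv.RANSAC, 3.0)
print('RANSAC inliers:', int(mask.sum()))
@endcode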
index d043a5a..35b1bb6 100644 (file)
Binary files a/doc/tutorials/features2d/feature_homography/images/Feature_Homography_Result.jpg and b/doc/tutorials/features2d/feature_homography/images/Feature_Homography_Result.jpg differ
index a5d5a91..37e4e41 100644 (file)
@@ -6,39 +6,51 @@ OpenCV.
 
 -   @subpage tutorial_harris_detector
 
+    *Languages:* C++, Java, Python
+
     *Compatibility:* \> OpenCV 2.0
 
     *Author:* Ana Huamán
 
-    Why is it a good idea to track corners? We learn to use the Harris method to detect
-    corners
+    Why is it a good idea to track corners? We learn how to use the Harris method to detect
+    corners.
 
 -   @subpage tutorial_good_features_to_track
 
+    *Languages:* C++, Java, Python
+
     *Compatibility:* \> OpenCV 2.0
 
     *Author:* Ana Huamán
 
-    Where we use an improved method to detect corners more accuratelyI
+    Where we use an improved method to detect corners more accurately.
 
 -   @subpage tutorial_generic_corner_detector
 
+    *Languages:* C++, Java, Python
+
     *Compatibility:* \> OpenCV 2.0
 
     *Author:* Ana Huamán
 
     Here you will learn how to use OpenCV functions to make your personalized corner detector!
 
--   @subpage tutorial_corner_subpixeles
+    *Languages:* C++, Java, Python
+
+-   @subpage tutorial_corner_subpixels
+
+    *Languages:* C++, Java, Python
 
     *Compatibility:* \> OpenCV 2.0
 
     *Author:* Ana Huamán
 
-    Is pixel resolution enough? Here we learn a simple method to improve our accuracy.
+    Is pixel resolution enough? Here we learn a simple method to improve our corner location accuracy.
 
 -   @subpage tutorial_feature_detection
 
+    *Languages:* C++, Java, Python
+
     *Compatibility:* \> OpenCV 2.0
 
     *Author:* Ana Huamán
@@ -47,6 +59,8 @@ OpenCV.
 
 -   @subpage tutorial_feature_description
 
+    *Languages:* C++, Java, Python
+
     *Compatibility:* \> OpenCV 2.0
 
     *Author:* Ana Huamán
@@ -55,6 +69,8 @@ OpenCV.
 
 -   @subpage tutorial_feature_flann_matcher
 
+    *Languages:* C++, Java, Python
+
     *Compatibility:* \> OpenCV 2.0
 
     *Author:* Ana Huamán
@@ -63,6 +79,8 @@ OpenCV.
 
 -   @subpage tutorial_feature_homography
 
+    *Languages:* C++, Java, Python
+
     *Compatibility:* \> OpenCV 2.0
 
     *Author:* Ana Huamán
diff --git a/doc/tutorials/features2d/trackingmotion/corner_subpixeles/corner_subpixeles.markdown b/doc/tutorials/features2d/trackingmotion/corner_subpixeles/corner_subpixeles.markdown
deleted file mode 100644 (file)
index 946fd77..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-Detecting corners location in subpixeles {#tutorial_corner_subpixeles}
-========================================
-
-Goal
-----
-
-In this tutorial you will learn how to:
-
--   Use the OpenCV function @ref cv::cornerSubPix to find more exact corner positions (more exact
-    than integer pixels).
-
-Theory
-------
-
-Code
-----
-
-This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp)
-@include samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp
-
-Explanation
------------
-
-Result
-------
-
-![](images/Corner_Subpixeles_Original_Image.jpg)
-
-Here is the result:
-
-![](images/Corner_Subpixeles_Result.jpg)
diff --git a/doc/tutorials/features2d/trackingmotion/corner_subpixels/corner_subpixels.markdown b/doc/tutorials/features2d/trackingmotion/corner_subpixels/corner_subpixels.markdown
new file mode 100644 (file)
index 0000000..82b33dd
--- /dev/null
@@ -0,0 +1,46 @@
+Detecting corner locations in subpixels {#tutorial_corner_subpixels}
+=======================================
+
+Goal
+----
+
+In this tutorial you will learn how to:
+
+-   Use the OpenCV function @ref cv::cornerSubPix to find more exact corner positions (more exact
+    than integer pixels).
+
+Theory
+------
+
+Code
+----
+
+@add_toggle_cpp
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp)
+@include samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp
+@end_toggle
+
+@add_toggle_java
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java)
+@include samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java
+@end_toggle
+
+@add_toggle_python
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py)
+@include samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py
+@end_toggle
+
+Explanation
+-----------
+
+Result
+------
+
+![](images/Corner_Subpixels_Original_Image.jpg)
+
+Here is the result:
+
+![](images/Corner_Subpixels_Result.jpg)
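The refinement pipeline itself fits in a few lines of Python; a minimal sketch using the same window parameters as the C++ demo (`pic3.png` stands in for the default sample image):

@code{.py}
import cv2 as cv

img = cv.imread('pic3.png')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

# Initial corners at integer pixel resolution
corners = cv.goodFeaturesToTrack(gray, maxCorners=25, qualityLevel=0.01, minDistance=10)

# Refine each corner location to sub-pixel accuracy
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 40, 0.001)
corners = cv.cornerSubPix(gray, corners, winSize=(5, 5), zeroZone=(-1, -1), criteria=criteria)
print(corners.reshape(-1, 2))
@endcode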
index 7aba636..f10d3ef 100644 (file)
@@ -1,5 +1,5 @@
-Creating yor own corner detector {#tutorial_generic_corner_detector}
-================================
+Creating your own corner detector {#tutorial_generic_corner_detector}
+=================================
 
 Goal
 ----
@@ -10,7 +10,7 @@ In this tutorial you will learn how to:
     to determine if a pixel is a corner.
 -   Use the OpenCV function @ref cv::cornerMinEigenVal to find the minimum eigenvalues for corner
     detection.
--   To implement our own version of the Harris detector as well as the Shi-Tomasi detector, by using
+-   Implement our own version of the Harris detector as well as the Shi-Tomasi detector, by using
     the two functions above.
 
 Theory
@@ -19,10 +19,26 @@ Theory
 Code
 ----
 
+@add_toggle_cpp
 This tutorial code is shown in the lines below. You can also download it from
 [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp)
 
-@include cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp
+@include samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp
+@end_toggle
+
+@add_toggle_java
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/generic_corner_detector/CornerDetectorDemo.java)
+
+@include samples/java/tutorial_code/TrackingMotion/generic_corner_detector/CornerDetectorDemo.java
+@end_toggle
+
+@add_toggle_python
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py)
+
+@include samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py
+@end_toggle
 
 Explanation
 -----------
index 7c48aa1..70d25ab 100644 (file)
@@ -6,7 +6,7 @@ Goal
 
 In this tutorial you will learn how to:
 
--   Use the function @ref cv::goodFeaturesToTrack to detect corners using the Shi-Tomasi method.
+-   Use the function @ref cv::goodFeaturesToTrack to detect corners using the Shi-Tomasi method (@cite Shi94).
 
 Theory
 ------
@@ -14,9 +14,23 @@ Theory
 Code
 ----
 
+@add_toggle_cpp
 This tutorial code is shown in the lines below. You can also download it from
 [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp)
 @include samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp
+@end_toggle
+
+@add_toggle_java
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/good_features_to_track/GoodFeaturesToTrackDemo.java)
+@include samples/java/tutorial_code/TrackingMotion/good_features_to_track/GoodFeaturesToTrackDemo.java
+@end_toggle
+
+@add_toggle_python
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py)
+@include samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py
+@end_toggle
 
 Explanation
 -----------
@@ -24,4 +38,4 @@ Explanation
 Result
 ------
 
-![](images/Feature_Detection_Result_a.jpg)
+![](images/good_features_to_track_Shi_Tomasi.jpg)
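The heart of the demo is a single `goodFeaturesToTrack` call; a minimal Python sketch (the parameters mirror the C++ demo, with a fixed corner color in place of its random colors):

@code{.py}
import cv2 as cv

img = cv.imread('pic3.png')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

corners = cv.goodFeaturesToTrack(gray, maxCorners=23, qualityLevel=0.01,
                                 minDistance=10, blockSize=3)
print('** Number of corners detected:', len(corners))
for x, y in corners.reshape(-1, 2):
    cv.circle(img, (int(x), int(y)), 4, (0, 255, 0), cv.FILLED)
cv.imshow('Shi-Tomasi corners', img)
cv.waitKey()
@endcode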
diff --git a/doc/tutorials/features2d/trackingmotion/good_features_to_track/images/Feature_Detection_Result_a.jpg b/doc/tutorials/features2d/trackingmotion/good_features_to_track/images/Feature_Detection_Result_a.jpg
deleted file mode 100644 (file)
index cca9a2b..0000000
Binary files a/doc/tutorials/features2d/trackingmotion/good_features_to_track/images/Feature_Detection_Result_a.jpg and /dev/null differ
diff --git a/doc/tutorials/features2d/trackingmotion/good_features_to_track/images/Feature_Detection_Result_b.jpg b/doc/tutorials/features2d/trackingmotion/good_features_to_track/images/Feature_Detection_Result_b.jpg
deleted file mode 100644 (file)
index 129450e..0000000
Binary files a/doc/tutorials/features2d/trackingmotion/good_features_to_track/images/Feature_Detection_Result_b.jpg and /dev/null differ
diff --git a/doc/tutorials/features2d/trackingmotion/good_features_to_track/images/good_features_to_track_Shi_Tomasi.jpg b/doc/tutorials/features2d/trackingmotion/good_features_to_track/images/good_features_to_track_Shi_Tomasi.jpg
new file mode 100644 (file)
index 0000000..007f09a
Binary files /dev/null and b/doc/tutorials/features2d/trackingmotion/good_features_to_track/images/good_features_to_track_Shi_Tomasi.jpg differ
index b1b8b67..bbf4fdb 100644 (file)
@@ -118,9 +118,23 @@ In this tutorial we will study the *corner* features, specifically.
 Code
 ----
 
+@add_toggle_cpp
 This tutorial code is shown in the lines below. You can also download it from
 [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp)
 @include samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp
+@end_toggle
+
+@add_toggle_java
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/harris_detector/CornerHarrisDemo.java)
+@include samples/java/tutorial_code/TrackingMotion/harris_detector/CornerHarrisDemo.java
+@end_toggle
+
+@add_toggle_python
+This tutorial code is shown in the lines below. You can also download it from
+[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py)
+@include samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py
+@end_toggle
 
 Explanation
 -----------
index 2b8471d..894b01c 100644 (file)
@@ -13,15 +13,15 @@ using namespace std;
 
 /// Global variables
 Mat src, src_gray;
-Mat myHarris_dst; Mat myHarris_copy; Mat Mc;
-Mat myShiTomasi_dst; Mat myShiTomasi_copy;
+Mat myHarris_dst, myHarris_copy, Mc;
+Mat myShiTomasi_dst, myShiTomasi_copy;
 
 int myShiTomasi_qualityLevel = 50;
 int myHarris_qualityLevel = 50;
 int max_qualityLevel = 100;
 
-double myHarris_minVal; double myHarris_maxVal;
-double myShiTomasi_minVal; double myShiTomasi_maxVal;
+double myHarris_minVal, myHarris_maxVal;
+double myShiTomasi_minVal, myShiTomasi_maxVal;
 
 RNG rng(12345);
 
@@ -37,56 +37,54 @@ void myHarris_function( int, void* );
  */
 int main( int argc, char** argv )
 {
-  /// Load source image and convert it to gray
-  CommandLineParser parser( argc, argv, "{@input | ../data/stuff.jpg | input image}" );
-  src = imread( parser.get<String>( "@input" ), IMREAD_COLOR );
-  if ( src.empty() )
-  {
-    cout << "Could not open or find the image!\n" << endl;
-    cout << "Usage: " << argv[0] << " <Input image>" << endl;
-    return -1;
-  }
-  cvtColor( src, src_gray, COLOR_BGR2GRAY );
-
-  /// Set some parameters
-  int blockSize = 3; int apertureSize = 3;
-
-  /// My Harris matrix -- Using cornerEigenValsAndVecs
-  myHarris_dst = Mat::zeros( src_gray.size(), CV_32FC(6) );
-  Mc = Mat::zeros( src_gray.size(), CV_32FC1 );
-
-  cornerEigenValsAndVecs( src_gray, myHarris_dst, blockSize, apertureSize, BORDER_DEFAULT );
-
-  /* calculate Mc */
-  for( int j = 0; j < src_gray.rows; j++ )
-     { for( int i = 0; i < src_gray.cols; i++ )
-          {
-            float lambda_1 = myHarris_dst.at<Vec6f>(j, i)[0];
-            float lambda_2 = myHarris_dst.at<Vec6f>(j, i)[1];
-            Mc.at<float>(j,i) = lambda_1*lambda_2 - 0.04f*pow( ( lambda_1 + lambda_2 ), 2 );
-          }
-     }
-
-  minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal, 0, 0, Mat() );
-
-  /* Create Window and Trackbar */
-  namedWindow( myHarris_window, WINDOW_AUTOSIZE );
-  createTrackbar( " Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel, myHarris_function );
-  myHarris_function( 0, 0 );
-
-  /// My Shi-Tomasi -- Using cornerMinEigenVal
-  myShiTomasi_dst = Mat::zeros( src_gray.size(), CV_32FC1 );
-  cornerMinEigenVal( src_gray, myShiTomasi_dst, blockSize, apertureSize, BORDER_DEFAULT );
-
-  minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal, 0, 0, Mat() );
-
-  /* Create Window and Trackbar */
-  namedWindow( myShiTomasi_window, WINDOW_AUTOSIZE );
-  createTrackbar( " Quality Level:", myShiTomasi_window, &myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function );
-  myShiTomasi_function( 0, 0 );
-
-  waitKey(0);
-  return(0);
+    /// Load source image and convert it to gray
+    CommandLineParser parser( argc, argv, "{@input | ../data/building.jpg | input image}" );
+    src = imread( parser.get<String>( "@input" ) );
+    if ( src.empty() )
+    {
+        cout << "Could not open or find the image!\n" << endl;
+        cout << "Usage: " << argv[0] << " <Input image>" << endl;
+        return -1;
+    }
+    cvtColor( src, src_gray, COLOR_BGR2GRAY );
+
+    /// Set some parameters
+    int blockSize = 3, apertureSize = 3;
+
+    /// My Harris matrix -- Using cornerEigenValsAndVecs
+    cornerEigenValsAndVecs( src_gray, myHarris_dst, blockSize, apertureSize );
+
+    /* calculate Mc */
+    Mc = Mat( src_gray.size(), CV_32FC1 );
+    for( int i = 0; i < src_gray.rows; i++ )
+    {
+        for( int j = 0; j < src_gray.cols; j++ )
+        {
+            float lambda_1 = myHarris_dst.at<Vec6f>(i, j)[0];
+            float lambda_2 = myHarris_dst.at<Vec6f>(i, j)[1];
+            Mc.at<float>(i, j) = lambda_1*lambda_2 - 0.04f*pow( ( lambda_1 + lambda_2 ), 2 );
+        }
+    }
+
+    minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal );
+
+    /* Create Window and Trackbar */
+    namedWindow( myHarris_window );
+    createTrackbar( "Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel, myHarris_function );
+    myHarris_function( 0, 0 );
+
+    /// My Shi-Tomasi -- Using cornerMinEigenVal
+    cornerMinEigenVal( src_gray, myShiTomasi_dst, blockSize, apertureSize );
+
+    minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal );
+
+    /* Create Window and Trackbar */
+    namedWindow( myShiTomasi_window );
+    createTrackbar( "Quality Level:", myShiTomasi_window, &myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function );
+    myShiTomasi_function( 0, 0 );
+
+    waitKey();
+    return 0;
 }
 
 /**
@@ -94,18 +92,20 @@ int main( int argc, char** argv )
  */
 void myShiTomasi_function( int, void* )
 {
-  myShiTomasi_copy = src.clone();
-
-  if( myShiTomasi_qualityLevel < 1 ) { myShiTomasi_qualityLevel = 1; }
-
-  for( int j = 0; j < src_gray.rows; j++ )
-     { for( int i = 0; i < src_gray.cols; i++ )
-          {
-            if( myShiTomasi_dst.at<float>(j,i) > myShiTomasi_minVal + ( myShiTomasi_maxVal - myShiTomasi_minVal )*myShiTomasi_qualityLevel/max_qualityLevel )
-              { circle( myShiTomasi_copy, Point(i,j), 4, Scalar( rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255) ), -1, 8, 0 ); }
-          }
-     }
-  imshow( myShiTomasi_window, myShiTomasi_copy );
+    myShiTomasi_copy = src.clone();
+    myShiTomasi_qualityLevel = MAX(myShiTomasi_qualityLevel, 1);
+
+    for( int i = 0; i < src_gray.rows; i++ )
+    {
+        for( int j = 0; j < src_gray.cols; j++ )
+        {
+            if( myShiTomasi_dst.at<float>(i,j) > myShiTomasi_minVal + ( myShiTomasi_maxVal - myShiTomasi_minVal )*myShiTomasi_qualityLevel/max_qualityLevel )
+            {
+                circle( myShiTomasi_copy, Point(j,i), 4, Scalar( rng.uniform(0,256), rng.uniform(0,256), rng.uniform(0,256) ), FILLED );
+            }
+        }
+    }
+    imshow( myShiTomasi_window, myShiTomasi_copy );
 }
 
 /**
 }
 
 /**
@@ -113,16 +113,18 @@ void myShiTomasi_function( int, void* )
  */
 void myHarris_function( int, void* )
 {
  */
 void myHarris_function( int, void* )
 {
-  myHarris_copy = src.clone();
-
-  if( myHarris_qualityLevel < 1 ) { myHarris_qualityLevel = 1; }
-
-  for( int j = 0; j < src_gray.rows; j++ )
-     { for( int i = 0; i < src_gray.cols; i++ )
-          {
-            if( Mc.at<float>(j,i) > myHarris_minVal + ( myHarris_maxVal - myHarris_minVal )*myHarris_qualityLevel/max_qualityLevel )
-              { circle( myHarris_copy, Point(i,j), 4, Scalar( rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255) ), -1, 8, 0 ); }
-          }
-     }
-  imshow( myHarris_window, myHarris_copy );
+    myHarris_copy = src.clone();
+    myHarris_qualityLevel = MAX(myHarris_qualityLevel, 1);
+
+    for( int i = 0; i < src_gray.rows; i++ )
+    {
+        for( int j = 0; j < src_gray.cols; j++ )
+        {
+            if( Mc.at<float>(i,j) > myHarris_minVal + ( myHarris_maxVal - myHarris_minVal )*myHarris_qualityLevel/max_qualityLevel )
+            {
+                circle( myHarris_copy, Point(j,i), 4, Scalar( rng.uniform(0,256), rng.uniform(0,256), rng.uniform(0,256) ), FILLED );
+            }
+        }
+    }
+    imshow( myHarris_window, myHarris_copy );
 }
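The hand-rolled Harris response above maps almost directly onto NumPy; a sketch of the Mc computation with the demo's parameters (vectorized instead of the double loop):

@code{.py}
import cv2 as cv

img = cv.imread('building.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

# Six channels per pixel: lambda_1, lambda_2, x_1, y_1, x_2, y_2
eigen = cv.cornerEigenValsAndVecs(gray, blockSize=3, ksize=3)
l1, l2 = eigen[:, :, 0], eigen[:, :, 1]

# Mc = lambda_1 * lambda_2 - 0.04 * (lambda_1 + lambda_2)^2
Mc = l1 * l2 - 0.04 * (l1 + l2) ** 2
minVal, maxVal, _, _ = cv.minMaxLoc(Mc)
@endcode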
index 2d44eeb..3567270 100644 (file)
@@ -27,26 +27,26 @@ void cornerHarris_demo( int, void* );
  */
 int main( int argc, char** argv )
 {
-  /// Load source image and convert it to gray
-  CommandLineParser parser( argc, argv, "{@input | ../data/building.jpg | input image}" );
-  src = imread( parser.get<String>( "@input" ), IMREAD_COLOR );
-  if ( src.empty() )
-  {
-    cout << "Could not open or find the image!\n" << endl;
-    cout << "Usage: " << argv[0] << " <Input image>" << endl;
-    return -1;
-  }
-  cvtColor( src, src_gray, COLOR_BGR2GRAY );
+    /// Load source image and convert it to gray
+    CommandLineParser parser( argc, argv, "{@input | ../data/building.jpg | input image}" );
+    src = imread( parser.get<String>( "@input" ) );
+    if ( src.empty() )
+    {
+        cout << "Could not open or find the image!\n" << endl;
+        cout << "Usage: " << argv[0] << " <Input image>" << endl;
+        return -1;
+    }
+    cvtColor( src, src_gray, COLOR_BGR2GRAY );
 
 
-  namedWindow( source_window, WINDOW_AUTOSIZE );
-  createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo );
-  imshow( source_window, src );
+    /// Create a window and a trackbar
+    namedWindow( source_window );
+    createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo );
+    imshow( source_window, src );
 
 
+    cornerHarris_demo( 0, 0 );
 
 
-  return(0);
+    waitKey();
+    return 0;
 }
 
 /**
 }
 
 /**
@@ -55,33 +55,33 @@ int main( int argc, char** argv )
  */
 void cornerHarris_demo( int, void* )
 {
  */
 void cornerHarris_demo( int, void* )
 {
+    /// Detector parameters
+    int blockSize = 2;
+    int apertureSize = 3;
+    double k = 0.04;
 
 
-  Mat dst, dst_norm, dst_norm_scaled;
-  dst = Mat::zeros( src.size(), CV_32FC1 );
+    /// Detecting corners
+    Mat dst = Mat::zeros( src.size(), CV_32FC1 );
+    cornerHarris( src_gray, dst, blockSize, apertureSize, k );
 
 
-  /// Detector parameters
-  int blockSize = 2;
-  int apertureSize = 3;
-  double k = 0.04;
+    /// Normalizing
+    Mat dst_norm, dst_norm_scaled;
+    normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
+    convertScaleAbs( dst_norm, dst_norm_scaled );
 
 
-  /// Detecting corners
-  cornerHarris( src_gray, dst, blockSize, apertureSize, k, BORDER_DEFAULT );
+    /// Drawing a circle around corners
+    for( int i = 0; i < dst_norm.rows ; i++ )
+    {
+        for( int j = 0; j < dst_norm.cols; j++ )
+        {
+            if( (int) dst_norm.at<float>(i,j) > thresh )
+            {
+                circle( dst_norm_scaled, Point(j,i), 5,  Scalar(0), 2, 8, 0 );
+            }
+        }
+    }
 
 
-  /// Normalizing
-  normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
-  convertScaleAbs( dst_norm, dst_norm_scaled );
-
-  /// Drawing a circle around corners
-  for( int j = 0; j < dst_norm.rows ; j++ )
-     { for( int i = 0; i < dst_norm.cols; i++ )
-          {
-            if( (int) dst_norm.at<float>(j,i) > thresh )
-              {
-               circle( dst_norm_scaled, Point( i, j ), 5,  Scalar(0), 2, 8, 0 );
-              }
-          }
-     }
-  /// Showing the result
-  namedWindow( corners_window, WINDOW_AUTOSIZE );
-  imshow( corners_window, dst_norm_scaled );
+    /// Showing the result
+    namedWindow( corners_window );
+    imshow( corners_window, dst_norm_scaled );
 }
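For comparison, the core of this Harris demo reduces to a few lines of Python; a sketch (the fixed threshold of 200 is illustrative, where the demo drives it from a trackbar):

@code{.py}
import cv2 as cv
import numpy as np

img = cv.imread('building.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

# Harris response with the same parameters as the C++ demo
dst = cv.cornerHarris(gray, blockSize=2, ksize=3, k=0.04)
dst_norm = cv.normalize(dst, None, 0, 255, cv.NORM_MINMAX)
dst_norm_scaled = cv.convertScaleAbs(dst_norm)

thresh = 200
for y, x in zip(*np.where(dst_norm > thresh)):
    cv.circle(dst_norm_scaled, (int(x), int(y)), 5, 0, 2)
cv.imshow('Corners detected', dst_norm_scaled)
cv.waitKey()
@endcode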
index 0addc55..853078b 100644 (file)
@@ -28,29 +28,29 @@ void goodFeaturesToTrack_Demo( int, void* );
  */
 int main( int argc, char** argv )
 {
-  /// Load source image and convert it to gray
-  CommandLineParser parser( argc, argv, "{@input | ../data/pic3.png | input image}" );
-  src = imread(parser.get<String>( "@input" ), IMREAD_COLOR);
-  if ( src.empty() )
-  {
-    cout << "Could not open or find the image!\n" << endl;
-    cout << "Usage: " << argv[0] << " <Input image>" << endl;
-    return -1;
-  }
-  cvtColor( src, src_gray, COLOR_BGR2GRAY );
+    /// Load source image and convert it to gray
+    CommandLineParser parser( argc, argv, "{@input | ../data/pic3.png | input image}" );
+    src = imread( parser.get<String>( "@input" ) );
+    if( src.empty() )
+    {
+        cout << "Could not open or find the image!\n" << endl;
+        cout << "Usage: " << argv[0] << " <Input image>" << endl;
+        return -1;
+    }
+    cvtColor( src, src_gray, COLOR_BGR2GRAY );
 
-  /// Create Window
-  namedWindow( source_window, WINDOW_AUTOSIZE );
+    /// Create Window
+    namedWindow( source_window );
 
-  /// Create Trackbar to set the number of corners
-  createTrackbar( "Max  corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
+    /// Create Trackbar to set the number of corners
+    createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
 
-  imshow( source_window, src );
+    imshow( source_window, src );
 
-  goodFeaturesToTrack_Demo( 0, 0 );
+    goodFeaturesToTrack_Demo( 0, 0 );
 
-  waitKey(0);
-  return(0);
+    waitKey();
+    return 0;
 }
 
 /**
@@ -59,52 +59,54 @@ int main( int argc, char** argv )
  */
 void goodFeaturesToTrack_Demo( int, void* )
 {
-  if( maxCorners < 1 ) { maxCorners = 1; }
-
-  /// Parameters for Shi-Tomasi algorithm
-  vector<Point2f> corners;
-  double qualityLevel = 0.01;
-  double minDistance = 10;
-  int blockSize = 3, gradiantSize = 3;
-  bool useHarrisDetector = false;
-  double k = 0.04;
-
-  /// Copy the source image
-  Mat copy;
-  copy = src.clone();
-
-  /// Apply corner detection
-  goodFeaturesToTrack( src_gray,
-               corners,
-               maxCorners,
-               qualityLevel,
-               minDistance,
-               Mat(),
-               blockSize,
-               gradiantSize,
-               useHarrisDetector,
-               k );
-
-
-  /// Draw corners detected
-  cout<<"** Number of corners detected: "<<corners.size()<<endl;
-  int r = 4;
-  for( size_t i = 0; i < corners.size(); i++ )
-     { circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0 ); }
-
-  /// Show what you got
-  namedWindow( source_window, WINDOW_AUTOSIZE );
-  imshow( source_window, copy );
-
-  /// Set the needed parameters to find the refined corners
-  Size winSize = Size( 5, 5 );
-  Size zeroZone = Size( -1, -1 );
-  TermCriteria criteria = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 40, 0.001 );
-
-  /// Calculate the refined corner locations
-  cornerSubPix( src_gray, corners, winSize, zeroZone, criteria );
-
-  /// Write them down
-  for( size_t i = 0; i < corners.size(); i++ )
-     { cout<<" -- Refined Corner ["<<i<<"]  ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
+    /// Parameters for Shi-Tomasi algorithm
+    maxCorners = MAX(maxCorners, 1);
+    vector<Point2f> corners;
+    double qualityLevel = 0.01;
+    double minDistance = 10;
+    int blockSize = 3, gradientSize = 3;
+    bool useHarrisDetector = false;
+    double k = 0.04;
+
+    /// Copy the source image
+    Mat copy = src.clone();
+
+    /// Apply corner detection
+    goodFeaturesToTrack( src_gray,
+                         corners,
+                         maxCorners,
+                         qualityLevel,
+                         minDistance,
+                         Mat(),
+                         blockSize,
+                         gradientSize,
+                         useHarrisDetector,
+                         k );
+
+
+    /// Draw corners detected
+    cout << "** Number of corners detected: " << corners.size() << endl;
+    int radius = 4;
+    for( size_t i = 0; i < corners.size(); i++ )
+    {
+        circle( copy, corners[i], radius, Scalar(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256)), FILLED );
+    }
+
+    /// Show what you got
+    namedWindow( source_window );
+    imshow( source_window, copy );
+
+    /// Set the needed parameters to find the refined corners
+    Size winSize = Size( 5, 5 );
+    Size zeroZone = Size( -1, -1 );
+    TermCriteria criteria = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 40, 0.001 );
+
+    /// Calculate the refined corner locations
+    cornerSubPix( src_gray, corners, winSize, zeroZone, criteria );
+
+    /// Write them down
+    for( size_t i = 0; i < corners.size(); i++ )
+    {
+        cout << " -- Refined Corner [" << i << "]  (" << corners[i].x << "," << corners[i].y << ")" << endl;
+    }
 }
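
For readers following the patch in Python, the refinement step above maps one-to-one onto the cv2 bindings. A minimal sketch, assuming a stock OpenCV Python build and a hypothetical input.png; winSize (5, 5) means an 11x11 search window, and the criteria stop the iteration after 40 rounds or once a corner moves by less than 0.001:

    import cv2 as cv

    img = cv.imread('input.png')   # hypothetical input path
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

    # Pixel-accurate seeds from the Shi-Tomasi detector; the result is
    # float32, which is what cornerSubPix expects (assume corners were found)
    corners = cv.goodFeaturesToTrack(gray, maxCorners=10, qualityLevel=0.01,
                                     minDistance=10)
    seeds = corners.copy()

    # cornerSubPix refines in place; (-1, -1) means no dead zone in the
    # middle of the search window
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 40, 0.001)
    cv.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)

    for before, after in zip(seeds.reshape(-1, 2), corners.reshape(-1, 2)):
        print(before, '->', after)
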
index e72653f..022cd45 100644 (file)
@@ -29,29 +29,29 @@ void goodFeaturesToTrack_Demo( int, void* );
  */
 int main( int argc, char** argv )
 {
-  /// Load source image and convert it to gray
-  CommandLineParser parser( argc, argv, "{@input | ../data/pic3.png | input image}" );
-  src = imread( parser.get<String>( "@input" ), IMREAD_COLOR );
-  if( src.empty() )
-  {
-    cout << "Could not open or find the image!\n" << endl;
-    cout << "Usage: " << argv[0] << " <Input image>" << endl;
-    return -1;
-  }
-  cvtColor( src, src_gray, COLOR_BGR2GRAY );
+    /// Load source image and convert it to gray
+    CommandLineParser parser( argc, argv, "{@input | ../data/pic3.png | input image}" );
+    src = imread( parser.get<String>( "@input" ) );
+    if( src.empty() )
+    {
+        cout << "Could not open or find the image!\n" << endl;
+        cout << "Usage: " << argv[0] << " <Input image>" << endl;
+        return -1;
+    }
+    cvtColor( src, src_gray, COLOR_BGR2GRAY );
 
-  /// Create Window
-  namedWindow( source_window, WINDOW_AUTOSIZE );
+    /// Create Window
+    namedWindow( source_window );
 
-  /// Create Trackbar to set the number of corners
-  createTrackbar( "Max  corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
+    /// Create Trackbar to set the number of corners
+    createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
 
-  imshow( source_window, src );
+    imshow( source_window, src );
 
-  goodFeaturesToTrack_Demo( 0, 0 );
+    goodFeaturesToTrack_Demo( 0, 0 );
 
-  waitKey(0);
-  return(0);
+    waitKey();
+    return 0;
 }
 
 /**
@@ -60,40 +60,40 @@ int main( int argc, char** argv )
  */
 void goodFeaturesToTrack_Demo( int, void* )
 {
-  if( maxCorners < 1 ) { maxCorners = 1; }
-
-  /// Parameters for Shi-Tomasi algorithm
-  vector<Point2f> corners;
-  double qualityLevel = 0.01;
-  double minDistance = 10;
-  int blockSize = 3, gradiantSize = 3;
-  bool useHarrisDetector = false;
-  double k = 0.04;
-
-  /// Copy the source image
-  Mat copy;
-  copy = src.clone();
-
-  /// Apply corner detection
-  goodFeaturesToTrack( src_gray,
-               corners,
-               maxCorners,
-               qualityLevel,
-               minDistance,
-               Mat(),
-               blockSize,
-               gradiantSize,
-               useHarrisDetector,
-               k );
-
-
-  /// Draw corners detected
-  cout<<"** Number of corners detected: "<<corners.size()<<endl;
-  int r = 4;
-  for( size_t i = 0; i < corners.size(); i++ )
-     { circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0 ); }
-
-  /// Show what you got
-  namedWindow( source_window, WINDOW_AUTOSIZE );
-  imshow( source_window, copy );
+    /// Parameters for Shi-Tomasi algorithm
+    maxCorners = MAX(maxCorners, 1);
+    vector<Point2f> corners;
+    double qualityLevel = 0.01;
+    double minDistance = 10;
+    int blockSize = 3, gradientSize = 3;
+    bool useHarrisDetector = false;
+    double k = 0.04;
+
+    /// Copy the source image
+    Mat copy = src.clone();
+
+    /// Apply corner detection
+    goodFeaturesToTrack( src_gray,
+                         corners,
+                         maxCorners,
+                         qualityLevel,
+                         minDistance,
+                         Mat(),
+                         blockSize,
+                         gradientSize,
+                         useHarrisDetector,
+                         k );
+
+
+    /// Draw corners detected
+    cout << "** Number of corners detected: " << corners.size() << endl;
+    int radius = 4;
+    for( size_t i = 0; i < corners.size(); i++ )
+    {
+        circle( copy, corners[i], radius, Scalar(rng.uniform(0,255), rng.uniform(0, 256), rng.uniform(0, 256)), FILLED );
+    }
+
+    /// Show what you got
+    namedWindow( source_window );
+    imshow( source_window, copy );
 }
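
The two parameters doing the real work above are qualityLevel and minDistance: the former is relative (a corner survives only if its minimal-eigenvalue score reaches qualityLevel times the best score in the image), the latter enforces a minimum spacing between accepted corners. A quick way to see the effect, sketched in Python under the same assumptions as the previous snippet; maxCorners=0 lifts the count limit:

    import cv2 as cv

    gray = cv.imread('input.png', cv.IMREAD_GRAYSCALE)   # hypothetical input path

    # Raising qualityLevel keeps only corners close in strength to the
    # best one, so the count drops sharply
    for quality in (0.01, 0.10):
        corners = cv.goodFeaturesToTrack(gray, maxCorners=0,
                                         qualityLevel=quality, minDistance=10)
        n = 0 if corners is None else len(corners)
        print('qualityLevel=%.2f -> %d corners' % (quality, n))
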
diff --git a/samples/cpp/tutorial_code/features2D/feature_description/SURF_matching_Demo.cpp b/samples/cpp/tutorial_code/features2D/feature_description/SURF_matching_Demo.cpp
new file mode 100755 (executable)
index 0000000..3fb34e9
--- /dev/null
@@ -0,0 +1,60 @@
+#include <iostream>
+#include "opencv2/core.hpp"
+#ifdef HAVE_OPENCV_XFEATURES2D
+#include "opencv2/highgui.hpp"
+#include "opencv2/features2d.hpp"
+#include "opencv2/xfeatures2d.hpp"
+
+using namespace cv;
+using namespace cv::xfeatures2d;
+using std::cout;
+using std::endl;
+
+const char* keys =
+    "{ help h |                          | Print help message. }"
+    "{ input1 | ../data/box.png          | Path to input image 1. }"
+    "{ input2 | ../data/box_in_scene.png | Path to input image 2. }";
+
+int main( int argc, char* argv[] )
+{
+    CommandLineParser parser( argc, argv, keys );
+    Mat img1 = imread( parser.get<String>("input1"), IMREAD_GRAYSCALE );
+    Mat img2 = imread( parser.get<String>("input2"), IMREAD_GRAYSCALE );
+    if ( img1.empty() || img2.empty() )
+    {
+        cout << "Could not open or find the image!\n" << endl;
+        parser.printMessage();
+        return -1;
+    }
+
+    //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
+    int minHessian = 400;
+    Ptr<SURF> detector = SURF::create( minHessian );
+    std::vector<KeyPoint> keypoints1, keypoints2;
+    Mat descriptors1, descriptors2;
+    detector->detectAndCompute( img1, noArray(), keypoints1, descriptors1 );
+    detector->detectAndCompute( img2, noArray(), keypoints2, descriptors2 );
+
+    //-- Step 2: Matching descriptor vectors with a brute force matcher
+    // Since SURF is a floating-point descriptor NORM_L2 is used
+    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::BRUTEFORCE);
+    std::vector< DMatch > matches;
+    matcher->match( descriptors1, descriptors2, matches );
+
+    //-- Draw matches
+    Mat img_matches;
+    drawMatches( img1, keypoints1, img2, keypoints2, matches, img_matches );
+
+    //-- Show detected matches
+    imshow("Matches", img_matches );
+
+    waitKey();
+    return 0;
+}
+#else
+int main()
+{
+    std::cout << "This tutorial code needs the xfeatures2d contrib module to be run." << std::endl;
+    return 0;
+}
+#endif
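
The demo above matches with plain brute force, the right baseline: every descriptor of the first image is compared against every descriptor of the second, under the norm that fits the descriptor type. A Python sketch of the same pipeline, assuming an opencv-contrib build (SURF lives in xfeatures2d, hence the #ifdef guard) and the usual box.png / box_in_scene.png sample pair:

    import cv2 as cv

    img1 = cv.imread('box.png', cv.IMREAD_GRAYSCALE)            # assumed sample images
    img2 = cv.imread('box_in_scene.png', cv.IMREAD_GRAYSCALE)

    surf = cv.xfeatures2d.SURF_create(hessianThreshold=400)
    kp1, des1 = surf.detectAndCompute(img1, None)
    kp2, des2 = surf.detectAndCompute(img2, None)

    # NORM_L2 suits float descriptors such as SURF or SIFT; binary
    # descriptors (ORB, BRIEF) would need NORM_HAMMING instead
    matcher = cv.BFMatcher(cv.NORM_L2)
    matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)
    print('best distance %.3f, worst %.3f' % (matches[0].distance,
                                              matches[-1].distance))
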
diff --git a/samples/cpp/tutorial_code/features2D/feature_detection/SURF_detection_Demo.cpp b/samples/cpp/tutorial_code/features2D/feature_detection/SURF_detection_Demo.cpp
new file mode 100755 (executable)
index 0000000..ba9494e
--- /dev/null
@@ -0,0 +1,46 @@
+#include <iostream>
+#include "opencv2/core.hpp"
+#ifdef HAVE_OPENCV_XFEATURES2D
+#include "opencv2/highgui.hpp"
+#include "opencv2/features2d.hpp"
+#include "opencv2/xfeatures2d.hpp"
+
+using namespace cv;
+using namespace cv::xfeatures2d;
+using std::cout;
+using std::endl;
+
+int main( int argc, char* argv[] )
+{
+    CommandLineParser parser( argc, argv, "{@input | ../data/box.png | input image}" );
+    Mat src = imread( parser.get<String>( "@input" ), IMREAD_GRAYSCALE );
+    if ( src.empty() )
+    {
+        cout << "Could not open or find the image!\n" << endl;
+        cout << "Usage: " << argv[0] << " <Input image>" << endl;
+        return -1;
+    }
+
+    //-- Step 1: Detect the keypoints using SURF Detector
+    int minHessian = 400;
+    Ptr<SURF> detector = SURF::create( minHessian );
+    std::vector<KeyPoint> keypoints;
+    detector->detect( src, keypoints );
+
+    //-- Draw keypoints
+    Mat img_keypoints;
+    drawKeypoints( src, keypoints, img_keypoints );
+
+    //-- Show detected (drawn) keypoints
+    imshow("SURF Keypoints", img_keypoints );
+
+    waitKey();
+    return 0;
+}
+#else
+int main()
+{
+    std::cout << "This tutorial code needs the xfeatures2d contrib module to be run." << std::endl;
+    return 0;
+}
+#endif
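
The only tuning knob in this detector demo is minHessian: SURF keeps an interest point only where the determinant-of-Hessian response exceeds it, so raising the threshold trades keypoint count for stability. Sketched in Python, under the same contrib-build assumption as above:

    import cv2 as cv

    gray = cv.imread('box.png', cv.IMREAD_GRAYSCALE)   # assumed sample image

    # Fewer but more repeatable keypoints as the threshold grows
    for thresh in (100, 400, 1000):
        surf = cv.xfeatures2d.SURF_create(hessianThreshold=thresh)
        keypoints = surf.detect(gray, None)
        print('minHessian=%4d -> %d keypoints' % (thresh, len(keypoints)))
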
diff --git a/samples/cpp/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.cpp b/samples/cpp/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.cpp
new file mode 100755 (executable)
index 0000000..e22155f
--- /dev/null
@@ -0,0 +1,72 @@
+#include <iostream>
+#include "opencv2/core.hpp"
+#ifdef HAVE_OPENCV_XFEATURES2D
+#include "opencv2/highgui.hpp"
+#include "opencv2/features2d.hpp"
+#include "opencv2/xfeatures2d.hpp"
+
+using namespace cv;
+using namespace cv::xfeatures2d;
+using std::cout;
+using std::endl;
+
+const char* keys =
+    "{ help h |                          | Print help message. }"
+    "{ input1 | ../data/box.png          | Path to input image 1. }"
+    "{ input2 | ../data/box_in_scene.png | Path to input image 2. }";
+
+int main( int argc, char* argv[] )
+{
+    CommandLineParser parser( argc, argv, keys );
+    Mat img1 = imread( parser.get<String>("input1"), IMREAD_GRAYSCALE );
+    Mat img2 = imread( parser.get<String>("input2"), IMREAD_GRAYSCALE );
+    if ( img1.empty() || img2.empty() )
+    {
+        cout << "Could not open or find the image!\n" << endl;
+        parser.printMessage();
+        return -1;
+    }
+
+    //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
+    int minHessian = 400;
+    Ptr<SURF> detector = SURF::create( minHessian );
+    std::vector<KeyPoint> keypoints1, keypoints2;
+    Mat descriptors1, descriptors2;
+    detector->detectAndCompute( img1, noArray(), keypoints1, descriptors1 );
+    detector->detectAndCompute( img2, noArray(), keypoints2, descriptors2 );
+
+    //-- Step 2: Matching descriptor vectors with a FLANN based matcher
+    // Since SURF is a floating-point descriptor NORM_L2 is used
+    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
+    std::vector< std::vector<DMatch> > knn_matches;
+    matcher->knnMatch( descriptors1, descriptors2, knn_matches, 2 );
+
+    //-- Filter matches using Lowe's ratio test
+    const float ratio_thresh = 0.7f;
+    std::vector<DMatch> good_matches;
+    for (size_t i = 0; i < knn_matches.size(); i++)
+    {
+        if (knn_matches[i].size() > 1 && knn_matches[i][0].distance / knn_matches[i][1].distance <= ratio_thresh)
+        {
+            good_matches.push_back(knn_matches[i][0]);
+        }
+    }
+
+    //-- Draw matches
+    Mat img_matches;
+    drawMatches( img1, keypoints1, img2, keypoints2, good_matches, img_matches, Scalar::all(-1),
+                 Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
+
+    //-- Show detected matches
+    imshow("Good Matches", img_matches );
+
+    waitKey();
+    return 0;
+}
+#else
+int main()
+{
+    std::cout << "This tutorial code needs the xfeatures2d contrib module to be run." << std::endl;
+    return 0;
+}
+#endif
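
knnMatch with k=2 returns, for each query descriptor, its two nearest neighbours in the train set; Lowe's ratio test then keeps a match only when the best distance is clearly smaller than the runner-up (at most 0.7 of it here), which discards the ambiguous matches that repetitive texture produces. The same filter in Python, under the previous assumptions:

    import cv2 as cv

    img1 = cv.imread('box.png', cv.IMREAD_GRAYSCALE)            # assumed sample images
    img2 = cv.imread('box_in_scene.png', cv.IMREAD_GRAYSCALE)

    surf = cv.xfeatures2d.SURF_create(hessianThreshold=400)
    kp1, des1 = surf.detectAndCompute(img1, None)
    kp2, des2 = surf.detectAndCompute(img2, None)

    # FLANN defaults to KD-trees, appropriate for float descriptors
    flann = cv.FlannBasedMatcher()
    knn = flann.knnMatch(des1, des2, k=2)

    # Guard against queries with fewer than two neighbours, mirroring the
    # size() > 1 check in the C++ code above
    good = [p[0] for p in knn
            if len(p) == 2 and p[0].distance <= 0.7 * p[1].distance]
    print('%d raw pairs -> %d after the ratio test' % (len(knn), len(good)))
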
diff --git a/samples/cpp/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.cpp b/samples/cpp/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.cpp
new file mode 100755 (executable)
index 0000000..68b1d2a
--- /dev/null
@@ -0,0 +1,107 @@
+#include <iostream>
+#include "opencv2/core.hpp"
+#ifdef HAVE_OPENCV_XFEATURES2D
+#include "opencv2/calib3d.hpp"
+#include "opencv2/highgui.hpp"
+#include "opencv2/imgproc.hpp"
+#include "opencv2/features2d.hpp"
+#include "opencv2/xfeatures2d.hpp"
+
+using namespace cv;
+using namespace cv::xfeatures2d;
+using std::cout;
+using std::endl;
+
+const char* keys =
+        "{ help h |                          | Print help message. }"
+        "{ input1 | ../data/box.png          | Path to input image 1. }"
+        "{ input2 | ../data/box_in_scene.png | Path to input image 2. }";
+
+int main( int argc, char* argv[] )
+{
+    CommandLineParser parser( argc, argv, keys );
+    Mat img_object = imread( parser.get<String>("input1"), IMREAD_GRAYSCALE );
+    Mat img_scene = imread( parser.get<String>("input2"), IMREAD_GRAYSCALE );
+    if ( img_object.empty() || img_scene.empty() )
+    {
+        cout << "Could not open or find the image!\n" << endl;
+        parser.printMessage();
+        return -1;
+    }
+
+    //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
+    int minHessian = 400;
+    Ptr<SURF> detector = SURF::create( minHessian );
+    std::vector<KeyPoint> keypoints_object, keypoints_scene;
+    Mat descriptors_object, descriptors_scene;
+    detector->detectAndCompute( img_object, noArray(), keypoints_object, descriptors_object );
+    detector->detectAndCompute( img_scene, noArray(), keypoints_scene, descriptors_scene );
+
+    //-- Step 2: Matching descriptor vectors with a FLANN based matcher
+    // Since SURF is a floating-point descriptor NORM_L2 is used
+    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
+    std::vector< std::vector<DMatch> > knn_matches;
+    matcher->knnMatch( descriptors_object, descriptors_scene, knn_matches, 2 );
+
+    //-- Filter matches using Lowe's ratio test
+    const float ratio_thresh = 0.75f;
+    std::vector<DMatch> good_matches;
+    for (size_t i = 0; i < knn_matches.size(); i++)
+    {
+        if (knn_matches[i].size() > 1 && knn_matches[i][0].distance / knn_matches[i][1].distance <= ratio_thresh)
+        {
+            good_matches.push_back(knn_matches[i][0]);
+        }
+    }
+
+    //-- Draw matches
+    Mat img_matches;
+    drawMatches( img_object, keypoints_object, img_scene, keypoints_scene, good_matches, img_matches, Scalar::all(-1),
+                 Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
+
+    //-- Localize the object
+    std::vector<Point2f> obj;
+    std::vector<Point2f> scene;
+
+    for( size_t i = 0; i < good_matches.size(); i++ )
+    {
+        //-- Get the keypoints from the good matches
+        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
+        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
+    }
+
+    Mat H = findHomography( obj, scene, RANSAC );
+
+    //-- Get the corners from the image_1 ( the object to be "detected" )
+    std::vector<Point2f> obj_corners(4);
+    obj_corners[0] = Point2f(0, 0);
+    obj_corners[1] = Point2f( (float)img_object.cols, 0 );
+    obj_corners[2] = Point2f( (float)img_object.cols, (float)img_object.rows );
+    obj_corners[3] = Point2f( 0, (float)img_object.rows );
+    std::vector<Point2f> scene_corners(4);
+
+    perspectiveTransform( obj_corners, scene_corners, H);
+
+    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
+    line( img_matches, scene_corners[0] + Point2f((float)img_object.cols, 0),
+          scene_corners[1] + Point2f((float)img_object.cols, 0), Scalar(0, 255, 0), 4 );
+    line( img_matches, scene_corners[1] + Point2f((float)img_object.cols, 0),
+          scene_corners[2] + Point2f((float)img_object.cols, 0), Scalar( 0, 255, 0), 4 );
+    line( img_matches, scene_corners[2] + Point2f((float)img_object.cols, 0),
+          scene_corners[3] + Point2f((float)img_object.cols, 0), Scalar( 0, 255, 0), 4 );
+    line( img_matches, scene_corners[3] + Point2f((float)img_object.cols, 0),
+          scene_corners[0] + Point2f((float)img_object.cols, 0), Scalar( 0, 255, 0), 4 );
+
+    //-- Show detected matches
+    imshow("Good Matches & Object detection", img_matches );
+
+    waitKey();
+    return 0;
+}
+#else
+int main()
+{
+    std::cout << "This tutorial code needs the xfeatures2d contrib module to be run." << std::endl;
+    return 0;
+}
+#endif
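
Once enough good matches survive the ratio test, RANSAC inside findHomography rejects the remaining outliers while estimating the 3x3 perspective transform, and perspectiveTransform maps the object's corners into scene coordinates. A small Python helper sketching just this localization step; localize, obj_pts and scene_pts are hypothetical names, with the two point arrays collected from the good matches exactly as the demo does:

    import cv2 as cv
    import numpy as np

    def localize(obj_pts, scene_pts, obj_w, obj_h):
        # obj_pts / scene_pts: Nx1x2 float32 arrays of matched coordinates
        H, inlier_mask = cv.findHomography(obj_pts, scene_pts, cv.RANSAC, 3.0)
        corners = np.float32([[0, 0], [obj_w, 0],
                              [obj_w, obj_h], [0, obj_h]]).reshape(-1, 1, 2)
        # Project the object's outline into the scene image
        return cv.perspectiveTransform(corners, H), inlier_mask
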
diff --git a/samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java b/samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java
new file mode 100644 (file)
index 0000000..3be2e58
--- /dev/null
@@ -0,0 +1,158 @@
+import java.awt.BorderLayout;
+import java.awt.Container;
+import java.awt.Image;
+import java.util.Random;
+
+import javax.swing.BoxLayout;
+import javax.swing.ImageIcon;
+import javax.swing.JFrame;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JSlider;
+import javax.swing.event.ChangeEvent;
+import javax.swing.event.ChangeListener;
+
+import org.opencv.core.Core;
+import org.opencv.core.CvType;
+import org.opencv.core.Mat;
+import org.opencv.core.MatOfPoint;
+import org.opencv.core.Point;
+import org.opencv.core.Scalar;
+import org.opencv.core.Size;
+import org.opencv.core.TermCriteria;
+import org.opencv.highgui.HighGui;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.imgproc.Imgproc;
+
+class CornerSubPix {
+    private Mat src = new Mat();
+    private Mat srcGray = new Mat();
+    private JFrame frame;
+    private JLabel imgLabel;
+    private static final int MAX_CORNERS = 25;
+    private int maxCorners = 10;
+    private Random rng = new Random(12345);
+
+    public CornerSubPix(String[] args) {
+        /// Load source image and convert it to gray
+        String filename = args.length > 0 ? args[0] : "../data/pic3.png";
+        src = Imgcodecs.imread(filename);
+        if (src.empty()) {
+            System.err.println("Cannot read image: " + filename);
+            System.exit(0);
+        }
+
+        Imgproc.cvtColor(src, srcGray, Imgproc.COLOR_BGR2GRAY);
+
+        // Create and set up the window.
+        frame = new JFrame("Corner subpixel refinement demo");
+        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+        // Set up the content pane.
+        Image img = HighGui.toBufferedImage(src);
+        addComponentsToPane(frame.getContentPane(), img);
+        // Use the content pane's default BorderLayout. No need for
+        // setLayout(new BorderLayout());
+        // Display the window.
+        frame.pack();
+        frame.setVisible(true);
+        update();
+    }
+
+    private void addComponentsToPane(Container pane, Image img) {
+        if (!(pane.getLayout() instanceof BorderLayout)) {
+            pane.add(new JLabel("Container doesn't use BorderLayout!"));
+            return;
+        }
+
+        JPanel sliderPanel = new JPanel();
+        sliderPanel.setLayout(new BoxLayout(sliderPanel, BoxLayout.PAGE_AXIS));
+
+        sliderPanel.add(new JLabel("Max corners:"));
+        JSlider slider = new JSlider(0, MAX_CORNERS, maxCorners);
+        slider.setMajorTickSpacing(20);
+        slider.setMinorTickSpacing(10);
+        slider.setPaintTicks(true);
+        slider.setPaintLabels(true);
+        slider.addChangeListener(new ChangeListener() {
+            @Override
+            public void stateChanged(ChangeEvent e) {
+                JSlider source = (JSlider) e.getSource();
+                maxCorners = source.getValue();
+                update();
+            }
+        });
+        sliderPanel.add(slider);
+        pane.add(sliderPanel, BorderLayout.PAGE_START);
+
+        imgLabel = new JLabel(new ImageIcon(img));
+        pane.add(imgLabel, BorderLayout.CENTER);
+    }
+
+    private void update() {
+        /// Parameters for Shi-Tomasi algorithm
+        maxCorners = Math.max(maxCorners, 1);
+        MatOfPoint corners = new MatOfPoint();
+        double qualityLevel = 0.01;
+        double minDistance = 10;
+        int blockSize = 3, gradientSize = 3;
+        boolean useHarrisDetector = false;
+        double k = 0.04;
+
+        /// Copy the source image
+        Mat copy = src.clone();
+
+        /// Apply corner detection
+        Imgproc.goodFeaturesToTrack(srcGray, corners, maxCorners, qualityLevel, minDistance, new Mat(),
+                blockSize, gradientSize, useHarrisDetector, k);
+
+        /// Draw corners detected
+        System.out.println("** Number of corners detected: " + corners.rows());
+        int[] cornersData = new int[(int) (corners.total() * corners.channels())];
+        corners.get(0, 0, cornersData);
+        int radius = 4;
+        Mat matCorners = new Mat(corners.rows(), 2, CvType.CV_32F);
+        float[] matCornersData = new float[(int) (matCorners.total() * matCorners.channels())];
+        matCorners.get(0, 0, matCornersData);
+        for (int i = 0; i < corners.rows(); i++) {
+            Imgproc.circle(copy, new Point(cornersData[i * 2], cornersData[i * 2 + 1]), radius,
+                    new Scalar(rng.nextInt(256), rng.nextInt(256), rng.nextInt(256)), Core.FILLED);
+            matCornersData[i * 2] = cornersData[i * 2];
+            matCornersData[i * 2 + 1] = cornersData[i * 2 + 1];
+        }
+        matCorners.put(0, 0, matCornersData);
+
+        imgLabel.setIcon(new ImageIcon(HighGui.toBufferedImage(copy)));
+        frame.repaint();
+
+        /// Set the needed parameters to find the refined corners
+        Size winSize = new Size(5, 5);
+        Size zeroZone = new Size(-1, -1);
+        TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 40, 0.001);
+
+        /// Calculate the refined corner locations
+        Imgproc.cornerSubPix(srcGray, matCorners, winSize, zeroZone, criteria);
+
+        /// Write them down
+        matCorners.get(0, 0, matCornersData);
+        for (int i = 0; i < corners.rows(); i++) {
+            System.out.println(
+                    " -- Refined Corner [" + i + "]  (" + matCornersData[i * 2] + "," + matCornersData[i * 2 + 1] + ")");
+        }
+    }
+}
+
+public class CornerSubPixDemo {
+    public static void main(String[] args) {
+        // Load the native OpenCV library
+        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+
+        // Schedule a job for the event dispatch thread:
+        // creating and showing this application's GUI.
+        javax.swing.SwingUtilities.invokeLater(new Runnable() {
+            @Override
+            public void run() {
+                new CornerSubPix(args);
+            }
+        });
+    }
+}
diff --git a/samples/java/tutorial_code/TrackingMotion/generic_corner_detector/CornerDetectorDemo.java b/samples/java/tutorial_code/TrackingMotion/generic_corner_detector/CornerDetectorDemo.java
new file mode 100644 (file)
index 0000000..30450f8
--- /dev/null
@@ -0,0 +1,190 @@
+import java.awt.BorderLayout;
+import java.awt.Container;
+import java.awt.Image;
+import java.util.Random;
+
+import javax.swing.BoxLayout;
+import javax.swing.ImageIcon;
+import javax.swing.JFrame;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JSlider;
+import javax.swing.event.ChangeEvent;
+import javax.swing.event.ChangeListener;
+
+import org.opencv.core.Core;
+import org.opencv.core.Core.MinMaxLocResult;
+import org.opencv.core.CvType;
+import org.opencv.core.Mat;
+import org.opencv.core.Point;
+import org.opencv.core.Scalar;
+import org.opencv.highgui.HighGui;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.imgproc.Imgproc;
+
+class CornerDetector {
+    private Mat src = new Mat();
+    private Mat srcGray = new Mat();
+    private Mat harrisDst = new Mat();
+    private Mat shiTomasiDst = new Mat();
+    private Mat harrisCopy = new Mat();
+    private Mat shiTomasiCopy = new Mat();
+    private Mat Mc = new Mat();
+    private JFrame frame;
+    private JLabel harrisImgLabel;
+    private JLabel shiTomasiImgLabel;
+    private static final int MAX_QUALITY_LEVEL = 100;
+    private int qualityLevel = 50;
+    private double harrisMinVal;
+    private double harrisMaxVal;
+    private double shiTomasiMinVal;
+    private double shiTomasiMaxVal;
+    private Random rng = new Random(12345);
+
+    public CornerDetector(String[] args) {
+        /// Load source image and convert it to gray
+        String filename = args.length > 0 ? args[0] : "../data/building.jpg";
+        src = Imgcodecs.imread(filename);
+        if (src.empty()) {
+            System.err.println("Cannot read image: " + filename);
+            System.exit(0);
+        }
+
+        Imgproc.cvtColor(src, srcGray, Imgproc.COLOR_BGR2GRAY);
+
+        // Create and set up the window.
+        frame = new JFrame("Creating your own corner detector demo");
+        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+        // Set up the content pane.
+        Image img = HighGui.toBufferedImage(src);
+        addComponentsToPane(frame.getContentPane(), img);
+        // Use the content pane's default BorderLayout. No need for
+        // setLayout(new BorderLayout());
+        // Display the window.
+        frame.pack();
+        frame.setVisible(true);
+
+        /// Set some parameters
+        int blockSize = 3, apertureSize = 3;
+
+        /// My Harris matrix -- Using cornerEigenValsAndVecs
+        Imgproc.cornerEigenValsAndVecs(srcGray, harrisDst, blockSize, apertureSize);
+
+        /* calculate Mc */
+        Mc = Mat.zeros(srcGray.size(), CvType.CV_32F);
+
+        float[] harrisData = new float[(int) (harrisDst.total() * harrisDst.channels())];
+        harrisDst.get(0, 0, harrisData);
+        float[] McData = new float[(int) (Mc.total() * Mc.channels())];
+        Mc.get(0, 0, McData);
+
+        for( int i = 0; i < srcGray.rows(); i++ ) {
+            for( int j = 0; j < srcGray.cols(); j++ ) {
+                float lambda1 = harrisData[(i*srcGray.cols() + j) * 6];
+                float lambda2 = harrisData[(i*srcGray.cols() + j) * 6 + 1];
+                McData[i*srcGray.cols()+j] = (float) (lambda1*lambda2 - 0.04f*Math.pow( ( lambda1 + lambda2 ), 2 ));
+            }
+        }
+        Mc.put(0, 0, McData);
+
+        MinMaxLocResult res = Core.minMaxLoc(Mc);
+        harrisMinVal = res.minVal;
+        harrisMaxVal = res.maxVal;
+
+        /// My Shi-Tomasi -- Using cornerMinEigenVal
+        Imgproc.cornerMinEigenVal(srcGray, shiTomasiDst, blockSize, apertureSize);
+        res = Core.minMaxLoc(shiTomasiDst);
+        shiTomasiMinVal = res.minVal;
+        shiTomasiMaxVal = res.maxVal;
+
+        update();
+    }
+
+    private void addComponentsToPane(Container pane, Image img) {
+        if (!(pane.getLayout() instanceof BorderLayout)) {
+            pane.add(new JLabel("Container doesn't use BorderLayout!"));
+            return;
+        }
+
+        JPanel sliderPanel = new JPanel();
+        sliderPanel.setLayout(new BoxLayout(sliderPanel, BoxLayout.PAGE_AXIS));
+
+        sliderPanel.add(new JLabel("Quality level:"));
+        JSlider slider = new JSlider(0, MAX_QUALITY_LEVEL, qualityLevel);
+        slider.setMajorTickSpacing(20);
+        slider.setMinorTickSpacing(10);
+        slider.setPaintTicks(true);
+        slider.setPaintLabels(true);
+        slider.addChangeListener(new ChangeListener() {
+            @Override
+            public void stateChanged(ChangeEvent e) {
+                JSlider source = (JSlider) e.getSource();
+                qualityLevel = source.getValue();
+                update();
+            }
+        });
+        sliderPanel.add(slider);
+        pane.add(sliderPanel, BorderLayout.PAGE_START);
+
+        JPanel imgPanel = new JPanel();
+        harrisImgLabel = new JLabel(new ImageIcon(img));
+        shiTomasiImgLabel = new JLabel(new ImageIcon(img));
+        imgPanel.add(harrisImgLabel);
+        imgPanel.add(shiTomasiImgLabel);
+        pane.add(imgPanel, BorderLayout.CENTER);
+    }
+
+    private void update() {
+        int qualityLevelVal = Math.max(qualityLevel, 1);
+
+        //Harris
+        harrisCopy = src.clone();
+
+        float[] McData = new float[(int) (Mc.total() * Mc.channels())];
+        Mc.get(0, 0, McData);
+        for (int i = 0; i < srcGray.rows(); i++) {
+            for (int j = 0; j < srcGray.cols(); j++) {
+                if (McData[i * srcGray.cols() + j] > harrisMinVal
+                        + (harrisMaxVal - harrisMinVal) * qualityLevelVal / MAX_QUALITY_LEVEL) {
+                    Imgproc.circle(harrisCopy, new Point(j, i), 4,
+                            new Scalar(rng.nextInt(256), rng.nextInt(256), rng.nextInt(256)), Core.FILLED);
+                }
+            }
+        }
+
+        //Shi-Tomasi
+        shiTomasiCopy = src.clone();
+
+        float[] shiTomasiData = new float[(int) (shiTomasiDst.total() * shiTomasiDst.channels())];
+        shiTomasiDst.get(0, 0, shiTomasiData);
+        for (int i = 0; i < srcGray.rows(); i++) {
+            for (int j = 0; j < srcGray.cols(); j++) {
+                if (shiTomasiData[i * srcGray.cols() + j] > shiTomasiMinVal
+                        + (shiTomasiMaxVal - shiTomasiMinVal) * qualityLevelVal / MAX_QUALITY_LEVEL) {
+                    Imgproc.circle(shiTomasiCopy, new Point(j, i), 4,
+                            new Scalar(rng.nextInt(256), rng.nextInt(256), rng.nextInt(256)), Core.FILLED);
+                }
+            }
+        }
+
+        harrisImgLabel.setIcon(new ImageIcon(HighGui.toBufferedImage(harrisCopy)));
+        shiTomasiImgLabel.setIcon(new ImageIcon(HighGui.toBufferedImage(shiTomasiCopy)));
+        frame.repaint();
+    }
+}
+
+public class CornerDetectorDemo {
+    public static void main(String[] args) {
+        // Load the native OpenCV library
+        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+
+        // Schedule a job for the event dispatch thread:
+        // creating and showing this application's GUI.
+        javax.swing.SwingUtilities.invokeLater(new Runnable() {
+            @Override
+            public void run() {
+                new CornerDetector(args);
+            }
+        });
+    }
+}
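
The per-pixel expression in the Java loop above is the classical Harris response assembled from the eigenvalues that cornerEigenValsAndVecs returns: Mc = lambda1*lambda2 - k*(lambda1 + lambda2)^2, i.e. det(M) - k*trace(M)^2 with k = 0.04. In Python the double loop vectorizes away; a sketch assuming a hypothetical building.jpg:

    import cv2 as cv
    import numpy as np

    gray = cv.imread('building.jpg', cv.IMREAD_GRAYSCALE)   # hypothetical input path

    # Each pixel carries (lambda1, lambda2, x1, y1, x2, y2)
    eig = cv.cornerEigenValsAndVecs(gray, blockSize=3, ksize=3)
    l1, l2 = eig[:, :, 0], eig[:, :, 1]

    # Harris response: det(M) - k * trace(M)^2
    k = 0.04
    Mc = l1 * l2 - k * (l1 + l2) ** 2

    # Threshold at half the dynamic range, as the qualityLevel = 50
    # slider position does in the demo
    lo, hi = Mc.min(), Mc.max()
    print('%d pixels above the Harris threshold' % int((Mc > lo + 0.5 * (hi - lo)).sum()))
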
diff --git a/samples/java/tutorial_code/TrackingMotion/good_features_to_track/GoodFeaturesToTrackDemo.java b/samples/java/tutorial_code/TrackingMotion/good_features_to_track/GoodFeaturesToTrackDemo.java
new file mode 100644 (file)
index 0000000..b5ee732
--- /dev/null
@@ -0,0 +1,134 @@
+import java.awt.BorderLayout;
+import java.awt.Container;
+import java.awt.Image;
+import java.util.Random;
+
+import javax.swing.BoxLayout;
+import javax.swing.ImageIcon;
+import javax.swing.JFrame;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JSlider;
+import javax.swing.event.ChangeEvent;
+import javax.swing.event.ChangeListener;
+
+import org.opencv.core.Core;
+import org.opencv.core.Mat;
+import org.opencv.core.MatOfPoint;
+import org.opencv.core.Point;
+import org.opencv.core.Scalar;
+import org.opencv.highgui.HighGui;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.imgproc.Imgproc;
+
+class GoodFeaturesToTrack {
+    private Mat src = new Mat();
+    private Mat srcGray = new Mat();
+    private JFrame frame;
+    private JLabel imgLabel;
+    private static final int MAX_THRESHOLD = 100;
+    private int maxCorners = 23;
+    private Random rng = new Random(12345);
+
+    public GoodFeaturesToTrack(String[] args) {
+        /// Load source image and convert it to gray
+        String filename = args.length > 0 ? args[0] : "../data/pic3.png";
+        src = Imgcodecs.imread(filename);
+        if (src.empty()) {
+            System.err.println("Cannot read image: " + filename);
+            System.exit(0);
+        }
+
+        Imgproc.cvtColor(src, srcGray, Imgproc.COLOR_BGR2GRAY);
+
+        // Create and set up the window.
+        frame = new JFrame("Shi-Tomasi corner detector demo");
+        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+        // Set up the content pane.
+        Image img = HighGui.toBufferedImage(src);
+        addComponentsToPane(frame.getContentPane(), img);
+        // Use the content pane's default BorderLayout. No need for
+        // setLayout(new BorderLayout());
+        // Display the window.
+        frame.pack();
+        frame.setVisible(true);
+        update();
+    }
+
+    private void addComponentsToPane(Container pane, Image img) {
+        if (!(pane.getLayout() instanceof BorderLayout)) {
+            pane.add(new JLabel("Container doesn't use BorderLayout!"));
+            return;
+        }
+
+        JPanel sliderPanel = new JPanel();
+        sliderPanel.setLayout(new BoxLayout(sliderPanel, BoxLayout.PAGE_AXIS));
+
+        sliderPanel.add(new JLabel("Max corners:"));
+        JSlider slider = new JSlider(0, MAX_THRESHOLD, maxCorners);
+        slider.setMajorTickSpacing(20);
+        slider.setMinorTickSpacing(10);
+        slider.setPaintTicks(true);
+        slider.setPaintLabels(true);
+        slider.addChangeListener(new ChangeListener() {
+            @Override
+            public void stateChanged(ChangeEvent e) {
+                JSlider source = (JSlider) e.getSource();
+                maxCorners = source.getValue();
+                update();
+            }
+        });
+        sliderPanel.add(slider);
+        pane.add(sliderPanel, BorderLayout.PAGE_START);
+
+        imgLabel = new JLabel(new ImageIcon(img));
+        pane.add(imgLabel, BorderLayout.CENTER);
+    }
+
+    private void update() {
+        /// Parameters for Shi-Tomasi algorithm
+        maxCorners = Math.max(maxCorners, 1);
+        MatOfPoint corners = new MatOfPoint();
+        double qualityLevel = 0.01;
+        double minDistance = 10;
+        int blockSize = 3, gradientSize = 3;
+        boolean useHarrisDetector = false;
+        double k = 0.04;
+
+        /// Copy the source image
+        Mat copy = src.clone();
+
+        /// Apply corner detection
+        Imgproc.goodFeaturesToTrack(srcGray, corners, maxCorners, qualityLevel, minDistance, new Mat(),
+                blockSize, gradientSize, useHarrisDetector, k);
+
+        /// Draw corners detected
+        System.out.println("** Number of corners detected: " + corners.rows());
+        int[] cornersData = new int[(int) (corners.total() * corners.channels())];
+        corners.get(0, 0, cornersData);
+        int radius = 4;
+        for (int i = 0; i < corners.rows(); i++) {
+            Imgproc.circle(copy, new Point(cornersData[i * 2], cornersData[i * 2 + 1]), radius,
+                    new Scalar(rng.nextInt(256), rng.nextInt(256), rng.nextInt(256)), Core.FILLED);
+        }
+
+        imgLabel.setIcon(new ImageIcon(HighGui.toBufferedImage(copy)));
+        frame.repaint();
+    }
+}
+
+public class GoodFeaturesToTrackDemo {
+    public static void main(String[] args) {
+        // Load the native OpenCV library
+        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+
+        // Schedule a job for the event dispatch thread:
+        // creating and showing this application's GUI.
+        javax.swing.SwingUtilities.invokeLater(new Runnable() {
+            @Override
+            public void run() {
+                new GoodFeaturesToTrack(args);
+            }
+        });
+    }
+}
diff --git a/samples/java/tutorial_code/TrackingMotion/harris_detector/CornerHarrisDemo.java b/samples/java/tutorial_code/TrackingMotion/harris_detector/CornerHarrisDemo.java
new file mode 100644 (file)
index 0000000..b3c759d
--- /dev/null
@@ -0,0 +1,142 @@
+import java.awt.BorderLayout;
+import java.awt.Container;
+import java.awt.Image;
+
+import javax.swing.BoxLayout;
+import javax.swing.ImageIcon;
+import javax.swing.JFrame;
+import javax.swing.JLabel;
+import javax.swing.JPanel;
+import javax.swing.JSlider;
+import javax.swing.event.ChangeEvent;
+import javax.swing.event.ChangeListener;
+
+import org.opencv.core.Core;
+import org.opencv.core.CvType;
+import org.opencv.core.Mat;
+import org.opencv.core.Point;
+import org.opencv.core.Scalar;
+import org.opencv.highgui.HighGui;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.imgproc.Imgproc;
+
+class CornerHarris {
+    private Mat srcGray = new Mat();
+    private Mat dst = new Mat();
+    private Mat dstNorm = new Mat();
+    private Mat dstNormScaled = new Mat();
+    private JFrame frame;
+    private JLabel imgLabel;
+    private JLabel cornerLabel;
+    private static final int MAX_THRESHOLD = 255;
+    private int threshold = 200;
+
+    public CornerHarris(String[] args) {
+        /// Load source image and convert it to gray
+        String filename = args.length > 0 ? args[0] : "../data/building.jpg";
+        Mat src = Imgcodecs.imread(filename);
+        if (src.empty()) {
+            System.err.println("Cannot read image: " + filename);
+            System.exit(0);
+        }
+
+        Imgproc.cvtColor(src, srcGray, Imgproc.COLOR_BGR2GRAY);
+
+        // Create and set up the window.
+        frame = new JFrame("Harris corner detector demo");
+        frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
+        // Set up the content pane.
+        Image img = HighGui.toBufferedImage(src);
+        addComponentsToPane(frame.getContentPane(), img);
+        // Use the content pane's default BorderLayout. No need for
+        // setLayout(new BorderLayout());
+        // Display the window.
+        frame.pack();
+        frame.setVisible(true);
+        update();
+    }
+
+    private void addComponentsToPane(Container pane, Image img) {
+        if (!(pane.getLayout() instanceof BorderLayout)) {
+            pane.add(new JLabel("Container doesn't use BorderLayout!"));
+            return;
+        }
+
+        JPanel sliderPanel = new JPanel();
+        sliderPanel.setLayout(new BoxLayout(sliderPanel, BoxLayout.PAGE_AXIS));
+
+        sliderPanel.add(new JLabel("Threshold: "));
+        JSlider slider = new JSlider(0, MAX_THRESHOLD, threshold);
+        slider.setMajorTickSpacing(20);
+        slider.setMinorTickSpacing(10);
+        slider.setPaintTicks(true);
+        slider.setPaintLabels(true);
+        slider.addChangeListener(new ChangeListener() {
+            @Override
+            public void stateChanged(ChangeEvent e) {
+                JSlider source = (JSlider) e.getSource();
+                threshold = source.getValue();
+                update();
+            }
+        });
+        sliderPanel.add(slider);
+        pane.add(sliderPanel, BorderLayout.PAGE_START);
+
+        JPanel imgPanel = new JPanel();
+        imgLabel = new JLabel(new ImageIcon(img));
+        imgPanel.add(imgLabel);
+
+        Mat blackImg = Mat.zeros(srcGray.size(), CvType.CV_8U);
+        cornerLabel = new JLabel(new ImageIcon(HighGui.toBufferedImage(blackImg)));
+        imgPanel.add(cornerLabel);
+
+        pane.add(imgPanel, BorderLayout.CENTER);
+    }
+
+    private void update() {
+        dst = Mat.zeros(srcGray.size(), CvType.CV_32F);
+
+        /// Detector parameters
+        int blockSize = 2;
+        int apertureSize = 3;
+        double k = 0.04;
+
+        /// Detecting corners
+        Imgproc.cornerHarris(srcGray, dst, blockSize, apertureSize, k);
+
+        /// Normalizing
+        Core.normalize(dst, dstNorm, 0, 255, Core.NORM_MINMAX);
+        Core.convertScaleAbs(dstNorm, dstNormScaled);
+
+        /// Drawing a circle around corners
+        float[] dstNormData = new float[(int) (dstNorm.total() * dstNorm.channels())];
+        dstNorm.get(0, 0, dstNormData);
+
+        for (int i = 0; i < dstNorm.rows(); i++) {
+            for (int j = 0; j < dstNorm.cols(); j++) {
+                if ((int) dstNormData[i * dstNorm.cols() + j] > threshold) {
+                    Imgproc.circle(dstNormScaled, new Point(j, i), 5, new Scalar(0), 2, 8, 0);
+                }
+            }
+        }
+
+        cornerLabel.setIcon(new ImageIcon(HighGui.toBufferedImage(dstNormScaled)));
+        frame.repaint();
+    }
+}
+
+public class CornerHarrisDemo {
+    public static void main(String[] args) {
+        // Load the native OpenCV library
+        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+
+        // Schedule a job for the event dispatch thread:
+        // creating and showing this application's GUI.
+        javax.swing.SwingUtilities.invokeLater(new Runnable() {
+            @Override
+            public void run() {
+                new CornerHarris(args);
+            }
+        });
+    }
+}
diff --git a/samples/java/tutorial_code/features2D/feature_description/SURFMatchingDemo.java b/samples/java/tutorial_code/features2D/feature_description/SURFMatchingDemo.java
new file mode 100644 (file)
index 0000000..ac64417
--- /dev/null
@@ -0,0 +1,56 @@
+import org.opencv.core.Core;
+import org.opencv.core.Mat;
+import org.opencv.core.MatOfDMatch;
+import org.opencv.core.MatOfKeyPoint;
+import org.opencv.features2d.DescriptorMatcher;
+import org.opencv.features2d.Features2d;
+import org.opencv.highgui.HighGui;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.xfeatures2d.SURF;
+
+class SURFMatching {
+    public void run(String[] args) {
+        String filename1 = args.length > 1 ? args[0] : "../data/box.png";
+        String filename2 = args.length > 1 ? args[1] : "../data/box_in_scene.png";
+        Mat img1 = Imgcodecs.imread(filename1, Imgcodecs.IMREAD_GRAYSCALE);
+        Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.IMREAD_GRAYSCALE);
+        if (img1.empty() || img2.empty()) {
+            System.err.println("Cannot read images!");
+            System.exit(0);
+        }
+
+        //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
+        double hessianThreshold = 400;
+        int nOctaves = 4, nOctaveLayers = 3;
+        boolean extended = false, upright = false;
+        SURF detector = SURF.create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
+        MatOfKeyPoint keypoints1 = new MatOfKeyPoint(), keypoints2 = new MatOfKeyPoint();
+        Mat descriptors1 = new Mat(), descriptors2 = new Mat();
+        detector.detectAndCompute(img1, new Mat(), keypoints1, descriptors1);
+        detector.detectAndCompute(img2, new Mat(), keypoints2, descriptors2);
+
+        //-- Step 2: Matching descriptor vectors with a brute force matcher
+        // Since SURF is a floating-point descriptor NORM_L2 is used
+        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
+        MatOfDMatch matches = new MatOfDMatch();
+        matcher.match(descriptors1, descriptors2, matches);
+
+        //-- Draw matches
+        Mat imgMatches = new Mat();
+        Features2d.drawMatches(img1, keypoints1, img2, keypoints2, matches, imgMatches);
+
+        HighGui.imshow("Matches", imgMatches);
+        HighGui.waitKey(0);
+
+        System.exit(0);
+    }
+}
+
+public class SURFMatchingDemo {
+    public static void main(String[] args) {
+        // Load the native OpenCV library
+        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+
+        new SURFMatching().run(args);
+    }
+}
diff --git a/samples/java/tutorial_code/features2D/feature_detection/SURFDetectionDemo.java b/samples/java/tutorial_code/features2D/feature_detection/SURFDetectionDemo.java
new file mode 100644 (file)
index 0000000..c78a0c6
--- /dev/null
@@ -0,0 +1,44 @@
+import org.opencv.core.Core;
+import org.opencv.core.Mat;
+import org.opencv.core.MatOfKeyPoint;
+import org.opencv.features2d.Features2d;
+import org.opencv.highgui.HighGui;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.xfeatures2d.SURF;
+
+class SURFDetection {
+    public void run(String[] args) {
+        String filename = args.length > 0 ? args[0] : "../data/box.png";
+        Mat src = Imgcodecs.imread(filename, Imgcodecs.IMREAD_GRAYSCALE);
+        if (src.empty()) {
+            System.err.println("Cannot read image: " + filename);
+            System.exit(0);
+        }
+
+        //-- Step 1: Detect the keypoints using SURF Detector
+        double hessianThreshold = 400;
+        int nOctaves = 4, nOctaveLayers = 3;
+        boolean extended = false, upright = false;
+        SURF detector = SURF.create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
+        MatOfKeyPoint keypoints = new MatOfKeyPoint();
+        detector.detect(src, keypoints);
+
+        //-- Draw keypoints
+        Features2d.drawKeypoints(src, keypoints, src);
+
+        //-- Show detected (drawn) keypoints
+        HighGui.imshow("SURF Keypoints", src);
+        HighGui.waitKey(0);
+
+        System.exit(0);
+    }
+}
+
+public class SURFDetectionDemo {
+    public static void main(String[] args) {
+        // Load the native OpenCV library
+        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+
+        new SURFDetection().run(args);
+    }
+}
diff --git a/samples/java/tutorial_code/features2D/feature_flann_matcher/SURFFLANNMatchingDemo.java b/samples/java/tutorial_code/features2D/feature_flann_matcher/SURFFLANNMatchingDemo.java
new file mode 100644 (file)
index 0000000..e02af9c
--- /dev/null
@@ -0,0 +1,78 @@
+import java.util.ArrayList;
+import java.util.List;
+
+import org.opencv.core.Core;
+import org.opencv.core.DMatch;
+import org.opencv.core.Mat;
+import org.opencv.core.MatOfByte;
+import org.opencv.core.MatOfDMatch;
+import org.opencv.core.MatOfKeyPoint;
+import org.opencv.core.Scalar;
+import org.opencv.features2d.DescriptorMatcher;
+import org.opencv.features2d.Features2d;
+import org.opencv.highgui.HighGui;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.xfeatures2d.SURF;
+
+class SURFFLANNMatching {
+    public void run(String[] args) {
+        String filename1 = args.length > 1 ? args[0] : "../data/box.png";
+        String filename2 = args.length > 1 ? args[1] : "../data/box_in_scene.png";
+        Mat img1 = Imgcodecs.imread(filename1, Imgcodecs.IMREAD_GRAYSCALE);
+        Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.IMREAD_GRAYSCALE);
+        if (img1.empty() || img2.empty()) {
+            System.err.println("Cannot read images!");
+            System.exit(0);
+        }
+
+        //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
+        double hessianThreshold = 400;
+        int nOctaves = 4, nOctaveLayers = 3;
+        boolean extended = false, upright = false;
+        SURF detector = SURF.create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
+        MatOfKeyPoint keypoints1 = new MatOfKeyPoint(), keypoints2 = new MatOfKeyPoint();
+        Mat descriptors1 = new Mat(), descriptors2 = new Mat();
+        detector.detectAndCompute(img1, new Mat(), keypoints1, descriptors1);
+        detector.detectAndCompute(img2, new Mat(), keypoints2, descriptors2);
+
+        //-- Step 2: Matching descriptor vectors with a FLANN based matcher
+        // Since SURF is a floating-point descriptor NORM_L2 is used
+        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
+        List<MatOfDMatch> knnMatches = new ArrayList<>();
+        matcher.knnMatch(descriptors1, descriptors2, knnMatches, 2);
+
+        //-- Filter matches using Lowe's ratio test
+        float ratio_thresh = 0.7f;
+        List<DMatch> listOfGoodMatches = new ArrayList<>();
+        for (int i = 0; i < knnMatches.size(); i++) {
+            if (knnMatches.get(i).rows() > 1) {
+                DMatch[] matches = knnMatches.get(i).toArray();
+                if (matches[0].distance / matches[1].distance <= ratio_thresh) {
+                    listOfGoodMatches.add(matches[0]);
+                }
+            }
+        }
+        MatOfDMatch goodMatches = new MatOfDMatch();
+        goodMatches.fromList(listOfGoodMatches);
+
+        //-- Draw matches
+        Mat imgMatches = new Mat();
+        Features2d.drawMatches(img1, keypoints1, img2, keypoints2, goodMatches, imgMatches, Scalar.all(-1),
+                Scalar.all(-1), new MatOfByte(), Features2d.NOT_DRAW_SINGLE_POINTS);
+
+        //-- Show detected matches
+        HighGui.imshow("Good Matches", imgMatches);
+        HighGui.waitKey(0);
+
+        System.exit(0);
+    }
+}
+
+public class SURFFLANNMatchingDemo {
+    public static void main(String[] args) {
+        // Load the native OpenCV library
+        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+
+        new SURFFLANNMatching().run(args);
+    }
+}
diff --git a/samples/java/tutorial_code/features2D/feature_homography/SURFFLANNMatchingHomographyDemo.java b/samples/java/tutorial_code/features2D/feature_homography/SURFFLANNMatchingHomographyDemo.java
new file mode 100644 (file)
index 0000000..1a5cbe7
--- /dev/null
@@ -0,0 +1,130 @@
+import java.util.ArrayList;
+import java.util.List;
+
+import org.opencv.calib3d.Calib3d;
+import org.opencv.core.Core;
+import org.opencv.core.CvType;
+import org.opencv.core.DMatch;
+import org.opencv.core.KeyPoint;
+import org.opencv.core.Mat;
+import org.opencv.core.MatOfByte;
+import org.opencv.core.MatOfDMatch;
+import org.opencv.core.MatOfKeyPoint;
+import org.opencv.core.MatOfPoint2f;
+import org.opencv.core.Point;
+import org.opencv.core.Scalar;
+import org.opencv.features2d.DescriptorMatcher;
+import org.opencv.features2d.Features2d;
+import org.opencv.highgui.HighGui;
+import org.opencv.imgcodecs.Imgcodecs;
+import org.opencv.imgproc.Imgproc;
+import org.opencv.xfeatures2d.SURF;
+
+class SURFFLANNMatchingHomography {
+    public void run(String[] args) {
+        String filenameObject = args.length > 1 ? args[0] : "../data/box.png";
+        String filenameScene = args.length > 1 ? args[1] : "../data/box_in_scene.png";
+        Mat imgObject = Imgcodecs.imread(filenameObject, Imgcodecs.IMREAD_GRAYSCALE);
+        Mat imgScene = Imgcodecs.imread(filenameScene, Imgcodecs.IMREAD_GRAYSCALE);
+        if (imgObject.empty() || imgScene.empty()) {
+            System.err.println("Cannot read images!");
+            System.exit(0);
+        }
+
+        //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
+        double hessianThreshold = 400;
+        int nOctaves = 4, nOctaveLayers = 3;
+        boolean extended = false, upright = false;
+        SURF detector = SURF.create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
+        MatOfKeyPoint keypointsObject = new MatOfKeyPoint(), keypointsScene = new MatOfKeyPoint();
+        Mat descriptorsObject = new Mat(), descriptorsScene = new Mat();
+        detector.detectAndCompute(imgObject, new Mat(), keypointsObject, descriptorsObject);
+        detector.detectAndCompute(imgScene, new Mat(), keypointsScene, descriptorsScene);
+
+        //-- Step 2: Matching descriptor vectors with a FLANN based matcher
+        // Since SURF is a floating-point descriptor NORM_L2 is used
+        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
+        List<MatOfDMatch> knnMatches = new ArrayList<>();
+        matcher.knnMatch(descriptorsObject, descriptorsScene, knnMatches, 2);
+
+        //-- Filter matches using Lowe's ratio test
+        float ratio_thresh = 0.75f;
+        List<DMatch> listOfGoodMatches = new ArrayList<>();
+        for (int i = 0; i < knnMatches.size(); i++) {
+            if (knnMatches.get(i).rows() > 1) {
+                DMatch[] matches = knnMatches.get(i).toArray();
+                if (matches[0].distance / matches[1].distance <= ratio_thresh) {
+                    listOfGoodMatches.add(matches[0]);
+                }
+            }
+        }
+        MatOfDMatch goodMatches = new MatOfDMatch();
+        goodMatches.fromList(listOfGoodMatches);
+
+        //-- Draw matches
+        Mat imgMatches = new Mat();
+        Features2d.drawMatches(imgObject, keypointsObject, imgScene, keypointsScene, goodMatches, imgMatches, Scalar.all(-1),
+                Scalar.all(-1), new MatOfByte(), Features2d.NOT_DRAW_SINGLE_POINTS);
+
+        //-- Localize the object
+        List<Point> obj = new ArrayList<>();
+        List<Point> scene = new ArrayList<>();
+
+        List<KeyPoint> listOfKeypointsObject = keypointsObject.toList();
+        List<KeyPoint> listOfKeypointsScene = keypointsScene.toList();
+        for (int i = 0; i < listOfGoodMatches.size(); i++) {
+            //-- Get the keypoints from the good matches
+            obj.add(listOfKeypointsObject.get(listOfGoodMatches.get(i).queryIdx).pt);
+            scene.add(listOfKeypointsScene.get(listOfGoodMatches.get(i).trainIdx).pt);
+        }
+
+        MatOfPoint2f objMat = new MatOfPoint2f(), sceneMat = new MatOfPoint2f();
+        objMat.fromList(obj);
+        sceneMat.fromList(scene);
+        double ransacReprojThreshold = 3.0;
+        Mat H = Calib3d.findHomography( objMat, sceneMat, Calib3d.RANSAC, ransacReprojThreshold );
+
+        //-- Get the corners from the image_1 ( the object to be "detected" )
+        Mat objCorners = new Mat(4, 1, CvType.CV_32FC2), sceneCorners = new Mat();
+        float[] objCornersData = new float[(int) (objCorners.total() * objCorners.channels())];
+        objCorners.get(0, 0, objCornersData);
+        objCornersData[0] = 0;
+        objCornersData[1] = 0;
+        objCornersData[2] = imgObject.cols();
+        objCornersData[3] = 0;
+        objCornersData[4] = imgObject.cols();
+        objCornersData[5] = imgObject.rows();
+        objCornersData[6] = 0;
+        objCornersData[7] = imgObject.rows();
+        objCorners.put(0, 0, objCornersData);
+
+        Core.perspectiveTransform(objCorners, sceneCorners, H);
+        float[] sceneCornersData = new float[(int) (sceneCorners.total() * sceneCorners.channels())];
+        sceneCorners.get(0, 0, sceneCornersData);
+
+        //-- Draw lines between the corners (the mapped object in the scene - image_2 )
+        Imgproc.line(imgMatches, new Point(sceneCornersData[0] + imgObject.cols(), sceneCornersData[1]),
+                new Point(sceneCornersData[2] + imgObject.cols(), sceneCornersData[3]), new Scalar(0, 255, 0), 4);
+        Imgproc.line(imgMatches, new Point(sceneCornersData[2] + imgObject.cols(), sceneCornersData[3]),
+                new Point(sceneCornersData[4] + imgObject.cols(), sceneCornersData[5]), new Scalar(0, 255, 0), 4);
+        Imgproc.line(imgMatches, new Point(sceneCornersData[4] + imgObject.cols(), sceneCornersData[5]),
+                new Point(sceneCornersData[6] + imgObject.cols(), sceneCornersData[7]), new Scalar(0, 255, 0), 4);
+        Imgproc.line(imgMatches, new Point(sceneCornersData[6] + imgObject.cols(), sceneCornersData[7]),
+                new Point(sceneCornersData[0] + imgObject.cols(), sceneCornersData[1]), new Scalar(0, 255, 0), 4);
+
+        //-- Show detected matches
+        HighGui.imshow("Good Matches & Object detection", imgMatches);
+        HighGui.waitKey(0);
+
+        System.exit(0);
+    }
+}
+
+public class SURFFLANNMatchingHomographyDemo {
+    public static void main(String[] args) {
+        // Load the native OpenCV library
+        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
+
+        new SURFFLANNMatchingHomography().run(args);
+    }
+}
diff --git a/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py b/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py
new file mode 100644 (file)
index 0000000..72ce96b
--- /dev/null
@@ -0,0 +1,70 @@
+from __future__ import print_function
+import cv2 as cv
+import numpy as np
+import argparse
+import random as rng
+
+source_window = 'Image'
+maxTrackbar = 25
+rng.seed(12345)
+
+def goodFeaturesToTrack_Demo(val):
+    maxCorners = max(val, 1)
+
+    # Parameters for Shi-Tomasi algorithm
+    qualityLevel = 0.01
+    minDistance = 10
+    blockSize = 3
+    gradientSize = 3
+    useHarrisDetector = False
+    k = 0.04
+
+    # Copy the source image
+    copy = np.copy(src)
+
+    # Apply corner detection
+    corners = cv.goodFeaturesToTrack(src_gray, maxCorners, qualityLevel, minDistance, None, \
+        blockSize=blockSize, gradientSize=gradientSize, useHarrisDetector=useHarrisDetector, k=k)
+
+    # Draw corners detected
+    print('** Number of corners detected:', corners.shape[0])
+    radius = 4
+    for i in range(corners.shape[0]):
+        cv.circle(copy, (corners[i,0,0], corners[i,0,1]), radius, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)
+
+    # Show what you got
+    cv.namedWindow(source_window)
+    cv.imshow(source_window, copy)
+
+    # Set the needed parameters to find the refined corners
+    winSize = (5, 5)
+    zeroZone = (-1, -1)
+    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 40, 0.001)
+
+    # Calculate the refined corner locations
+    corners = cv.cornerSubPix(src_gray, corners, winSize, zeroZone, criteria)
+
+    # Write them down
+    for i in range(corners.shape[0]):
+        print(" -- Refined Corner [", i, "]  (", corners[i,0,0], ",", corners[i,0,1], ")")
+
+# Load source image and convert it to gray
+parser = argparse.ArgumentParser(description='Code for Detecting corner locations in subpixels tutorial.')
+parser.add_argument('--input', help='Path to input image.', default='../data/pic3.png')
+args = parser.parse_args()
+
+src = cv.imread(args.input)
+if src is None:
+    print('Could not open or find the image:', args.input)
+    exit(0)
+
+src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
+
+# Create a window and a trackbar
+cv.namedWindow(source_window)
+maxCorners = 10 # initial maximum number of corners
+cv.createTrackbar('Max corners:', source_window, maxCorners, maxTrackbar, goodFeaturesToTrack_Demo)
+cv.imshow(source_window, src)
+goodFeaturesToTrack_Demo(maxCorners)
+
+cv.waitKey()
diff --git a/samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py b/samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py
new file mode 100644 (file)
index 0000000..d135367
--- /dev/null
@@ -0,0 +1,80 @@
+from __future__ import print_function
+import cv2 as cv
+import numpy as np
+import argparse
+import random as rng
+
+myHarris_window = 'My Harris corner detector'
+myShiTomasi_window = 'My Shi Tomasi corner detector'
+myHarris_qualityLevel = 50
+myShiTomasi_qualityLevel = 50
+max_qualityLevel = 100
+rng.seed(12345)
+
+def myHarris_function(val):
+    myHarris_copy = np.copy(src)
+    myHarris_qualityLevel = max(val, 1)
+
+    for i in range(src_gray.shape[0]):
+        for j in range(src_gray.shape[1]):
+            if Mc[i,j] > myHarris_minVal + ( myHarris_maxVal - myHarris_minVal )*myHarris_qualityLevel/max_qualityLevel:
+                cv.circle(myHarris_copy, (j,i), 4, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)
+
+    cv.imshow(myHarris_window, myHarris_copy)
+
+def myShiTomasi_function(val):
+    myShiTomasi_copy = np.copy(src)
+    myShiTomasi_qualityLevel = max(val, 1)
+
+    for i in range(src_gray.shape[0]):
+        for j in range(src_gray.shape[1]):
+            if myShiTomasi_dst[i,j] > myShiTomasi_minVal + ( myShiTomasi_maxVal - myShiTomasi_minVal )*myShiTomasi_qualityLevel/max_qualityLevel:
+                cv.circle(myShiTomasi_copy, (j,i), 4, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)
+
+    cv.imshow(myShiTomasi_window, myShiTomasi_copy)
+
+# Load source image and convert it to gray
+parser = argparse.ArgumentParser(description='Code for Creating your own corner detector tutorial.')
+parser.add_argument('--input', help='Path to input image.', default='../data/building.jpg')
+args = parser.parse_args()
+
+src = cv.imread(args.input)
+if src is None:
+    print('Could not open or find the image:', args.input)
+    exit(0)
+
+src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
+
+# Set some parameters
+blockSize = 3
+apertureSize = 3
+
+# My Harris matrix -- Using cornerEigenValsAndVecs
+myHarris_dst = cv.cornerEigenValsAndVecs(src_gray, blockSize, apertureSize)
+
+# calculate Mc
+Mc = np.empty(src_gray.shape, dtype=np.float32)
+for i in range(src_gray.shape[0]):
+    for j in range(src_gray.shape[1]):
+        lambda_1 = myHarris_dst[i,j,0]
+        lambda_2 = myHarris_dst[i,j,1]
+        Mc[i,j] = lambda_1*lambda_2 - 0.04*pow( ( lambda_1 + lambda_2 ), 2 )
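+        # Mc is the Harris response R = det(M) - k*trace(M)^2 expressed through the
+        # eigenvalues: det(M) = lambda_1*lambda_2 and trace(M) = lambda_1 + lambda_2, with k = 0.04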
+
+myHarris_minVal, myHarris_maxVal, _, _ = cv.minMaxLoc(Mc)
+
+# Create Window and Trackbar
+cv.namedWindow(myHarris_window)
+cv.createTrackbar('Quality Level:', myHarris_window, myHarris_qualityLevel, max_qualityLevel, myHarris_function)
+myHarris_function(myHarris_qualityLevel)
+
+# My Shi-Tomasi -- Using cornerMinEigenVal
+myShiTomasi_dst = cv.cornerMinEigenVal(src_gray, blockSize, apertureSize)
+
+myShiTomasi_minVal, myShiTomasi_maxVal, _, _ = cv.minMaxLoc(myShiTomasi_dst)
+
+# Create Window and Trackbar
+cv.namedWindow(myShiTomasi_window)
+cv.createTrackbar('Quality Level:', myShiTomasi_window, myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function)
+myShiTomasi_function(myShiTomasi_qualityLevel)
+
+cv.waitKey()
diff --git a/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py b/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py
new file mode 100644 (file)
index 0000000..57e767c
--- /dev/null
@@ -0,0 +1,58 @@
+from __future__ import print_function
+import cv2 as cv
+import numpy as np
+import argparse
+import random as rng
+
+source_window = 'Image'
+maxTrackbar = 100
+rng.seed(12345)
+
+def goodFeaturesToTrack_Demo(val):
+    maxCorners = max(val, 1)
+
+    # Parameters for Shi-Tomasi algorithm
+    qualityLevel = 0.01
+    minDistance = 10
+    blockSize = 3
+    gradientSize = 3
+    useHarrisDetector = False
+    k = 0.04
+
+    # Copy the source image
+    copy = np.copy(src)
+
+    # Apply corner detection
+    corners = cv.goodFeaturesToTrack(src_gray, maxCorners, qualityLevel, minDistance, None, \
+        blockSize=blockSize, gradientSize=gradientSize, useHarrisDetector=useHarrisDetector, k=k)
+
+    # Draw corners detected
+    print('** Number of corners detected:', corners.shape[0])
+    radius = 4
+    for i in range(corners.shape[0]):
+        cv.circle(copy, (int(corners[i,0,0]), int(corners[i,0,1])), radius, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)
+
+    # Show what you got
+    cv.namedWindow(source_window)
+    cv.imshow(source_window, copy)
+
+# Load source image and convert it to gray
+parser = argparse.ArgumentParser(description='Code for Shi-Tomasi corner detector tutorial.')
+parser.add_argument('--input', help='Path to input image.', default='../data/pic3.png')
+args = parser.parse_args()
+
+src = cv.imread(args.input)
+if src is None:
+    print('Could not open or find the image:', args.input)
+    exit(0)
+
+src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
+
+# Create a window and a trackbar
+cv.namedWindow(source_window)
+maxCorners = 23 # initial maximum number of corners
+cv.createTrackbar('Max corners:', source_window, maxCorners, maxTrackbar, goodFeaturesToTrack_Demo)
+cv.imshow(source_window, src)
+goodFeaturesToTrack_Demo(maxCorners)
+
+cv.waitKey()
diff --git a/samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py b/samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py
new file mode 100644 (file)
index 0000000..cee7679
--- /dev/null
@@ -0,0 +1,55 @@
+from __future__ import print_function
+import cv2 as cv
+import numpy as np
+import argparse
+
+source_window = 'Source image'
+corners_window = 'Corners detected'
+max_thresh = 255
+
+def cornerHarris_demo(val):
+    thresh = val
+
+    # Detector parameters
+    blockSize = 2
+    apertureSize = 3
+    k = 0.04
+
+    # Detecting corners
+    dst = cv.cornerHarris(src_gray, blockSize, apertureSize, k)
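+    # dst holds the Harris response R = det(M) - k*trace(M)^2 for every pixel;
+    # strong positive responses indicate corners and are thresholded after normalization below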
+
+    # Normalizing
+    dst_norm = np.empty(dst.shape, dtype=np.float32)
+    cv.normalize(dst, dst_norm, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)
+    dst_norm_scaled = cv.convertScaleAbs(dst_norm)
+
+    # Drawing a circle around corners
+    for i in range(dst_norm.shape[0]):
+        for j in range(dst_norm.shape[1]):
+            if int(dst_norm[i,j]) > thresh:
+                cv.circle(dst_norm_scaled, (j,i), 5, (0,), 2)
+
+    # Showing the result
+    cv.namedWindow(corners_window)
+    cv.imshow(corners_window, dst_norm_scaled)
+
+# Load source image and convert it to gray
+parser = argparse.ArgumentParser(description='Code for Harris corner detector tutorial.')
+parser.add_argument('--input', help='Path to input image.', default='../data/building.jpg')
+args = parser.parse_args()
+
+src = cv.imread(args.input)
+if src is None:
+    print('Could not open or find the image:', args.input)
+    exit(0)
+
+src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
+
+# Create a window and a trackbar
+cv.namedWindow(source_window)
+thresh = 200 # initial threshold
+cv.createTrackbar('Threshold: ', source_window, thresh, max_thresh, cornerHarris_demo)
+cv.imshow(source_window, src)
+cornerHarris_demo(thresh)
+
+cv.waitKey()
diff --git a/samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py b/samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py
new file mode 100644 (file)
index 0000000..f50e48d
--- /dev/null
@@ -0,0 +1,35 @@
+from __future__ import print_function
+import cv2 as cv
+import numpy as np
+import argparse
+
+parser = argparse.ArgumentParser(description='Code for Feature Description tutorial.')
+parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png')
+parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png')
+args = parser.parse_args()
+
+img1 = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
+img2 = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
+if img1 is None or img2 is None:
+    print('Could not open or find the images!')
+    exit(0)
+
+#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
+minHessian = 400
+detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
+keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
+keypoints2, descriptors2 = detector.detectAndCompute(img2, None)
+
+#-- Step 2: Matching descriptor vectors with a brute force matcher
+# Since SURF is a floating-point descriptor, NORM_L2 is used
+matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE)
+matches = matcher.match(descriptors1, descriptors2)
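+# The same matcher can also be built with an explicit norm, e.g. (equivalent sketch):
+#   matcher = cv.BFMatcher(cv.NORM_L2, crossCheck=False)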
+
+#-- Draw matches
+img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
+cv.drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches)
+
+#-- Show detected matches
+cv.imshow('Matches', img_matches)
+
+cv.waitKey()
diff --git a/samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py b/samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py
new file mode 100644 (file)
index 0000000..717d9f1
--- /dev/null
@@ -0,0 +1,27 @@
+from __future__ import print_function
+import cv2 as cv
+import numpy as np
+import argparse
+
+parser = argparse.ArgumentParser(description='Code for Feature Detection tutorial.')
+parser.add_argument('--input', help='Path to input image.', default='../data/box.png')
+args = parser.parse_args()
+
+src = cv.imread(args.input, cv.IMREAD_GRAYSCALE)
+if src is None:
+    print('Could not open or find the image:', args.input)
+    exit(0)
+
+#-- Step 1: Detect the keypoints using SURF Detector
+minHessian = 400
+detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
+keypoints = detector.detect(src)
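+# Note: SURF is part of the non-free opencv_contrib xfeatures2d module; if it is not
+# available in your build, a free detector could be swapped in instead (a sketch, not
+# the tutorial's method):  detector = cv.ORB_create()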
+
+#-- Draw keypoints
+img_keypoints = np.empty((src.shape[0], src.shape[1], 3), dtype=np.uint8)
+cv.drawKeypoints(src, keypoints, img_keypoints)
+
+#-- Show detected (drawn) keypoints
+cv.imshow('SURF Keypoints', img_keypoints)
+
+cv.waitKey()
diff --git a/samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py b/samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py
new file mode 100644 (file)
index 0000000..d22f9a8
--- /dev/null
@@ -0,0 +1,43 @@
+from __future__ import print_function
+import cv2 as cv
+import numpy as np
+import argparse
+
+parser = argparse.ArgumentParser(description='Code for Feature Matching with FLANN tutorial.')
+parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png')
+parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png')
+args = parser.parse_args()
+
+img1 = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
+img2 = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
+if img1 is None or img2 is None:
+    print('Could not open or find the images!')
+    exit(0)
+
+#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
+minHessian = 400
+detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
+keypoints1, descriptors1 = detector.detectAndCompute(img1, None)
+keypoints2, descriptors2 = detector.detectAndCompute(img2, None)
+
+#-- Step 2: Matching descriptor vectors with a FLANN based matcher
+# Since SURF is a floating-point descriptor, NORM_L2 is used
+matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
+knn_matches = matcher.knnMatch(descriptors1, descriptors2, 2)
+
+#-- Filter matches using Lowe's ratio test
+ratio_thresh = 0.7
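+# (values around 0.7-0.8 are common; a lower threshold keeps fewer but more reliable matches)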
+good_matches = []
+# the multiplicative form avoids dividing by a zero second-best distance
+for matches in knn_matches:
+    if len(matches) > 1 and matches[0].distance < ratio_thresh * matches[1].distance:
+        good_matches.append(matches[0])
+
+#-- Draw matches
+img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
+cv.drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
+
+#-- Show detected matches
+cv.imshow('Good Matches', img_matches)
+
+cv.waitKey()
diff --git a/samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py b/samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py
new file mode 100644 (file)
index 0000000..8820add
--- /dev/null
@@ -0,0 +1,78 @@
+from __future__ import print_function
+import cv2 as cv
+import numpy as np
+import argparse
+
+parser = argparse.ArgumentParser(description='Code for Features2D + Homography to find a known object tutorial.')
+parser.add_argument('--input1', help='Path to input image 1.', default='../data/box.png')
+parser.add_argument('--input2', help='Path to input image 2.', default='../data/box_in_scene.png')
+args = parser.parse_args()
+
+img_object = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
+img_scene = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
+if img_object is None or img_scene is None:
+    print('Could not open or find the images!')
+    exit(0)
+
+#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
+minHessian = 400
+detector = cv.xfeatures2d_SURF.create(hessianThreshold=minHessian)
+keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None)
+keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)
+
+#-- Step 2: Matching descriptor vectors with a FLANN based matcher
+# Since SURF is a floating-point descriptor, NORM_L2 is used
+matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
+knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)
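+# k=2 returns the two nearest neighbours of each query descriptor, as the ratio test requires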
+
+#-- Filter matches using Lowe's ratio test
+ratio_thresh = 0.75
+good_matches = []
+# the multiplicative form avoids dividing by a zero second-best distance
+for matches in knn_matches:
+    if len(matches) > 1 and matches[0].distance < ratio_thresh * matches[1].distance:
+        good_matches.append(matches[0])
+
+#-- Draw matches
+img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1]+img_scene.shape[1], 3), dtype=np.uint8)
+cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
+
+#-- Localize the object
+obj = np.empty((len(good_matches),2), dtype=np.float32)
+scene = np.empty((len(good_matches),2), dtype=np.float32)
+for i in range(len(good_matches)):
+    #-- Get the keypoints from the good matches
+    obj[i,0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
+    obj[i,1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
+    scene[i,0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
+    scene[i,1] = keypoints_scene[good_matches[i].trainIdx].pt[1]
+
+H, _ = cv.findHomography(obj, scene, cv.RANSAC)
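+# RANSAC discards outlier correspondences; the second return value (ignored here) is the
+# inlier mask, which could be used to count how many matches actually support H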
+
+#-- Get the corners from the image_1 ( the object to be "detected" )
+obj_corners = np.empty((4,1,2), dtype=np.float32)
+obj_corners[0,0,0] = 0
+obj_corners[0,0,1] = 0
+obj_corners[1,0,0] = img_object.shape[1]
+obj_corners[1,0,1] = 0
+obj_corners[2,0,0] = img_object.shape[1]
+obj_corners[2,0,1] = img_object.shape[0]
+obj_corners[3,0,0] = 0
+obj_corners[3,0,1] = img_object.shape[0]
+
+scene_corners = cv.perspectiveTransform(obj_corners, H)
+
+#-- Draw lines between the corners (the mapped object in the scene - image_2 )
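+# the scene image sits to the right of the object image in img_matches, hence the
+# img_object.shape[1] x-offset added to every scene corner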
+cv.line(img_matches, (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])),\
+    (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])), (0,255,0), 4)
+cv.line(img_matches, (int(scene_corners[1,0,0] + img_object.shape[1]), int(scene_corners[1,0,1])),\
+    (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])), (0,255,0), 4)
+cv.line(img_matches, (int(scene_corners[2,0,0] + img_object.shape[1]), int(scene_corners[2,0,1])),\
+    (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])), (0,255,0), 4)
+cv.line(img_matches, (int(scene_corners[3,0,0] + img_object.shape[1]), int(scene_corners[3,0,1])),\
+    (int(scene_corners[0,0,0] + img_object.shape[1]), int(scene_corners[0,0,1])), (0,255,0), 4)
+
+#-- Show detected matches
+cv.imshow('Good Matches & Object detection', img_matches)
+
+cv.waitKey()