Clean up the dead code
author Ievgen Khvedchenia <ekhvedchenya@gmail.com>
Mon, 5 May 2014 18:48:54 +0000 (21:48 +0300)
committer Ievgen Khvedchenia <ekhvedchenya@gmail.com>
Mon, 5 May 2014 18:48:54 +0000 (21:48 +0300)
modules/features2d/src/akaze/AKAZEFeatures.cpp
modules/features2d/src/akaze/AKAZEFeatures.h
modules/features2d/src/kaze/KAZEFeatures.cpp

modules/features2d/src/akaze/AKAZEFeatures.cpp
index 7400b2a..b1a4ba5 100644
@@ -93,17 +93,9 @@ void AKAZEFeatures::Allocate_Memory_Evolution(void) {
  * @param img Input image for which the nonlinear scale space needs to be created
  * @return 0 if the nonlinear scale space was created successfully, -1 otherwise
  */
-int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) {
-
-    //double t1 = 0.0, t2 = 0.0;
+int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img)
+{
     CV_Assert(evolution_.size() > 0);
-    //if (evolution_.size() == 0) {
-    //    cerr << "Error generating the nonlinear scale space!!" << endl;
-    //    cerr << "Firstly you need to call AKAZEFeatures::Allocate_Memory_Evolution()" << endl;
-    //    return -1;
-    //}
-
-    //t1 = cv::getTickCount();
 
     // Copy the original image to the first level of the evolution
     img.copyTo(evolution_[0].Lt);
@@ -113,9 +105,6 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) {
     // First compute the kcontrast factor
     options_.kcontrast = compute_k_percentile(img, options_.kcontrast_percentile, 1.0f, options_.kcontrast_nbins, 0, 0);
 
-    //t2 = cv::getTickCount();
-    //timing_.kcontrast = 1000.0*(t2 - t1) / cv::getTickFrequency();
-
     // Now generate the rest of evolution levels
     for (size_t i = 1; i < evolution_.size(); i++) {
 
@@ -158,9 +147,6 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) {
         }
     }
 
-    //t2 = cv::getTickCount();
-    //timing_.scale = 1000.0*(t2 - t1) / cv::getTickFrequency();
-
     return 0;
 }
 
@@ -169,20 +155,13 @@ int AKAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat& img) {
  * @brief This method selects interesting keypoints through the nonlinear scale space
  * @param kpts Vector of detected keypoints
  */
-void AKAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts) {
-
-    //double t1 = 0.0, t2 = 0.0;
-
-    //t1 = cv::getTickCount();
-
+void AKAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts)
+{
     kpts.clear();
 
     Compute_Determinant_Hessian_Response();
     Find_Scale_Space_Extrema(kpts);
     Do_Subpixel_Refinement(kpts);
-
-    //t2 = cv::getTickCount();
-    //timing_.detector = 1000.0*(t2 - t1) / cv::getTickFrequency();
 }
 
 /* ************************************************************************* */
@@ -228,34 +207,10 @@ private:
 /**
  * @brief This method computes the multiscale derivatives for the nonlinear scale space
  */
-void AKAZEFeatures::Compute_Multiscale_Derivatives(void) {
-
-    //double t1 = 0.0, t2 = 0.0;
-
-    //t1 = cv::getTickCount();
-
-    cv::parallel_for_(cv::Range(0, (int)evolution_.size()), MultiscaleDerivativesInvoker(evolution_, options_));
-    /*
-    for (int i = 0; i < (int)(evolution_.size()); i++) {
-
-        float ratio = pow(2.f, (float)evolution_[i].octave);
-        int sigma_size_ = fRound(evolution_[i].esigma*options_.derivative_factor / ratio);
-
-        compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, sigma_size_);
-        compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1, sigma_size_);
-        compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxx, 1, 0, sigma_size_);
-        compute_scharr_derivatives(evolution_[i].Ly, evolution_[i].Lyy, 0, 1, sigma_size_);
-        compute_scharr_derivatives(evolution_[i].Lx, evolution_[i].Lxy, 0, 1, sigma_size_);
-
-        evolution_[i].Lx = evolution_[i].Lx*((sigma_size_));
-        evolution_[i].Ly = evolution_[i].Ly*((sigma_size_));
-        evolution_[i].Lxx = evolution_[i].Lxx*((sigma_size_)*(sigma_size_));
-        evolution_[i].Lxy = evolution_[i].Lxy*((sigma_size_)*(sigma_size_));
-        evolution_[i].Lyy = evolution_[i].Lyy*((sigma_size_)*(sigma_size_));
-    }
-    */
-    //t2 = cv::getTickCount();
-    //timing_.derivatives = 1000.0*(t2 - t1) / cv::getTickFrequency();
+void AKAZEFeatures::Compute_Multiscale_Derivatives(void)
+{
+    cv::parallel_for_(cv::Range(0, (int)evolution_.size()),
+                      MultiscaleDerivativesInvoker(evolution_, options_));
 }
 
 /* ************************************************************************* */
@@ -268,14 +223,12 @@ void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) {
     // Firstly compute the multiscale derivatives
     Compute_Multiscale_Derivatives();
 
-    for (size_t i = 0; i < evolution_.size(); i++) {
-
-        //if (options_.verbosity == true) {
-        //    cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl;
-        //}
-
-        for (int ix = 0; ix < evolution_[i].Ldet.rows; ix++) {
-            for (int jx = 0; jx < evolution_[i].Ldet.cols; jx++) {
+    for (size_t i = 0; i < evolution_.size(); i++)
+    {
+        for (int ix = 0; ix < evolution_[i].Ldet.rows; ix++)
+        {
+            for (int jx = 0; jx < evolution_[i].Ldet.cols; jx++)
+            {
                 float lxx = *(evolution_[i].Lxx.ptr<float>(ix)+jx);
                 float lxy = *(evolution_[i].Lxy.ptr<float>(ix)+jx);
                 float lyy = *(evolution_[i].Lyy.ptr<float>(ix)+jx);
@@ -290,9 +243,9 @@ void AKAZEFeatures::Compute_Determinant_Hessian_Response(void) {
  * @brief This method finds extrema in the nonlinear scale space
  * @param kpts Vector of detected keypoints
  */
-void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts) {
+void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts)
+{
 
-    //double t1 = 0.0, t2 = 0.0;
     float value = 0.0;
     float dist = 0.0, ratio = 0.0, smax = 0.0;
     int npoints = 0, id_repeated = 0;
@@ -310,8 +263,6 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts) {
         smax = 12.0f*sqrtf(2.0f);
     }
 
-    //t1 = cv::getTickCount();
-
     for (size_t i = 0; i < evolution_.size(); i++) {
         for (int ix = 1; ix < evolution_[i].Ldet.rows - 1; ix++) {
             for (int jx = 1; jx < evolution_[i].Ldet.cols - 1; jx++) {
@@ -415,9 +366,6 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts) {
         if (is_repeated == false)
             kpts.push_back(pt);
     }
-
-    //t2 = cv::getTickCount();
-    //timing_.extrema = 1000.0*(t2 - t1) / cv::getTickFrequency();
 }
 
 /* ************************************************************************* */
@@ -425,9 +373,8 @@ void AKAZEFeatures::Find_Scale_Space_Extrema(std::vector<cv::KeyPoint>& kpts) {
  * @brief This method performs subpixel refinement of the detected keypoints
  * @param kpts Vector of detected keypoints
  */
-void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts) {
-
-    //double t1 = 0.0, t2 = 0.0;
+void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts)
+{
     float Dx = 0.0, Dy = 0.0, ratio = 0.0;
     float Dxx = 0.0, Dyy = 0.0, Dxy = 0.0;
     int x = 0, y = 0;
@@ -435,8 +382,6 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts) {
     cv::Mat b = cv::Mat::zeros(2, 1, CV_32F);
     cv::Mat dst = cv::Mat::zeros(2, 1, CV_32F);
 
-    //t1 = cv::getTickCount();
-
     for (size_t i = 0; i < kpts.size(); i++) {
         ratio = pow(2.f, kpts[i].octave);
         x = fRound(kpts[i].pt.x / ratio);
@@ -487,9 +432,6 @@ void AKAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint>& kpts) {
             i--;
         }
     }
-
-    //t2 = cv::getTickCount();
-    //timing_.subpixel = 1000.0*(t2 - t1) / cv::getTickFrequency();
 }
 
 /* ************************************************************************* */
@@ -739,12 +681,8 @@ private:
  * @param kpts Vector of detected keypoints
  * @param desc Matrix to store the descriptors
  */
-void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc) {
-
-    //double t1 = 0.0, t2 = 0.0;
-
-    //t1 = cv::getTickCount();
-
+void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc)
+{
     // Allocate memory for the matrix with the descriptors
     if (options_.descriptor < MLDB_UPRIGHT) {
         desc = cv::Mat::zeros((int)kpts.size(), 64, CV_32FC1);
@@ -766,39 +704,21 @@ void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat
     case SURF_UPRIGHT: // Upright descriptors, not invariant to rotation
     {
         cv::parallel_for_(cv::Range(0, (int)kpts.size()), SURF_Descriptor_Upright_64_Invoker(kpts, desc, evolution_));
-
-        //for (int i = 0; i < (int)(kpts.size()); i++) {
-        //    Get_SURF_Descriptor_Upright_64(kpts[i], desc.ptr<float>(i));
-        //}
     }
         break;
     case SURF:
     {
         cv::parallel_for_(cv::Range(0, (int)kpts.size()), SURF_Descriptor_64_Invoker(kpts, desc, evolution_));
-
-        //for (int i = 0; i < (int)(kpts.size()); i++) {
-        //    Compute_Main_Orientation(kpts[i]);
-        //    Get_SURF_Descriptor_64(kpts[i], desc.ptr<float>(i));
-        //}
     }
         break;
     case MSURF_UPRIGHT: // Upright descriptors, not invariant to rotation
     {
         cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Upright_Descriptor_64_Invoker(kpts, desc, evolution_));
-
-        //for (int i = 0; i < (int)(kpts.size()); i++) {
-        //    Get_MSURF_Upright_Descriptor_64(kpts[i], desc.ptr<float>(i));
-        //}
     }
         break;
     case MSURF:
     {
         cv::parallel_for_(cv::Range(0, (int)kpts.size()), MSURF_Descriptor_64_Invoker(kpts, desc, evolution_));
-
-        //for (int i = 0; i < (int)(kpts.size()); i++) {
-        //    Compute_Main_Orientation(kpts[i]);
-        //    Get_MSURF_Descriptor_64(kpts[i], desc.ptr<float>(i));
-        //}
     }
         break;
     case MLDB_UPRIGHT: // Upright descriptors, not invariant to rotation
@@ -807,13 +727,6 @@ void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat
             cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_));
         else
             cv::parallel_for_(cv::Range(0, (int)kpts.size()), Upright_MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_));
-
-        //for (int i = 0; i < (int)(kpts.size()); i++) {
-        //    if (options_.descriptor_size == 0)
-        //        Get_Upright_MLDB_Full_Descriptor(kpts[i], desc.ptr<unsigned char>(i));
-        //    else
-        //        Get_Upright_MLDB_Descriptor_Subset(kpts[i], desc.ptr<unsigned char>(i));
-        //}
     }
         break;
     case MLDB:
@@ -822,20 +735,9 @@ void AKAZEFeatures::Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat
             cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Full_Descriptor_Invoker(kpts, desc, evolution_, options_));
         else
             cv::parallel_for_(cv::Range(0, (int)kpts.size()), MLDB_Descriptor_Subset_Invoker(kpts, desc, evolution_, options_, descriptorSamples_, descriptorBits_));
-
-        //for (int i = 0; i < (int)(kpts.size()); i++) {
-        //    Compute_Main_Orientation(kpts[i]);
-        //    if (options_.descriptor_size == 0)
-        //        Get_MLDB_Full_Descriptor(kpts[i], desc.ptr<unsigned char>(i));
-        //    else
-        //        Get_MLDB_Descriptor_Subset(kpts[i], desc.ptr<unsigned char>(i));
-        //}
     }
         break;
     }
-
-    //t2 = cv::getTickCount();
-    //timing_.descriptor = 1000.0*(t2 - t1) / cv::getTickFrequency();
 }
 
 /* ************************************************************************* */
@@ -2047,22 +1949,6 @@ void Upright_MLDB_Descriptor_Subset_Invoker::Get_Upright_MLDB_Descriptor_Subset(
     }
 }
 
-
-
-/* ************************************************************************* */
-/**
- * @brief This method displays the computation times
- */
-//void AKAZEFeatures::Show_Computation_Times() const {
-//    cout << "(*) Time Scale Space: " << timing_.scale << endl;
-//    cout << "(*) Time Detector: " << timing_.detector << endl;
-//    cout << "   - Time Derivatives: " << timing_.derivatives << endl;
-//    cout << "   - Time Extrema: " << timing_.extrema << endl;
-//    cout << "   - Time Subpixel: " << timing_.subpixel << endl;
-//    cout << "(*) Time Descriptor: " << timing_.descriptor << endl;
-//    cout << endl;
-//}
-
 /* ************************************************************************* */
 /**
  * @brief This function computes a (quasi-random) list of bits to be taken
modules/features2d/src/akaze/AKAZEFeatures.h
index 4bebc16..302ef0d 100644
@@ -51,30 +51,6 @@ public:
     void Compute_Descriptors(std::vector<cv::KeyPoint>& kpts, cv::Mat& desc);
 
     static void Compute_Main_Orientation(cv::KeyPoint& kpt, const std::vector<TEvolution>& evolution_);
-
-    // SURF Pattern Descriptor
-    //void Get_SURF_Descriptor_Upright_64(const cv::KeyPoint& kpt, float* desc) const;
-    //void Get_SURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const;
-
-    // M-SURF Pattern Descriptor
-    //void Get_MSURF_Upright_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const;
-    //void Get_MSURF_Descriptor_64(const cv::KeyPoint& kpt, float* desc) const;
-
-    // M-LDB Pattern Descriptor
-    //void Get_Upright_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const;
-    //void Get_MLDB_Full_Descriptor(const cv::KeyPoint& kpt, unsigned char* desc) const;
-    //void Get_Upright_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc);
-    //void Get_MLDB_Descriptor_Subset(const cv::KeyPoint& kpt, unsigned char* desc);
-
-    // Methods for saving some results and showing computation times
-    //void Save_Scale_Space();
-    //void Save_Detector_Responses();
-    //void Show_Computation_Times() const;
-
-    /// Return the computation times
-    //AKAZETiming Get_Computation_Times() const {
-    //    return timing_;
-    //}
 };
 
 /* ************************************************************************* */
modules/features2d/src/kaze/KAZEFeatures.cpp
index 8d1b726..15c003e 100644
@@ -135,18 +135,9 @@ void KAZEFeatures::Allocate_Memory_Evolution(void) {
  * @param img Input image for which the nonlinear scale space needs to be created
  * @return 0 if the nonlinear scale space was created successfully. -1 otherwise
  */
-int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) {
-
-    //double t2 = 0.0, t1 = 0.0;
-
+int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img)
+{
     CV_Assert(evolution_.size() > 0);
-    //if (evolution_.size() == 0) {
-    //    cout << "Error generating the nonlinear scale space!!" << endl;
-    //    cout << "Firstly you need to call KAZE::Allocate_Memory_Evolution()" << endl;
-    //    return -1;
-    //}
-
-    //t1 = getTickCount();
 
     // Copy the original image to the first level of the evolution
     img.copyTo(evolution_[0].Lt);
@@ -156,14 +147,6 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) {
     // Firstly compute the kcontrast factor
     Compute_KContrast(evolution_[0].Lt, KCONTRAST_PERCENTILE);
 
-    //t2 = getTickCount();
-    //tkcontrast_ = 1000.0*(t2 - t1) / getTickFrequency();
-
-    //if (verbosity_ == true) {
-    //    cout << "Computed image evolution step. Evolution time: " << evolution_[0].etime <<
-    //        " Sigma: " << evolution_[0].esigma << endl;
-    //}
-
     // Now generate the rest of evolution levels
     for (size_t i = 1; i < evolution_.size(); i++) {
 
@@ -196,16 +179,8 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) {
             AOS_Step_Scalar(evolution_[i].Lt, evolution_[i - 1].Lt, evolution_[i].Lflow,
                 evolution_[i].etime - evolution_[i - 1].etime);
         }
-
-        //if (verbosity_ == true) {
-        //    cout << "Computed image evolution step " << i << " Evolution time: " << evolution_[i].etime <<
-        //        " Sigma: " << evolution_[i].esigma << endl;
-        //}
     }
 
-    //t2 = getTickCount();
-    //tnlscale_ = 1000.0*(t2 - t1) / getTickFrequency();
-
     return 0;
 }
 
@@ -217,20 +192,9 @@ int KAZEFeatures::Create_Nonlinear_Scale_Space(const cv::Mat &img) {
  * @param img Input image
  * @param kpercentile Percentile of the gradient histogram
  */
-void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile) {
-
-    //if (verbosity_ == true) {
-    //    cout << "Computing Kcontrast factor." << endl;
-    //}
-
-    //if (COMPUTE_KCONTRAST) {
-        kcontrast_ = compute_k_percentile(img, kpercentile, sderivatives_, KCONTRAST_NBINS, 0, 0);
-    //}
-
-    //if (verbosity_ == true) {
-    //    cout << "kcontrast = " << kcontrast_ << endl;
-    //    cout << endl << "Now computing the nonlinear scale space!!" << endl;
-    //}
+void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentile)
+{
+    kcontrast_ = compute_k_percentile(img, kpercentile, sderivatives_, KCONTRAST_NBINS, 0, 0);
 }
 
 //*************************************************************************************
@@ -241,19 +205,9 @@ void KAZEFeatures::Compute_KContrast(const cv::Mat &img, const float &kpercentil
  */
 void KAZEFeatures::Compute_Multiscale_Derivatives(void)
 {
-    //double t2 = 0.0, t1 = 0.0;
-    //t1 = getTickCount();
-
-#ifdef _OPENMP
-#pragma omp parallel for
-#endif
-    for (size_t i = 0; i < evolution_.size(); i++) {
-
-        //if (verbosity_ == true) {
-        //    cout << "Computing multiscale derivatives. Evolution time: " << evolution_[i].etime
-        //        << " Step (pixels): " << evolution_[i].sigma_size << endl;
-        //}
-
+    // TODO: use cv::parallel_for_
+    for (size_t i = 0; i < evolution_.size(); i++)
+    {
         // Compute multiscale derivatives for the detector
         compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Lx, 1, 0, evolution_[i].sigma_size);
         compute_scharr_derivatives(evolution_[i].Lsmooth, evolution_[i].Ly, 0, 1, evolution_[i].sigma_size);
@@ -267,9 +221,6 @@ void KAZEFeatures::Compute_Multiscale_Derivatives(void)
         evolution_[i].Lxy = evolution_[i].Lxy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size));
         evolution_[i].Lyy = evolution_[i].Lyy*((evolution_[i].sigma_size)*(evolution_[i].sigma_size));
     }
-
-    //t2 = getTickCount();
-    //tmderivatives_ = 1000.0*(t2 - t1) / getTickFrequency();
 }
 
 //*************************************************************************************
@@ -279,25 +230,19 @@ void KAZEFeatures::Compute_Multiscale_Derivatives(void)
  * @brief This method computes the feature detector response for the nonlinear scale space
  * @note We use the Hessian determinant as feature detector
  */
-void KAZEFeatures::Compute_Detector_Response(void) {
-
-    //double t2 = 0.0, t1 = 0.0;
+void KAZEFeatures::Compute_Detector_Response(void)
+{
     float lxx = 0.0, lxy = 0.0, lyy = 0.0;
 
-    //t1 = getTickCount();
-
     // Firstly compute the multiscale derivatives
     Compute_Multiscale_Derivatives();
 
-    for (size_t i = 0; i < evolution_.size(); i++) {
-
-        // Determinant of the Hessian
-        //if (verbosity_ == true) {
-        //    cout << "Computing detector response. Determinant of Hessian. Evolution time: " << evolution_[i].etime << endl;
-        //}
-
-        for (int ix = 0; ix < img_height_; ix++) {
-            for (int jx = 0; jx < img_width_; jx++) {
+    for (size_t i = 0; i < evolution_.size(); i++)
+    {
+        for (int ix = 0; ix < img_height_; ix++)
+        {
+            for (int jx = 0; jx < img_width_; jx++)
+            {
                 lxx = *(evolution_[i].Lxx.ptr<float>(ix)+jx);
                 lxy = *(evolution_[i].Lxy.ptr<float>(ix)+jx);
                 lyy = *(evolution_[i].Lyy.ptr<float>(ix)+jx);
@@ -305,9 +250,6 @@ void KAZEFeatures::Compute_Detector_Response(void) {
             }
         }
     }
-
-    //t2 = getTickCount();
-    //tdresponse_ = 1000.0*(t2 - t1) / getTickFrequency();
 }
 
 //*************************************************************************************
@@ -317,11 +259,8 @@ void KAZEFeatures::Compute_Detector_Response(void) {
  * @brief This method selects interesting keypoints through the nonlinear scale space
  * @param kpts Vector of keypoints
  */
-void KAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts) {
-
-    //double t2 = 0.0, t1 = 0.0;
-    //t1 = getTickCount();
-
+void KAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts)
+{
     kpts.clear();
 
     // Firstly compute the detector response for each pixel and scale level
@@ -332,9 +271,6 @@ void KAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts) {
 
     // Perform some subpixel refinement
     Do_Subpixel_Refinement(kpts);
-
-    //t2 = getTickCount();
-    //tdetector_ = 1000.0*(t2 - t1) / getTickFrequency();
 }
 
 //*************************************************************************************
@@ -346,8 +282,8 @@ void KAZEFeatures::Feature_Detection(std::vector<cv::KeyPoint>& kpts) {
  * @param kpts Vector of keypoints
  * @note We compute features for each of the nonlinear scale space level in a different processing thread
  */
-void KAZEFeatures::Determinant_Hessian_Parallel(std::vector<cv::KeyPoint>& kpts) {
-
+void KAZEFeatures::Determinant_Hessian_Parallel(std::vector<cv::KeyPoint>& kpts)
+{
     int level = 0;
     float dist = 0.0, smax = 3.0;
     int npoints = 0, id_repeated = 0;
@@ -367,9 +303,7 @@ void KAZEFeatures::Determinant_Hessian_Parallel(std::vector<cv::KeyPoint>& kpts)
         kpts_par_.push_back(aux);
     }
 
-#ifdef _OPENMP
-#pragma omp parallel for
-#endif
+    // TODO: Use cv::parallel_for_
     for (int i = 1; i < (int)evolution_.size() - 1; i++) {
         Find_Extremum_Threading(i);
     }
@@ -499,9 +433,7 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint> &kpts) {
     Mat A = Mat::zeros(3, 3, CV_32F);
     Mat b = Mat::zeros(3, 1, CV_32F);
     Mat dst = Mat::zeros(3, 1, CV_32F);
-    //double t2 = 0.0, t1 = 0.0;
 
-    //t1 = cv::getTickCount();
     vector<KeyPoint> kpts_(kpts);
 
     for (size_t i = 0; i < kpts_.size(); i++) {
@@ -583,9 +515,6 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint> &kpts) {
             kpts.push_back(kpts_[i]);
         }
     }
-
-    //t2 = getTickCount();
-    //tsubpixel_ = 1000.0*(t2 - t1) / getTickFrequency();
 }
 
 //*************************************************************************************
@@ -596,11 +525,8 @@ void KAZEFeatures::Do_Subpixel_Refinement(std::vector<cv::KeyPoint> &kpts) {
  * @param kpts Vector of keypoints
  * @param desc Matrix with the feature descriptors
  */
-void KAZEFeatures::Feature_Description(std::vector<cv::KeyPoint> &kpts, cv::Mat &desc) {
-
-    //double t2 = 0.0, t1 = 0.0;
-    //t1 = getTickCount();
-
+void KAZEFeatures::Feature_Description(std::vector<cv::KeyPoint> &kpts, cv::Mat &desc)
+{
     // Allocate memory for the matrix of descriptors
     if (use_extended_ == true) {
         desc = Mat::zeros((int)kpts.size(), 128, CV_32FC1);
@@ -730,9 +656,6 @@ void KAZEFeatures::Feature_Description(std::vector<cv::KeyPoint> &kpts, cv::Mat
             }
         }
     }
-
-    //t2 = getTickCount();
-    //tdescriptor_ = 1000.0*(t2 - t1) / getTickFrequency();
 }
 
 //*************************************************************************************