From f4e33ea0ba6a9b07e58f7f71ed75ce10e055ea31 Mon Sep 17 00:00:00 2001
From: Andrey Kamaev
Date: Tue, 16 Oct 2012 20:03:07 +0400
Subject: [PATCH] Fix build of samples

---
 samples/cpp/dbt_face_detection.cpp             | 104 ------------------------
 samples/cpp/detection_based_tracker_sample.cpp |  69 ++++++----------
 samples/gpu/generalized_hough.cpp              |  58 +++++++-------
 samples/gpu/performance/tests.cpp              |   2 +-
 samples/gpu/surf_keypoint_matcher.cpp          |   4 +-
 5 files changed, 52 insertions(+), 185 deletions(-)
 delete mode 100644 samples/cpp/dbt_face_detection.cpp

diff --git a/samples/cpp/dbt_face_detection.cpp b/samples/cpp/dbt_face_detection.cpp
deleted file mode 100644
index 35386a7..0000000
--- a/samples/cpp/dbt_face_detection.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-#if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(ANDROID)
-
-#include <opencv2/imgproc/imgproc.hpp>  // Gaussian Blur
-#include <opencv2/core/core.hpp>        // Basic OpenCV structures (cv::Mat, Scalar)
-#include <opencv2/highgui/highgui.hpp>  // OpenCV window I/O
-#include <opencv2/objdetect/objdetect.hpp>
-#include <opencv2/contrib/detection_based_tracker.hpp>
-
-#include <stdio.h>
-#include <string>
-#include <vector>
-
-using namespace std;
-using namespace cv;
-
-const string WindowName = "Face Detection example";
-
-class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
-{
-    public:
-        CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
-            IDetector(),
-            Detector(detector)
-        {
-            CV_Assert(!detector.empty());
-        }
-
-        void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
-        {
-            Detector->detectMultiScale(Image, objects, scaleFactor, minNeighbours, 0, minObjSize, maxObjSize);
-        }
-
-        virtual ~CascadeDetectorAdapter()
-        {}
-
-    private:
-        CascadeDetectorAdapter();
-        cv::Ptr<cv::CascadeClassifier> Detector;
-};
-
-int main(int , char** )
-{
-    namedWindow(WindowName);
-
-    VideoCapture VideoStream(0);
-
-    if (!VideoStream.isOpened())
-    {
-        printf("Error: Cannot open video stream from camera\n");
-        return 1;
-    }
-
-    std::string cascadeFrontalfilename = "../../data/lbpcascades/lbpcascade_frontalface.xml";
-    cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
-    cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
-
-    cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
-    cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
-
-    DetectionBasedTracker::Parameters params;
-    DetectionBasedTracker Detector(MainDetector, TrackingDetector, params);
-
-    if (!Detector.run())
-    {
-        printf("Error: Detector initialization failed\n");
-        return 2;
-    }
-
-    Mat ReferenceFrame;
-    Mat GrayFrame;
-    vector<Rect> Faces;
-
-    while(true)
-    {
-        VideoStream >> ReferenceFrame;
-        cvtColor(ReferenceFrame, GrayFrame, COLOR_RGB2GRAY);
-        Detector.process(GrayFrame);
-        Detector.getObjects(Faces);
-
-        for (size_t i = 0; i < Faces.size(); i++)
-        {
-            rectangle(ReferenceFrame, Faces[i], CV_RGB(0,255,0));
-        }
-
-        imshow(WindowName, ReferenceFrame);
-
-        if (cvWaitKey(30) >= 0) break;
-    }
-
-    Detector.stop();
-
-    return 0;
-}
-
-#else
-
-#include <stdio.h>
-int main()
-{
-    printf("This sample works for UNIX or ANDROID only\n");
-    return 0;
-}
-
-#endif
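
Note (not part of the patch): the sample deleted above drives the tracker through the DetectionBasedTracker::IDetector adapter interface, which the contrib module on this branch does not expose; the branch constructs the tracker directly from a cascade file, as the remaining detection_based_tracker_sample.cpp below now does. A minimal sketch of the same webcam loop written against that constructor, assuming an OpenCV 2.4-era build, the contrib header path opencv2/contrib/detection_based_tracker.hpp, and a placeholder cascade path, could look like this:

    // Sketch only: mirrors the deleted webcam sample, but uses the
    // filename-based constructor available on this branch.
    // "lbpcascade_frontalface.xml" is a placeholder path.
    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/contrib/detection_based_tracker.hpp>
    #include <cstdio>
    #include <vector>

    using namespace cv;

    int main()
    {
        VideoCapture stream(0);
        if (!stream.isOpened())
        {
            printf("Error: cannot open video stream from camera\n");
            return 1;
        }

        DetectionBasedTracker::Parameters params;
        DetectionBasedTracker detector("lbpcascade_frontalface.xml", params);

        if (!detector.run())
        {
            printf("Error: detector initialization failed\n");
            return 2;
        }

        Mat frame, gray;
        std::vector<Rect> faces;

        for (;;)
        {
            stream >> frame;                    // grab a frame from the camera
            cvtColor(frame, gray, CV_BGR2GRAY); // the tracker expects a gray image
            detector.process(gray);             // detect/track on the current frame
            detector.getObjects(faces);         // collect the tracked rectangles

            for (size_t i = 0; i < faces.size(); i++)
                rectangle(frame, faces[i], CV_RGB(0, 255, 0));

            imshow("Face Detection example", frame);
            if (waitKey(30) >= 0)
                break;
        }

        detector.stop();
        return 0;
    }

Like the deleted sample, this only builds on platforms where the contrib tracker is compiled (Linux, Apple, Android).
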
diff --git a/samples/cpp/detection_based_tracker_sample.cpp b/samples/cpp/detection_based_tracker_sample.cpp
index 61106f3..6aeb1b2 100644
--- a/samples/cpp/detection_based_tracker_sample.cpp
+++ b/samples/cpp/detection_based_tracker_sample.cpp
@@ -43,6 +43,8 @@
 #define LOGE(...) do{} while(0)
 #endif
 
+
+
 using namespace cv;
 using namespace std;
 
@@ -61,31 +63,9 @@ static void usage()
     LOGE0("\t (e.g.\"opencv/data/lbpcascades/lbpcascade_frontalface.xml\" ");
 }
 
-class CascadeDetectorAdapter: public DetectionBasedTracker::IDetector
-{
-    public:
-        CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
-            Detector(detector)
-        {
-            CV_Assert(!detector.empty());
-        }
-
-        void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
-        {
-            Detector->detectMultiScale(Image, objects, 1.1, 3, 0, minObjSize, maxObjSize);
-        }
-        virtual ~CascadeDetectorAdapter()
-        {}
-
-    private:
-        CascadeDetectorAdapter();
-        cv::Ptr<cv::CascadeClassifier> Detector;
-    };
-
 static int test_FaceDetector(int argc, char *argv[])
 {
-    if (argc < 4)
-    {
+    if (argc < 4) {
         usage();
         return -1;
     }
@@ -100,14 +80,12 @@ static int test_FaceDetector(int argc, char *argv[])
     vector<Mat> images;
     {
         char filename[256];
-        for(int n=1; ; n++)
-        {
+        for(int n=1; ; n++) {
             snprintf(filename, sizeof(filename), filepattern, n);
             LOGD("filename='%s'", filename);
             Mat m0;
             m0=imread(filename);
-            if (m0.empty())
-            {
+            if (m0.empty()) {
                 LOGI0("Cannot read the file --- break");
                 break;
             }
@@ -116,15 +94,10 @@ static int test_FaceDetector(int argc, char *argv[])
         LOGD("read %d images", (int)images.size());
     }
 
+    DetectionBasedTracker::Parameters params;
     std::string cascadeFrontalfilename=cascadefile;
-    cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
-    cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
-
-    cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
-    cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
 
-    DetectionBasedTracker::Parameters params;
-    DetectionBasedTracker fd(MainDetector, TrackingDetector, params);
+    DetectionBasedTracker fd(cascadeFrontalfilename, params);
 
     fd.run();
 
@@ -135,13 +108,12 @@ static int test_FaceDetector(int argc, char *argv[])
 
     double freq=getTickFrequency();
     int num_images=images.size();
-    for(int n=1; n <= num_images; n++)
-    {
+    for(int n=1; n <= num_images; n++) {
         int64 tcur=getTickCount();
         int64 dt=tcur-tprev;
         tprev=tcur;
         double t_ms=((double)dt)/freq * 1000.0;
-        LOGD("\n\nSTEP n=%d from prev step %f ms\n", n, t_ms);
+        LOGD("\n\nSTEP n=%d from prev step %f ms\n\n", n, t_ms);
         m=images[n-1];
         CV_Assert(! m.empty());
         cvtColor(m, gray, CV_BGR2GRAY);
@@ -151,8 +123,11 @@ static int test_FaceDetector(int argc, char *argv[])
 
         vector<Rect> result;
         fd.getObjects(result);
-        for(size_t i=0; i < result.size(); i++)
-        {
+
+
+
+
+        for(size_t i=0; i < result.size(); i++) {
             Rect r=result[i];
             CV_Assert(r.area() > 0);
             Point tl=r.tl();
@@ -161,14 +136,14 @@ static int test_FaceDetector(int argc, char *argv[])
             rectangle(m, tl, br, color, 3);
         }
     }
-
-    char outfilename[256];
-    for(int n=1; n <= num_images; n++) {
-        snprintf(outfilename, sizeof(outfilename), outfilepattern, n);
-        LOGD("outfilename='%s'", outfilename);
-        m=images[n-1];
-        imwrite(outfilename, m);
+        char outfilename[256];
+        for(int n=1; n <= num_images; n++) {
+            snprintf(outfilename, sizeof(outfilename), outfilepattern, n);
+            LOGD("outfilename='%s'", outfilename);
+            m=images[n-1];
+            imwrite(outfilename, m);
+        }
     }
 
     fd.stop();
 
@@ -176,6 +151,8 @@ static int test_FaceDetector(int argc, char *argv[])
 
     return 0;
 }
+
+
 int main(int argc, char *argv[])
 {
     return test_FaceDetector(argc, argv);
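
Aside (not from the patch): the per-frame timing the sample logs ("STEP n=... from prev step ... ms") is the usual getTickCount()/getTickFrequency() pattern. A small stand-alone sketch of just that measurement loop, with an arbitrary iteration count:

    // Sketch only: measure the time between loop iterations in milliseconds,
    // the same way the sample computes t_ms for its LOGD output.
    #include <opencv2/core/core.hpp>
    #include <cstdio>

    int main()
    {
        const double freq = cv::getTickFrequency();   // ticks per second
        int64 tprev = cv::getTickCount();

        for (int n = 1; n <= 10; n++)
        {
            // ... per-frame work would go here ...
            int64 tcur = cv::getTickCount();
            double t_ms = (double)(tcur - tprev) / freq * 1000.0;
            tprev = tcur;
            printf("STEP n=%d from prev step %f ms\n", n, t_ms);
        }
        return 0;
    }
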
diff --git a/samples/gpu/generalized_hough.cpp b/samples/gpu/generalized_hough.cpp
index c41e790..6f12ad7 100644
--- a/samples/gpu/generalized_hough.cpp
+++ b/samples/gpu/generalized_hough.cpp
@@ -26,41 +26,41 @@ static Mat loadImage(const string& name)
 int main(int argc, const char* argv[])
 {
     CommandLineParser cmd(argc, argv,
-        "{ image i | pic1.png | input image }"
-        "{ template t | templ.png | template image }"
-        "{ scale s | | estimate scale }"
-        "{ rotation r | | estimate rotation }"
-        "{ gpu | | use gpu version }"
-        "{ minDist | 100 | minimum distance between the centers of the detected objects }"
-        "{ levels | 360 | R-Table levels }"
-        "{ votesThreshold | 30 | the accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected }"
-        "{ angleThresh | 10000 | angle votes treshold }"
-        "{ scaleThresh | 1000 | scale votes treshold }"
-        "{ posThresh | 100 | position votes threshold }"
-        "{ dp | 2 | inverse ratio of the accumulator resolution to the image resolution }"
-        "{ minScale | 0.5 | minimal scale to detect }"
-        "{ maxScale | 2 | maximal scale to detect }"
-        "{ scaleStep | 0.05 | scale step }"
-        "{ minAngle | 0 | minimal rotation angle to detect in degrees }"
-        "{ maxAngle | 360 | maximal rotation angle to detect in degrees }"
-        "{ angleStep | 1 | angle step in degrees }"
-        "{ maxSize | 1000 | maximal size of inner buffers }"
-        "{ help h ? | | print help message }"
+        "{ i | image | pic1.png | input image }"
+        "{ t | template | templ.png | template image }"
+        "{ s | scale | | estimate scale }"
+        "{ r | rotation | | estimate rotation }"
+        "{ | gpu | | use gpu version }"
+        "{ | minDist | 100 | minimum distance between the centers of the detected objects }"
+        "{ | levels | 360 | R-Table levels }"
+        "{ | votesThreshold | 30 | the accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected }"
+        "{ | angleThresh | 10000 | angle votes treshold }"
+        "{ | scaleThresh | 1000 | scale votes treshold }"
+        "{ | posThresh | 100 | position votes threshold }"
+        "{ | dp | 2 | inverse ratio of the accumulator resolution to the image resolution }"
+        "{ | minScale | 0.5 | minimal scale to detect }"
+        "{ | maxScale | 2 | maximal scale to detect }"
+        "{ | scaleStep | 0.05 | scale step }"
+        "{ | minAngle | 0 | minimal rotation angle to detect in degrees }"
+        "{ | maxAngle | 360 | maximal rotation angle to detect in degrees }"
+        "{ | angleStep | 1 | angle step in degrees }"
+        "{ | maxSize | 1000 | maximal size of inner buffers }"
+        "{ h | help | | print help message }"
     );
 
-    cmd.about("This program demonstrates arbitary object finding with the Generalized Hough transform.");
+    //cmd.about("This program demonstrates arbitary object finding with the Generalized Hough transform.");
 
-    if (cmd.has("help"))
+    if (cmd.get<bool>("help"))
     {
-        cmd.printMessage();
+        cmd.printParams();
         return 0;
     }
 
     const string templName = cmd.get<string>("template");
     const string imageName = cmd.get<string>("image");
-    const bool estimateScale = cmd.has("scale");
-    const bool estimateRotation = cmd.has("rotation");
-    const bool useGpu = cmd.has("gpu");
+    const bool estimateScale = cmd.get<bool>("scale");
+    const bool estimateRotation = cmd.get<bool>("rotation");
+    const bool useGpu = cmd.get<bool>("gpu");
     const double minDist = cmd.get<double>("minDist");
     const int levels = cmd.get<int>("levels");
     const int votesThreshold = cmd.get<int>("votesThreshold");
@@ -76,12 +76,6 @@ int main(int argc, const char* argv[])
     const double angleStep = cmd.get<double>("angleStep");
     const int maxSize = cmd.get<int>("maxSize");
 
-    if (!cmd.check())
-    {
-        cmd.printErrors();
-        return -1;
-    }
-
     Mat templ = loadImage(templName);
     Mat image = loadImage(imageName);
 
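
Note (not from the patch): generalized_hough.cpp is switched back to the older CommandLineParser this branch ships, where keys are written as "{ short | long | default | help }", values are read with get<T>(), and printParams() stands in for printMessage(); has() and check() do not exist there. A minimal sketch of that older interface, with invented option names and defaults:

    // Sketch only: the older 2.4-branch CommandLineParser interface used above.
    // The option names and defaults here are made up for illustration.
    #include <opencv2/core/core.hpp>
    #include <iostream>

    int main(int argc, const char* argv[])
    {
        cv::CommandLineParser cmd(argc, argv,
            "{ i | image | input.png | input image }"
            "{   | gpu   |           | use gpu version }"
            "{ h | help  |           | print help message }");

        if (cmd.get<bool>("help"))
        {
            cmd.printParams();   // prints the key/default/help table
            return 0;
        }

        const std::string imageName = cmd.get<std::string>("image");
        const bool useGpu = cmd.get<bool>("gpu");

        std::cout << "image=" << imageName << ", gpu=" << (useGpu ? "yes" : "no") << std::endl;
        return 0;
    }
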
diff --git a/samples/gpu/performance/tests.cpp b/samples/gpu/performance/tests.cpp
index e117202..367bf7d 100644
--- a/samples/gpu/performance/tests.cpp
+++ b/samples/gpu/performance/tests.cpp
@@ -364,7 +364,7 @@ TEST(BruteForceMatcher)
 
     // Init GPU matcher
 
-    gpu::BFMatcher_GPU d_matcher(NORM_L2);
+    gpu::BruteForceMatcher_GPU_base d_matcher(gpu::BruteForceMatcher_GPU_base::L2Dist);
 
     gpu::GpuMat d_query(query);
     gpu::GpuMat d_train(train);
diff --git a/samples/gpu/surf_keypoint_matcher.cpp b/samples/gpu/surf_keypoint_matcher.cpp
index 605ed6a..0547485 100644
--- a/samples/gpu/surf_keypoint_matcher.cpp
+++ b/samples/gpu/surf_keypoint_matcher.cpp
@@ -57,7 +57,7 @@ int main(int argc, char* argv[])
     cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;
 
     // matching descriptors
-    BFMatcher_GPU matcher(NORM_L2);
+    gpu::BruteForceMatcher_GPU_base matcher(gpu::BruteForceMatcher_GPU_base::L2Dist);
     GpuMat trainIdx, distance;
     matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance);
 
@@ -69,7 +69,7 @@ int main(int argc, char* argv[])
     surf.downloadKeypoints(keypoints2GPU, keypoints2);
     surf.downloadDescriptors(descriptors1GPU, descriptors1);
     surf.downloadDescriptors(descriptors2GPU, descriptors2);
-    BFMatcher_GPU::matchDownload(trainIdx, distance, matches);
+    BruteForceMatcher_GPU_base::matchDownload(trainIdx, distance, matches);
 
     // drawing the results
     Mat img_matches;
-- 
2.7.4
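
Postscript (not part of the patch): both GPU samples move back from BFMatcher_GPU(NORM_L2) to gpu::BruteForceMatcher_GPU_base with an explicit distance type. A minimal sketch of matching two descriptor sets with that class, using the blocking match() overload instead of the matchSingle()/matchDownload() pair the SURF sample uses; the helper name matchDescriptors is invented, and the descriptor Mats are assumed to be CV_32F rows as required for L2 matching:

    // Sketch only: GPU brute-force matching with the 2.4-era class
    // the samples were switched back to.
    #include <opencv2/core/core.hpp>
    #include <opencv2/features2d/features2d.hpp>
    #include <opencv2/gpu/gpu.hpp>
    #include <vector>

    static std::vector<cv::DMatch> matchDescriptors(const cv::Mat& query,
                                                    const cv::Mat& train)
    {
        cv::gpu::GpuMat d_query(query);   // upload query descriptors to the GPU
        cv::gpu::GpuMat d_train(train);   // upload train descriptors to the GPU

        cv::gpu::BruteForceMatcher_GPU_base matcher(cv::gpu::BruteForceMatcher_GPU_base::L2Dist);

        std::vector<cv::DMatch> matches;
        matcher.match(d_query, d_train, matches);   // blocking call, results on the host
        return matches;
    }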