//M*/\r
\r
\r
-#include "opencv2/core/core.hpp"\r
-#include "opencv2/contrib/contrib.hpp"\r
-#include "opencv2/highgui/highgui.hpp"\r
-\r
#include <iostream>\r
#include <cstdio>\r
#include <cstring>\r
#include <ctime>\r
+#include "opencv2/contrib/contrib.hpp"\r
+#include "opencv2/highgui/highgui.hpp"\r
\r
-using namespace std;\r
-using namespace cv;\r
+void help(char **argv)\r
+{\r
+ std::cout << "\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector which can be found in contrib.cpp\n"\r
+ << "Usage: " << std::endl <<\r
+ argv[0] << " fileMask firstFrame lastFrame" << std::endl << std::endl <<\r
+ "Example: " << std::endl <<\r
+ argv[0] << " C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg 0 1000" << std::endl <<\r
+ " iterates through temp_00000.jpg to temp_01000.jpg" << std::endl << std::endl <<\r
+ "If no parameter specified, this application will try to capture from the default Webcam." << std::endl <<\r
+ "Please note: Background should not contain large surfaces with skin tone." <<\r
+ "\n\n ESC will stop\n"\r
+ "Using OpenCV version %s\n" << CV_VERSION << "\n"\r
+ << std::endl;\r
+}\r
\r
class ASDFrameHolder\r
{\r
\r
\r
//-------------------- ASDFrameSequencer -----------------------//\r
+\r
ASDFrameSequencer::~ASDFrameSequencer()\r
{\r
close();\r
\r
\r
//-------------------- ASDFrameSequencerWebCam -----------------------//\r
+\r
bool ASDFrameSequencerWebCam::open(int cameraIndex)\r
{\r
close();\r
}\r
};\r
\r
-void help()\r
-{\r
- printf("\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector \n"\r
- "which can be found in contrib.cpp \n"\r
- "Usage: \n"\r
- "./adaptiveskindetector [--fileMask]=<path to file, which are used in mask \n"\r
- " [--firstFrame]=<first frame number \n"\r
- " [--lastFrame]=<last frame number> \n"\r
- "if at least one parameter doesn't specified, it will try to use default webcam \n"\r
- "Expample: \n"\r
- " --fileMask = /home/user_home_directory/work/opencv/samples/c/temp_%%05d.jpg --firstFrame=0 --lastFrame=1000 \n");\r
-}\r
-\r
-int main(int argc, const char** argv )\r
+int main(int argc, char** argv )\r
{\r
- help();\r
-\r
- CommandLineParser parser(argc, argv);\r
-\r
- string fileMask = parser.get<string>("fileMask");\r
- int firstFrame = parser.get<int>("firstFrame", 0);\r
- int lastFrame = parser.get<int>("lastFrame", 0);\r
-\r
IplImage *img, *filterMask = NULL;\r
CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);\r
ASDFrameSequencer *sequencer;\r
CvFont base_font;\r
char caption[2048], s[256], windowName[256];\r
long int clockTotal = 0, numFrames = 0;\r
- std::clock_t clock;\r
+ std::clock_t clock;\r
\r
if (argc < 4)\r
{\r
+ help(argv);\r
sequencer = new ASDFrameSequencerWebCam();\r
(dynamic_cast<ASDFrameSequencerWebCam*>(sequencer))->open(-1);\r
\r
}\r
else\r
{\r
- // A sequence of images captured from video source, is stored here\r
sequencer = new ASDFrameSequencerImageFile();\r
- (dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(fileMask.c_str(), firstFrame, lastFrame );\r
+ (dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // A sequence of images captured from video source, is stored here\r
\r
}\r
std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences");\r
cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);\r
cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5);\r
\r
+ // Usage:\r
+ // c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000\r
+\r
+ std::cout << "Press ESC to stop." << std::endl << std::endl;\r
while ((img = sequencer->getNextImage()) != 0)\r
{\r
numFrames++;\r
#include <stdlib.h>
#include <ctype.h>
-#include "opencv2/core/core.hpp"
#include "opencv2/video/background_segm.hpp"
-#include <opencv2/imgproc/imgproc_c.h>
+#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui/highgui.hpp"
-using namespace std;
-using namespace cv;
-
//VARIABLES for CODEBOOK METHOD:
CvBGCodeBookModel* model = 0;
const int NCHANNELS = 3;
{
printf("\nLearn background and find foreground using simple average and average difference learning method:\n"
"Originally from the book: Learning OpenCV by O'Reilly press\n"
- "\nUsage:\n"
- "./bgfg_codebook [--nframes]=<frames number, 300 as default> \n"
- " [--input]=<movie filename or camera index, zero camera index as default>\n"
- "***Keep the focus on the video windows, NOT the consol***\n\n"
- "INTERACTIVE PARAMETERS:\n"
- "\tESC,q,Q - quit the program\n"
- "\th - print this help\n"
- "\tp - pause toggle\n"
- "\ts - single step\n"
- "\tr - run mode (single step off)\n"
- "=== AVG PARAMS ===\n"
- "\t- - bump high threshold UP by 0.25\n"
- "\t= - bump high threshold DOWN by 0.25\n"
- "\t[ - bump low threshold UP by 0.25\n"
- "\t] - bump low threshold DOWN by 0.25\n"
- "=== CODEBOOK PARAMS ===\n"
- "\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n"
- "\ta - adjust all 3 channels at once\n"
- "\tb - adjust both 2 and 3 at once\n"
- "\ti,o - bump upper threshold up,down by 1\n"
- "\tk,l - bump lower threshold up,down by 1\n"
- "\tSPACE - reset the model\n"
+ "\nUSAGE:\nbgfg_codebook [--nframes=300] [movie filename, else from camera]\n"
+ "***Keep the focus on the video windows, NOT the consol***\n\n"
+ "INTERACTIVE PARAMETERS:\n"
+ "\tESC,q,Q - quit the program\n"
+ "\th - print this help\n"
+ "\tp - pause toggle\n"
+ "\ts - single step\n"
+ "\tr - run mode (single step off)\n"
+ "=== AVG PARAMS ===\n"
+ "\t- - bump high threshold UP by 0.25\n"
+ "\t= - bump high threshold DOWN by 0.25\n"
+ "\t[ - bump low threshold UP by 0.25\n"
+ "\t] - bump low threshold DOWN by 0.25\n"
+ "=== CODEBOOK PARAMS ===\n"
+ "\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n"
+ "\ta - adjust all 3 channels at once\n"
+ "\tb - adjust both 2 and 3 at once\n"
+ "\ti,o - bump upper threshold up,down by 1\n"
+ "\tk,l - bump lower threshold up,down by 1\n"
+ "\tSPACE - reset the model\n"
);
}
//USAGE: ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]
//If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V
//
-int main(int argc, const char** argv)
+int main(int argc, char** argv)
{
- help();
-
- CommandLineParser parser(argc, argv);
-
- string inputName = parser.get<string>("input", "0");
- int nframesToLearnBG = parser.get<int>("nframes", 300);
-
+ const char* filename = 0;
IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method
IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
CvCapture* capture = 0;
- int c, n, nframes = 0;
+ int c, n, nframes = 0;
+ int nframesToLearnBG = 300;
model = cvCreateBGCodeBookModel();
bool pause = false;
bool singlestep = false;
- if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
+ for( n = 1; n < argc; n++ )
{
- printf("Capture from camera\n");
- capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' );
- int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ;
- if( !capture)
- {
- printf ("Capture from CAM %d", c);
- printf (" didn't work\n");
- }
- }
- else
+ static const char* nframesOpt = "--nframes=";
+ if( strncmp(argv[n], nframesOpt, strlen(nframesOpt))==0 )
{
- printf("Capture from file %s\n",inputName.c_str());
- capture = cvCreateFileCapture(inputName.c_str());
- if( !capture)
+ if( sscanf(argv[n] + strlen(nframesOpt), "%d", &nframesToLearnBG) == 0 )
{
- printf ("Capture from file %s", inputName.c_str());
- printf (" didn't work\n");
help();
return -1;
}
-
}
+ else
+ filename = argv[n];
+ }
+
+ if( !filename )
+ {
+ printf("Capture from camera\n");
+ capture = cvCaptureFromCAM( 0 );
+ }
+ else
+ {
+ printf("Capture from file %s\n",filename);
+ capture = cvCreateFileCapture( filename );
+ }
+
+ if( !capture )
+ {
+ printf( "Can not initialize video capturing\n\n" );
+ help();
+ return -1;
+ }
//MAIN PROCESSING LOOP:
for(;;)
-#include "opencv2/core/core.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
void help()
{
- cout << "\nThis program demonstrates the cascade classifier. Now you can use Haar or LBP features.\n"
+ cout << "\nThis program demonstrates the cascade recognizer. Now you can use Haar or LBP features.\n"
"This classifier can recognize many ~rigid objects, it's most known use is for faces.\n"
"Usage:\n"
"./facedetect [--cascade=<cascade_path> this is the primary trained classifier such as frontal face]\n"
" [--nested-cascade[=nested_cascade_path this an optional secondary classifier such as eyes]]\n"
" [--scale=<image scale greater or equal to 1, try 1.3 for example>\n"
- " [--input=filename|camera_index]\n\n"
+ " [filename|camera_index]\n\n"
"see facedetect.cmd for one call:\n"
"./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye.xml\" --scale=1.3 \n"
"Hit any key to quit.\n"
CascadeClassifier& cascade, CascadeClassifier& nestedCascade,
double scale);
+String cascadeName = "../../data/haarcascades/haarcascade_frontalface_alt.xml";
+String nestedCascadeName = "../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml";
+
int main( int argc, const char** argv )
{
- help();
-
- CommandLineParser parser(argc, argv);
-
- string cascadeName = parser.get<string>("cascade", "../../data/haarcascades/haarcascade_frontalface_alt.xml");
- string nestedCascadeName = parser.get<string>("nested-cascade", "../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml");
- double scale = parser.get<double>("scale", 1.0);
- string inputName = parser.get<string>("input", "0"); //read from camera by default
-
- if (!cascadeName.empty())
- cout << " from which we have cascadeName= " << cascadeName << endl;
-
- if (!nestedCascadeName.empty())
- cout << " from which we have nestedCascadeName= " << nestedCascadeName << endl;
-
CvCapture* capture = 0;
Mat frame, frameCopy, image;
+ const String scaleOpt = "--scale=";
+ size_t scaleOptLen = scaleOpt.length();
+ const String cascadeOpt = "--cascade=";
+ size_t cascadeOptLen = cascadeOpt.length();
+ const String nestedCascadeOpt = "--nested-cascade";
+ size_t nestedCascadeOptLen = nestedCascadeOpt.length();
+ String inputName;
+
+ help();
+
CascadeClassifier cascade, nestedCascade;
+ double scale = 1;
+
+ for( int i = 1; i < argc; i++ )
+ {
+ cout << "Processing " << i << " " << argv[i] << endl;
+ if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 )
+ {
+ cascadeName.assign( argv[i] + cascadeOptLen );
+ cout << " from which we have cascadeName= " << cascadeName << endl;
+ }
+ else if( nestedCascadeOpt.compare( 0, nestedCascadeOptLen, argv[i], nestedCascadeOptLen ) == 0 )
+ {
+ if( argv[i][nestedCascadeOpt.length()] == '=' )
+ nestedCascadeName.assign( argv[i] + nestedCascadeOpt.length() + 1 );
+ if( !nestedCascade.load( nestedCascadeName ) )
+ cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
+ }
+ else if( scaleOpt.compare( 0, scaleOptLen, argv[i], scaleOptLen ) == 0 )
+ {
+ if( !sscanf( argv[i] + scaleOpt.length(), "%lf", &scale ) || scale < 1 )
+ scale = 1;
+ cout << " from which we read scale = " << scale << endl;
+ }
+ else if( argv[i][0] == '-' )
+ {
+ cerr << "WARNING: Unknown option %s" << argv[i] << endl;
+ }
+ else
+ inputName.assign( argv[i] );
+ }
if( !cascade.load( cascadeName ) )
{
cerr << "ERROR: Could not load classifier cascade" << endl;
+ cerr << "Usage: facedetect [--cascade=<cascade_path>]\n"
+ " [--nested-cascade[=nested_cascade_path]]\n"
+ " [--scale[=<image scale>\n"
+ " [filename|camera_index]\n" << endl ;
return -1;
}
- if( !nestedCascade.load( nestedCascadeName ) )
- cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
-
if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
{
capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' );
int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ;
- if( !capture) cout << "Capture from CAM " << c << " didn't work" << endl;
+ if(!capture) cout << "Capture from CAM " << c << " didn't work" << endl;
}
else if( inputName.size() )
{
if( image.empty() )
{
capture = cvCaptureFromAVI( inputName.c_str() );
- if( !capture ) cout << "Capture from AVI didn't work" << endl;
+ if(!capture) cout << "Capture from AVI didn't work" << endl;
}
}
+ else
+ {
+ image = imread( "lena.jpg", 1 );
+ if(image.empty()) cout << "Couldn't read lena.jpg" << endl;
+ }
cvNamedWindow( "result", 1 );
* Author: Liu Liu
* liuliu.1987+opencv@gmail.com
*/
-#include "opencv2/core/core.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
-#include <opencv2/imgproc/imgproc_c.h>
+#include "opencv2/imgproc/imgproc_c.h"
#include <iostream>
#include <vector>
using namespace std;
-using namespace cv;
-
void help()
{
- printf( "\n This program demonstrated the use of the SURF Detector and Descriptor using\n"
- "either FLANN (fast approx nearst neighbor classification) or brute force matching\n"
- "on planar objects.\n"
- "Usage: \n"
- "./find_obj [--object_filename]=<object_filename, box.png as default> \n"
- " [--scene_filename]=<scene_filename box_in_scene.png as default>] \n"
- "Example: \n"
- "./find_obj --object_filename=box.png --scene_filename=box_in_scene.png \n\n"
- );
+ printf(
+ "This program demonstrated the use of the SURF Detector and Descriptor using\n"
+ "either FLANN (fast approx nearst neighbor classification) or brute force matching\n"
+ "on planar objects.\n"
+ "Call:\n"
+ "./find_obj [<object_filename default box.png> <scene_filename default box_in_scene.png>]\n\n"
+ );
+
}
// define whether to use approximate nearest-neighbor search
return 1;
}
-int main(int argc, const char** argv)
+int main(int argc, char** argv)
{
- help();
-
- CommandLineParser parser(argc, argv);
-
- string objectFileName = parser.get<string>("object_filename", "box.png");
- string sceneFileName = parser.get<string>("scene_filename", "box_in_scene.png");
+ const char* object_filename = argc == 3 ? argv[1] : "box.png";
+ const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";
CvMemStorage* storage = cvCreateMemStorage(0);
+ help();
cvNamedWindow("Object", 1);
cvNamedWindow("Object Correspond", 1);
{{255,255,255}}
};
- IplImage* object = cvLoadImage( objectFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
- IplImage* image = cvLoadImage( sceneFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
+ IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
+ IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
if( !object || !image )
{
- fprintf( stderr, "Can not load %s and/or %s\n", objectFileName.c_str(), sceneFileName.c_str() );
+ fprintf( stderr, "Can not load %s and/or %s\n"
+ "Usage: find_obj [<object_filename> <scene_filename>]\n",
+ object_filename, scene_filename );
exit(-1);
}
IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);
void help()
{
- printf("\n This program shows the use of the Calonder point descriptor classifier \n"
- "SURF is used to detect interest points, Calonder is used to describe/match these points \n"
- "Usage: \n"
- "./find_obj_calonder --classifier_file=<classifier file, there is no default classifier file. You should create it at first and when you can use it for test> \n"
- " --test_image=<image file for test, lena.jpg as default> \n"
- " [--train_container]=<txt file with train images filenames> \n"
- "Example: \n"
- " --classifier_file=test_classifier --test_image=lena.jpg --train_container=one_way_train_images.txt \n"
- " the test_classifier is created here using --train_container and tested witn --test_image at the end \n"
- " --classifier_file=test_classifier --test_image=lena.jpg \n"
- " the test classifier is tested here using lena.jpg \n");
+ cout << "This program shows the use of the Calonder point descriptor classifier"
+ "SURF is used to detect interest points, Calonder is used to describe/match these points\n"
+ "Format:" << endl <<
+ " classifier_file(to write) test_image file_with_train_images_filenames(txt)" <<
+ " or" << endl <<
+ " classifier_file(to read) test_image"
+ "Using OpenCV version %s\n" << CV_VERSION << "\n"
+ << endl;
}
/*
* Generates random perspective transform of image
waitKey();
}
-int main( int argc, const char **argv )
+int main( int argc, char **argv )
{
- help();
-
- CommandLineParser parser(argc, argv);
-
- string classifierFileName = parser.get<string>("classifier_file");
- string testImageFileName = parser.get<string>("test_image", "lena.jpg");
- string trainContainerFileName = parser.get<string>("train_container");
-
- if( classifierFileName.empty())
+ if( argc != 4 && argc != 3 )
{
- printf("\n Can't find classifier file, please select file for --classifier_file parameter \n");
help();
return -1;
}
- if( !trainContainerFileName.empty())
- trainCalonderClassifier( classifierFileName.c_str(), trainContainerFileName.c_str() );
+ if( argc == 4 )
+ trainCalonderClassifier( argv[1], argv[3] );
- testCalonderClassifier( classifierFileName.c_str(), testImageFileName.c_str() );
+ testCalonderClassifier( argv[1], argv[2] );
return 0;
}
#include <vector>
using namespace cv;
-
void help()
{
printf( "This program shows the use of the \"fern\" plannar PlanarObjectDetector point\n"
- "descriptor classifier"
- "Usage: \n"
- "./find_obj_ferns [--object_filename]=<object_filename, box.png as default> \n"
- " [--scene_filename]=<scene_filename box_in_scene.png as default>] \n"
- "Example: \n"
- "./find_obj_ferns --object_filename=box.png --scene_filename=box_in_scene.png \n");
+ "descriptor classifier"
+ "Usage:\n"
+ "./find_obj_ferns [<object_filename default: box.png> <scene_filename default:box_in_scene.png>]\n"
+ "\n");
}
-
-int main(int argc, const char** argv)
+int main(int argc, char** argv)
{
+ const char* object_filename = argc > 1 ? argv[1] : "box.png";
+ const char* scene_filename = argc > 2 ? argv[2] : "box_in_scene.png";
+ int i;
help();
-
- CommandLineParser parser(argc, argv);
-
- string objectFileName = parser.get<string>("object_filename", "box.png");
- string sceneFileName = parser.get<string>("scene_filename", "box_in_scene.png");
-
cvNamedWindow("Object", 1);
cvNamedWindow("Image", 1);
cvNamedWindow("Object Correspondence", 1);
- Mat object = imread( objectFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
+ Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
Mat image;
double imgscale = 1;
- Mat _image = imread( sceneFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
+ Mat _image = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
resize(_image, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
{
fprintf( stderr, "Can not load %s and/or %s\n"
"Usage: find_obj_ferns [<object_filename> <scene_filename>]\n",
- objectFileName.c_str(), sceneFileName.c_str() );
+ object_filename, scene_filename );
exit(-1);
}
vector<KeyPoint> objKeypoints, imgKeypoints;
PatchGenerator gen(0,256,5,true,0.8,1.2,-CV_PI/2,CV_PI/2,-CV_PI/2,CV_PI/2);
- string model_filename = format("%s_model.xml.gz", objectFileName.c_str());
+ string model_filename = format("%s_model.xml.gz", object_filename);
printf("Trying to load %s ...\n", model_filename.c_str());
FileStorage fs(model_filename, FileStorage::READ);
if( fs.isOpened() )
t = (double)getTickCount() - t;
printf("%gms\n", t*1000/getTickFrequency());
- int i = 0;
if( found )
{
for( i = 0; i < 4; i++ )
-#include "opencv2/core/core.hpp"\r
#include "opencv2/objdetect/objdetect.hpp"\r
#include "opencv2/highgui/highgui.hpp"\r
-\r
#include <stdio.h>\r
\r
#ifdef HAVE_CONFIG_H \r
-#include "cvconfig.h"\r
+#include <cvconfig.h> \r
#endif\r
#ifdef HAVE_TBB\r
#include "tbb/task_scheduler_init.h"\r
\r
void help()\r
{\r
- printf( "This program demonstrated the use of the latentSVM detector.\n"\r
- "It reads in a trained object model and then uses that to detect the object in an image\n"\r
- "Usage: \n"\r
- "./latentsvmdetect [--image_filename]=<image_filename, cat.jpg as default> \n"\r
- " [--model_filename]=<model_filename, cat.xml as default> \n"\r
- " [--threads_number]=<number of threads, -1 as default>\n"\r
- "Example: \n"\r
- "./latentsvmdetect --image_filename=cat.jpg --model_filename=cat.xml --threads_number=7 \n"\r
- " Press any key to quit.\n");\r
+ printf( "This program demonstrated the use of the latentSVM detector.\n"\r
+ "It reads in a trained object model and then uses that to detect the object in an image\n"\r
+ "Call:\n"\r
+ "./latentsvmdetect [<image_filename> <model_filename> [<threads_number>]]\n"\r
+ " The defaults for image_filename and model_filename are cat.jpg and cat.xml respectively\n"\r
+ " Press any key to quit.\n");\r
}\r
\r
+const char* model_filename = "cat.xml";\r
+const char* image_filename = "cat.jpg";\r
+int tbbNumThreads = -1;\r
\r
void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, int numThreads = -1)\r
{\r
CvMemStorage* storage = cvCreateMemStorage(0);\r
CvSeq* detections = 0;\r
int i = 0;\r
- int64 start = 0, finish = 0;\r
+ int64 start = 0, finish = 0;\r
#ifdef HAVE_TBB\r
tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);\r
- if (numThreads > 0)\r
- {\r
- init.initialize(numThreads);\r
+ if (numThreads > 0)\r
+ {\r
+ init.initialize(numThreads);\r
printf("Number of threads %i\n", numThreads);\r
- }\r
- else\r
- {\r
- printf("Number of threads is not correct for TBB version");\r
- return;\r
- }\r
+ }\r
+ else\r
+ {\r
+ printf("Number of threads is not correct for TBB version");\r
+ return;\r
+ }\r
#endif\r
- start = cvGetTickCount();\r
+\r
+ start = cvGetTickCount();\r
detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, numThreads);\r
- finish = cvGetTickCount();\r
- printf("detection time = %.3f\n", (float)(finish - start) / (float)(cvGetTickFrequency() * 1000000.0));\r
+ finish = cvGetTickCount();\r
+ printf("detection time = %.3f\n", (float)(finish - start) / (float)(cvGetTickFrequency() * 1000000.0));\r
\r
#ifdef HAVE_TBB\r
init.terminate();\r
for( i = 0; i < detections->total; i++ )\r
{\r
CvObjectDetection detection = *(CvObjectDetection*)cvGetSeqElem( detections, i );\r
- CvRect bounding_box = detection.rect;\r
+ CvRect bounding_box = detection.rect;\r
cvRectangle( image, cvPoint(bounding_box.x, bounding_box.y),\r
cvPoint(bounding_box.x + bounding_box.width, \r
- bounding_box.y + bounding_box.height),\r
+ bounding_box.y + bounding_box.height),\r
CV_RGB(255,0,0), 3 );\r
}\r
cvReleaseMemStorage( &storage );\r
}\r
\r
-int main(int argc, const char* argv[])\r
+int main(int argc, char* argv[])\r
{\r
- help();\r
-\r
- CommandLineParser parser(argc, argv);\r
-\r
- string imageFileName = parser.get<string>("image_filename", "cat.jpg");\r
- string modelFileName = parser.get<string>("model_filename", "cat.xml");\r
- int tbbNumThreads = parser.get<int>("threads_number", -1);\r
-\r
- IplImage* image = cvLoadImage(imageFileName.c_str());\r
- if (!image)\r
- {\r
- printf( "Unable to load the image\n"\r
+ help();\r
+ if (argc > 2)\r
+ {\r
+ image_filename = argv[1];\r
+ model_filename = argv[2];\r
+ if (argc > 3)\r
+ {\r
+ tbbNumThreads = atoi(argv[3]);\r
+ }\r
+ }\r
+ IplImage* image = cvLoadImage(image_filename);\r
+ if (!image)\r
+ {\r
+ printf( "Unable to load the image\n"\r
"Pass it as the first parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );\r
- return -1;\r
- }\r
- CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(modelFileName.c_str());\r
- if (!detector)\r
- {\r
- printf( "Unable to load the model\n"\r
+ return -1;\r
+ }\r
+ CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(model_filename);\r
+ if (!detector)\r
+ {\r
+ printf( "Unable to load the model\n"\r
"Pass it as the second parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );\r
- cvReleaseImage( &image );\r
- return -1;\r
- }\r
-\r
+ cvReleaseImage( &image );\r
+ return -1;\r
+ }\r
detect_and_draw_objects( image, detector, tbbNumThreads );\r
-\r
cvNamedWindow( "test", 0 );\r
cvShowImage( "test", image );\r
cvWaitKey(0);\r
cvReleaseImage( &image );\r
cvDestroyAllWindows();\r
\r
- return 0;\r
+ return 0;\r
}\r
* Copyright� 2009, Liu Liu All rights reserved.
*/
-#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgproc/imgproc_c.h"
-#include <iostream>
-
-using namespace std;
-using namespace cv;
void help()
{
- printf("\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
- "It finds the most stable (in size) dark and white regions as a threshold is increased.\n"
- "\n Usage: \n"
- "./mser_sample [--image_filename] <path_and_image_filename, default is 'puzzle.png'> \n"
- "Example: \n"
- "./mser_sample --image_filename=puzzle.png \n");
+ printf("\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
+ "It finds the most stable (in size) dark and white regions as a threshold is increased.\n"
+ "\nCall:\n"
+ "./mser_sample <path_and_image_filename, Default is 'puzzle.png'>\n\n");
}
static CvScalar colors[] =
};
-int main( int argc, const char** argv )
+int main( int argc, char** argv )
{
- help();
-
- CommandLineParser parser(argc, argv);
-
- string imageFileName = parser.get<string>("image_filename", "puzzle.png");
-
- IplImage* img;
-
- img = cvLoadImage( imageFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
- if (!img)
- {
- printf("Unable to load image %s\n",imageFileName.c_str());
- help();
- return 0;
- }
-
- IplImage* rsp = cvLoadImage( imageFileName.c_str(), CV_LOAD_IMAGE_COLOR );
- IplImage* ellipses = cvCloneImage(rsp);
- cvCvtColor(img,ellipses,CV_GRAY2BGR);
- CvSeq* contours;
- CvMemStorage* storage= cvCreateMemStorage();
- IplImage* hsv = cvCreateImage( cvGetSize( rsp ), IPL_DEPTH_8U, 3 );
- cvCvtColor( rsp, hsv, CV_BGR2YCrCb );
- CvMSERParams params = cvMSERParams();//cvMSERParams( 5, 60, cvRound(.2*img->width*img->height), .25, .2 );
-
- double t = (double)cvGetTickCount();
- cvExtractMSER( hsv, NULL, &contours, storage, params );
- t = cvGetTickCount() - t;
- printf( "MSER extracted %d contours in %g ms.\n", contours->total, t/((double)cvGetTickFrequency()*1000.) );
- uchar* rsptr = (uchar*)rsp->imageData;
- // draw mser with different color
- for ( int i = contours->total-1; i >= 0; i-- )
- {
- CvSeq* r = *(CvSeq**)cvGetSeqElem( contours, i );
- for ( int j = 0; j < r->total; j++ )
- {
- CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, r, j );
- rsptr[pt->x*3+pt->y*rsp->widthStep] = bcolors[i%9][2];
- rsptr[pt->x*3+1+pt->y*rsp->widthStep] = bcolors[i%9][1];
- rsptr[pt->x*3+2+pt->y*rsp->widthStep] = bcolors[i%9][0];
- }
- }
- // find ellipse ( it seems cvfitellipse2 have error or sth?
- for ( int i = 0; i < contours->total; i++ )
- {
- CvContour* r = *(CvContour**)cvGetSeqElem( contours, i );
- CvBox2D box = cvFitEllipse2( r );
- box.angle=(float)CV_PI/2-box.angle;
-
- if ( r->color > 0 )
- cvEllipseBox( ellipses, box, colors[9], 2 );
- else
- cvEllipseBox( ellipses, box, colors[2], 2 );
-
- }
-
- cvSaveImage( "rsp.png", rsp );
-
- cvNamedWindow( "original", 0 );
- cvShowImage( "original", img );
-
- cvNamedWindow( "response", 0 );
- cvShowImage( "response", rsp );
-
- cvNamedWindow( "ellipses", 0 );
- cvShowImage( "ellipses", ellipses );
-
- cvWaitKey(0);
-
- cvDestroyWindow( "original" );
- cvDestroyWindow( "response" );
- cvDestroyWindow( "ellipses" );
- cvReleaseImage(&rsp);
- cvReleaseImage(&img);
- cvReleaseImage(&ellipses);
+ char path[1024];
+ IplImage* img;
+ help();
+ if (argc!=2)
+ {
+ strcpy(path,"puzzle.png");
+ img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
+ if (!img)
+ {
+ printf("\nUsage: mser_sample <path_to_image>\n");
+ return 0;
+ }
+ }
+ else
+ {
+ strcpy(path,argv[1]);
+ img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
+ }
+
+ if (!img)
+ {
+ printf("Unable to load image %s\n",path);
+ return 0;
+ }
+ IplImage* rsp = cvLoadImage( path, CV_LOAD_IMAGE_COLOR );
+ IplImage* ellipses = cvCloneImage(rsp);
+ cvCvtColor(img,ellipses,CV_GRAY2BGR);
+ CvSeq* contours;
+ CvMemStorage* storage= cvCreateMemStorage();
+ IplImage* hsv = cvCreateImage( cvGetSize( rsp ), IPL_DEPTH_8U, 3 );
+ cvCvtColor( rsp, hsv, CV_BGR2YCrCb );
+ CvMSERParams params = cvMSERParams();//cvMSERParams( 5, 60, cvRound(.2*img->width*img->height), .25, .2 );
+
+ double t = (double)cvGetTickCount();
+ cvExtractMSER( hsv, NULL, &contours, storage, params );
+ t = cvGetTickCount() - t;
+ printf( "MSER extracted %d contours in %g ms.\n", contours->total, t/((double)cvGetTickFrequency()*1000.) );
+ uchar* rsptr = (uchar*)rsp->imageData;
+ // draw mser with different color
+ for ( int i = contours->total-1; i >= 0; i-- )
+ {
+ CvSeq* r = *(CvSeq**)cvGetSeqElem( contours, i );
+ for ( int j = 0; j < r->total; j++ )
+ {
+ CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, r, j );
+ rsptr[pt->x*3+pt->y*rsp->widthStep] = bcolors[i%9][2];
+ rsptr[pt->x*3+1+pt->y*rsp->widthStep] = bcolors[i%9][1];
+ rsptr[pt->x*3+2+pt->y*rsp->widthStep] = bcolors[i%9][0];
+ }
+ }
+ // find ellipse ( it seems cvfitellipse2 have error or sth?
+ for ( int i = 0; i < contours->total; i++ )
+ {
+ CvContour* r = *(CvContour**)cvGetSeqElem( contours, i );
+ CvBox2D box = cvFitEllipse2( r );
+ box.angle=(float)CV_PI/2-box.angle;
+
+ if ( r->color > 0 )
+ cvEllipseBox( ellipses, box, colors[9], 2 );
+ else
+ cvEllipseBox( ellipses, box, colors[2], 2 );
+
+ }
+
+ cvSaveImage( "rsp.png", rsp );
+
+ cvNamedWindow( "original", 0 );
+ cvShowImage( "original", img );
+
+ cvNamedWindow( "response", 0 );
+ cvShowImage( "response", rsp );
+
+ cvNamedWindow( "ellipses", 0 );
+ cvShowImage( "ellipses", ellipses );
+
+ cvWaitKey(0);
+
+ cvDestroyWindow( "original" );
+ cvDestroyWindow( "response" );
+ cvDestroyWindow( "ellipses" );
+ cvReleaseImage(&rsp);
+ cvReleaseImage(&img);
+ cvReleaseImage(&ellipses);
+
}
*
*/
-#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <string>
-
void help()
{
- printf("\nThis program demonstrates the one way interest point descriptor found in features2d.hpp\n"
- "Correspondences are drawn\n"
- "Usage: \n"
- "./one_way_sample [--path]=<path_to_samples, '../../../opencv/samples/c' as default> \n"
- " [--first_image]=<first image file, scene_l.bmp as default> \n"
- " [--second_image]=<second image file, scene_r.bmp as default>\n"
- "For example: \n"
- " ./one_way_sample --path=../../../opencv/samples/c --first_image=scene_l.bmp --second_image=scene_r.bmp \n");
+ printf("\nThis program demonstrates the one way interest point descriptor found in features2d.hpp\n"
+ "Correspondences are drawn\n");
+ printf("Format: \n./one_way_sample [path_to_samples] [image1] [image2]\n");
+ printf("For example: ./one_way_sample ../../../opencv/samples/c scene_l.bmp scene_r.bmp\n");
}
using namespace cv;
IplImage* DrawCorrespondences(IplImage* img1, const vector<KeyPoint>& features1, IplImage* img2,
const vector<KeyPoint>& features2, const vector<int>& desc_idx);
-int main(int argc, const char** argv)
+int main(int argc, char** argv)
{
- help();
-
- CommandLineParser parser(argc, argv);
-
- std::string path_name = parser.get<string>("path", "../../../opencv/samples/c");
- std::string img1_name = path_name + "/" + parser.get<string>("first_image", "scene_l.bmp");
- std::string img2_name = path_name + "/" + parser.get<string>("second_image", "scene_r.bmp");
-
const char images_list[] = "one_way_train_images.txt";
const CvSize patch_size = cvSize(24, 24);
- const int pose_count = 1; //50
+ const int pose_count = 50;
+
+ if (argc != 3 && argc != 4)
+ {
+ help();
+ return 0;
+ }
+
+ std::string path_name = argv[1];
+ std::string img1_name = path_name + "/" + std::string(argv[2]);
+ std::string img2_name = path_name + "/" + std::string(argv[3]);
printf("Reading the images...\n");
IplImage* img1 = cvLoadImage(img1_name.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
-#include "opencv2/core/core.hpp"
#include "opencv2/ml/ml.hpp"
#include "opencv2/core/core_c.h"
#include <stdio.h>
#include <map>
-using namespace std;
-using namespace cv;
-
void help()
{
- printf(
- "\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees:\n"
- "CvDTree dtree;\n"
- "CvBoost boost;\n"
- "CvRTrees rtrees;\n"
- "CvERTrees ertrees;\n"
- "CvGBTrees gbtrees;\n"
- "Usage: \n"
- " ./tree_engine [--response_column]=<specified the 0-based index of the response, 0 as default> \n"
- "[--categorical_response]=<specifies that the response is categorical, 0-false, 1-true, 0 as default> \n"
- "[--csv_filename]=<is the name of training data file in comma-separated value format> \n"
- );
+ printf(
+ "\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees:\n"
+ "CvDTree dtree;\n"
+ "CvBoost boost;\n"
+ "CvRTrees rtrees;\n"
+ "CvERTrees ertrees;\n"
+ "CvGBTrees gbtrees;\n"
+ "Call:\n\t./tree_engine [-r <response_column>] [-c] <csv filename>\n"
+ "where -r <response_column> specifies the 0-based index of the response (0 by default)\n"
+ "-c specifies that the response is categorical (it's ordered by default) and\n"
+ "<csv filename> is the name of training data file in comma-separated value format\n\n");
}
printf("\n");
}
-int main(int argc, const char** argv)
+int main(int argc, char** argv)
{
- help();
-
- CommandLineParser parser(argc, argv);
-
- string filename = parser.get<string>("csv_filename");
- int response_idx = parser.get<int>("response_column", 0);
- bool categorical_response = (bool)parser.get<int>("categorical_response", 1);
-
- if(filename.empty())
+ if(argc < 2)
{
- printf("\n Please, select value for --csv_filename key \n");
help();
- return -1;
+ return 0;
+ }
+ const char* filename = 0;
+ int response_idx = 0;
+ bool categorical_response = false;
+
+ for(int i = 1; i < argc; i++)
+ {
+ if(strcmp(argv[i], "-r") == 0)
+ sscanf(argv[++i], "%d", &response_idx);
+ else if(strcmp(argv[i], "-c") == 0)
+ categorical_response = true;
+ else if(argv[i][0] != '-' )
+ filename = argv[i];
+ else
+ {
+ printf("Error. Invalid option %s\n", argv[i]);
+ help();
+ return -1;
+ }
}
- printf("\nReading in %s...\n\n",filename.c_str());
+ printf("\nReading in %s...\n\n",filename);
CvDTree dtree;
CvBoost boost;
CvRTrees rtrees;
CvTrainTestSplit spl( 0.5f );
- if ( data.read_csv( filename.c_str() ) == 0)
+ if ( data.read_csv( filename ) == 0)
{
data.set_response_idx( response_idx );
if(categorical_response)
-#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
const string svmsDir = "/svms";
const string plotsDir = "/plots";
-void help()
-{
- printf("\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. \n"
- "It shows how to use detectors, descriptors and recognition methods \n"
- "Usage: \n"
- "Format:\n"
- "./bagofwords_classification \n"
- "--voc_path=<Path to Pascal VOC data (e.g. /home/my/VOCdevkit/VOC2010). \n"
- " Note: VOC2007-VOC2010 are supported.> \n"
- "--result_directory=<Path to result directory. Following folders will be created in [result directory]: \n"
- " bowImageDescriptors - to store image descriptors, \n"
- " svms - to store trained svms, \n"
- " plots - to store files for plots creating. \n"
- "[--feature_detector]=<Feature detector name (e.g. SURF, FAST...) - see createFeatureDetector() function in detectors.cpp \n"
- " Currently 12/2010, this is FAST, STAR, SIFT, SURF, MSER, GFTT, HARRIS> \n"
- "[--descriptor_extractor]=<Descriptor extractor name (e.g. SURF, SIFT) - see createDescriptorExtractor() function in descriptors.cpp \n"
- " Currently 12/2010, this is SURF, OpponentSIFT, SIFT, OpponentSURF, BRIEF> \n"
- "[--descriptor_matcher]=<Descriptor matcher name (e.g. BruteForce) - see createDescriptorMatcher() function in matchers.cpp \n"
- " Currently 12/2010, this is BruteForce, BruteForce-L1, FlannBased, BruteForce-Hamming, BruteForce-HammingLUT> \n"
- "\n");
+void help(char** argv)
+{
+ cout << "\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. \n"
+ << "It shows how to use detectors, descriptors and recognition methods \n"
+ "Using OpenCV version %s\n" << CV_VERSION << "\n"
+ << "Call: \n"
+ << "Format:\n ./" << argv[0] << " [VOC path] [result directory] \n"
+ << " or: \n"
+ << " ./" << argv[0] << " [VOC path] [result directory] [feature detector] [descriptor extractor] [descriptor matcher] \n"
+ << "\n"
+ << "Input parameters: \n"
+ << "[VOC path] Path to Pascal VOC data (e.g. /home/my/VOCdevkit/VOC2010). Note: VOC2007-VOC2010 are supported. \n"
+ << "[result directory] Path to result directory. Following folders will be created in [result directory]: \n"
+ << " bowImageDescriptors - to store image descriptors, \n"
+ << " svms - to store trained svms, \n"
+ << " plots - to store files for plots creating. \n"
+ << "[feature detector] Feature detector name (e.g. SURF, FAST...) - see createFeatureDetector() function in detectors.cpp \n"
+ << " Currently 12/2010, this is FAST, STAR, SIFT, SURF, MSER, GFTT, HARRIS \n"
+ << "[descriptor extractor] Descriptor extractor name (e.g. SURF, SIFT) - see createDescriptorExtractor() function in descriptors.cpp \n"
+ << " Currently 12/2010, this is SURF, OpponentSIFT, SIFT, OpponentSURF, BRIEF \n"
+ << "[descriptor matcher] Descriptor matcher name (e.g. BruteForce) - see createDescriptorMatcher() function in matchers.cpp \n"
+ << " Currently 12/2010, this is BruteForce, BruteForce-L1, FlannBased, BruteForce-Hamming, BruteForce-HammingLUT \n"
+ << "\n";
}
-int main(int argc, const char** argv)
+int main(int argc, char** argv)
{
- help();
-
- CommandLineParser parser(argc, argv);
-
- const string vocPath = parser.get<string>("--voc_path");
- const string resPath = parser.get<string>("--result_directory");
- const string featureDetectName = parser.get<string>("--feature_detector");
- const string descExtName = parser.get<string>("--descriptor_extractor");
- const string descMatchName = parser.get<string>("--descriptor_matcher");
-
- if( vocPath.empty() || resPath.empty())
+ if( argc != 3 && argc != 6 )
{
- help();
- printf("Cannot find --voc_path=%s or --result_directory=%s\n", vocPath.c_str(), resPath.c_str());
+ help(argv);
return -1;
}
+
+ const string vocPath = argv[1], resPath = argv[2];
+
// Read or set default parameters
string vocName;
DDMParams ddmParams;
else
{
vocName = getVocName(vocPath);
- if( featureDetectName.empty() || descExtName.empty() || descMatchName.empty())
+ if( argc!= 6 )
{
cout << "Feature detector, descriptor extractor, descriptor matcher must be set" << endl;
return -1;
}
- ddmParams = DDMParams( featureDetectName.c_str(), descExtName.c_str(), descMatchName.c_str()); // from command line
+ ddmParams = DDMParams( argv[3], argv[4], argv[5] ); // from command line
// vocabTrainParams and svmTrainParamsExt is set by defaults
paramsFS.open( resPath + "/" + paramsFile, FileStorage::WRITE );
if( paramsFS.isOpened() )
-#include "opencv2/core/core.hpp"
#include "opencv2/video/background_segm.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdio.h>
using namespace cv;
-using namespace std;
void help()
{
printf("\nDo background segmentation, especially demonstrating the use of cvUpdateBGStatModel().\n"
-" Learns the background at the start and then segments.\n"
-" Learning is togged by the space key. Will read from file or camera\n"
-"Usage: \n"
-" ./bgfg_segm [--file_name]=<input file, camera as defautl>\n\n");
+"Learns the background at the start and then segments.\n"
+"Learning is toggled by the space key. Will read from file or camera\n"
+"Call:\n"
+"./bgfg_segm [file name -- if no name, read from camera]\n\n");
}
//this is a sample for foreground detection functions
-int main(int argc, const char** argv)
+int main(int argc, char** argv)
{
- help();
-
- CommandLineParser parser(argc, argv);
-
- string fileName = parser.get<string>("file_name", "0");
VideoCapture cap;
bool update_bg_model = true;
-
- if(fileName == "0" )
+ if( argc < 2 )
cap.open(0);
else
- cap.open(fileName.c_str());
-
+ cap.open(argv[1]);
+ help();
+
if( !cap.isOpened() )
{
- help();
printf("can not open camera or video file\n");
return -1;
}
* Created on: Oct 17, 2010
* Author: ethan
*/
-#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <vector>
#include <iostream>
-using namespace std;
using namespace cv;
using std::cout;
using std::endl;
using std::vector;
-void help()
-{
- printf("\nThis program shows how to use BRIEF descriptor to match points in features2d\n"
- "It takes in two images, finds keypoints and matches them displaying matches and final homography warped results\n"
- "Usage: \n"
- " ./brief_match_test [--first_file]=<first file name, left01.jpg as default> \n"
- " [--second_file]=<second file name, left02.jpg as default> \n"
- "Example: \n"
- "./brief_match_test --first_file=left01.jpg --second_file=left02.jpg \n");
+void help(char **av)
+{
+ std::cerr << "usage: " << av[0] << " im1.jpg im2.jpg"
+ << "\n"
+ << "This program shows how to use BRIEF descriptor to match points in features2d\n"
+ << "It takes in two images, finds keypoints and matches them displaying matches and final homography warped results\n"
+ << endl;
}
//Copy (x,y) location of descriptor matches found from KeyPoint data structures into Point2f vectors
-int main(int ac, const char ** av)
+int main(int ac, char ** av)
{
- help();
-
- CommandLineParser parser(ac, av);
-
- string im1_name, im2_name;
- im1_name = parser.get<string>("first_file", "left01.jpg");
- im2_name = parser.get<string>("second_file", "left02.jpg");
-
- if (im1_name.empty() || im2_name.empty())
+ if (ac != 3)
{
- help();
- printf("\n You have to indicate two files first_file and second_file \n");
- return -1;
+ help(av);
+ return 1;
}
+ string im1_name, im2_name;
+ im1_name = av[1];
+ im2_name = av[2];
Mat im1 = imread(im1_name, CV_LOAD_IMAGE_GRAYSCALE);
Mat im2 = imread(im2_name, CV_LOAD_IMAGE_GRAYSCALE);
using namespace cv;
using namespace std;
+const char * usage =
+" \nexample command line for calibration from a live feed.\n"
+" calibration -w 4 -h 5 -s 0.025 -o camera.yml -op -oe\n"
+" \n"
+" example command line for calibration from a list of stored images:\n"
+" imagelist_creator image_list.xml *.png\n"
+" calibration -w 4 -h 5 -s 0.025 -o camera.yml -op -oe image_list.xml\n"
+" where image_list.xml is the standard OpenCV XML/YAML\n"
+" use imagelist_creator to create the xml or yaml list\n"
+" file consisting of the list of strings, e.g.:\n"
+" \n"
+"<?xml version=\"1.0\"?>\n"
+"<opencv_storage>\n"
+"<images>\n"
+"view000.png\n"
+"view001.png\n"
+"<!-- view002.png -->\n"
+"view003.png\n"
+"view010.png\n"
+"one_extra_view.jpg\n"
+"</images>\n"
+"</opencv_storage>\n";
+
+
+
+
+const char* liveCaptureHelp =
+ "When the live video from camera is used as input, the following hot-keys may be used:\n"
+ " <ESC>, 'q' - quit the program\n"
+ " 'g' - start capturing images\n"
+ " 'u' - switch undistortion on/off\n";
+
void help()
{
printf( "This is a camera calibration sample.\n"
"Usage: calibration\n"
- " -w=<board_width> # the number of inner corners per one of board dimension\n"
- " -h=<board_height> # the number of inner corners per another board dimension\n"
- " [-pt]=<pattern> # the type of pattern: chessboard or circles' grid\n"
- " [-n]=<number_of_frames> # the number of frames to use for calibration\n"
+ " -w <board_width> # the number of inner corners per one of board dimension\n"
+ " -h <board_height> # the number of inner corners per another board dimension\n"
+ " [-pt <pattern>] # the type of pattern: chessboard or circles' grid\n"
+ " [-n <number_of_frames>] # the number of frames to use for calibration\n"
" # (if not specified, it will be set to the number\n"
" # of board views actually available)\n"
- " [-d]=<delay> # a minimum delay in ms between subsequent attempts to capture a next view\n"
+ " [-d <delay>] # a minimum delay in ms between subsequent attempts to capture a next view\n"
" # (used only for video capturing)\n"
- " [-s]=<squareSize> # square size in some user-defined units (1 by default)\n"
- " [-o]=<out_camera_params> # the output filename for intrinsic [and extrinsic] parameters\n"
+ " [-s <squareSize>] # square size in some user-defined units (1 by default)\n"
+ " [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
" [-op] # write detected feature points\n"
" [-oe] # write extrinsic parameters\n"
" [-zt] # assume zero tangential distortion\n"
- " [-a]=<aspectRatio> # fix aspect ratio (fx/fy)\n"
+ " [-a <aspectRatio>] # fix aspect ratio (fx/fy)\n"
" [-p] # fix the principal point at the center\n"
" [-v] # flip the captured images around the horizontal axis\n"
" [-V] # use a video file, and not an image list, uses\n"
" # [input_data] string for the video file name\n"
" [-su] # show undistorted images after calibration\n"
- " [-input_data]=<data file> # input data, one of the following:\n"
+ " [input_data] # input data, one of the following:\n"
" # - text file with a list of the images of the board\n"
" # the text file can be generated with imagelist_creator\n"
" # - name of video file with a video of the board\n"
- " [-cameraId]=<camera index># if input_data not specified, a live view from the camera is used\n"
- " \nExample command line for calibration from a live feed:\n"
- " ./calibration -w=4 -h=5 -s=0.025 -o=camera.yml -op -oe\n"
- " \n"
- " Example command line for calibration from a list of stored images:\n"
- " imagelist_creator image_list.xml *.png\n"
- " ./calibration -w=4 -h-5 -s=0.025 -o=camera.yml -op -oe -input_data=image_list.xml\n"
- " where image_list.xml is the standard OpenCV XML/YAML\n"
- " use imagelist_creator to create the xml or yaml list\n"
- " file consisting of the list of strings, e.g.:\n"
- " \n"
- "<?xml version=\"1.0\"?>\n"
- "<opencv_storage>\n"
- "<images>\n"
- "view000.png\n"
- "view001.png\n"
- "<!-- view002.png -->\n"
- "view003.png\n"
- "view010.png\n"
- "one_extra_view.jpg\n"
- "</images>\n"
- "</opencv_storage>\n"
- "\nWhen the live video from camera is used as input, the following hot-keys may be used:\n"
- " <ESC>, 'q' - quit the program\n"
- " 'g' - start capturing images\n"
- " 'u' - switch undistortion on/off\n");
+ " # if input_data not specified, a live view from the camera is used\n"
+ "\n" );
+ printf("\n%s",usage);
+ printf( "\n%s", liveCaptureHelp );
}
enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
}
-int main( int argc, const char** argv )
+int main( int argc, char** argv )
{
- help();
- CommandLineParser parser(argc, argv);
-
Size boardSize, imageSize;
- boardSize.width = parser.get<int>("w");
- boardSize.height = parser.get<int>("h");
- float squareSize = parser.get<float>("s", 1.f);
- float aspectRatio = parser.get<float>("a", 1.f);
+ float squareSize = 1.f, aspectRatio = 1.f;
Mat cameraMatrix, distCoeffs;
- string outputFilename = parser.get<string>("o","out_camera_data.yml");
- string inputFilename = parser.get<string>("input_data");
- int nframes = parser.get<int>("n", 10);
- bool writeExtrinsics = parser.get<bool>("oe");
- bool writePoints = parser.get<bool>("op");
- bool flipVertical = parser.get<bool>("v");
- bool showUndistorted = parser.get<bool>("su");
- bool videofile = parser.get<bool>("V");
- unsigned int delay = parser.get<unsigned int>("d", 1000);
- unsigned int cameraId = parser.get<unsigned int>("cameraId",0);
+ const char* outputFilename = "out_camera_data.yml";
+ const char* inputFilename = 0;
+
+ int i, nframes = 10;
+ bool writeExtrinsics = false, writePoints = false;
bool undistortImage = false;
int flags = 0;
VideoCapture capture;
+ bool flipVertical = false;
+ bool showUndistorted = false;
+ bool videofile = false;
+ int delay = 1000;
clock_t prevTimestamp = 0;
int mode = DETECTION;
+ int cameraId = 0;
vector<vector<Point2f> > imagePoints;
vector<string> imageList;
Pattern pattern = CHESSBOARD;
- if( (boardSize.width < 1) || (boardSize.height < 1))
+ if( argc < 2 )
{
help();
- return fprintf( stderr, "Invalid board width or height. It must be more than zero\n" ), -1;
+ return 0;
}
- if(parser.get<string>("pt")=="circles")
- pattern = CIRCLES_GRID;
- else if(parser.get<string>("pt")=="acircles")
- pattern = ASYMMETRIC_CIRCLES_GRID;
- if(squareSize <= 0)
- {
- help();
- return fprintf( stderr, "Invalid board square width. It must be more than zero.\n" ), -1;
- }
- if(nframes < 4)
+ for( i = 1; i < argc; i++ )
{
- help();
- return printf("Invalid number of images. It must be more than 3\n" ), -1;
- }
- if(aspectRatio <= 0)
- {
- help();
- return printf("Invalid aspect ratio. It must be more than zero\n" ), -1;
- }
+ const char* s = argv[i];
+ if( strcmp( s, "-w" ) == 0 )
+ {
+ if( sscanf( argv[++i], "%u", &boardSize.width ) != 1 || boardSize.width <= 0 )
+ return fprintf( stderr, "Invalid board width\n" ), -1;
+ }
+ else if( strcmp( s, "-h" ) == 0 )
+ {
+ if( sscanf( argv[++i], "%u", &boardSize.height ) != 1 || boardSize.height <= 0 )
+ return fprintf( stderr, "Invalid board height\n" ), -1;
+ }
+ else if( strcmp( s, "-pt" ) == 0 )
+ {
+ i++;
+ if( !strcmp( argv[i], "circles" ) )
+ pattern = CIRCLES_GRID;
+ else if( !strcmp( argv[i], "acircles" ) )
+ pattern = ASYMMETRIC_CIRCLES_GRID;
+ else if( !strcmp( argv[i], "chessboard" ) )
+ pattern = CHESSBOARD;
+ else
+ return fprintf( stderr, "Invalid pattern type: must be chessboard or circles\n" ), -1;
+ }
+ else if( strcmp( s, "-s" ) == 0 )
+ {
+ if( sscanf( argv[++i], "%f", &squareSize ) != 1 || squareSize <= 0 )
+ return fprintf( stderr, "Invalid board square width\n" ), -1;
+ }
+ else if( strcmp( s, "-n" ) == 0 )
+ {
+ if( sscanf( argv[++i], "%u", &nframes ) != 1 || nframes <= 3 )
+ return printf("Invalid number of images\n" ), -1;
+ }
+ else if( strcmp( s, "-a" ) == 0 )
+ {
+ if( sscanf( argv[++i], "%f", &aspectRatio ) != 1 || aspectRatio <= 0 )
+ return printf("Invalid aspect ratio\n" ), -1;
+ flags |= CV_CALIB_FIX_ASPECT_RATIO;
+ }
+ else if( strcmp( s, "-d" ) == 0 )
+ {
+ if( sscanf( argv[++i], "%u", &delay ) != 1 || delay <= 0 )
+ return printf("Invalid delay\n" ), -1;
+ }
+ else if( strcmp( s, "-op" ) == 0 )
+ {
+ writePoints = true;
+ }
+ else if( strcmp( s, "-oe" ) == 0 )
+ {
+ writeExtrinsics = true;
+ }
+ else if( strcmp( s, "-zt" ) == 0 )
+ {
+ flags |= CV_CALIB_ZERO_TANGENT_DIST;
+ }
+ else if( strcmp( s, "-p" ) == 0 )
+ {
+ flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
+ }
+ else if( strcmp( s, "-v" ) == 0 )
+ {
+ flipVertical = true;
+ }
+ else if( strcmp( s, "-V" ) == 0 )
+ {
+ videofile = true;
+ }
+ else if( strcmp( s, "-o" ) == 0 )
+ {
+ outputFilename = argv[++i];
+ }
+ else if( strcmp( s, "-su" ) == 0 )
+ {
+ showUndistorted = true;
+ }
+ else if( s[0] != '-' )
+ {
+ if( isdigit(s[0]) )
+ sscanf(s, "%d", &cameraId);
+ else
+ inputFilename = s;
+ }
else
- flags |= CV_CALIB_FIX_ASPECT_RATIO;
- if(!delay)
- {
- help();
- return printf("Invalid delay. It must be more than zero.\n" ), -1;
+ return fprintf( stderr, "Unknown option %s", s ), -1;
}
- if(parser.get<bool>("zt"))
- flags |= CV_CALIB_ZERO_TANGENT_DIST;
- if(parser.get<bool>("p"))
- flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
- if( !inputFilename.empty() )
+ if( inputFilename )
{
if( !videofile && readStringList(inputFilename, imageList) )
mode = CAPTURING;
if( !imageList.empty() )
nframes = (int)imageList.size();
+ if( capture.isOpened() )
+ printf( "%s", liveCaptureHelp );
+
namedWindow( "Image View", 1 );
- int i;
for(i = 0;;i++)
{
Mat view, viewGray;
-#include "opencv2/core/core.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
-#include <stdio.h>
+#include <iostream>
#include <ctype.h>
using namespace cv;
void help()
{
- printf("\nThis is a demo that shows mean-shift based tracking\n"
- "You select a color objects such as your face and it tracks it.\n"
- "This reads from video camera (0 by default, or the camera number the user enters\n"
- "Usage:\n"
- "./camshiftdemo [--cameraIndex]=<camera number, zero as default>\n"
- "\nHot keys: \n"
- "\tESC - quit the program\n"
- "\tc - stop the tracking\n"
- "\tb - switch to/from backprojection view\n"
- "\th - show/hide object histogram\n"
- "To initialize tracking, select the object with mouse\n");
+ cout << "\nThis is a demo that shows mean-shift based tracking\n"
+ << "You select a color objects such as your face and it tracks it.\n"
+ << "This reads from video camera (0 by default, or the camera number the user enters)\n"
+ << "Call:\n"
+ << "\n./camshiftdemo [camera number]"
+ << "\n" << endl;
+
+ cout << "\n\nHot keys: \n"
+ "\tESC - quit the program\n"
+ "\tc - stop the tracking\n"
+ "\tb - switch to/from backprojection view\n"
+ "\th - show/hide object histogram\n"
+ "To initialize tracking, select the object with mouse\n" << endl;
}
Mat image;
-int main( int argc, const char** argv )
+int main( int argc, char** argv )
{
- help();
-
- CommandLineParser parser(argc, argv);
-
- unsigned int cameraInd = parser.get<unsigned int>("cameraIndex", 0);
VideoCapture cap;
Rect trackWindow;
RotatedRect trackBox;
float hranges[] = {0,180};
const float* phranges = hranges;
- cap.open(cameraInd);
+ if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
+ cap.open(argc == 2 ? argv[1][0] - '0' : 0);
+ else if( argc == 2 )
+ cap.open(argv[1]);
if( !cap.isOpened() )
{
help();
- printf("***Could not initialize capturing...***\n");
+ cout << "***Could not initialize capturing...***\n";
return 0;
}
+ help();
+
namedWindow( "Histogram", 1 );
namedWindow( "CamShift Demo", 1 );
setMouseCallback( "CamShift Demo", onMouse, 0 );
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
-#include <stdio.h>
+#include <iostream>
using namespace cv;
using namespace std;
void help()
{
- printf("\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
- "edge template and a query edge image.\n"
- "Usage:\n"
- "./chamfer [<image edge map, logo_in_clutter.png as default>\n"
- "<template edge map, logo.png as default>]\n"
- "Example: \n"
- " ./chamfer logo_in_clutter.png logo.png\n");
+ cout <<
+ "\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
+ "edge template and a query edge image.\n"
+ "Call:\n"
+ "./chamfer [<image edge map> <template edge map>]\n"
+ "By default\n"
+ "the inputs are ./chamfer logo_in_clutter.png logo.png\n"<< endl;
}
-int main( int argc, const char** argv )
+int main( int argc, char** argv )
{
- help();
-
- CommandLineParser parser(argc, argv);
-
- string image = parser.get<string>("0","logo_in_clutter.png");
- string tempLate = parser.get<string>("1","logo.png");
- Mat img = imread(image,0);
+ if( argc != 1 && argc != 3 )
+ {
+ help();
+ return 0;
+ }
+ Mat img = imread(argc == 3 ? argv[1] : "logo_in_clutter.png", 0);
Mat cimg;
cvtColor(img, cimg, CV_GRAY2BGR);
- Mat tpl = imread(tempLate,0);
-
+ Mat tpl = imread(argc == 3 ? argv[2] : "logo.png", 0);
+
// if the image and the template are not edge maps but normal grayscale images,
// you might want to uncomment the lines below to produce the maps. You can also
// run Sobel instead of Canny.
int best = chamerMatching( img, tpl, results, costs );
if( best < 0 )
{
- printf("not found;\n");
+ cout << "not found;\n";
return 0;
}