if (video.size() == 1 && isdigit(video[0]))
capture.open(parser.get<int>("@video"));
else
- capture.open(video);
+ capture.open(samples::findFileOrKeep(video)); // keep GStreamer pipelines
int nframes = 0;
if (capture.isOpened())
{
if (file.empty())
cap.open(camera);
else
+ {
+ file = samples::findFileOrKeep(file); // keep GStreamer pipelines
cap.open(file.c_str());
+ }
if (!cap.isOpened())
{
cout << "Can not open video stream: '" << (file.empty() ? "<camera>" : file) << "'" << endl;
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
+ size_t dir_pos = filename.rfind('/');
+ if (dir_pos == string::npos)
+ dir_pos = filename.rfind('\\');
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
- l.push_back((string)*it);
+ {
+ string fname = (string)*it;
+ if (dir_pos != string::npos)
+ {
+ string fpath = samples::findFile(filename.substr(0, dir_pos + 1) + fname, false);
+ if (fpath.empty())
+ {
+ fpath = samples::findFile(fname);
+ }
+ fname = fpath;
+ }
+ else
+ {
+ fname = samples::findFile(fname);
+ }
+ l.push_back(fname);
+ }
return true;
}
if( !inputFilename.empty() )
{
- if( !videofile && readStringList(inputFilename, imageList) )
+ if( !videofile && readStringList(samples::findFile(inputFilename), imageList) )
mode = CAPTURING;
else
- capture.open(inputFilename);
+ capture.open(samples::findFileOrKeep(inputFilename));
}
else
capture.open(cameraId);
* The program takes as input a source and a destination image (for 1-3 methods)
* and outputs the cloned image.
*
-* Download test images from opencv_extra folder @github.
-*
+* Download test images from opencv_extra repository.
*/
#include "opencv2/photo.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>
-#include <stdlib.h>
using namespace std;
using namespace cv;
int main()
{
cout << endl;
+ cout << "Note: specify OPENCV_SAMPLES_DATA_PATH_HINT=<opencv_extra>/testdata/cv" << endl << endl;
cout << "Cloning Module" << endl;
cout << "---------------" << endl;
cout << "Options: " << endl;
if(num == 1)
{
string folder = "cloning/Normal_Cloning/";
- string original_path1 = folder + "source1.png";
- string original_path2 = folder + "destination1.png";
- string original_path3 = folder + "mask.png";
+ string original_path1 = samples::findFile(folder + "source1.png");
+ string original_path2 = samples::findFile(folder + "destination1.png");
+ string original_path3 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat destination = imread(original_path2, IMREAD_COLOR);
seamlessClone(source, destination, mask, p, result, 1);
imshow("Output",result);
- imwrite(folder + "cloned.png", result);
+ imwrite("cloned.png", result);
}
else if(num == 2)
{
string folder = "cloning/Mixed_Cloning/";
- string original_path1 = folder + "source1.png";
- string original_path2 = folder + "destination1.png";
- string original_path3 = folder + "mask.png";
+ string original_path1 = samples::findFile(folder + "source1.png");
+ string original_path2 = samples::findFile(folder + "destination1.png");
+ string original_path3 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat destination = imread(original_path2, IMREAD_COLOR);
seamlessClone(source, destination, mask, p, result, 2);
imshow("Output",result);
- imwrite(folder + "cloned.png", result);
+ imwrite("cloned.png", result);
}
else if(num == 3)
{
string folder = "cloning/Monochrome_Transfer/";
- string original_path1 = folder + "source1.png";
- string original_path2 = folder + "destination1.png";
- string original_path3 = folder + "mask.png";
+ string original_path1 = samples::findFile(folder + "source1.png");
+ string original_path2 = samples::findFile(folder + "destination1.png");
+ string original_path3 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat destination = imread(original_path2, IMREAD_COLOR);
seamlessClone(source, destination, mask, p, result, 3);
imshow("Output",result);
- imwrite(folder + "cloned.png", result);
+ imwrite("cloned.png", result);
}
else if(num == 4)
{
- string folder = "cloning/Color_Change/";
- string original_path1 = folder + "source1.png";
- string original_path2 = folder + "mask.png";
+ string folder = "cloning/color_change/";
+ string original_path1 = samples::findFile(folder + "source1.png");
+ string original_path2 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat mask = imread(original_path2, IMREAD_COLOR);
colorChange(source, mask, result, 1.5, .5, .5);
imshow("Output",result);
- imwrite(folder + "cloned.png", result);
+ imwrite("cloned.png", result);
}
else if(num == 5)
{
string folder = "cloning/Illumination_Change/";
- string original_path1 = folder + "source1.png";
- string original_path2 = folder + "mask.png";
+ string original_path1 = samples::findFile(folder + "source1.png");
+ string original_path2 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat mask = imread(original_path2, IMREAD_COLOR);
illuminationChange(source, mask, result, 0.2f, 0.4f);
imshow("Output",result);
- imwrite(folder + "cloned.png", result);
+ imwrite("cloned.png", result);
}
else if(num == 6)
{
string folder = "cloning/Texture_Flattening/";
- string original_path1 = folder + "source1.png";
- string original_path2 = folder + "mask.png";
+ string original_path1 = samples::findFile(folder + "source1.png");
+ string original_path2 = samples::findFile(folder + "mask.png");
Mat source = imread(original_path1, IMREAD_COLOR);
Mat mask = imread(original_path2, IMREAD_COLOR);
textureFlattening(source, mask, result, 30, 45, 3);
imshow("Output",result);
- imwrite(folder + "cloned.png", result);
+ imwrite("cloned.png", result);
+ }
+ else
+ {
+ cerr << "Invalid selection: " << num << endl;
+ exit(1);
}
waitKey(0);
}
* Result: The cloned image will be displayed.
*/
-#include <signal.h>
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>
-#include <stdlib.h>
// we're NOT "using namespace std;" here, to avoid collisions between the beta variable and std::beta in c++17
using std::cin;
cout << "Enter Destination Image: ";
cin >> dest;
- img0 = imread(src);
+ img0 = imread(samples::findFile(src));
- img2 = imread(dest);
+ img2 = imread(samples::findFile(dest));
if(img0.empty())
{
cout << "Blue: ";
cin >> blue;
- img0 = imread(src);
+ img0 = imread(samples::findFile(src));
if(img0.empty())
{
cout << "beta: ";
cin >> beta;
- img0 = imread(src);
+ img0 = imread(samples::findFile(src));
if(img0.empty())
{
cout << "kernel_size: ";
cin >> kernel_size;
- img0 = imread(src);
+ img0 = imread(samples::findFile(src));
if(img0.empty())
{
int main( int argc, const char** argv )
{
- CommandLineParser parser(argc, argv, "{@image|../data/stuff.jpg|image for converting to a grayscale}");
+ CommandLineParser parser(argc, argv, "{@image|stuff.jpg|image for converting to a grayscale}");
parser.about("\nThis program demonstrates connected components and use of the trackbar\n");
parser.printMessage();
cout << "\nThe image is converted to grayscale and displayed, another image has a trackbar\n"
"that controls thresholding and thereby the extracted contours which are drawn in color\n";
String inputImage = parser.get<string>(0);
- img = imread(inputImage, IMREAD_GRAYSCALE);
+ img = imread(samples::findFile(inputImage), IMREAD_GRAYSCALE);
if(img.empty())
{
int main(int argc, char **argv)
{
- CommandLineParser parser(argc, argv, "{@input | ../data/lena.jpg | input image}");
+ CommandLineParser parser(argc, argv, "{@input | lena.jpg | input image}");
parser.about("This program demonstrates using mouse events\n");
parser.printMessage();
cout << "\n\tleft mouse button - set a point to create mask shape\n"
"\tmiddle mouse button - reset\n";
String input_image = parser.get<String>("@input");
- src = imread(input_image);
+ src = imread(samples::findFile(input_image));
if (src.empty())
- {
+ {
printf("Error opening image: %s\n", input_image.c_str());
return 0;
- }
+ }
namedWindow("Source", WINDOW_AUTOSIZE);
setMouseCallback("Source", mouseHandler, NULL);
return 1;
}
- std::string cascadeFrontalfilename = "../../data/lbpcascades/lbpcascade_frontalface.xml";
+ std::string cascadeFrontalfilename = samples::findFile("data/lbpcascades/lbpcascade_frontalface.xml");
cv::Ptr<cv::CascadeClassifier> cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = makePtr<CascadeDetectorAdapter>(cascade);
if ( cascade->empty() )
// Print usage for the demhist sample. The diff drops the hard-coded
// "../data/" prefix from the default image name because the sample now
// resolves files through the samples data search path (see the matching
// change to the CommandLineParser keys below) — TODO confirm against the
// full file, which is not visible in this hunk.
static void help()
{
std::cout << "\nThis program demonstrates the use of calcHist() -- histogram creation.\n"
- << "Usage: \n" << "demhist [image_name -- Defaults to ../data/baboon.jpg]" << std::endl;
+ << "Usage: \n" << "demhist [image_name -- Defaults to baboon.jpg]" << std::endl;
}
const char* keys =
{
- "{help h||}{@image|../data/baboon.jpg|input image file}"
+ "{help h||}{@image|baboon.jpg|input image file}"
};
int main( int argc, const char** argv )
string inputImage = parser.get<string>(0);
// Load the source image. HighGUI use.
- image = imread( inputImage, 0 );
+ image = imread(samples::findFile(inputImage), IMREAD_GRAYSCALE);
if(image.empty())
{
std::cerr << "Cannot read image file: " << inputImage << std::endl;
{
cout << "\n This program demonstrates how to use BLOB to detect and filter region \n"
"Usage: \n"
- " ./detect_blob <image1(../data/detect_blob.png as default)>\n"
+ " ./detect_blob <image1(detect_blob.png as default)>\n"
"Press a key when image window is active to change descriptor";
}
int main(int argc, char *argv[])
{
- vector<String> fileName;
- Mat img(600, 800, CV_8UC1);
- cv::CommandLineParser parser(argc, argv, "{@input |../data/detect_blob.png| }{h help | | }");
+ String fileName;
+ cv::CommandLineParser parser(argc, argv, "{@input |detect_blob.png| }{h help | | }");
if (parser.has("h"))
{
help();
return 0;
}
- fileName.push_back(parser.get<string>("@input"));
- img = imread(fileName[0], IMREAD_COLOR);
- if (img.rows*img.cols <= 0)
+ fileName = parser.get<string>("@input");
+ Mat img = imread(samples::findFile(fileName), IMREAD_COLOR);
+ if (img.empty())
{
- cout << "Image " << fileName[0] << " is empty or cannot be found\n";
- return(0);
+ cout << "Image " << fileName << " is empty or cannot be found\n";
+ return 1;
}
SimpleBlobDetector::Params pDefaultBLOB;
vector< Vec3b > palette;
for (int i = 0; i<65536; i++)
{
- palette.push_back(Vec3b((uchar)rand(), (uchar)rand(), (uchar)rand()));
+ uchar c1 = (uchar)rand();
+ uchar c2 = (uchar)rand();
+ uchar c3 = (uchar)rand();
+ palette.push_back(Vec3b(c1, c2, c3));
}
help();
// These descriptors are going to be detecting and computing BLOBS with 6 different params
// Param for first BLOB detector we want all
- typeDesc.push_back("BLOB"); // see http://docs.opencv.org/trunk/d0/d7a/classcv_1_1SimpleBlobDetector.html
+ typeDesc.push_back("BLOB"); // see http://docs.opencv.org/3.4/d0/d7a/classcv_1_1SimpleBlobDetector.html
pBLOB.push_back(pDefaultBLOB);
pBLOB.back().filterByArea = true;
pBLOB.back().minArea = 1;
pBLOB.back().filterByConvexity = true;
pBLOB.back().minConvexity = 0.;
pBLOB.back().maxConvexity = (float)0.9;
- // Param for six BLOB detector we want blob with gravity center color equal to 0 bug #4321 must be fixed
+ // Param for six BLOB detector we want blob with gravity center color equal to 0
typeDesc.push_back("BLOB");
pBLOB.push_back(pDefaultBLOB);
pBLOB.back().filterByColor = true;
string input = parser.get<string>("@input");
if (!input.empty())
{
- imgOrig = imread(input, IMREAD_GRAYSCALE);
+ imgOrig = imread(samples::findFile(input), IMREAD_GRAYSCALE);
blur(imgOrig, img, blurSize);
}
else
printf("\nThis program demonstrated the use of the discrete Fourier transform (dft)\n"
"The dft of an image is taken and it's power spectrum is displayed.\n"
"Usage:\n"
- "./dft [image_name -- default ../data/lena.jpg]\n");
+ "./dft [image_name -- default lena.jpg]\n");
}
const char* keys =
{
- "{help h||}{@image|../data/lena.jpg|input image file}"
+ "{help h||}{@image|lena.jpg|input image file}"
};
int main(int argc, const char ** argv)
return 0;
}
string filename = parser.get<string>(0);
- Mat img = imread(filename, IMREAD_GRAYSCALE);
+ Mat img = imread(samples::findFile(filename), IMREAD_GRAYSCALE);
if( img.empty() )
{
help();
{
printf("\nProgram to demonstrate the use of the distance transform function between edge images.\n"
"Usage:\n"
- "./distrans [image_name -- default image is ../data/stuff.jpg]\n"
+ "./distrans [image_name -- default image is stuff.jpg]\n"
"\nHot keys: \n"
"\tESC - quit the program\n"
"\tC - use C/Inf metric\n"
const char* keys =
{
- "{help h||}{@image |../data/stuff.jpg|input image file}"
+ "{help h||}{@image |stuff.jpg|input image file}"
};
int main( int argc, const char** argv )
if (parser.has("help"))
return 0;
string filename = parser.get<string>(0);
- gray = imread(filename, 0);
+ gray = imread(samples::findFile(filename), 0);
if(gray.empty())
{
printf("Cannot read image file: %s\n", filename.c_str());
{
printf("\nThis sample demonstrates Canny edge detection\n"
"Call:\n"
- " /.edge [image_name -- Default is ../data/fruits.jpg]\n\n");
+ " /.edge [image_name -- Default is fruits.jpg]\n\n");
}
const char* keys =
{
- "{help h||}{@image |../data/fruits.jpg|input image name}"
+ "{help h||}{@image |fruits.jpg|input image name}"
};
int main( int argc, const char** argv )
CommandLineParser parser(argc, argv, keys);
string filename = parser.get<string>(0);
- image = imread(filename, IMREAD_COLOR);
+ image = imread(samples::findFile(filename), IMREAD_COLOR);
if(image.empty())
{
printf("Cannot read image file: %s\n", filename.c_str());
" [--try-flip]\n"
" [filename|camera_index]\n\n"
"see facedetect.cmd for one call:\n"
- "./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml\" --scale=1.3\n\n"
+ "./facedetect --cascade=\"data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"data/haarcascades/haarcascade_eye_tree_eyeglasses.xml\" --scale=1.3\n\n"
"During execution:\n\tHit any key to quit.\n"
"\tUsing OpenCV version " << CV_VERSION << "\n" << endl;
}
cv::CommandLineParser parser(argc, argv,
"{help h||}"
- "{cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}"
- "{nested-cascade|../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}"
+ "{cascade|data/haarcascades/haarcascade_frontalface_alt.xml|}"
+ "{nested-cascade|data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}"
"{scale|1|}{try-flip||}{@filename||}"
);
if (parser.has("help"))
parser.printErrors();
return 0;
}
- if ( !nestedCascade.load( nestedCascadeName ) )
+ if (!nestedCascade.load(samples::findFileOrKeep(nestedCascadeName)))
cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
- if( !cascade.load( cascadeName ) )
+ if (!cascade.load(samples::findFile(cascadeName)))
{
cerr << "ERROR: Could not load classifier cascade" << endl;
help();
{
int camera = inputName.empty() ? 0 : inputName[0] - '0';
if(!capture.open(camera))
+ {
cout << "Capture from camera #" << camera << " didn't work" << endl;
+ return 1;
+ }
}
- else if( inputName.size() )
+ else if (!inputName.empty())
{
- image = imread( inputName, 1 );
- if( image.empty() )
+ image = imread(samples::findFileOrKeep(inputName), IMREAD_COLOR);
+ if (image.empty())
{
- if(!capture.open( inputName ))
+ if (!capture.open(samples::findFileOrKeep(inputName)))
+ {
cout << "Could not read " << inputName << endl;
+ return 1;
+ }
}
}
else
{
- image = imread( "../data/lena.jpg", 1 );
- if(image.empty()) cout << "Couldn't read ../data/lena.jpg" << endl;
+ image = imread(samples::findFile("lena.jpg"), IMREAD_COLOR);
+ if (image.empty())
+ {
+ cout << "Couldn't read lena.jpg" << endl;
+ return 1;
+ }
}
if( capture.isOpened() )
int main(int argc, char** argv)
{
cv::CommandLineParser parser(argc, argv,
- "{eyes||}{nose||}{mouth||}{help h||}");
+ "{eyes||}{nose||}{mouth||}{help h||}{@image||}{@facexml||}");
if (parser.has("help"))
{
help();
return 0;
}
- input_image_path = parser.get<string>(0);
- face_cascade_path = parser.get<string>(1);
+ input_image_path = parser.get<string>("@image");
+ face_cascade_path = parser.get<string>("@facexml");
eye_cascade_path = parser.has("eyes") ? parser.get<string>("eyes") : "";
nose_cascade_path = parser.has("nose") ? parser.get<string>("nose") : "";
mouth_cascade_path = parser.has("mouth") ? parser.get<string>("mouth") : "";
}
// Load image and cascade classifier files
Mat image;
- image = imread(input_image_path);
+ image = imread(samples::findFile(input_image_path));
// Detect faces and facial features
vector<Rect_<int> > faces;
" \nhttps://github.com/opencv/opencv/tree/3.4/data/haarcascades";
cout << "\n\nThe classifiers for nose and mouth can be downloaded from : "
- " \nhttps://github.com/opencv/opencv_contrib/tree/master/modules/face/data/cascades\n";
+ " \nhttps://github.com/opencv/opencv_contrib/tree/3.4/modules/face/data/cascades\n";
}
// Detect faces in `img`, appending one rectangle per detection to `faces`.
// The hunk makes two changes: the cascade file is now resolved via
// cv::samples::findFile() (honours OPENCV_SAMPLES_DATA_PATH), and
// detectMultiScale is only invoked when the cascade actually loaded,
// avoiding the assertion/crash on an empty classifier.
static void detectFaces(Mat& img, vector<Rect_<int> >& faces, string cascade_path)
{
CascadeClassifier face_cascade;
- face_cascade.load(cascade_path);
+ face_cascade.load(samples::findFile(cascade_path));
- face_cascade.detectMultiScale(img, faces, 1.15, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
+ if (!face_cascade.empty())
+ face_cascade.detectMultiScale(img, faces, 1.15, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
return;
}
// Detect eyes in `img`, appending detections to `eyes`. The cascade path is
// optional for this feature: findFile() is called with required =
// !cascade_path.empty(), so an empty path does not raise an error, and the
// subsequent empty-cascade guard silently skips detection in that case.
static void detectEyes(Mat& img, vector<Rect_<int> >& eyes, string cascade_path)
{
CascadeClassifier eyes_cascade;
- eyes_cascade.load(cascade_path);
+ eyes_cascade.load(samples::findFile(cascade_path, !cascade_path.empty()));
- eyes_cascade.detectMultiScale(img, eyes, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
+ if (!eyes_cascade.empty())
+ eyes_cascade.detectMultiScale(img, eyes, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
return;
}
// Detect noses in `img`, appending detections to `nose`. Mirrors
// detectEyes(): the cascade is optional (required flag is
// !cascade_path.empty()) and detection is skipped when the classifier
// failed to load or no path was supplied.
static void detectNose(Mat& img, vector<Rect_<int> >& nose, string cascade_path)
{
CascadeClassifier nose_cascade;
- nose_cascade.load(cascade_path);
+ nose_cascade.load(samples::findFile(cascade_path, !cascade_path.empty()));
- nose_cascade.detectMultiScale(img, nose, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
+ if (!nose_cascade.empty())
+ nose_cascade.detectMultiScale(img, nose, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
return;
}
// Detect mouths in `img`, appending detections to `mouth`. Same pattern as
// detectEyes()/detectNose(): optional cascade resolved via findFile() with
// required = !cascade_path.empty(), plus an empty-cascade guard before
// detectMultiScale().
static void detectMouth(Mat& img, vector<Rect_<int> >& mouth, string cascade_path)
{
CascadeClassifier mouth_cascade;
- mouth_cascade.load(cascade_path);
+ mouth_cascade.load(samples::findFile(cascade_path, !cascade_path.empty()));
- mouth_cascade.detectMultiScale(img, mouth, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
+ if (!mouth_cascade.empty())
+ mouth_cascade.detectMultiScale(img, mouth, 1.20, 5, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
return;
}
Mat img;
if (argc > 1)
- img = imread(argv[1], IMREAD_GRAYSCALE);
+ img = imread(samples::findFile(argv[1]), IMREAD_GRAYSCALE);
else
img = DrawMyImage(2,256);
{
cout << "\nThis program demonstrated the floodFill() function\n"
"Call:\n"
- "./ffilldemo [image_name -- Default: ../data/fruits.jpg]\n" << endl;
+ "./ffilldemo [image_name -- Default: fruits.jpg]\n" << endl;
cout << "Hot keys: \n"
"\tESC - quit the program\n"
int main( int argc, char** argv )
{
cv::CommandLineParser parser (argc, argv,
- "{help h | | show help message}{@image|../data/fruits.jpg| input image}"
+ "{help h | | show help message}{@image|fruits.jpg| input image}"
);
if (parser.has("help"))
{
return 0;
}
string filename = parser.get<string>("@image");
- image0 = imread(filename, 1);
+ image0 = imread(samples::findFile(filename), 1);
if( image0.empty() )
{
cout << "writing images\n";
fs << "images" << "[";
- fs << "image1.jpg" << "myfi.png" << "../data/baboon.jpg";
- cout << "image1.jpg" << " myfi.png" << " ../data/baboon.jpg" << endl;
+ fs << "image1.jpg" << "myfi.png" << "baboon.jpg";
+ cout << "image1.jpg" << " myfi.png" << " baboon.jpg" << endl;
fs << "]";
"contours and approximate it by ellipses. Three methods are used to find the \n"
"elliptical fits: fitEllipse, fitEllipseAMS and fitEllipseDirect.\n"
"Call:\n"
- "./fitellipse [image_name -- Default ../data/stuff.jpg]\n" << endl;
+ "./fitellipse [image_name -- Default ellipses.jpg]\n" << endl;
}
int sliderPos = 70;
fitEllipseAMSQ = true;
fitEllipseDirectQ = true;
- cv::CommandLineParser parser(argc, argv,"{help h||}{@image|../data/ellipses.jpg|}");
+ cv::CommandLineParser parser(argc, argv,"{help h||}{@image|ellipses.jpg|}");
if (parser.has("help"))
{
help();
return 0;
}
string filename = parser.get<string>("@image");
- image = imread(filename, 0);
+ image = imread(samples::findFile(filename), 0);
if( image.empty() )
{
cout << "Couldn't open image " << filename << "\n";
int main( int argc, char** argv )
{
- cv::CommandLineParser parser(argc, argv, "{@input| ../data/messi5.jpg |}");
+ cv::CommandLineParser parser(argc, argv, "{@input| messi5.jpg |}");
help();
string filename = parser.get<string>("@input");
cout << "\nDurn, empty filename" << endl;
return 1;
}
- Mat image = imread( filename, 1 );
+ Mat image = imread(samples::findFile(filename), IMREAD_COLOR);
if( image.empty() )
{
cout << "\n Durn, couldn't read image filename " << filename << endl;
cout << "Unsupported mode: " << mode << endl;
return -1;
}
+ file_name = samples::findFile(file_name);
cout << "Mode: " << mode << ", Backend: " << backend << ", File: " << file_name << ", Codec: " << codec << endl;
TickMeter total;
"It shows reading of images, converting to planes and merging back, color conversion\n"
"and also iterating through pixels.\n"
"Call:\n"
- "./image [image-name Default: ../data/lena.jpg]\n" << endl;
+ "./image [image-name Default: lena.jpg]\n" << endl;
}
// enable/disable use of mixed API in the code below.
int main( int argc, char** argv )
{
- cv::CommandLineParser parser(argc, argv, "{help h | |}{@image|../data/lena.jpg|}");
+ cv::CommandLineParser parser(argc, argv, "{help h | |}{@image|lena.jpg|}");
if (parser.has("help"))
{
help();
// is converted, while the data is shared)
//! [iplimage]
#else
- Mat img = imread(imagename); // the newer cvLoadImage alternative, MATLAB-style function
+ Mat img = imread(samples::findFile(imagename)); // the newer cvLoadImage alternative, MATLAB-style function
if(img.empty())
{
fprintf(stderr, "Can not load image %s\n", imagename.c_str());
* findTransformECC that implements the image alignment ECC algorithm
*
*
-* The demo loads an image (defaults to ../data/fruits.jpg) and it artificially creates
+* The demo loads an image (defaults to fruits.jpg) and it artificially creates
* a template image based on the given motion type. When two images are given,
* the first image is the input image and the second one defines the template image.
* In the latter case, you can also parse the warp's initialization.
const std::string keys =
- "{@inputImage | ../data/fruits.jpg | input image filename }"
+ "{@inputImage | fruits.jpg | input image filename }"
"{@templateImage | | template image filename (optional)}"
"{@inputWarp | | input warp (matrix) filename (optional)}"
"{n numOfIter | 50 | ECC's iterations }"
" are given, the initialization of the warp by command line parsing is possible. "
"If inputWarp is missing, the identity transformation initializes the algorithm. \n" << endl;
- cout << "\nUsage example (one image): \n./ecc ../data/fruits.jpg -o=outWarp.ecc "
+ cout << "\nUsage example (one image): \n./image_alignment fruits.jpg -o=outWarp.ecc "
"-m=euclidean -e=1e-6 -N=70 -v=1 \n" << endl;
- cout << "\nUsage example (two images with initialization): \n./ecc yourInput.png yourTemplate.png "
+ cout << "\nUsage example (two images with initialization): \n./image_alignment yourInput.png yourTemplate.png "
"yourInitialWarp.ecc -o=outWarp.ecc -m=homography -e=1e-6 -N=70 -v=1 -w=yourFinalImage.png \n" << endl;
}
else
mode_temp = MOTION_HOMOGRAPHY;
- Mat inputImage = imread(imgFile,0);
+ Mat inputImage = imread(samples::findFile(imgFile), IMREAD_GRAYSCALE);
if (inputImage.empty())
{
cerr << "Unable to load the inputImage" << endl;
if (tempImgFile!="") {
inputImage.copyTo(target_image);
- template_image = imread(tempImgFile,0);
+ template_image = imread(samples::findFile(tempImgFile), IMREAD_GRAYSCALE);
if (template_image.empty()){
cerr << "Unable to load the template image" << endl;
return -1;
<< "with surrounding image areas.\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n"
"Usage:\n"
- "./inpaint [image_name -- Default ../data/fruits.jpg]\n" << endl;
+ "./inpaint [image_name -- Default fruits.jpg]\n" << endl;
cout << "Hot keys: \n"
"\tESC - quit the program\n"
int main( int argc, char** argv )
{
- cv::CommandLineParser parser(argc, argv, "{@image|../data/fruits.jpg|}");
+ cv::CommandLineParser parser(argc, argv, "{@image|fruits.jpg|}");
help();
- string filename = parser.get<string>("@image");
- Mat img0 = imread(filename, -1);
+ string filename = samples::findFile(parser.get<string>("@image"));
+ Mat img0 = imread(filename, IMREAD_COLOR);
if(img0.empty())
{
cout << "Couldn't open the image " << filename << ". Usage: inpaint <image_name>\n" << endl;
return 0;
}
- namedWindow( "image", 1 );
+ namedWindow("image", WINDOW_AUTOSIZE);
img = img0.clone();
inpaintMask = Mat::zeros(img.size(), CV_8U);
imshow("image", img);
- setMouseCallback( "image", onMouse, 0 );
+ setMouseCallback( "image", onMouse, NULL);
for(;;)
{
int main( int argc, char** argv )
{
- VideoCapture cap;
cv::CommandLineParser parser(argc, argv, "{ c | 0 | }{ p | | }");
help();
- if( parser.get<string>("c").size() == 1 && isdigit(parser.get<string>("c")[0]) )
+ VideoCapture cap;
+ string camera = parser.get<string>("c");
+ if (camera.size() == 1 && isdigit(camera[0]))
cap.open(parser.get<int>("c"));
else
- cap.open(parser.get<string>("c"));
- if( cap.isOpened() )
- cout << "Video " << parser.get<string>("c") <<
- ": width=" << cap.get(CAP_PROP_FRAME_WIDTH) <<
- ", height=" << cap.get(CAP_PROP_FRAME_HEIGHT) <<
- ", nframes=" << cap.get(CAP_PROP_FRAME_COUNT) << endl;
- if( parser.has("p") )
+ cap.open(samples::findFileOrKeep(camera));
+ if (!cap.isOpened())
{
- int pos = parser.get<int>("p");
- if (!parser.check())
- {
- parser.printErrors();
- return -1;
- }
- cout << "seeking to frame #" << pos << endl;
- cap.set(CAP_PROP_POS_FRAMES, pos);
+ cerr << "Can't open camera/video stream: " << camera << endl;
+ return 1;
}
-
- if( !cap.isOpened() )
+ cout << "Video " << parser.get<string>("c") <<
+ ": width=" << cap.get(CAP_PROP_FRAME_WIDTH) <<
+ ", height=" << cap.get(CAP_PROP_FRAME_HEIGHT) <<
+ ", nframes=" << cap.get(CAP_PROP_FRAME_COUNT) << endl;
+ int pos = 0;
+ if (parser.has("p"))
+ {
+ pos = parser.get<int>("p");
+ }
+ if (!parser.check())
{
- cout << "Could not initialize capturing...\n";
+ parser.printErrors();
return -1;
}
- namedWindow( "Laplacian", 0 );
- createTrackbar( "Sigma", "Laplacian", &sigma, 15, 0 );
+ if (pos != 0)
+ {
+ cout << "seeking to frame #" << pos << endl;
+ if (!cap.set(CAP_PROP_POS_FRAMES, pos))
+ {
+ cerr << "ERROR: seekeing is not supported" << endl;
+ }
+ }
+
+ namedWindow("Laplacian", WINDOW_AUTOSIZE);
+ createTrackbar("Sigma", "Laplacian", &sigma, 15, 0);
Mat smoothed, laplace, result;
string data_filename;
int method = 0;
- cv::CommandLineParser parser(argc, argv, "{data|../data/letter-recognition.data|}{save||}{load||}{boost||}"
+ cv::CommandLineParser parser(argc, argv, "{data|letter-recognition.data|}{save||}{load||}{boost||}"
"{mlp||}{knn knearest||}{nbayes||}{svm||}");
- data_filename = parser.get<string>("data");
+ data_filename = samples::findFile(parser.get<string>("data"));
if (parser.has("save"))
filename_to_save = parser.get<string>("save");
if (parser.has("load"))
- filename_to_load = parser.get<string>("load");
+ filename_to_load = samples::findFile(parser.get<string>("load"));
if (parser.has("boost"))
method = 1;
else if (parser.has("mlp"))
using namespace std;
using namespace cv;
-void getMatWithQRCodeContour(Mat &color_image, vector<Point> transform);
-void getMatWithFPS(Mat &color_image, double fps);
-int liveQRCodeDetect();
-int showImageQRCodeDetect(string in, string out);
+static void drawQRCodeContour(Mat &color_image, vector<Point> transform);
+static void drawFPS(Mat &color_image, double fps);
+static int liveQRCodeDetect(const string& out_file);
+static int imageQRCodeDetect(const string& in_file, const string& out_file);
int main(int argc, char *argv[])
{
}
string in_file_name = cmd_parser.get<string>("in"); // input path to image
- string out_file_name = cmd_parser.get<string>("out"); // output path to image
+ string out_file_name;
+ if (cmd_parser.has("out"))
+ out_file_name = cmd_parser.get<string>("out"); // output path to image
if (!cmd_parser.check())
{
int return_code = 0;
if (in_file_name.empty())
{
- return_code = liveQRCodeDetect();
+ return_code = liveQRCodeDetect(out_file_name);
}
else
{
- return_code = showImageQRCodeDetect(in_file_name, out_file_name);
+ return_code = imageQRCodeDetect(samples::findFile(in_file_name), out_file_name);
}
return return_code;
}
-void getMatWithQRCodeContour(Mat &color_image, vector<Point> transform)
+void drawQRCodeContour(Mat &color_image, vector<Point> transform)
{
if (!transform.empty())
{
}
}
// Overlay the rounded FPS value onto `color_image` (top-left, red text).
// Renamed from getMatWithFPS to drawFPS — the function draws in place and
// returns nothing, so the old "getMat" name was misleading. The label text
// also gains a "(QR detection)" suffix to say what the rate measures.
-void getMatWithFPS(Mat &color_image, double fps)
+void drawFPS(Mat &color_image, double fps)
{
ostringstream convert;
- convert << cvRound(fps) << " FPS.";
+ convert << cvRound(fps) << " FPS (QR detection)";
putText(color_image, convert.str(), Point(25, 25), FONT_HERSHEY_DUPLEX, 1, Scalar(0, 0, 255), 2);
}
-int liveQRCodeDetect()
+int liveQRCodeDetect(const string& out_file)
{
VideoCapture cap(0);
if(!cap.isOpened())
{
- cout << "Cannot open a camera" << '\n';
+ cout << "Cannot open a camera" << endl;
return -4;
}
string decode_info;
vector<Point> transform;
cap >> frame;
- if(frame.empty()) { break; }
+ if (frame.empty())
+ {
+ cout << "End of video stream" << endl;
+ break;
+ }
cvtColor(frame, src, COLOR_BGR2GRAY);
total.start();
if (result_detection)
{
decode_info = qrcode.decode(src, transform, straight_barcode);
- if (!decode_info.empty()) { cout << decode_info << '\n'; }
+ if (!decode_info.empty()) { cout << decode_info << endl; }
}
total.stop();
double fps = 1 / total.getTimeSec();
total.reset();
- if (result_detection) { getMatWithQRCodeContour(frame, transform); }
- getMatWithFPS(frame, fps);
+ if (result_detection) { drawQRCodeContour(frame, transform); }
+ drawFPS(frame, fps);
imshow("Live QR code detector", frame);
- if( waitKey(30) > 0 ) { break; }
+ char c = (char)waitKey(30);
+ if (c == 27)
+ break;
+ if (c == ' ' && !out_file.empty())
+ imwrite(out_file, frame); // TODO write original frame too
}
return 0;
}
-int showImageQRCodeDetect(string in, string out)
+int imageQRCodeDetect(const string& in_file, const string& out_file)
{
- Mat src = imread(in, IMREAD_GRAYSCALE), straight_barcode;
+ Mat color_src = imread(in_file, IMREAD_COLOR), src;
+ cvtColor(color_src, src, COLOR_BGR2GRAY);
+ Mat straight_barcode;
string decoded_info;
vector<Point> transform;
const int count_experiments = 10;
total.stop();
transform_time += total.getTimeSec();
total.reset();
- if (!result_detection) { break; }
+ if (!result_detection)
+ continue;
total.start();
decoded_info = qrcode.decode(src, transform, straight_barcode);
total.stop();
transform_time += total.getTimeSec();
total.reset();
- if (decoded_info.empty()) { break; }
-
}
double fps = count_experiments / transform_time;
- if (!result_detection) { cout << "QR code not found\n"; return -2; }
- if (decoded_info.empty()) { cout << "QR code cannot be decoded\n"; return -3; }
+ if (!result_detection)
+ cout << "QR code not found" << endl;
+ if (decoded_info.empty())
+ cout << "QR code cannot be decoded" << endl;
- Mat color_src = imread(in);
- getMatWithQRCodeContour(color_src, transform);
- getMatWithFPS(color_src, fps);
+ drawQRCodeContour(color_src, transform);
+ drawFPS(color_src, fps);
- for(;;)
+ cout << "Input image file path: " << in_file << endl;
+ cout << "Output image file path: " << out_file << endl;
+ cout << "Size: " << color_src.size() << endl;
+ cout << "FPS: " << fps << endl;
+ cout << "Decoded info: " << decoded_info << endl;
+
+ if (!out_file.empty())
{
- imshow("Detect QR code on image", color_src);
- if( waitKey(30) > 0 ) { break; }
+ imwrite(out_file, color_src);
}
- if (!out.empty())
+ for(;;)
{
- getMatWithQRCodeContour(color_src, transform);
- getMatWithFPS(color_src, fps);
-
- cout << "Input image file path: " << in << '\n';
- cout << "Output image file path: " << out << '\n';
- cout << "Size: " << color_src.size() << '\n';
- cout << "FPS: " << fps << '\n';
- cout << "Decoded info: " << decoded_info << '\n';
-
- vector<int> compression_params;
- compression_params.push_back(IMWRITE_PNG_COMPRESSION);
- compression_params.push_back(9);
- try
- {
- imwrite(out, color_src, compression_params);
- }
- catch (const cv::Exception& ex)
- {
- cout << "Exception converting image to PNG format: ";
- cout << ex.what() << '\n';
- return -3;
- }
+ imshow("Detect QR code on image", color_src);
+ if (waitKey(0) == 27)
+ break;
}
return 0;
}
int main()
{
- const String filename = "../data/data01.xml";
+ const String filename = samples::findFile("data01.xml");
cout << "**********************************************************************" << endl;
cout << filename
<< " contains digits 0 and 1 of 20 samples each, collected on an Android device" << endl;
int main(int argc, char** argv)
{
cv::CommandLineParser parser(argc, argv,
- "{input i|../data/building.jpg|input image}"
+ "{input i|building.jpg|input image}"
"{refine r|false|if true use LSD_REFINE_STD method, if false use LSD_REFINE_NONE method}"
"{canny c|false|use Canny edge detector}"
"{overlay o|false|show result on input image}"
parser.printMessage();
- String filename = parser.get<String>("input");
+ String filename = samples::findFile(parser.get<String>("input"));
bool useRefine = parser.get<bool>("refine");
bool useCanny = parser.get<bool>("canny");
bool overlay = parser.get<bool>("overlay");
int main( int argc, const char** argv )
{
CommandLineParser parser(argc, argv,
- "{ i | ../data/lena_tmpl.jpg |image name }"
- "{ t | ../data/tmpl.png |template name }"
- "{ m | ../data/mask.png |mask name }"
+ "{ i | lena_tmpl.jpg |image name }"
+ "{ t | tmpl.png |template name }"
+ "{ m | mask.png |mask name }"
"{ cm| 3 |comparison method }");
- cout << "This program demonstrates the use of template matching with mask.\n\n";
+ cout << "This program demonstrates the use of template matching with mask." << endl
+ << endl
+ << "Available methods: https://docs.opencv.org/3.4/df/dfb/group__imgproc__object.html#ga3a7850640f1fe1f58fe91a2d7583695d" << endl
+ << " TM_SQDIFF = " << (int)TM_SQDIFF << endl
+ << " TM_SQDIFF_NORMED = " << (int)TM_SQDIFF_NORMED << endl
+ << " TM_CCORR = " << (int)TM_CCORR << endl
+ << " TM_CCORR_NORMED = " << (int)TM_CCORR_NORMED << endl
+ << " TM_CCOEFF = " << (int)TM_CCOEFF << endl
+ << " TM_CCOEFF_NORMED = " << (int)TM_CCOEFF_NORMED << endl
+ << endl;
+
parser.printMessage();
- string filename = parser.get<string>("i");
- string tmplname = parser.get<string>("t");
- string maskname = parser.get<string>("m");
+ string filename = samples::findFile(parser.get<string>("i"));
+ string tmplname = samples::findFile(parser.get<string>("t"));
+ string maskname = samples::findFile(parser.get<string>("m"));
Mat img = imread(filename);
Mat tmpl = imread(tmplname);
Mat mask = imread(maskname);
{
cout << "\n This program demonstrates how to detect compute and match ORB BRISK and AKAZE descriptors \n"
"Usage: \n"
- " ./matchmethod_orb_akaze_brisk --image1=<image1(../data/basketball1.png as default)> --image2=<image2(../data/basketball2.png as default)>\n"
+ " ./matchmethod_orb_akaze_brisk --image1=<image1(basketball1.png as default)> --image2=<image2(basketball2.png as default)>\n"
"Press a key when image window is active to change algorithm or descriptor";
}
typeDesc.push_back("AKAZE"); // see http://docs.opencv.org/trunk/d8/d30/classcv_1_1AKAZE.html
typeDesc.push_back("ORB"); // see http://docs.opencv.org/trunk/de/dbf/classcv_1_1BRISK.html
typeDesc.push_back("BRISK"); // see http://docs.opencv.org/trunk/db/d95/classcv_1_1ORB.html
- // This algorithm would be used to match descriptors see http://docs.opencv.org/trunk/db/d39/classcv_1_1DescriptorMatcher.html#ab5dc5036569ecc8d47565007fa518257
+ // This algorithm would be used to match descriptors see http://docs.opencv.org/trunk/db/d39/classcv_1_1DescriptorMatcher.html#ab5dc5036569ecc8d47565007fa518257
typeAlgoMatch.push_back("BruteForce");
typeAlgoMatch.push_back("BruteForce-L1");
typeAlgoMatch.push_back("BruteForce-Hamming");
typeAlgoMatch.push_back("BruteForce-Hamming(2)");
cv::CommandLineParser parser(argc, argv,
- "{ @image1 | ../data/basketball1.png | }"
- "{ @image2 | ../data/basketball2.png | }"
+ "{ @image1 | basketball1.png | }"
+ "{ @image2 | basketball2.png | }"
"{help h ||}");
if (parser.has("help"))
{
help();
return 0;
}
- fileName.push_back(parser.get<string>(0));
- fileName.push_back(parser.get<string>(1));
+ fileName.push_back(samples::findFile(parser.get<string>(0)));
+ fileName.push_back(samples::findFile(parser.get<string>(1)));
Mat img1 = imread(fileName[0], IMREAD_GRAYSCALE);
Mat img2 = imread(fileName[1], IMREAD_GRAYSCALE);
- if (img1.rows*img1.cols <= 0)
- {
- cout << "Image " << fileName[0] << " is empty or cannot be found\n";
- return(0);
- }
- if (img2.rows*img2.cols <= 0)
- {
- cout << "Image " << fileName[1] << " is empty or cannot be found\n";
- return(0);
- }
+ if (img1.empty())
+ {
+ cerr << "Image " << fileName[0] << " is empty or cannot be found" << endl;
+ return 1;
+ }
+ if (img2.empty())
+ {
+ cerr << "Image " << fileName[1] << " is empty or cannot be found" << endl;
+ return 1;
+ }
vector<double> desMethCmp;
Ptr<Feature2D> b;
vector<String>::iterator itMatcher = typeAlgoMatch.end();
if (*itDesc == "AKAZE-DESCRIPTOR_KAZE_UPRIGHT"){
b = AKAZE::create(AKAZE::DESCRIPTOR_KAZE_UPRIGHT);
- }
+ }
if (*itDesc == "AKAZE"){
b = AKAZE::create();
- }
+ }
if (*itDesc == "ORB"){
b = ORB::create();
}
}
catch (const Exception& e)
{
+ cerr << "Exception: " << e.what() << endl;
cout << "Feature : " << *itDesc << "\n";
if (itMatcher != typeAlgoMatch.end())
{
cout << "Matcher : " << *itMatcher << "\n";
}
- cout << e.msg << endl;
}
}
int i=0;
{
help();
- Mat img(500, 500, CV_8UC3);
+ Mat img(500, 500, CV_8UC3, Scalar::all(0));
RNG& rng = theRNG();
for(;;)
// callback function for open/close trackbar
static void OpenClose(int, void*)
{
- int n = open_close_pos - max_iters;
- int an = n > 0 ? n : -n;
+ int n = open_close_pos;
+ int an = abs(n);
Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) );
if( n < 0 )
morphologyEx(src, dst, MORPH_OPEN, element);
// callback function for erode/dilate trackbar
static void ErodeDilate(int, void*)
{
- int n = erode_dilate_pos - max_iters;
- int an = n > 0 ? n : -n;
+ int n = erode_dilate_pos;
+ int an = abs(n);
Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) );
if( n < 0 )
erode(src, dst, element);
int main( int argc, char** argv )
{
- cv::CommandLineParser parser(argc, argv, "{help h||}{ @image | ../data/baboon.jpg | }");
+ cv::CommandLineParser parser(argc, argv, "{help h||}{ @image | baboon.jpg | }");
if (parser.has("help"))
{
help();
return 0;
}
- std::string filename = parser.get<std::string>("@image");
+ std::string filename = samples::findFile(parser.get<std::string>("@image"));
if( (src = imread(filename,IMREAD_COLOR)).empty() )
{
help();
open_close_pos = erode_dilate_pos = max_iters;
createTrackbar("iterations", "Open/Close",&open_close_pos,max_iters*2+1,OpenClose);
+ setTrackbarMin("iterations", "Open/Close", -max_iters);
+ setTrackbarMax("iterations", "Open/Close", max_iters);
+ setTrackbarPos("iterations", "Open/Close", 0);
+
createTrackbar("iterations", "Erode/Dilate",&erode_dilate_pos,max_iters*2+1,ErodeDilate);
+ setTrackbarMin("iterations", "Erode/Dilate", -max_iters);
+ setTrackbarMax("iterations", "Erode/Dilate", max_iters);
+ setTrackbarPos("iterations", "Erode/Dilate", 0);
for(;;)
{
int main(int argc, char* argv[])
{
- cv::CommandLineParser parser(argc, argv, "{help h||show help message}{@image|../data/lena.jpg|input image}");
+ cv::CommandLineParser parser(argc, argv, "{help h||show help message}{@image|lena.jpg|input image}");
if (parser.has("help"))
{
parser.printMessage();
- exit(0);
- }
- if (parser.get<string>("@image").empty())
- {
- parser.printMessage();
- exit(0);
+ return 0;
}
+ string filename = samples::findFile(parser.get<string>("@image"));
- Mat I = imread(parser.get<string>("@image"));
+ Mat I = imread(filename);
int num,type;
if(I.empty())
{
cout << "Image not found" << endl;
- exit(0);
+ return 1;
}
cout << endl;
if (file.empty())
cap.open(camera);
else
- cap.open(file.c_str());
+ {
+ file = samples::findFileOrKeep(file);
+ cap.open(file);
+ }
if (!cap.isOpened())
{
cout << "Can not open video stream: '" << (file.empty() ? "<camera>" : file) << "'" << endl;
#include "opencv2/imgproc.hpp"
#include "opencv2/ml.hpp"
#include "opencv2/highgui.hpp"
-#ifdef HAVE_OPENCV_OCL
-#define _OCL_KNN_ 1 // select whether using ocl::KNN method or not, default is using
-#define _OCL_SVM_ 1 // select whether using ocl::svm method or not, default is using
-#include "opencv2/ocl/ocl.hpp"
-#endif
#include <stdio.h>
if( arg.size() == 1 && isdigit(arg[0]) )
capture.open( arg[0] - '0' );
else
- capture.open( arg.c_str() );
+ capture.open(samples::findFileOrKeep(arg));
if( !capture.isOpened() )
{
if (input.empty())
cap.open(0);
else
- cap.open(input);
+ cap.open(samples::findFileOrKeep(input));
if( !cap.isOpened() )
{
if ( parser.get<string>("@input").size() == 1 && isdigit(parser.get<string>("@input")[0]) )
cameraId = parser.get<int>("@input");
else
- inputName = parser.get<string>("@input");
+ inputName = samples::findFileOrKeep(parser.get<string>("@input"));
if (!parser.check())
{
puts(help);
" [--try-flip]\n"
" [video_filename|camera_index]\n\n"
"Example:\n"
- "./smiledetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --smile-cascade=\"../../data/haarcascades/haarcascade_smile.xml\" --scale=2.0\n\n"
+ "./smiledetect --cascade=\"data/haarcascades/haarcascade_frontalface_alt.xml\" --smile-cascade=\"data/haarcascades/haarcascade_smile.xml\" --scale=2.0\n\n"
"During execution:\n\tHit any key to quit.\n"
"\tUsing OpenCV version " << CV_VERSION << "\n" << endl;
}
double scale;
cv::CommandLineParser parser(argc, argv,
"{help h||}{scale|1|}"
- "{cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}"
- "{smile-cascade|../../data/haarcascades/haarcascade_smile.xml|}"
+ "{cascade|data/haarcascades/haarcascade_frontalface_alt.xml|}"
+ "{smile-cascade|data/haarcascades/haarcascade_smile.xml|}"
"{try-flip||}{@input||}");
if (parser.has("help"))
{
help();
return 0;
}
- cascadeName = parser.get<string>("cascade");
- nestedCascadeName = parser.get<string>("smile-cascade");
+ cascadeName = samples::findFile(parser.get<string>("cascade"));
+ nestedCascadeName = samples::findFile(parser.get<string>("smile-cascade"));
tryflip = parser.has("try-flip");
inputName = parser.get<string>("@input");
scale = parser.get<int>("scale");
}
else if( inputName.size() )
{
+ inputName = samples::findFileOrKeep(inputName);
if(!capture.open( inputName ))
cout << "Could not read " << inputName << endl;
}
int main(int argc, char** argv)
{
- static const char* names[] = { "../data/pic1.png", "../data/pic2.png", "../data/pic3.png",
- "../data/pic4.png", "../data/pic5.png", "../data/pic6.png", 0 };
+ static const char* names[] = { "data/pic1.png", "data/pic2.png", "data/pic3.png",
+ "data/pic4.png", "data/pic5.png", "data/pic6.png", 0 };
help(argv[0]);
if( argc > 1)
for( int i = 0; names[i] != 0; i++ )
{
- Mat image = imread(names[i], IMREAD_COLOR);
+ string filename = samples::findFile(names[i]);
+ Mat image = imread(filename, IMREAD_COLOR);
if( image.empty() )
{
- cout << "Couldn't load " << names[i] << endl;
+ cout << "Couldn't load " << filename << endl;
continue;
}
Homepage: http://opencv.org
Online docs: http://docs.opencv.org
Q&A forum: http://answers.opencv.org
- Issue tracker: http://code.opencv.org
GitHub: https://github.com/opencv/opencv/
************************************************** */
" on the chessboards, and a flag: useCalibrated for \n"
" calibrated (0) or\n"
" uncalibrated \n"
- " (1: use cvStereoCalibrate(), 2: compute fundamental\n"
+ " (1: use stereoCalibrate(), 2: compute fundamental\n"
" matrix separately) stereo. \n"
" Calibrate the cameras and display the\n"
" rectified results along with the computed disparity images. \n" << endl;
- cout << "Usage:\n ./stereo_calib -w=<board_width default=9> -h=<board_height default=6> -s=<square_size default=1.0> <image list XML/YML file default=../data/stereo_calib.xml>\n" << endl;
+ cout << "Usage:\n ./stereo_calib -w=<board_width default=9> -h=<board_height default=6> -s=<square_size default=1.0> <image list XML/YML file default=stereo_calib.xml>\n" << endl;
return 0;
}
Size boardSize;
string imagelistfn;
bool showRectified;
- cv::CommandLineParser parser(argc, argv, "{w|9|}{h|6|}{s|1.0|}{nr||}{help||}{@input|../data/stereo_calib.xml|}");
+ cv::CommandLineParser parser(argc, argv, "{w|9|}{h|6|}{s|1.0|}{nr||}{help||}{@input|stereo_calib.xml|}");
if (parser.has("help"))
return print_help();
showRectified = !parser.has("nr");
- imagelistfn = parser.get<string>("@input");
+ imagelistfn = samples::findFile(parser.get<string>("@input"));
boardSize.width = parser.get<int>("w");
boardSize.height = parser.get<int>("h");
float squareSize = parser.get<float>("s");
print_help();
return 0;
}
- img1_filename = parser.get<std::string>(0);
- img2_filename = parser.get<std::string>(1);
+ img1_filename = samples::findFile(parser.get<std::string>(0));
+ img2_filename = samples::findFile(parser.get<std::string>(1));
if (parser.has("algorithm"))
{
std::string _alg = parser.get<std::string>("algorithm");
}
else
{
- Mat img = imread(argv[i]);
+ Mat img = imread(samples::findFile(argv[i]));
if (img.empty())
{
cout << "Can't read image '" << argv[i] << "'\n";
for (int i = 0; i < num_images; ++i)
{
- full_img = imread(img_names[i]);
+ full_img = imread(samples::findFile(img_names[i]));
full_img_sizes[i] = full_img.size();
if (full_img.empty())
LOGLN("Compositing image #" << indices[img_idx]+1);
// Read image and resize it if necessary
- full_img = imread(img_names[img_idx]);
+ full_img = imread(samples::findFile(img_names[img_idx]));
if (!is_compose_scale_set)
{
if (compose_megapix > 0)
#include <iostream>
#include <time.h>
// OpenCV
-#include <opencv2//core.hpp>
+#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
return -1;
}
- Mat frame0 = imread(frame0_name, IMREAD_GRAYSCALE);
- Mat frame1 = imread(frame1_name, IMREAD_GRAYSCALE);
+ Mat frame0 = imread(samples::findFile(frame0_name), IMREAD_GRAYSCALE);
+ Mat frame1 = imread(samples::findFile(frame1_name), IMREAD_GRAYSCALE);
if (frame0.empty())
{
cout << "\nThis is a demo program shows how perspective transformation applied on an image, \n"
"Using OpenCV version " << CV_VERSION << endl;
- cout << "\nUsage:\n" << argv[0] << " [image_name -- Default ../data/right.jpg]\n" << endl;
+ cout << "\nUsage:\n" << argv[0] << " [image_name -- Default data/right.jpg]\n" << endl;
cout << "\nHot keys: \n"
"\tESC, q - quit the program\n"
int main(int argc, char** argv)
{
help(argv);
- CommandLineParser parser(argc, argv, "{@input| ../data/right.jpg |}");
+ CommandLineParser parser(argc, argv, "{@input| data/right.jpg |}");
- string filename = parser.get<string>("@input");
+ string filename = samples::findFile(parser.get<string>("@input"));
Mat original_image = imread( filename );
Mat image;
{
cout << "\nThis program demonstrates the famous watershed segmentation algorithm in OpenCV: watershed()\n"
"Usage:\n"
- "./watershed [image_name -- default is ../data/fruits.jpg]\n" << endl;
+ "./watershed [image_name -- default is fruits.jpg]\n" << endl;
cout << "Hot keys: \n"
int main( int argc, char** argv )
{
- cv::CommandLineParser parser(argc, argv, "{help h | | }{ @input | ../data/fruits.jpg | }");
+ cv::CommandLineParser parser(argc, argv, "{help h | | }{ @input | fruits.jpg | }");
if (parser.has("help"))
{
help();
return 0;
}
- string filename = parser.get<string>("@input");
+ string filename = samples::findFile(parser.get<string>("@input"));
Mat img0 = imread(filename, 1), imgGray;
if( img0.empty() )
{
- cout << "Couldn'g open image " << filename << ". Usage: watershed <image_name>\n";
+ cout << "Couldn't open image " << filename << ". Usage: watershed <image_name>\n";
return 0;
}
help();
--- /dev/null
+%YAML:1.0
+images:
+ - left01.jpg
+ - left02.jpg
+ - left03.jpg
+ - left04.jpg
+ - left05.jpg
+ - left06.jpg
+ - left07.jpg
+ - left08.jpg
+ - left09.jpg
+ - left11.jpg
+ - left12.jpg
+ - left13.jpg
+ - left14.jpg
cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml")
nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml")
- cascade = cv.CascadeClassifier(cascade_fn)
- nested = cv.CascadeClassifier(nested_fn)
+ cascade = cv.CascadeClassifier(cv.samples.findFile(cascade_fn))
+ nested = cv.CascadeClassifier(cv.samples.findFile(nested_fn))
- cam = create_capture(video_src, fallback='synth:bg=../data/lena.jpg:noise=0.05')
+ cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('samples/data/lena.jpg')))
while True:
ret, img = cam.read()