/** GLOBAL VARIABLES **/
-std::string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial
+using namespace cv;
+using namespace std;
-std::string video_read_path = tutorial_path + "Data/box.mp4"; // recorded video
-std::string yml_read_path = tutorial_path + "Data/cookies_ORB.yml"; // 3dpts + descriptors
-std::string ply_read_path = tutorial_path + "Data/box.ply"; // mesh
+string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial
+
+string video_read_path = tutorial_path + "Data/box.mp4"; // recorded video
+string yml_read_path = tutorial_path + "Data/cookies_ORB.yml"; // 3dpts + descriptors
+string ply_read_path = tutorial_path + "Data/box.ply"; // mesh
// Intrinsic camera parameters: UVC WEBCAM
double f = 55; // focal length in mm
height/2}; // cy
// Some basic colors
-cv::Scalar red(0, 0, 255);
-cv::Scalar green(0,255,0);
-cv::Scalar blue(255,0,0);
-cv::Scalar yellow(0,255,255);
+Scalar red(0, 0, 255);
+Scalar green(0,255,0);
+Scalar blue(255,0,0);
+Scalar yellow(0,255,255);
// Robust Matcher parameters
int numKeyPoints = 2000; // number of detected keypoints
-float ratio = 0.70f; // ratio test
+float ratioTest = 0.70f; // ratio test
bool fast_match = true; // fastRobustMatch() or robustMatch()
// RANSAC parameters
int minInliersKalman = 30; // Kalman threshold updating
// PnP parameters
-int pnpMethod = cv::SOLVEPNP_ITERATIVE;
+int pnpMethod = SOLVEPNP_ITERATIVE;
/** Function headers **/
void help();
-void initKalmanFilter( cv::KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt);
-void updateKalmanFilter( cv::KalmanFilter &KF, cv::Mat &measurements,
- cv::Mat &translation_estimated, cv::Mat &rotation_estimated );
-void fillMeasurements( cv::Mat &measurements,
- const cv::Mat &translation_measured, const cv::Mat &rotation_measured);
+void initKalmanFilter( KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt);
+void updateKalmanFilter( KalmanFilter &KF, Mat &measurements,
+ Mat &translation_estimated, Mat &rotation_estimated );
+void fillMeasurements( Mat &measurements,
+ const Mat &translation_measured, const Mat &rotation_measured);
/** Main program **/
help();
- const cv::String keys =
+ const String keys =
"{help h | | print this message }"
"{video v | | path to recorded video }"
"{model | | path to yml model }"
"{method pnp |0 | PnP method: (0) ITERATIVE - (1) EPNP - (2) P3P - (3) DLS}"
"{fast f |true | use of robust fast match }"
;
- cv::CommandLineParser parser(argc, argv, keys);
+ CommandLineParser parser(argc, argv, keys);
if (parser.has("help"))
{
}
else
{
- video_read_path = parser.get<std::string>("video").size() > 0 ? parser.get<std::string>("video") : video_read_path;
- yml_read_path = parser.get<std::string>("model").size() > 0 ? parser.get<std::string>("model") : yml_read_path;
- ply_read_path = parser.get<std::string>("mesh").size() > 0 ? parser.get<std::string>("mesh") : ply_read_path;
+ video_read_path = parser.get<string>("video").size() > 0 ? parser.get<string>("video") : video_read_path;
+ yml_read_path = parser.get<string>("model").size() > 0 ? parser.get<string>("model") : yml_read_path;
+ ply_read_path = parser.get<string>("mesh").size() > 0 ? parser.get<string>("mesh") : ply_read_path;
numKeyPoints = !parser.has("keypoints") ? parser.get<int>("keypoints") : numKeyPoints;
- ratio = !parser.has("ratio") ? parser.get<float>("ratio") : ratio;
+ ratioTest = !parser.has("ratio") ? parser.get<float>("ratio") : ratioTest;
fast_match = !parser.has("fast") ? parser.get<bool>("fast") : fast_match;
iterationsCount = !parser.has("iterations") ? parser.get<int>("iterations") : iterationsCount;
reprojectionError = !parser.has("error") ? parser.get<float>("error") : reprojectionError;
RobustMatcher rmatcher; // instantiate RobustMatcher
- cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints); // instatiate ORB feature detector
- cv::DescriptorExtractor * extractor = new cv::OrbDescriptorExtractor(); // instatiate ORB descriptor extractor
+ Ptr<FeatureDetector> orb = ORB::create();
- rmatcher.setFeatureDetector(detector); // set feature detector
- rmatcher.setDescriptorExtractor(extractor); // set descriptor extractor
+ rmatcher.setFeatureDetector(orb); // set feature detector
+ rmatcher.setDescriptorExtractor(orb); // set descriptor extractor
- cv::Ptr<cv::flann::IndexParams> indexParams = cv::makePtr<cv::flann::LshIndexParams>(6, 12, 1); // instantiate LSH index parameters
- cv::Ptr<cv::flann::SearchParams> searchParams = cv::makePtr<cv::flann::SearchParams>(50); // instantiate flann search parameters
+ Ptr<flann::IndexParams> indexParams = makePtr<flann::LshIndexParams>(6, 12, 1); // instantiate LSH index parameters
+ Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>(50); // instantiate flann search parameters
- cv::DescriptorMatcher * matcher = new cv::FlannBasedMatcher(indexParams, searchParams); // instantiate FlannBased matcher
+ // instantiate FlannBased matcher
+ Ptr<DescriptorMatcher> matcher = makePtr<FlannBasedMatcher>(indexParams, searchParams);
rmatcher.setDescriptorMatcher(matcher); // set matcher
- rmatcher.setRatio(ratio); // set ratio test parameter
+ rmatcher.setRatio(ratioTest); // set ratio test parameter
- cv::KalmanFilter KF; // instantiate Kalman Filter
+ KalmanFilter KF; // instantiate Kalman Filter
int nStates = 18; // the number of states
int nMeasurements = 6; // the number of measured states
int nInputs = 0; // the number of control actions
double dt = 0.125; // time between measurements (1/FPS)
initKalmanFilter(KF, nStates, nMeasurements, nInputs, dt); // init function
- cv::Mat measurements(nMeasurements, 1, CV_64F); measurements.setTo(cv::Scalar(0));
+ Mat measurements(nMeasurements, 1, CV_64F); measurements.setTo(Scalar(0));
bool good_measurement = false;
// Get the MODEL INFO
- std::vector<cv::Point3f> list_points3d_model = model.get_points3d(); // list with model 3D coordinates
- cv::Mat descriptors_model = model.get_descriptors(); // list with descriptors of each 3D coordinate
+ vector<Point3f> list_points3d_model = model.get_points3d(); // list with model 3D coordinates
+ Mat descriptors_model = model.get_descriptors(); // list with descriptors of each 3D coordinate
// Create & Open Window
- cv::namedWindow("REAL TIME DEMO", cv::WINDOW_KEEPRATIO);
+ namedWindow("REAL TIME DEMO", WINDOW_KEEPRATIO);
- cv::VideoCapture cap; // instantiate VideoCapture
+ VideoCapture cap; // instantiate VideoCapture
cap.open(video_read_path); // open a recorded video
if(!cap.isOpened()) // check if we succeeded
{
- std::cout << "Could not open the camera device" << std::endl;
+ cout << "Could not open the camera device" << endl;
return -1;
}
// start the clock
time(&start);
- cv::Mat frame, frame_vis;
+ Mat frame, frame_vis;
- while(cap.read(frame) && cv::waitKey(30) != 27) // capture frame until ESC is pressed
+ while(cap.read(frame) && waitKey(30) != 27) // capture frame until ESC is pressed
{
frame_vis = frame.clone(); // refresh visualisation frame
// -- Step 1: Robust matching between model descriptors and scene descriptors
- std::vector<cv::DMatch> good_matches; // to obtain the 3D points of the model
- std::vector<cv::KeyPoint> keypoints_scene; // to obtain the 2D points of the scene
+ vector<DMatch> good_matches; // to obtain the 3D points of the model
+ vector<KeyPoint> keypoints_scene; // to obtain the 2D points of the scene
if(fast_match)
// -- Step 2: Find out the 2D/3D correspondences
- std::vector<cv::Point3f> list_points3d_model_match; // container for the model 3D coordinates found in the scene
- std::vector<cv::Point2f> list_points2d_scene_match; // container for the model 2D coordinates found in the scene
+ vector<Point3f> list_points3d_model_match; // container for the model 3D coordinates found in the scene
+ vector<Point2f> list_points2d_scene_match; // container for the model 2D coordinates found in the scene
for(unsigned int match_index = 0; match_index < good_matches.size(); ++match_index)
{
- cv::Point3f point3d_model = list_points3d_model[ good_matches[match_index].trainIdx ]; // 3D point from model
- cv::Point2f point2d_scene = keypoints_scene[ good_matches[match_index].queryIdx ].pt; // 2D point from the scene
+ Point3f point3d_model = list_points3d_model[ good_matches[match_index].trainIdx ]; // 3D point from model
+ Point2f point2d_scene = keypoints_scene[ good_matches[match_index].queryIdx ].pt; // 2D point from the scene
list_points3d_model_match.push_back(point3d_model); // add 3D point
list_points2d_scene_match.push_back(point2d_scene); // add 2D point
}
draw2DPoints(frame_vis, list_points2d_scene_match, red);
- cv::Mat inliers_idx;
- std::vector<cv::Point2f> list_points2d_inliers;
+ Mat inliers_idx;
+ vector<Point2f> list_points2d_inliers;
if(good_matches.size() > 0) // None matches, then RANSAC crashes
{
for(int inliers_index = 0; inliers_index < inliers_idx.rows; ++inliers_index)
{
int n = inliers_idx.at<int>(inliers_index); // i-inlier
- cv::Point2f point2d = list_points2d_scene_match[n]; // i-inlier point 2D
+ Point2f point2d = list_points2d_scene_match[n]; // i-inlier point 2D
list_points2d_inliers.push_back(point2d); // add i-inlier to list
}
{
// Get the measured translation
- cv::Mat translation_measured(3, 1, CV_64F);
+ Mat translation_measured(3, 1, CV_64F);
translation_measured = pnp_detection.get_t_matrix();
// Get the measured rotation
- cv::Mat rotation_measured(3, 3, CV_64F);
+ Mat rotation_measured(3, 3, CV_64F);
rotation_measured = pnp_detection.get_R_matrix();
// fill the measurements vector
}
// Instantiate estimated translation and rotation
- cv::Mat translation_estimated(3, 1, CV_64F);
- cv::Mat rotation_estimated(3, 3, CV_64F);
+ Mat translation_estimated(3, 1, CV_64F);
+ Mat rotation_estimated(3, 3, CV_64F);
// update the Kalman filter with good measurements
updateKalmanFilter( KF, measurements,
}
float l = 5;
- std::vector<cv::Point2f> pose_points2d;
- pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(0,0,0))); // axis center
- pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(l,0,0))); // axis x
- pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(0,l,0))); // axis y
- pose_points2d.push_back(pnp_detection_est.backproject3DPoint(cv::Point3f(0,0,l))); // axis z
+ vector<Point2f> pose_points2d;
+ pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(0,0,0))); // axis center
+ pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(l,0,0))); // axis x
+ pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(0,l,0))); // axis y
+ pose_points2d.push_back(pnp_detection_est.backproject3DPoint(Point3f(0,0,l))); // axis z
draw3DCoordinateAxes(frame_vis, pose_points2d); // draw axes
// FRAME RATE
// Draw some debug text
int inliers_int = inliers_idx.rows;
int outliers_int = (int)good_matches.size() - inliers_int;
- std::string inliers_str = IntToString(inliers_int);
- std::string outliers_str = IntToString(outliers_int);
- std::string n = IntToString((int)good_matches.size());
- std::string text = "Found " + inliers_str + " of " + n + " matches";
- std::string text2 = "Inliers: " + inliers_str + " - Outliers: " + outliers_str;
+ string inliers_str = IntToString(inliers_int);
+ string outliers_str = IntToString(outliers_int);
+ string n = IntToString((int)good_matches.size());
+ string text = "Found " + inliers_str + " of " + n + " matches";
+ string text2 = "Inliers: " + inliers_str + " - Outliers: " + outliers_str;
drawText(frame_vis, text, green);
drawText2(frame_vis, text2, red);
- cv::imshow("REAL TIME DEMO", frame_vis);
+ imshow("REAL TIME DEMO", frame_vis);
}
// Close and Destroy Window
- cv::destroyWindow("REAL TIME DEMO");
+ destroyWindow("REAL TIME DEMO");
- std::cout << "GOODBYE ..." << std::endl;
+ cout << "GOODBYE ..." << endl;
}
/**********************************************************************************************************/
void help()
{
-std::cout
-<< "--------------------------------------------------------------------------" << std::endl
+cout
+<< "--------------------------------------------------------------------------" << endl
<< "This program shows how to detect an object given its 3D textured model. You can choose to "
-<< "use a recorded video or the webcam." << std::endl
-<< "Usage:" << std::endl
-<< "./cpp-tutorial-pnp_detection -help" << std::endl
-<< "Keys:" << std::endl
-<< "'esc' - to quit." << std::endl
-<< "--------------------------------------------------------------------------" << std::endl
-<< std::endl;
+<< "use a recorded video or the webcam." << endl
+<< "Usage:" << endl
+<< "./cpp-tutorial-pnp_detection -help" << endl
+<< "Keys:" << endl
+<< "'esc' - to quit." << endl
+<< "--------------------------------------------------------------------------" << endl
+<< endl;
}
/**********************************************************************************************************/
-void initKalmanFilter(cv::KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt)
+void initKalmanFilter(KalmanFilter &KF, int nStates, int nMeasurements, int nInputs, double dt)
{
KF.init(nStates, nMeasurements, nInputs, CV_64F); // init Kalman Filter
- cv::setIdentity(KF.processNoiseCov, cv::Scalar::all(1e-5)); // set process noise
- cv::setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-2)); // set measurement noise
- cv::setIdentity(KF.errorCovPost, cv::Scalar::all(1)); // error covariance
+ setIdentity(KF.processNoiseCov, Scalar::all(1e-5)); // set process noise
+ setIdentity(KF.measurementNoiseCov, Scalar::all(1e-2)); // set measurement noise
+ setIdentity(KF.errorCovPost, Scalar::all(1)); // error covariance
/** DYNAMIC MODEL **/
}
/**********************************************************************************************************/
-void updateKalmanFilter( cv::KalmanFilter &KF, cv::Mat &measurement,
- cv::Mat &translation_estimated, cv::Mat &rotation_estimated )
+void updateKalmanFilter( KalmanFilter &KF, Mat &measurement,
+ Mat &translation_estimated, Mat &rotation_estimated )
{
// First predict, to update the internal statePre variable
- cv::Mat prediction = KF.predict();
+ Mat prediction = KF.predict();
// The "correct" phase that is going to use the predicted value and our measurement
- cv::Mat estimated = KF.correct(measurement);
+ Mat estimated = KF.correct(measurement);
// Estimated translation
translation_estimated.at<double>(0) = estimated.at<double>(0);
translation_estimated.at<double>(2) = estimated.at<double>(2);
// Estimated euler angles
- cv::Mat eulers_estimated(3, 1, CV_64F);
+ Mat eulers_estimated(3, 1, CV_64F);
eulers_estimated.at<double>(0) = estimated.at<double>(9);
eulers_estimated.at<double>(1) = estimated.at<double>(10);
eulers_estimated.at<double>(2) = estimated.at<double>(11);
}
/**********************************************************************************************************/
-void fillMeasurements( cv::Mat &measurements,
- const cv::Mat &translation_measured, const cv::Mat &rotation_measured)
+void fillMeasurements( Mat &measurements,
+ const Mat &translation_measured, const Mat &rotation_measured)
{
// Convert rotation matrix to euler angles
- cv::Mat measured_eulers(3, 1, CV_64F);
+ Mat measured_eulers(3, 1, CV_64F);
measured_eulers = rot2euler(rotation_measured);
// Set measurement to predict
#include "ModelRegistration.h"
#include "Utils.h"
+using namespace cv;
+using namespace std;
+
/** GLOBAL VARIABLES **/
-std::string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial
+string tutorial_path = "../../samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/"; // path to tutorial
-std::string img_path = tutorial_path + "Data/resized_IMG_3875.JPG"; // image to register
-std::string ply_read_path = tutorial_path + "Data/box.ply"; // object mesh
-std::string write_path = tutorial_path + "Data/cookies_ORB.yml"; // output file
+string img_path = tutorial_path + "Data/resized_IMG_3875.JPG"; // image to register
+string ply_read_path = tutorial_path + "Data/box.ply"; // object mesh
+string write_path = tutorial_path + "Data/cookies_ORB.yml"; // output file
// Boolean to know whether the registration is done
bool end_registration = false;
int pts[] = {1, 2, 3, 4, 5, 6, 7, 8}; // 3 -> 4
// Some basic colors
-cv::Scalar red(0, 0, 255);
-cv::Scalar green(0,255,0);
-cv::Scalar blue(255,0,0);
-cv::Scalar yellow(0,255,255);
+Scalar red(0, 0, 255);
+Scalar green(0,255,0);
+Scalar blue(255,0,0);
+Scalar yellow(0,255,255);
/*
* CREATE MODEL REGISTRATION OBJECT
// Mouse events for model registration
static void onMouseModelRegistration( int event, int x, int y, int, void* )
{
- if ( event == cv::EVENT_LBUTTONUP )
+ if ( event == EVENT_LBUTTONUP )
{
int n_regist = registration.getNumRegist();
int n_vertex = pts[n_regist];
- cv::Point2f point_2d = cv::Point2f((float)x,(float)y);
- cv::Point3f point_3d = mesh.getVertex(n_vertex-1);
+ Point2f point_2d = Point2f((float)x,(float)y);
+ Point3f point_3d = mesh.getVertex(n_vertex-1);
bool is_registrable = registration.is_registrable();
if (is_registrable)
//Instantiate robust matcher: detector, extractor, matcher
RobustMatcher rmatcher;
- cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints);
+ Ptr<FeatureDetector> detector = ORB::create(numKeyPoints);
rmatcher.setFeatureDetector(detector);
/** GROUND TRUTH OF THE FIRST IMAGE **/
// Create & Open Window
- cv::namedWindow("MODEL REGISTRATION", cv::WINDOW_KEEPRATIO);
+ namedWindow("MODEL REGISTRATION", WINDOW_KEEPRATIO);
// Set up the mouse events
- cv::setMouseCallback("MODEL REGISTRATION", onMouseModelRegistration, 0 );
+ setMouseCallback("MODEL REGISTRATION", onMouseModelRegistration, 0 );
// Open the image to register
- cv::Mat img_in = cv::imread(img_path, cv::IMREAD_COLOR);
- cv::Mat img_vis = img_in.clone();
+ Mat img_in = imread(img_path, IMREAD_COLOR);
+ Mat img_vis = img_in.clone();
if (!img_in.data) {
- std::cout << "Could not open or find the image" << std::endl;
+ cout << "Could not open or find the image" << endl;
return -1;
}
int num_registrations = n;
registration.setNumMax(num_registrations);
- std::cout << "Click the box corners ..." << std::endl;
- std::cout << "Waiting ..." << std::endl;
+ cout << "Click the box corners ..." << endl;
+ cout << "Waiting ..." << endl;
// Loop until all the points are registered
- while ( cv::waitKey(30) < 0 )
+ while ( waitKey(30) < 0 )
{
// Refresh debug image
img_vis = img_in.clone();
// Current registered points
- std::vector<cv::Point2f> list_points2d = registration.get_points2d();
- std::vector<cv::Point3f> list_points3d = registration.get_points3d();
+ vector<Point2f> list_points2d = registration.get_points2d();
+ vector<Point3f> list_points3d = registration.get_points3d();
// Draw current registered points
drawPoints(img_vis, list_points2d, list_points3d, red);
// Draw debug text
int n_regist = registration.getNumRegist();
int n_vertex = pts[n_regist];
- cv::Point3f current_poin3d = mesh.getVertex(n_vertex-1);
+ Point3f current_poin3d = mesh.getVertex(n_vertex-1);
drawQuestion(img_vis, current_poin3d, green);
drawCounter(img_vis, registration.getNumRegist(), registration.getNumMax(), red);
}
// Show the image
- cv::imshow("MODEL REGISTRATION", img_vis);
+ imshow("MODEL REGISTRATION", img_vis);
}
/** COMPUTE CAMERA POSE **/
- std::cout << "COMPUTING POSE ..." << std::endl;
+ cout << "COMPUTING POSE ..." << endl;
// The list of registered points
- std::vector<cv::Point2f> list_points2d = registration.get_points2d();
- std::vector<cv::Point3f> list_points3d = registration.get_points3d();
+ vector<Point2f> list_points2d = registration.get_points2d();
+ vector<Point3f> list_points3d = registration.get_points3d();
// Estimate pose given the registered points
- bool is_correspondence = pnp_registration.estimatePose(list_points3d, list_points2d, cv::SOLVEPNP_ITERATIVE);
+ bool is_correspondence = pnp_registration.estimatePose(list_points3d, list_points2d, SOLVEPNP_ITERATIVE);
if ( is_correspondence )
{
- std::cout << "Correspondence found" << std::endl;
+ cout << "Correspondence found" << endl;
// Compute all the 2D points of the mesh to verify the algorithm and draw it
- std::vector<cv::Point2f> list_points2d_mesh = pnp_registration.verify_points(&mesh);
+ vector<Point2f> list_points2d_mesh = pnp_registration.verify_points(&mesh);
draw2DPoints(img_vis, list_points2d_mesh, green);
} else {
- std::cout << "Correspondence not found" << std::endl << std::endl;
+ cout << "Correspondence not found" << endl << endl;
}
// Show the image
- cv::imshow("MODEL REGISTRATION", img_vis);
+ imshow("MODEL REGISTRATION", img_vis);
// Show image until ESC pressed
- cv::waitKey(0);
+ waitKey(0);
/** COMPUTE the 3D coordinates of the image keypoints **/
// Containers for keypoints and descriptors of the model
- std::vector<cv::KeyPoint> keypoints_model;
- cv::Mat descriptors;
+ vector<KeyPoint> keypoints_model;
+ Mat descriptors;
// Compute keypoints and descriptors
rmatcher.computeKeyPoints(img_in, keypoints_model);
// Check if keypoints are on the surface of the registration image and add to the model
for (unsigned int i = 0; i < keypoints_model.size(); ++i) {
- cv::Point2f point2d(keypoints_model[i].pt);
- cv::Point3f point3d;
+ Point2f point2d(keypoints_model[i].pt);
+ Point3f point3d;
bool on_surface = pnp_registration.backproject2DPoint(&mesh, point2d, point3d);
if (on_surface)
{
img_vis = img_in.clone();
// The list of the points2d of the model
- std::vector<cv::Point2f> list_points_in = model.get_points2d_in();
- std::vector<cv::Point2f> list_points_out = model.get_points2d_out();
+ vector<Point2f> list_points_in = model.get_points2d_in();
+ vector<Point2f> list_points_out = model.get_points2d_out();
// Draw some debug text
- std::string num = IntToString((int)list_points_in.size());
- std::string text = "There are " + num + " inliers";
+ string num = IntToString((int)list_points_in.size());
+ string text = "There are " + num + " inliers";
drawText(img_vis, text, green);
// Draw some debug text
draw2DPoints(img_vis, list_points_out, red);
// Show the image
- cv::imshow("MODEL REGISTRATION", img_vis);
+ imshow("MODEL REGISTRATION", img_vis);
// Wait until ESC pressed
- cv::waitKey(0);
+ waitKey(0);
// Close and Destroy Window
- cv::destroyWindow("MODEL REGISTRATION");
+ destroyWindow("MODEL REGISTRATION");
- std::cout << "GOODBYE" << std::endl;
+ cout << "GOODBYE" << endl;
}
/**********************************************************************************************************/
void help()
{
- std::cout
- << "--------------------------------------------------------------------------" << std::endl
- << "This program shows how to create your 3D textured model. " << std::endl
- << "Usage:" << std::endl
- << "./cpp-tutorial-pnp_registration" << std::endl
- << "--------------------------------------------------------------------------" << std::endl
- << std::endl;
+ cout
+ << "--------------------------------------------------------------------------" << endl
+ << "This program shows how to create your 3D textured model. " << endl
+ << "Usage:" << endl
+ << "./cpp-tutorial-pnp_registration" << endl
+ << "--------------------------------------------------------------------------" << endl
+ << endl;
}