+set(HAVE_FFMPEG 1)
set(NEW_FFMPEG 1)
set(HAVE_FFMPEG_CODEC 1)
set(HAVE_FFMPEG_FORMAT 1)
set(GSTREAMER_RIFF_VERSION ${ALIASOF_gstreamer-riff-0.10_VERSION})
set(GSTREAMER_PBUTILS_VERSION ${ALIASOF_gstreamer-pbutils-0.10_VERSION})
endif()
-
+
endif(WITH_GSTREAMER AND NOT WITH_GSTREAMER_1_X)
# if gstreamer 0.10 was not found, or we specified we wanted 1.x, try to find it
CHECK_MODULE(gstreamer-app-1.0 HAVE_GSTREAMER_APP)
CHECK_MODULE(gstreamer-riff-1.0 HAVE_GSTREAMER_RIFF)
CHECK_MODULE(gstreamer-pbutils-1.0 HAVE_GSTREAMER_PBUTILS)
-
+
if(HAVE_GSTREAMER_BASE AND HAVE_GSTREAMER_VIDEO AND HAVE_GSTREAMER_APP AND HAVE_GSTREAMER_RIFF AND HAVE_GSTREAMER_PBUTILS)
set(HAVE_GSTREAMER TRUE)
set(GSTREAMER_BASE_VERSION ${ALIASOF_gstreamer-base-1.0_VERSION})
set(GSTREAMER_RIFF_VERSION ${ALIASOF_gstreamer-riff-1.0_VERSION})
set(GSTREAMER_PBUTILS_VERSION ${ALIASOF_gstreamer-pbutils-1.0_VERSION})
endif()
-
+
endif(WITH_GSTREAMER_1_X OR NOT HAVE_GSTREAMER)
# --- unicap ---
CvMat* _v = 0;
CvPoint2D32f* v;
- IplImage* img = 0;
+ IplImage img;
IplImage* gray = 0;
IplImage* thresh = 0;
/* read the image */
sprintf( filename, "%s%s", filepath, imgname );
- img = cvLoadImage( filename );
+ cv::Mat img2 = cv::imread( filename );
+ img = img2;
- if( !img )
+ if( img2.empty() )
{
ts->printf( cvtest::TS::LOG, "one of chessboard images can't be read: %s\n", filename );
if( max_idx == 1 )
ts->printf(cvtest::TS::LOG, "%s: chessboard %d:\n", imgname, is_chessboard);
- gray = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
- thresh = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
- cvCvtColor( img, gray, CV_BGR2GRAY );
+ gray = cvCreateImage( cvSize( img.width, img.height ), IPL_DEPTH_8U, 1 );
+ thresh = cvCreateImage( cvSize( img.width, img.height ), IPL_DEPTH_8U, 1 );
+ cvCvtColor( &img, gray, CV_BGR2GRAY );
count0 = pattern_size.width*pattern_size.height;
find_chessboard_time*1e-6, find_chessboard_time/num_pixels);
cvReleaseMat( &_v );
- cvReleaseImage( &img );
cvReleaseImage( &gray );
cvReleaseImage( &thresh );
progress = update_progress( progress, idx-1, max_idx, 0 );
/* release occupied memory */
cvReleaseMat( &_v );
cvReleaseFileStorage( &fs );
- cvReleaseImage( &img );
cvReleaseImage( &gray );
cvReleaseImage( &thresh );
#ifdef HAVE_OPENCV_HIGHGUI
namedWindow("templ",1);
imshow("templ",templ_color);
-
- cvWaitKey(0);
+ waitKey();
#else
CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without GUI support");
#endif
CV_EXPORTS_AS(randShuffle) void randShuffle_(InputOutputArray dst, double iterFactor = 1.);
+enum { FILLED = -1,
+ LINE_4 = 4,
+ LINE_8 = 8,
+ LINE_AA = 16
+ };
+
//! draws the line segment (pt1, pt2) in the image
CV_EXPORTS_W void line(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color,
- int thickness = 1, int lineType = 8, int shift = 0);
+ int thickness = 1, int lineType = LINE_8, int shift = 0);
//! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image
CV_EXPORTS_W void rectangle(CV_IN_OUT Mat& img, Point pt1, Point pt2,
const Scalar& color, int thickness = 1,
- int lineType = 8, int shift = 0);
+ int lineType = LINE_8, int shift = 0);
//! draws the rectangle outline or a solid rectangle covering rec in the image
CV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec,
const Scalar& color, int thickness = 1,
- int lineType = 8, int shift = 0);
+ int lineType = LINE_8, int shift = 0);
//! draws the circle outline or a solid circle in the image
CV_EXPORTS_W void circle(CV_IN_OUT Mat& img, Point center, int radius,
const Scalar& color, int thickness = 1,
- int lineType = 8, int shift = 0);
+ int lineType = LINE_8, int shift = 0);
//! draws an elliptic arc, ellipse sector or a rotated ellipse in the image
CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, Point center, Size axes,
double angle, double startAngle, double endAngle,
const Scalar& color, int thickness = 1,
- int lineType = 8, int shift = 0);
+ int lineType = LINE_8, int shift = 0);
//! draws a rotated ellipse in the image
CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, const RotatedRect& box, const Scalar& color,
- int thickness = 1, int lineType = 8);
+ int thickness = 1, int lineType = LINE_8);
//! draws a filled convex polygon in the image
CV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts,
- const Scalar& color, int lineType = 8,
+ const Scalar& color, int lineType = LINE_8,
int shift = 0);
CV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points,
- const Scalar& color, int lineType = 8,
+ const Scalar& color, int lineType = LINE_8,
int shift = 0);
//! fills an area bounded by one or more polygons
CV_EXPORTS void fillPoly(Mat& img, const Point** pts,
const int* npts, int ncontours,
- const Scalar& color, int lineType = 8, int shift = 0,
+ const Scalar& color, int lineType = LINE_8, int shift = 0,
Point offset = Point() );
CV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts,
- const Scalar& color, int lineType = 8, int shift = 0,
+ const Scalar& color, int lineType = LINE_8, int shift = 0,
Point offset = Point() );
//! draws one or more polygonal curves
CV_EXPORTS void polylines(Mat& img, const Point* const* pts, const int* npts,
int ncontours, bool isClosed, const Scalar& color,
- int thickness = 1, int lineType = 8, int shift = 0 );
+ int thickness = 1, int lineType = LINE_8, int shift = 0 );
CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts,
bool isClosed, const Scalar& color,
- int thickness = 1, int lineType = 8, int shift = 0 );
+ int thickness = 1, int lineType = LINE_8, int shift = 0 );
//! draws contours in the image
CV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours,
int contourIdx, const Scalar& color,
- int thickness = 1, int lineType = 8,
+ int thickness = 1, int lineType = LINE_8,
InputArray hierarchy = noArray(),
int maxLevel = INT_MAX, Point offset = Point() );
//! renders text string in the image
CV_EXPORTS_W void putText( Mat& img, const String& text, Point org,
int fontFace, double fontScale, Scalar color,
- int thickness = 1, int lineType = 8,
+ int thickness = 1, int lineType = LINE_8,
bool bottomLeftOrigin = false );
//! returns bounding box of the text string
PCA pca(pcaset, // pass the data
Mat(), // we do not have a pre-computed mean vector,
// so let the PCA engine to compute it
- CV_PCA_DATA_AS_ROW, // indicate that the vectors
+ PCA::DATA_AS_ROW, // indicate that the vectors
// are stored as matrix rows
- // (use CV_PCA_DATA_AS_COL if the vectors are
+ // (use PCA::DATA_AS_COL if the vectors are
// the matrix columns)
maxComponents // specify, how many principal components to retain
);
class CV_EXPORTS PCA
{
public:
+ enum { DATA_AS_ROW = 0,
+ DATA_AS_COL = 1,
+ USE_AVG = 2
+ };
+
//! default constructor
PCA();
if(WIN32)
link_directories("${OpenCV_SOURCE_DIR}/3rdparty/lib") # for ffmpeg wrapper only
include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include") # for directshow in VS2005 and multi-monitor support on MinGW
+ include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include/ffmpeg_") # for tests
endif()
if(UNIX)
#ifndef __OPENCV_HIGHGUI_HPP__
#define __OPENCV_HIGHGUI_HPP__
-#include "opencv2/highgui/highgui_c.h"
-
-#ifdef __cplusplus
#include "opencv2/core.hpp"
-struct CvCapture;
-struct CvVideoWriter;
+///////////////////////// graphical user interface //////////////////////////
namespace cv
{
-enum {
- // Flags for namedWindow
- WINDOW_NORMAL = CV_WINDOW_NORMAL, // the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size
- WINDOW_AUTOSIZE = CV_WINDOW_AUTOSIZE, // the user cannot resize the window, the size is constrainted by the image displayed
- WINDOW_OPENGL = CV_WINDOW_OPENGL, // window with opengl support
-
- // Flags for set / getWindowProperty
- WND_PROP_FULLSCREEN = CV_WND_PROP_FULLSCREEN, // fullscreen property
- WND_PROP_AUTOSIZE = CV_WND_PROP_AUTOSIZE, // autosize property
- WND_PROP_ASPECT_RATIO = CV_WND_PROP_ASPECTRATIO, // window's aspect ration
- WND_PROP_OPENGL = CV_WND_PROP_OPENGL // opengl support
-};
+// Flags for namedWindow
+enum { WINDOW_NORMAL = 0x00000000, // the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size
+ WINDOW_AUTOSIZE = 0x00000001, // the user cannot resize the window, the size is constrainted by the image displayed
+ WINDOW_OPENGL = 0x00001000, // window with opengl support
+
+ WINDOW_FULLSCREEN = 1, // change the window to fullscreen
+ WINDOW_FREERATIO = 0x00000100, // the image expends as much as it can (no ratio constraint)
+ WINDOW_KEEPRATIO = 0x00000000 // the ratio of the image is respected
+ };
+
+// Flags for set / getWindowProperty
+enum { WND_PROP_FULLSCREEN = 0, // fullscreen property (can be WINDOW_NORMAL or WINDOW_FULLSCREEN)
+ WND_PROP_AUTOSIZE = 1, // autosize property (can be WINDOW_NORMAL or WINDOW_AUTOSIZE)
+ WND_PROP_ASPECT_RATIO = 2, // window's aspect ration (can be set to WINDOW_FREERATIO or WINDOW_KEEPRATIO);
+ WND_PROP_OPENGL = 3 // opengl support
+ };
+
+enum { EVENT_MOUSEMOVE = 0,
+ EVENT_LBUTTONDOWN = 1,
+ EVENT_RBUTTONDOWN = 2,
+ EVENT_MBUTTONDOWN = 3,
+ EVENT_LBUTTONUP = 4,
+ EVENT_RBUTTONUP = 5,
+ EVENT_MBUTTONUP = 6,
+ EVENT_LBUTTONDBLCLK = 7,
+ EVENT_RBUTTONDBLCLK = 8,
+ EVENT_MBUTTONDBLCLK = 9
+ };
+
+enum { EVENT_FLAG_LBUTTON = 1,
+ EVENT_FLAG_RBUTTON = 2,
+ EVENT_FLAG_MBUTTON = 4,
+ EVENT_FLAG_CTRLKEY = 8,
+ EVENT_FLAG_SHIFTKEY = 16,
+ EVENT_FLAG_ALTKEY = 32
+ };
+
+// Qt font
+enum { QT_FONT_LIGHT = 25, //QFont::Light,
+ QT_FONT_NORMAL = 50, //QFont::Normal,
+ QT_FONT_DEMIBOLD = 63, //QFont::DemiBold,
+ QT_FONT_BOLD = 75, //QFont::Bold,
+ QT_FONT_BLACK = 87 //QFont::Black
+ };
+
+// Qt font style
+enum { QT_STYLE_NORMAL = 0, //QFont::StyleNormal,
+ QT_STYLE_ITALIC = 1, //QFont::StyleItalic,
+ QT_STYLE_OBLIQUE = 2 //QFont::StyleOblique
+ };
+
+// Qt "button" type
+enum { QT_PUSH_BUTTON = 0,
+ QT_CHECKBOX = 1,
+ QT_RADIOBOX = 2
+ };
+
+
+typedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata);
+typedef void (*TrackbarCallback)(int pos, void* userdata);
+typedef void (*OpenGlDrawCallback)(void* userdata);
+typedef void (*ButtonCallback)(int state, void* userdata);
+
CV_EXPORTS_W void namedWindow(const String& winname, int flags = WINDOW_AUTOSIZE);
+
CV_EXPORTS_W void destroyWindow(const String& winname);
+
CV_EXPORTS_W void destroyAllWindows();
CV_EXPORTS_W int startWindowThread();
CV_EXPORTS_W void imshow(const String& winname, InputArray mat);
CV_EXPORTS_W void resizeWindow(const String& winname, int width, int height);
+
CV_EXPORTS_W void moveWindow(const String& winname, int x, int y);
-CV_EXPORTS_W void setWindowProperty(const String& winname, int prop_id, double prop_value);//YV
-CV_EXPORTS_W double getWindowProperty(const String& winname, int prop_id);//YV
+CV_EXPORTS_W void setWindowProperty(const String& winname, int prop_id, double prop_value);
-enum
-{
- EVENT_MOUSEMOVE =0,
- EVENT_LBUTTONDOWN =1,
- EVENT_RBUTTONDOWN =2,
- EVENT_MBUTTONDOWN =3,
- EVENT_LBUTTONUP =4,
- EVENT_RBUTTONUP =5,
- EVENT_MBUTTONUP =6,
- EVENT_LBUTTONDBLCLK =7,
- EVENT_RBUTTONDBLCLK =8,
- EVENT_MBUTTONDBLCLK =9
-};
-
-enum
-{
- EVENT_FLAG_LBUTTON =1,
- EVENT_FLAG_RBUTTON =2,
- EVENT_FLAG_MBUTTON =4,
- EVENT_FLAG_CTRLKEY =8,
- EVENT_FLAG_SHIFTKEY =16,
- EVENT_FLAG_ALTKEY =32
-};
-
-typedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata);
+CV_EXPORTS_W double getWindowProperty(const String& winname, int prop_id);
//! assigns callback for mouse events
CV_EXPORTS void setMouseCallback(const String& winname, MouseCallback onMouse, void* userdata = 0);
-
-typedef void (CV_CDECL *TrackbarCallback)(int pos, void* userdata);
-
CV_EXPORTS int createTrackbar(const String& trackbarname, const String& winname,
int* value, int count,
TrackbarCallback onChange = 0,
void* userdata = 0);
CV_EXPORTS_W int getTrackbarPos(const String& trackbarname, const String& winname);
+
CV_EXPORTS_W void setTrackbarPos(const String& trackbarname, const String& winname, int pos);
-// OpenGL support
-typedef void (*OpenGlDrawCallback)(void* userdata);
+// OpenGL support
CV_EXPORTS void setOpenGlDrawCallback(const String& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata = 0);
CV_EXPORTS void setOpenGlContext(const String& winname);
CV_EXPORTS void updateWindow(const String& winname);
-//Only for Qt
-CV_EXPORTS CvFont fontQt(const String& nameFont, int pointSize=-1,
- Scalar color=Scalar::all(0), int weight=CV_FONT_NORMAL,
- int style=CV_STYLE_NORMAL, int spacing=0);
-CV_EXPORTS void addText( const Mat& img, const String& text, Point org, CvFont font);
+// Only for Qt
+
+struct QtFont
+{
+ const char* nameFont; // Qt: nameFont
+ Scalar color; // Qt: ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component])
+ int font_face; // Qt: bool italic
+ const int* ascii; // font data and metrics
+ const int* greek;
+ const int* cyrillic;
+ float hscale, vscale;
+ float shear; // slope coefficient: 0 - normal, >0 - italic
+ int thickness; // Qt: weight
+ float dx; // horizontal interval between letters
+ int line_type; // Qt: PointSize
+};
+
+CV_EXPORTS QtFont fontQt(const String& nameFont, int pointSize = -1,
+ Scalar color = Scalar::all(0), int weight = QT_FONT_NORMAL,
+ int style = QT_STYLE_NORMAL, int spacing = 0);
-CV_EXPORTS void displayOverlay(const String& winname, const String& text, int delayms CV_DEFAULT(0));
-CV_EXPORTS void displayStatusBar(const String& winname, const String& text, int delayms CV_DEFAULT(0));
+CV_EXPORTS void addText( const Mat& img, const String& text, Point org, const QtFont& font);
+
+CV_EXPORTS void displayOverlay(const String& winname, const String& text, int delayms = 0);
+
+CV_EXPORTS void displayStatusBar(const String& winname, const String& text, int delayms = 0);
CV_EXPORTS void saveWindowParameters(const String& windowName);
+
CV_EXPORTS void loadWindowParameters(const String& windowName);
+
CV_EXPORTS int startLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]);
+
CV_EXPORTS void stopLoop();
-typedef void (CV_CDECL *ButtonCallback)(int state, void* userdata);
CV_EXPORTS int createButton( const String& bar_name, ButtonCallback on_change,
- void* userdata=NULL, int type=CV_PUSH_BUTTON,
- bool initial_button_state=0);
+ void* userdata = 0, int type = QT_PUSH_BUTTON,
+ bool initial_button_state = false);
-//-------------------------
+} // cv
-enum
-{
- // 8bit, color or not
- IMREAD_UNCHANGED =-1,
- // 8bit, gray
- IMREAD_GRAYSCALE =0,
- // ?, color
- IMREAD_COLOR =1,
- // any depth, ?
- IMREAD_ANYDEPTH =2,
- // ?, any color
- IMREAD_ANYCOLOR =4
-};
-enum
+
+//////////////////////////////// image codec ////////////////////////////////
+namespace cv
{
- IMWRITE_JPEG_QUALITY =1,
- IMWRITE_PNG_COMPRESSION =16,
- IMWRITE_PNG_STRATEGY =17,
- IMWRITE_PNG_BILEVEL =18,
- IMWRITE_PNG_STRATEGY_DEFAULT =0,
- IMWRITE_PNG_STRATEGY_FILTERED =1,
- IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2,
- IMWRITE_PNG_STRATEGY_RLE =3,
- IMWRITE_PNG_STRATEGY_FIXED =4,
- IMWRITE_PXM_BINARY =32
-};
-CV_EXPORTS_W Mat imread( const String& filename, int flags=1 );
+enum { IMREAD_UNCHANGED = -1, // 8bit, color or not
+ IMREAD_GRAYSCALE = 0, // 8bit, gray
+ IMREAD_COLOR = 1, // ?, color
+ IMREAD_ANYDEPTH = 2, // any depth, ?
+ IMREAD_ANYCOLOR = 4 // ?, any color
+ };
+
+enum { IMWRITE_JPEG_QUALITY = 1,
+ IMWRITE_PNG_COMPRESSION = 16,
+ IMWRITE_PNG_STRATEGY = 17,
+ IMWRITE_PNG_BILEVEL = 18,
+ IMWRITE_PXM_BINARY = 32,
+ IMWRITE_WEBP_QUALITY = 64
+ };
+
+enum { IMWRITE_PNG_STRATEGY_DEFAULT = 0,
+ IMWRITE_PNG_STRATEGY_FILTERED = 1,
+ IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY = 2,
+ IMWRITE_PNG_STRATEGY_RLE = 3,
+ IMWRITE_PNG_STRATEGY_FIXED = 4
+ };
+
+CV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR );
+
CV_EXPORTS_W bool imwrite( const String& filename, InputArray img,
- const std::vector<int>& params=std::vector<int>());
+ const std::vector<int>& params = std::vector<int>());
+
CV_EXPORTS_W Mat imdecode( InputArray buf, int flags );
-CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst );
+
+CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst);
+
CV_EXPORTS_W bool imencode( const String& ext, InputArray img,
CV_OUT std::vector<uchar>& buf,
- const std::vector<int>& params=std::vector<int>());
+ const std::vector<int>& params = std::vector<int>());
+
+} // cv
+
+
-#ifndef CV_NO_VIDEO_CAPTURE_CPP_API
+////////////////////////////////// video io /////////////////////////////////
+
+typedef struct CvCapture CvCapture;
+typedef struct CvVideoWriter CvVideoWriter;
+
+namespace cv
+{
+
+// Camera API
+enum { CAP_ANY = 0, // autodetect
+ CAP_VFW = 200, // platform native
+ CAP_V4L = 200,
+ CAP_V4L2 = CAP_V4L,
+ CAP_FIREWARE = 300, // IEEE 1394 drivers
+ CAP_FIREWIRE = CAP_FIREWARE,
+ CAP_IEEE1394 = CAP_FIREWARE,
+ CAP_DC1394 = CAP_FIREWARE,
+ CAP_CMU1394 = CAP_FIREWARE,
+ CAP_QT = 500, // QuickTime
+ CAP_UNICAP = 600, // Unicap drivers
+ CAP_DSHOW = 700, // DirectShow (via videoInput)
+ CAP_PVAPI = 800, // PvAPI, Prosilica GigE SDK
+ CAP_OPENNI = 900, // OpenNI (for Kinect)
+ CAP_OPENNI_ASUS = 910, // OpenNI (for Asus Xtion)
+ CAP_ANDROID = 1000, // Android
+ CAP_XIAPI = 1100, // XIMEA Camera API
+ CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API)
+ CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK
+ CAP_MSMF = 1400 // Microsoft Media Foundation (via videoInput)
+ };
+
+// generic properties (based on DC1394 properties)
+enum { CAP_PROP_POS_MSEC =0,
+ CAP_PROP_POS_FRAMES =1,
+ CAP_PROP_POS_AVI_RATIO =2,
+ CAP_PROP_FRAME_WIDTH =3,
+ CAP_PROP_FRAME_HEIGHT =4,
+ CAP_PROP_FPS =5,
+ CAP_PROP_FOURCC =6,
+ CAP_PROP_FRAME_COUNT =7,
+ CAP_PROP_FORMAT =8,
+ CAP_PROP_MODE =9,
+ CAP_PROP_BRIGHTNESS =10,
+ CAP_PROP_CONTRAST =11,
+ CAP_PROP_SATURATION =12,
+ CAP_PROP_HUE =13,
+ CAP_PROP_GAIN =14,
+ CAP_PROP_EXPOSURE =15,
+ CAP_PROP_CONVERT_RGB =16,
+ CAP_PROP_WHITE_BALANCE_BLUE_U =17,
+ CAP_PROP_RECTIFICATION =18,
+ CAP_PROP_MONOCROME =19,
+ CAP_PROP_SHARPNESS =20,
+ CAP_PROP_AUTO_EXPOSURE =21, // DC1394: exposure control done by camera, user can adjust refernce level using this feature
+ CAP_PROP_GAMMA =22,
+ CAP_PROP_TEMPERATURE =23,
+ CAP_PROP_TRIGGER =24,
+ CAP_PROP_TRIGGER_DELAY =25,
+ CAP_PROP_WHITE_BALANCE_RED_V =26,
+ CAP_PROP_ZOOM =27,
+ CAP_PROP_FOCUS =28,
+ CAP_PROP_GUID =29,
+ CAP_PROP_ISO_SPEED =30,
+ CAP_PROP_BACKLIGHT =32,
+ CAP_PROP_PAN =33,
+ CAP_PROP_TILT =34,
+ CAP_PROP_ROLL =35,
+ CAP_PROP_IRIS =36,
+ CAP_PROP_SETTINGS =37
+ };
+
+
+// DC1394 only
+// modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode)
+// every feature can have only one mode turned on at a time
+enum { CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor automatically)
+ CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user
+ CAP_PROP_DC1394_MODE_AUTO = -2,
+ CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1,
+ CAP_PROP_DC1394_MAX = 31
+ };
+
+
+// OpenNI map generators
+enum { CAP_OPENNI_DEPTH_GENERATOR = 1 << 31,
+ CAP_OPENNI_IMAGE_GENERATOR = 1 << 30,
+ CAP_OPENNI_GENERATORS_MASK = CAP_OPENNI_DEPTH_GENERATOR + CAP_OPENNI_IMAGE_GENERATOR
+ };
+
+// Properties of cameras available through OpenNI interfaces
+enum { CAP_PROP_OPENNI_OUTPUT_MODE = 100,
+ CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm
+ CAP_PROP_OPENNI_BASELINE = 102, // in mm
+ CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels
+ CAP_PROP_OPENNI_REGISTRATION = 104, // flag that synchronizes the remapping depth map to image map
+ // by changing depth generator's view point (if the flag is "on") or
+ // sets this view point to its normal one (if the flag is "off").
+ CAP_PROP_OPENNI_REGISTRATION_ON = CAP_PROP_OPENNI_REGISTRATION,
+ CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105,
+ CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106,
+ CAP_PROP_OPENNI_CIRCLE_BUFFER = 107,
+ CAP_PROP_OPENNI_MAX_TIME_DURATION = 108,
+ CAP_PROP_OPENNI_GENERATOR_PRESENT = 109
+ };
+
+// OpenNI shortcats
+enum { CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT,
+ CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_OUTPUT_MODE,
+ CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_BASELINE,
+ CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_FOCAL_LENGTH,
+ CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION,
+ CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION
+ };
+
+// OpenNI data given from depth generator
+enum { CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1)
+ CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3)
+ CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1)
+ CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1)
+ CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1
+
+ // Data given from RGB image generator
+ CAP_OPENNI_BGR_IMAGE = 5,
+ CAP_OPENNI_GRAY_IMAGE = 6
+ };
+
+// Supported output modes of OpenNI image generator
+enum { CAP_OPENNI_VGA_30HZ = 0,
+ CAP_OPENNI_SXGA_15HZ = 1,
+ CAP_OPENNI_SXGA_30HZ = 2,
+ CAP_OPENNI_QVGA_30HZ = 3,
+ CAP_OPENNI_QVGA_60HZ = 4
+ };
+
+
+// GStreamer
+enum { CAP_PROP_GSTREAMER_QUEUE_LENGTH = 200 // default is 1
+ };
+
+
+// PVAPI
+enum { CAP_PROP_PVAPI_MULTICASTIP = 300 // ip for anable multicast master mode. 0 for disable multicast
+ };
+
+
+// Properties of cameras available through XIMEA SDK interface
+enum { CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping.
+ CAP_PROP_XI_DATA_FORMAT = 401, // Output data format.
+ CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels).
+ CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels).
+ CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger.
+ CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE.
+ CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input
+ CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode
+ CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level
+ CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output
+ CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode
+ CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED
+ CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality
+ CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition)
+ CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance
+ CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain
+ CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%).
+ CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure
+ CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure
+ CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %)
+ CAP_PROP_XI_TIMEOUT = 420 // Image capture timeout in milliseconds
+ };
+
+
+// Properties for Android cameras
+enum { CAP_PROP_ANDROID_AUTOGRAB = 1024,
+ CAP_PROP_ANDROID_PREVIEW_SIZES_STRING = 1025, // readonly, tricky property, returns const char* indeed
+ CAP_PROP_ANDROID_PREVIEW_FORMAT = 1026, // readonly, tricky property, returns const char* indeed
+ CAP_PROP_ANDROID_FLASH_MODE = 8001,
+ CAP_PROP_ANDROID_FOCUS_MODE = 8002,
+ CAP_PROP_ANDROID_WHITE_BALANCE = 8003,
+ CAP_PROP_ANDROID_ANTIBANDING = 8004,
+ CAP_PROP_ANDROID_FOCAL_LENGTH = 8005,
+ CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006,
+ CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007,
+ CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008
+ };
+
+
+// Android camera output formats
+enum { CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR
+ CAP_ANDROID_COLOR_FRAME = CAP_ANDROID_COLOR_FRAME_BGR,
+ CAP_ANDROID_GREY_FRAME = 1, //Y
+ CAP_ANDROID_COLOR_FRAME_RGB = 2,
+ CAP_ANDROID_COLOR_FRAME_BGRA = 3,
+ CAP_ANDROID_COLOR_FRAME_RGBA = 4
+ };
+
+
+// Android camera flash modes
+enum { CAP_ANDROID_FLASH_MODE_AUTO = 0,
+ CAP_ANDROID_FLASH_MODE_OFF = 1,
+ CAP_ANDROID_FLASH_MODE_ON = 2,
+ CAP_ANDROID_FLASH_MODE_RED_EYE = 3,
+ CAP_ANDROID_FLASH_MODE_TORCH = 4
+ };
+
+
+// Android camera focus modes
+enum { CAP_ANDROID_FOCUS_MODE_AUTO = 0,
+ CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO = 1,
+ CAP_ANDROID_FOCUS_MODE_EDOF = 2,
+ CAP_ANDROID_FOCUS_MODE_FIXED = 3,
+ CAP_ANDROID_FOCUS_MODE_INFINITY = 4,
+ CAP_ANDROID_FOCUS_MODE_MACRO = 5
+ };
+
+
+// Android camera white balance modes
+enum { CAP_ANDROID_WHITE_BALANCE_AUTO = 0,
+ CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT = 1,
+ CAP_ANDROID_WHITE_BALANCE_DAYLIGHT = 2,
+ CAP_ANDROID_WHITE_BALANCE_FLUORESCENT = 3,
+ CAP_ANDROID_WHITE_BALANCE_INCANDESCENT = 4,
+ CAP_ANDROID_WHITE_BALANCE_SHADE = 5,
+ CAP_ANDROID_WHITE_BALANCE_TWILIGHT = 6,
+ CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT = 7
+ };
+
+
+// Android camera antibanding modes
+enum { CAP_ANDROID_ANTIBANDING_50HZ = 0,
+ CAP_ANDROID_ANTIBANDING_60HZ = 1,
+ CAP_ANDROID_ANTIBANDING_AUTO = 2,
+ CAP_ANDROID_ANTIBANDING_OFF = 3
+ };
+
+
+// Properties of cameras available through AVFOUNDATION interface
+enum { CAP_PROP_IOS_DEVICE_FOCUS = 9001,
+ CAP_PROP_IOS_DEVICE_EXPOSURE = 9002,
+ CAP_PROP_IOS_DEVICE_FLASH = 9003,
+ CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004,
+ CAP_PROP_IOS_DEVICE_TORCH = 9005
+ };
+
+
+// Properties of cameras available through Smartek Giganetix Ethernet Vision interface
+/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */
+enum { CAP_PROP_GIGA_FRAME_OFFSET_X = 10001,
+ CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002,
+ CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003,
+ CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004,
+ CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005,
+ CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006
+ };
-template<> void CV_EXPORTS Ptr<CvCapture>::delete_obj();
-template<> void CV_EXPORTS Ptr<CvVideoWriter>::delete_obj();
class CV_EXPORTS_W VideoCapture
{
CV_WRAP virtual void release();
CV_WRAP virtual bool grab();
- CV_WRAP virtual bool retrieve(CV_OUT Mat& image, int channel=0);
+ CV_WRAP virtual bool retrieve(CV_OUT Mat& image, int flag = 0);
virtual VideoCapture& operator >> (CV_OUT Mat& image);
CV_WRAP virtual bool read(CV_OUT Mat& image);
public:
CV_WRAP VideoWriter();
CV_WRAP VideoWriter(const String& filename, int fourcc, double fps,
- Size frameSize, bool isColor=true);
+ Size frameSize, bool isColor = true);
virtual ~VideoWriter();
CV_WRAP virtual bool open(const String& filename, int fourcc, double fps,
- Size frameSize, bool isColor=true);
+ Size frameSize, bool isColor = true);
CV_WRAP virtual bool isOpened() const;
CV_WRAP virtual void release();
virtual VideoWriter& operator << (const Mat& image);
CV_WRAP virtual void write(const Mat& image);
+ CV_WRAP static int fourcc(char c1, char c2, char c3, char c4);
+
protected:
Ptr<CvVideoWriter> writer;
};
-#endif
-
-}
+template<> void Ptr<CvCapture>::delete_obj();
+template<> void Ptr<CvVideoWriter>::delete_obj();
-#endif
+} // cv
#endif
double fps, CvSize frame_size,
int is_color CV_DEFAULT(1));
-//CVAPI(CvVideoWriter*) cvCreateImageSequenceWriter( const char* filename,
-// int is_color CV_DEFAULT(1));
-
/* write frame to video file */
CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image );
string filename = getDataPath(get<0>(GetParam()));
bool isColor = get<1>(GetParam());
- VideoWriter writer(cv::tempfile(".avi"), CV_FOURCC('X', 'V', 'I', 'D'), 25, cv::Size(640, 480), isColor);
+ VideoWriter writer(cv::tempfile(".avi"), VideoWriter::fourcc('X', 'V', 'I', 'D'), 25, cv::Size(640, 480), isColor);
TEST_CYCLE() { Mat image = imread(filename, 1); writer << image; }
VideoWriter::VideoWriter()
{}
-VideoWriter::VideoWriter(const String& filename, int fourcc, double fps, Size frameSize, bool isColor)
+VideoWriter::VideoWriter(const String& filename, int _fourcc, double fps, Size frameSize, bool isColor)
{
- open(filename, fourcc, fps, frameSize, isColor);
+ open(filename, _fourcc, fps, frameSize, isColor);
}
void VideoWriter::release()
release();
}
-bool VideoWriter::open(const String& filename, int fourcc, double fps, Size frameSize, bool isColor)
+bool VideoWriter::open(const String& filename, int _fourcc, double fps, Size frameSize, bool isColor)
{
- writer = cvCreateVideoWriter(filename.c_str(), fourcc, fps, frameSize, isColor);
+ writer = cvCreateVideoWriter(filename.c_str(), _fourcc, fps, frameSize, isColor);
return isOpened();
}
return *this;
}
+int VideoWriter::fourcc(char c1, char c2, char c3, char c4)
+{
+ return (c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24);
+}
+
}
#include "precomp.hpp"
-#ifdef HAVE_FFMPEG
+#ifndef WIN32
#include "cap_ffmpeg_impl.hpp"
#else
#include "cap_ffmpeg_api.hpp"
#endif
#ifdef WIN32
- #include <libavformat/avformat.h>
+# define AVUTIL_COMMON_H
+# define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
+# include <libavformat/avformat.h>
#else
// if the header path is not specified explicitly, let's deduce it
double cvGetOpenGlProp_QT(const char* name);
#endif
-
-
-/*namespace cv
-{
-
-class CV_EXPORTS BaseWindow
-{
-public:
- BaseWindow(const String& name, int flags=0);
- virtual ~BaseWindow();
- virtual void close();
- virtual void show(const Mat& mat);
- virtual void resize(Size size);
- virtual void move(Point topleft);
- virtual Size size() const;
- virtual Point topLeft() const;
- virtual void setGeometry(Point topLeft, Size size);
- virtual void getGeometry(Point& topLeft, Size& size) const;
- virtual String getTitle() const;
- virtual void setTitle(const String& str);
- virtual String getName() const;
- virtual void setScaleMode(int mode);
- virtual int getScaleMode();
- virtual void setScrollPos(double pos);
- virtual double getScrollPos() const;
- virtual void setScale(double scale);
- virtual double getScale() const;
- virtual Point getImageCoords(Point pos) const;
- virtual Scalar getPixelValue(Point pos, const String& colorspace=String()) const;
-
- virtual void addTrackbar( const String& trackbar, int low, int high, int step );
-};
-
-typedef Ptr<BaseWindow> Window;
-
-}*/
-
#endif /* __HIGHGUI_H_ */
#if defined (HAVE_QT)
-CvFont cv::fontQt(const String& nameFont, int pointSize, Scalar color, int weight, int style, int /*spacing*/)
+cv::QtFont cv::fontQt(const String& nameFont, int pointSize, Scalar color, int weight, int style, int /*spacing*/)
{
-return cvFontQt(nameFont.c_str(), pointSize,color,weight, style);
+ CvFont f = cvFontQt(nameFont.c_str(), pointSize,color,weight, style);
+ return *(cv::QtFont*)(&f);
}
-void cv::addText( const Mat& img, const String& text, Point org, CvFont font)
+void cv::addText( const Mat& img, const String& text, Point org, const QtFont& font)
{
CvMat _img = img;
- cvAddText( &_img, text.c_str(), org,&font);
+ cvAddText( &_img, text.c_str(), org, (CvFont*)&font);
}
void cv::displayStatusBar(const String& name, const String& text, int delayms)
#else
-CvFont cv::fontQt(const String&, int, Scalar, int, int, int)
+cv::QtFont cv::fontQt(const String&, int, Scalar, int, int, int)
{
CV_Error(CV_StsNotImplemented, "The library is compiled without QT support");
- return CvFont();
+ return QtFont();
}
-void cv::addText( const Mat&, const String&, Point, CvFont)
+void cv::addText( const Mat&, const String&, Point, const QtFont&)
{
CV_Error(CV_StsNotImplemented, "The library is compiled without QT support");
}
double fps = fps0;
Size frame_s = Size(img_c, img_r);
- if( tag == CV_FOURCC('H', '2', '6', '1') )
+ if( tag == VideoWriter::fourcc('H', '2', '6', '1') )
frame_s = Size(352, 288);
- else if( tag == CV_FOURCC('H', '2', '6', '3') )
+ else if( tag == VideoWriter::fourcc('H', '2', '6', '3') )
frame_s = Size(704, 576);
/*else if( tag == CV_FOURCC('M', 'J', 'P', 'G') ||
tag == CV_FOURCC('j', 'p', 'e', 'g') )
frame_s = Size(1920, 1080);*/
- if( tag == CV_FOURCC('M', 'P', 'E', 'G') )
+ if( tag == VideoWriter::fourcc('M', 'P', 'E', 'G') )
fps = 25;
VideoWriter writer(filename, tag, fps, frame_s);
std::string fileName = tempfile(stream.str().c_str());
files->operator[](i) = fileName;
- writers->operator[](i) = new VideoWriter(fileName, CV_FOURCC('X','V','I','D'), 25.0f, FrameSize);
+ writers->operator[](i) = new VideoWriter(fileName, VideoWriter::fourcc('X','V','I','D'), 25.0f, FrameSize);
CV_Assert(writers->operator[](i)->isOpened());
}
CV_Assert(capture->isOpened());
const static double eps = 23.0;
- unsigned int frameCount = static_cast<unsigned int>(capture->get(CV_CAP_PROP_FRAME_COUNT));
+ unsigned int frameCount = static_cast<unsigned int>(capture->get(CAP_PROP_FRAME_COUNT));
CV_Assert(frameCount == WriteVideo_Invoker::FrameCount);
Mat reference(CreateVideoWriterInvoker::FrameSize, CV_8UC3);
//M*/
#include "test_precomp.hpp"
-#include "opencv2/highgui.hpp"
+#include "opencv2/highgui/highgui_c.h"
#include <stdio.h>
using namespace cv;
FrameCount++;
}
- int framecount = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_COUNT);
+ int framecount = (int)cvGetCaptureProperty(cap, CAP_PROP_FRAME_COUNT);
ts->printf(cvtest::TS::LOG, "\nFile information (video %d): \n"\
"\nName: big_buck_bunny.%s\nActual frame count: %d\n"\
imwrite(img_path, img);
ts->printf(ts->LOG, "reading test image : %s\n", img_path.c_str());
- Mat img_test = imread(img_path, CV_LOAD_IMAGE_UNCHANGED);
+ Mat img_test = imread(img_path, IMREAD_UNCHANGED);
if (img_test.empty()) ts->set_failed_test_info(ts->FAIL_MISMATCH);
string filename = cv::tempfile(".jpg");
imwrite(filename, img);
- img = imread(filename, CV_LOAD_IMAGE_UNCHANGED);
+ img = imread(filename, IMREAD_UNCHANGED);
filename = string(ts->get_data_path() + "readwrite/test_" + char(k + 48) + "_c" + char(num_channels + 48) + ".jpg");
ts->printf(ts->LOG, "reading test image : %s\n", filename.c_str());
- Mat img_test = imread(filename, CV_LOAD_IMAGE_UNCHANGED);
+ Mat img_test = imread(filename, IMREAD_UNCHANGED);
if (img_test.empty()) ts->set_failed_test_info(ts->FAIL_MISMATCH);
string filename = cv::tempfile(".tiff");
imwrite(filename, img);
ts->printf(ts->LOG, "reading test image : %s\n", filename.c_str());
- Mat img_test = imread(filename, CV_LOAD_IMAGE_UNCHANGED);
+ Mat img_test = imread(filename, IMREAD_UNCHANGED);
if (img_test.empty()) ts->set_failed_test_info(ts->FAIL_MISMATCH);
Mat im = Mat::zeros(1000,1000, CV_8U);
//randu(im, 0, 256);
vector<int> param;
- param.push_back(CV_IMWRITE_PNG_COMPRESSION);
+ param.push_back(IMWRITE_PNG_COMPRESSION);
param.push_back(3); //default(3) 0-9.
cv::imencode(".png" ,im ,buff, param);
// hangs
- Mat im2 = imdecode(buff,CV_LOAD_IMAGE_ANYDEPTH);
+ Mat im2 = imdecode(buff,IMREAD_ANYDEPTH);
}
catch(...)
{
remove(output.c_str());
- cv::Mat decode = cv::imdecode(buf, CV_LOAD_IMAGE_COLOR);
+ cv::Mat decode = cv::imdecode(buf, IMREAD_COLOR);
ASSERT_FALSE(decode.empty());
EXPECT_TRUE(cv::norm(decode, img_webp, NORM_INF) == 0);
for(int q = 100; q>=0; q-=10)
{
std::vector<int> params;
- params.push_back(CV_IMWRITE_WEBP_QUALITY);
+ params.push_back(IMWRITE_WEBP_QUALITY);
params.push_back(q);
string output = cv::tempfile(".webp");
void CV_HighGuiOnlyGuiTest::run( int /*start_from */)
{
ts->printf(ts->LOG, "GUI 0\n");
- cvDestroyAllWindows();
+ destroyAllWindows();
ts->printf(ts->LOG, "GUI 1\n");
namedWindow("Win");
waitKey(500);
ts->printf(ts->LOG, "GUI 8\n");
- cvDestroyAllWindows();
+ destroyAllWindows();
ts->set_failed_test_info(cvtest::TS::OK);
}
//M*/
#include "test_precomp.hpp"
-#include "opencv2/highgui.hpp"
+#include "opencv2/highgui/highgui_c.h"
#include <stdio.h>
using namespace cv;
void CV_VideoPositioningTest::generate_idx_seq(CvCapture* cap, int method)
{
idx.clear();
- int N = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_COUNT);
+ int N = (int)cvGetCaptureProperty(cap, CAP_PROP_FRAME_COUNT);
switch(method)
{
case PROGRESSIVE:
failed_videos++; continue;
}
- cvSetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES, 0);
+ cvSetCaptureProperty(cap, CAP_PROP_POS_FRAMES, 0);
generate_idx_seq(cap, method);
{
bool flag = false;
- cvSetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES, idx.at(j));
+ cvSetCaptureProperty(cap, CAP_PROP_POS_FRAMES, idx.at(j));
/* IplImage* frame = cvRetrieveFrame(cap);
flag = !flag;
} */
- int val = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES);
+ int val = (int)cvGetCaptureProperty(cap, CAP_PROP_POS_FRAMES);
if (idx.at(j) != val)
{
#include <iostream>
#include "opencv2/ts.hpp"
#include "opencv2/imgproc.hpp"
+#include "opencv2/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/core/private.hpp"
//M*/
#include "test_precomp.hpp"
-#include "opencv2/highgui.hpp"
+#include "opencv2/highgui/highgui_c.h"
using namespace cv;
using namespace std;
const VideoFormat g_specific_fmt_list[] =
{
- VideoFormat("avi", CV_FOURCC('X', 'V', 'I', 'D')),
- VideoFormat("avi", CV_FOURCC('M', 'P', 'E', 'G')),
- VideoFormat("avi", CV_FOURCC('M', 'J', 'P', 'G')),
- //VideoFormat("avi", CV_FOURCC('I', 'Y', 'U', 'V')),
- VideoFormat("mkv", CV_FOURCC('X', 'V', 'I', 'D')),
- VideoFormat("mkv", CV_FOURCC('M', 'P', 'E', 'G')),
- VideoFormat("mkv", CV_FOURCC('M', 'J', 'P', 'G')),
-
- VideoFormat("mov", CV_FOURCC('m', 'p', '4', 'v')),
+ VideoFormat("avi", VideoWriter::fourcc('X', 'V', 'I', 'D')),
+ VideoFormat("avi", VideoWriter::fourcc('M', 'P', 'E', 'G')),
+ VideoFormat("avi", VideoWriter::fourcc('M', 'J', 'P', 'G')),
+ //VideoFormat("avi", VideoWriter::fourcc('I', 'Y', 'U', 'V')),
+ VideoFormat("mkv", VideoWriter::fourcc('X', 'V', 'I', 'D')),
+ VideoFormat("mkv", VideoWriter::fourcc('M', 'P', 'E', 'G')),
+ VideoFormat("mkv", VideoWriter::fourcc('M', 'J', 'P', 'G')),
+
+ VideoFormat("mov", VideoWriter::fourcc('m', 'p', '4', 'v')),
VideoFormat()
};
for( size_t i = 0; i < IMAGE_COUNT; ++i )
{
string file_path = format("%s../python/images/QCIF_%02d.bmp", dir.c_str(), i);
- Mat img = imread(file_path, CV_LOAD_IMAGE_COLOR);
+ Mat img = imread(file_path, IMREAD_COLOR);
if (img.empty())
{
writer.release();
VideoCapture cap(video_file);
- size_t FRAME_COUNT = (size_t)cap.get(CV_CAP_PROP_FRAME_COUNT);
+ size_t FRAME_COUNT = (size_t)cap.get(CAP_PROP_FRAME_COUNT);
if (FRAME_COUNT != IMAGE_COUNT )
{
return;
}
- int N0 = (int)cap.get(CV_CAP_PROP_FRAME_COUNT);
- cap.set(CV_CAP_PROP_POS_FRAMES, 0);
- int N = (int)cap.get(CV_CAP_PROP_FRAME_COUNT);
+ int N0 = (int)cap.get(CAP_PROP_FRAME_COUNT);
+ cap.set(CAP_PROP_POS_FRAMES, 0);
+ int N = (int)cap.get(CAP_PROP_FRAME_COUNT);
if (N != n_frames || N != N0)
{
{
int idx = theRNG().uniform(0, N);
- if( !cap.set(CV_CAP_PROP_POS_FRAMES, idx) )
+ if( !cap.set(CAP_PROP_POS_FRAMES, idx) )
{
ts->printf(ts->LOG, "\nError: cannot seek to frame %d.\n", idx);
ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT);
return;
}
- int idx1 = (int)cap.get(CV_CAP_PROP_POS_FRAMES);
+ int idx1 = (int)cap.get(CAP_PROP_POS_FRAMES);
Mat img; cap >> img;
Mat img0 = drawFrame(idx);
{
cvtest::TS* ts = cvtest::TS::ptr();
- Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", CV_LOAD_IMAGE_GRAYSCALE);
- Mat gold = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_gold.png", CV_LOAD_IMAGE_UNCHANGED);
+ Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", IMREAD_GRAYSCALE);
+ Mat gold = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_gold.png", IMREAD_UNCHANGED);
Mat result;
CV_Assert(given.data != NULL && gold.data != NULL);
{
cvtest::TS* ts = cvtest::TS::ptr();
- Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", CV_LOAD_IMAGE_GRAYSCALE);
+ Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", IMREAD_GRAYSCALE);
string goldfname = string(ts->get_data_path()) + "/cvtcolor/bayerVNG_gold.png";
- Mat gold = imread(goldfname, CV_LOAD_IMAGE_UNCHANGED);
+ Mat gold = imread(goldfname, IMREAD_UNCHANGED);
Mat result;
CV_Assert(given.data != NULL);
Mat src, dst, bayer, reference;
std::string full_path = parent_path + image_name;
- src = imread(full_path, CV_LOAD_IMAGE_UNCHANGED);
+ src = imread(full_path, IMREAD_UNCHANGED);
if (src.data == NULL)
{
// reading a reference image
full_path = parent_path + pattern[i] + image_name;
- reference = imread(full_path, CV_LOAD_IMAGE_UNCHANGED);
+ reference = imread(full_path, IMREAD_UNCHANGED);
if (reference.data == NULL)
{
imwrite(full_path, dst);
Mat src, bayer;
std::string full_path = parent_path + image_name;
- src = imread(full_path, CV_LOAD_IMAGE_UNCHANGED);
+ src = imread(full_path, IMREAD_UNCHANGED);
if (src.data == NULL)
{
TEST(ImgProc_Bayer2RGBA, accuracy)
{
cvtest::TS* ts = cvtest::TS::ptr();
- Mat raw = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", CV_LOAD_IMAGE_GRAYSCALE);
+ Mat raw = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", IMREAD_GRAYSCALE);
Mat rgb, reference;
CV_Assert(raw.channels() == 1);
{
cvtest::TS& ts = *cvtest::TS::ptr();
- Mat image_1 = imread(string(ts.get_data_path()) + "grabcut/image1652.ppm", CV_LOAD_IMAGE_COLOR);
- Mat mask_1 = imread(string(ts.get_data_path()) + "grabcut/mask1652.ppm", CV_LOAD_IMAGE_GRAYSCALE);
+ Mat image_1 = imread(string(ts.get_data_path()) + "grabcut/image1652.ppm", IMREAD_COLOR);
+ Mat mask_1 = imread(string(ts.get_data_path()) + "grabcut/mask1652.ppm", IMREAD_GRAYSCALE);
Rect roi_1(0, 0, 150, 150);
Mat image_2 = image_1.clone();
#include <iostream>
#include "opencv2/ts.hpp"
#include "opencv2/imgproc.hpp"
-#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/highgui.hpp"
-#include "opencv2/highgui/highgui_c.h"
+
+#include "opencv2/imgproc/imgproc_c.h"
#endif
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_HIGHGUI
-#include "opencv2/highgui/highgui_c.h"
#include "opencv2/highgui.hpp"
using namespace cv;
VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL
union {double prop; const char* name;} u;
- u.prop = me->get(CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING);
+ u.prop = me->get(CAP_PROP_ANDROID_PREVIEW_SIZES_STRING);
return env->NewStringUTF(u.name);
} catch(cv::Exception e) {
} // extern "C"
-#endif // HAVE_OPENCV_HIGHGUI
\ No newline at end of file
+#endif // HAVE_OPENCV_HIGHGUI
cvConvertScale(m_samples[i], patch, 255/maxval);
#ifdef HAVE_OPENCV_HIGHGUI
- cvSaveImage(buf, patch);
+ cv::imwrite(buf, cv::cvarrToMat(patch));
#else
CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without image I/O support");
#endif
sprintf(filename, "%s/%s", path, imagename);
//printf("Reading image %s...", filename);
- IplImage* img = 0;
+ IplImage img;
#ifdef HAVE_OPENCV_HIGHGUI
- img = cvLoadImage(filename, CV_LOAD_IMAGE_GRAYSCALE);
+ Mat img2 = cv::imread(filename, IMREAD_GRAYSCALE);
+ img = img2;
#else
CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without image I/O support");
#endif
//printf("done\n");
- extractPatches (img, patches, patch_size);
-
- cvReleaseImage(&img);
+ extractPatches (&img, patches, patch_size);
}
fclose(pFile);
}
color, thickness, line_type, shift);
}
#ifdef HAVE_OPENCV_HIGHGUI
- cvShowImage("Initial image", image);
+ cv::imshow("Initial image", cv::cvarrToMat(image));
#endif
return LATENT_SVM_OK;
}
}
}
#ifdef HAVE_OPENCV_HIGHGUI
- cvShowImage("Initial image", image);
+ cv::imshow("Initial image", cv::cvarrToMat(image));
#endif
return LATENT_SVM_OK;
}
color, thickness, line_type, shift);
}
#ifdef HAVE_OPENCV_HIGHGUI
- cvShowImage("Initial image", img);
+ cv::imshow("Initial image", cv::cvarrToMat(img));
#endif
return LATENT_SVM_OK;
}
init.initialize(numThreads);
#endif
- IplImage* image = cvLoadImage(img_path.c_str());
- if (!image)
+ Mat image2 = cv::imread(img_path.c_str());
+ IplImage image = image2;
+ if (image2.empty())
{
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
return;
if (!detector)
{
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
- cvReleaseImage(&image);
return;
}
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* detections = 0;
- detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, numThreads);
+ detections = cvLatentSvmDetectObjects(&image, detector, storage, 0.5f, numThreads);
if (detections->total != num_detections)
{
ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH );
#endif
cvReleaseMemStorage( &storage );
cvReleaseLatentSvmDetector( &detector );
- cvReleaseImage( &image );
}
// Test for c++ version of Latent SVM
///////////// Canny ////////////////////////
TEST(Canny)
{
- Mat img = imread(abspath("aloeL.jpg"), CV_LOAD_IMAGE_GRAYSCALE);
+ Mat img = imread(abspath("aloeL.jpg"), IMREAD_GRAYSCALE);
if (img.empty())
{
}
TEST(Haar)
{
- Mat img = imread(abspath("basketball1.png"), CV_LOAD_IMAGE_GRAYSCALE);
+ Mat img = imread(abspath("basketball1.png"), IMREAD_GRAYSCALE);
if (img.empty())
{
string original_path = folder + "lena_noised_gaussian_sigma=10.png";
string expected_path = folder + "lena_noised_denoised_grayscale_tw=7_sw=21_h=10.png";
- Mat original = imread(original_path, CV_LOAD_IMAGE_GRAYSCALE);
- Mat expected = imread(expected_path, CV_LOAD_IMAGE_GRAYSCALE);
+ Mat original = imread(original_path, IMREAD_GRAYSCALE);
+ Mat expected = imread(expected_path, IMREAD_GRAYSCALE);
ASSERT_FALSE(original.empty()) << "Could not load input image " << original_path;
ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path;
string original_path = folder + "lena_noised_gaussian_sigma=10.png";
string expected_path = folder + "lena_noised_denoised_lab12_tw=7_sw=21_h=10_h2=10.png";
- Mat original = imread(original_path, CV_LOAD_IMAGE_COLOR);
- Mat expected = imread(expected_path, CV_LOAD_IMAGE_COLOR);
+ Mat original = imread(original_path, IMREAD_COLOR);
+ Mat expected = imread(expected_path, IMREAD_COLOR);
ASSERT_FALSE(original.empty()) << "Could not load input image " << original_path;
ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path;
string folder = string(cvtest::TS::ptr()->get_data_path()) + "denoising/";
string expected_path = folder + "lena_noised_denoised_multi_tw=7_sw=21_h=15.png";
- Mat expected = imread(expected_path, CV_LOAD_IMAGE_GRAYSCALE);
+ Mat expected = imread(expected_path, IMREAD_GRAYSCALE);
ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path;
vector<Mat> original(imgs_count);
for (int i = 0; i < imgs_count; i++)
{
string original_path = format("%slena_noised_gaussian_sigma=20_multi_%d.png", folder.c_str(), i);
- original[i] = imread(original_path, CV_LOAD_IMAGE_GRAYSCALE);
+ original[i] = imread(original_path, IMREAD_GRAYSCALE);
ASSERT_FALSE(original[i].empty()) << "Could not load input image " << original_path;
}
string folder = string(cvtest::TS::ptr()->get_data_path()) + "denoising/";
string expected_path = folder + "lena_noised_denoised_multi_lab12_tw=7_sw=21_h=10_h2=15.png";
- Mat expected = imread(expected_path, CV_LOAD_IMAGE_COLOR);
+ Mat expected = imread(expected_path, IMREAD_COLOR);
ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path;
vector<Mat> original(imgs_count);
for (int i = 0; i < imgs_count; i++)
{
string original_path = format("%slena_noised_gaussian_sigma=20_multi_%d.png", folder.c_str(), i);
- original[i] = imread(original_path, CV_LOAD_IMAGE_COLOR);
+ original[i] = imread(original_path, IMREAD_COLOR);
ASSERT_FALSE(original[i].empty()) << "Could not load input image " << original_path;
}
#include "opencv2/photo.hpp"
#include "opencv2/highgui.hpp"
+#include "opencv2/highgui/highgui_c.h"
+
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_NONFREE
#include "opencv2/legacy.hpp"
#include "opencv2/legacy/compat.hpp"
+#include "opencv2/highgui/highgui_c.h"
#define OLD_MODULESTR "cv2.cv"
simple_argtype_mapping = {
"bool": ("bool", "b", "0"),
+ "char": ("char", "b", "0"),
"int": ("int", "i", "0"),
"float": ("float", "f", "0.f"),
"double": ("double", "d", "0"),
if( code < 0 )
{
-#if defined _DEBUG && defined WIN32
+#if 0 //defined _DEBUG && defined WIN32
IplImage* dst = cvCreateImage( img_size, 8, 3 );
cvNamedWindow( "test", 1 );
cvCmpS( img, 0, img, CV_CMP_GT );
if( code < 0 )
{
-#if defined _DEBUG && defined WIN32
+#if 0// defined _DEBUG && defined WIN32
IplImage* dst = cvCreateImage( img_size, 8, 3 );
cvNamedWindow( "test", 1 );
cvCmpS( img, 0, img, CV_CMP_GT );
CvMat *_u = 0, *_v = 0, *_v2 = 0;
char* status = 0;
- IplImage* imgI = 0;
- IplImage* imgJ = 0;
+ IplImage imgI;
+ IplImage imgJ;
+ cv::Mat imgI2, imgJ2;
int n = 0, i = 0;
/* read first image */
sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "rock_1.bmp" );
- imgI = cvLoadImage( filename, -1 );
+ imgI2 = cv::imread( filename, cv::IMREAD_UNCHANGED );
+ imgI = imgI2;
- if( !imgI )
+ if( imgI2.empty() )
{
ts->printf( cvtest::TS::LOG, "could not read %s\n", filename );
code = cvtest::TS::FAIL_MISSING_TEST_DATA;
/* read second image */
sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "rock_2.bmp" );
- imgJ = cvLoadImage( filename, -1 );
+ imgJ2 = cv::imread( filename, cv::IMREAD_UNCHANGED );
+ imgJ = imgJ2;
- if( !imgJ )
+ if( imgJ2.empty() )
{
ts->printf( cvtest::TS::LOG, "could not read %s\n", filename );
code = cvtest::TS::FAIL_MISSING_TEST_DATA;
status = (char*)cvAlloc(n*sizeof(status[0]));
/* calculate flow */
- cvCalcOpticalFlowPyrLK( imgI, imgJ, 0, 0, u, v2, n, cvSize( 41, 41 ),
+ cvCalcOpticalFlowPyrLK( &imgI, &imgJ, 0, 0, u, v2, n, cvSize( 41, 41 ),
4, status, 0, cvTermCriteria( CV_TERMCRIT_ITER|
CV_TERMCRIT_EPS, 30, 0.01f ), 0 );
cvReleaseMat( &_v );
cvReleaseMat( &_v2 );
- cvReleaseImage( &imgI );
- cvReleaseImage( &imgJ );
-
if( code < 0 )
ts->set_failed_test_info( code );
}
}
#ifdef HAVE_OPENCV_HIGHGUI
- int width() {return static_cast<int>(vc.get(CV_CAP_PROP_FRAME_WIDTH));}
- int height() {return static_cast<int>(vc.get(CV_CAP_PROP_FRAME_HEIGHT));}
- int count() {return static_cast<int>(vc.get(CV_CAP_PROP_FRAME_COUNT));}
- double fps() {return vc.get(CV_CAP_PROP_FPS);}
+ int width() {return static_cast<int>(vc.get(CAP_PROP_FRAME_WIDTH));}
+ int height() {return static_cast<int>(vc.get(CAP_PROP_FRAME_HEIGHT));}
+ int count() {return static_cast<int>(vc.get(CAP_PROP_FRAME_COUNT));}
+ double fps() {return vc.get(CAP_PROP_FPS);}
#else
int width() {return 0;}
int height() {return 0;}
#include <cstring>
#include <ctime>
#include "opencv2/contrib/contrib.hpp"
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/highgui/highgui_c.h"
static void help(char **argv)
{
#include "opencv2/core/utility.hpp"
#include "opencv2/video/background_segm.hpp"
#include "opencv2/imgproc/imgproc_c.h"
-#include "opencv2/highgui.hpp"
+#include "opencv2/highgui/highgui_c.h"
#include "opencv2/legacy.hpp"
#include <stdio.h>
#include "opencv2/video/background_segm.hpp"
#include "opencv2/legacy/blobtrack.hpp"
#include "opencv2/legacy/legacy.hpp"
-#include "opencv2/highgui/highgui.hpp"
+#include <opencv2/highgui/highgui_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <stdio.h>
#include "opencv2/objdetect/objdetect.hpp"
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/highgui/highgui_c.h"
#include <ctype.h>
#include <stdio.h>
-#include "opencv2/objdetect/objdetect.hpp"
-#include "opencv2/highgui/highgui.hpp"
-#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/objdetect.hpp"
+#include "opencv2/highgui.hpp"
+#include "opencv2/imgproc.hpp"
#include "opencv2/core/utility.hpp"
+#include "opencv2/highgui/highgui_c.h"
+
#include <cctype>
#include <iostream>
#include <iterator>
#include "opencv2/video/tracking.hpp"
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/highgui/highgui_c.h"
#include "opencv2/imgproc/imgproc_c.h"
#include <stdio.h>
*/
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/features2d/features2d.hpp"
-#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/imgproc/imgproc_c.h"
+#include "opencv2/highgui/highgui_c.h"
#include "opencv2/legacy/legacy.hpp"
#include "opencv2/legacy/compat.hpp"
string str;
getline( is, str );
if (str.empty()) break;
- Mat img = imread( str, CV_LOAD_IMAGE_GRAYSCALE );
+ Mat img = imread( str, IMREAD_GRAYSCALE );
if( !img.empty() )
trainImgs.push_back( img );
}
*/
static void testCalonderClassifier( const string& classifierFilename, const string& imgFilename )
{
- Mat img1 = imread( imgFilename, CV_LOAD_IMAGE_GRAYSCALE ), img2, H12;
+ Mat img1 = imread( imgFilename, IMREAD_GRAYSCALE ), img2, H12;
if( img1.empty() )
{
cout << "Test image can not be read." << endl;
help();
- Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
- Mat scene = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
+ Mat object = imread( object_filename, IMREAD_GRAYSCALE );
+ Mat scene = imread( scene_filename, IMREAD_GRAYSCALE );
if( !object.data || !scene.data )
{
resize(scene, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
- cvNamedWindow("Object", 1);
- cvNamedWindow("Image", 1);
- cvNamedWindow("Object Correspondence", 1);
+ namedWindow("Object", 1);
+ namedWindow("Image", 1);
+ namedWindow("Object Correspondence", 1);
Size patchSize(32, 32);
LDetector ldetector(7, 20, 2, 2000, patchSize.width, 2);
-#include "opencv2/objdetect/objdetect.hpp"
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/objdetect.hpp"
+#include "opencv2/highgui/highgui_c.h"
#include <stdio.h>
#ifdef HAVE_CVCONFIG_H
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/highgui/highgui_c.h"
#include "opencv2/imgproc/imgproc_c.h"
#include <stdio.h>
#include "opencv2/video/tracking.hpp"
-#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/highgui/highgui_c.h"
#include "opencv2/imgproc/imgproc_c.h"
#include <time.h>
#include <stdio.h>
std::string img2_name = path_name + "/" + std::string(argv[3]);
printf("Reading the images...\n");
- Mat img1 = imread(img1_name, CV_LOAD_IMAGE_GRAYSCALE);
- Mat img2 = imread(img2_name, CV_LOAD_IMAGE_GRAYSCALE);
+ Mat img1 = imread(img1_name, IMREAD_GRAYSCALE);
+ Mat img2 = imread(img2_name, IMREAD_GRAYSCALE);
// extract keypoints from the first image
SURF surf_extractor(5.0e3);
-#include "opencv2/objdetect/objdetect.hpp"
-#include "opencv2/highgui/highgui.hpp"
-#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/objdetect.hpp"
+#include "opencv2/highgui.hpp"
+#include "opencv2/imgproc.hpp"
#include "opencv2/core/utility.hpp"
+#include "opencv2/highgui/highgui_c.h"
+
#include <cctype>
#include <iostream>
#include <iterator>
#include <iostream>
#include <vector>
-#include <opencv/highgui.h>
+
+#include <opencv2/core/core_c.h>
+#include <opencv2/imgproc/imgproc_c.h>
+#include <opencv2/legacy/compat.hpp>
+#include <opencv2/calib3d.hpp>
+
+#include <opencv2/highgui.hpp>
#if defined WIN32 || defined _WIN32 || defined WINCE
#include <windows.h>
#include <GL/gl.h>
#endif
-#include <opencv/cxcore.h>
-#include <opencv/cv.h>
-
using namespace std;
using namespace cv;
int main(void)
{
help();
- CvCapture* video = cvCaptureFromFile("cube4.avi");
- CV_Assert(video);
+ VideoCapture video("cube4.avi");
+ CV_Assert(video.isOpened());
+
+ Mat frame; video >> frame;
- IplImage* source = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,3);
- IplImage* grayImage = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,1);
+ IplImage* grayImage = cvCreateImage(frame.size(),8,1);
- cvNamedWindow("original",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
- cvNamedWindow("POSIT",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
+ namedWindow("original", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
+ namedWindow("POSIT", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear). This demo is only to illustrate how to use OpenGL callback.\n -- Press ESC to exit.", 10000);
//For debug
//cvNamedWindow("tempGray",CV_WINDOW_AUTOSIZE);
float OpenGLMatrix[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- cvSetOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);
+ setOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);
vector<CvPoint3D32f> modelPoints;
initPOSIT(&modelPoints);
vector<CvPoint2D32f> srcImagePoints(4,cvPoint2D32f(0,0));
- while(cvWaitKey(33) != 27)
+ while(waitKey(33) != 27)
{
- source=cvQueryFrame(video);
- cvShowImage("original",source);
+ video >> frame;
+ imshow("original", frame);
- foundCorners(&srcImagePoints,source,grayImage);
+ IplImage source = frame;
+ foundCorners(&srcImagePoints, &source, grayImage);
cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );
createOpenGLMatrixFrom(OpenGLMatrix,rotation_matrix,translation_vector);
- cvShowImage("POSIT",source);
+ imshow("POSIT", frame);
//For debug
//cvShowImage("tempGray",grayImage);
- if (cvGetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO)>0.99)
- cvSetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO,0);
+ if (video.get(CAP_PROP_POS_AVI_RATIO) > 0.99)
+ video.set(CAP_PROP_POS_AVI_RATIO, 0);
}
- cvDestroyAllWindows();
+ destroyAllWindows();
cvReleaseImage(&grayImage);
- cvReleaseCapture(&video);
+ video.release();
cvReleasePOSITObject(&positObject);
return 0;
return -1;
}
- namedWindow("image", CV_WINDOW_NORMAL);
- namedWindow("foreground mask", CV_WINDOW_NORMAL);
- namedWindow("foreground image", CV_WINDOW_NORMAL);
- namedWindow("mean background image", CV_WINDOW_NORMAL);
+ namedWindow("image", WINDOW_NORMAL);
+ namedWindow("foreground mask", WINDOW_NORMAL);
+ namedWindow("foreground image", WINDOW_NORMAL);
+ namedWindow("mean background image", WINDOW_NORMAL);
Ptr<BackgroundSubtractor> bg_model = createBackgroundSubtractorMOG2();
string im1_name = parser.get<string>(0);
string im2_name = parser.get<string>(1);
- Mat im1 = imread(im1_name, CV_LOAD_IMAGE_GRAYSCALE);
- Mat im2 = imread(im2_name, CV_LOAD_IMAGE_GRAYSCALE);
+ Mat im1 = imread(im1_name, IMREAD_GRAYSCALE);
+ Mat im2 = imread(im2_name, IMREAD_GRAYSCALE);
if (im1.empty() || im2.empty())
{
imshow("Current chessboard", boards[i]); waitKey(1000);
}
cout << "Done" << endl;
- cvDestroyAllWindows();
+ destroyAllWindows();
Mat camMat_est;
Mat distCoeffs_est;
switch( event )
{
- case CV_EVENT_LBUTTONDOWN:
+ case EVENT_LBUTTONDOWN:
origin = Point(x,y);
selection = Rect(x,y,0,0);
selectObject = true;
break;
- case CV_EVENT_LBUTTONUP:
+ case EVENT_LBUTTONUP:
selectObject = false;
if( selection.width > 0 && selection.height > 0 )
trackObject = -1;
Mat cnt_img = Mat::zeros(w, w, CV_8UC3);
int _levels = levels - 3;
drawContours( cnt_img, contours, _levels <= 0 ? 3 : -1, Scalar(128,255,255),
- 3, CV_AA, hierarchy, std::abs(_levels) );
+ 3, LINE_AA, hierarchy, std::abs(_levels) );
imshow("contours", cnt_img);
}
img = Scalar::all(0);
for( i = 0; i < count; i++ )
- circle(img, points[i], 3, Scalar(0, 0, 255), CV_FILLED, CV_AA);
+ circle(img, points[i], 3, Scalar(0, 0, 255), FILLED, LINE_AA);
int hullcount = (int)hull.size();
Point pt0 = points[hull[hullcount-1]];
for( i = 0; i < hullcount; i++ )
{
Point pt = points[hull[i]];
- line(img, pt0, pt, Scalar(0, 255, 0), 1, CV_AA);
+ line(img, pt0, pt, Scalar(0, 255, 0), 1,LINE_AA);
pt0 = pt;
}
imshow(WindowName, ReferenceFrame);
- if (cvWaitKey(30) >= 0) break;
+ if (waitKey(30) >= 0) break;
}
Detector.stop();
static void draw_subdiv_point( Mat& img, Point2f fp, Scalar color )
{
- circle( img, fp, 3, color, CV_FILLED, 8, 0 );
+ circle( img, fp, 3, color, FILLED, LINE_8, 0 );
}
static void draw_subdiv( Mat& img, Subdiv2D& subdiv, Scalar delaunay_color )
pt[0] = Point(cvRound(t[0]), cvRound(t[1]));
pt[1] = Point(cvRound(t[2]), cvRound(t[3]));
pt[2] = Point(cvRound(t[4]), cvRound(t[5]));
- line(img, pt[0], pt[1], delaunay_color, 1, CV_AA, 0);
- line(img, pt[1], pt[2], delaunay_color, 1, CV_AA, 0);
- line(img, pt[2], pt[0], delaunay_color, 1, CV_AA, 0);
+ line(img, pt[0], pt[1], delaunay_color, 1, LINE_AA, 0);
+ line(img, pt[1], pt[2], delaunay_color, 1, LINE_AA, 0);
+ line(img, pt[2], pt[0], delaunay_color, 1, LINE_AA, 0);
}
#else
vector<Vec4f> edgeList;
Vec4f e = edgeList[i];
Point pt0 = Point(cvRound(e[0]), cvRound(e[1]));
Point pt1 = Point(cvRound(e[2]), cvRound(e[3]));
- line(img, pt0, pt1, delaunay_color, 1, CV_AA, 0);
+ line(img, pt0, pt1, delaunay_color, 1, LINE_AA, 0);
}
#endif
}
{
Point2f org, dst;
if( subdiv.edgeOrg(e, &org) > 0 && subdiv.edgeDst(e, &dst) > 0 )
- line( img, org, dst, active_color, 3, CV_AA, 0 );
+ line( img, org, dst, active_color, 3, LINE_AA, 0 );
e = subdiv.getEdge(e, Subdiv2D::NEXT_AROUND_LEFT);
}
fillConvexPoly(img, ifacet, color, 8, 0);
ifacets[0] = ifacet;
- polylines(img, ifacets, true, Scalar(), 1, CV_AA, 0);
- circle(img, centers[i], 3, Scalar(), -1, CV_AA, 0);
+ polylines(img, ifacets, true, Scalar(), 1, LINE_AA, 0);
+ circle(img, centers[i], 3, Scalar(), FILLED, LINE_AA, 0);
}
}
calcHist(&dst, 1, 0, Mat(), hist, 1, &histSize, 0);
Mat histImage = Mat::ones(200, 320, CV_8U)*255;
- normalize(hist, hist, 0, histImage.rows, CV_MINMAX, CV_32F);
+ normalize(hist, hist, 0, histImage.rows, NORM_MINMAX, CV_32F);
histImage = Scalar::all(255);
int binW = cvRound((double)histImage.cols/histSize);
CommandLineParser parser(argc, argv, keys);
string filename = parser.get<string>(0);
- Mat img = imread(filename.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
+ Mat img = imread(filename.c_str(), IMREAD_GRAYSCALE);
if( img.empty() )
{
help();
q2.copyTo(q1);
tmp.copyTo(q2);
- normalize(mag, mag, 0, 1, CV_MINMAX);
+ normalize(mag, mag, 0, 1, NORM_MINMAX);
imshow("spectrum magnitude", mag);
waitKey();
// Call to update the view
onTrackbar(0, 0);
- int c = cvWaitKey(0) & 255;
+ int c = waitKey() & 255;
if( c == 27 )
break;
char wndname[] = "Drawing Demo";
const int NUMBER = 100;
const int DELAY = 5;
- int lineType = CV_AA; // change it to 8 to see non-antialiased graphics
+ int lineType = LINE_AA; // change it to LINE_8 to see non-antialiased graphics
int i, width = 1000, height = 700;
int x1 = -width/2, x2 = width*3/2, y1 = -height/2, y2 = height*3/2;
RNG rng(0xFFFFFFFF);
return 0;
}
- Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
+ Size textsize = getTextSize("OpenCV forever!", FONT_HERSHEY_COMPLEX, 3, 5, 0);
Point org((width - textsize.width)/2, (height - textsize.height)/2);
Mat image2;
for( i = 0; i < 255; i += 2 )
{
image2 = image - Scalar::all(i);
- putText(image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3,
+ putText(image2, "OpenCV forever!", org, FONT_HERSHEY_COMPLEX, 3,
Scalar(i, i, 255), 5, lineType);
imshow(wndname, image2);
static void onMouse( int event, int x, int y, int, void* )
{
- if( event != CV_EVENT_LBUTTONDOWN )
+ if( event != EVENT_LBUTTONDOWN )
return;
Point seed = Point(x,y);
continue;
drawContours(cimage, contours, (int)i, Scalar::all(255), 1, 8);
- ellipse(cimage, box, Scalar(0,0,255), 1, CV_AA);
- ellipse(cimage, box.center, box.size*0.5f, box.angle, 0, 360, Scalar(0,255,255), 1, CV_AA);
+ ellipse(cimage, box, Scalar(0,0,255), 1, LINE_AA);
+ ellipse(cimage, box.center, box.size*0.5f, box.angle, 0, 360, Scalar(0,255,255), 1, LINE_AA);
Point2f vtx[4];
box.points(vtx);
for( int j = 0; j < 4; j++ )
- line(cimage, vtx[j], vtx[(j+1)%4], Scalar(0,255,0), 1, CV_AA);
+ line(cimage, vtx[j], vtx[(j+1)%4], Scalar(0,255,0), 1, LINE_AA);
}
imshow("result", cimage);
}
// Load images
- Mat imgA = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE );
+ Mat imgA = imread(argv[1], IMREAD_GRAYSCALE );
if( !imgA.data ) {
std::cout<< " --(!) Error reading image " << argv[1] << std::endl;
return -1;
}
- Mat imgB = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE );
+ Mat imgB = imread(argv[2], IMREAD_GRAYSCALE );
if( !imgA.data ) {
std::cout << " --(!) Error reading image " << argv[2] << std::endl;
return -1;
Mat imgMatch;
drawMatches(imgA, keypointsA, imgB, keypointsB, matches, imgMatch);
- namedWindow("matches", CV_WINDOW_KEEPRATIO);
+ namedWindow("matches", WINDOW_KEEPRATIO);
imshow("matches", imgMatch);
waitKey(0);
}
}
//printf("Reading the images...\n");
- Mat img1 = imread(img1_name, CV_LOAD_IMAGE_GRAYSCALE);
- Mat img2 = imread(img2_name, CV_LOAD_IMAGE_GRAYSCALE);
+ Mat img1 = imread(img1_name, IMREAD_GRAYSCALE);
+ Mat img2 = imread(img2_name, IMREAD_GRAYSCALE);
// extract keypoints from the first image
SURF surf_extractor(5.0e3);
const Scalar LIGHTBLUE = Scalar(255,255,160);
const Scalar GREEN = Scalar(0,255,0);
-const int BGD_KEY = CV_EVENT_FLAG_CTRLKEY;
-const int FGD_KEY = CV_EVENT_FLAG_SHIFTKEY;
+const int BGD_KEY = EVENT_FLAG_CTRLKEY;
+const int FGD_KEY = EVENT_FLAG_SHIFTKEY;
static void getBinMask( const Mat& comMask, Mat& binMask )
{
if( comMask.empty() || comMask.type()!=CV_8UC1 )
- CV_Error( CV_StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" );
+ CV_Error( Error::StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" );
if( binMask.empty() || binMask.rows!=comMask.rows || binMask.cols!=comMask.cols )
binMask.create( comMask.size(), CV_8UC1 );
binMask = comMask & 1;
void GCApplication::setRectInMask()
{
- assert( !mask.empty() );
+ CV_Assert( !mask.empty() );
mask.setTo( GC_BGD );
rect.x = max(0, rect.x);
rect.y = max(0, rect.y);
// TODO add bad args check
switch( event )
{
- case CV_EVENT_LBUTTONDOWN: // set rect or GC_BGD(GC_FGD) labels
+ case EVENT_LBUTTONDOWN: // set rect or GC_BGD(GC_FGD) labels
{
bool isb = (flags & BGD_KEY) != 0,
isf = (flags & FGD_KEY) != 0;
lblsState = IN_PROCESS;
}
break;
- case CV_EVENT_RBUTTONDOWN: // set GC_PR_BGD(GC_PR_FGD) labels
+ case EVENT_RBUTTONDOWN: // set GC_PR_BGD(GC_PR_FGD) labels
{
bool isb = (flags & BGD_KEY) != 0,
isf = (flags & FGD_KEY) != 0;
prLblsState = IN_PROCESS;
}
break;
- case CV_EVENT_LBUTTONUP:
+ case EVENT_LBUTTONUP:
if( rectState == IN_PROCESS )
{
rect = Rect( Point(rect.x, rect.y), Point(x,y) );
rectState = SET;
setRectInMask();
- assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );
+ CV_Assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );
showImage();
}
if( lblsState == IN_PROCESS )
showImage();
}
break;
- case CV_EVENT_RBUTTONUP:
+ case EVENT_RBUTTONUP:
if( prLblsState == IN_PROCESS )
{
setLblsInMask(flags, Point(x,y), true);
showImage();
}
break;
- case CV_EVENT_MOUSEMOVE:
+ case EVENT_MOUSEMOVE:
if( rectState == IN_PROCESS )
{
rect = Rect( Point(rect.x, rect.y), Point(x,y) );
- assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );
+ CV_Assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() );
showImage();
}
else if( lblsState == IN_PROCESS )
help();
const string winName = "image";
- cvNamedWindow( winName.c_str(), CV_WINDOW_AUTOSIZE );
- cvSetMouseCallback( winName.c_str(), on_mouse, 0 );
+ namedWindow( winName.c_str(), WINDOW_AUTOSIZE );
+ setMouseCallback( winName.c_str(), on_mouse, 0 );
gcapp.setImageAndWinName( image, winName );
gcapp.showImage();
for(;;)
{
- int c = cvWaitKey(0);
+ int c = waitKey();
switch( (char) c )
{
case '\x1b':
}
exit_main:
- cvDestroyWindow( winName.c_str() );
+ destroyWindow( winName.c_str() );
return 0;
}
for( size_t i = 0; i < circles.size(); i++ )
{
Vec3i c = circles[i];
- circle( cimg, Point(c[0], c[1]), c[2], Scalar(0,0,255), 3, CV_AA);
- circle( cimg, Point(c[0], c[1]), 2, Scalar(0,255,0), 3, CV_AA);
+ circle( cimg, Point(c[0], c[1]), c[2], Scalar(0,0,255), 3, LINE_AA);
+ circle( cimg, Point(c[0], c[1]), 2, Scalar(0,255,0), 3, LINE_AA);
}
imshow("detected circles", cimg);
for( size_t i = 0; i < lines.size(); i++ )
{
Vec4i l = lines[i];
- line( cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, CV_AA);
+ line( cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, LINE_AA);
}
#endif
imshow("source", src);
}
switch (event) {
- case CV_EVENT_LBUTTONDOWN:
+ case EVENT_LBUTTONDOWN:
origin = Point(x, y);
selection = Rect(x, y, 0, 0);
selectObject = true;
break;
- case CV_EVENT_LBUTTONUP:
+ case EVENT_LBUTTONUP:
selectObject = false;
trackObject = -1;
break;
return 0;
}
cout << "Opened camera" << endl;
- cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
- cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
+ cap.set(CAP_PROP_FRAME_WIDTH, 640);
+ cap.set(CAP_PROP_FRAME_HEIGHT, 480);
cap >> frame;
}
int values_read = fscanf(f, "%d %f %f %f %f\n", &i, &w[0], &w[1], &w[2], &w[3]);
CV_Assert(values_read == 5);
sprintf(img_file, "seqG/%04d.png", i);
- image = imread(img_file, CV_LOAD_IMAGE_COLOR);
+ image = imread(img_file, IMREAD_COLOR);
if (image.empty())
break;
selection = Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows),
#include <stdio.h>
#include <iostream>
-#include "opencv2/imgproc/imgproc.hpp"
-#include "opencv2/highgui/highgui.hpp"
-#include "opencv2/flann/miniflann.hpp"
-#include "opencv2/core/utility.hpp"
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <opencv2/flann/miniflann.hpp>
+#include <opencv2/core/utility.hpp>
using namespace cv; // all the new API is put into "cv" namespace. Export its content
using namespace std;
// enable/disable use of mixed API in the code below.
#define DEMO_MIXED_API_USE 1
+#ifdef DEMO_MIXED_API_USE
+# include <opencv2/highgui/highgui_c.h>
+#endif
+
int main( int argc, char** argv )
{
help();
cvtColor(img_yuv, img, COLOR_YCrCb2BGR);
// this is counterpart for cvNamedWindow
- namedWindow("image with grain", CV_WINDOW_AUTOSIZE);
+ namedWindow("image with grain", WINDOW_AUTOSIZE);
#if DEMO_MIXED_API_USE
// this is to demonstrate that img and iplimg really share the data - the result of the above
// processing is stored in img and thus in iplimg too.
cout << "The warped image has been saved in the file: " << warpedImFile << endl << flush;
- namedWindow ("image", CV_WINDOW_AUTOSIZE);
- namedWindow ("template", CV_WINDOW_AUTOSIZE);
- namedWindow ("warped image", CV_WINDOW_AUTOSIZE);
- namedWindow ("error (black: no error)", CV_WINDOW_AUTOSIZE);
+ namedWindow ("image", WINDOW_AUTOSIZE);
+ namedWindow ("template", WINDOW_AUTOSIZE);
+ namedWindow ("warped image", WINDOW_AUTOSIZE);
+ namedWindow ("error (black: no error)", WINDOW_AUTOSIZE);
moveWindow ("template", 350, 350);
moveWindow ("warped image", 600, 300);
static void onMouse( int event, int x, int y, int flags, void* )
{
- if( event == CV_EVENT_LBUTTONUP || !(flags & CV_EVENT_FLAG_LBUTTON) )
+ if( event == EVENT_LBUTTONUP || !(flags & EVENT_FLAG_LBUTTON) )
prevPt = Point(-1,-1);
- else if( event == CV_EVENT_LBUTTONDOWN )
+ else if( event == EVENT_LBUTTONDOWN )
prevPt = Point(x,y);
- else if( event == CV_EVENT_MOUSEMOVE && (flags & CV_EVENT_FLAG_LBUTTON) )
+ else if( event == EVENT_MOUSEMOVE && (flags & EVENT_FLAG_LBUTTON) )
{
Point pt(x,y);
if( prevPt.x < 0 )
Mat pointChunk = points.rowRange(k*sampleCount/clusterCount,
k == clusterCount - 1 ? sampleCount :
(k+1)*sampleCount/clusterCount);
- rng.fill(pointChunk, CV_RAND_NORMAL, Scalar(center.x, center.y), Scalar(img.cols*0.05, img.rows*0.05));
+ rng.fill(pointChunk, RNG::NORMAL, Scalar(center.x, center.y), Scalar(img.cols*0.05, img.rows*0.05));
}
randShuffle(points, 1, &rng);
kmeans(points, clusterCount, labels,
- TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0),
+ TermCriteria( TermCriteria::EPS+TermCriteria::COUNT, 10, 1.0),
3, KMEANS_PP_CENTERS, centers);
img = Scalar::all(0);
{
int clusterIdx = labels.at<int>(i);
Point ipt = points.at<Point2f>(i);
- circle( img, ipt, 2, colorTab[clusterIdx], CV_FILLED, CV_AA );
+ circle( img, ipt, 2, colorTab[clusterIdx], FILLED, LINE_AA );
}
imshow("clusters", img);
cap.open(argv[1]);
if( cap.isOpened() )
cout << "Video " << argv[1] <<
- ": width=" << cap.get(CV_CAP_PROP_FRAME_WIDTH) <<
- ", height=" << cap.get(CV_CAP_PROP_FRAME_HEIGHT) <<
- ", nframes=" << cap.get(CV_CAP_PROP_FRAME_COUNT) << endl;
+ ": width=" << cap.get(CAP_PROP_FRAME_WIDTH) <<
+ ", height=" << cap.get(CAP_PROP_FRAME_HEIGHT) <<
+ ", nframes=" << cap.get(CAP_PROP_FRAME_COUNT) << endl;
if( argc > 2 && isdigit(argv[2][0]) )
{
int pos;
sscanf(argv[2], "%d", &pos);
cout << "seeking to frame #" << pos << endl;
- cap.set(CV_CAP_PROP_POS_FRAMES, pos);
+ cap.set(CAP_PROP_POS_FRAMES, pos);
}
}
public:
static void start(const std::string& a_img_name)
{
- cvSetMouseCallback(a_img_name.c_str(), Mouse::cv_on_mouse, 0);
+ cv::setMouseCallback(a_img_name.c_str(), Mouse::cv_on_mouse, 0);
}
static int event(void)
{
int num_modalities = (int)detector->getModalities().size();
// Open Kinect sensor
- cv::VideoCapture capture( CV_CAP_OPENNI );
+ cv::VideoCapture capture( cv::CAP_OPENNI );
if (!capture.isOpened())
{
printf("Could not open OpenNI-capable sensor\n");
return -1;
}
- capture.set(CV_CAP_PROP_OPENNI_REGISTRATION, 1);
- double focal_length = capture.get(CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
+ capture.set(cv::CAP_PROP_OPENNI_REGISTRATION, 1);
+ double focal_length = capture.get(cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
//printf("Focal length = %f\n", focal_length);
// Main loop
{
// Capture next color/depth pair
capture.grab();
- capture.retrieve(depth, CV_CAP_OPENNI_DEPTH_MAP);
- capture.retrieve(color, CV_CAP_OPENNI_BGR_IMAGE);
+ capture.retrieve(depth, cv::CAP_OPENNI_DEPTH_MAP);
+ capture.retrieve(color, cv::CAP_OPENNI_BGR_IMAGE);
std::vector<cv::Mat> sources;
sources.push_back(color);
cv::Point pt1 = mouse - roi_offset; // top left
cv::Point pt2 = mouse + roi_offset; // bottom right
- if (event == CV_EVENT_RBUTTONDOWN)
+ if (event == cv::EVENT_RBUTTONDOWN)
{
// Compute object mask by subtracting the plane within the ROI
std::vector<CvPoint> chain(4);
cv::imshow("normals", quantized_images[1]);
cv::FileStorage fs;
- char key = (char)cvWaitKey(10);
+ char key = (char)cv::waitKey(10);
if( key == 'q' )
break;
static void onMouse( int event, int x, int y, int /*flags*/, void* /*param*/ )
{
- if( event == CV_EVENT_LBUTTONDOWN )
+ if( event == EVENT_LBUTTONDOWN )
{
point = Point2f((float)x,(float)y);
addRemovePt = true;
return -1;
}
- Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
- Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
+ Mat img1 = imread(argv[1], IMREAD_GRAYSCALE);
+ Mat img2 = imread(argv[2], IMREAD_GRAYSCALE);
if(img1.empty() || img2.empty())
{
printf("Can't read one of the images\n");
Mat& queryImage, vector <Mat>& trainImages, vector<string>& trainImageNames )
{
cout << "< Reading the images..." << endl;
- queryImage = imread( queryImageName, CV_LOAD_IMAGE_GRAYSCALE);
+ queryImage = imread( queryImageName, IMREAD_GRAYSCALE);
if( queryImage.empty() )
{
cout << "Query image can not be read." << endl << ">" << endl;
for( size_t i = 0; i < trainImageNames.size(); i++ )
{
string filename = trainDirName + trainImageNames[i];
- Mat img = imread( filename, CV_LOAD_IMAGE_GRAYSCALE );
+ Mat img = imread( filename, IMREAD_GRAYSCALE );
if( img.empty() )
cout << "Train image " << filename << " can not be read." << endl;
else
colorRad = 10;
maxPyrLevel = 1;
- namedWindow( winName, CV_WINDOW_AUTOSIZE );
+ namedWindow( winName, WINDOW_AUTOSIZE );
createTrackbar( "spatialRad", winName, &spatialRad, 80, meanShiftSegmentation );
createTrackbar( "colorRad", winName, &colorRad, 60, meanShiftSegmentation );
img = Scalar::all(0);
for( i = 0; i < count; i++ )
- circle( img, points[i], 3, Scalar(0, 0, 255), CV_FILLED, CV_AA );
+ circle( img, points[i], 3, Scalar(0, 0, 255), FILLED, LINE_AA );
for( i = 0; i < 4; i++ )
- line(img, vtx[i], vtx[(i+1)%4], Scalar(0, 255, 0), 1, CV_AA);
+ line(img, vtx[i], vtx[(i+1)%4], Scalar(0, 255, 0), 1, LINE_AA);
- circle(img, center, cvRound(radius), Scalar(0, 255, 255), 1, CV_AA);
+ circle(img, center, cvRound(radius), Scalar(0, 255, 255), 1, LINE_AA);
imshow( "rect & circle", img );
- char key = (char)cvWaitKey();
+ char key = (char)waitKey();
if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
break;
}
OpenClose(open_close_pos, 0);
ErodeDilate(erode_dilate_pos, 0);
- c = cvWaitKey(0);
+ c = waitKey();
if( (char)c == 27 )
break;
static float getMaxDisparity( VideoCapture& capture )
{
const int minDistance = 400; // mm
- float b = (float)capture.get( CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE ); // mm
- float F = (float)capture.get( CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH ); // pixels
+ float b = (float)capture.get( CAP_OPENNI_DEPTH_GENERATOR_BASELINE ); // mm
+ float F = (float)capture.get( CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH ); // pixels
return b * F / minDistance;
}
{
string mask( argv[++i] );
if( mask.size() != 5)
- CV_Error( CV_StsBadArg, "Incorrect length of -m argument string" );
+ CV_Error( Error::StsBadArg, "Incorrect length of -m argument string" );
int val = atoi(mask.c_str());
int l = 100000, r = 10000, sum = 0;
if( isVideoReading )
capture.open( filename );
else
- capture.open( CV_CAP_OPENNI );
+ capture.open( CAP_OPENNI );
cout << "done." << endl;
switch ( imageMode )
{
case 0:
- modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_VGA_30HZ );
+ modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_VGA_30HZ );
break;
case 1:
- modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_15HZ );
+ modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_SXGA_15HZ );
break;
case 2:
- modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_30HZ );
+ modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_SXGA_30HZ );
break;
//The following modes are only supported by the Xtion Pro Live
case 3:
- modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_30HZ );
+ modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_QVGA_30HZ );
break;
case 4:
- modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_60HZ );
+ modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_QVGA_60HZ );
break;
default:
- CV_Error( CV_StsBadArg, "Unsupported image mode property.\n");
+ CV_Error( Error::StsBadArg, "Unsupported image mode property.\n");
}
if (!modeRes)
cout << "\nThis image mode is not supported by the device, the default value (CV_CAP_OPENNI_SXGA_15HZ) will be used.\n" << endl;
    // Print some available device settings.
cout << "\nDepth generator output mode:" << endl <<
- "FRAME_WIDTH " << capture.get( CV_CAP_PROP_FRAME_WIDTH ) << endl <<
- "FRAME_HEIGHT " << capture.get( CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
- "FRAME_MAX_DEPTH " << capture.get( CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl <<
- "FPS " << capture.get( CV_CAP_PROP_FPS ) << endl <<
- "REGISTRATION " << capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) << endl;
- if( capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) )
+ "FRAME_WIDTH " << capture.get( CAP_PROP_FRAME_WIDTH ) << endl <<
+ "FRAME_HEIGHT " << capture.get( CAP_PROP_FRAME_HEIGHT ) << endl <<
+ "FRAME_MAX_DEPTH " << capture.get( CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl <<
+ "FPS " << capture.get( CAP_PROP_FPS ) << endl <<
+ "REGISTRATION " << capture.get( CAP_PROP_OPENNI_REGISTRATION ) << endl;
+ if( capture.get( CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) )
{
cout <<
"\nImage generator output mode:" << endl <<
- "FRAME_WIDTH " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_WIDTH ) << endl <<
- "FRAME_HEIGHT " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
- "FPS " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FPS ) << endl;
+ "FRAME_WIDTH " << capture.get( CAP_OPENNI_IMAGE_GENERATOR+CAP_PROP_FRAME_WIDTH ) << endl <<
+ "FRAME_HEIGHT " << capture.get( CAP_OPENNI_IMAGE_GENERATOR+CAP_PROP_FRAME_HEIGHT ) << endl <<
+ "FPS " << capture.get( CAP_OPENNI_IMAGE_GENERATOR+CAP_PROP_FPS ) << endl;
}
else
{
}
else
{
- if( retrievedImageFlags[0] && capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP ) )
+ if( retrievedImageFlags[0] && capture.retrieve( depthMap, CAP_OPENNI_DEPTH_MAP ) )
{
const float scaleFactor = 0.05f;
Mat show; depthMap.convertTo( show, CV_8UC1, scaleFactor );
imshow( "depth map", show );
}
- if( retrievedImageFlags[1] && capture.retrieve( disparityMap, CV_CAP_OPENNI_DISPARITY_MAP ) )
+ if( retrievedImageFlags[1] && capture.retrieve( disparityMap, CAP_OPENNI_DISPARITY_MAP ) )
{
if( isColorizeDisp )
{
}
}
- if( retrievedImageFlags[2] && capture.retrieve( validDepthMap, CV_CAP_OPENNI_VALID_DEPTH_MASK ) )
+ if( retrievedImageFlags[2] && capture.retrieve( validDepthMap, CAP_OPENNI_VALID_DEPTH_MASK ) )
imshow( "valid depth mask", validDepthMap );
- if( retrievedImageFlags[3] && capture.retrieve( bgrImage, CV_CAP_OPENNI_BGR_IMAGE ) )
+ if( retrievedImageFlags[3] && capture.retrieve( bgrImage, CAP_OPENNI_BGR_IMAGE ) )
imshow( "rgb image", bgrImage );
- if( retrievedImageFlags[4] && capture.retrieve( grayImage, CV_CAP_OPENNI_GRAY_IMAGE ) )
+ if( retrievedImageFlags[4] && capture.retrieve( grayImage, CAP_OPENNI_GRAY_IMAGE ) )
imshow( "gray image", grayImage );
}
std::ifstream file(filename.c_str(), ifstream::in);
if (!file) {
string error_message = "No valid input file was given, please check the given filename.";
- CV_Error(CV_StsBadArg, error_message);
+ CV_Error(Error::StsBadArg, error_message);
}
string line;
while (getline(file, line)) {
Mat src = _src.getMat();
// only allow one channel
if(src.channels() != 1) {
- CV_Error(CV_StsBadArg, "Only Matrices with one channel are supported");
+ CV_Error(Error::StsBadArg, "Only Matrices with one channel are supported");
}
// create and return normalized image
Mat dst;
struct params *p = (struct params *)ptr;
- p->pca = PCA(p->data, cv::Mat(), CV_PCA_DATA_AS_ROW, var);
+ p->pca = PCA(p->data, cv::Mat(), PCA::DATA_AS_ROW, var);
Mat point = p->pca.project(p->data.row(0));
Mat reconstruction = p->pca.backProject(point);
// Quit if there are not enough images for this demo.
if(images.size() <= 1) {
string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
- CV_Error(CV_StsError, error_message);
+ CV_Error(Error::StsError, error_message);
}
// Reshape and stack images into a rowMatrix
Mat data = formatImagesForPCA(images);
// perform PCA
- PCA pca(data, cv::Mat(), CV_PCA_DATA_AS_ROW, 0.95); // trackbar is initially set here, also this is a common value for retainedVariance
+ PCA pca(data, cv::Mat(), PCA::DATA_AS_ROW, 0.95); // trackbar is initially set here, also this is a common value for retainedVariance
// Demonstration of the effect of retainedVariance on the first image
Mat point = pca.project(data.row(0)); // project into the eigenspace, thus the image becomes a "point"
// init highgui window
string winName = "Reconstruction | press 'q' to quit";
- namedWindow(winName, CV_WINDOW_NORMAL);
+ namedWindow(winName, WINDOW_NORMAL);
// params struct to pass to the trackbar handler
params p;
{
// draw a circle and line indicating the shift direction...
Point center(curr.cols >> 1, curr.rows >> 1);
- circle(frame, center, (int)radius, Scalar(0, 255, 0), 3, CV_AA);
- line(frame, center, Point(center.x + (int)shift.x, center.y + (int)shift.y), Scalar(0, 255, 0), 3, CV_AA);
+ circle(frame, center, (int)radius, Scalar(0, 255, 0), 3, LINE_AA);
+ line(frame, center, Point(center.x + (int)shift.x, center.y + (int)shift.y), Scalar(0, 255, 0), 3, LINE_AA);
}
imshow("phase shift", frame);
int updateFlag = 0;
- if( event == CV_EVENT_LBUTTONUP )
+ if( event == EVENT_LBUTTONUP )
{
if( classColors.empty() )
return;
trainedPointsMarkers.push_back( (int)(classColors.size()-1) );
updateFlag = true;
}
- else if( event == CV_EVENT_RBUTTONUP )
+ else if( event == EVENT_RBUTTONUP )
{
#if _BT_
if( classColors.size() < 2 )
imgDst.create( 480, 640, CV_8UC3 );
imshow( "points", img );
- cvSetMouseCallback( "points", on_mouse );
+ setMouseCallback( "points", on_mouse );
for(;;)
{
}
}
Scalar color( 0, 0, 255 );
- drawContours( dst, contours, largestComp, color, CV_FILLED, 8, hierarchy );
+ drawContours( dst, contours, largestComp, color, FILLED, LINE_8, hierarchy );
}
for(;;)
{
float Z = 0.f;
- bool dragging = (mouse.buttonState & CV_EVENT_FLAG_LBUTTON) != 0;
+ bool dragging = (mouse.buttonState & EVENT_FLAG_LBUTTON) != 0;
int npt = nobjpt;
- if( (mouse.event == CV_EVENT_LBUTTONDOWN ||
- mouse.event == CV_EVENT_LBUTTONUP ||
+ if( (mouse.event == EVENT_LBUTTONDOWN ||
+ mouse.event == EVENT_LBUTTONUP ||
dragging) && nobjpt < 4 )
{
Point2f m = mouse.pt;
}
box[npt] = image2plane(imgpt[npt], R, tvec, cameraMatrix, npt<3 ? 0 : Z);
- if( (npt == 0 && mouse.event == CV_EVENT_LBUTTONDOWN) ||
+ if( (npt == 0 && mouse.event == EVENT_LBUTTONDOWN) ||
(npt > 0 && norm(box[npt] - box[npt-1]) > eps &&
- mouse.event == CV_EVENT_LBUTTONUP) )
+ mouse.event == EVENT_LBUTTONUP) )
{
nobjpt++;
if( nobjpt < 4 )
{
const Point* p = &squares[i][0];
int n = (int)squares[i].size();
- polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, CV_AA);
+ polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, LINE_AA);
}
imshow(wndname, image);
int process(vector<string> images)
{
- namedWindow("image",CV_WINDOW_KEEPRATIO); //resizable window;
- for (size_t i = 0; i < images.size(); i++)
- {
- Mat image = imread(images[i], CV_LOAD_IMAGE_GRAYSCALE); // do grayscale processing?
- imshow("image",image);
- cout << "Press a key to see the next image in the list." << endl;
- waitKey(); // wait indefinitely for a key to be pressed
- }
- return 0;
+ namedWindow("image", WINDOW_KEEPRATIO); //resizable window;
+ for (size_t i = 0; i < images.size(); i++)
+ {
+ Mat image = imread(images[i], IMREAD_GRAYSCALE); // do grayscale processing?
+ imshow("image",image);
+ cout << "Press a key to see the next image in the list." << endl;
+ waitKey(); // wait indefinitely for a key to be pressed
+ }
+ return 0;
}
}
char filename[200];
string window_name = "video | q or esc to quit";
cout << "press space to save a picture. q or esc to quit" << endl;
- namedWindow(window_name, CV_WINDOW_KEEPRATIO); //resizable window;
+ namedWindow(window_name, WINDOW_KEEPRATIO); //resizable window;
Mat frame;
for (;;) {
capture >> frame;
return -1;
}
- Size refS = Size((int) captRefrnc.get(CV_CAP_PROP_FRAME_WIDTH),
- (int) captRefrnc.get(CV_CAP_PROP_FRAME_HEIGHT)),
- uTSi = Size((int) captUndTst.get(CV_CAP_PROP_FRAME_WIDTH),
- (int) captUndTst.get(CV_CAP_PROP_FRAME_HEIGHT));
+ Size refS = Size((int) captRefrnc.get(CAP_PROP_FRAME_WIDTH),
+ (int) captRefrnc.get(CAP_PROP_FRAME_HEIGHT)),
+ uTSi = Size((int) captUndTst.get(CAP_PROP_FRAME_WIDTH),
+ (int) captUndTst.get(CAP_PROP_FRAME_HEIGHT));
if (refS != uTSi)
{
const char* WIN_RF = "Reference";
// Windows
- namedWindow(WIN_RF, CV_WINDOW_AUTOSIZE);
- namedWindow(WIN_UT, CV_WINDOW_AUTOSIZE);
- cvMoveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
- cvMoveWindow(WIN_UT, refS.width, 0); //1500, 2
+ namedWindow(WIN_RF, WINDOW_AUTOSIZE);
+ namedWindow(WIN_UT, WINDOW_AUTOSIZE);
+ moveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
+ moveWindow(WIN_UT, refS.width, 0); //1500, 2
cout << "Reference frame resolution: Width=" << refS.width << " Height=" << refS.height
- << " of nr#: " << captRefrnc.get(CV_CAP_PROP_FRAME_COUNT) << endl;
+ << " of nr#: " << captRefrnc.get(CAP_PROP_FRAME_COUNT) << endl;
cout << "PSNR trigger value " << setiosflags(ios::fixed) << setprecision(3)
<< psnrTriggerValue << endl;
imshow(WIN_RF, frameReference);
imshow(WIN_UT, frameUnderTest);
- c = (char)cvWaitKey(delay);
+ c = (char)waitKey(delay);
if (c == 27) break;
}
string::size_type pAt = source.find_last_of('.'); // Find extension point
const string NAME = source.substr(0, pAt) + argv[2][0] + ".avi"; // Form the new name with container
- int ex = static_cast<int>(inputVideo.get(CV_CAP_PROP_FOURCC)); // Get Codec Type- Int form
+ int ex = static_cast<int>(inputVideo.get(CAP_PROP_FOURCC)); // Get Codec Type- Int form
// Transform from int to char via Bitwise operators
char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0};
- Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), // Acquire input size
- (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT));
+ Size S = Size((int) inputVideo.get(CAP_PROP_FRAME_WIDTH), // Acquire input size
+ (int) inputVideo.get(CAP_PROP_FRAME_HEIGHT));
VideoWriter outputVideo; // Open the output
if (askOutputType)
- outputVideo.open(NAME, ex=-1, inputVideo.get(CV_CAP_PROP_FPS), S, true);
+ outputVideo.open(NAME, ex=-1, inputVideo.get(CAP_PROP_FPS), S, true);
else
- outputVideo.open(NAME, ex, inputVideo.get(CV_CAP_PROP_FPS), S, true);
+ outputVideo.open(NAME, ex, inputVideo.get(CAP_PROP_FPS), S, true);
if (!outputVideo.isOpened())
{
}
cout << "Input frame resolution: Width=" << S.width << " Height=" << S.height
- << " of nr#: " << inputVideo.get(CV_CAP_PROP_FRAME_COUNT) << endl;
+ << " of nr#: " << inputVideo.get(CAP_PROP_FRAME_COUNT) << endl;
cout << "Input codec type: " << EXT << endl;
int channel = 2; // Select the channel to save
equalizeHist( src, dst );
/// Display results
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
- namedWindow( equalized_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
+ namedWindow( equalized_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
imshow( equalized_window, dst );
templ = imread( argv[2], 1 );
/// Create windows
- namedWindow( image_window, CV_WINDOW_AUTOSIZE );
- namedWindow( result_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( image_window, WINDOW_AUTOSIZE );
+ namedWindow( result_window, WINDOW_AUTOSIZE );
/// Create Trackbar
const char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED";
/// Create Trackbar to enter the number of bins
const char* window_image = "Source image";
- namedWindow( window_image, CV_WINDOW_AUTOSIZE );
+ namedWindow( window_image, WINDOW_AUTOSIZE );
createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj );
Hist_and_Backproj(0, 0);
cvtColor( src, hsv, COLOR_BGR2HSV );
/// Show the image
- namedWindow( window_image, CV_WINDOW_AUTOSIZE );
+ namedWindow( window_image, WINDOW_AUTOSIZE );
imshow( window_image, src );
/// Set Trackbars for floodfill thresholds
*/
void pickPoint (int event, int x, int y, int, void* )
{
- if( event != CV_EVENT_LBUTTONDOWN )
+ if( event != EVENT_LBUTTONDOWN )
{ return; }
// Fill and get the mask
}
/// Display
- namedWindow("calcHist Demo", CV_WINDOW_AUTOSIZE );
+ namedWindow("calcHist Demo", WINDOW_AUTOSIZE );
imshow("calcHist Demo", histImage );
waitKey(0);
{ return -1; }
/// Create windows
- namedWindow( "Erosion Demo", CV_WINDOW_AUTOSIZE );
- namedWindow( "Dilation Demo", CV_WINDOW_AUTOSIZE );
- cvMoveWindow( "Dilation Demo", src.cols, 0 );
+ namedWindow( "Erosion Demo", WINDOW_AUTOSIZE );
+ namedWindow( "Dilation Demo", WINDOW_AUTOSIZE );
+ moveWindow( "Dilation Demo", src.cols, 0 );
/// Create Erosion Trackbar
createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo",
{ return -1; }
/// Create window
- namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+ namedWindow( window_name, WINDOW_AUTOSIZE );
/// Create Trackbar to select Morphology operation
createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat", window_name, &morph_operator, max_operator, Morphology_Operations );
dst = tmp;
/// Create window
- namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+ namedWindow( window_name, WINDOW_AUTOSIZE );
imshow( window_name, dst );
/// Loop
*/
int main( void )
{
- namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+ namedWindow( window_name, WINDOW_AUTOSIZE );
/// Load the source image
src = imread( "../images/lena.png", 1 );
dst = Mat::zeros( src.size(), src.type() );
putText( dst, caption,
Point( src.cols/4, src.rows/2),
- CV_FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
+ FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
imshow( window_name, dst );
int c = waitKey( DELAY_CAPTION );
cvtColor( src, src_gray, COLOR_RGB2GRAY );
/// Create a window to display results
- namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+ namedWindow( window_name, WINDOW_AUTOSIZE );
/// Create Trackbar to choose type of Threshold
createTrackbar( trackbar_type,
cvtColor( src, src_gray, COLOR_BGR2GRAY );
/// Create a window
- namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+ namedWindow( window_name, WINDOW_AUTOSIZE );
/// Create a Trackbar for user to enter threshold
createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );
/// Show what you got
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
- namedWindow( warp_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( warp_window, WINDOW_AUTOSIZE );
imshow( warp_window, warp_dst );
- namedWindow( warp_rotate_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( warp_rotate_window, WINDOW_AUTOSIZE );
imshow( warp_rotate_window, warp_rotate_dst );
/// Wait until user exits the program
}
/// Show your results
- namedWindow( "Hough Circle Transform Demo", CV_WINDOW_AUTOSIZE );
+ namedWindow( "Hough Circle Transform Demo", WINDOW_AUTOSIZE );
imshow( "Hough Circle Transform Demo", src );
waitKey(0);
char thresh_label[50];
sprintf( thresh_label, "Thres: %d + input", min_threshold );
- namedWindow( standard_name, CV_WINDOW_AUTOSIZE );
+ namedWindow( standard_name, WINDOW_AUTOSIZE );
createTrackbar( thresh_label, standard_name, &s_trackbar, max_trackbar, Standard_Hough);
- namedWindow( probabilistic_name, CV_WINDOW_AUTOSIZE );
+ namedWindow( probabilistic_name, WINDOW_AUTOSIZE );
createTrackbar( thresh_label, probabilistic_name, &p_trackbar, max_trackbar, Probabilistic_Hough);
/// Initialize
Point pt1( cvRound(x0 + alpha*(-sin_t)), cvRound(y0 + alpha*cos_t) );
Point pt2( cvRound(x0 - alpha*(-sin_t)), cvRound(y0 - alpha*cos_t) );
- line( standard_hough, pt1, pt2, Scalar(255,0,0), 3, CV_AA);
+ line( standard_hough, pt1, pt2, Scalar(255,0,0), 3, LINE_AA);
}
imshow( standard_name, standard_hough );
for( size_t i = 0; i < p_lines.size(); i++ )
{
Vec4i l = p_lines[i];
- line( probabilistic_hough, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(255,0,0), 3, CV_AA);
+ line( probabilistic_hough, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(255,0,0), 3, LINE_AA);
}
imshow( probabilistic_name, probabilistic_hough );
cvtColor( src, src_gray, COLOR_RGB2GRAY );
/// Create window
- namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+ namedWindow( window_name, WINDOW_AUTOSIZE );
/// Apply Laplace function
Mat abs_dst;
map_y.create( src.size(), CV_32FC1 );
/// Create window
- namedWindow( remap_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( remap_window, WINDOW_AUTOSIZE );
/// Loop
for(;;)
cvtColor( src, src_gray, COLOR_RGB2GRAY );
/// Create window
- namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+ namedWindow( window_name, WINDOW_AUTOSIZE );
/// Generate grad_x and grad_y
Mat grad_x, grad_y;
printf( " ** Press 'ESC' to exit the program \n");
/// Create window
- namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+ namedWindow( window_name, WINDOW_AUTOSIZE );
/// Initialize arguments for the filter
top = (int) (0.05*src.rows); bottom = (int) (0.05*src.rows);
{ return -1; }
/// Create window
- namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+ namedWindow( window_name, WINDOW_AUTOSIZE );
/// Initialize arguments for the filter
anchor = Point( -1, -1 );
/// Create Window
const char* source_window = "Source";
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
}
/// Show in a window
- namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
+ namedWindow( "Contours", WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
}
/// Create Window
const char* source_window = "Source";
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );
}
/// Show in a window
- namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
+ namedWindow( "Contours", WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
}
/// Create Window
const char* source_window = "Source";
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );
}
/// Show in a window
- namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
+ namedWindow( "Contours", WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
}
/// Create Window
const char* source_window = "Source";
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );
}
/// Show in a window
- namedWindow( "Hull demo", CV_WINDOW_AUTOSIZE );
+ namedWindow( "Hull demo", WINDOW_AUTOSIZE );
imshow( "Hull demo", drawing );
}
/// Create Window
const char* source_window = "Source";
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
}
/// Show in a window
- namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
+ namedWindow( "Contours", WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
/// Calculate the area with the moments 00 and compare with the result of the OpenCV function
/// Create Window and show your results
const char* source_window = "Source";
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
- namedWindow( "Distance", CV_WINDOW_AUTOSIZE );
+ namedWindow( "Distance", WINDOW_AUTOSIZE );
imshow( "Distance", drawing );
waitKey(0);
minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal, 0, 0, Mat() );
/* Create Window and Trackbar */
- namedWindow( myHarris_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( myHarris_window, WINDOW_AUTOSIZE );
createTrackbar( " Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel, myHarris_function );
myHarris_function( 0, 0 );
minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal, 0, 0, Mat() );
/* Create Window and Trackbar */
- namedWindow( myShiTomasi_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( myShiTomasi_window, WINDOW_AUTOSIZE );
createTrackbar( " Quality Level:", myShiTomasi_window, &myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function );
myShiTomasi_function( 0, 0 );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
/// Create a window and a trackbar
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo );
imshow( source_window, src );
}
}
/// Showing the result
- namedWindow( corners_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( corners_window, WINDOW_AUTOSIZE );
imshow( corners_window, dst_norm_scaled );
}
cvtColor( src, src_gray, COLOR_BGR2GRAY );
/// Create Window
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
/// Create Trackbar to set the number of corners
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0 ); }
/// Show what you got
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, copy );
    /// Set the needed parameters to find the refined corners
Size winSize = Size( 5, 5 );
Size zeroZone = Size( -1, -1 );
- TermCriteria criteria = TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001 );
+ TermCriteria criteria = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 40, 0.001 );
/// Calculate the refined corner locations
cornerSubPix( src_gray, corners, winSize, zeroZone, criteria );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
/// Create Window
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
/// Create Trackbar to set the number of corners
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0 ); }
/// Show what you got
- namedWindow( source_window, CV_WINDOW_AUTOSIZE );
+ namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, copy );
}
view0.copyTo(result);
}
else if( atImageList < (int)imageList.size() )
- result = imread(imageList[atImageList++], CV_LOAD_IMAGE_COLOR);
+ result = imread(imageList[atImageList++], IMREAD_COLOR);
return result;
}
{ readme(); return -1; }
//-- 1. Read the images
- Mat imgLeft = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
- Mat imgRight = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
+ Mat imgLeft = imread( argv[1], IMREAD_GRAYSCALE );
+ Mat imgRight = imread( argv[2], IMREAD_GRAYSCALE );
//-- And create the image in which we will save our disparities
Mat imgDisparity16S = Mat( imgLeft.rows, imgLeft.cols, CV_16S );
Mat imgDisparity8U = Mat( imgLeft.rows, imgLeft.cols, CV_8UC1 );
//-- 4. Display it as a CV_8UC1 image
imgDisparity16S.convertTo( imgDisparity8U, CV_8UC1, 255/(maxVal - minVal));
- namedWindow( windowDisparity, CV_WINDOW_NORMAL );
+ namedWindow( windowDisparity, WINDOW_NORMAL );
imshow( windowDisparity, imgDisparity8U );
//-- 5. Save the image
/// 3. Display your stuff!
imshow( atom_window, atom_image );
- cvMoveWindow( atom_window, 0, 200 );
+ moveWindow( atom_window, 0, 200 );
imshow( rook_window, rook_image );
- cvMoveWindow( rook_window, w, 200 );
+ moveWindow( rook_window, w, 200 );
waitKey( 0 );
return(0);
*/
int Displaying_Big_End( Mat image, char* window_name, RNG )
{
- Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
+ Size textsize = getTextSize("OpenCV forever!", FONT_HERSHEY_COMPLEX, 3, 5, 0);
Point org((window_width - textsize.width)/2, (window_height - textsize.height)/2);
int lineType = 8;
for( int i = 0; i < 255; i += 2 )
{
image2 = image - Scalar::all(i);
- putText( image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3,
+ putText( image2, "OpenCV forever!", org, FONT_HERSHEY_COMPLEX, 3,
Scalar(i, i, 255), 5, lineType );
imshow( window_name, image2 );
const char* filename = argc >=2 ? argv[1] : "lena.jpg";
- Mat I = imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
+ Mat I = imread(filename, IMREAD_GRAYSCALE);
if( I.empty())
return -1;
q2.copyTo(q1);
tmp.copyTo(q2);
- normalize(magI, magI, 0, 1, CV_MINMAX); // Transform the matrix with float values into a
+ normalize(magI, magI, 0, 1, NORM_MINMAX); // Transform the matrix with float values into a
// viewable image form (float between values 0 and 1).
imshow("Input Image" , I ); // Show the result
Mat I, J;
if( argc == 4 && !strcmp(argv[3],"G") )
- I = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
+ I = imread(argv[1], IMREAD_GRAYSCALE);
else
- I = imread(argv[1], CV_LOAD_IMAGE_COLOR);
+ I = imread(argv[1], IMREAD_COLOR);
if (!I.data)
{
// comment out the define to use only the latest C++ API
#define DEMO_MIXED_API_USE
+#ifdef DEMO_MIXED_API_USE
+# include <opencv2/highgui/highgui_c.h>
+#endif
+
int main( int argc, char** argv )
{
help(argv[0]);
cvtColor(I_YUV, I, COLOR_YCrCb2BGR); // and produce the output RGB image
- namedWindow("image with grain", CV_WINDOW_AUTOSIZE); // use this to create images
+ namedWindow("image with grain", WINDOW_AUTOSIZE); // use this to create images
#ifdef DEMO_MIXED_API_USE
// this is to demonstrate that I and IplI really share the data - the result of the above
Mat I, J, K;
if (argc >= 3 && !strcmp("G", argv[2]))
- I = imread( filename, CV_LOAD_IMAGE_GRAYSCALE);
+ I = imread( filename, IMREAD_GRAYSCALE);
else
- I = imread( filename, CV_LOAD_IMAGE_COLOR);
+ I = imread( filename, IMREAD_COLOR);
- namedWindow("Input", CV_WINDOW_AUTOSIZE);
- namedWindow("Output", CV_WINDOW_AUTOSIZE);
+ namedWindow("Input", WINDOW_AUTOSIZE);
+ namedWindow("Output", WINDOW_AUTOSIZE);
imshow("Input", I);
double t = (double)getTickCount();
cout << "Hand written function times passed in seconds: " << t << endl;
imshow("Output", J);
- cvWaitKey(0);
+ waitKey();
Mat kern = (Mat_<char>(3,3) << 0, -1, 0,
-1, 5, -1,
imshow("Output", K);
- cvWaitKey(0);
+ waitKey();
return 0;
}
void Sharpen(const Mat& myImage,Mat& Result)
if( argc != 3 )
{ readme(); return -1; }
- Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
- Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
+ Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
+ Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
if( argc != 3 )
{ readme(); return -1; }
- Mat img_object = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
- Mat img_scene = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
+ Mat img_object = imread( argv[1], IMREAD_GRAYSCALE );
+ Mat img_scene = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_object.data || !img_scene.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
if( argc != 3 )
{ return -1; }
- Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
- Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
+ Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
+ Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ return -1; }
if( argc != 3 )
{ readme(); return -1; }
- Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
- Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
+ Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
+ Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
}
Mat image;
- image = imread(argv[1], CV_LOAD_IMAGE_COLOR); // Read the file
+ image = imread(argv[1], IMREAD_COLOR); // Read the file
- if(! image.data ) // Check for invalid input
+ if(! image.data ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
}
- namedWindow( "Display window", CV_WINDOW_AUTOSIZE );// Create a window for display.
- imshow( "Display window", image ); // Show our image inside it.
+ namedWindow( "Display window", WINDOW_AUTOSIZE ); // Create a window for display.
+ imshow( "Display window", image ); // Show our image inside it.
- waitKey(0); // Wait for a keystroke in the window
+ waitKey(0); // Wait for a keystroke in the window
return 0;
}
\ No newline at end of file
return -1;
}
- Size refS = Size((int) captRefrnc.get(CV_CAP_PROP_FRAME_WIDTH),
- (int) captRefrnc.get(CV_CAP_PROP_FRAME_HEIGHT)),
- uTSi = Size((int) captUndTst.get(CV_CAP_PROP_FRAME_WIDTH),
- (int) captUndTst.get(CV_CAP_PROP_FRAME_HEIGHT));
+ Size refS = Size((int) captRefrnc.get(CAP_PROP_FRAME_WIDTH),
+ (int) captRefrnc.get(CAP_PROP_FRAME_HEIGHT)),
+ uTSi = Size((int) captUndTst.get(CAP_PROP_FRAME_WIDTH),
+ (int) captUndTst.get(CAP_PROP_FRAME_HEIGHT));
if (refS != uTSi)
{
const char* WIN_RF = "Reference";
// Windows
- namedWindow(WIN_RF, CV_WINDOW_AUTOSIZE );
- namedWindow(WIN_UT, CV_WINDOW_AUTOSIZE );
- cvMoveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
- cvMoveWindow(WIN_UT, refS.width, 0); //1500, 2
+ namedWindow(WIN_RF, WINDOW_AUTOSIZE );
+ namedWindow(WIN_UT, WINDOW_AUTOSIZE );
+ moveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0)
+ moveWindow(WIN_UT, refS.width, 0); //1500, 2
cout << "Frame resolution: Width=" << refS.width << " Height=" << refS.height
- << " of nr#: " << captRefrnc.get(CV_CAP_PROP_FRAME_COUNT) << endl;
+ << " of nr#: " << captRefrnc.get(CAP_PROP_FRAME_COUNT) << endl;
cout << "PSNR trigger value " <<
setiosflags(ios::fixed) << setprecision(3) << psnrTriggerValue << endl;
imshow( WIN_RF, frameReference);
imshow( WIN_UT, frameUnderTest);
- c = (char)cvWaitKey(delay);
+ c = (char)waitKey(delay);
if (c == 27) break;
}
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/utility.hpp"
+#include "opencv2/highgui/highgui_c.h"
+
#include <iostream>
#include <stdio.h>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/utility.hpp"
+#include "opencv2/highgui/highgui_c.h"
+
#include <iostream>
#include <stdio.h>
char filename[200];
string window_name = "video | q or esc to quit";
cout << "press space to save a picture. q or esc to quit" << endl;
- namedWindow(window_name, CV_WINDOW_KEEPRATIO); //resizable window;
+ namedWindow(window_name, WINDOW_KEEPRATIO); //resizable window;
Mat frame;
for (;;)
{
if (!outputPath.empty())
{
if (!writer.isOpened())
- writer.open(outputPath, CV_FOURCC('X','V','I','D'),
+ writer.open(outputPath, VideoWriter::fourcc('X','V','I','D'),
outputFps, stabilizedFrame.size());
writer << stabilizedFrame;
}
{
if( x < 0 || x >= img.cols || y < 0 || y >= img.rows )
return;
- if( event == CV_EVENT_LBUTTONUP || !(flags & CV_EVENT_FLAG_LBUTTON) )
+ if( event == EVENT_LBUTTONUP || !(flags & EVENT_FLAG_LBUTTON) )
prevPt = Point(-1,-1);
- else if( event == CV_EVENT_LBUTTONDOWN )
+ else if( event == EVENT_LBUTTONDOWN )
prevPt = Point(x,y);
- else if( event == CV_EVENT_MOUSEMOVE && (flags & CV_EVENT_FLAG_LBUTTON) )
+ else if( event == EVENT_MOUSEMOVE && (flags & EVENT_FLAG_LBUTTON) )
{
Point pt(x, y);
if( prevPt.x < 0 )
{
if (!video_writer.isOpened())
{
- video_writer.open(args.dst_video, CV_FOURCC('x','v','i','d'), args.dst_video_fps,
+ video_writer.open(args.dst_video, VideoWriter::fourcc('x','v','i','d'), args.dst_video_fps,
img_to_show.size(), true);
if (!video_writer.isOpened())
throw std::runtime_error("can't create video writer");
OpenClose(open_close_pos, 0);
ErodeDilate(erode_dilate_pos, 0);
- c = cvWaitKey(0);
+ c = waitKey();
if( (char)c == 27 )
break;
TEST(SURF)
{
- Mat src = imread(abspath("aloeL.jpg"), CV_LOAD_IMAGE_GRAYSCALE);
+ Mat src = imread(abspath("aloeL.jpg"), IMREAD_GRAYSCALE);
if (src.empty()) throw runtime_error("can't open aloeL.jpg");
SURF surf;
TEST(FAST)
{
- Mat src = imread(abspath("aloeL.jpg"), CV_LOAD_IMAGE_GRAYSCALE);
+ Mat src = imread(abspath("aloeL.jpg"), IMREAD_GRAYSCALE);
if (src.empty()) throw runtime_error("can't open aloeL.jpg");
vector<KeyPoint> keypoints;
TEST(ORB)
{
- Mat src = imread(abspath("aloeL.jpg"), CV_LOAD_IMAGE_GRAYSCALE);
+ Mat src = imread(abspath("aloeL.jpg"), IMREAD_GRAYSCALE);
if (src.empty()) throw runtime_error("can't open aloeL.jpg");
ORB orb(4000);
TEST(Canny)
{
- Mat img = imread(abspath("aloeL.jpg"), CV_LOAD_IMAGE_GRAYSCALE);
+ Mat img = imread(abspath("aloeL.jpg"), IMREAD_GRAYSCALE);
if (img.empty()) throw runtime_error("can't open aloeL.jpg");
if (!outputVideoName.empty())
{
if (!writer.isOpened())
- writer.open(outputVideoName, CV_FOURCC('X', 'V', 'I', 'D'), 25.0, result.size());
+ writer.open(outputVideoName, VideoWriter::fourcc('X', 'V', 'I', 'D'), 25.0, result.size());
writer << result;
}
}
{
if (string(argv[i]) == "--left")
{
- img1.upload(imread(argv[++i], CV_LOAD_IMAGE_GRAYSCALE));
+ img1.upload(imread(argv[++i], IMREAD_GRAYSCALE));
CV_Assert(!img1.empty());
}
else if (string(argv[i]) == "--right")
{
- img2.upload(imread(argv[++i], CV_LOAD_IMAGE_GRAYSCALE));
+ img2.upload(imread(argv[++i], IMREAD_GRAYSCALE));
CV_Assert(!img2.empty());
}
else if (string(argv[i]) == "--help")
std::cout << "Open CPU Writer" << std::endl;
- if (!writer.open("output_cpu.avi", CV_FOURCC('X', 'V', 'I', 'D'), FPS, frame.size()))
+ if (!writer.open("output_cpu.avi", cv::VideoWriter::fourcc('X', 'V', 'I', 'D'), FPS, frame.size()))
return -1;
}
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/ocl/ocl.hpp"
+
+#include "opencv2/highgui/highgui_c.h"
+
#include <iostream>
#include <stdio.h>
{
if (!video_writer.isOpened())
{
- video_writer.open(args.dst_video, CV_FOURCC('x','v','i','d'), args.dst_video_fps,
+ video_writer.open(args.dst_video, VideoWriter::fourcc('x','v','i','d'), args.dst_video_fps,
img_to_show.size(), true);
if (!video_writer.isOpened())
throw std::runtime_error("can't create video writer");