From 5560db73bfa38686bc2dd6123b05ab455a2f6f8b Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Mon, 11 Dec 2017 12:55:03 +0300 Subject: [PATCH] python: 'cv2.' -> 'cv.' via 'import cv2 as cv' --- .../js_contours_begin/js_contours_begin.markdown | 4 +- .../py_calibration/py_calibration.markdown | 56 ++++++++--------- .../py_calib3d/py_depthmap/py_depthmap.markdown | 8 +-- .../py_epipolar_geometry.markdown | 26 ++++---- .../py_calib3d/py_pose/py_pose.markdown | 40 ++++++------ .../py_core/py_basic_ops/py_basic_ops.markdown | 38 ++++++------ .../py_image_arithmetics.markdown | 44 ++++++------- .../py_optimization/py_optimization.markdown | 42 ++++++------- .../py_feature2d/py_brief/py_brief.markdown | 8 +-- .../py_feature2d/py_fast/py_fast.markdown | 18 +++--- .../py_feature_homography.markdown | 24 ++++---- .../py_features_harris/py_features_harris.markdown | 44 ++++++------- .../py_feature2d/py_matcher/py_matcher.markdown | 52 ++++++++-------- .../py_feature2d/py_orb/py_orb.markdown | 10 +-- .../py_shi_tomasi/py_shi_tomasi.markdown | 14 ++--- .../py_sift_intro/py_sift_intro.markdown | 22 +++---- .../py_surf_intro/py_surf_intro.markdown | 8 +-- .../py_drawing_functions.markdown | 32 +++++----- .../py_image_display/py_image_display.markdown | 66 ++++++++++---------- .../py_mouse_handling/py_mouse_handling.markdown | 48 +++++++-------- .../py_gui/py_trackbar/py_trackbar.markdown | 30 ++++----- .../py_video_display/py_video_display.markdown | 56 ++++++++--------- .../py_imgproc/py_canny/py_canny.markdown | 10 +-- .../py_colorspaces/py_colorspaces.markdown | 36 +++++------ .../py_contour_features.markdown | 64 +++++++++---------- .../py_contour_properties.markdown | 24 ++++---- .../py_contours_begin/py_contours_begin.markdown | 32 +++++----- .../py_contours_hierarchy.markdown | 14 ++--- .../py_contours_more_functions.markdown | 56 ++++++++--------- .../py_imgproc/py_filtering/py_filtering.markdown | 32 +++++----- .../py_geometric_transformations.markdown | 66 ++++++++++---------- .../py_imgproc/py_grabcut/py_grabcut.markdown | 20 +++--- .../py_imgproc/py_gradients/py_gradients.markdown | 28 ++++----- .../py_2d_histogram/py_2d_histogram.markdown | 28 ++++----- .../py_histogram_backprojection.markdown | 57 +++++++++-------- .../py_histogram_begins.markdown | 32 +++++----- .../py_histogram_equalization.markdown | 20 +++--- .../py_houghcircles/py_houghcircles.markdown | 24 ++++---- .../py_houghlines/py_houghlines.markdown | 6 +- .../py_morphological_ops.markdown | 32 +++++----- .../py_imgproc/py_pyramids/py_pyramids.markdown | 38 ++++++------ .../py_template_matching.markdown | 56 ++++++++--------- .../py_thresholding/py_thresholding.markdown | 70 ++++++++++----------- .../py_fourier_transform.markdown | 48 +++++++-------- .../py_imgproc/py_watershed/py_watershed.markdown | 26 ++++---- .../py_kmeans_opencv/py_kmeans_opencv.markdown | 38 ++++++------ .../py_knn/py_knn_opencv/py_knn_opencv.markdown | 16 ++--- .../py_knn_understanding.markdown | 6 +- .../py_face_detection/py_face_detection.markdown | 20 +++--- doc/py_tutorials/py_photo/py_hdr/py_hdr.markdown | 24 ++++---- .../py_photo/py_inpainting/py_inpainting.markdown | 20 +++--- .../py_non_local_means/py_non_local_means.markdown | 30 ++++----- .../py_setup_in_fedora/py_setup_in_fedora.markdown | 6 +- .../py_setup_in_ubuntu/py_setup_in_ubuntu.markdown | 8 +-- .../py_setup_in_windows.markdown | 6 +- .../py_bg_subtraction/py_bg_subtraction.markdown | 42 ++++++------- .../py_lucas_kanade/py_lucas_kanade.markdown | 60 +++++++++--------- 
.../py_video/py_meanshift/py_meanshift.markdown | 64 +++++++++---------- .../core/adding_images/adding_images.markdown | 2 +- .../mat_mask_operations.markdown | 2 +- modules/python/test/test_algorithm_rw.py | 10 +-- modules/python/test/test_calibration.py | 14 ++--- modules/python/test/test_camshift.py | 16 ++--- modules/python/test/test_dft.py | 22 +++---- modules/python/test/test_digits.py | 28 ++++----- modules/python/test/test_facedetect.py | 12 ++-- modules/python/test/test_feature_homography.py | 14 ++--- modules/python/test/test_fitline.py | 12 ++-- modules/python/test/test_gaussian_mix.py | 10 +-- modules/python/test/test_goodfeatures.py | 10 +-- modules/python/test/test_grabcut.py | 18 +++--- modules/python/test/test_houghcircles.py | 16 ++--- modules/python/test/test_houghlines.py | 10 +-- modules/python/test/test_kmeans.py | 6 +- modules/python/test/test_legacy.py | 32 +++++----- modules/python/test/test_letter_recog.py | 34 +++++----- modules/python/test/test_lk_homography.py | 18 +++--- modules/python/test/test_lk_track.py | 16 ++--- modules/python/test/test_misc.py | 6 +- modules/python/test/test_morphology.py | 6 +- modules/python/test/test_mser.py | 10 +-- modules/python/test/test_peopledetect.py | 6 +- modules/python/test/test_shape.py | 14 ++--- modules/python/test/test_squares.py | 24 ++++---- modules/python/test/test_stitching.py | 8 +-- modules/python/test/test_texture_flow.py | 12 ++-- modules/python/test/test_umat.py | 46 +++++++------- modules/python/test/test_watershed.py | 8 +-- modules/python/test/tests_common.py | 14 ++--- modules/python/test/tst_scene_render.py | 16 ++--- samples/dnn/googlenet_python.py | 4 +- samples/python/_coverage.py | 6 +- samples/python/asift.py | 26 ++++---- samples/python/browse.py | 20 +++--- samples/python/calibrate.py | 30 ++++----- samples/python/camshift.py | 40 ++++++------ samples/python/coherence.py | 40 ++++++------ samples/python/color_histogram.py | 24 ++++---- samples/python/common.py | 32 +++++----- samples/python/contours.py | 44 ++++++------- samples/python/deconvolution.py | 44 ++++++------- samples/python/dft.py | 32 +++++----- samples/python/digits.py | 36 +++++------ samples/python/digits_adjust.py | 4 +- samples/python/digits_video.py | 34 +++++----- samples/python/distrans.py | 20 +++--- samples/python/edge.py | 22 +++---- samples/python/facedetect.py | 20 +++--- samples/python/feature_homography.py | 14 ++--- samples/python/find_obj.py | 72 +++++++++++----------- samples/python/fitline.py | 34 +++++----- samples/python/floodfill.py | 30 ++++----- samples/python/gabor_threads.py | 18 +++--- samples/python/gaussian_mix.py | 18 +++--- samples/python/grabcut.py | 56 ++++++++--------- samples/python/hist.py | 54 ++++++++-------- samples/python/houghcircles.py | 22 +++---- samples/python/houghlines.py | 22 +++---- samples/python/inpaint.py | 12 ++-- samples/python/kalman.py | 24 ++++---- samples/python/kmeans.py | 16 ++--- samples/python/lappyr.py | 18 +++--- samples/python/letter_recog.py | 36 +++++------ samples/python/lk_homography.py | 32 +++++----- samples/python/lk_track.py | 24 ++++---- samples/python/logpolar.py | 16 ++--- samples/python/morphology.py | 26 ++++---- samples/python/mosse.py | 60 +++++++++--------- samples/python/mouse_and_match.py | 36 +++++------ samples/python/mser.py | 16 ++--- samples/python/opencv_version.py | 4 +- samples/python/opt_flow.py | 28 ++++----- samples/python/peopledetect.py | 16 ++--- samples/python/plane_ar.py | 22 +++---- samples/python/plane_tracker.py | 20 +++--- 
samples/python/squares.py | 30 ++++----- samples/python/stereo_match.py | 20 +++--- samples/python/texture_flow.py | 18 +++--- samples/python/tst_scene_render.py | 14 ++--- samples/python/turing.py | 20 +++--- .../tutorial_code/ImgTrans/Filter2D/filter2D.py | 10 +-- .../ImgTrans/HoughCircle/hough_circle.py | 18 +++--- .../ImgTrans/HoughLine/hough_lines.py | 24 ++++---- .../tutorial_code/ImgTrans/LaPlace/laplace_demo.py | 20 +++--- .../ImgTrans/MakeBorder/copy_make_border.py | 18 +++--- .../tutorial_code/ImgTrans/SobelDemo/sobel_demo.py | 28 ++++----- .../core/AddingImages/adding_images.py | 14 ++--- .../basic_geometric_drawing.py | 26 ++++---- .../discrete_fourier_transform.py | 30 ++++----- .../mat_mask_operations/mat_mask_operations.py | 28 ++++----- .../tutorial_code/imgProc/HitMiss/hit_miss.py | 26 ++++---- .../tutorial_code/imgProc/Pyramids/pyramids.py | 14 ++--- .../tutorial_code/imgProc/Smoothing/smoothing.py | 22 +++---- .../hough_line_transform/hough_line_transform.py | 14 ++--- .../probabilistic_hough_line_transform.py | 14 ++--- .../imgProc/match_template/match_template.py | 36 +++++------ .../morph_lines_detection/morph_lines_detection.py | 44 ++++++------- .../tutorial_code/ml/py_svm_opencv/hogsvm.py | 24 ++++---- samples/python/video.py | 42 ++++++------- samples/python/video_threaded.py | 14 ++--- samples/python/video_v4l2.py | 38 ++++++------ samples/python/watershed.py | 16 ++--- 162 files changed, 2083 insertions(+), 2084 deletions(-) diff --git a/doc/js_tutorials/js_imgproc/js_contours/js_contours_begin/js_contours_begin.markdown b/doc/js_tutorials/js_imgproc/js_contours/js_contours_begin/js_contours_begin.markdown index 48eb92b..9678a7c 100644 --- a/doc/js_tutorials/js_imgproc/js_contours/js_contours_begin/js_contours_begin.markdown +++ b/doc/js_tutorials/js_imgproc/js_contours/js_contours_begin/js_contours_begin.markdown @@ -68,5 +68,5 @@ this contour approximation method. If you pass cv.ContourApproximationModes.CHAIN_APPROX_NONE.value, all the boundary points are stored. But actually do we need all the points? For eg, you found the contour of a straight line. Do you need all the points on the line to represent that line? No, we need just two end points of that line. This is what -cv2.CHAIN_APPROX_SIMPLE does. It removes all redundant points and compresses the contour, thereby -saving memory. \ No newline at end of file +cv.CHAIN_APPROX_SIMPLE does. It removes all redundant points and compresses the contour, thereby +saving memory. diff --git a/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown b/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown index 3e7a401..cb5d0ad 100644 --- a/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown +++ b/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown @@ -80,7 +80,7 @@ pass in terms of square size). ### Setup -So to find pattern in chess board, we use the function, **cv2.findChessboardCorners()**. We also +So to find pattern in chess board, we use the function, **cv.findChessboardCorners()**. We also need to pass what kind of pattern we are looking, like 8x8 grid, 5x5 grid etc. In this example, we use 7x6 grid. (Normally a chess board has 8x8 squares and 7x7 internal corners). It returns the corner points and retval which will be True if pattern is obtained. These corners will be placed in @@ -95,19 +95,19 @@ are not sure out of 14 images given, how many are good. So we read all the image ones. 
@sa Instead of chess board, we can use some circular grid, but then use the function -**cv2.findCirclesGrid()** to find the pattern. It is said that less number of images are enough when +**cv.findCirclesGrid()** to find the pattern. It is said that less number of images are enough when using circular grid. -Once we find the corners, we can increase their accuracy using **cv2.cornerSubPix()**. We can also -draw the pattern using **cv2.drawChessboardCorners()**. All these steps are included in below code: +Once we find the corners, we can increase their accuracy using **cv.cornerSubPix()**. We can also +draw the pattern using **cv.drawChessboardCorners()**. All these steps are included in below code: @code{.py} import numpy as np -import cv2 +import cv2 as cv import glob # termination criteria -criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) +criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001) # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) objp = np.zeros((6*7,3), np.float32) @@ -120,25 +120,25 @@ imgpoints = [] # 2d points in image plane. images = glob.glob('*.jpg') for fname in images: - img = cv2.imread(fname) - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + img = cv.imread(fname) + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) # Find the chess board corners - ret, corners = cv2.findChessboardCorners(gray, (7,6), None) + ret, corners = cv.findChessboardCorners(gray, (7,6), None) # If found, add object points, image points (after refining them) if ret == True: objpoints.append(objp) - corners2=cv2.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria) + corners2 = cv.cornerSubPix(gray,corners, (11,11), (-1,-1), criteria) imgpoints.append(corners) # Draw and display the corners - cv2.drawChessboardCorners(img, (7,6), corners2, ret) - cv2.imshow('img', img) - cv2.waitKey(500) + cv.drawChessboardCorners(img, (7,6), corners2, ret) + cv.imshow('img', img) + cv.waitKey(500) -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode One image with pattern drawn on it is shown below: @@ -147,37 +147,37 @@ One image with pattern drawn on it is shown below: ### Calibration So now we have our object points and image points we are ready to go for calibration. For that we -use the function, **cv2.calibrateCamera()**. It returns the camera matrix, distortion coefficients, +use the function, **cv.calibrateCamera()**. It returns the camera matrix, distortion coefficients, rotation and translation vectors etc. @code{.py} -ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None) +ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None) @endcode ### Undistortion We have got what we were trying. Now we can take an image and undistort it. OpenCV comes with two methods, we will see both. But before that, we can refine the camera matrix based on a free scaling -parameter using **cv2.getOptimalNewCameraMatrix()**. If the scaling parameter alpha=0, it returns +parameter using **cv.getOptimalNewCameraMatrix()**. If the scaling parameter alpha=0, it returns undistorted image with minimum unwanted pixels. So it may even remove some pixels at image corners. If alpha=1, all pixels are retained with some extra black images. It also returns an image ROI which can be used to crop the result. So we take a new image (left12.jpg in this case. 
That is the first image in this chapter) @code{.py} -img = cv2.imread('left12.jpg') +img = cv.imread('left12.jpg') h, w = img.shape[:2] -newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h)) +newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h)) @endcode -#### 1. Using **cv2.undistort()** +#### 1. Using **cv.undistort()** This is the shortest path. Just call the function and use ROI obtained above to crop the result. @code{.py} # undistort -dst = cv2.undistort(img, mtx, dist, None, newcameramtx) +dst = cv.undistort(img, mtx, dist, None, newcameramtx) # crop the image x, y, w, h = roi dst = dst[y:y+h, x:x+w] -cv2.imwrite('calibresult.png', dst) +cv.imwrite('calibresult.png', dst) @endcode #### 2. Using **remapping** @@ -185,13 +185,13 @@ This is curved path. First find a mapping function from distorted image to undis use the remap function. @code{.py} # undistort -mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w,h), 5) -dst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR) +mapx, mapy = cv.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w,h), 5) +dst = cv.remap(img, mapx, mapy, cv.INTER_LINEAR) # crop the image x, y, w, h = roi dst = dst[y:y+h, x:x+w] -cv2.imwrite('calibresult.png', dst) +cv.imwrite('calibresult.png', dst) @endcode Both the methods give the same result. See the result below: @@ -207,15 +207,15 @@ Re-projection Error Re-projection error gives a good estimation of just how exact is the found parameters. This should be as close to zero as possible. Given the intrinsic, distortion, rotation and translation matrices, -we first transform the object point to image point using **cv2.projectPoints()**. Then we calculate +we first transform the object point to image point using **cv.projectPoints()**. Then we calculate the absolute norm between what we got with our transformation and the corner finding algorithm. To find the average error we calculate the arithmetical mean of the errors calculate for all the calibration images. @code{.py} mean_error = 0 for i in xrange(len(objpoints)): - imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist) - error = cv2.norm(imgpoints[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2) + imgpoints2, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist) + error = cv.norm(imgpoints[i], imgpoints2, cv.NORM_L2)/len(imgpoints2) mean_error += error print( "total error: {}".format(mean_error/len(objpoints)) ) diff --git a/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.markdown b/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.markdown index 8ecbd2c..b1a143e 100644 --- a/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.markdown +++ b/doc/py_tutorials/py_calib3d/py_depthmap/py_depthmap.markdown @@ -38,13 +38,13 @@ Code Below code snippet shows a simple procedure to create a disparity map. 
@code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -imgL = cv2.imread('tsukuba_l.png',0) -imgR = cv2.imread('tsukuba_r.png',0) +imgL = cv.imread('tsukuba_l.png',0) +imgR = cv.imread('tsukuba_r.png',0) -stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15) +stereo = cv.StereoBM_create(numDisparities=16, blockSize=15) disparity = stereo.compute(imgL,imgR) plt.imshow(disparity,'gray') plt.show() diff --git a/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown b/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown index 432773d..3ed072c 100644 --- a/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown +++ b/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown @@ -72,14 +72,14 @@ Code So first we need to find as many possible matches between two images to find the fundamental matrix. For this, we use SIFT descriptors with FLANN based matcher and ratio test. @code{.py} -import cv2 import numpy as np +import cv2 as cv from matplotlib import pyplot as plt -img1 = cv2.imread('myleft.jpg',0) #queryimage # left image -img2 = cv2.imread('myright.jpg',0) #trainimage # right image +img1 = cv.imread('myleft.jpg',0) #queryimage # left image +img2 = cv.imread('myright.jpg',0) #trainimage # right image -sift = cv2.SIFT() +sift = cv.SIFT() # find the keypoints and descriptors with SIFT kp1, des1 = sift.detectAndCompute(img1,None) @@ -90,7 +90,7 @@ FLANN_INDEX_KDTREE = 1 index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) search_params = dict(checks=50) -flann = cv2.FlannBasedMatcher(index_params,search_params) +flann = cv.FlannBasedMatcher(index_params,search_params) matches = flann.knnMatch(des1,des2,k=2) good = [] @@ -108,7 +108,7 @@ Now we have the list of best matches from both the images. Let's find the Fundam @code{.py} pts1 = np.int32(pts1) pts2 = np.int32(pts2) -F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_LMEDS) +F, mask = cv.findFundamentalMat(pts1,pts2,cv.FM_LMEDS) # We select only inlier points pts1 = pts1[mask.ravel()==1] @@ -122,28 +122,28 @@ def drawlines(img1,img2,lines,pts1,pts2): ''' img1 - image on which we draw the epilines for the points in img2 lines - corresponding epilines ''' r,c = img1.shape - img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2BGR) - img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2BGR) + img1 = cv.cvtColor(img1,cv.COLOR_GRAY2BGR) + img2 = cv.cvtColor(img2,cv.COLOR_GRAY2BGR) for r,pt1,pt2 in zip(lines,pts1,pts2): color = tuple(np.random.randint(0,255,3).tolist()) x0,y0 = map(int, [0, -r[2]/r[1] ]) x1,y1 = map(int, [c, -(r[2]+r[0]*c)/r[1] ]) - img1 = cv2.line(img1, (x0,y0), (x1,y1), color,1) - img1 = cv2.circle(img1,tuple(pt1),5,color,-1) - img2 = cv2.circle(img2,tuple(pt2),5,color,-1) + img1 = cv.line(img1, (x0,y0), (x1,y1), color,1) + img1 = cv.circle(img1,tuple(pt1),5,color,-1) + img2 = cv.circle(img2,tuple(pt2),5,color,-1) return img1,img2 @endcode Now we find the epilines in both the images and draw them. 
@code{.py} # Find epilines corresponding to points in right image (second image) and # drawing its lines on left image -lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F) +lines1 = cv.computeCorrespondEpilines(pts2.reshape(-1,1,2), 2,F) lines1 = lines1.reshape(-1,3) img5,img6 = drawlines(img1,img2,lines1,pts1,pts2) # Find epilines corresponding to points in left image (first image) and # drawing its lines on right image -lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F) +lines2 = cv.computeCorrespondEpilines(pts1.reshape(-1,1,2), 1,F) lines2 = lines2.reshape(-1,3) img3,img4 = drawlines(img2,img1,lines2,pts2,pts1) diff --git a/doc/py_tutorials/py_calib3d/py_pose/py_pose.markdown b/doc/py_tutorials/py_calib3d/py_pose/py_pose.markdown index 0ec22c6..1fcad94 100644 --- a/doc/py_tutorials/py_calib3d/py_pose/py_pose.markdown +++ b/doc/py_tutorials/py_calib3d/py_pose/py_pose.markdown @@ -24,8 +24,8 @@ should feel like it is perpendicular to our chessboard plane. First, let's load the camera matrix and distortion coefficients from the previous calibration result. @code{.py} -import cv2 import numpy as np +import cv2 as cv import glob # Load previously saved data @@ -33,13 +33,13 @@ with np.load('B.npz') as X: mtx, dist, _, _ = [X[i] for i in ('mtx','dist','rvecs','tvecs')] @endcode Now let's create a function, draw which takes the corners in the chessboard (obtained using -**cv2.findChessboardCorners()**) and **axis points** to draw a 3D axis. +**cv.findChessboardCorners()**) and **axis points** to draw a 3D axis. @code{.py} def draw(img, corners, imgpts): corner = tuple(corners[0].ravel()) - img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5) - img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5) - img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5) + img = cv.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5) + img = cv.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5) + img = cv.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5) return img @endcode Then as in previous case, we create termination criteria, object points (3D points of corners in @@ -48,7 +48,7 @@ of length 3 (units will be in terms of chess square size since we calibrated bas our X axis is drawn from (0,0,0) to (3,0,0), so for Y axis. For Z axis, it is drawn from (0,0,0) to (0,0,-3). Negative denotes it is drawn towards the camera. @code{.py} -criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) +criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001) objp = np.zeros((6*7,3), np.float32) objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2) @@ -56,32 +56,32 @@ axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3) @endcode Now, as usual, we load each image. Search for 7x6 grid. If found, we refine it with subcorner pixels. Then to calculate the rotation and translation, we use the function, -**cv2.solvePnPRansac()**. Once we those transformation matrices, we use them to project our **axis +**cv.solvePnPRansac()**. Once we those transformation matrices, we use them to project our **axis points** to the image plane. In simple words, we find the points on image plane corresponding to each of (3,0,0),(0,3,0),(0,0,3) in 3D space. Once we get them, we draw lines from the first corner to each of these points using our draw() function. Done !!! 
@code{.py} for fname in glob.glob('left*.jpg'): - img = cv2.imread(fname) - gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) - ret, corners = cv2.findChessboardCorners(gray, (7,6),None) + img = cv.imread(fname) + gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) + ret, corners = cv.findChessboardCorners(gray, (7,6),None) if ret == True: - corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria) + corners2 = cv.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria) # Find the rotation and translation vectors. - ret,rvecs, tvecs, inliers = cv2.solvePnP(objp, corners2, mtx, dist) + ret,rvecs, tvecs, inliers = cv.solvePnP(objp, corners2, mtx, dist) # project 3D points to image plane - imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx, dist) + imgpts, jac = cv.projectPoints(axis, rvecs, tvecs, mtx, dist) img = draw(img,corners2,imgpts) - cv2.imshow('img',img) - k = cv2.waitKey(0) & 0xFF + cv.imshow('img',img) + k = cv.waitKey(0) & 0xFF if k == ord('s'): - cv2.imwrite(fname[:6]+'.png', img) + cv.imwrite(fname[:6]+'.png', img) -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode See some results below. Notice that each axis is 3 squares long.: @@ -97,14 +97,14 @@ def draw(img, corners, imgpts): imgpts = np.int32(imgpts).reshape(-1,2) # draw ground floor in green - img = cv2.drawContours(img, [imgpts[:4]],-1,(0,255,0),-3) + img = cv.drawContours(img, [imgpts[:4]],-1,(0,255,0),-3) # draw pillars in blue color for i,j in zip(range(4),range(4,8)): - img = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255),3) + img = cv.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255),3) # draw top layer in red color - img = cv2.drawContours(img, [imgpts[4:]],-1,(0,0,255),3) + img = cv.drawContours(img, [imgpts[4:]],-1,(0,0,255),3) return img @endcode diff --git a/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown b/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown index 893ec2e..b0f92d7 100644 --- a/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown +++ b/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown @@ -21,10 +21,10 @@ Accessing and Modifying pixel values Let's load a color image first: @code{.py} ->>> import cv2 >>> import numpy as np +>>> import cv2 as cv ->>> img = cv2.imread('messi5.jpg') +>>> img = cv.imread('messi5.jpg') @endcode You can access a pixel value by its row and column coordinates. For BGR image, it returns an array of Blue, Green, Red values. For grayscale image, just corresponding intensity is returned. @@ -122,8 +122,8 @@ Sometimes you will need to work separately on B,G,R channels of image. In this c to split the BGR images to single channels. In other cases, you may need to join these individual channels to a BGR image. You can do it simply by: @code{.py} ->>> b,g,r = cv2.split(img) ->>> img = cv2.merge((b,g,r)) +>>> b,g,r = cv.split(img) +>>> img = cv.merge((b,g,r)) @endcode Or @code @@ -137,14 +137,14 @@ Numpy indexing is faster: **Warning** -cv2.split() is a costly operation (in terms of time). So do it only if you need it. Otherwise go +cv.split() is a costly operation (in terms of time). So do it only if you need it. Otherwise go for Numpy indexing. Making Borders for Images (Padding) ----------------------------------- If you want to create a border around the image, something like a photo frame, you can use -**cv2.copyMakeBorder()**. But it has more applications for convolution operation, zero +**cv.copyMakeBorder()**. But it has more applications for convolution operation, zero padding etc. 
This function takes following arguments: - **src** - input image @@ -152,34 +152,34 @@ padding etc. This function takes following arguments: directions - **borderType** - Flag defining what kind of border to be added. It can be following types: - - **cv2.BORDER_CONSTANT** - Adds a constant colored border. The value should be given + - **cv.BORDER_CONSTANT** - Adds a constant colored border. The value should be given as next argument. - - **cv2.BORDER_REFLECT** - Border will be mirror reflection of the border elements, + - **cv.BORDER_REFLECT** - Border will be mirror reflection of the border elements, like this : *fedcba|abcdefgh|hgfedcb* - - **cv2.BORDER_REFLECT_101** or **cv2.BORDER_DEFAULT** - Same as above, but with a + - **cv.BORDER_REFLECT_101** or **cv.BORDER_DEFAULT** - Same as above, but with a slight change, like this : *gfedcb|abcdefgh|gfedcba* - - **cv2.BORDER_REPLICATE** - Last element is replicated throughout, like this: + - **cv.BORDER_REPLICATE** - Last element is replicated throughout, like this: *aaaaaa|abcdefgh|hhhhhhh* - - **cv2.BORDER_WRAP** - Can't explain, it will look like this : + - **cv.BORDER_WRAP** - Can't explain, it will look like this : *cdefgh|abcdefgh|abcdefg* -- **value** - Color of border if border type is cv2.BORDER_CONSTANT +- **value** - Color of border if border type is cv.BORDER_CONSTANT Below is a sample code demonstrating all these border types for better understanding: @code{.py} -import cv2 +import cv2 as cv import numpy as np from matplotlib import pyplot as plt BLUE = [255,0,0] -img1 = cv2.imread('opencv-logo.png') +img1 = cv.imread('opencv-logo.png') -replicate = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REPLICATE) -reflect = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REFLECT) -reflect101 = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_REFLECT_101) -wrap = cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_WRAP) -constant= cv2.copyMakeBorder(img1,10,10,10,10,cv2.BORDER_CONSTANT,value=BLUE) +replicate = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REPLICATE) +reflect = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT) +reflect101 = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_REFLECT_101) +wrap = cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_WRAP) +constant= cv.copyMakeBorder(img1,10,10,10,10,cv.BORDER_CONSTANT,value=BLUE) plt.subplot(231),plt.imshow(img1,'gray'),plt.title('ORIGINAL') plt.subplot(232),plt.imshow(replicate,'gray'),plt.title('REPLICATE') diff --git a/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown b/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown index 10bcd18..8eddf7f 100644 --- a/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown +++ b/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown @@ -6,12 +6,12 @@ Goal - Learn several arithmetic operations on images like addition, subtraction, bitwise operations etc. -- You will learn these functions : **cv2.add()**, **cv2.addWeighted()** etc. +- You will learn these functions : **cv.add()**, **cv.addWeighted()** etc. Image Addition -------------- -You can add two images by OpenCV function, cv2.add() or simply by numpy operation, +You can add two images by OpenCV function, cv.add() or simply by numpy operation, res = img1 + img2. Both images should be of same depth and type, or second image can just be a scalar value. 
@@ -23,7 +23,7 @@ For example, consider below sample: >>> x = np.uint8([250]) >>> y = np.uint8([10]) ->>> print( cv2.add(x,y) ) # 250+10 = 260 => 255 +>>> print( cv.add(x,y) ) # 250+10 = 260 => 255 [[255]] >>> print( x+y ) # 250+10 = 260 % 256 = 4 @@ -44,20 +44,20 @@ By varying \f$\alpha\f$ from \f$0 \rightarrow 1\f$, you can perform a cool trans another. Here I took two images to blend them together. First image is given a weight of 0.7 and second image -is given 0.3. cv2.addWeighted() applies following equation on the image. +is given 0.3. cv.addWeighted() applies following equation on the image. \f[dst = \alpha \cdot img1 + \beta \cdot img2 + \gamma\f] Here \f$\gamma\f$ is taken as zero. @code{.py} -img1 = cv2.imread('ml.png') -img2 = cv2.imread('opencv-logo.png') +img1 = cv.imread('ml.png') +img2 = cv.imread('opencv-logo.png') -dst = cv2.addWeighted(img1,0.7,img2,0.3,0) +dst = cv.addWeighted(img1,0.7,img2,0.3,0) -cv2.imshow('dst',dst) -cv2.waitKey(0) -cv2.destroyAllWindows() +cv.imshow('dst',dst) +cv.waitKey(0) +cv.destroyAllWindows() @endcode Check the result below: @@ -76,31 +76,31 @@ ROI as we did in last chapter. But OpenCV logo is a not a rectangular shape. So bitwise operations as below: @code{.py} # Load two images -img1 = cv2.imread('messi5.jpg') -img2 = cv2.imread('opencv-logo.png') +img1 = cv.imread('messi5.jpg') +img2 = cv.imread('opencv-logo.png') # I want to put logo on top-left corner, So I create a ROI rows,cols,channels = img2.shape roi = img1[0:rows, 0:cols ] # Now create a mask of logo and create its inverse mask also -img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY) -ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY) -mask_inv = cv2.bitwise_not(mask) +img2gray = cv.cvtColor(img2,cv.COLOR_BGR2GRAY) +ret, mask = cv.threshold(img2gray, 10, 255, cv.THRESH_BINARY) +mask_inv = cv.bitwise_not(mask) # Now black-out the area of logo in ROI -img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv) +img1_bg = cv.bitwise_and(roi,roi,mask = mask_inv) # Take only region of logo from logo image. -img2_fg = cv2.bitwise_and(img2,img2,mask = mask) +img2_fg = cv.bitwise_and(img2,img2,mask = mask) # Put logo in ROI and modify the main image -dst = cv2.add(img1_bg,img2_fg) +dst = cv.add(img1_bg,img2_fg) img1[0:rows, 0:cols ] = dst -cv2.imshow('res',img1) -cv2.waitKey(0) -cv2.destroyAllWindows() +cv.imshow('res',img1) +cv.waitKey(0) +cv.destroyAllWindows() @endcode See the result below. Left image shows the mask we created. Right image shows the final result. For more understanding, display all the intermediate images in the above code, especially img1_bg and @@ -115,4 +115,4 @@ Exercises --------- -# Create a slide show of images in a folder with smooth transition between images using - cv2.addWeighted function + cv.addWeighted function diff --git a/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown b/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown index 2c3878b..69d5dd0 100644 --- a/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown +++ b/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown @@ -10,7 +10,7 @@ So in this chapter, you will learn - To measure the performance of your code. - Some tips to improve the performance of your code. -- You will see these functions : **cv2.getTickCount**, **cv2.getTickFrequency** etc. +- You will see these functions : **cv.getTickCount**, **cv.getTickFrequency** etc. Apart from OpenCV, Python also provides a module **time** which is helpful in measuring the time of execution. 
Another module **profile** helps to get detailed report on the code, like how much time @@ -21,34 +21,34 @@ ones, and for more details, check links in **Additional Resouces** section. Measuring Performance with OpenCV --------------------------------- -**cv2.getTickCount** function returns the number of clock-cycles after a reference event (like the +**cv.getTickCount** function returns the number of clock-cycles after a reference event (like the moment machine was switched ON) to the moment this function is called. So if you call it before and after the function execution, you get number of clock-cycles used to execute a function. -**cv2.getTickFrequency** function returns the frequency of clock-cycles, or the number of +**cv.getTickFrequency** function returns the frequency of clock-cycles, or the number of clock-cycles per second. So to find the time of execution in seconds, you can do following: @code{.py} -e1 = cv2.getTickCount() +e1 = cv.getTickCount() # your code execution -e2 = cv2.getTickCount() -time = (e2 - e1)/ cv2.getTickFrequency() +e2 = cv.getTickCount() +time = (e2 - e1)/ cv.getTickFrequency() @endcode We will demonstrate with following example. Following example apply median filtering with a kernel of odd size ranging from 5 to 49. (Don't worry about what will the result look like, that is not our goal): @code{.py} -img1 = cv2.imread('messi5.jpg') +img1 = cv.imread('messi5.jpg') -e1 = cv2.getTickCount() +e1 = cv.getTickCount() for i in xrange(5,49,2): - img1 = cv2.medianBlur(img1,i) -e2 = cv2.getTickCount() -t = (e2 - e1)/cv2.getTickFrequency() + img1 = cv.medianBlur(img1,i) +e2 = cv.getTickCount() +t = (e2 - e1)/cv.getTickFrequency() print( t ) # Result I got is 0.521107655 seconds @endcode -@note You can do the same with time module. Instead of cv2.getTickCount, use time.time() function. +@note You can do the same with time module. Instead of cv.getTickCount, use time.time() function. Then take the difference of two times. Default Optimization in OpenCV @@ -57,23 +57,23 @@ Default Optimization in OpenCV Many of the OpenCV functions are optimized using SSE2, AVX etc. It contains unoptimized code also. So if our system support these features, we should exploit them (almost all modern day processors support them). It is enabled by default while compiling. So OpenCV runs the optimized code if it is -enabled, else it runs the unoptimized code. You can use **cv2.useOptimized()** to check if it is -enabled/disabled and **cv2.setUseOptimized()** to enable/disable it. Let's see a simple example. +enabled, else it runs the unoptimized code. You can use **cv.useOptimized()** to check if it is +enabled/disabled and **cv.setUseOptimized()** to enable/disable it. Let's see a simple example. @code{.py} # check if optimization is enabled -In [5]: cv2.useOptimized() +In [5]: cv.useOptimized() Out[5]: True -In [6]: %timeit res = cv2.medianBlur(img,49) +In [6]: %timeit res = cv.medianBlur(img,49) 10 loops, best of 3: 34.9 ms per loop # Disable it -In [7]: cv2.setUseOptimized(False) +In [7]: cv.setUseOptimized(False) -In [8]: cv2.useOptimized() +In [8]: cv.useOptimized() Out[8]: False -In [9]: %timeit res = cv2.medianBlur(img,49) +In [9]: %timeit res = cv.medianBlur(img,49) 10 loops, best of 3: 64.1 ms per loop @endcode See, optimized median filtering is \~2x faster than unoptimized version. If you check its source, @@ -115,11 +115,11 @@ working on this issue)* one or two elements, Python scalar is better than Numpy arrays. Numpy takes advantage when size of array is a little bit bigger. 
-We will try one more example. This time, we will compare the performance of **cv2.countNonZero()** +We will try one more example. This time, we will compare the performance of **cv.countNonZero()** and **np.count_nonzero()** for same image. @code{.py} -In [35]: %timeit z = cv2.countNonZero(img) +In [35]: %timeit z = cv.countNonZero(img) 100000 loops, best of 3: 15.8 us per loop In [36]: %timeit z = np.count_nonzero(img) diff --git a/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown b/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown index 1ad9899..1dcdee5 100644 --- a/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown +++ b/doc/py_tutorials/py_feature2d/py_brief/py_brief.markdown @@ -52,16 +52,16 @@ detector is called STAR detector in OpenCV) note, that you need [opencv contrib](https://github.com/opencv/opencv_contrib)) to use this. @code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('simple.jpg',0) +img = cv.imread('simple.jpg',0) # Initiate FAST detector -star = cv2.xfeatures2d.StarDetector_create() +star = cv.xfeatures2d.StarDetector_create() # Initiate BRIEF extractor -brief = cv2.xfeatures2d.BriefDescriptorExtractor_create() +brief = cv.xfeatures2d.BriefDescriptorExtractor_create() # find the keypoints with STAR kp = star.detect(img,None) diff --git a/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown b/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown index 590c1e3..db1550d 100644 --- a/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown +++ b/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown @@ -90,22 +90,22 @@ FAST Feature Detector in OpenCV It is called as any other feature detector in OpenCV. If you want, you can specify the threshold, whether non-maximum suppression to be applied or not, the neighborhood to be used etc. -For the neighborhood, three flags are defined, cv2.FAST_FEATURE_DETECTOR_TYPE_5_8, -cv2.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv2.FAST_FEATURE_DETECTOR_TYPE_9_16. Below is a +For the neighborhood, three flags are defined, cv.FAST_FEATURE_DETECTOR_TYPE_5_8, +cv.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv.FAST_FEATURE_DETECTOR_TYPE_9_16. Below is a simple code on how to detect and draw the FAST feature points. @code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('simple.jpg',0) +img = cv.imread('simple.jpg',0) # Initiate FAST object with default values -fast = cv2.FastFeatureDetector_create() +fast = cv.FastFeatureDetector_create() # find and draw the keypoints kp = fast.detect(img,None) -img2 = cv2.drawKeypoints(img, kp, None, color=(255,0,0)) +img2 = cv.drawKeypoints(img, kp, None, color=(255,0,0)) # Print all default params print( "Threshold: {}".format(fast.getThreshold()) ) @@ -113,7 +113,7 @@ print( "nonmaxSuppression:{}".format(fast.getNonmaxSuppression()) ) print( "neighborhood: {}".format(fast.getType()) ) print( "Total Keypoints with nonmaxSuppression: {}".format(len(kp)) ) -cv2.imwrite('fast_true.png',img2) +cv.imwrite('fast_true.png',img2) # Disable nonmaxSuppression fast.setNonmaxSuppression(0) @@ -121,9 +121,9 @@ kp = fast.detect(img,None) print( "Total Keypoints without nonmaxSuppression: {}".format(len(kp)) ) -img3 = cv2.drawKeypoints(img, kp, None, color=(255,0,0)) +img3 = cv.drawKeypoints(img, kp, None, color=(255,0,0)) -cv2.imwrite('fast_false.png',img3) +cv.imwrite('fast_false.png',img3) @endcode See the results. 
First image shows FAST with nonmaxSuppression and second one without nonmaxSuppression: diff --git a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown index 20b2d2e..f2c0cdd 100644 --- a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown +++ b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown @@ -16,15 +16,15 @@ another trainImage, found the features in that image too and we found the best m In short, we found locations of some parts of an object in another cluttered image. This information is sufficient to find the object exactly on the trainImage. -For that, we can use a function from calib3d module, ie **cv2.findHomography()**. If we pass the set +For that, we can use a function from calib3d module, ie **cv.findHomography()**. If we pass the set of points from both the images, it will find the perpective transformation of that object. Then we -can use **cv2.perspectiveTransform()** to find the object. It needs atleast four correct points to +can use **cv.perspectiveTransform()** to find the object. It needs atleast four correct points to find the transformation. We have seen that there can be some possible errors while matching which may affect the result. To solve this problem, algorithm uses RANSAC or LEAST_MEDIAN (which can be decided by the flags). So good matches which provide correct estimation are called inliers and remaining are called outliers. -**cv2.findHomography()** returns a mask which specifies the inlier and outlier points. +**cv.findHomography()** returns a mask which specifies the inlier and outlier points. So let's do it !!! @@ -35,16 +35,16 @@ First, as usual, let's find SIFT features in images and apply the ratio test to matches. 
@code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt MIN_MATCH_COUNT = 10 -img1 = cv2.imread('box.png',0) # queryImage -img2 = cv2.imread('box_in_scene.png',0) # trainImage +img1 = cv.imread('box.png',0) # queryImage +img2 = cv.imread('box_in_scene.png',0) # trainImage # Initiate SIFT detector -sift = cv2.xfeatures2d.SIFT_create() +sift = cv.xfeatures2d.SIFT_create() # find the keypoints and descriptors with SIFT kp1, des1 = sift.detectAndCompute(img1,None) @@ -54,7 +54,7 @@ FLANN_INDEX_KDTREE = 1 index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) search_params = dict(checks = 50) -flann = cv2.FlannBasedMatcher(index_params, search_params) +flann = cv.FlannBasedMatcher(index_params, search_params) matches = flann.knnMatch(des1,des2,k=2) @@ -75,14 +75,14 @@ if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) - M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) + M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC,5.0) matchesMask = mask.ravel().tolist() h,w,d = img1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) - dst = cv2.perspectiveTransform(pts,M) + dst = cv.perspectiveTransform(pts,M) - img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) + img2 = cv.polylines(img2,[np.int32(dst)],True,255,3, cv.LINE_AA) else: print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) ) @@ -95,7 +95,7 @@ draw_params = dict(matchColor = (0,255,0), # draw matches in green color matchesMask = matchesMask, # draw only inliers flags = 2) -img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) +img3 = cv.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params) plt.imshow(img3, 'gray'),plt.show() @endcode diff --git a/doc/py_tutorials/py_feature2d/py_features_harris/py_features_harris.markdown b/doc/py_tutorials/py_feature2d/py_features_harris/py_features_harris.markdown index 7297eb4..34ca320 100644 --- a/doc/py_tutorials/py_feature2d/py_features_harris/py_features_harris.markdown +++ b/doc/py_tutorials/py_feature2d/py_features_harris/py_features_harris.markdown @@ -7,7 +7,7 @@ Goal In this chapter, - We will understand the concepts behind Harris Corner Detection. -- We will see the functions: **cv2.cornerHarris()**, **cv2.cornerSubPix()** +- We will see the functions: **cv.cornerHarris()**, **cv.cornerSubPix()** Theory ------ @@ -35,7 +35,7 @@ where I_x I_y & I_y I_y \end{bmatrix}\f] Here, \f$I_x\f$ and \f$I_y\f$ are image derivatives in x and y directions respectively. (Can be easily found -out using **cv2.Sobel()**). +out using **cv.Sobel()**). Then comes the main part. After this, they created a score, basically an equation, which will determine if a window can contain a corner or not. @@ -65,7 +65,7 @@ suitable give you the corners in the image. We will do it with a simple image. Harris Corner Detector in OpenCV -------------------------------- -OpenCV has the function **cv2.cornerHarris()** for this purpose. Its arguments are : +OpenCV has the function **cv.cornerHarris()** for this purpose. Its arguments are : - **img** - Input image, it should be grayscale and float32 type. - **blockSize** - It is the size of neighbourhood considered for corner detection @@ -74,25 +74,25 @@ OpenCV has the function **cv2.cornerHarris()** for this purpose. 
Its arguments a See the example below: @code{.py} -import cv2 import numpy as np +import cv2 as cv filename = 'chessboard.png' -img = cv2.imread(filename) -gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) +img = cv.imread(filename) +gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) gray = np.float32(gray) -dst = cv2.cornerHarris(gray,2,3,0.04) +dst = cv.cornerHarris(gray,2,3,0.04) #result is dilated for marking the corners, not important -dst = cv2.dilate(dst,None) +dst = cv.dilate(dst,None) # Threshold for an optimal value, it may vary depending on the image. img[dst>0.01*dst.max()]=[0,0,255] -cv2.imshow('dst',img) -if cv2.waitKey(0) & 0xff == 27: - cv2.destroyAllWindows() +cv.imshow('dst',img) +if cv.waitKey(0) & 0xff == 27: + cv.destroyAllWindows() @endcode Below are the three results: @@ -102,7 +102,7 @@ Corner with SubPixel Accuracy ----------------------------- Sometimes, you may need to find the corners with maximum accuracy. OpenCV comes with a function -**cv2.cornerSubPix()** which further refines the corners detected with sub-pixel accuracy. Below is +**cv.cornerSubPix()** which further refines the corners detected with sub-pixel accuracy. Below is an example. As usual, we need to find the harris corners first. Then we pass the centroids of these corners (There may be a bunch of pixels at a corner, we take their centroid) to refine them. Harris corners are marked in red pixels and refined corners are marked in green pixels. For this function, @@ -110,26 +110,26 @@ we have to define the criteria when to stop the iteration. We stop it after a sp iteration or a certain accuracy is achieved, whichever occurs first. We also need to define the size of neighbourhood it would search for corners. @code{.py} -import cv2 import numpy as np +import cv2 as cv filename = 'chessboard2.jpg' -img = cv2.imread(filename) -gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) +img = cv.imread(filename) +gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # find Harris corners gray = np.float32(gray) -dst = cv2.cornerHarris(gray,2,3,0.04) -dst = cv2.dilate(dst,None) -ret, dst = cv2.threshold(dst,0.01*dst.max(),255,0) +dst = cv.cornerHarris(gray,2,3,0.04) +dst = cv.dilate(dst,None) +ret, dst = cv.threshold(dst,0.01*dst.max(),255,0) dst = np.uint8(dst) # find centroids -ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst) +ret, labels, stats, centroids = cv.connectedComponentsWithStats(dst) # define the criteria to stop and refine the corners -criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001) -corners = cv2.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) +criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 0.001) +corners = cv.cornerSubPix(gray,np.float32(centroids),(5,5),(-1,-1),criteria) # Now draw them res = np.hstack((centroids,corners)) @@ -137,7 +137,7 @@ res = np.int0(res) img[res[:,1],res[:,0]]=[0,0,255] img[res[:,3],res[:,2]] = [0,255,0] -cv2.imwrite('subpixel5.png',img) +cv.imwrite('subpixel5.png',img) @endcode Below is the result, where some important locations are shown in zoomed window to visualize: diff --git a/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.markdown b/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.markdown index 1c6ef13..bac7c43 100644 --- a/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.markdown +++ b/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.markdown @@ -15,11 +15,11 @@ Brute-Force matcher is simple. 
It takes the descriptor of one feature in first s with all other features in second set using some distance calculation. And the closest one is returned. -For BF matcher, first we have to create the BFMatcher object using **cv2.BFMatcher()**. It takes two +For BF matcher, first we have to create the BFMatcher object using **cv.BFMatcher()**. It takes two optional params. First one is normType. It specifies the distance measurement to be used. By -default, it is cv2.NORM_L2. It is good for SIFT, SURF etc (cv2.NORM_L1 is also there). For binary -string based descriptors like ORB, BRIEF, BRISK etc, cv2.NORM_HAMMING should be used, which used -Hamming distance as measurement. If ORB is using WTA_K == 3 or 4, cv2.NORM_HAMMING2 should be +default, it is cv.NORM_L2. It is good for SIFT, SURF etc (cv.NORM_L1 is also there). For binary +string based descriptors like ORB, BRIEF, BRISK etc, cv.NORM_HAMMING should be used, which used +Hamming distance as measurement. If ORB is using WTA_K == 3 or 4, cv.NORM_HAMMING2 should be used. Second param is boolean variable, crossCheck which is false by default. If it is true, Matcher @@ -32,9 +32,9 @@ Once it is created, two important methods are *BFMatcher.match()* and *BFMatcher one returns the best match. Second method returns k best matches where k is specified by the user. It may be useful when we need to do additional work on that. -Like we used cv2.drawKeypoints() to draw keypoints, **cv2.drawMatches()** helps us to draw the +Like we used cv.drawKeypoints() to draw keypoints, **cv.drawMatches()** helps us to draw the matches. It stacks two images horizontally and draw lines from first image to second image showing -best matches. There is also **cv2.drawMatchesKnn** which draws all the k best matches. If k=2, it +best matches. There is also **cv.drawMatchesKnn** which draws all the k best matches. If k=2, it will draw two match-lines for each keypoint. So we have to pass a mask if we want to selectively draw it. @@ -50,27 +50,27 @@ We are using ORB descriptors to match features. So let's start with loading imag descriptors etc. @code{.py} import numpy as np -import cv2 +import cv2 as cv import matplotlib.pyplot as plt -img1 = cv2.imread('box.png',0) # queryImage -img2 = cv2.imread('box_in_scene.png',0) # trainImage +img1 = cv.imread('box.png',0) # queryImage +img2 = cv.imread('box_in_scene.png',0) # trainImage # Initiate ORB detector -orb = cv2.ORB_create() +orb = cv.ORB_create() # find the keypoints and descriptors with ORB kp1, des1 = orb.detectAndCompute(img1,None) kp2, des2 = orb.detectAndCompute(img2,None) @endcode -Next we create a BFMatcher object with distance measurement cv2.NORM_HAMMING (since we are using +Next we create a BFMatcher object with distance measurement cv.NORM_HAMMING (since we are using ORB) and crossCheck is switched on for better results. Then we use Matcher.match() method to get the best matches in two images. We sort them in ascending order of their distances so that best matches (with low distance) come to front. Then we draw only first 10 matches (Just for sake of visibility. You can increase it as you like) @code{.py} # create BFMatcher object -bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) +bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True) # Match descriptors. matches = bf.match(des1,des2) @@ -79,7 +79,7 @@ matches = bf.match(des1,des2) matches = sorted(matches, key = lambda x:x.distance) # Draw first 10 matches. 
-img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:10], flags=2) +img3 = cv.drawMatches(img1,kp1,img2,kp2,matches[:10], flags=2) plt.imshow(img3),plt.show() @endcode @@ -103,21 +103,21 @@ This time, we will use BFMatcher.knnMatch() to get k best matches. In this examp so that we can apply ratio test explained by D.Lowe in his paper. @code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -img1 = cv2.imread('box.png',0) # queryImage -img2 = cv2.imread('box_in_scene.png',0) # trainImage +img1 = cv.imread('box.png',0) # queryImage +img2 = cv.imread('box_in_scene.png',0) # trainImage # Initiate SIFT detector -sift = cv2.SIFT() +sift = cv.SIFT() # find the keypoints and descriptors with SIFT kp1, des1 = sift.detectAndCompute(img1,None) kp2, des2 = sift.detectAndCompute(img2,None) # BFMatcher with default params -bf = cv2.BFMatcher() +bf = cv.BFMatcher() matches = bf.knnMatch(des1,des2, k=2) # Apply ratio test @@ -126,8 +126,8 @@ for m,n in matches: if m.distance < 0.75*n.distance: good.append([m]) -# cv2.drawMatchesKnn expects list of lists as matches. -img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,flags=2) +# cv.drawMatchesKnn expects list of lists as matches. +img3 = cv.drawMatchesKnn(img1,kp1,img2,kp2,good,flags=2) plt.imshow(img3),plt.show() @endcode @@ -167,14 +167,14 @@ you want to change the value, pass search_params = dict(checks=100). With these informations, we are good to go. @code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -img1 = cv2.imread('box.png',0) # queryImage -img2 = cv2.imread('box_in_scene.png',0) # trainImage +img1 = cv.imread('box.png',0) # queryImage +img2 = cv.imread('box_in_scene.png',0) # trainImage # Initiate SIFT detector -sift = cv2.SIFT() +sift = cv.SIFT() # find the keypoints and descriptors with SIFT kp1, des1 = sift.detectAndCompute(img1,None) @@ -185,7 +185,7 @@ FLANN_INDEX_KDTREE = 1 index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) search_params = dict(checks=50) # or pass empty dictionary -flann = cv2.FlannBasedMatcher(index_params,search_params) +flann = cv.FlannBasedMatcher(index_params,search_params) matches = flann.knnMatch(des1,des2,k=2) @@ -202,7 +202,7 @@ draw_params = dict(matchColor = (0,255,0), matchesMask = matchesMask, flags = 0) -img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params) +img3 = cv.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params) plt.imshow(img3,),plt.show() @endcode diff --git a/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown b/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown index 49c558a..55bc0f2 100644 --- a/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown +++ b/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown @@ -52,7 +52,7 @@ choice in low-power devices for panorama stitching etc. ORB in OpenCV ------------- -As usual, we have to create an ORB object with the function, **cv2.ORB()** or using feature2d common +As usual, we have to create an ORB object with the function, **cv.ORB()** or using feature2d common interface. It has a number of optional parameters. Most useful ones are nFeatures which denotes maximum number of features to be retained (by default 500), scoreType which denotes whether Harris score or FAST score to rank the features (by default, Harris score) etc. Another parameter, WTA_K @@ -64,13 +64,13 @@ is defined by NORM_HAMMING2. Below is a simple code which shows the use of ORB. 
@code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('simple.jpg',0) +img = cv.imread('simple.jpg',0) # Initiate ORB detector -orb = cv2.ORB_create() +orb = cv.ORB_create() # find the keypoints with ORB kp = orb.detect(img,None) @@ -79,7 +79,7 @@ kp = orb.detect(img,None) kp, des = orb.compute(img, kp) # draw only keypoints location,not size and orientation -img2 = cv2.drawKeypoints(img, kp, None, color=(0,255,0), flags=0) +img2 = cv.drawKeypoints(img, kp, None, color=(0,255,0), flags=0) plt.imshow(img2), plt.show() @endcode See the result below: diff --git a/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown b/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown index 5731e21..5ddb46f 100644 --- a/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown +++ b/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown @@ -7,7 +7,7 @@ Goal In this chapter, - We will learn about the another corner detector: Shi-Tomasi Corner Detector -- We will see the function: **cv2.goodFeaturesToTrack()** +- We will see the function: **cv.goodFeaturesToTrack()** Theory ------ @@ -33,7 +33,7 @@ From the figure, you can see that only when \f$\lambda_1\f$ and \f$\lambda_2\f$ Code ---- -OpenCV has a function, **cv2.goodFeaturesToTrack()**. It finds N strongest corners in the image by +OpenCV has a function, **cv.goodFeaturesToTrack()**. It finds N strongest corners in the image by Shi-Tomasi method (or Harris Corner Detection, if you specify it). As usual, image should be a grayscale image. Then you specify number of corners you want to find. Then you specify the quality level, which is a value between 0-1, which denotes the minimum quality of corner below which @@ -47,18 +47,18 @@ minimum distance and returns N strongest corners. In below example, we will try to find 25 best corners: @code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('blox.jpg') -gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) +img = cv.imread('blox.jpg') +gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) -corners = cv2.goodFeaturesToTrack(gray,25,0.01,10) +corners = cv.goodFeaturesToTrack(gray,25,0.01,10) corners = np.int0(corners) for i in corners: x,y = i.ravel() - cv2.circle(img,(x,y),3,255,-1) + cv.circle(img,(x,y),3,255,-1) plt.imshow(img),plt.show() @endcode diff --git a/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown b/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown index 2b4b516..0825e3e 100644 --- a/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown +++ b/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown @@ -113,30 +113,30 @@ So now let's see SIFT functionalities available in OpenCV. Let's start with keyp draw them. First we have to construct a SIFT object. We can pass different parameters to it which are optional and they are well explained in docs. @code{.py} -import cv2 import numpy as np +import cv2 as cv -img = cv2.imread('home.jpg') -gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) +img = cv.imread('home.jpg') +gray= cv.cvtColor(img,cv.COLOR_BGR2GRAY) -sift = cv2.xfeatures2d.SIFT_create() +sift = cv.xfeatures2d.SIFT_create() kp = sift.detect(gray,None) -img=cv2.drawKeypoints(gray,kp,img) +img=cv.drawKeypoints(gray,kp,img) -cv2.imwrite('sift_keypoints.jpg',img) +cv.imwrite('sift_keypoints.jpg',img) @endcode **sift.detect()** function finds the keypoint in the images. 
You can pass a mask if you want to search only a part of image. Each keypoint is a special structure which has many attributes like its (x,y) coordinates, size of the meaningful neighbourhood, angle which specifies its orientation, response that specifies strength of keypoints etc. -OpenCV also provides **cv2.drawKeyPoints()** function which draws the small circles on the locations -of keypoints. If you pass a flag, **cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS** to it, it will +OpenCV also provides **cv.drawKeyPoints()** function which draws the small circles on the locations +of keypoints. If you pass a flag, **cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS** to it, it will draw a circle with size of keypoint and it will even show its orientation. See below example. @code{.py} -img=cv2.drawKeypoints(gray,kp,img,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) -cv2.imwrite('sift_keypoints.jpg',img) +img=cv.drawKeypoints(gray,kp,img,flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) +cv.imwrite('sift_keypoints.jpg',img) @endcode See the two results below: @@ -151,7 +151,7 @@ Now to calculate the descriptor, OpenCV provides two methods. We will see the second method: @code{.py} -sift = cv2.xfeatures2d.SIFT_create() +sift = cv.xfeatures2d.SIFT_create() kp, des = sift.detectAndCompute(gray,None) @endcode Here kp will be a list of keypoints and des is a numpy array of shape diff --git a/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown b/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown index 7c9456f..10288ca 100644 --- a/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown +++ b/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown @@ -76,11 +76,11 @@ and descriptors. First we will see a simple demo on how to find SURF keypoints and descriptors and draw it. All examples are shown in Python terminal since it is just same as SIFT only. @code{.py} ->>> img = cv2.imread('fly.png',0) +>>> img = cv.imread('fly.png',0) # Create SURF object. You can specify params here or later. # Here I set Hessian Threshold to 400 ->>> surf = cv2.xfeatures2d.SURF_create(400) +>>> surf = cv.xfeatures2d.SURF_create(400) # Find keypoints and descriptors directly >>> kp, des = surf.detectAndCompute(img,None) @@ -107,7 +107,7 @@ While matching, we may need all those features, but not now. So we increase the @endcode It is less than 50. Let's draw it on the image. @code{.py} ->>> img2 = cv2.drawKeypoints(img,kp,None,(255,0,0),4) +>>> img2 = cv.drawKeypoints(img,kp,None,(255,0,0),4) >>> plt.imshow(img2),plt.show() @endcode @@ -126,7 +126,7 @@ False # Recompute the feature points and draw it >>> kp = surf.detect(img,None) ->>> img2 = cv2.drawKeypoints(img,kp,None,(255,0,0),4) +>>> img2 = cv.drawKeypoints(img,kp,None,(255,0,0),4) >>> plt.imshow(img2),plt.show() @endcode diff --git a/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.markdown b/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.markdown index a9c616b..4b7ff32 100644 --- a/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.markdown +++ b/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.markdown @@ -5,8 +5,8 @@ Goal ---- - Learn to draw different geometric shapes with OpenCV -- You will learn these functions : **cv2.line()**, **cv2.circle()** , **cv2.rectangle()**, - **cv2.ellipse()**, **cv2.putText()** etc. +- You will learn these functions : **cv.line()**, **cv.circle()** , **cv.rectangle()**, + **cv.ellipse()**, **cv.putText()** etc. 
Code ---- @@ -19,7 +19,7 @@ In all the above functions, you will see some common arguments as given below: - thickness : Thickness of the line or circle etc. If **-1** is passed for closed figures like circles, it will fill the shape. *default thickness = 1* - lineType : Type of line, whether 8-connected, anti-aliased line etc. *By default, it is - 8-connected.* cv2.LINE_AA gives anti-aliased line which looks great for curves. + 8-connected.* cv.LINE_AA gives anti-aliased line which looks great for curves. ### Drawing Line @@ -27,27 +27,27 @@ To draw a line, you need to pass starting and ending coordinates of line. We wil image and draw a blue line on it from top-left to bottom-right corners. @code{.py} import numpy as np -import cv2 +import cv2 as cv # Create a black image img = np.zeros((512,512,3), np.uint8) # Draw a diagonal blue line with thickness of 5 px -cv2.line(img,(0,0),(511,511),(255,0,0),5) +cv.line(img,(0,0),(511,511),(255,0,0),5) @endcode ### Drawing Rectangle To draw a rectangle, you need top-left corner and bottom-right corner of rectangle. This time we will draw a green rectangle at the top-right corner of image. @code{.py} -cv2.rectangle(img,(384,0),(510,128),(0,255,0),3) +cv.rectangle(img,(384,0),(510,128),(0,255,0),3) @endcode ### Drawing Circle To draw a circle, you need its center coordinates and radius. We will draw a circle inside the rectangle drawn above. @code{.py} -cv2.circle(img,(447,63), 63, (0,0,255), -1) +cv.circle(img,(447,63), 63, (0,0,255), -1) @endcode ### Drawing Ellipse @@ -55,10 +55,10 @@ To draw the ellipse, we need to pass several arguments. One argument is the cent Next argument is axes lengths (major axis length, minor axis length). angle is the angle of rotation of ellipse in anti-clockwise direction. startAngle and endAngle denotes the starting and ending of ellipse arc measured in clockwise direction from major axis. i.e. giving values 0 and 360 gives the -full ellipse. For more details, check the documentation of **cv2.ellipse()**. Below example draws a +full ellipse. For more details, check the documentation of **cv.ellipse()**. Below example draws a half ellipse at the center of the image. @code{.py} -cv2.ellipse(img,(256,256),(100,50),0,0,180,255,-1) +cv.ellipse(img,(256,256),(100,50),0,0,180,255,-1) @endcode ### Drawing Polygon @@ -68,30 +68,30 @@ polygon of with four vertices in yellow color. @code{.py} pts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32) pts = pts.reshape((-1,1,2)) -cv2.polylines(img,[pts],True,(0,255,255)) +cv.polylines(img,[pts],True,(0,255,255)) @endcode @note If third argument is False, you will get a polylines joining all the points, not a closed shape. -@note cv2.polylines() can be used to draw multiple lines. Just create a list of all the lines you +@note cv.polylines() can be used to draw multiple lines. Just create a list of all the lines you want to draw and pass it to the function. All lines will be drawn individually. It is a much better -and faster way to draw a group of lines than calling cv2.line() for each line. +and faster way to draw a group of lines than calling cv.line() for each line. ### Adding Text to Images: To put texts in images, you need specify following things. - Text data that you want to write - Position coordinates of where you want put it (i.e. bottom-left corner where data starts). 
- - Font type (Check **cv2.putText()** docs for supported fonts) + - Font type (Check **cv.putText()** docs for supported fonts) - Font Scale (specifies the size of font) - - regular things like color, thickness, lineType etc. For better look, lineType = cv2.LINE_AA + - regular things like color, thickness, lineType etc. For better look, lineType = cv.LINE_AA is recommended. We will write **OpenCV** on our image in white color. @code{.py} -font = cv2.FONT_HERSHEY_SIMPLEX -cv2.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv2.LINE_AA) +font = cv.FONT_HERSHEY_SIMPLEX +cv.putText(img,'OpenCV',(10,500), font, 4,(255,255,255),2,cv.LINE_AA) @endcode ### Result diff --git a/doc/py_tutorials/py_gui/py_image_display/py_image_display.markdown b/doc/py_tutorials/py_gui/py_image_display/py_image_display.markdown index c286a3f..252437e 100644 --- a/doc/py_tutorials/py_gui/py_image_display/py_image_display.markdown +++ b/doc/py_tutorials/py_gui/py_image_display/py_image_display.markdown @@ -5,7 +5,7 @@ Goals ----- - Here, you will learn how to read an image, how to display it and how to save it back -- You will learn these functions : **cv2.imread()**, **cv2.imshow()** , **cv2.imwrite()** +- You will learn these functions : **cv.imread()**, **cv.imshow()** , **cv.imwrite()** - Optionally, you will learn how to display images with Matplotlib Using OpenCV @@ -13,25 +13,25 @@ Using OpenCV ### Read an image -Use the function **cv2.imread()** to read an image. The image should be in the working directory or +Use the function **cv.imread()** to read an image. The image should be in the working directory or a full path of image should be given. Second argument is a flag which specifies the way image should be read. -- cv2.IMREAD_COLOR : Loads a color image. Any transparency of image will be neglected. It is the +- cv.IMREAD_COLOR : Loads a color image. Any transparency of image will be neglected. It is the default flag. -- cv2.IMREAD_GRAYSCALE : Loads image in grayscale mode -- cv2.IMREAD_UNCHANGED : Loads image as such including alpha channel +- cv.IMREAD_GRAYSCALE : Loads image in grayscale mode +- cv.IMREAD_UNCHANGED : Loads image as such including alpha channel @note Instead of these three flags, you can simply pass integers 1, 0 or -1 respectively. See the code below: @code{.py} import numpy as np -import cv2 +import cv2 as cv # Load an color image in grayscale -img = cv2.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg',0) @endcode **warning** @@ -40,21 +40,21 @@ Even if the image path is wrong, it won't throw any error, but `print img` will ### Display an image -Use the function **cv2.imshow()** to display an image in a window. The window automatically fits to +Use the function **cv.imshow()** to display an image in a window. The window automatically fits to the image size. First argument is a window name which is a string. second argument is our image. You can create as many windows as you wish, but with different window names. @code{.py} -cv2.imshow('image',img) -cv2.waitKey(0) -cv2.destroyAllWindows() +cv.imshow('image',img) +cv.waitKey(0) +cv.destroyAllWindows() @endcode A screenshot of the window will look like this (in Fedora-Gnome machine): ![image](images/opencv_screenshot.jpg) -**cv2.waitKey()** is a keyboard binding function. Its argument is the time in milliseconds. The +**cv.waitKey()** is a keyboard binding function. Its argument is the time in milliseconds. The function waits for specified milliseconds for any keyboard event. 
If you press any key in that time, the program continues. If **0** is passed, it waits indefinitely for a key stroke. It can also be set to detect specific key strokes like, if key a is pressed etc which we will discuss below. @@ -62,30 +62,30 @@ set to detect specific key strokes like, if key a is pressed etc which we will d @note Besides binding keyboard events this function also processes many other GUI events, so you MUST use it to actually display the image. -**cv2.destroyAllWindows()** simply destroys all the windows we created. If you want to destroy any -specific window, use the function **cv2.destroyWindow()** where you pass the exact window name as +**cv.destroyAllWindows()** simply destroys all the windows we created. If you want to destroy any +specific window, use the function **cv.destroyWindow()** where you pass the exact window name as the argument. @note There is a special case where you can already create a window and load image to it later. In that case, you can specify whether window is resizable or not. It is done with the function -**cv2.namedWindow()**. By default, the flag is cv2.WINDOW_AUTOSIZE. But if you specify flag to be -cv2.WINDOW_NORMAL, you can resize window. It will be helpful when image is too large in dimension +**cv.namedWindow()**. By default, the flag is cv.WINDOW_AUTOSIZE. But if you specify flag to be +cv.WINDOW_NORMAL, you can resize window. It will be helpful when image is too large in dimension and adding track bar to windows. See the code below: @code{.py} -cv2.namedWindow('image', cv2.WINDOW_NORMAL) -cv2.imshow('image',img) -cv2.waitKey(0) -cv2.destroyAllWindows() +cv.namedWindow('image', cv.WINDOW_NORMAL) +cv.imshow('image',img) +cv.waitKey(0) +cv.destroyAllWindows() @endcode ### Write an image -Use the function **cv2.imwrite()** to save an image. +Use the function **cv.imwrite()** to save an image. First argument is the file name, second argument is the image you want to save. @code{.py} -cv2.imwrite('messigray.png',img) +cv.imwrite('messigray.png',img) @endcode This will save the image in PNG format in the working directory. @@ -95,22 +95,22 @@ Below program loads an image in grayscale, displays it, save the image if you pr simply exit without saving if you press ESC key. @code{.py} import numpy as np -import cv2 +import cv2 as cv -img = cv2.imread('messi5.jpg',0) -cv2.imshow('image',img) -k = cv2.waitKey(0) +img = cv.imread('messi5.jpg',0) +cv.imshow('image',img) +k = cv.waitKey(0) if k == 27: # wait for ESC key to exit - cv2.destroyAllWindows() + cv.destroyAllWindows() elif k == ord('s'): # wait for 's' key to save and exit - cv2.imwrite('messigray.png',img) - cv2.destroyAllWindows() + cv.imwrite('messigray.png',img) + cv.destroyAllWindows() @endcode **warning** -If you are using a 64-bit machine, you will have to modify `k = cv2.waitKey(0)` line as follows : -`k = cv2.waitKey(0) & 0xFF` +If you are using a 64-bit machine, you will have to modify `k = cv.waitKey(0)` line as follows : +`k = cv.waitKey(0) & 0xFF` Using Matplotlib ---------------- @@ -120,10 +120,10 @@ will see them in coming articles. Here, you will learn how to display image with zoom images, save it etc using Matplotlib. 
@code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg',0) plt.imshow(img, cmap = 'gray', interpolation = 'bicubic') plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis plt.show() diff --git a/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.markdown b/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.markdown index 53529c7..3c17b2e 100644 --- a/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.markdown +++ b/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.markdown @@ -5,7 +5,7 @@ Goal ---- - Learn to handle mouse events in OpenCV -- You will learn these functions : **cv2.setMouseCallback()** +- You will learn these functions : **cv.setMouseCallback()** Simple Demo ----------- @@ -19,32 +19,32 @@ double-click etc. It gives us the coordinates (x,y) for every mouse event. With location, we can do whatever we like. To list all available events available, run the following code in Python terminal: @code{.py} -import cv2 -events = [i for i in dir(cv2) if 'EVENT' in i] +import cv2 as cv +events = [i for i in dir(cv) if 'EVENT' in i] print( events ) @endcode Creating mouse callback function has a specific format which is same everywhere. It differs only in what the function does. So our mouse callback function does one thing, it draws a circle where we double-click. So see the code below. Code is self-explanatory from comments : @code{.py} -import cv2 import numpy as np +import cv2 as cv # mouse callback function def draw_circle(event,x,y,flags,param): - if event == cv2.EVENT_LBUTTONDBLCLK: - cv2.circle(img,(x,y),100,(255,0,0),-1) + if event == cv.EVENT_LBUTTONDBLCLK: + cv.circle(img,(x,y),100,(255,0,0),-1) # Create a black image, a window and bind the function to window img = np.zeros((512,512,3), np.uint8) -cv2.namedWindow('image') -cv2.setMouseCallback('image',draw_circle) +cv.namedWindow('image') +cv.setMouseCallback('image',draw_circle) while(1): - cv2.imshow('image',img) - if cv2.waitKey(20) & 0xFF == 27: + cv.imshow('image',img) + if cv.waitKey(20) & 0xFF == 27: break -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode More Advanced Demo ------------------ @@ -55,8 +55,8 @@ function has two parts, one to draw rectangle and other to draw the circles. Thi will be really helpful in creating and understanding some interactive applications like object tracking, image segmentation etc. @code{.py} -import cv2 import numpy as np +import cv2 as cv drawing = False # true if mouse is pressed mode = True # if True, draw rectangle. Press 'm' to toggle to curve @@ -66,40 +66,40 @@ ix,iy = -1,-1 def draw_circle(event,x,y,flags,param): global ix,iy,drawing,mode - if event == cv2.EVENT_LBUTTONDOWN: + if event == cv.EVENT_LBUTTONDOWN: drawing = True ix,iy = x,y - elif event == cv2.EVENT_MOUSEMOVE: + elif event == cv.EVENT_MOUSEMOVE: if drawing == True: if mode == True: - cv2.rectangle(img,(ix,iy),(x,y),(0,255,0),-1) + cv.rectangle(img,(ix,iy),(x,y),(0,255,0),-1) else: - cv2.circle(img,(x,y),5,(0,0,255),-1) + cv.circle(img,(x,y),5,(0,0,255),-1) - elif event == cv2.EVENT_LBUTTONUP: + elif event == cv.EVENT_LBUTTONUP: drawing = False if mode == True: - cv2.rectangle(img,(ix,iy),(x,y),(0,255,0),-1) + cv.rectangle(img,(ix,iy),(x,y),(0,255,0),-1) else: - cv2.circle(img,(x,y),5,(0,0,255),-1) + cv.circle(img,(x,y),5,(0,0,255),-1) @endcode Next we have to bind this mouse callback function to OpenCV window. 
In the main loop, we should set a keyboard binding for key 'm' to toggle between rectangle and circle. @code{.py} img = np.zeros((512,512,3), np.uint8) -cv2.namedWindow('image') -cv2.setMouseCallback('image',draw_circle) +cv.namedWindow('image') +cv.setMouseCallback('image',draw_circle) while(1): - cv2.imshow('image',img) - k = cv2.waitKey(1) & 0xFF + cv.imshow('image',img) + k = cv.waitKey(1) & 0xFF if k == ord('m'): mode = not mode elif k == 27: break -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode Additional Resources -------------------- diff --git a/doc/py_tutorials/py_gui/py_trackbar/py_trackbar.markdown b/doc/py_tutorials/py_gui/py_trackbar/py_trackbar.markdown index 1d6e6ae..e5e9306 100644 --- a/doc/py_tutorials/py_gui/py_trackbar/py_trackbar.markdown +++ b/doc/py_tutorials/py_gui/py_trackbar/py_trackbar.markdown @@ -5,7 +5,7 @@ Goal ---- - Learn to bind trackbar to OpenCV windows -- You will learn these functions : **cv2.getTrackbarPos()**, **cv2.createTrackbar()** etc. +- You will learn these functions : **cv.getTrackbarPos()**, **cv.createTrackbar()** etc. Code Demo --------- @@ -14,7 +14,7 @@ Here we will create a simple application which shows the color you specify. You shows the color and three trackbars to specify each of B,G,R colors. You slide the trackbar and correspondingly window color changes. By default, initial color will be set to Black. -For cv2.getTrackbarPos() function, first argument is the trackbar name, second one is the window +For cv.getTrackbarPos() function, first argument is the trackbar name, second one is the window name to which it is attached, third argument is the default value, fourth one is the maximum value and fifth one is the callback function which is executed everytime trackbar value changes. The callback function always has a default argument which is the trackbar position. In our case, @@ -25,43 +25,43 @@ doesn't have button functionality. So you can use trackbar to get such functiona application, we have created one switch in which application works only if switch is ON, otherwise screen is always black. 
@code{.py} -import cv2 import numpy as np +import cv2 as cv def nothing(x): pass # Create a black image, a window img = np.zeros((300,512,3), np.uint8) -cv2.namedWindow('image') +cv.namedWindow('image') # create trackbars for color change -cv2.createTrackbar('R','image',0,255,nothing) -cv2.createTrackbar('G','image',0,255,nothing) -cv2.createTrackbar('B','image',0,255,nothing) +cv.createTrackbar('R','image',0,255,nothing) +cv.createTrackbar('G','image',0,255,nothing) +cv.createTrackbar('B','image',0,255,nothing) # create switch for ON/OFF functionality switch = '0 : OFF \n1 : ON' -cv2.createTrackbar(switch, 'image',0,1,nothing) +cv.createTrackbar(switch, 'image',0,1,nothing) while(1): - cv2.imshow('image',img) - k = cv2.waitKey(1) & 0xFF + cv.imshow('image',img) + k = cv.waitKey(1) & 0xFF if k == 27: break # get current positions of four trackbars - r = cv2.getTrackbarPos('R','image') - g = cv2.getTrackbarPos('G','image') - b = cv2.getTrackbarPos('B','image') - s = cv2.getTrackbarPos(switch,'image') + r = cv.getTrackbarPos('R','image') + g = cv.getTrackbarPos('G','image') + b = cv.getTrackbarPos('B','image') + s = cv.getTrackbarPos(switch,'image') if s == 0: img[:] = 0 else: img[:] = [b,g,r] -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode The screenshot of the application looks like below : diff --git a/doc/py_tutorials/py_gui/py_video_display/py_video_display.markdown b/doc/py_tutorials/py_gui/py_video_display/py_video_display.markdown index a65f513..edd369a 100644 --- a/doc/py_tutorials/py_gui/py_video_display/py_video_display.markdown +++ b/doc/py_tutorials/py_gui/py_video_display/py_video_display.markdown @@ -6,7 +6,7 @@ Goal - Learn to read video, display video and save video. - Learn to capture from Camera and display it. -- You will learn these functions : **cv2.VideoCapture()**, **cv2.VideoWriter()** +- You will learn these functions : **cv.VideoCapture()**, **cv.VideoWriter()** Capture Video from Camera ------------------------- @@ -22,25 +22,25 @@ the second camera by passing 1 and so on. After that, you can capture frame-by-f end, don't forget to release the capture. @code{.py} import numpy as np -import cv2 +import cv2 as cv -cap = cv2.VideoCapture(0) +cap = cv.VideoCapture(0) while(True): # Capture frame-by-frame ret, frame = cap.read() # Our operations on the frame come here - gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) # Display the resulting frame - cv2.imshow('frame',gray) - if cv2.waitKey(1) & 0xFF == ord('q'): + cv.imshow('frame',gray) + if cv.waitKey(1) & 0xFF == ord('q'): break # When everything done, release the capture cap.release() -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode `cap.read()` returns a bool (`True`/`False`). If frame is read correctly, it will be `True`. So you can check end of the video by checking this return value. @@ -55,9 +55,9 @@ video) and full details can be seen here: cv::VideoCapture::get(). Some of these values can be modified using **cap.set(propId, value)**. Value is the new value you want. -For example, I can check the frame width and height by `cap.get(cv2.CAP_PROP_FRAME_WIDTH)` and `cap.get(cv2.CAP_PROP_FRAME_HEIGHT)`. It gives me -640x480 by default. But I want to modify it to 320x240. Just use `ret = cap.set(cv2.CAP_PROP_FRAME_WIDTH,320)` and -`ret = cap.set(cv2.CAP_PROP_FRAME_HEIGHT,240)`. +For example, I can check the frame width and height by `cap.get(cv.CAP_PROP_FRAME_WIDTH)` and `cap.get(cv.CAP_PROP_FRAME_HEIGHT)`. It gives me +640x480 by default. 
But I want to modify it to 320x240. Just use `ret = cap.set(cv.CAP_PROP_FRAME_WIDTH,320)` and +`ret = cap.set(cv.CAP_PROP_FRAME_HEIGHT,240)`. @note If you are getting error, make sure camera is working fine using any other camera application (like Cheese in Linux). @@ -66,26 +66,26 @@ Playing Video from file ----------------------- It is same as capturing from Camera, just change camera index with video file name. Also while -displaying the frame, use appropriate time for `cv2.waitKey()`. If it is too less, video will be very +displaying the frame, use appropriate time for `cv.waitKey()`. If it is too less, video will be very fast and if it is too high, video will be slow (Well, that is how you can display videos in slow motion). 25 milliseconds will be OK in normal cases. @code{.py} import numpy as np -import cv2 +import cv2 as cv -cap = cv2.VideoCapture('vtest.avi') +cap = cv.VideoCapture('vtest.avi') while(cap.isOpened()): ret, frame = cap.read() - gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) - cv2.imshow('frame',gray) - if cv2.waitKey(1) & 0xFF == ord('q'): + cv.imshow('frame',gray) + if cv.waitKey(1) & 0xFF == ord('q'): break cap.release() -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode @note Make sure proper versions of ffmpeg or gstreamer is installed. Sometimes, it is a headache to @@ -95,7 +95,7 @@ Saving a Video -------------- So we capture a video, process it frame-by-frame and we want to save that video. For images, it is -very simple, just use `cv2.imwrite()`. Here a little more work is required. +very simple, just use `cv.imwrite()`. Here a little more work is required. This time we create a **VideoWriter** object. We should specify the output file name (eg: output.avi). Then we should specify the **FourCC** code (details in next paragraph). Then number of @@ -111,30 +111,30 @@ platform dependent. Following codecs works fine for me. - In Windows: DIVX (More to be tested and added) - In OSX: MJPG (.mp4), DIVX (.avi), X264 (.mkv). -FourCC code is passed as `cv2.VideoWriter_fourcc('M','J','P','G')` or -`cv2.VideoWriter_fourcc(*'MJPG')` for MJPG. +FourCC code is passed as `cv.VideoWriter_fourcc('M','J','P','G')` or +`cv.VideoWriter_fourcc(*'MJPG')` for MJPG. Below code capture from a Camera, flip every frame in vertical direction and saves it. 
@code{.py} import numpy as np -import cv2 +import cv2 as cv -cap = cv2.VideoCapture(0) +cap = cv.VideoCapture(0) # Define the codec and create VideoWriter object -fourcc = cv2.VideoWriter_fourcc(*'XVID') -out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480)) +fourcc = cv.VideoWriter_fourcc(*'XVID') +out = cv.VideoWriter('output.avi',fourcc, 20.0, (640,480)) while(cap.isOpened()): ret, frame = cap.read() if ret==True: - frame = cv2.flip(frame,0) + frame = cv.flip(frame,0) # write the flipped frame out.write(frame) - cv2.imshow('frame',frame) - if cv2.waitKey(1) & 0xFF == ord('q'): + cv.imshow('frame',frame) + if cv.waitKey(1) & 0xFF == ord('q'): break else: break @@ -142,7 +142,7 @@ while(cap.isOpened()): # Release everything if job is finished cap.release() out.release() -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode Additional Resources diff --git a/doc/py_tutorials/py_imgproc/py_canny/py_canny.markdown b/doc/py_tutorials/py_imgproc/py_canny/py_canny.markdown index 94e249a..cbc2a72 100644 --- a/doc/py_tutorials/py_imgproc/py_canny/py_canny.markdown +++ b/doc/py_tutorials/py_imgproc/py_canny/py_canny.markdown @@ -7,7 +7,7 @@ Goal In this chapter, we will learn about - Concept of Canny edge detection -- OpenCV functions for that : **cv2.Canny()** +- OpenCV functions for that : **cv.Canny()** Theory ------ @@ -72,19 +72,19 @@ So what we finally get is strong edges in the image. Canny Edge Detection in OpenCV ------------------------------ -OpenCV puts all the above in single function, **cv2.Canny()**. We will see how to use it. First +OpenCV puts all the above in single function, **cv.Canny()**. We will see how to use it. First argument is our input image. Second and third arguments are our minVal and maxVal respectively. Third argument is aperture_size. It is the size of Sobel kernel used for find image gradients. By default it is 3. Last argument is L2gradient which specifies the equation for finding gradient magnitude. If it is True, it uses the equation mentioned above which is more accurate, otherwise it uses this function: \f$Edge\_Gradient \; (G) = |G_x| + |G_y|\f$. By default, it is False. @code{.py} -import cv2 import numpy as np +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('messi5.jpg',0) -edges = cv2.Canny(img,100,200) +img = cv.imread('messi5.jpg',0) +edges = cv.Canny(img,100,200) plt.subplot(121),plt.imshow(img,cmap = 'gray') plt.title('Original Image'), plt.xticks([]), plt.yticks([]) diff --git a/doc/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.markdown b/doc/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.markdown index b0fa725..874dbdb 100644 --- a/doc/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.markdown +++ b/doc/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.markdown @@ -7,7 +7,7 @@ Goal - In this tutorial, you will learn how to convert images from one color-space to another, like BGR \f$\leftrightarrow\f$ Gray, BGR \f$\leftrightarrow\f$ HSV etc. - In addition to that, we will create an application which extracts a colored object in a video -- You will learn following functions : **cv2.cvtColor()**, **cv2.inRange()** etc. +- You will learn following functions : **cv.cvtColor()**, **cv.inRange()** etc. Changing Color-space -------------------- @@ -15,15 +15,15 @@ Changing Color-space There are more than 150 color-space conversion methods available in OpenCV. But we will look into only two which are most widely used ones, BGR \f$\leftrightarrow\f$ Gray and BGR \f$\leftrightarrow\f$ HSV. 
-For color conversion, we use the function cv2.cvtColor(input_image, flag) where flag determines the +For color conversion, we use the function cv.cvtColor(input_image, flag) where flag determines the type of conversion. -For BGR \f$\rightarrow\f$ Gray conversion we use the flags cv2.COLOR_BGR2GRAY. Similarly for BGR -\f$\rightarrow\f$ HSV, we use the flag cv2.COLOR_BGR2HSV. To get other flags, just run following +For BGR \f$\rightarrow\f$ Gray conversion we use the flags cv.COLOR_BGR2GRAY. Similarly for BGR +\f$\rightarrow\f$ HSV, we use the flag cv.COLOR_BGR2HSV. To get other flags, just run following commands in your Python terminal : @code{.py} ->>> import cv2 ->>> flags = [i for i in dir(cv2) if i.startswith('COLOR_')] +>>> import cv2 as cv +>>> flags = [i for i in dir(cv) if i.startswith('COLOR_')] >>> print( flags ) @endcode @note For HSV, Hue range is [0,179], Saturation range is [0,255] and Value range is [0,255]. @@ -44,10 +44,10 @@ a blue colored object. So here is the method: Below is the code which are commented in detail : @code{.py} -import cv2 +import cv2 as cv import numpy as np -cap = cv2.VideoCapture(0) +cap = cv.VideoCapture(0) while(1): @@ -55,26 +55,26 @@ while(1): _, frame = cap.read() # Convert BGR to HSV - hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) + hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV) # define range of blue color in HSV lower_blue = np.array([110,50,50]) upper_blue = np.array([130,255,255]) # Threshold the HSV image to get only blue colors - mask = cv2.inRange(hsv, lower_blue, upper_blue) + mask = cv.inRange(hsv, lower_blue, upper_blue) # Bitwise-AND mask and original image - res = cv2.bitwise_and(frame,frame, mask= mask) + res = cv.bitwise_and(frame,frame, mask= mask) - cv2.imshow('frame',frame) - cv2.imshow('mask',mask) - cv2.imshow('res',res) - k = cv2.waitKey(5) & 0xFF + cv.imshow('frame',frame) + cv.imshow('mask',mask) + cv.imshow('res',res) + k = cv.waitKey(5) & 0xFF if k == 27: break -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode Below image shows tracking of the blue object: @@ -90,12 +90,12 @@ How to find HSV values to track? -------------------------------- This is a common question found in [stackoverflow.com](http://www.stackoverflow.com). It is very simple and -you can use the same function, cv2.cvtColor(). Instead of passing an image, you just pass the BGR +you can use the same function, cv.cvtColor(). Instead of passing an image, you just pass the BGR values you want. For example, to find the HSV value of Green, try following commands in Python terminal: @code{.py} >>> green = np.uint8([[[0,255,0 ]]]) ->>> hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV) +>>> hsv_green = cv.cvtColor(green,cv.COLOR_BGR2HSV) >>> print( hsv_green ) [[[ 60 255 255]]] @endcode diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown index 32243b0..f018e11 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown @@ -16,17 +16,17 @@ Image moments help you to calculate some features like center of mass of the obj object etc. Check out the wikipedia page on [Image Moments](http://en.wikipedia.org/wiki/Image_moment) -The function **cv2.moments()** gives a dictionary of all moment values calculated. 
See below: +The function **cv.moments()** gives a dictionary of all moment values calculated. See below: @code{.py} -import cv2 import numpy as np +import cv2 as cv -img = cv2.imread('star.jpg',0) -ret,thresh = cv2.threshold(img,127,255,0) -im2,contours,hierarchy = cv2.findContours(thresh, 1, 2) +img = cv.imread('star.jpg',0) +ret,thresh = cv.threshold(img,127,255,0) +im2,contours,hierarchy = cv.findContours(thresh, 1, 2) cnt = contours[0] -M = cv2.moments(cnt) +M = cv.moments(cnt) print( M ) @endcode From this moments, you can extract useful data like area, centroid etc. Centroid is given by the @@ -40,18 +40,18 @@ cy = int(M['m01']/M['m00']) 2. Contour Area --------------- -Contour area is given by the function **cv2.contourArea()** or from moments, **M['m00']**. +Contour area is given by the function **cv.contourArea()** or from moments, **M['m00']**. @code{.py} -area = cv2.contourArea(cnt) +area = cv.contourArea(cnt) @endcode 3. Contour Perimeter -------------------- -It is also called arc length. It can be found out using **cv2.arcLength()** function. Second +It is also called arc length. It can be found out using **cv.arcLength()** function. Second argument specify whether shape is a closed contour (if passed True), or just a curve. @code{.py} -perimeter = cv2.arcLength(cnt,True) +perimeter = cv.arcLength(cnt,True) @endcode 4. Contour Approximation @@ -68,8 +68,8 @@ you can use this function to approximate the shape. In this, second argument is which is maximum distance from contour to approximated contour. It is an accuracy parameter. A wise selection of epsilon is needed to get the correct output. @code{.py} -epsilon = 0.1*cv2.arcLength(cnt,True) -approx = cv2.approxPolyDP(cnt,epsilon,True) +epsilon = 0.1*cv.arcLength(cnt,True) +approx = cv.approxPolyDP(cnt,epsilon,True) @endcode Below, in second image, green line shows the approximated curve for epsilon = 10% of arc length. Third image shows the same for epsilon = 1% of the arc length. Third argument specifies whether @@ -81,7 +81,7 @@ curve is closed or not. -------------- Convex Hull will look similar to contour approximation, but it is not (Both may provide same results -in some cases). Here, **cv2.convexHull()** function checks a curve for convexity defects and +in some cases). Here, **cv.convexHull()** function checks a curve for convexity defects and corrects it. Generally speaking, convex curves are the curves which are always bulged out, or at-least flat. And if it is bulged inside, it is called convexity defects. For example, check the below image of hand. Red line shows the convex hull of hand. The double-sided arrow marks shows the @@ -91,7 +91,7 @@ convexity defects, which are the local maximum deviations of hull from contours. There is a little bit things to discuss about it its syntax: @code{.py} -hull = cv2.convexHull(points[, hull[, clockwise[, returnPoints]] +hull = cv.convexHull(points[, hull[, clockwise[, returnPoints]] @endcode Arguments details: @@ -104,7 +104,7 @@ Arguments details: So to get a convex hull as in above image, following is sufficient: @code{.py} -hull = cv2.convexHull(cnt) +hull = cv.convexHull(cnt) @endcode But if you want to find convexity defects, you need to pass returnPoints = False. To understand it, we will take the rectangle image above. First I found its contour as cnt. Now I found its convex @@ -119,10 +119,10 @@ You will see it again when we discuss about convexity defects. 6. 
Checking Convexity --------------------- -There is a function to check if a curve is convex or not, **cv2.isContourConvex()**. It just return +There is a function to check if a curve is convex or not, **cv.isContourConvex()**. It just return whether True or False. Not a big deal. @code{.py} -k = cv2.isContourConvex(cnt) +k = cv.isContourConvex(cnt) @endcode 7. Bounding Rectangle @@ -133,25 +133,25 @@ There are two types of bounding rectangles. ### 7.a. Straight Bounding Rectangle It is a straight rectangle, it doesn't consider the rotation of the object. So area of the bounding -rectangle won't be minimum. It is found by the function **cv2.boundingRect()**. +rectangle won't be minimum. It is found by the function **cv.boundingRect()**. Let (x,y) be the top-left coordinate of the rectangle and (w,h) be its width and height. @code{.py} -x,y,w,h = cv2.boundingRect(cnt) -cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2) +x,y,w,h = cv.boundingRect(cnt) +cv.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2) @endcode ### 7.b. Rotated Rectangle Here, bounding rectangle is drawn with minimum area, so it considers the rotation also. The function -used is **cv2.minAreaRect()**. It returns a Box2D structure which contains following detals - ( +used is **cv.minAreaRect()**. It returns a Box2D structure which contains following detals - ( center (x,y), (width, height), angle of rotation ). But to draw this rectangle, we need 4 corners of -the rectangle. It is obtained by the function **cv2.boxPoints()** +the rectangle. It is obtained by the function **cv.boxPoints()** @code{.py} -rect = cv2.minAreaRect(cnt) -box = cv2.boxPoints(rect) +rect = cv.minAreaRect(cnt) +box = cv.boxPoints(rect) box = np.int0(box) -cv2.drawContours(img,[box],0,(0,0,255),2) +cv.drawContours(img,[box],0,(0,0,255),2) @endcode Both the rectangles are shown in a single image. Green rectangle shows the normal bounding rect. Red rectangle is the rotated rect. @@ -161,13 +161,13 @@ rectangle is the rotated rect. 8. Minimum Enclosing Circle --------------------------- -Next we find the circumcircle of an object using the function **cv2.minEnclosingCircle()**. It is a +Next we find the circumcircle of an object using the function **cv.minEnclosingCircle()**. It is a circle which completely covers the object with minimum area. @code{.py} -(x,y),radius = cv2.minEnclosingCircle(cnt) +(x,y),radius = cv.minEnclosingCircle(cnt) center = (int(x),int(y)) radius = int(radius) -cv2.circle(img,center,radius,(0,255,0),2) +cv.circle(img,center,radius,(0,255,0),2) @endcode ![image](images/circumcircle.png) @@ -177,8 +177,8 @@ cv2.circle(img,center,radius,(0,255,0),2) Next one is to fit an ellipse to an object. It returns the rotated rectangle in which the ellipse is inscribed. @code{.py} -ellipse = cv2.fitEllipse(cnt) -cv2.ellipse(img,ellipse,(0,255,0),2) +ellipse = cv.fitEllipse(cnt) +cv.ellipse(img,ellipse,(0,255,0),2) @endcode ![image](images/fitellipse.png) @@ -189,10 +189,10 @@ Similarly we can fit a line to a set of points. Below image contains a set of wh approximate a straight line to it. 
@code{.py} rows,cols = img.shape[:2] -[vx,vy,x,y] = cv2.fitLine(cnt, cv2.DIST_L2,0,0.01,0.01) +[vx,vy,x,y] = cv.fitLine(cnt, cv.DIST_L2,0,0.01,0.01) lefty = int((-x*vy/vx) + y) righty = int(((cols-x)*vy/vx)+y) -cv2.line(img,(cols-1,righty),(0,lefty),(0,255,0),2) +cv.line(img,(cols-1,righty),(0,lefty),(0,255,0),2) @endcode ![image](images/fitline.jpg) diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.markdown index 2fe1b5e..a07b5e8 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.markdown @@ -15,7 +15,7 @@ It is the ratio of width to height of bounding rect of the object. \f[Aspect \; Ratio = \frac{Width}{Height}\f] @code{.py} -x,y,w,h = cv2.boundingRect(cnt) +x,y,w,h = cv.boundingRect(cnt) aspect_ratio = float(w)/h @endcode @@ -26,8 +26,8 @@ Extent is the ratio of contour area to bounding rectangle area. \f[Extent = \frac{Object \; Area}{Bounding \; Rectangle \; Area}\f] @code{.py} -area = cv2.contourArea(cnt) -x,y,w,h = cv2.boundingRect(cnt) +area = cv.contourArea(cnt) +x,y,w,h = cv.boundingRect(cnt) rect_area = w*h extent = float(area)/rect_area @endcode @@ -39,9 +39,9 @@ Solidity is the ratio of contour area to its convex hull area. \f[Solidity = \frac{Contour \; Area}{Convex \; Hull \; Area}\f] @code{.py} -area = cv2.contourArea(cnt) -hull = cv2.convexHull(cnt) -hull_area = cv2.contourArea(hull) +area = cv.contourArea(cnt) +hull = cv.convexHull(cnt) +hull_area = cv.contourArea(hull) solidity = float(area)/hull_area @endcode @@ -52,7 +52,7 @@ Equivalent Diameter is the diameter of the circle whose area is same as the cont \f[Equivalent \; Diameter = \sqrt{\frac{4 \times Contour \; Area}{\pi}}\f] @code{.py} -area = cv2.contourArea(cnt) +area = cv.contourArea(cnt) equi_diameter = np.sqrt(4*area/np.pi) @endcode @@ -62,7 +62,7 @@ equi_diameter = np.sqrt(4*area/np.pi) Orientation is the angle at which object is directed. Following method also gives the Major Axis and Minor Axis lengths. @code{.py} -(x,y),(MA,ma),angle = cv2.fitEllipse(cnt) +(x,y),(MA,ma),angle = cv.fitEllipse(cnt) @endcode 6. Mask and Pixel Points @@ -71,9 +71,9 @@ Minor Axis lengths. In some cases, we may need all the points which comprises that object. It can be done as follows: @code{.py} mask = np.zeros(imgray.shape,np.uint8) -cv2.drawContours(mask,[cnt],0,255,-1) +cv.drawContours(mask,[cnt],0,255,-1) pixelpoints = np.transpose(np.nonzero(mask)) -#pixelpoints = cv2.findNonZero(mask) +#pixelpoints = cv.findNonZero(mask) @endcode Here, two methods, one using Numpy functions, next one using OpenCV function (last commented line) are given to do the same. Results are also same, but with a slight difference. Numpy gives @@ -85,7 +85,7 @@ basically the answers will be interchanged. Note that, **row = x** and **column We can find these parameters using a mask image. @code{.py} -min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(imgray,mask = mask) +min_val, max_val, min_loc, max_loc = cv.minMaxLoc(imgray,mask = mask) @endcode 8. Mean Color or Mean Intensity @@ -94,7 +94,7 @@ min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(imgray,mask = mask) Here, we can find the average color of an object. Or it can be average intensity of the object in grayscale mode. We again use the same mask to do it. 
@code{.py} -mean_val = cv2.mean(im,mask = mask) +mean_val = cv.mean(im,mask = mask) @endcode 9. Extreme Points diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown index a3e3928..c2055f7 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown @@ -6,7 +6,7 @@ Goal - Understand what contours are. - Learn to find contours, draw contours etc -- You will see these functions : **cv2.findContours()**, **cv2.drawContours()** +- You will see these functions : **cv.findContours()**, **cv.drawContours()** What are contours? ------------------ @@ -24,14 +24,14 @@ detection and recognition. Let's see how to find contours of a binary image: @code{.py} import numpy as np -import cv2 +import cv2 as cv -im = cv2.imread('test.jpg') -imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) -ret, thresh = cv2.threshold(imgray, 127, 255, 0) -im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) +im = cv.imread('test.jpg') +imgray = cv.cvtColor(im, cv.COLOR_BGR2GRAY) +ret, thresh = cv.threshold(imgray, 127, 255, 0) +im2, contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) @endcode -See, there are three arguments in **cv2.findContours()** function, first one is source image, second +See, there are three arguments in **cv.findContours()** function, first one is source image, second is contour retrieval mode, third is contour approximation method. And it outputs a modified image, the contours and hierarchy. contours is a Python list of all the contours in the image. Each individual contour is a Numpy array of (x,y) coordinates of boundary points of the object. @@ -42,7 +42,7 @@ the values given to them in code sample will work fine for all images. How to draw the contours? ------------------------- -To draw the contours, cv2.drawContours function is used. It can also be used to draw any shape +To draw the contours, cv.drawContours function is used. It can also be used to draw any shape provided you have its boundary points. Its first argument is source image, second argument is the contours which should be passed as a Python list, third argument is index of contours (useful when drawing individual contour. To draw all contours, pass -1) and remaining arguments are color, @@ -50,16 +50,16 @@ thickness etc. * To draw all the contours in an image: @code{.py} -cv2.drawContours(img, contours, -1, (0,255,0), 3) +cv.drawContours(img, contours, -1, (0,255,0), 3) @endcode * To draw an individual contour, say 4th contour: @code{.py} -cv2.drawContours(img, contours, 3, (0,255,0), 3) +cv.drawContours(img, contours, 3, (0,255,0), 3) @endcode * But most of the time, below method will be useful: @code{.py} cnt = contours[4] -cv2.drawContours(img, [cnt], 0, (0,255,0), 3) +cv.drawContours(img, [cnt], 0, (0,255,0), 3) @endcode @note Last two methods are same, but when you go forward, you will see last one is more useful. @@ -67,21 +67,21 @@ cv2.drawContours(img, [cnt], 0, (0,255,0), 3) Contour Approximation Method ============================ -This is the third argument in cv2.findContours function. What does it denote actually? +This is the third argument in cv.findContours function. What does it denote actually? Above, we told that contours are the boundaries of a shape with same intensity. 
It stores the (x,y) coordinates of the boundary of a shape. But does it store all the coordinates ? That is specified by this contour approximation method. -If you pass cv2.CHAIN_APPROX_NONE, all the boundary points are stored. But actually do we need all +If you pass cv.CHAIN_APPROX_NONE, all the boundary points are stored. But actually do we need all the points? For eg, you found the contour of a straight line. Do you need all the points on the line to represent that line? No, we need just two end points of that line. This is what -cv2.CHAIN_APPROX_SIMPLE does. It removes all redundant points and compresses the contour, thereby +cv.CHAIN_APPROX_SIMPLE does. It removes all redundant points and compresses the contour, thereby saving memory. Below image of a rectangle demonstrate this technique. Just draw a circle on all the coordinates in -the contour array (drawn in blue color). First image shows points I got with cv2.CHAIN_APPROX_NONE -(734 points) and second image shows the one with cv2.CHAIN_APPROX_SIMPLE (only 4 points). See, how +the contour array (drawn in blue color). First image shows points I got with cv.CHAIN_APPROX_NONE +(734 points) and second image shows the one with cv.CHAIN_APPROX_SIMPLE (only 4 points). See, how much memory it saves!!! ![image](images/none.jpg) diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown index 424469a..831754d 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown @@ -10,9 +10,9 @@ Theory ------ In the last few articles on contours, we have worked with several functions related to contours -provided by OpenCV. But when we found the contours in image using **cv2.findContours()** function, -we have passed an argument, **Contour Retrieval Mode**. We usually passed **cv2.RETR_LIST** or -**cv2.RETR_TREE** and it worked nice. But what does it actually mean ? +provided by OpenCV. But when we found the contours in image using **cv.findContours()** function, +we have passed an argument, **Contour Retrieval Mode**. We usually passed **cv.RETR_LIST** or +**cv.RETR_TREE** and it worked nice. But what does it actually mean ? Also, in the output, we got three arrays, first is the image, second is our contours, and one more output which we named as **hierarchy** (Please checkout the codes in previous articles). But we @@ -23,7 +23,7 @@ That is what we are going to deal in this article. ### What is Hierarchy? -Normally we use the **cv2.findContours()** function to detect objects in an image, right ? Sometimes +Normally we use the **cv.findContours()** function to detect objects in an image, right ? Sometimes objects are in different locations. But in some cases, some shapes are inside other shapes. Just like nested figures. In this case, we call outer one as **parent** and inner one as **child**. This way, contours in an image has some relationship to each other. And we can specify how one contour is @@ -82,8 +82,8 @@ contour-3a. For contour-3a, it is contour-3 and so on. @note If there is no child or parent, that field is taken as -1 So now we know about the hierarchy style used in OpenCV, we can check into Contour Retrieval Modes -in OpenCV with the help of same image given above. 
ie what do flags like cv2.RETR_LIST, -cv2.RETR_TREE, cv2.RETR_CCOMP, cv2.RETR_EXTERNAL etc mean? +in OpenCV with the help of same image given above. ie what do flags like cv.RETR_LIST, +cv.RETR_TREE, cv.RETR_CCOMP, cv.RETR_EXTERNAL etc mean? Contour Retrieval Mode ---------------------- @@ -185,7 +185,7 @@ array([[[ 3, -1, 1, -1], And this is the final guy, Mr.Perfect. It retrieves all the contours and creates a full family hierarchy list. **It even tells, who is the grandpa, father, son, grandson and even beyond... :)**. -For examle, I took above image, rewrite the code for cv2.RETR_TREE, reorder the contours as per the +For examle, I took above image, rewrite the code for cv.RETR_TREE, reorder the contours as per the result given by OpenCV and analyze it. Again, red letters give the contour number and green letters give the hierarchy order. diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown index 81b4a74..378099a 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown @@ -17,11 +17,11 @@ Theory and Code We saw what is convex hull in second chapter about contours. Any deviation of the object from this hull can be considered as convexity defect. -OpenCV comes with a ready-made function to find this, **cv2.convexityDefects()**. A basic function +OpenCV comes with a ready-made function to find this, **cv.convexityDefects()**. A basic function call would look like below: @code{.py} -hull = cv2.convexHull(cnt,returnPoints = False) -defects = cv2.convexityDefects(cnt,hull) +hull = cv.convexHull(cnt,returnPoints = False) +defects = cv.convexityDefects(cnt,hull) @endcode @note Remember we have to pass returnPoints = False while finding convex hull, in order to find @@ -33,29 +33,29 @@ line joining start point and end point, then draw a circle at the farthest point three values returned are indices of cnt. So we have to bring those values from cnt. @code{.py} -import cv2 +import cv2 as cv import numpy as np -img = cv2.imread('star.jpg') -img_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) -ret,thresh = cv2.threshold(img_gray, 127, 255,0) -im2,contours,hierarchy = cv2.findContours(thresh,2,1) +img = cv.imread('star.jpg') +img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) +ret,thresh = cv.threshold(img_gray, 127, 255,0) +im2,contours,hierarchy = cv.findContours(thresh,2,1) cnt = contours[0] -hull = cv2.convexHull(cnt,returnPoints = False) -defects = cv2.convexityDefects(cnt,hull) +hull = cv.convexHull(cnt,returnPoints = False) +defects = cv.convexityDefects(cnt,hull) for i in range(defects.shape[0]): s,e,f,d = defects[i,0] start = tuple(cnt[s][0]) end = tuple(cnt[e][0]) far = tuple(cnt[f][0]) - cv2.line(img,start,end,[0,255,0],2) - cv2.circle(img,far,5,[0,0,255],-1) + cv.line(img,start,end,[0,255,0],2) + cv.circle(img,far,5,[0,0,255],-1) -cv2.imshow('img',img) -cv2.waitKey(0) -cv2.destroyAllWindows() +cv.imshow('img',img) +cv.waitKey(0) +cv.destroyAllWindows() @endcode And see the result: @@ -69,7 +69,7 @@ if point is on the contour. For example, we can check the point (50,50) as follows: @code{.py} -dist = cv2.pointPolygonTest(cnt,(50,50),True) +dist = cv.pointPolygonTest(cnt,(50,50),True) @endcode In the function, third argument is measureDist. 
If it is True, it finds the signed distance. If False, it finds whether the point is inside or outside or on the contour (it returns +1, -1, 0 @@ -80,25 +80,25 @@ time consuming process. So, making it False gives about 2-3X speedup. ### 3. Match Shapes -OpenCV comes with a function **cv2.matchShapes()** which enables us to compare two shapes, or two +OpenCV comes with a function **cv.matchShapes()** which enables us to compare two shapes, or two contours and returns a metric showing the similarity. The lower the result, the better match it is. It is calculated based on the hu-moment values. Different measurement methods are explained in the docs. @code{.py} -import cv2 +import cv2 as cv import numpy as np -img1 = cv2.imread('star.jpg',0) -img2 = cv2.imread('star2.jpg',0) +img1 = cv.imread('star.jpg',0) +img2 = cv.imread('star2.jpg',0) -ret, thresh = cv2.threshold(img1, 127, 255,0) -ret, thresh2 = cv2.threshold(img2, 127, 255,0) -im2,contours,hierarchy = cv2.findContours(thresh,2,1) +ret, thresh = cv.threshold(img1, 127, 255,0) +ret, thresh2 = cv.threshold(img2, 127, 255,0) +im2,contours,hierarchy = cv.findContours(thresh,2,1) cnt1 = contours[0] -im2,contours,hierarchy = cv2.findContours(thresh2,2,1) +im2,contours,hierarchy = cv.findContours(thresh2,2,1) cnt2 = contours[0] -ret = cv2.matchShapes(cnt1,cnt2,1,0.0) +ret = cv.matchShapes(cnt1,cnt2,1,0.0) print( ret ) @endcode I tried matching shapes with different shapes given below: @@ -115,7 +115,7 @@ See, even image rotation doesn't affect much on this comparison. @sa [Hu-Moments](http://en.wikipedia.org/wiki/Image_moment#Rotation_invariant_moments) are seven moments invariant to translation, rotation and scale. Seventh one is skew-invariant. Those values -can be found using **cv2.HuMoments()** function. +can be found using **cv.HuMoments()** function. Additional Resources ==================== @@ -123,10 +123,10 @@ Additional Resources Exercises --------- --# Check the documentation for **cv2.pointPolygonTest()**, you can find a nice image in Red and +-# Check the documentation for **cv.pointPolygonTest()**, you can find a nice image in Red and Blue color. It represents the distance from all pixels to the white curve on it. All pixels inside curve is blue depending on the distance. Similarly outside points are red. Contour edges are marked with White. So problem is simple. Write a code to create such a representation of distance. --# Compare images of digits or letters using **cv2.matchShapes()**. ( That would be a simple step +-# Compare images of digits or letters using **cv.matchShapes()**. ( That would be a simple step towards OCR ) diff --git a/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown b/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown index bc3e87b..68b8613 100644 --- a/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown +++ b/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown @@ -15,7 +15,7 @@ As in one-dimensional signals, images also can be filtered with various low-pass high-pass filters(HPF) etc. LPF helps in removing noises, blurring the images etc. HPF filters helps in finding edges in the images. -OpenCV provides a function **cv2.filter2D()** to convolve a kernel with an image. As an example, we +OpenCV provides a function **cv.filter2D()** to convolve a kernel with an image. As an example, we will try an averaging filter on an image. 
A 5x5 averaging filter kernel will look like below: \f[K = \frac{1}{25} \begin{bmatrix} 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \end{bmatrix}\f] @@ -24,14 +24,14 @@ Operation is like this: keep this kernel above a pixel, add all the 25 pixels be take its average and replace the central pixel with the new average value. It continues this operation for all the pixels in the image. Try this code and check the result: @code{.py} -import cv2 import numpy as np +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('opencv_logo.png') +img = cv.imread('opencv_logo.png') kernel = np.ones((5,5),np.float32)/25 -dst = cv2.filter2D(img,-1,kernel) +dst = cv.filter2D(img,-1,kernel) plt.subplot(121),plt.imshow(img),plt.title('Original') plt.xticks([]), plt.yticks([]) @@ -55,23 +55,23 @@ blur the edges too). OpenCV provides mainly four types of blurring techniques. This is done by convolving image with a normalized box filter. It simply takes the average of all the pixels under kernel area and replace the central element. This is done by the function -**cv2.blur()** or **cv2.boxFilter()**. Check the docs for more details about the kernel. We should +**cv.blur()** or **cv.boxFilter()**. Check the docs for more details about the kernel. We should specify the width and height of kernel. A 3x3 normalized box filter would look like below: \f[K = \frac{1}{9} \begin{bmatrix} 1 & 1 & 1 \\ 1 & 1 & 1 \\ 1 & 1 & 1 \end{bmatrix}\f] -@note If you don't want to use normalized box filter, use **cv2.boxFilter()**. Pass an argument +@note If you don't want to use normalized box filter, use **cv.boxFilter()**. Pass an argument normalize=False to the function. Check a sample demo below with a kernel of 5x5 size: @code{.py} -import cv2 +import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv2.imread('opencv-logo-white.png') +img = cv.imread('opencv-logo-white.png') -blur = cv2.blur(img,(5,5)) +blur = cv.blur(img,(5,5)) plt.subplot(121),plt.imshow(img),plt.title('Original') plt.xticks([]), plt.yticks([]) @@ -86,17 +86,17 @@ Result: ### 2. Gaussian Blurring In this, instead of box filter, gaussian kernel is used. It is done with the function, -**cv2.GaussianBlur()**. We should specify the width and height of kernel which should be positive +**cv.GaussianBlur()**. We should specify the width and height of kernel which should be positive and odd. We also should specify the standard deviation in X and Y direction, sigmaX and sigmaY respectively. If only sigmaX is specified, sigmaY is taken as same as sigmaX. If both are given as zeros, they are calculated from kernel size. Gaussian blurring is highly effective in removing gaussian noise from the image. -If you want, you can create a Gaussian kernel with the function, **cv2.getGaussianKernel()**. +If you want, you can create a Gaussian kernel with the function, **cv.getGaussianKernel()**. The above code can be modified for Gaussian blurring: @code{.py} -blur = cv2.GaussianBlur(img,(5,5),0) +blur = cv.GaussianBlur(img,(5,5),0) @endcode Result: @@ -104,7 +104,7 @@ Result: ### 3. Median Blurring -Here, the function **cv2.medianBlur()** takes median of all the pixels under kernel area and central +Here, the function **cv.medianBlur()** takes median of all the pixels under kernel area and central element is replaced with this median value. This is highly effective against salt-and-pepper noise in the images. 
Interesting thing is that, in the above filters, central element is a newly calculated value which may be a pixel value in the image or a new value. But in median blurring, @@ -113,7 +113,7 @@ effectively. Its kernel size should be a positive odd integer. In this demo, I added a 50% noise to our original image and applied median blur. Check the result: @code{.py} -median = cv2.medianBlur(img,5) +median = cv.medianBlur(img,5) @endcode Result: @@ -121,7 +121,7 @@ Result: ### 4. Bilateral Filtering -**cv2.bilateralFilter()** is highly effective in noise removal while keeping edges sharp. But the +**cv.bilateralFilter()** is highly effective in noise removal while keeping edges sharp. But the operation is slower compared to other filters. We already saw that gaussian filter takes the a neighbourhood around the pixel and find its gaussian weighted average. This gaussian filter is a function of space alone, that is, nearby pixels are considered while filtering. It doesn't consider @@ -136,7 +136,7 @@ pixels at edges will have large intensity variation. Below samples shows use bilateral filter (For details on arguments, visit docs). @code{.py} -blur = cv2.bilateralFilter(img,9,75,75) +blur = cv.bilateralFilter(img,9,75,75) @endcode Result: diff --git a/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown b/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown index a71878f..ea45180 100644 --- a/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown +++ b/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown @@ -6,35 +6,35 @@ Goals - Learn to apply different geometric transformation to images like translation, rotation, affine transformation etc. -- You will see these functions: **cv2.getPerspectiveTransform** +- You will see these functions: **cv.getPerspectiveTransform** Transformations --------------- -OpenCV provides two transformation functions, **cv2.warpAffine** and **cv2.warpPerspective**, with -which you can have all kinds of transformations. **cv2.warpAffine** takes a 2x3 transformation -matrix while **cv2.warpPerspective** takes a 3x3 transformation matrix as input. +OpenCV provides two transformation functions, **cv.warpAffine** and **cv.warpPerspective**, with +which you can have all kinds of transformations. **cv.warpAffine** takes a 2x3 transformation +matrix while **cv.warpPerspective** takes a 3x3 transformation matrix as input. ### Scaling -Scaling is just resizing of the image. OpenCV comes with a function **cv2.resize()** for this +Scaling is just resizing of the image. OpenCV comes with a function **cv.resize()** for this purpose. The size of the image can be specified manually, or you can specify the scaling factor. -Different interpolation methods are used. Preferable interpolation methods are **cv2.INTER_AREA** -for shrinking and **cv2.INTER_CUBIC** (slow) & **cv2.INTER_LINEAR** for zooming. By default, -interpolation method used is **cv2.INTER_LINEAR** for all resizing purposes. You can resize an +Different interpolation methods are used. Preferable interpolation methods are **cv.INTER_AREA** +for shrinking and **cv.INTER_CUBIC** (slow) & **cv.INTER_LINEAR** for zooming. By default, +interpolation method used is **cv.INTER_LINEAR** for all resizing purposes. 
You can resize an input image either of following methods: @code{.py} -import cv2 import numpy as np +import cv2 as cv -img = cv2.imread('messi5.jpg') +img = cv.imread('messi5.jpg') -res = cv2.resize(img,None,fx=2, fy=2, interpolation = cv2.INTER_CUBIC) +res = cv.resize(img,None,fx=2, fy=2, interpolation = cv.INTER_CUBIC) #OR height, width = img.shape[:2] -res = cv2.resize(img,(2*width, 2*height), interpolation = cv2.INTER_CUBIC) +res = cv.resize(img,(2*width, 2*height), interpolation = cv.INTER_CUBIC) @endcode ### Translation @@ -43,25 +43,25 @@ be \f$(t_x,t_y)\f$, you can create the transformation matrix \f$\textbf{M}\f$ as \f[M = \begin{bmatrix} 1 & 0 & t_x \\ 0 & 1 & t_y \end{bmatrix}\f] -You can take make it into a Numpy array of type np.float32 and pass it into **cv2.warpAffine()** +You can take make it into a Numpy array of type np.float32 and pass it into **cv.warpAffine()** function. See below example for a shift of (100,50): @code{.py} -import cv2 import numpy as np +import cv2 as cv -img = cv2.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg',0) rows,cols = img.shape M = np.float32([[1,0,100],[0,1,50]]) -dst = cv2.warpAffine(img,M,(cols,rows)) +dst = cv.warpAffine(img,M,(cols,rows)) -cv2.imshow('img',dst) -cv2.waitKey(0) -cv2.destroyAllWindows() +cv.imshow('img',dst) +cv.waitKey(0) +cv.destroyAllWindows() @endcode **warning** -Third argument of the **cv2.warpAffine()** function is the size of the output image, which should +Third argument of the **cv.warpAffine()** function is the size of the output image, which should be in the form of **(width, height)**. Remember width = number of columns, and height = number of rows. @@ -84,14 +84,14 @@ where: \f[\begin{array}{l} \alpha = scale \cdot \cos \theta , \\ \beta = scale \cdot \sin \theta \end{array}\f] -To find this transformation matrix, OpenCV provides a function, **cv2.getRotationMatrix2D**. Check +To find this transformation matrix, OpenCV provides a function, **cv.getRotationMatrix2D**. Check below example which rotates the image by 90 degree with respect to center without any scaling. @code{.py} -img = cv2.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg',0) rows,cols = img.shape -M = cv2.getRotationMatrix2D((cols/2,rows/2),90,1) -dst = cv2.warpAffine(img,M,(cols,rows)) +M = cv.getRotationMatrix2D((cols/2,rows/2),90,1) +dst = cv.warpAffine(img,M,(cols,rows)) @endcode See the result: @@ -101,20 +101,20 @@ See the result: In affine transformation, all parallel lines in the original image will still be parallel in the output image. To find the transformation matrix, we need three points from input image and their -corresponding locations in output image. Then **cv2.getAffineTransform** will create a 2x3 matrix -which is to be passed to **cv2.warpAffine**. +corresponding locations in output image. Then **cv.getAffineTransform** will create a 2x3 matrix +which is to be passed to **cv.warpAffine**. 
Check below example, and also look at the points I selected (which are marked in Green color): @code{.py} -img = cv2.imread('drawing.png') +img = cv.imread('drawing.png') rows,cols,ch = img.shape pts1 = np.float32([[50,50],[200,50],[50,200]]) pts2 = np.float32([[10,100],[200,50],[100,250]]) -M = cv2.getAffineTransform(pts1,pts2) +M = cv.getAffineTransform(pts1,pts2) -dst = cv2.warpAffine(img,M,(cols,rows)) +dst = cv.warpAffine(img,M,(cols,rows)) plt.subplot(121),plt.imshow(img),plt.title('Input') plt.subplot(122),plt.imshow(dst),plt.title('Output') @@ -130,20 +130,20 @@ For perspective transformation, you need a 3x3 transformation matrix. Straight l straight even after the transformation. To find this transformation matrix, you need 4 points on the input image and corresponding points on the output image. Among these 4 points, 3 of them should not be collinear. Then transformation matrix can be found by the function -**cv2.getPerspectiveTransform**. Then apply **cv2.warpPerspective** with this 3x3 transformation +**cv.getPerspectiveTransform**. Then apply **cv.warpPerspective** with this 3x3 transformation matrix. See the code below: @code{.py} -img = cv2.imread('sudoku.png') +img = cv.imread('sudoku.png') rows,cols,ch = img.shape pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]]) pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]]) -M = cv2.getPerspectiveTransform(pts1,pts2) +M = cv.getPerspectiveTransform(pts1,pts2) -dst = cv2.warpPerspective(img,M,(300,300)) +dst = cv.warpPerspective(img,M,(300,300)) plt.subplot(121),plt.imshow(img),plt.title('Input') plt.subplot(122),plt.imshow(dst),plt.title('Output') diff --git a/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown b/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown index a44a727..b57f1b8 100644 --- a/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown +++ b/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown @@ -64,24 +64,24 @@ It is illustrated in below image (Image Courtesy: cv2.calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]]) +
cv.calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]])
-# images : it is the source image of type uint8 or float32. it should be given in square brackets, ie, "[img]". @@ -78,8 +78,8 @@ and its parameters : So let's start with a sample image. Simply load an image in grayscale mode and find its full histogram. @code{.py} -img = cv2.imread('home.jpg',0) -hist = cv2.calcHist([img],[0],None,[256],[0,256]) +img = cv.imread('home.jpg',0) +hist = cv.calcHist([img],[0],None,[256],[0,256]) @endcode hist is a 256x1 array, each value corresponds to number of pixels in that image with its corresponding pixel value. @@ -118,11 +118,11 @@ Matplotlib comes with a histogram plotting function : matplotlib.pyplot.hist() It directly finds the histogram and plot it. You need not use calcHist() or np.histogram() function to find the histogram. See the code below: @code{.py} -import cv2 import numpy as np +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('home.jpg',0) +img = cv.imread('home.jpg',0) plt.hist(img.ravel(),256,[0,256]); plt.show() @endcode You will get a plot as below : @@ -132,14 +132,14 @@ You will get a plot as below : Or you can use normal plot of matplotlib, which would be good for BGR plot. For that, you need to find the histogram data first. Try below code: @code{.py} -import cv2 import numpy as np +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('home.jpg') +img = cv.imread('home.jpg') color = ('b','g','r') for i,col in enumerate(color): - histr = cv2.calcHist([img],[i],None,[256],[0,256]) + histr = cv.calcHist([img],[i],None,[256],[0,256]) plt.plot(histr,color = col) plt.xlim([0,256]) plt.show() @@ -154,28 +154,28 @@ should be due to the sky) ### 2. Using OpenCV Well, here you adjust the values of histograms along with its bin values to look like x,y -coordinates so that you can draw it using cv2.line() or cv2.polyline() function to generate same +coordinates so that you can draw it using cv.line() or cv.polyline() function to generate same image as above. This is already available with OpenCV-Python2 official samples. Check the code at samples/python/hist.py. Application of Mask ------------------- -We used cv2.calcHist() to find the histogram of the full image. What if you want to find histograms +We used cv.calcHist() to find the histogram of the full image. What if you want to find histograms of some regions of an image? Just create a mask image with white color on the region you want to find histogram and black otherwise. Then pass this as the mask. 
@code{.py} -img = cv2.imread('home.jpg',0) +img = cv.imread('home.jpg',0) # create a mask mask = np.zeros(img.shape[:2], np.uint8) mask[100:300, 100:400] = 255 -masked_img = cv2.bitwise_and(img,img,mask = mask) +masked_img = cv.bitwise_and(img,img,mask = mask) # Calculate histogram with mask and without mask # Check third argument for mask -hist_full = cv2.calcHist([img],[0],None,[256],[0,256]) -hist_mask = cv2.calcHist([img],[0],mask,[256],[0,256]) +hist_full = cv.calcHist([img],[0],None,[256],[0,256]) +hist_mask = cv.calcHist([img],[0],mask,[256],[0,256]) plt.subplot(221), plt.imshow(img, 'gray') plt.subplot(222), plt.imshow(mask,'gray') diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown index 933014d..99ef285 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown @@ -26,11 +26,11 @@ a very good explanation with worked out examples, so that you would understand a after reading that. Instead, here we will see its Numpy implementation. After that, we will see OpenCV function. @code{.py} -import cv2 import numpy as np +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('wiki.jpg',0) +img = cv.imread('wiki.jpg',0) hist,bins = np.histogram(img.flatten(),256,[0,256]) @@ -76,15 +76,15 @@ histogram equalized to make them all with same lighting conditions. Histograms Equalization in OpenCV --------------------------------- -OpenCV has a function to do this, **cv2.equalizeHist()**. Its input is just grayscale image and +OpenCV has a function to do this, **cv.equalizeHist()**. Its input is just grayscale image and output is our histogram equalized image. Below is a simple code snippet showing its usage for same image we used : @code{.py} -img = cv2.imread('wiki.jpg',0) -equ = cv2.equalizeHist(img) +img = cv.imread('wiki.jpg',0) +equ = cv.equalizeHist(img) res = np.hstack((img,equ)) #stacking images side-by-side -cv2.imwrite('res.png',res) +cv.imwrite('res.png',res) @endcode ![image](images/equalization_opencv.jpg) @@ -122,15 +122,15 @@ applied. Below code snippet shows how to apply CLAHE in OpenCV: @code{.py} import numpy as np -import cv2 +import cv2 as cv -img = cv2.imread('tsukuba_l.png',0) +img = cv.imread('tsukuba_l.png',0) # create a CLAHE object (Arguments are optional). -clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) +clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) cl1 = clahe.apply(img) -cv2.imwrite('clahe_2.jpg',cl1) +cv.imwrite('clahe_2.jpg',cl1) @endcode See the result below and compare it with results above, especially the statue region: diff --git a/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown b/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown index 50fc05c..7a424e9 100644 --- a/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown +++ b/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown @@ -6,7 +6,7 @@ Goal In this chapter, - We will learn to use Hough Transform to find circles in an image. 
- - We will see these functions: **cv2.HoughCircles()** + - We will see these functions: **cv.HoughCircles()** Theory ------ @@ -17,29 +17,29 @@ equation, we can see we have 3 parameters, so we need a 3D accumulator for hough would be highly ineffective. So OpenCV uses more trickier method, **Hough Gradient Method** which uses the gradient information of edges. -The function we use here is **cv2.HoughCircles()**. It has plenty of arguments which are well +The function we use here is **cv.HoughCircles()**. It has plenty of arguments which are well explained in the documentation. So we directly go to the code. @code{.py} -import cv2 import numpy as np +import cv2 as cv -img = cv2.imread('opencv-logo-white.png',0) -img = cv2.medianBlur(img,5) -cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR) +img = cv.imread('opencv-logo-white.png',0) +img = cv.medianBlur(img,5) +cimg = cv.cvtColor(img,cv.COLOR_GRAY2BGR) -circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,20, +circles = cv.HoughCircles(img,cv.HOUGH_GRADIENT,1,20, param1=50,param2=30,minRadius=0,maxRadius=0) circles = np.uint16(np.around(circles)) for i in circles[0,:]: # draw the outer circle - cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2) + cv.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2) # draw the center of the circle - cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3) + cv.circle(cimg,(i[0],i[1]),2,(0,0,255),3) -cv2.imshow('detected circles',cimg) -cv2.waitKey(0) -cv2.destroyAllWindows() +cv.imshow('detected circles',cimg) +cv.waitKey(0) +cv.destroyAllWindows() @endcode Result is shown below: diff --git a/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.markdown b/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.markdown index f42d6ad..17844b7 100644 --- a/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.markdown +++ b/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.markdown @@ -7,7 +7,7 @@ Goal In this chapter, - We will understand the concept of the Hough Transform. - We will see how to use it to detect lines in an image. - - We will see the following functions: **cv2.HoughLines()**, **cv2.HoughLinesP()** + - We will see the following functions: **cv.HoughLines()**, **cv.HoughLinesP()** Theory ------ @@ -62,7 +62,7 @@ denote they are the parameters of possible lines in the image. (Image courtesy: Hough Transform in OpenCV ========================= -Everything explained above is encapsulated in the OpenCV function, **cv2.HoughLines()**. It simply returns an array of :math:(rho, +Everything explained above is encapsulated in the OpenCV function, **cv.HoughLines()**. It simply returns an array of :math:(rho, theta)\` values. \f$\rho\f$ is measured in pixels and \f$\theta\f$ is measured in radians. First parameter, Input image should be a binary image, so apply threshold or use canny edge detection before applying hough transform. Second and third parameters are \f$\rho\f$ and \f$\theta\f$ accuracies @@ -88,7 +88,7 @@ Hough Transform and Probabilistic Hough Transform in Hough space. (Image Courtes OpenCV implementation is based on Robust Detection of Lines Using the Progressive Probabilistic Hough Transform by Matas, J. and Galambos, C. and Kittler, J.V. @cite Matas00. The function used is -**cv2.HoughLinesP()**. It has two new arguments. +**cv.HoughLinesP()**. It has two new arguments. - **minLineLength** - Minimum length of line. Line segments shorter than this are rejected. - **maxLineGap** - Maximum allowed gap between line segments to treat them as a single line. 
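The hunk above stops at the argument list, so here is a minimal sketch of how **cv.HoughLinesP()** with minLineLength and maxLineGap fits together under the new `cv` alias. The file names, Canny thresholds, vote threshold and the two argument values are illustrative only, not taken from the patch:
@code{.py}
import cv2 as cv
import numpy as np

img = cv.imread('sudoku.png')          # any image with straight edges
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
edges = cv.Canny(gray, 50, 150, apertureSize=3)

# threshold = 100 accumulator votes; segments shorter than minLineLength
# are rejected, and gaps up to maxLineGap pixels are merged into one segment
lines = cv.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength=100, maxLineGap=10)

for line in lines:
    x1, y1, x2, y2 = line[0]           # each entry is one detected segment
    cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

cv.imwrite('houghlinesp.jpg', img)
@endcode
Unlike **cv.HoughLines()**, which returns (rho, theta) pairs, **cv.HoughLinesP()** directly returns the two endpoints of each segment, so the result can be drawn without any further conversion.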
diff --git a/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown b/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown index 1c0ef42..35e716d 100644 --- a/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown +++ b/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown @@ -7,8 +7,8 @@ Goal In this chapter, - We will learn different morphological operations like Erosion, Dilation, Opening, Closing etc. - - We will see different functions like : **cv2.erode()**, **cv2.dilate()**, - **cv2.morphologyEx()** etc. + - We will see different functions like : **cv.erode()**, **cv.dilate()**, + **cv.morphologyEx()** etc. Theory ------ @@ -35,12 +35,12 @@ detach two connected objects etc. Here, as an example, I would use a 5x5 kernel with full of ones. Let's see it how it works: @code{.py} -import cv2 +import cv2 as cv import numpy as np -img = cv2.imread('j.png',0) +img = cv.imread('j.png',0) kernel = np.ones((5,5),np.uint8) -erosion = cv2.erode(img,kernel,iterations = 1) +erosion = cv.erode(img,kernel,iterations = 1) @endcode Result: @@ -54,7 +54,7 @@ Normally, in cases like noise removal, erosion is followed by dilation. Because, white noises, but it also shrinks our object. So we dilate it. Since noise is gone, they won't come back, but our object area increases. It is also useful in joining broken parts of an object. @code{.py} -dilation = cv2.dilate(img,kernel,iterations = 1) +dilation = cv.dilate(img,kernel,iterations = 1) @endcode Result: @@ -63,9 +63,9 @@ Result: ### 3. Opening Opening is just another name of **erosion followed by dilation**. It is useful in removing noise, as -we explained above. Here we use the function, **cv2.morphologyEx()** +we explained above. Here we use the function, **cv.morphologyEx()** @code{.py} -opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel) +opening = cv.morphologyEx(img, cv.MORPH_OPEN, kernel) @endcode Result: @@ -76,7 +76,7 @@ Result: Closing is reverse of Opening, **Dilation followed by Erosion**. It is useful in closing small holes inside the foreground objects, or small black points on the object. @code{.py} -closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel) +closing = cv.morphologyEx(img, cv.MORPH_CLOSE, kernel) @endcode Result: @@ -88,7 +88,7 @@ It is the difference between dilation and erosion of an image. The result will look like the outline of the object. @code{.py} -gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel) +gradient = cv.morphologyEx(img, cv.MORPH_GRADIENT, kernel) @endcode Result: @@ -99,7 +99,7 @@ Result: It is the difference between input image and Opening of the image. Below example is done for a 9x9 kernel. @code{.py} -tophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel) +tophat = cv.morphologyEx(img, cv.MORPH_TOPHAT, kernel) @endcode Result: @@ -109,7 +109,7 @@ Result: It is the difference between the closing of the input image and input image. @code{.py} -blackhat = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel) +blackhat = cv.morphologyEx(img, cv.MORPH_BLACKHAT, kernel) @endcode Result: @@ -120,11 +120,11 @@ Structuring Element We manually created a structuring elements in the previous examples with help of Numpy. It is rectangular shape. But in some cases, you may need elliptical/circular shaped kernels. So for this -purpose, OpenCV has a function, **cv2.getStructuringElement()**. You just pass the shape and size of +purpose, OpenCV has a function, **cv.getStructuringElement()**. 
You just pass the shape and size of the kernel, you get the desired kernel. @code{.py} # Rectangular Kernel ->>> cv2.getStructuringElement(cv2.MORPH_RECT,(5,5)) +>>> cv.getStructuringElement(cv.MORPH_RECT,(5,5)) array([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], @@ -132,7 +132,7 @@ array([[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]], dtype=uint8) # Elliptical Kernel ->>> cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)) +>>> cv.getStructuringElement(cv.MORPH_ELLIPSE,(5,5)) array([[0, 0, 1, 0, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], @@ -140,7 +140,7 @@ array([[0, 0, 1, 0, 0], [0, 0, 1, 0, 0]], dtype=uint8) # Cross-shaped Kernel ->>> cv2.getStructuringElement(cv2.MORPH_CROSS,(5,5)) +>>> cv.getStructuringElement(cv.MORPH_CROSS,(5,5)) array([[0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [1, 1, 1, 1, 1], diff --git a/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown b/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown index c6634c6..14e52fc 100644 --- a/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown +++ b/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown @@ -7,7 +7,7 @@ Goal In this chapter, - We will learn about Image Pyramids - We will use Image pyramids to create a new fruit, "Orapple" - - We will see these functions: **cv2.pyrUp()**, **cv2.pyrDown()** + - We will see these functions: **cv.pyrUp()**, **cv.pyrDown()** Theory ------ @@ -28,18 +28,18 @@ contribution from 5 pixels in underlying level with gaussian weights. By doing s image becomes \f$M/2 \times N/2\f$ image. So area reduces to one-fourth of original area. It is called an Octave. The same pattern continues as we go upper in pyramid (ie, resolution decreases). Similarly while expanding, area becomes 4 times in each level. We can find Gaussian pyramids using -**cv2.pyrDown()** and **cv2.pyrUp()** functions. +**cv.pyrDown()** and **cv.pyrUp()** functions. @code{.py} -img = cv2.imread('messi5.jpg') -lower_reso = cv2.pyrDown(higher_reso) +img = cv.imread('messi5.jpg') +lower_reso = cv.pyrDown(higher_reso) @endcode Below is the 4 levels in an image pyramid. ![image](images/messipyr.jpg) -Now you can go down the image pyramid with **cv2.pyrUp()** function. +Now you can go down the image pyramid with **cv.pyrUp()** function. @code{.py} -higher_reso2 = cv2.pyrUp(lower_reso) +higher_reso2 = cv.pyrUp(lower_reso) @endcode Remember, higher_reso2 is not equal to higher_reso, because once you decrease the resolution, you loose the information. Below image is 3 level down the pyramid created from smallest image in @@ -79,38 +79,38 @@ blending, Laplacian Pyramids etc. Simply it is done as follows: Below is the full code. (For sake of simplicity, each step is done separately which may take more memory. You can optimize it if you want so). 
@code{.py} -import cv2 +import cv2 as cv import numpy as np,sys -A = cv2.imread('apple.jpg') -B = cv2.imread('orange.jpg') +A = cv.imread('apple.jpg') +B = cv.imread('orange.jpg') # generate Gaussian pyramid for A G = A.copy() gpA = [G] for i in xrange(6): - G = cv2.pyrDown(G) + G = cv.pyrDown(G) gpA.append(G) # generate Gaussian pyramid for B G = B.copy() gpB = [G] for i in xrange(6): - G = cv2.pyrDown(G) + G = cv.pyrDown(G) gpB.append(G) # generate Laplacian Pyramid for A lpA = [gpA[5]] for i in xrange(5,0,-1): - GE = cv2.pyrUp(gpA[i]) - L = cv2.subtract(gpA[i-1],GE) + GE = cv.pyrUp(gpA[i]) + L = cv.subtract(gpA[i-1],GE) lpA.append(L) # generate Laplacian Pyramid for B lpB = [gpB[5]] for i in xrange(5,0,-1): - GE = cv2.pyrUp(gpB[i]) - L = cv2.subtract(gpB[i-1],GE) + GE = cv.pyrUp(gpB[i]) + L = cv.subtract(gpB[i-1],GE) lpB.append(L) # Now add left and right halves of images in each level @@ -123,14 +123,14 @@ for la,lb in zip(lpA,lpB): # now reconstruct ls_ = LS[0] for i in xrange(1,6): - ls_ = cv2.pyrUp(ls_) - ls_ = cv2.add(ls_, LS[i]) + ls_ = cv.pyrUp(ls_) + ls_ = cv.add(ls_, LS[i]) # image with direct connecting each half real = np.hstack((A[:,:cols/2],B[:,cols/2:])) -cv2.imwrite('Pyramid_blending2.jpg',ls_) -cv2.imwrite('Direct_blending.jpg',real) +cv.imwrite('Pyramid_blending2.jpg',ls_) +cv.imwrite('Direct_blending.jpg',real) @endcode Additional Resources -------------------- diff --git a/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown b/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown index 47b976c..c40c1d6 100644 --- a/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown +++ b/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown @@ -6,24 +6,24 @@ Goals In this chapter, you will learn - To find objects in an image using Template Matching - - You will see these functions : **cv2.matchTemplate()**, **cv2.minMaxLoc()** + - You will see these functions : **cv.matchTemplate()**, **cv.minMaxLoc()** Theory ------ Template Matching is a method for searching and finding the location of a template image in a larger -image. OpenCV comes with a function **cv2.matchTemplate()** for this purpose. It simply slides the +image. OpenCV comes with a function **cv.matchTemplate()** for this purpose. It simply slides the template image over the input image (as in 2D convolution) and compares the template and patch of input image under the template image. Several comparison methods are implemented in OpenCV. (You can check docs for more details). It returns a grayscale image, where each pixel denotes how much does the neighbourhood of that pixel match with template. If input image is of size (WxH) and template image is of size (wxh), output image will have a size -of (W-w+1, H-h+1). Once you got the result, you can use **cv2.minMaxLoc()** function to find where +of (W-w+1, H-h+1). Once you got the result, you can use **cv.minMaxLoc()** function to find where is the maximum/minimum value. Take it as the top-left corner of rectangle and take (w,h) as width and height of the rectangle. That rectangle is your region of template. -@note If you are using cv2.TM_SQDIFF as comparison method, minimum value gives the best match. +@note If you are using cv.TM_SQDIFF as comparison method, minimum value gives the best match. Template Matching in OpenCV --------------------------- @@ -34,35 +34,35 @@ Here, as an example, we will search for Messi's face in his photo. 
So I created We will try all the comparison methods so that we can see how their results look like: @code{.py} -import cv2 +import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv2.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg',0) img2 = img.copy() -template = cv2.imread('template.jpg',0) +template = cv.imread('template.jpg',0) w, h = template.shape[::-1] # All the 6 methods for comparison in a list -methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR', - 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED'] +methods = ['cv.TM_CCOEFF', 'cv.TM_CCOEFF_NORMED', 'cv.TM_CCORR', + 'cv.TM_CCORR_NORMED', 'cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED'] for meth in methods: img = img2.copy() method = eval(meth) # Apply template Matching - res = cv2.matchTemplate(img,template,method) - min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res) + res = cv.matchTemplate(img,template,method) + min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res) # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum - if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]: + if method in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]: top_left = min_loc else: top_left = max_loc bottom_right = (top_left[0] + w, top_left[1] + h) - cv2.rectangle(img,top_left, bottom_right, 255, 2) + cv.rectangle(img,top_left, bottom_right, 255, 2) plt.subplot(121),plt.imshow(res,cmap = 'gray') plt.title('Matching Result'), plt.xticks([]), plt.yticks([]) @@ -74,56 +74,56 @@ for meth in methods: @endcode See the results below: -- cv2.TM_CCOEFF +- cv.TM_CCOEFF ![image](images/template_ccoeff_1.jpg) -- cv2.TM_CCOEFF_NORMED +- cv.TM_CCOEFF_NORMED ![image](images/template_ccoeffn_2.jpg) -- cv2.TM_CCORR +- cv.TM_CCORR ![image](images/template_ccorr_3.jpg) -- cv2.TM_CCORR_NORMED +- cv.TM_CCORR_NORMED ![image](images/template_ccorrn_4.jpg) -- cv2.TM_SQDIFF +- cv.TM_SQDIFF ![image](images/template_sqdiff_5.jpg) -- cv2.TM_SQDIFF_NORMED +- cv.TM_SQDIFF_NORMED ![image](images/template_sqdiffn_6.jpg) -You can see that the result using **cv2.TM_CCORR** is not good as we expected. +You can see that the result using **cv.TM_CCORR** is not good as we expected. Template Matching with Multiple Objects --------------------------------------- In the previous section, we searched image for Messi's face, which occurs only once in the image. -Suppose you are searching for an object which has multiple occurances, **cv2.minMaxLoc()** won't +Suppose you are searching for an object which has multiple occurances, **cv.minMaxLoc()** won't give you all the locations. In that case, we will use thresholding. So in this example, we will use a screenshot of the famous game **Mario** and we will find the coins in it. 
@code{.py} -import cv2 +import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img_rgb = cv2.imread('mario.png') -img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY) -template = cv2.imread('mario_coin.png',0) +img_rgb = cv.imread('mario.png') +img_gray = cv.cvtColor(img_rgb, cv.COLOR_BGR2GRAY) +template = cv.imread('mario_coin.png',0) w, h = template.shape[::-1] -res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED) +res = cv.matchTemplate(img_gray,template,cv.TM_CCOEFF_NORMED) threshold = 0.8 loc = np.where( res >= threshold) for pt in zip(*loc[::-1]): - cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2) + cv.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2) -cv2.imwrite('res.png',img_rgb) +cv.imwrite('res.png',img_rgb) @endcode Result: diff --git a/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown b/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown index 0f4b790..1326bc5 100644 --- a/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown +++ b/doc/py_tutorials/py_imgproc/py_thresholding/py_thresholding.markdown @@ -6,24 +6,24 @@ Goal - In this tutorial, you will learn Simple thresholding, Adaptive thresholding, Otsu's thresholding etc. -- You will learn these functions : **cv2.threshold**, **cv2.adaptiveThreshold** etc. +- You will learn these functions : **cv.threshold**, **cv.adaptiveThreshold** etc. Simple Thresholding ------------------- Here, the matter is straight forward. If pixel value is greater than a threshold value, it is assigned one value (may be white), else it is assigned another value (may be black). The function -used is **cv2.threshold**. First argument is the source image, which **should be a grayscale +used is **cv.threshold**. First argument is the source image, which **should be a grayscale image**. Second argument is the threshold value which is used to classify the pixel values. Third argument is the maxVal which represents the value to be given if pixel value is more than (sometimes less than) the threshold value. OpenCV provides different styles of thresholding and it is decided by the fourth parameter of the function. Different types are: -- cv2.THRESH_BINARY -- cv2.THRESH_BINARY_INV -- cv2.THRESH_TRUNC -- cv2.THRESH_TOZERO -- cv2.THRESH_TOZERO_INV +- cv.THRESH_BINARY +- cv.THRESH_BINARY_INV +- cv.THRESH_TRUNC +- cv.THRESH_TOZERO +- cv.THRESH_TOZERO_INV Documentation clearly explain what each type is meant for. Please check out the documentation. @@ -32,16 +32,16 @@ our **thresholded image**. 
Code : @code{.py} -import cv2 +import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv2.imread('gradient.png',0) -ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY) -ret,thresh2 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV) -ret,thresh3 = cv2.threshold(img,127,255,cv2.THRESH_TRUNC) -ret,thresh4 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO) -ret,thresh5 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO_INV) +img = cv.imread('gradient.png',0) +ret,thresh1 = cv.threshold(img,127,255,cv.THRESH_BINARY) +ret,thresh2 = cv.threshold(img,127,255,cv.THRESH_BINARY_INV) +ret,thresh3 = cv.threshold(img,127,255,cv.THRESH_TRUNC) +ret,thresh4 = cv.threshold(img,127,255,cv.THRESH_TOZERO) +ret,thresh5 = cv.threshold(img,127,255,cv.THRESH_TOZERO_INV) titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV'] images = [img, thresh1, thresh2, thresh3, thresh4, thresh5] @@ -72,8 +72,8 @@ results for images with varying illumination. It has three ‘special’ input params and only one output argument. **Adaptive Method** - It decides how thresholding value is calculated. - - cv2.ADAPTIVE_THRESH_MEAN_C : threshold value is the mean of neighbourhood area. - - cv2.ADAPTIVE_THRESH_GAUSSIAN_C : threshold value is the weighted sum of neighbourhood + - cv.ADAPTIVE_THRESH_MEAN_C : threshold value is the mean of neighbourhood area. + - cv.ADAPTIVE_THRESH_GAUSSIAN_C : threshold value is the weighted sum of neighbourhood values where weights are a gaussian window. **Block Size** - It decides the size of neighbourhood area. @@ -83,18 +83,18 @@ It has three ‘special’ input params and only one output argument. Below piece of code compares global thresholding and adaptive thresholding for an image with varying illumination: @code{.py} -import cv2 +import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv2.imread('sudoku.png',0) -img = cv2.medianBlur(img,5) +img = cv.imread('sudoku.png',0) +img = cv.medianBlur(img,5) -ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY) -th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\ - cv2.THRESH_BINARY,11,2) -th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\ - cv2.THRESH_BINARY,11,2) +ret,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY) +th2 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_MEAN_C,\ + cv.THRESH_BINARY,11,2) +th3 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,\ + cv.THRESH_BINARY,11,2) titles = ['Original Image', 'Global Thresholding (v = 127)', 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding'] @@ -124,7 +124,7 @@ That is what Otsu binarization does. So in simple words, it automatically calcul value from image histogram for a bimodal image. (For images which are not bimodal, binarization won’t be accurate.) -For this, our cv2.threshold() function is used, but pass an extra flag, cv2.THRESH_OTSU. **For +For this, our cv.threshold() function is used, but pass an extra flag, cv.THRESH_OTSU. **For threshold value, simply pass zero**. Then the algorithm finds the optimal threshold value and returns you as the second output, retVal. If Otsu thresholding is not used, retVal is same as the threshold value you used. @@ -134,21 +134,21 @@ for a value of 127. In second case, I applied Otsu’s thresholding directly. In filtered image with a 5x5 gaussian kernel to remove the noise, then applied Otsu thresholding. See how noise filtering improves the result. 
@code{.py} -import cv2 +import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv2.imread('noisy2.png',0) +img = cv.imread('noisy2.png',0) # global thresholding -ret1,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY) +ret1,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY) # Otsu's thresholding -ret2,th2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) +ret2,th2 = cv.threshold(img,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU) # Otsu's thresholding after Gaussian filtering -blur = cv2.GaussianBlur(img,(5,5),0) -ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) +blur = cv.GaussianBlur(img,(5,5),0) +ret3,th3 = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU) # plot all the images and their histograms images = [img, 0, th1, @@ -188,11 +188,11 @@ where It actually finds a value of t which lies in between two peaks such that variances to both classes are minimum. It can be simply implemented in Python as follows: @code{.py} -img = cv2.imread('noisy2.png',0) -blur = cv2.GaussianBlur(img,(5,5),0) +img = cv.imread('noisy2.png',0) +blur = cv.GaussianBlur(img,(5,5),0) # find normalized_histogram, and its cumulative distribution function -hist = cv2.calcHist([blur],[0],None,[256],[0,256]) +hist = cv.calcHist([blur],[0],None,[256],[0,256]) hist_norm = hist.ravel()/hist.max() Q = hist_norm.cumsum() @@ -217,7 +217,7 @@ for i in xrange(1,256): thresh = i # find otsu's threshold value with OpenCV function -ret, otsu = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) +ret, otsu = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU) print( "{} {}".format(thresh,ret) ) @endcode *(Some of the functions may be new here, but we will cover them in coming chapters)* diff --git a/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown b/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown index 1c503c0..6104565 100644 --- a/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown +++ b/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown @@ -8,7 +8,7 @@ In this section, we will learn - To find the Fourier Transform of images using OpenCV - To utilize the FFT functions available in Numpy - Some applications of Fourier Transform - - We will see following functions : **cv2.dft()**, **cv2.idft()** etc + - We will see following functions : **cv.dft()**, **cv.idft()** etc Theory ------ @@ -50,11 +50,11 @@ you want to bring it to center, you need to shift the result by \f$\frac{N}{2}\f directions. This is simply done by the function, **np.fft.fftshift()**. (It is more easier to analyze). Once you found the frequency transform, you can find the magnitude spectrum. @code{.py} -import cv2 +import cv2 as cv import numpy as np from matplotlib import pyplot as plt -img = cv2.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg',0) f = np.fft.fft2(img) fshift = np.fft.fftshift(f) magnitude_spectrum = 20*np.log(np.abs(fshift)) @@ -112,21 +112,21 @@ Better option is Gaussian Windows. Fourier Transform in OpenCV --------------------------- -OpenCV provides the functions **cv2.dft()** and **cv2.idft()** for this. It returns the same result +OpenCV provides the functions **cv.dft()** and **cv.idft()** for this. It returns the same result as previous, but with two channels. First channel will have the real part of the result and second channel will have the imaginary part of the result. 
The input image should be converted to np.float32 first. We will see how to do it. @code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('messi5.jpg',0) +img = cv.imread('messi5.jpg',0) -dft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT) +dft = cv.dft(np.float32(img),flags = cv.DFT_COMPLEX_OUTPUT) dft_shift = np.fft.fftshift(dft) -magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1])) +magnitude_spectrum = 20*np.log(cv.magnitude(dft_shift[:,:,0],dft_shift[:,:,1])) plt.subplot(121),plt.imshow(img, cmap = 'gray') plt.title('Input Image'), plt.xticks([]), plt.yticks([]) @@ -135,7 +135,7 @@ plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([]) plt.show() @endcode -@note You can also use **cv2.cartToPolar()** which returns both magnitude and phase in a single shot +@note You can also use **cv.cartToPolar()** which returns both magnitude and phase in a single shot So, now we have to do inverse DFT. In previous session, we created a HPF, this time we will see how to remove high frequency contents in the image, ie we apply LPF to image. It actually blurs the @@ -153,8 +153,8 @@ mask[crow-30:crow+30, ccol-30:ccol+30] = 1 # apply mask and inverse DFT fshift = dft_shift*mask f_ishift = np.fft.ifftshift(fshift) -img_back = cv2.idft(f_ishift) -img_back = cv2.magnitude(img_back[:,:,0],img_back[:,:,1]) +img_back = cv.idft(f_ishift) +img_back = cv.magnitude(img_back[:,:,0],img_back[:,:,1]) plt.subplot(121),plt.imshow(img, cmap = 'gray') plt.title('Input Image'), plt.xticks([]), plt.yticks([]) @@ -166,7 +166,7 @@ See the result: ![image](images/fft4.jpg) -@note As usual, OpenCV functions **cv2.dft()** and **cv2.idft()** are faster than Numpy +@note As usual, OpenCV functions **cv.dft()** and **cv.idft()** are faster than Numpy counterparts. But Numpy functions are more user-friendly. For more details about performance issues, see below section. @@ -180,23 +180,23 @@ the array to any optimal size (by padding zeros) before finding DFT. For OpenCV, manually pad zeros. But for Numpy, you specify the new size of FFT calculation, and it will automatically pad zeros for you. -So how do we find this optimal size ? OpenCV provides a function, **cv2.getOptimalDFTSize()** for -this. It is applicable to both **cv2.dft()** and **np.fft.fft2()**. Let's check their performance +So how do we find this optimal size ? OpenCV provides a function, **cv.getOptimalDFTSize()** for +this. It is applicable to both **cv.dft()** and **np.fft.fft2()**. Let's check their performance using IPython magic command %timeit. @code{.py} -In [16]: img = cv2.imread('messi5.jpg',0) +In [16]: img = cv.imread('messi5.jpg',0) In [17]: rows,cols = img.shape In [18]: print("{} {}".format(rows,cols)) 342 548 -In [19]: nrows = cv2.getOptimalDFTSize(rows) -In [20]: ncols = cv2.getOptimalDFTSize(cols) +In [19]: nrows = cv.getOptimalDFTSize(rows) +In [20]: ncols = cv.getOptimalDFTSize(cols) In [21]: print("{} {}".format(nrows,ncols)) 360 576 @endcode See, the size (342,548) is modified to (360, 576). Now let's pad it with zeros (for OpenCV) and find their DFT calculation performance. You can do it by creating a new big zero array and copy the data -to it, or use **cv2.copyMakeBorder()**. +to it, or use **cv.copyMakeBorder()**. 
@code{.py} nimg = np.zeros((nrows,ncols)) nimg[:rows,:cols] = img @@ -205,8 +205,8 @@ OR: @code{.py} right = ncols - cols bottom = nrows - rows -bordertype = cv2.BORDER_CONSTANT #just to avoid line breakup in PDF file -nimg = cv2.copyMakeBorder(img,0,bottom,0,right,bordertype, value = 0) +bordertype = cv.BORDER_CONSTANT #just to avoid line breakup in PDF file +nimg = cv.copyMakeBorder(img,0,bottom,0,right,bordertype, value = 0) @endcode Now we calculate the DFT performance comparison of Numpy function: @code{.py} @@ -217,9 +217,9 @@ In [23]: %timeit fft2 = np.fft.fft2(img,[nrows,ncols]) @endcode It shows a 4x speedup. Now we will try the same with OpenCV functions. @code{.py} -In [24]: %timeit dft1= cv2.dft(np.float32(img),flags=cv2.DFT_COMPLEX_OUTPUT) +In [24]: %timeit dft1= cv.dft(np.float32(img),flags=cv.DFT_COMPLEX_OUTPUT) 100 loops, best of 3: 13.5 ms per loop -In [27]: %timeit dft2= cv2.dft(np.float32(nimg),flags=cv2.DFT_COMPLEX_OUTPUT) +In [27]: %timeit dft2= cv.dft(np.float32(nimg),flags=cv.DFT_COMPLEX_OUTPUT) 100 loops, best of 3: 3.11 ms per loop @endcode It also shows a 4x speed-up. You can also see that OpenCV functions are around 3x faster than Numpy @@ -232,7 +232,7 @@ A similar question was asked in a forum. The question is, why Laplacian is a hig Sobel is a HPF? etc. And the first answer given to it was in terms of Fourier Transform. Just take the fourier transform of Laplacian for some higher size of FFT. Analyze it: @code{.py} -import cv2 +import cv2 as cv import numpy as np from matplotlib import pyplot as plt @@ -240,7 +240,7 @@ from matplotlib import pyplot as plt mean_filter = np.ones((3,3)) # creating a guassian filter -x = cv2.getGaussianKernel(5,10) +x = cv.getGaussianKernel(5,10) gaussian = x*x.T # different edge detecting filters diff --git a/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.markdown b/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.markdown index 7dc57cb..7d2ca46 100644 --- a/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.markdown +++ b/doc/py_tutorials/py_imgproc/py_watershed/py_watershed.markdown @@ -6,7 +6,7 @@ Goal In this chapter, - We will learn to use marker-based image segmentation using watershed algorithm - - We will see: **cv2.watershed()** + - We will see: **cv.watershed()** Theory ------ @@ -45,12 +45,12 @@ We start with finding an approximate estimate of the coins. For that, we can use binarization. @code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('coins.png') -gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) -ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) +img = cv.imread('coins.png') +gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) +ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU) @endcode Result: @@ -78,18 +78,18 @@ obtained from subtracting sure_fg area from sure_bg area. 
@code{.py} # noise removal kernel = np.ones((3,3),np.uint8) -opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2) +opening = cv.morphologyEx(thresh,cv.MORPH_OPEN,kernel, iterations = 2) # sure background area -sure_bg = cv2.dilate(opening,kernel,iterations=3) +sure_bg = cv.dilate(opening,kernel,iterations=3) # Finding sure foreground area -dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5) -ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0) +dist_transform = cv.distanceTransform(opening,cv.DIST_L2,5) +ret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0) # Finding unknown region sure_fg = np.uint8(sure_fg) -unknown = cv2.subtract(sure_bg,sure_fg) +unknown = cv.subtract(sure_bg,sure_fg) @endcode See the result. In the thresholded image, we get some regions of coins which we are sure of coins and they are detached now. (In some cases, you may be interested in only foreground segmentation, @@ -103,7 +103,7 @@ Now we know for sure which are region of coins, which are background and all. So (it is an array of same size as that of original image, but with int32 datatype) and label the regions inside it. The regions we know for sure (whether foreground or background) are labelled with any positive integers, but different integers, and the area we don't know for sure are just left as -zero. For this we use **cv2.connectedComponents()**. It labels background of the image with 0, then +zero. For this we use **cv.connectedComponents()**. It labels background of the image with 0, then other objects are labelled with integers starting from 1. But we know that if background is marked with 0, watershed will consider it as unknown area. So we @@ -111,7 +111,7 @@ want to mark it with different integer. Instead, we will mark unknown region, de with 0. @code{.py} # Marker labelling -ret, markers = cv2.connectedComponents(sure_fg) +ret, markers = cv.connectedComponents(sure_fg) # Add one to all labels so that sure background is not 0, but 1 markers = markers+1 @@ -128,7 +128,7 @@ compared to unknown region. Now our marker is ready. It is time for final step, apply watershed. Then marker image will be modified. The boundary region will be marked with -1. @code{.py} -markers = cv2.watershed(img,markers) +markers = cv.watershed(img,markers) img[markers == -1] = [255,0,0] @endcode See the result below. For some coins, the region where they touch are segmented properly and for diff --git a/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/py_kmeans_opencv.markdown b/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/py_kmeans_opencv.markdown index a8237d8..05a1300 100644 --- a/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/py_kmeans_opencv.markdown +++ b/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/py_kmeans_opencv.markdown @@ -4,7 +4,7 @@ K-Means Clustering in OpenCV {#tutorial_py_kmeans_opencv} Goal ---- -- Learn to use **cv2.kmeans()** function in OpenCV for data clustering +- Learn to use **cv.kmeans()** function in OpenCV for data clustering Understanding Parameters ------------------------ @@ -16,9 +16,9 @@ Understanding Parameters -# **nclusters(K)** : Number of clusters required at end -# **criteria** : It is the iteration termination criteria. When this criteria is satisfied, algorithm iteration stops. Actually, it should be a tuple of 3 parameters. They are \`( type, max_iter, epsilon )\`: -# type of termination criteria. 
It has 3 flags as below: - - **cv2.TERM_CRITERIA_EPS** - stop the algorithm iteration if specified accuracy, *epsilon*, is reached. - - **cv2.TERM_CRITERIA_MAX_ITER** - stop the algorithm after the specified number of iterations, *max_iter*. - - **cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER** - stop the iteration when any of the above condition is met. + - **cv.TERM_CRITERIA_EPS** - stop the algorithm iteration if specified accuracy, *epsilon*, is reached. + - **cv.TERM_CRITERIA_MAX_ITER** - stop the algorithm after the specified number of iterations, *max_iter*. + - **cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER** - stop the iteration when any of the above condition is met. -# max_iter - An integer specifying maximum number of iterations. -# epsilon - Required accuracy @@ -26,7 +26,7 @@ Understanding Parameters initial labellings. The algorithm returns the labels that yield the best compactness. This compactness is returned as output. -# **flags** : This flag is used to specify how initial centers are taken. Normally two flags are - used for this : **cv2.KMEANS_PP_CENTERS** and **cv2.KMEANS_RANDOM_CENTERS**. + used for this : **cv.KMEANS_PP_CENTERS** and **cv.KMEANS_RANDOM_CENTERS**. ### Output parameters @@ -47,7 +47,7 @@ t-shirt problem where you use only height of people to decide the size of t-shir So we start by creating data and plot it in Matplotlib @code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt x = np.random.randint(25,100,25) @@ -70,13 +70,13 @@ that, whenever 10 iterations of algorithm is ran, or an accuracy of epsilon = 1. the algorithm and return the answer. @code{.py} # Define criteria = ( type, max_iter = 10 , epsilon = 1.0 ) -criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0) +criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0) # Set flags (Just to avoid line break in the code) -flags = cv2.KMEANS_RANDOM_CENTERS +flags = cv.KMEANS_RANDOM_CENTERS # Apply KMeans -compactness,labels,centers = cv2.kmeans(z,2,None,criteria,10,flags) +compactness,labels,centers = cv.kmeans(z,2,None,criteria,10,flags) @endcode This gives us the compactness, labels and centers. In this case, I got centers as 60 and 207. Labels will have the same size as that of test data where each data will be labelled as '0','1','2' etc. @@ -117,7 +117,7 @@ Check image below: Now I am directly moving to the code: @code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt X = np.random.randint(25,50,(25,2)) @@ -128,8 +128,8 @@ Z = np.vstack((X,Y)) Z = np.float32(Z) # define criteria and apply kmeans() -criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0) -ret,label,center=cv2.kmeans(Z,2,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS) +criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0) +ret,label,center=cv.kmeans(Z,2,None,criteria,10,cv.KMEANS_RANDOM_CENTERS) # Now separate the data, Note the flatten() A = Z[label.ravel()==0] @@ -161,27 +161,27 @@ specified number of colors. 
And again we need to reshape it back to the shape of Below is the code: @code{.py} import numpy as np -import cv2 +import cv2 as cv -img = cv2.imread('home.jpg') +img = cv.imread('home.jpg') Z = img.reshape((-1,3)) # convert to np.float32 Z = np.float32(Z) # define criteria, number of clusters(K) and apply kmeans() -criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0) +criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0) K = 8 -ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS) +ret,label,center=cv.kmeans(Z,K,None,criteria,10,cv.KMEANS_RANDOM_CENTERS) # Now convert back into uint8, and make original image center = np.uint8(center) res = center[label.flatten()] res2 = res.reshape((img.shape)) -cv2.imshow('res2',res2) -cv2.waitKey(0) -cv2.destroyAllWindows() +cv.imshow('res2',res2) +cv.waitKey(0) +cv.destroyAllWindows() @endcode See the result below for K=8: diff --git a/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.markdown b/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.markdown index 2c06b1a..5fbbff2 100644 --- a/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.markdown +++ b/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.markdown @@ -20,11 +20,11 @@ pixels. It is the simplest feature set we can create. We use first 250 samples o train_data, and next 250 samples as test_data. So let's prepare them first. @code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('digits.png') -gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) +img = cv.imread('digits.png') +gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # Now we split the image to 5000 cells, each 20x20 size cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)] @@ -42,8 +42,8 @@ train_labels = np.repeat(k,250)[:,np.newaxis] test_labels = train_labels.copy() # Initiate kNN, train the data, then test it with test data for k=1 -knn = cv2.ml.KNearest_create() -knn.train(train, cv2.ml.ROW_SAMPLE, train_labels) +knn = cv.ml.KNearest_create() +knn.train(train, cv.ml.ROW_SAMPLE, train_labels) ret,result,neighbours,dist = knn.findNearest(test,k=5) # Now we check the accuracy of classification @@ -87,7 +87,7 @@ There are 20000 samples available, so we take first 10000 data as training sampl 10000 as test samples. We should change the alphabets to ascii characters because we can't work with alphabets directly. @code{.py} -import cv2 +import cv2 as cv import numpy as np import matplotlib.pyplot as plt @@ -103,8 +103,8 @@ responses, trainData = np.hsplit(train,[1]) labels, testData = np.hsplit(test,[1]) # Initiate the kNN, classify, measure accuracy. -knn = cv2.ml.KNearest_create() -knn.train(trainData, cv2.ml.ROW_SAMPLE, responses) +knn = cv.ml.KNearest_create() +knn.train(trainData, cv.ml.ROW_SAMPLE, responses) ret, result, neighbours, dist = knn.findNearest(testData, k=5) correct = np.count_nonzero(result == labels) diff --git a/doc/py_tutorials/py_ml/py_knn/py_knn_understanding/py_knn_understanding.markdown b/doc/py_tutorials/py_ml/py_knn/py_knn_understanding/py_knn_understanding.markdown index a9c36c4..30e53be 100644 --- a/doc/py_tutorials/py_ml/py_knn/py_knn_understanding/py_knn_understanding.markdown +++ b/doc/py_tutorials/py_ml/py_knn/py_knn_understanding/py_knn_understanding.markdown @@ -73,7 +73,7 @@ We do all these with the help of Random Number Generator in Numpy. Then we plot it with the help of Matplotlib. 
Red families are shown as Red Triangles and Blue families are shown as Blue Squares. @code{.py} -import cv2 +import cv2 as cv import numpy as np import matplotlib.pyplot as plt @@ -114,8 +114,8 @@ So let's see how it works. New comer is marked in green color. newcomer = np.random.randint(0,100,(1,2)).astype(np.float32) plt.scatter(newcomer[:,0],newcomer[:,1],80,'g','o') -knn = cv2.ml.KNearest_create() -knn.train(trainData, cv2.ml.ROW_SAMPLE, responses) +knn = cv.ml.KNearest_create() +knn.train(trainData, cv.ml.ROW_SAMPLE, responses) ret, results, neighbours ,dist = knn.findNearest(newcomer, 3) print( "result: {}\n".format(results) ) diff --git a/doc/py_tutorials/py_objdetect/py_face_detection/py_face_detection.markdown b/doc/py_tutorials/py_objdetect/py_face_detection/py_face_detection.markdown index bbf8025..c42ed73 100644 --- a/doc/py_tutorials/py_objdetect/py_face_detection/py_face_detection.markdown +++ b/doc/py_tutorials/py_objdetect/py_face_detection/py_face_detection.markdown @@ -94,13 +94,13 @@ First we need to load the required XML classifiers. Then load our input image (o grayscale mode. @code{.py} import numpy as np -import cv2 +import cv2 as cv -face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') -eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml') +face_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml') +eye_cascade = cv.CascadeClassifier('haarcascade_eye.xml') -img = cv2.imread('sachin.jpg') -gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) +img = cv.imread('sachin.jpg') +gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) @endcode Now we find the faces in the image. If faces are found, it returns the positions of detected faces as Rect(x,y,w,h). Once we get these locations, we can create a ROI for the face and apply eye @@ -108,16 +108,16 @@ detection on this ROI (since eyes are always on the face !!! ). @code{.py} faces = face_cascade.detectMultiScale(gray, 1.3, 5) for (x,y,w,h) in faces: - cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) + cv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) roi_gray = gray[y:y+h, x:x+w] roi_color = img[y:y+h, x:x+w] eyes = eye_cascade.detectMultiScale(roi_gray) for (ex,ey,ew,eh) in eyes: - cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) + cv.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2) -cv2.imshow('img',img) -cv2.waitKey(0) -cv2.destroyAllWindows() +cv.imshow('img',img) +cv.waitKey(0) +cv.destroyAllWindows() @endcode Result looks like below: diff --git a/doc/py_tutorials/py_photo/py_hdr/py_hdr.markdown b/doc/py_tutorials/py_photo/py_hdr/py_hdr.markdown index 1350f11..bbfbed4 100644 --- a/doc/py_tutorials/py_photo/py_hdr/py_hdr.markdown +++ b/doc/py_tutorials/py_photo/py_hdr/py_hdr.markdown @@ -53,12 +53,12 @@ Pay attention for the data types, as the images should be 1-channel or 3-channel 8-bit (np.uint8) and the exposure times need to be float32 and in seconds. @code{.py} -import cv2 +import cv2 as cv import numpy as np # Loading exposure images into a list img_fn = ["img0.jpg", "img1.jpg", "img2.jpg", "img3.jpg"] -img_list = [cv2.imread(fn) for fn in img_fn] +img_list = [cv.imread(fn) for fn in img_fn] exposure_times = np.array([15.0, 2.5, 0.25, 0.0333], dtype=np.float32) @endcode @@ -71,9 +71,9 @@ full dynamic range of all exposure images. 
@code{.py} # Merge exposures to HDR image -merge_debvec = cv2.createMergeDebevec() +merge_debvec = cv.createMergeDebevec() hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy()) -merge_robertson = cv2.createMergeRobertson() +merge_robertson = cv.createMergeRobertson() hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy()) @endcode @@ -85,9 +85,9 @@ we will later have to clip the data in order to avoid overflow. @code{.py} # Tonemap HDR image -tonemap1 = cv2.createTonemapDurand(gamma=2.2) +tonemap1 = cv.createTonemapDurand(gamma=2.2) res_debvec = tonemap1.process(hdr_debvec.copy()) -tonemap2 = cv2.createTonemapDurand(gamma=1.3) +tonemap2 = cv.createTonemapDurand(gamma=1.3) res_robertson = tonemap2.process(hdr_robertson.copy()) @endcode @@ -100,7 +100,7 @@ range of [0..1]. @code{.py} # Exposure fusion using Mertens -merge_mertens = cv2.createMergeMertens() +merge_mertens = cv.createMergeMertens() res_mertens = merge_mertens.process(img_list) @endcode @@ -115,9 +115,9 @@ res_debvec_8bit = np.clip(res_debvec*255, 0, 255).astype('uint8') res_robertson_8bit = np.clip(res_robertson*255, 0, 255).astype('uint8') res_mertens_8bit = np.clip(res_mertens*255, 0, 255).astype('uint8') -cv2.imwrite("ldr_debvec.jpg", res_debvec_8bit) -cv2.imwrite("ldr_robertson.jpg", res_robertson_8bit) -cv2.imwrite("fusion_mertens.jpg", res_mertens_8bit) +cv.imwrite("ldr_debvec.jpg", res_debvec_8bit) +cv.imwrite("ldr_robertson.jpg", res_robertson_8bit) +cv.imwrite("fusion_mertens.jpg", res_mertens_8bit) @endcode Results @@ -150,10 +150,10 @@ function and use it for the HDR merge. @code{.py} # Estimate camera response function (CRF) -cal_debvec = cv2.createCalibrateDebevec() +cal_debvec = cv.createCalibrateDebevec() crf_debvec = cal_debvec.process(img_list, times=exposure_times) hdr_debvec = merge_debvec.process(img_list, times=exposure_times.copy(), response=crf_debvec.copy()) -cal_robertson = cv2.createCalibrateRobertson() +cal_robertson = cv.createCalibrateRobertson() crf_robertson = cal_robertson.process(img_list, times=exposure_times) hdr_robertson = merge_robertson.process(img_list, times=exposure_times.copy(), response=crf_robertson.copy()) @endcode diff --git a/doc/py_tutorials/py_photo/py_inpainting/py_inpainting.markdown b/doc/py_tutorials/py_photo/py_inpainting/py_inpainting.markdown index 2612e03..e6bfe97 100644 --- a/doc/py_tutorials/py_photo/py_inpainting/py_inpainting.markdown +++ b/doc/py_tutorials/py_photo/py_inpainting/py_inpainting.markdown @@ -22,7 +22,7 @@ shown below (taken from [Wikipedia](http://en.wikipedia.org/wiki/Inpainting)): ![image](images/inpaint_basics.jpg) Several algorithms were designed for this purpose and OpenCV provides two of them. Both can be -accessed by the same function, **cv2.inpaint()** +accessed by the same function, **cv.inpaint()** First algorithm is based on the paper **"An Image Inpainting Technique Based on the Fast Marching Method"** by Alexandru Telea in 2004. It is based on Fast Marching Method. Consider a region in the @@ -33,7 +33,7 @@ known pixels in the neigbourhood. Selection of the weights is an important matte given to those pixels lying near to the point, near to the normal of the boundary and those lying on the boundary contours. Once a pixel is inpainted, it moves to next nearest pixel using Fast Marching Method. FMM ensures those pixels near the known pixels are inpainted first, so that it just works -like a manual heuristic operation. This algorithm is enabled by using the flag, cv2.INPAINT_TELEA. 
+like a manual heuristic operation. This algorithm is enabled by using the flag, cv.INPAINT_TELEA. Second algorithm is based on the paper **"Navier-Stokes, Fluid Dynamics, and Image and Video Inpainting"** by Bertalmio, Marcelo, Andrea L. Bertozzi, and Guillermo Sapiro in 2001. This @@ -43,7 +43,7 @@ are meant to be continuous). It continues isophotes (lines joining points with s like contours joins points with same elevation) while matching gradient vectors at the boundary of the inpainting region. For this, some methods from fluid dynamics are used. Once they are obtained, color is filled to reduce minimum variance in that area. This algorithm is enabled by using the -flag, cv2.INPAINT_NS. +flag, cv.INPAINT_NS. Code ---- @@ -53,16 +53,16 @@ the area which is to be inpainted. Everything else is simple. My image is degrad strokes (I added manually). I created a corresponding strokes with Paint tool. @code{.py} import numpy as np -import cv2 +import cv2 as cv -img = cv2.imread('messi_2.jpg') -mask = cv2.imread('mask2.png',0) +img = cv.imread('messi_2.jpg') +mask = cv.imread('mask2.png',0) -dst = cv2.inpaint(img,mask,3,cv2.INPAINT_TELEA) +dst = cv.inpaint(img,mask,3,cv.INPAINT_TELEA) -cv2.imshow('dst',dst) -cv2.waitKey(0) -cv2.destroyAllWindows() +cv.imshow('dst',dst) +cv.waitKey(0) +cv.destroyAllWindows() @endcode See the result below. First image shows degraded input. Second image is the mask. Third image is the result of first algorithm and last image is the result of second algorithm. diff --git a/doc/py_tutorials/py_photo/py_non_local_means/py_non_local_means.markdown b/doc/py_tutorials/py_photo/py_non_local_means/py_non_local_means.markdown index eb154cc..0dea202 100644 --- a/doc/py_tutorials/py_photo/py_non_local_means/py_non_local_means.markdown +++ b/doc/py_tutorials/py_photo/py_non_local_means/py_non_local_means.markdown @@ -7,8 +7,8 @@ Goal In this chapter, - You will learn about Non-local Means Denoising algorithm to remove noise in the image. -- You will see different functions like **cv2.fastNlMeansDenoising()**, - **cv2.fastNlMeansDenoisingColored()** etc. +- You will see different functions like **cv.fastNlMeansDenoising()**, + **cv.fastNlMeansDenoisingColored()** etc. Theory ------ @@ -52,11 +52,11 @@ Image Denoising in OpenCV OpenCV provides four variations of this technique. --# **cv2.fastNlMeansDenoising()** - works with a single grayscale images -2. **cv2.fastNlMeansDenoisingColored()** - works with a color image. -3. **cv2.fastNlMeansDenoisingMulti()** - works with image sequence captured in short period of time +-# **cv.fastNlMeansDenoising()** - works with a single grayscale images +2. **cv.fastNlMeansDenoisingColored()** - works with a color image. +3. **cv.fastNlMeansDenoisingMulti()** - works with image sequence captured in short period of time (grayscale images) -4. **cv2.fastNlMeansDenoisingColoredMulti()** - same as above, but for color images. +4. **cv.fastNlMeansDenoisingColoredMulti()** - same as above, but for color images. Common arguments are: - h : parameter deciding filter strength. Higher h value removes noise better, but removes @@ -69,18 +69,18 @@ Please visit first link in additional resources for more details on these parame We will demonstrate 2 and 3 here. Rest is left for you. -### 1. cv2.fastNlMeansDenoisingColored() +### 1. cv.fastNlMeansDenoisingColored() As mentioned above it is used to remove noise from color images. (Noise is expected to be gaussian). 
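In the call used in the example below, the positional arguments after the source image and the (None) output are h, hColor, templateWindowSize and searchWindowSize, in that order. Written with keyword arguments the same call reads more clearly (a sketch; the parameter names follow the underlying C++ signature):
@code{.py}
import cv2 as cv

img = cv.imread('die.png')
dst = cv.fastNlMeansDenoisingColored(img, None, h=10, hColor=10,
                                     templateWindowSize=7, searchWindowSize=21)
@endcode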
See the example below: @code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -img = cv2.imread('die.png') +img = cv.imread('die.png') -dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21) +dst = cv.fastNlMeansDenoisingColored(img,None,10,10,7,21) plt.subplot(121),plt.imshow(img) plt.subplot(122),plt.imshow(dst) @@ -91,7 +91,7 @@ result: ![image](images/nlm_result1.jpg) -### 2. cv2.fastNlMeansDenoisingMulti() +### 2. cv.fastNlMeansDenoisingMulti() Now we will apply the same method to a video. The first argument is the list of noisy frames. Second argument imgToDenoiseIndex specifies which frame we need to denoise, for that we pass the index of @@ -102,16 +102,16 @@ input. Let imgToDenoiseIndex = 2 and temporalWindowSize = 3. Then frame-1, frame used to denoise frame-2. Let's see an example. @code{.py} import numpy as np -import cv2 +import cv2 as cv from matplotlib import pyplot as plt -cap = cv2.VideoCapture('vtest.avi') +cap = cv.VideoCapture('vtest.avi') # create a list of first 5 frames img = [cap.read()[1] for i in xrange(5)] # convert all to grayscale -gray = [cv2.cvtColor(i, cv2.COLOR_BGR2GRAY) for i in img] +gray = [cv.cvtColor(i, cv.COLOR_BGR2GRAY) for i in img] # convert all to float64 gray = [np.float64(i) for i in gray] @@ -126,7 +126,7 @@ noisy = [i+noise for i in gray] noisy = [np.uint8(np.clip(i,0,255)) for i in noisy] # Denoise 3rd frame considering all the 5 frames -dst = cv2.fastNlMeansDenoisingMulti(noisy, 2, 5, None, 4, 7, 35) +dst = cv.fastNlMeansDenoisingMulti(noisy, 2, 5, None, 4, 7, 35) plt.subplot(131),plt.imshow(gray[2],'gray') plt.subplot(132),plt.imshow(noisy[2],'gray') diff --git a/doc/py_tutorials/py_setup/py_setup_in_fedora/py_setup_in_fedora.markdown b/doc/py_tutorials/py_setup/py_setup_in_fedora/py_setup_in_fedora.markdown index a538cb4..bc5386b 100644 --- a/doc/py_tutorials/py_setup/py_setup_in_fedora/py_setup_in_fedora.markdown +++ b/doc/py_tutorials/py_setup/py_setup_in_fedora/py_setup_in_fedora.markdown @@ -29,8 +29,8 @@ $ yum install numpy opencv* @endcode Open Python IDLE (or IPython) and type following codes in Python terminal. @code{.py} ->>> import cv2 ->>> print( cv2.__version__ ) +>>> import cv2 as cv +>>> print( cv.__version__ ) @endcode If the results are printed out without any errors, congratulations !!! You have installed OpenCV-Python successfully. @@ -230,7 +230,7 @@ But you will have to do this every time you install OpenCV. @code{.sh} export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python2.7/site-packages @endcode -Thus OpenCV installation is finished. Open a terminal and try import cv2. +Thus OpenCV installation is finished. Open a terminal and try 'import cv2 as cv'. To build the documentation, just enter following commands: @code{.sh} diff --git a/doc/py_tutorials/py_setup/py_setup_in_ubuntu/py_setup_in_ubuntu.markdown b/doc/py_tutorials/py_setup/py_setup_in_ubuntu/py_setup_in_ubuntu.markdown index e50bb36..56a2e6b 100644 --- a/doc/py_tutorials/py_setup/py_setup_in_ubuntu/py_setup_in_ubuntu.markdown +++ b/doc/py_tutorials/py_setup/py_setup_in_ubuntu/py_setup_in_ubuntu.markdown @@ -31,8 +31,8 @@ $ sudo apt-get install python-opencv Open Python IDLE (or IPython) and type following codes in Python terminal. ``` -import cv2 -print(cv2.__version__) +import cv2 as cv +print(cv.__version__) ``` If the results are printed out without any errors, congratulations !!! @@ -160,6 +160,6 @@ All files are installed in "/usr/local/" folder. Open a terminal and try import "cv2". 
``` -import cv2 -print(cv2.__version__) +import cv2 as cv +print(cv.__version__) ``` diff --git a/doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown b/doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown index f971192..891c51f 100644 --- a/doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown +++ b/doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown @@ -35,8 +35,8 @@ Installing OpenCV from prebuilt binaries -# Open Python IDLE and type following codes in Python terminal. @code - >>> import cv2 - >>> print( cv2.__version__ ) + >>> import cv2 as cv + >>> print( cv.__version__ ) @endcode If the results are printed out without any errors, congratulations !!! You have installed @@ -136,7 +136,7 @@ Building OpenCV from source ![image](images/Capture8.png) --# Open Python IDLE and enter import cv2. If no error, it is installed correctly. +-# Open Python IDLE and enter 'import cv2 as cv'. If no error, it is installed correctly. @note We have installed with no other support like TBB, Eigen, Qt, Documentation etc. It would be difficult to explain it here. A more detailed video will be added soon or you can just hack around. diff --git a/doc/py_tutorials/py_video/py_bg_subtraction/py_bg_subtraction.markdown b/doc/py_tutorials/py_video/py_bg_subtraction/py_bg_subtraction.markdown index ee44831..2337154 100644 --- a/doc/py_tutorials/py_video/py_bg_subtraction/py_bg_subtraction.markdown +++ b/doc/py_tutorials/py_video/py_bg_subtraction/py_bg_subtraction.markdown @@ -37,31 +37,31 @@ the time proportions that those colours stay in the scene. The probable backgrou ones which stay longer and more static. While coding, we need to create a background object using the function, -**cv2.createBackgroundSubtractorMOG()**. It has some optional parameters like length of history, +**cv.createBackgroundSubtractorMOG()**. It has some optional parameters like length of history, number of gaussian mixtures, threshold etc. It is all set to some default values. Then inside the video loop, use backgroundsubtractor.apply() method to get the foreground mask. See a simple example below: @code{.py} import numpy as np -import cv2 +import cv2 as cv -cap = cv2.VideoCapture('vtest.avi') +cap = cv.VideoCapture('vtest.avi') -fgbg = cv2.createBackgroundSubtractorMOG() +fgbg = cv.createBackgroundSubtractorMOG() while(1): ret, frame = cap.read() fgmask = fgbg.apply(frame) - cv2.imshow('frame',fgmask) - k = cv2.waitKey(30) & 0xff + cv.imshow('frame',fgmask) + k = cv.waitKey(30) & 0xff if k == 27: break cap.release() -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode ( All the results are shown at the end for comparison). @@ -80,24 +80,24 @@ detecting shadows or not. If detectShadows = True (which is so by default), it detects and marks shadows, but decreases the speed. Shadows will be marked in gray color. @code{.py} import numpy as np -import cv2 +import cv2 as cv -cap = cv2.VideoCapture('vtest.avi') +cap = cv.VideoCapture('vtest.avi') -fgbg = cv2.createBackgroundSubtractorMOG2() +fgbg = cv.createBackgroundSubtractorMOG2() while(1): ret, frame = cap.read() fgmask = fgbg.apply(frame) - cv2.imshow('frame',fgmask) - k = cv2.waitKey(30) & 0xff + cv.imshow('frame',fgmask) + k = cv.waitKey(30) & 0xff if k == 27: break cap.release() -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode (Results given at the end) @@ -120,26 +120,26 @@ frames. It would be better to apply morphological opening to the result to remove the noises. 
@code{.py} import numpy as np -import cv2 +import cv2 as cv -cap = cv2.VideoCapture('vtest.avi') +cap = cv.VideoCapture('vtest.avi') -kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)) -fgbg = cv2.createBackgroundSubtractorGMG() +kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE,(3,3)) +fgbg = cv.createBackgroundSubtractorGMG() while(1): ret, frame = cap.read() fgmask = fgbg.apply(frame) - fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel) + fgmask = cv.morphologyEx(fgmask, cv.MORPH_OPEN, kernel) - cv2.imshow('frame',fgmask) - k = cv2.waitKey(30) & 0xff + cv.imshow('frame',fgmask) + k = cv.waitKey(30) & 0xff if k == 27: break cap.release() -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode Results ------- diff --git a/doc/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.markdown b/doc/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.markdown index 2c88617..0c63e35 100644 --- a/doc/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.markdown +++ b/doc/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.markdown @@ -7,7 +7,7 @@ Goal In this chapter, - We will understand the concepts of optical flow and its estimation using Lucas-Kanade method. - - We will use functions like **cv2.calcOpticalFlowPyrLK()** to track feature points in a + - We will use functions like **cv.calcOpticalFlowPyrLK()** to track feature points in a video. Optical Flow @@ -84,19 +84,19 @@ Lucas-Kanade there, we get optical flow along with the scale. Lucas-Kanade Optical Flow in OpenCV ----------------------------------- -OpenCV provides all these in a single function, **cv2.calcOpticalFlowPyrLK()**. Here, we create a +OpenCV provides all these in a single function, **cv.calcOpticalFlowPyrLK()**. Here, we create a simple application which tracks some points in a video. To decide the points, we use -**cv2.goodFeaturesToTrack()**. We take the first frame, detect some Shi-Tomasi corner points in it, +**cv.goodFeaturesToTrack()**. We take the first frame, detect some Shi-Tomasi corner points in it, then we iteratively track those points using Lucas-Kanade optical flow. For the function -**cv2.calcOpticalFlowPyrLK()** we pass the previous frame, previous points and next frame. It +**cv.calcOpticalFlowPyrLK()** we pass the previous frame, previous points and next frame. It returns next points along with some status numbers which has a value of 1 if next point is found, else zero. We iteratively pass these next points as previous points in next step. 
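For quick reference, this is the two-variable least-squares solution derived in the theory section above: within the small window around each tracked point, every pixel is assumed to share the same motion \f$(u,v)\f$, and the brightness-constancy equations \f$f_x u + f_y v + f_t = 0\f$ give

\f[\begin{bmatrix} u \\ v \end{bmatrix} =
\begin{bmatrix} \sum_i f_{x_i}^2 & \sum_i f_{x_i} f_{y_i} \\
                \sum_i f_{x_i} f_{y_i} & \sum_i f_{y_i}^2 \end{bmatrix}^{-1}
\begin{bmatrix} -\sum_i f_{x_i} f_{t_i} \\ -\sum_i f_{y_i} f_{t_i} \end{bmatrix}\f]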
See the code below: @code{.py} import numpy as np -import cv2 +import cv2 as cv -cap = cv2.VideoCapture('slow.flv') +cap = cv.VideoCapture('slow.flv') # params for ShiTomasi corner detection feature_params = dict( maxCorners = 100, @@ -107,25 +107,25 @@ feature_params = dict( maxCorners = 100, # Parameters for lucas kanade optical flow lk_params = dict( winSize = (15,15), maxLevel = 2, - criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) + criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03)) # Create some random colors color = np.random.randint(0,255,(100,3)) # Take first frame and find corners in it ret, old_frame = cap.read() -old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY) -p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params) +old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY) +p0 = cv.goodFeaturesToTrack(old_gray, mask = None, **feature_params) # Create a mask image for drawing purposes mask = np.zeros_like(old_frame) while(1): ret,frame = cap.read() - frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) # calculate optical flow - p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params) + p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params) # Select good points good_new = p1[st==1] @@ -135,12 +135,12 @@ while(1): for i,(new,old) in enumerate(zip(good_new,good_old)): a,b = new.ravel() c,d = old.ravel() - mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2) - frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1) - img = cv2.add(frame,mask) + mask = cv.line(mask, (a,b),(c,d), color[i].tolist(), 2) + frame = cv.circle(frame,(a,b),5,color[i].tolist(),-1) + img = cv.add(frame,mask) - cv2.imshow('frame',img) - k = cv2.waitKey(30) & 0xff + cv.imshow('frame',img) + k = cv.waitKey(30) & 0xff if k == 27: break @@ -148,7 +148,7 @@ while(1): old_gray = frame_gray.copy() p0 = good_new.reshape(-1,1,2) -cv2.destroyAllWindows() +cv.destroyAllWindows() cap.release() @endcode (This code doesn't check how correct are the next keypoints. So even if any feature point disappears @@ -176,37 +176,37 @@ array with optical flow vectors, \f$(u,v)\f$. We find their magnitude and direct result for better visualization. Direction corresponds to Hue value of the image. Magnitude corresponds to Value plane. 
See the code below: @code{.py} -import cv2 +import cv2 as cv import numpy as np -cap = cv2.VideoCapture("vtest.avi") +cap = cv.VideoCapture("vtest.avi") ret, frame1 = cap.read() -prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY) +prvs = cv.cvtColor(frame1,cv.COLOR_BGR2GRAY) hsv = np.zeros_like(frame1) hsv[...,1] = 255 while(1): ret, frame2 = cap.read() - next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY) + next = cv.cvtColor(frame2,cv.COLOR_BGR2GRAY) - flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0) + flow = cv.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0) - mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1]) + mag, ang = cv.cartToPolar(flow[...,0], flow[...,1]) hsv[...,0] = ang*180/np.pi/2 - hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX) - bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR) + hsv[...,2] = cv.normalize(mag,None,0,255,cv.NORM_MINMAX) + bgr = cv.cvtColor(hsv,cv.COLOR_HSV2BGR) - cv2.imshow('frame2',bgr) - k = cv2.waitKey(30) & 0xff + cv.imshow('frame2',bgr) + k = cv.waitKey(30) & 0xff if k == 27: break elif k == ord('s'): - cv2.imwrite('opticalfb.png',frame2) - cv2.imwrite('opticalhsv.png',bgr) + cv.imwrite('opticalfb.png',frame2) + cv.imwrite('opticalhsv.png',bgr) prvs = next cap.release() -cv2.destroyAllWindows() +cv.destroyAllWindows() @endcode See the result below: diff --git a/doc/py_tutorials/py_video/py_meanshift/py_meanshift.markdown b/doc/py_tutorials/py_video/py_meanshift/py_meanshift.markdown index 499cc66..8976cc3 100644 --- a/doc/py_tutorials/py_video/py_meanshift/py_meanshift.markdown +++ b/doc/py_tutorials/py_video/py_meanshift/py_meanshift.markdown @@ -39,12 +39,12 @@ algorithm moves our window to the new location with maximum density. To use meanshift in OpenCV, first we need to setup the target, find its histogram so that we can backproject the target on each frame for calculation of meanshift. We also need to provide initial location of window. For histogram, only Hue is considered here. Also, to avoid false values due to -low light, low light values are discarded using **cv2.inRange()** function. +low light, low light values are discarded using **cv.inRange()** function. 
@code{.py} import numpy as np -import cv2 +import cv2 as cv -cap = cv2.VideoCapture('slow.flv') +cap = cv.VideoCapture('slow.flv') # take first frame of the video ret,frame = cap.read() @@ -55,39 +55,39 @@ track_window = (c,r,w,h) # set up the ROI for tracking roi = frame[r:r+h, c:c+w] -hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV) -mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.))) -roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180]) -cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX) +hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV) +mask = cv.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.))) +roi_hist = cv.calcHist([hsv_roi],[0],mask,[180],[0,180]) +cv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX) # Setup the termination criteria, either 10 iteration or move by atleast 1 pt -term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 ) +term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 ) while(1): ret ,frame = cap.read() if ret == True: - hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) - dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1) + hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV) + dst = cv.calcBackProject([hsv],[0],roi_hist,[0,180],1) # apply meanshift to get the new location - ret, track_window = cv2.meanShift(dst, track_window, term_crit) + ret, track_window = cv.meanShift(dst, track_window, term_crit) # Draw it on image x,y,w,h = track_window - img2 = cv2.rectangle(frame, (x,y), (x+w,y+h), 255,2) - cv2.imshow('img2',img2) + img2 = cv.rectangle(frame, (x,y), (x+w,y+h), 255,2) + cv.imshow('img2',img2) - k = cv2.waitKey(60) & 0xff + k = cv.waitKey(60) & 0xff if k == 27: break else: - cv2.imwrite(chr(k)+".jpg",img2) + cv.imwrite(chr(k)+".jpg",img2) else: break -cv2.destroyAllWindows() +cv.destroyAllWindows() cap.release() @endcode Three frames in a video I used is given below: @@ -116,9 +116,9 @@ It is almost same as meanshift, but it returns a rotated rectangle (that is our parameters (used to be passed as search window in next iteration). 
See the code below: @code{.py} import numpy as np -import cv2 +import cv2 as cv -cap = cv2.VideoCapture('slow.flv') +cap = cv.VideoCapture('slow.flv') # take first frame of the video ret,frame = cap.read() @@ -129,40 +129,40 @@ track_window = (c,r,w,h) # set up the ROI for tracking roi = frame[r:r+h, c:c+w] -hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV) -mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.))) -roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180]) -cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX) +hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV) +mask = cv.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.))) +roi_hist = cv.calcHist([hsv_roi],[0],mask,[180],[0,180]) +cv.normalize(roi_hist,roi_hist,0,255,cv.NORM_MINMAX) # Setup the termination criteria, either 10 iteration or move by atleast 1 pt -term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 ) +term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 ) while(1): ret ,frame = cap.read() if ret == True: - hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) - dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1) + hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV) + dst = cv.calcBackProject([hsv],[0],roi_hist,[0,180],1) # apply meanshift to get the new location - ret, track_window = cv2.CamShift(dst, track_window, term_crit) + ret, track_window = cv.CamShift(dst, track_window, term_crit) # Draw it on image - pts = cv2.boxPoints(ret) + pts = cv.boxPoints(ret) pts = np.int0(pts) - img2 = cv2.polylines(frame,[pts],True, 255,2) - cv2.imshow('img2',img2) + img2 = cv.polylines(frame,[pts],True, 255,2) + cv.imshow('img2',img2) - k = cv2.waitKey(60) & 0xff + k = cv.waitKey(60) & 0xff if k == 27: break else: - cv2.imwrite(chr(k)+".jpg",img2) + cv.imwrite(chr(k)+".jpg",img2) else: break -cv2.destroyAllWindows() +cv.destroyAllWindows() cap.release() @endcode Three frames of the result is shown below: diff --git a/doc/tutorials/core/adding_images/adding_images.markdown b/doc/tutorials/core/adding_images/adding_images.markdown index 95cc19e..c877632 100644 --- a/doc/tutorials/core/adding_images/adding_images.markdown +++ b/doc/tutorials/core/adding_images/adding_images.markdown @@ -86,7 +86,7 @@ Now we need to generate the `g(x)` image. For this, the function **addWeighted() @add_toggle_python @snippet python/tutorial_code/core/AddingImages/adding_images.py blend_images -Numpy version of above line (but cv2 function is around 2x faster): +Numpy version of above line (but cv function is around 2x faster): \code{.py} dst = np.uint8(alpha*(img1)+beta*(img2)) \endcode diff --git a/doc/tutorials/core/mat-mask-operations/mat_mask_operations.markdown b/doc/tutorials/core/mat-mask-operations/mat_mask_operations.markdown index 32545de..8e1febe 100644 --- a/doc/tutorials/core/mat-mask-operations/mat_mask_operations.markdown +++ b/doc/tutorials/core/mat-mask-operations/mat_mask_operations.markdown @@ -82,7 +82,7 @@ At first we make sure that the input images data in unsigned 8 bit format. At first we make sure that the input images data in unsigned 8 bit format. 
@code{.py} -my_image = cv2.cvtColor(my_image, cv2.CV_8U) +my_image = cv.cvtColor(my_image, cv.CV_8U) @endcode @end_toggle diff --git a/modules/python/test/test_algorithm_rw.py b/modules/python/test/test_algorithm_rw.py index 788fa22..f87a261 100644 --- a/modules/python/test/test_algorithm_rw.py +++ b/modules/python/test/test_algorithm_rw.py @@ -3,18 +3,18 @@ ''' Algorithm serializaion test ''' -import cv2 +import cv2 as cv from tests_common import NewOpenCVTests class algorithm_rw_test(NewOpenCVTests): def test_algorithm_rw(self): # some arbitrary non-default parameters - gold = cv2.AKAZE_create(descriptor_size=1, descriptor_channels=2, nOctaves=3, threshold=4.0) - gold.write(cv2.FileStorage("params.yml", 1), "AKAZE") + gold = cv.AKAZE_create(descriptor_size=1, descriptor_channels=2, nOctaves=3, threshold=4.0) + gold.write(cv.FileStorage("params.yml", 1), "AKAZE") - fs = cv2.FileStorage("params.yml", 0) - algorithm = cv2.AKAZE_create() + fs = cv.FileStorage("params.yml", 0) + algorithm = cv.AKAZE_create() algorithm.read(fs.getNode("AKAZE")) self.assertEqual(algorithm.getDescriptorSize(), 1) diff --git a/modules/python/test/test_calibration.py b/modules/python/test/test_calibration.py index 396ec23..f2cf4cc 100644 --- a/modules/python/test/test_calibration.py +++ b/modules/python/test/test_calibration.py @@ -9,7 +9,7 @@ reads distorted images, calculates the calibration and write undistorted images from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from tests_common import NewOpenCVTests @@ -38,10 +38,10 @@ class calibration_test(NewOpenCVTests): continue h, w = img.shape[:2] - found, corners = cv2.findChessboardCorners(img, pattern_size) + found, corners = cv.findChessboardCorners(img, pattern_size) if found: - term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1) - cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term) + term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1) + cv.cornerSubPix(img, corners, (5, 5), (-1, -1), term) if not found: continue @@ -50,7 +50,7 @@ class calibration_test(NewOpenCVTests): obj_points.append(pattern_points) # calculate camera distortion - rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None, flags = 0) + rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv.calibrateCamera(obj_points, img_points, (w, h), None, None, flags = 0) eps = 0.01 normCamEps = 10.0 @@ -64,8 +64,8 @@ class calibration_test(NewOpenCVTests): 1.21234330e-03, -1.40825372e-04, 1.54865844e-01] self.assertLess(abs(rms - 0.196334638034), eps) - self.assertLess(cv2.norm(camera_matrix - cameraMatrixTest, cv2.NORM_L1), normCamEps) - self.assertLess(cv2.norm(dist_coefs - distCoeffsTest, cv2.NORM_L1), normDistEps) + self.assertLess(cv.norm(camera_matrix - cameraMatrixTest, cv.NORM_L1), normCamEps) + self.assertLess(cv.norm(dist_coefs - distCoeffsTest, cv.NORM_L1), normDistEps) diff --git a/modules/python/test/test_camshift.py b/modules/python/test/test_camshift.py index 91d45b0..3c29427 100644 --- a/modules/python/test/test_camshift.py +++ b/modules/python/test/test_camshift.py @@ -21,7 +21,7 @@ if PY3: xrange = range import numpy as np -import cv2 +import cv2 as cv from tst_scene_render import TestSceneRender from tests_common import NewOpenCVTests, intersectionRate @@ -53,8 +53,8 @@ class camshift_test(NewOpenCVTests): while True: framesCounter += 1 self.frame = self.render.getNextFrame() - hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) - mask = cv2.inRange(hsv, np.array((0., 
60., 32.)), np.array((180., 255., 255.))) + hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV) + mask = cv.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.))) if self.selection: x0, y0, x1, y1 = self.render.getCurrentRect() + 50 @@ -63,17 +63,17 @@ class camshift_test(NewOpenCVTests): hsv_roi = hsv[y0:y1, x0:x1] mask_roi = mask[y0:y1, x0:x1] - hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] ) - cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX) + hist = cv.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] ) + cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX) self.hist = hist.reshape(-1) self.selection = False if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0: self.selection = None - prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1) + prob = cv.calcBackProject([hsv], [0], self.hist, [0, 180], 1) prob &= mask - term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 ) - _track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit) + term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 ) + _track_box, self.track_window = cv.CamShift(prob, self.track_window, term_crit) trackingRect = np.array(self.track_window) trackingRect[2] += trackingRect[0] diff --git a/modules/python/test/test_dft.py b/modules/python/test/test_dft.py index a1284d3..2d65192 100644 --- a/modules/python/test/test_dft.py +++ b/modules/python/test/test_dft.py @@ -7,7 +7,7 @@ Test for disctrete fourier transform (dft) # Python 2/3 compatibility from __future__ import print_function -import cv2 +import cv2 as cv import numpy as np import sys @@ -24,26 +24,26 @@ class dft_test(NewOpenCVTests): refDftShift = np.fft.fftshift(refDft) refMagnitide = np.log(1.0 + np.abs(refDftShift)) - testDft = cv2.dft(np.float32(img),flags = cv2.DFT_COMPLEX_OUTPUT) + testDft = cv.dft(np.float32(img),flags = cv.DFT_COMPLEX_OUTPUT) testDftShift = np.fft.fftshift(testDft) - testMagnitude = np.log(1.0 + cv2.magnitude(testDftShift[:,:,0], testDftShift[:,:,1])) + testMagnitude = np.log(1.0 + cv.magnitude(testDftShift[:,:,0], testDftShift[:,:,1])) - refMagnitide = cv2.normalize(refMagnitide, 0.0, 1.0, cv2.NORM_MINMAX) - testMagnitude = cv2.normalize(testMagnitude, 0.0, 1.0, cv2.NORM_MINMAX) + refMagnitide = cv.normalize(refMagnitide, 0.0, 1.0, cv.NORM_MINMAX) + testMagnitude = cv.normalize(testMagnitude, 0.0, 1.0, cv.NORM_MINMAX) - self.assertLess(cv2.norm(refMagnitide - testMagnitude), eps) + self.assertLess(cv.norm(refMagnitide - testMagnitude), eps) #test inverse transform img_back = np.fft.ifft2(refDft) img_back = np.abs(img_back) - img_backTest = cv2.idft(testDft) - img_backTest = cv2.magnitude(img_backTest[:,:,0], img_backTest[:,:,1]) + img_backTest = cv.idft(testDft) + img_backTest = cv.magnitude(img_backTest[:,:,0], img_backTest[:,:,1]) - img_backTest = cv2.normalize(img_backTest, 0.0, 1.0, cv2.NORM_MINMAX) - img_back = cv2.normalize(img_back, 0.0, 1.0, cv2.NORM_MINMAX) + img_backTest = cv.normalize(img_backTest, 0.0, 1.0, cv.NORM_MINMAX) + img_back = cv.normalize(img_back, 0.0, 1.0, cv.NORM_MINMAX) - self.assertLess(cv2.norm(img_back - img_backTest), eps) + self.assertLess(cv.norm(img_back - img_backTest), eps) if __name__ == '__main__': diff --git a/modules/python/test/test_digits.py b/modules/python/test/test_digits.py index 1204526..2d5c998 100644 --- a/modules/python/test/test_digits.py +++ b/modules/python/test/test_digits.py @@ -28,7 +28,7 @@ from __future__ import print_function # built-in modules from multiprocessing.pool 
import ThreadPool -import cv2 +import cv2 as cv import numpy as np from numpy.linalg import norm @@ -48,12 +48,12 @@ def split2d(img, cell_size, flatten=True): return cells def deskew(img): - m = cv2.moments(img) + m = cv.moments(img) if abs(m['mu02']) < 1e-2: return img.copy() skew = m['mu11']/m['mu02'] M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]]) - img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR) + img = cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR) return img class StatModel(object): @@ -65,10 +65,10 @@ class StatModel(object): class KNearest(StatModel): def __init__(self, k = 3): self.k = k - self.model = cv2.ml.KNearest_create() + self.model = cv.ml.KNearest_create() def train(self, samples, responses): - self.model.train(samples, cv2.ml.ROW_SAMPLE, responses) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) def predict(self, samples): _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k) @@ -76,14 +76,14 @@ class KNearest(StatModel): class SVM(StatModel): def __init__(self, C = 1, gamma = 0.5): - self.model = cv2.ml.SVM_create() + self.model = cv.ml.SVM_create() self.model.setGamma(gamma) self.model.setC(C) - self.model.setKernel(cv2.ml.SVM_RBF) - self.model.setType(cv2.ml.SVM_C_SVC) + self.model.setKernel(cv.ml.SVM_RBF) + self.model.setType(cv.ml.SVM_C_SVC) def train(self, samples, responses): - self.model.train(samples, cv2.ml.ROW_SAMPLE, responses) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) def predict(self, samples): return self.model.predict(samples)[1].ravel() @@ -105,9 +105,9 @@ def preprocess_simple(digits): def preprocess_hog(digits): samples = [] for img in digits: - gx = cv2.Sobel(img, cv2.CV_32F, 1, 0) - gy = cv2.Sobel(img, cv2.CV_32F, 0, 1) - mag, ang = cv2.cartToPolar(gx, gy) + gx = cv.Sobel(img, cv.CV_32F, 1, 0) + gy = cv.Sobel(img, cv.CV_32F, 0, 1) + mag, ang = cv.cartToPolar(gx, gy) bin_n = 16 bin = np.int32(bin_n*ang/(2*np.pi)) bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:] @@ -190,8 +190,8 @@ class digits_test(NewOpenCVTests): [ 0, 0, 0, 0, 0, 0, 0, 0, 47, 0], [ 0, 1, 0, 1, 0, 0, 0, 0, 1, 45]] - self.assertLess(cv2.norm(confusionMatrixes[0] - confusionKNN, cv2.NORM_L1), normEps) - self.assertLess(cv2.norm(confusionMatrixes[1] - confusionSVM, cv2.NORM_L1), normEps) + self.assertLess(cv.norm(confusionMatrixes[0] - confusionKNN, cv.NORM_L1), normEps) + self.assertLess(cv.norm(confusionMatrixes[1] - confusionSVM, cv.NORM_L1), normEps) self.assertLess(errors[0] - 0.034, eps) self.assertLess(errors[1] - 0.018, eps) diff --git a/modules/python/test/test_facedetect.py b/modules/python/test/test_facedetect.py index c2e58b0..f532eda 100644 --- a/modules/python/test/test_facedetect.py +++ b/modules/python/test/test_facedetect.py @@ -8,11 +8,11 @@ face detection using haar cascades from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv def detect(img, cascade): rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), - flags=cv2.CASCADE_SCALE_IMAGE) + flags=cv.CASCADE_SCALE_IMAGE) if len(rects) == 0: return [] rects[:,2:] += rects[:,:2] @@ -26,8 +26,8 @@ class facedetect_test(NewOpenCVTests): cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml' nested_fn = self.repoPath + '/data/haarcascades/haarcascade_eye.xml' - cascade = cv2.CascadeClassifier(cascade_fn) - nested = cv2.CascadeClassifier(nested_fn) + cascade = cv.CascadeClassifier(cascade_fn) 
+ nested = cv.CascadeClassifier(nested_fn) samples = ['samples/data/lena.jpg', 'cv/cascadeandhog/images/mona-lisa.png'] @@ -49,8 +49,8 @@ class facedetect_test(NewOpenCVTests): for sample in samples: img = self.get_sample( sample) - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - gray = cv2.GaussianBlur(gray, (5, 5), 5.1) + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + gray = cv.GaussianBlur(gray, (5, 5), 5.1) rects = detect(gray, cascade) faces.append(rects) diff --git a/modules/python/test/test_feature_homography.py b/modules/python/test/test_feature_homography.py index 6991b47..c829128 100644 --- a/modules/python/test/test_feature_homography.py +++ b/modules/python/test/test_feature_homography.py @@ -13,7 +13,7 @@ PlaneTracker class in plane_tracker.py from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv import sys PY3 = sys.version_info[0] == 3 @@ -28,8 +28,8 @@ def intersectionRate(s1, s2): x1, y1, x2, y2 = s1 s1 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]]) - area, _intersection = cv2.intersectConvexConvex(s1, np.array(s2)) - return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(np.array(s2))) + area, _intersection = cv.intersectConvexConvex(s1, np.array(s2)) + return 2 * area / (cv.contourArea(s1) + cv.contourArea(np.array(s2))) from tests_common import NewOpenCVTests @@ -92,8 +92,8 @@ TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad') class PlaneTracker: def __init__(self): - self.detector = cv2.AKAZE_create(threshold = 0.003) - self.matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329) + self.detector = cv.AKAZE_create(threshold = 0.003) + self.matcher = cv.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329) self.targets = [] self.frame_points = [] @@ -137,7 +137,7 @@ class PlaneTracker: p0 = [target.keypoints[m.trainIdx].pt for m in matches] p1 = [self.frame_points[m.queryIdx].pt for m in matches] p0, p1 = np.float32((p0, p1)) - H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0) + H, status = cv.findHomography(p0, p1, cv.RANSAC, 3.0) status = status.ravel() != 0 if status.sum() < MIN_MATCH_COUNT: continue @@ -145,7 +145,7 @@ class PlaneTracker: x0, y0, x1, y1 = target.rect quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]]) - quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2) + quad = cv.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2) track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad) tracked.append(track) diff --git a/modules/python/test/test_fitline.py b/modules/python/test/test_fitline.py index b804566..2197f8d 100644 --- a/modules/python/test/test_fitline.py +++ b/modules/python/test/test_fitline.py @@ -4,7 +4,7 @@ Robust line fitting. ================== -Example of using cv2.fitLine function for fitting line +Example of using cv.fitLine function for fitting line to points in presence of outliers. 
Switch through different M-estimator functions and see, @@ -19,7 +19,7 @@ import sys PY3 = sys.version_info[0] == 3 import numpy as np -import cv2 +import cv2 as cv from tests_common import NewOpenCVTests @@ -53,17 +53,17 @@ class fitline_test(NewOpenCVTests): lines = [] for name in dist_func_names: - func = getattr(cv2, name) - vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01) + func = getattr(cv, name) + vx, vy, cx, cy = cv.fitLine(np.float32(points), func, 0, 0.01, 0.01) line = [float(vx), float(vy), float(cx), float(cy)] lines.append(line) eps = 0.05 - refVec = (np.float32(p1) - p0) / cv2.norm(np.float32(p1) - p0) + refVec = (np.float32(p1) - p0) / cv.norm(np.float32(p1) - p0) for i in range(len(lines)): - self.assertLessEqual(cv2.norm(refVec - lines[i][0:2], cv2.NORM_L2), eps) + self.assertLessEqual(cv.norm(refVec - lines[i][0:2], cv.NORM_L2), eps) if __name__ == '__main__': diff --git a/modules/python/test/test_gaussian_mix.py b/modules/python/test/test_gaussian_mix.py index 7ee39b3..6286629 100644 --- a/modules/python/test/test_gaussian_mix.py +++ b/modules/python/test/test_gaussian_mix.py @@ -10,7 +10,7 @@ if PY3: import numpy as np from numpy import random -import cv2 +import cv2 as cv def make_gaussians(cluster_n, img_size): points = [] @@ -38,9 +38,9 @@ class gaussian_mix_test(NewOpenCVTests): points, ref_distrs = make_gaussians(cluster_n, img_size) - em = cv2.ml.EM_create() + em = cv.ml.EM_create() em.setClustersNumber(cluster_n) - em.setCovarianceMatrixType(cv2.ml.EM_COV_MAT_GENERIC) + em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC) em.trainEM(points) means = em.getMeans() covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232 @@ -53,8 +53,8 @@ class gaussian_mix_test(NewOpenCVTests): for i in range(cluster_n): for j in range(cluster_n): - if (cv2.norm(means[i] - ref_distrs[j][0], cv2.NORM_L2) / cv2.norm(ref_distrs[j][0], cv2.NORM_L2) < meanEps and - cv2.norm(covs[i] - ref_distrs[j][1], cv2.NORM_L2) / cv2.norm(ref_distrs[j][1], cv2.NORM_L2) < covEps): + if (cv.norm(means[i] - ref_distrs[j][0], cv.NORM_L2) / cv.norm(ref_distrs[j][0], cv.NORM_L2) < meanEps and + cv.norm(covs[i] - ref_distrs[j][1], cv.NORM_L2) / cv.norm(ref_distrs[j][1], cv.NORM_L2) < covEps): matches_count += 1 self.assertEqual(matches_count, cluster_n) diff --git a/modules/python/test/test_goodfeatures.py b/modules/python/test/test_goodfeatures.py index c874147..91fc885 100644 --- a/modules/python/test/test_goodfeatures.py +++ b/modules/python/test/test_goodfeatures.py @@ -3,7 +3,7 @@ # Python 2/3 compatibility from __future__ import print_function -import cv2 +import cv2 as cv import numpy as np from tests_common import NewOpenCVTests @@ -15,16 +15,16 @@ class TestGoodFeaturesToTrack_test(NewOpenCVTests): threshes = [ x / 100. 
for x in range(1,10) ] numPoints = 20000 - results = dict([(t, cv2.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes]) + results = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes]) # Check that GoodFeaturesToTrack has not modified input image self.assertTrue(arr.tostring() == original.tostring()) # Check for repeatability for i in range(1): - results2 = dict([(t, cv2.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes]) + results2 = dict([(t, cv.goodFeaturesToTrack(arr, numPoints, t, 2, useHarrisDetector=True)) for t in threshes]) for t in threshes: self.assertTrue(len(results2[t]) == len(results[t])) for i in range(len(results[t])): - self.assertTrue(cv2.norm(results[t][i][0] - results2[t][i][0]) == 0) + self.assertTrue(cv.norm(results[t][i][0] - results2[t][i][0]) == 0) for t0,t1 in zip(threshes, threshes[1:]): r0 = results[t0] @@ -33,7 +33,7 @@ class TestGoodFeaturesToTrack_test(NewOpenCVTests): self.assertTrue(len(r0) > len(r1)) # Increasing thresh should monly truncate result list for i in range(len(r1)): - self.assertTrue(cv2.norm(r1[i][0] - r0[i][0])==0) + self.assertTrue(cv.norm(r1[i][0] - r0[i][0])==0) if __name__ == '__main__': diff --git a/modules/python/test/test_grabcut.py b/modules/python/test/test_grabcut.py index 908a70b..dea86c6 100644 --- a/modules/python/test/test_grabcut.py +++ b/modules/python/test/test_grabcut.py @@ -9,7 +9,7 @@ Interactive Image Segmentation using GrabCut algorithm. from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv import sys from tests_common import NewOpenCVTests @@ -26,7 +26,7 @@ class grabcut_test(NewOpenCVTests): def scaleMask(self, mask): - return np.where((mask==cv2.GC_FGD) + (mask==cv2.GC_PR_FGD),255,0).astype('uint8') + return np.where((mask==cv.GC_FGD) + (mask==cv.GC_PR_FGD),255,0).astype('uint8') def test_grabcut(self): @@ -42,27 +42,27 @@ class grabcut_test(NewOpenCVTests): mask = np.zeros(img.shape[:2], dtype = np.uint8) bgdModel = np.zeros((1,65),np.float64) fgdModel = np.zeros((1,65),np.float64) - cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv2.GC_INIT_WITH_RECT) - cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 2, cv2.GC_EVAL) + cv.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv.GC_INIT_WITH_RECT) + cv.grabCut(img, mask, rect, bgdModel, fgdModel, 2, cv.GC_EVAL) if mask_prob is None: mask_prob = mask.copy() - cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/mask_probpy.png', mask_prob) + cv.imwrite(self.extraTestDataPath + '/cv/grabcut/mask_probpy.png', mask_prob) if exp_mask1 is None: exp_mask1 = self.scaleMask(mask) - cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask1py.png', exp_mask1) + cv.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask1py.png', exp_mask1) self.assertEqual(self.verify(self.scaleMask(mask), exp_mask1), True) mask = mask_prob bgdModel = np.zeros((1,65),np.float64) fgdModel = np.zeros((1,65),np.float64) - cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv2.GC_INIT_WITH_MASK) - cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 1, cv2.GC_EVAL) + cv.grabCut(img, mask, rect, bgdModel, fgdModel, 0, cv.GC_INIT_WITH_MASK) + cv.grabCut(img, mask, rect, bgdModel, fgdModel, 1, cv.GC_EVAL) if exp_mask2 is None: exp_mask2 = self.scaleMask(mask) - cv2.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask2py.png', exp_mask2) + cv.imwrite(self.extraTestDataPath + '/cv/grabcut/exp_mask2py.png', exp_mask2) 
self.assertEqual(self.verify(self.scaleMask(mask), exp_mask2), True) diff --git a/modules/python/test/test_houghcircles.py b/modules/python/test/test_houghcircles.py index 794730b..90cd184 100644 --- a/modules/python/test/test_houghcircles.py +++ b/modules/python/test/test_houghcircles.py @@ -1,13 +1,13 @@ #!/usr/bin/python ''' -This example illustrates how to use cv2.HoughCircles() function. +This example illustrates how to use cv.HoughCircles() function. ''' # Python 2/3 compatibility from __future__ import print_function -import cv2 +import cv2 as cv import numpy as np import sys from numpy import pi, sin, cos @@ -27,10 +27,10 @@ def circleApproximation(circle): def convContoursIntersectiponRate(c1, c2): - s1 = cv2.contourArea(c1) - s2 = cv2.contourArea(c2) + s1 = cv.contourArea(c1) + s2 = cv.contourArea(c2) - s, _ = cv2.intersectConvexConvex(c1, c2) + s, _ = cv.intersectConvexConvex(c1, c2) return 2*s/(s1+s2) @@ -41,10 +41,10 @@ class houghcircles_test(NewOpenCVTests): fn = "samples/data/board.jpg" src = self.get_sample(fn, 1) - img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) - img = cv2.medianBlur(img, 5) + img = cv.cvtColor(src, cv.COLOR_BGR2GRAY) + img = cv.medianBlur(img, 5) - circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)[0] + circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)[0] testCircles = [[38, 181, 17.6], [99.7, 166, 13.12], diff --git a/modules/python/test/test_houghlines.py b/modules/python/test/test_houghlines.py index 315c598..8deae54 100644 --- a/modules/python/test/test_houghlines.py +++ b/modules/python/test/test_houghlines.py @@ -7,7 +7,7 @@ This example illustrates how to use Hough Transform to find lines # Python 2/3 compatibility from __future__ import print_function -import cv2 +import cv2 as cv import numpy as np import sys import math @@ -16,9 +16,9 @@ from tests_common import NewOpenCVTests def linesDiff(line1, line2): - norm1 = cv2.norm(line1 - line2, cv2.NORM_L2) + norm1 = cv.norm(line1 - line2, cv.NORM_L2) line3 = line1[2:4] + line1[0:2] - norm2 = cv2.norm(line3 - line2, cv2.NORM_L2) + norm2 = cv.norm(line3 - line2, cv.NORM_L2) return min(norm1, norm2) @@ -29,9 +29,9 @@ class houghlines_test(NewOpenCVTests): fn = "/samples/data/pic1.png" src = self.get_sample(fn) - dst = cv2.Canny(src, 50, 200) + dst = cv.Canny(src, 50, 200) - lines = cv2.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)[:,0,:] + lines = cv.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)[:,0,:] eps = 5 testLines = [ diff --git a/modules/python/test/test_kmeans.py b/modules/python/test/test_kmeans.py index 8c7d37b..f2fcb92 100644 --- a/modules/python/test/test_kmeans.py +++ b/modules/python/test/test_kmeans.py @@ -8,7 +8,7 @@ K-means clusterization test from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from numpy import random import sys PY3 = sys.version_info[0] == 3 @@ -58,8 +58,8 @@ class kmeans_test(NewOpenCVTests): points, _, clusterSizes = make_gaussians(cluster_n, img_size) - term_crit = (cv2.TERM_CRITERIA_EPS, 30, 0.1) - _ret, labels, centers = cv2.kmeans(points, cluster_n, None, term_crit, 10, 0) + term_crit = (cv.TERM_CRITERIA_EPS, 30, 0.1) + _ret, labels, centers = cv.kmeans(points, cluster_n, None, term_crit, 10, 0) self.assertEqual(len(centers), cluster_n) diff --git a/modules/python/test/test_legacy.py b/modules/python/test/test_legacy.py index ff7cfa1..db6e1c4 100644 --- a/modules/python/test/test_legacy.py +++ 
b/modules/python/test/test_legacy.py @@ -2,7 +2,7 @@ from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from tests_common import NewOpenCVTests @@ -11,13 +11,13 @@ class Hackathon244Tests(NewOpenCVTests): def test_int_array(self): a = np.array([-1, 2, -3, 4, -5]) absa0 = np.abs(a) - self.assertTrue(cv2.norm(a, cv2.NORM_L1) == 15) - absa1 = cv2.absdiff(a, 0) - self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0) + self.assertTrue(cv.norm(a, cv.NORM_L1) == 15) + absa1 = cv.absdiff(a, 0) + self.assertEqual(cv.norm(absa1, absa0, cv.NORM_INF), 0) def test_imencode(self): a = np.zeros((480, 640), dtype=np.uint8) - flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90]) + flag, ajpg = cv.imencode("img_q90.jpg", a, [cv.IMWRITE_JPEG_QUALITY, 90]) self.assertEqual(flag, True) self.assertEqual(ajpg.dtype, np.uint8) self.assertGreater(ajpg.shape[0], 1) @@ -25,8 +25,8 @@ class Hackathon244Tests(NewOpenCVTests): def test_projectPoints(self): objpt = np.float64([[1,2,3]]) - imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([])) - imgpt1, jac1 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None) + imgpt0, jac0 = cv.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([])) + imgpt1, jac1 = cv.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None) self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2)) self.assertEqual(imgpt1.shape, imgpt0.shape) self.assertEqual(jac0.shape, jac1.shape) @@ -37,17 +37,17 @@ class Hackathon244Tests(NewOpenCVTests): pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32) pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2) pattern_points *= 10 - (retval, out, inliers) = cv2.estimateAffine3D(pattern_points, pattern_points) + (retval, out, inliers) = cv.estimateAffine3D(pattern_points, pattern_points) self.assertEqual(retval, 1) - if cv2.norm(out[2,:]) < 1e-3: + if cv.norm(out[2,:]) < 1e-3: out[2,2]=1 - self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3) - self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1]) + self.assertLess(cv.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3) + self.assertEqual(cv.countNonZero(inliers), pattern_size[0]*pattern_size[1]) def test_fast(self): - fd = cv2.FastFeatureDetector_create(30, True) + fd = cv.FastFeatureDetector_create(30, True) img = self.get_sample("samples/data/right02.jpg", 0) - img = cv2.medianBlur(img, 3) + img = cv.medianBlur(img, 3) keypoints = fd.detect(img) self.assertTrue(600 <= len(keypoints) <= 700) for kpt in keypoints: @@ -71,9 +71,9 @@ class Hackathon244Tests(NewOpenCVTests): np.random.seed(244) a = np.random.randn(npt,2).astype('float32')*50 + 150 - be = cv2.fitEllipse(a) - br = cv2.minAreaRect(a) - mc, mr = cv2.minEnclosingCircle(a) + be = cv.fitEllipse(a) + br = cv.minAreaRect(a) + mc, mr = cv.minEnclosingCircle(a) be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742) br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582) diff --git a/modules/python/test/test_letter_recog.py b/modules/python/test/test_letter_recog.py index c34559c..66bef39 100644 --- a/modules/python/test/test_letter_recog.py +++ b/modules/python/test/test_letter_recog.py @@ -24,7 +24,7 @@ and the remaining 10000 - to test the classifier. 
from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv def load_base(fn): a = np.loadtxt(fn, np.float32, delimiter=',', converters={ 0 : lambda ch : ord(ch)-ord('A') }) @@ -56,12 +56,12 @@ class LetterStatModel(object): class RTrees(LetterStatModel): def __init__(self): - self.model = cv2.ml.RTrees_create() + self.model = cv.ml.RTrees_create() def train(self, samples, responses): #sample_n, var_n = samples.shape self.model.setMaxDepth(20) - self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int)) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int)) def predict(self, samples): _ret, resp = self.model.predict(samples) @@ -70,10 +70,10 @@ class RTrees(LetterStatModel): class KNearest(LetterStatModel): def __init__(self): - self.model = cv2.ml.KNearest_create() + self.model = cv.ml.KNearest_create() def train(self, samples, responses): - self.model.train(samples, cv2.ml.ROW_SAMPLE, responses) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) def predict(self, samples): _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, k = 10) @@ -82,17 +82,17 @@ class KNearest(LetterStatModel): class Boost(LetterStatModel): def __init__(self): - self.model = cv2.ml.Boost_create() + self.model = cv.ml.Boost_create() def train(self, samples, responses): _sample_n, var_n = samples.shape new_samples = self.unroll_samples(samples) new_responses = self.unroll_responses(responses) - var_types = np.array([cv2.ml.VAR_NUMERICAL] * var_n + [cv2.ml.VAR_CATEGORICAL, cv2.ml.VAR_CATEGORICAL], np.uint8) + var_types = np.array([cv.ml.VAR_NUMERICAL] * var_n + [cv.ml.VAR_CATEGORICAL, cv.ml.VAR_CATEGORICAL], np.uint8) self.model.setWeakCount(15) self.model.setMaxDepth(10) - self.model.train(cv2.ml.TrainData_create(new_samples, cv2.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types)) + self.model.train(cv.ml.TrainData_create(new_samples, cv.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types)) def predict(self, samples): new_samples = self.unroll_samples(samples) @@ -103,14 +103,14 @@ class Boost(LetterStatModel): class SVM(LetterStatModel): def __init__(self): - self.model = cv2.ml.SVM_create() + self.model = cv.ml.SVM_create() def train(self, samples, responses): - self.model.setType(cv2.ml.SVM_C_SVC) + self.model.setType(cv.ml.SVM_C_SVC) self.model.setC(1) - self.model.setKernel(cv2.ml.SVM_RBF) + self.model.setKernel(cv.ml.SVM_RBF) self.model.setGamma(.1) - self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int)) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int)) def predict(self, samples): _ret, resp = self.model.predict(samples) @@ -119,7 +119,7 @@ class SVM(LetterStatModel): class MLP(LetterStatModel): def __init__(self): - self.model = cv2.ml.ANN_MLP_create() + self.model = cv.ml.ANN_MLP_create() def train(self, samples, responses): _sample_n, var_n = samples.shape @@ -127,13 +127,13 @@ class MLP(LetterStatModel): layer_sizes = np.int32([var_n, 100, 100, self.class_n]) self.model.setLayerSizes(layer_sizes) - self.model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP) + self.model.setTrainMethod(cv.ml.ANN_MLP_BACKPROP) self.model.setBackpropMomentumScale(0) self.model.setBackpropWeightScale(0.001) - self.model.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 20, 0.01)) - self.model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 2, 1) + self.model.setTermCriteria((cv.TERM_CRITERIA_COUNT, 20, 0.01)) + self.model.setActivationFunction(cv.ml.ANN_MLP_SIGMOID_SYM, 2, 1) - self.model.train(samples, 
cv2.ml.ROW_SAMPLE, np.float32(new_responses)) + self.model.train(samples, cv.ml.ROW_SAMPLE, np.float32(new_responses)) def predict(self, samples): _ret, resp = self.model.predict(samples) diff --git a/modules/python/test/test_lk_homography.py b/modules/python/test/test_lk_homography.py index 30d8fbe..5f0a205 100644 --- a/modules/python/test/test_lk_homography.py +++ b/modules/python/test/test_lk_homography.py @@ -11,7 +11,7 @@ between frames. Finds homography between reference and current views. from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv #local modules from tst_scene_render import TestSceneRender @@ -19,7 +19,7 @@ from tests_common import NewOpenCVTests, isPointInRect lk_params = dict( winSize = (19, 19), maxLevel = 2, - criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) + criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03)) feature_params = dict( maxCorners = 1000, qualityLevel = 0.01, @@ -27,8 +27,8 @@ feature_params = dict( maxCorners = 1000, blockSize = 19 ) def checkedTrace(img0, img1, p0, back_threshold = 1.0): - p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) - p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) + p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) + p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) d = abs(p0-p0r).reshape(-1, 2).max(-1) status = d < back_threshold return p1, status @@ -48,9 +48,9 @@ class lk_homography_test(NewOpenCVTests): self.get_sample('samples/data/box.png'), noise = 0.1, speed = 1.0) frame = self.render.getNextFrame() - frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) self.frame0 = frame.copy() - self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params) + self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params) isForegroundHomographyFound = False @@ -66,7 +66,7 @@ class lk_homography_test(NewOpenCVTests): while self.framesCounter < 200: self.framesCounter += 1 frame = self.render.getNextFrame() - frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) if self.p0 is not None: p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1) @@ -77,7 +77,7 @@ class lk_homography_test(NewOpenCVTests): if len(self.p0) < 4: self.p0 = None continue - _H, status = cv2.findHomography(self.p0, self.p1, cv2.RANSAC, 5.0) + _H, status = cv.findHomography(self.p0, self.p1, cv.RANSAC, 5.0) goodPointsInRect = 0 goodPointsOutsideRect = 0 @@ -91,7 +91,7 @@ class lk_homography_test(NewOpenCVTests): isForegroundHomographyFound = True self.assertGreater(float(goodPointsInRect) / (self.numFeaturesInRectOnStart + 1), 0.6) else: - self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params) + self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params) self.assertEqual(isForegroundHomographyFound, True) diff --git a/modules/python/test/test_lk_track.py b/modules/python/test/test_lk_track.py index f66faee..1ffde9a 100644 --- a/modules/python/test/test_lk_track.py +++ b/modules/python/test/test_lk_track.py @@ -13,7 +13,7 @@ between frames. 
from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv #local modules from tst_scene_render import TestSceneRender @@ -21,7 +21,7 @@ from tests_common import NewOpenCVTests, intersectionRate, isPointInRect lk_params = dict( winSize = (15, 15), maxLevel = 2, - criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) + criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03)) feature_params = dict( maxCorners = 500, qualityLevel = 0.3, @@ -32,7 +32,7 @@ def getRectFromPoints(points): distances = [] for point in points: - distances.append(cv2.norm(point, cv2.NORM_L2)) + distances.append(cv.norm(point, cv.NORM_L2)) x0, y0 = points[np.argmin(distances)] x1, y1 = points[np.argmax(distances)] @@ -58,13 +58,13 @@ class lk_track_test(NewOpenCVTests): while True: frame = self.render.getNextFrame() - frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) if len(self.tracks) > 0: img0, img1 = self.prev_gray, frame_gray p0 = np.float32([tr[-1][0] for tr in self.tracks]).reshape(-1, 1, 2) - p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) - p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) + p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) + p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) d = abs(p0-p0r).reshape(-1, 2).max(-1) good = d < 1 new_tracks = [] @@ -98,8 +98,8 @@ class lk_track_test(NewOpenCVTests): mask = np.zeros_like(frame_gray) mask[:] = 255 for x, y in [np.int32(tr[-1][0]) for tr in self.tracks]: - cv2.circle(mask, (x, y), 5, 0, -1) - p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params) + cv.circle(mask, (x, y), 5, 0, -1) + p = cv.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params) if p is not None: for x, y in np.float32(p).reshape(-1, 2): self.tracks.append([[(x, y), self.frame_idx]]) diff --git a/modules/python/test/test_misc.py b/modules/python/test/test_misc.py index 08535d4..818c453 100644 --- a/modules/python/test/test_misc.py +++ b/modules/python/test/test_misc.py @@ -2,18 +2,18 @@ from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from tests_common import NewOpenCVTests class Bindings(NewOpenCVTests): def test_inheritance(self): - bm = cv2.StereoBM_create() + bm = cv.StereoBM_create() bm.getPreFilterCap() # from StereoBM bm.getBlockSize() # from SteroMatcher - boost = cv2.ml.Boost_create() + boost = cv.ml.Boost_create() boost.getBoostType() # from ml::Boost boost.getMaxDepth() # from ml::DTrees boost.isClassifier() # from ml::StatModel diff --git a/modules/python/test/test_morphology.py b/modules/python/test/test_morphology.py index bfffc88..6960d88 100644 --- a/modules/python/test/test_morphology.py +++ b/modules/python/test/test_morphology.py @@ -10,7 +10,7 @@ import sys PY3 = sys.version_info[0] == 3 import numpy as np -import cv2 +import cv2 as cv from tests_common import NewOpenCVTests @@ -43,8 +43,8 @@ class morphology_test(NewOpenCVTests): str_name = 'MORPH_' + cur_str_mode.upper() oper_name = 'MORPH_' + op.upper() - st = cv2.getStructuringElement(getattr(cv2, str_name), (sz, sz)) - return cv2.morphologyEx(img, getattr(cv2, oper_name), st, iterations=iters) + st = cv.getStructuringElement(getattr(cv, str_name), (sz, sz)) + return cv.morphologyEx(img, getattr(cv, oper_name), st, iterations=iters) for mode in modes: res = update(mode) diff --git a/modules/python/test/test_mser.py 
b/modules/python/test/test_mser.py index bf878e5..c76e9d4 100644 --- a/modules/python/test/test_mser.py +++ b/modules/python/test/test_mser.py @@ -7,7 +7,7 @@ MSER detector test from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from tests_common import NewOpenCVTests @@ -33,7 +33,7 @@ class mser_test(NewOpenCVTests): ] thresharr = [ 0, 70, 120, 180, 255 ] kDelta = 5 - mserExtractor = cv2.MSER_create() + mserExtractor = cv.MSER_create() mserExtractor.setDelta(kDelta) np.random.seed(10) @@ -53,11 +53,11 @@ class mser_test(NewOpenCVTests): mserExtractor.setMinArea(kMinArea) mserExtractor.setMaxArea(kMaxArea) if invert: - cv2.bitwise_not(src, src) + cv.bitwise_not(src, src) if binarize: - _, src = cv2.threshold(src, thresh, 255, cv2.THRESH_BINARY) + _, src = cv.threshold(src, thresh, 255, cv.THRESH_BINARY) if blur: - src = cv2.GaussianBlur(src, (5, 5), 1.5, 1.5) + src = cv.GaussianBlur(src, (5, 5), 1.5, 1.5) minRegs = 7 if use_big_image else 2 maxRegs = 1000 if use_big_image else 20 if binarize and (thresh == 0 or thresh == 255): diff --git a/modules/python/test/test_peopledetect.py b/modules/python/test/test_peopledetect.py index 08d39ab..8f03356 100644 --- a/modules/python/test/test_peopledetect.py +++ b/modules/python/test/test_peopledetect.py @@ -8,7 +8,7 @@ example to detect upright people in images using HOG features from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv def inside(r, q): @@ -21,8 +21,8 @@ from tests_common import NewOpenCVTests, intersectionRate class peopledetect_test(NewOpenCVTests): def test_peopledetect(self): - hog = cv2.HOGDescriptor() - hog.setSVMDetector( cv2.HOGDescriptor_getDefaultPeopleDetector() ) + hog = cv.HOGDescriptor() + hog.setSVMDetector( cv.HOGDescriptor_getDefaultPeopleDetector() ) dirPath = 'samples/data/' samples = ['basketball1.png', 'basketball2.png'] diff --git a/modules/python/test/test_shape.py b/modules/python/test/test_shape.py index e396b62..fbd705f 100644 --- a/modules/python/test/test_shape.py +++ b/modules/python/test/test_shape.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -import cv2 +import cv2 as cv from tests_common import NewOpenCVTests @@ -7,14 +7,14 @@ class shape_test(NewOpenCVTests): def test_computeDistance(self): - a = self.get_sample('samples/data/shape_sample/1.png', cv2.IMREAD_GRAYSCALE) - b = self.get_sample('samples/data/shape_sample/2.png', cv2.IMREAD_GRAYSCALE) + a = self.get_sample('samples/data/shape_sample/1.png', cv.IMREAD_GRAYSCALE) + b = self.get_sample('samples/data/shape_sample/2.png', cv.IMREAD_GRAYSCALE) - _, ca, _ = cv2.findContours(a, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS) - _, cb, _ = cv2.findContours(b, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS) + _, ca, _ = cv.findContours(a, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS) + _, cb, _ = cv.findContours(b, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS) - hd = cv2.createHausdorffDistanceExtractor() - sd = cv2.createShapeContextDistanceExtractor() + hd = cv.createHausdorffDistanceExtractor() + sd = cv.createShapeContextDistanceExtractor() d1 = hd.computeDistance(ca[0], cb[0]) d2 = sd.computeDistance(ca[0], cb[0]) diff --git a/modules/python/test/test_squares.py b/modules/python/test/test_squares.py index c70cf87..92169b6 100644 --- a/modules/python/test/test_squares.py +++ b/modules/python/test/test_squares.py @@ -14,7 +14,7 @@ if PY3: xrange = range import numpy as np -import cv2 +import cv2 as cv def angle_cos(p0, p1, p2): @@ -22,20 +22,20 @@ def angle_cos(p0, p1, p2): return abs( np.dot(d1, d2) / 
np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) ) def find_squares(img): - img = cv2.GaussianBlur(img, (5, 5), 0) + img = cv.GaussianBlur(img, (5, 5), 0) squares = [] - for gray in cv2.split(img): + for gray in cv.split(img): for thrs in xrange(0, 255, 26): if thrs == 0: - bin = cv2.Canny(gray, 0, 50, apertureSize=5) - bin = cv2.dilate(bin, None) + bin = cv.Canny(gray, 0, 50, apertureSize=5) + bin = cv.dilate(bin, None) else: - _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY) - bin, contours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) + _retval, bin = cv.threshold(gray, thrs, 255, cv.THRESH_BINARY) + bin, contours, _hierarchy = cv.findContours(bin, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) for cnt in contours: - cnt_len = cv2.arcLength(cnt, True) - cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True) - if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt): + cnt_len = cv.arcLength(cnt, True) + cnt = cv.approxPolyDP(cnt, 0.02*cnt_len, True) + if len(cnt) == 4 and cv.contourArea(cnt) > 1000 and cv.isContourConvex(cnt): cnt = cnt.reshape(-1, 2) max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)]) if max_cos < 0.1 and filterSquares(squares, cnt): @@ -44,8 +44,8 @@ def find_squares(img): return squares def intersectionRate(s1, s2): - area, _intersection = cv2.intersectConvexConvex(np.array(s1), np.array(s2)) - return 2 * area / (cv2.contourArea(np.array(s1)) + cv2.contourArea(np.array(s2))) + area, _intersection = cv.intersectConvexConvex(np.array(s1), np.array(s2)) + return 2 * area / (cv.contourArea(np.array(s1)) + cv.contourArea(np.array(s2))) def filterSquares(squares, square): diff --git a/modules/python/test/test_stitching.py b/modules/python/test/test_stitching.py index 6d6c52d..3a5a99a 100644 --- a/modules/python/test/test_stitching.py +++ b/modules/python/test/test_stitching.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -import cv2 +import cv2 as cv from tests_common import NewOpenCVTests @@ -10,11 +10,11 @@ class stitching_test(NewOpenCVTests): img1 = self.get_sample('stitching/a1.png') img2 = self.get_sample('stitching/a2.png') - stitcher = cv2.createStitcher(False) + stitcher = cv.createStitcher(False) (_result, pano) = stitcher.stitch((img1, img2)) - #cv2.imshow("pano", pano) - #cv2.waitKey() + #cv.imshow("pano", pano) + #cv.waitKey() self.assertAlmostEqual(pano.shape[0], 685, delta=100, msg="rows: %r" % list(pano.shape)) self.assertAlmostEqual(pano.shape[1], 1025, delta=100, msg="cols: %r" % list(pano.shape)) diff --git a/modules/python/test/test_texture_flow.py b/modules/python/test/test_texture_flow.py index c4bbce2..f2ba144 100644 --- a/modules/python/test/test_texture_flow.py +++ b/modules/python/test/test_texture_flow.py @@ -3,7 +3,7 @@ ''' Texture flow direction estimation. -Sample shows how cv2.cornerEigenValsAndVecs function can be used +Sample shows how cv.cornerEigenValsAndVecs function can be used to estimate image texture flow direction. ''' @@ -11,7 +11,7 @@ to estimate image texture flow direction. 
from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv import sys from tests_common import NewOpenCVTests @@ -23,10 +23,10 @@ class texture_flow_test(NewOpenCVTests): img = self.get_sample('samples/data/chessboard.png') - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) h, w = img.shape[:2] - eigen = cv2.cornerEigenValsAndVecs(gray, 5, 3) + eigen = cv.cornerEigenValsAndVecs(gray, 5, 3) eigen = eigen.reshape(h, w, 3, 2) # [[e1, e2], v1, v2] flow = eigen[:,:,2] @@ -40,8 +40,8 @@ class texture_flow_test(NewOpenCVTests): textureVectors.append(np.int32(flow[y, x]*d)) for i in range(len(textureVectors)): - self.assertTrue(cv2.norm(textureVectors[i], cv2.NORM_L2) < eps - or abs(cv2.norm(textureVectors[i], cv2.NORM_L2) - d) < eps) + self.assertTrue(cv.norm(textureVectors[i], cv.NORM_L2) < eps + or abs(cv.norm(textureVectors[i], cv.NORM_L2) - d) < eps) if __name__ == '__main__': NewOpenCVTests.bootstrap() diff --git a/modules/python/test/test_umat.py b/modules/python/test/test_umat.py index f9f5645..c76ddd4 100644 --- a/modules/python/test/test_umat.py +++ b/modules/python/test/test_umat.py @@ -2,7 +2,7 @@ from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from tests_common import NewOpenCVTests @@ -11,39 +11,39 @@ class UMat(NewOpenCVTests): def test_umat_construct(self): data = np.random.random([512, 512]) # UMat constructors - data_um = cv2.UMat(data) # from ndarray - data_sub_um = cv2.UMat(data_um, [128, 256], [128, 256]) # from UMat - data_dst_um = cv2.UMat(128, 128, cv2.CV_64F) # from size/type + data_um = cv.UMat(data) # from ndarray + data_sub_um = cv.UMat(data_um, [128, 256], [128, 256]) # from UMat + data_dst_um = cv.UMat(128, 128, cv.CV_64F) # from size/type # test continuous and submatrix flags assert data_um.isContinuous() and not data_um.isSubmatrix() assert not data_sub_um.isContinuous() and data_sub_um.isSubmatrix() # test operation on submatrix - cv2.multiply(data_sub_um, 2., dst=data_dst_um) + cv.multiply(data_sub_um, 2., dst=data_dst_um) assert np.allclose(2. 
* data[128:256, 128:256], data_dst_um.get()) def test_umat_handle(self): - a_um = cv2.UMat(256, 256, cv2.CV_32F) - _ctx_handle = cv2.UMat.context() # obtain context handle - _queue_handle = cv2.UMat.queue() # obtain queue handle - _a_handle = a_um.handle(cv2.ACCESS_READ) # obtain buffer handle + a_um = cv.UMat(256, 256, cv.CV_32F) + _ctx_handle = cv.UMat.context() # obtain context handle + _queue_handle = cv.UMat.queue() # obtain queue handle + _a_handle = a_um.handle(cv.ACCESS_READ) # obtain buffer handle _offset = a_um.offset # obtain buffer offset def test_umat_matching(self): img1 = self.get_sample("samples/data/right01.jpg") img2 = self.get_sample("samples/data/right02.jpg") - orb = cv2.ORB_create() + orb = cv.ORB_create() - img1, img2 = cv2.UMat(img1), cv2.UMat(img2) + img1, img2 = cv.UMat(img1), cv.UMat(img2) ps1, descs_umat1 = orb.detectAndCompute(img1, None) ps2, descs_umat2 = orb.detectAndCompute(img2, None) - self.assertIsInstance(descs_umat1, cv2.UMat) - self.assertIsInstance(descs_umat2, cv2.UMat) + self.assertIsInstance(descs_umat1, cv.UMat) + self.assertIsInstance(descs_umat2, cv.UMat) self.assertGreater(len(ps1), 0) self.assertGreater(len(ps2), 0) - bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) + bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True) res_umats = bf.match(descs_umat1, descs_umat2) res = bf.match(descs_umat1.get(), descs_umat2.get()) @@ -52,8 +52,8 @@ class UMat(NewOpenCVTests): self.assertEqual(len(res_umats), len(res)) def test_umat_optical_flow(self): - img1 = self.get_sample("samples/data/right01.jpg", cv2.IMREAD_GRAYSCALE) - img2 = self.get_sample("samples/data/right02.jpg", cv2.IMREAD_GRAYSCALE) + img1 = self.get_sample("samples/data/right01.jpg", cv.IMREAD_GRAYSCALE) + img2 = self.get_sample("samples/data/right02.jpg", cv.IMREAD_GRAYSCALE) # Note, that if you want to see performance boost by OCL implementation - you need enough data # For example you can increase maxCorners param to 10000 and increase img1 and img2 in such way: # img = np.hstack([np.vstack([img] * 6)] * 6) @@ -63,19 +63,19 @@ class UMat(NewOpenCVTests): minDistance=7, blockSize=7) - p0 = cv2.goodFeaturesToTrack(img1, mask=None, **feature_params) - p0_umat = cv2.goodFeaturesToTrack(cv2.UMat(img1), mask=None, **feature_params) + p0 = cv.goodFeaturesToTrack(img1, mask=None, **feature_params) + p0_umat = cv.goodFeaturesToTrack(cv.UMat(img1), mask=None, **feature_params) self.assertEqual(p0_umat.get().shape, p0.shape) p0 = np.array(sorted(p0, key=lambda p: tuple(p[0]))) - p0_umat = cv2.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0])))) + p0_umat = cv.UMat(np.array(sorted(p0_umat.get(), key=lambda p: tuple(p[0])))) self.assertTrue(np.allclose(p0_umat.get(), p0)) - _p1_mask_err = cv2.calcOpticalFlowPyrLK(img1, img2, p0, None) + _p1_mask_err = cv.calcOpticalFlowPyrLK(img1, img2, p0, None) - _p1_mask_err_umat0 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, img2, p0_umat, None)) - _p1_mask_err_umat1 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(cv2.UMat(img1), img2, p0_umat, None)) - _p1_mask_err_umat2 = map(cv2.UMat.get, cv2.calcOpticalFlowPyrLK(img1, cv2.UMat(img2), p0_umat, None)) + _p1_mask_err_umat0 = map(cv.UMat.get, cv.calcOpticalFlowPyrLK(img1, img2, p0_umat, None)) + _p1_mask_err_umat1 = map(cv.UMat.get, cv.calcOpticalFlowPyrLK(cv.UMat(img1), img2, p0_umat, None)) + _p1_mask_err_umat2 = map(cv.UMat.get, cv.calcOpticalFlowPyrLK(img1, cv.UMat(img2), p0_umat, None)) # # results of OCL optical flow differs from CPU implementation, so result can not be easily compared # 
for p1_mask_err_umat in [p1_mask_err_umat0, p1_mask_err_umat1, p1_mask_err_umat2]: diff --git a/modules/python/test/test_watershed.py b/modules/python/test/test_watershed.py index 9e424e0..b3f72c1 100644 --- a/modules/python/test/test_watershed.py +++ b/modules/python/test/test_watershed.py @@ -8,7 +8,7 @@ Watershed segmentation test from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from tests_common import NewOpenCVTests @@ -23,14 +23,14 @@ class watershed_test(NewOpenCVTests): self.assertEqual(0, 1, 'Missing test data') colors = np.int32( list(np.ndindex(3, 3, 3)) ) * 122 - cv2.watershed(img, np.int32(markers)) + cv.watershed(img, np.int32(markers)) segments = colors[np.maximum(markers, 0)] if refSegments is None: refSegments = segments.copy() - cv2.imwrite(self.extraTestDataPath + '/cv/watershed/wshed_segments.png', refSegments) + cv.imwrite(self.extraTestDataPath + '/cv/watershed/wshed_segments.png', refSegments) - self.assertLess(cv2.norm(segments - refSegments, cv2.NORM_L1) / 255.0, 50) + self.assertLess(cv.norm(segments - refSegments, cv.NORM_L1) / 255.0, 50) if __name__ == '__main__': NewOpenCVTests.bootstrap() diff --git a/modules/python/test/tests_common.py b/modules/python/test/tests_common.py index 3356838..93c7b2f 100644 --- a/modules/python/test/tests_common.py +++ b/modules/python/test/tests_common.py @@ -10,7 +10,7 @@ import random import argparse import numpy as np -import cv2 +import cv2 as cv # Python 3 moved urlopen to urllib.requests try: @@ -26,7 +26,7 @@ class NewOpenCVTests(unittest.TestCase): # github repository url repoUrl = 'https://raw.github.com/opencv/opencv/master' - def get_sample(self, filename, iscolor = cv2.IMREAD_COLOR): + def get_sample(self, filename, iscolor = cv.IMREAD_COLOR): if not filename in self.image_cache: filedata = None if NewOpenCVTests.repoPath is not None: @@ -41,11 +41,11 @@ class NewOpenCVTests(unittest.TestCase): filedata = f.read() if filedata is None: return None#filedata = urlopen(NewOpenCVTests.repoUrl + '/' + filename).read() - self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor) + self.image_cache[filename] = cv.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor) return self.image_cache[filename] def setUp(self): - cv2.setRNGSeed(10) + cv.setRNGSeed(10) self.image_cache = {} def hashimg(self, im): @@ -73,7 +73,7 @@ class NewOpenCVTests(unittest.TestCase): parser.add_argument('--data', help=' use data files from local folder (path to folder), ' 'if not set, data files will be downloaded from docs.opencv.org') args, other = parser.parse_known_args() - print("Testing OpenCV", cv2.__version__) + print("Testing OpenCV", cv.__version__) print("Local repo path:", args.repo) NewOpenCVTests.repoPath = args.repo try: @@ -93,8 +93,8 @@ def intersectionRate(s1, s2): x1, y1, x2, y2 = s2 s2 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]]) - area, _intersection = cv2.intersectConvexConvex(s1, s2) - return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(s2)) + area, _intersection = cv.intersectConvexConvex(s1, s2) + return 2 * area / (cv.contourArea(s1) + cv.contourArea(s2)) def isPointInRect(p, rect): if rect[0] <= p[0] and rect[1] <=p[1] and p[0] <= rect[2] and p[1] <= rect[3]: diff --git a/modules/python/test/tst_scene_render.py b/modules/python/test/tst_scene_render.py index 25d5a40..2dd6309 100644 --- a/modules/python/test/tst_scene_render.py +++ b/modules/python/test/tst_scene_render.py @@ -7,7 +7,7 @@ from __future__ import print_function import numpy 
as np from numpy import pi, sin, cos -import cv2 +import cv2 as cv defaultSize = 512 @@ -88,14 +88,14 @@ class TestSceneRender(): self.currentRect = self.initialRect + np.int( 30*cos(self.time) + 50*sin(self.time/3)) if self.deformation: self.currentRect[1:3] += int(self.h/20*cos(self.time)) - cv2.fillConvexPoly(img, self.currentRect, (0, 0, 255)) + cv.fillConvexPoly(img, self.currentRect, (0, 0, 255)) self.time += self.timeStep if self.noise: noise = np.zeros(self.sceneBg.shape, np.int8) - cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) - img = cv2.add(img, noise, dtype=cv2.CV_8UC3) + cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) + img = cv.add(img, noise, dtype=cv.CV_8UC3) return img def resetTime(self): @@ -104,16 +104,16 @@ class TestSceneRender(): if __name__ == '__main__': - backGr = cv2.imread('../../../samples/data/lena.jpg') + backGr = cv.imread('../../../samples/data/lena.jpg') render = TestSceneRender(backGr, noise = 0.5) while True: img = render.getNextFrame() - cv2.imshow('img', img) + cv.imshow('img', img) - ch = cv2.waitKey(3) + ch = cv.waitKey(3) if ch == 27: break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/dnn/googlenet_python.py b/samples/dnn/googlenet_python.py index 0a5caaa..81ba146 100644 --- a/samples/dnn/googlenet_python.py +++ b/samples/dnn/googlenet_python.py @@ -1,6 +1,6 @@ from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from cv2 import dnn import timeit @@ -11,7 +11,7 @@ def get_class_list(): with open('synset_words.txt', 'rt') as f: return [x[x.find(" ") + 1:] for x in f] -blob = dnn.blobFromImage(cv2.imread('space_shuttle.jpg'), 1, (224, 224), (104, 117, 123), False) +blob = dnn.blobFromImage(cv.imread('space_shuttle.jpg'), 1, (224, 224), (104, 117, 123), False) print("Input:", blob.shape, blob.dtype) net = dnn.readNetFromCaffe('bvlc_googlenet.prototxt', 'bvlc_googlenet.caffemodel') diff --git a/samples/python/_coverage.py b/samples/python/_coverage.py index 5ec3e18..62dd7aa 100755 --- a/samples/python/_coverage.py +++ b/samples/python/_coverage.py @@ -8,11 +8,11 @@ Utility for measuring python opencv API coverage by samples. 
from __future__ import print_function from glob import glob -import cv2 +import cv2 as cv import re if __name__ == '__main__': - cv2_callable = set(['cv2.'+name for name in dir(cv2) if callable( getattr(cv2, name) )]) + cv2_callable = set(['cv.'+name for name in dir(cv) if callable( getattr(cv, name) )]) found = set() for fn in glob('*.py'): @@ -26,4 +26,4 @@ if __name__ == '__main__': f.write('\n'.join(sorted(cv2_unused))) r = 1.0 * len(cv2_used) / len(cv2_callable) - print('\ncv2 api coverage: %d / %d (%.1f%%)' % ( len(cv2_used), len(cv2_callable), r*100 )) + print('\ncv api coverage: %d / %d (%.1f%%)' % ( len(cv2_used), len(cv2_callable), r*100 )) diff --git a/samples/python/asift.py b/samples/python/asift.py index f2b0e79..5a05021 100755 --- a/samples/python/asift.py +++ b/samples/python/asift.py @@ -23,7 +23,7 @@ USAGE from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv # built-in modules import itertools as it @@ -51,18 +51,18 @@ def affine_skew(tilt, phi, img, mask=None): A = np.float32([[c,-s], [ s, c]]) corners = [[0, 0], [w, 0], [w, h], [0, h]] tcorners = np.int32( np.dot(corners, A.T) ) - x, y, w, h = cv2.boundingRect(tcorners.reshape(1,-1,2)) + x, y, w, h = cv.boundingRect(tcorners.reshape(1,-1,2)) A = np.hstack([A, [[-x], [-y]]]) - img = cv2.warpAffine(img, A, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE) + img = cv.warpAffine(img, A, (w, h), flags=cv.INTER_LINEAR, borderMode=cv.BORDER_REPLICATE) if tilt != 1.0: s = 0.8*np.sqrt(tilt*tilt-1) - img = cv2.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01) - img = cv2.resize(img, (0, 0), fx=1.0/tilt, fy=1.0, interpolation=cv2.INTER_NEAREST) + img = cv.GaussianBlur(img, (0, 0), sigmaX=s, sigmaY=0.01) + img = cv.resize(img, (0, 0), fx=1.0/tilt, fy=1.0, interpolation=cv.INTER_NEAREST) A[0] /= tilt if phi != 0.0 or tilt != 1.0: h, w = img.shape[:2] - mask = cv2.warpAffine(mask, A, (w, h), flags=cv2.INTER_NEAREST) - Ai = cv2.invertAffineTransform(A) + mask = cv.warpAffine(mask, A, (w, h), flags=cv.INTER_NEAREST) + Ai = cv.invertAffineTransform(A) return img, mask, Ai @@ -119,8 +119,8 @@ if __name__ == '__main__': fn1 = '../data/aero1.jpg' fn2 = '../data/aero3.jpg' - img1 = cv2.imread(fn1, 0) - img2 = cv2.imread(fn2, 0) + img1 = cv.imread(fn1, 0) + img2 = cv.imread(fn2, 0) detector, matcher = init_feature(feature_name) if img1 is None: @@ -137,7 +137,7 @@ if __name__ == '__main__': print('using', feature_name) - pool=ThreadPool(processes = cv2.getNumberOfCPUs()) + pool=ThreadPool(processes = cv.getNumberOfCPUs()) kp1, desc1 = affine_detect(detector, img1, pool=pool) kp2, desc2 = affine_detect(detector, img2, pool=pool) print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))) @@ -147,7 +147,7 @@ if __name__ == '__main__': raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2 p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches) if len(p1) >= 4: - H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0) + H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0) print('%d / %d inliers/matched' % (np.sum(status), len(status))) # do not draw outliers (there will be a lot of them) kp_pairs = [kpp for kpp, flag in zip(kp_pairs, status) if flag] @@ -159,5 +159,5 @@ if __name__ == '__main__': match_and_draw('affine find_obj') - cv2.waitKey() - cv2.destroyAllWindows() + cv.waitKey() + cv.destroyAllWindows() diff --git a/samples/python/browse.py b/samples/python/browse.py index a9d7ad2..261d154 100755 --- a/samples/python/browse.py +++ b/samples/python/browse.py 
@@ -21,7 +21,7 @@ if PY3: xrange = range import numpy as np -import cv2 +import cv2 as cv # built-in modules import sys @@ -34,7 +34,7 @@ if __name__ == '__main__': if len(sys.argv) > 1: fn = sys.argv[1] print('loading %s ...' % fn) - img = cv2.imread(fn) + img = cv.imread(fn) if img is None: print('Failed to load fn:', fn) sys.exit(1) @@ -45,21 +45,21 @@ if __name__ == '__main__': img = np.zeros((sz, sz), np.uint8) track = np.cumsum(np.random.rand(500000, 2)-0.5, axis=0) track = np.int32(track*10 + (sz/2, sz/2)) - cv2.polylines(img, [track], 0, 255, 1, cv2.LINE_AA) + cv.polylines(img, [track], 0, 255, 1, cv.LINE_AA) small = img for i in xrange(3): - small = cv2.pyrDown(small) + small = cv.pyrDown(small) def onmouse(event, x, y, flags, param): h, _w = img.shape[:2] h1, _w1 = small.shape[:2] x, y = 1.0*x*h/h1, 1.0*y*h/h1 - zoom = cv2.getRectSubPix(img, (800, 600), (x+0.5, y+0.5)) - cv2.imshow('zoom', zoom) + zoom = cv.getRectSubPix(img, (800, 600), (x+0.5, y+0.5)) + cv.imshow('zoom', zoom) - cv2.imshow('preview', small) - cv2.setMouseCallback('preview', onmouse) - cv2.waitKey() - cv2.destroyAllWindows() + cv.imshow('preview', small) + cv.setMouseCallback('preview', onmouse) + cv.waitKey() + cv.destroyAllWindows() diff --git a/samples/python/calibrate.py b/samples/python/calibrate.py index a5c4a91..1401912 100755 --- a/samples/python/calibrate.py +++ b/samples/python/calibrate.py @@ -17,7 +17,7 @@ default values: from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv # local modules from common import splitfn @@ -53,27 +53,27 @@ if __name__ == '__main__': obj_points = [] img_points = [] - h, w = cv2.imread(img_names[0], 0).shape[:2] # TODO: use imquery call to retrieve results + h, w = cv.imread(img_names[0], 0).shape[:2] # TODO: use imquery call to retrieve results def processImage(fn): print('processing %s... ' % fn) - img = cv2.imread(fn, 0) + img = cv.imread(fn, 0) if img is None: print("Failed to load", fn) return None assert w == img.shape[1] and h == img.shape[0], ("size: %d x %d ... 
" % (img.shape[1], img.shape[0])) - found, corners = cv2.findChessboardCorners(img, pattern_size) + found, corners = cv.findChessboardCorners(img, pattern_size) if found: - term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1) - cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term) + term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1) + cv.cornerSubPix(img, corners, (5, 5), (-1, -1), term) if debug_dir: - vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - cv2.drawChessboardCorners(vis, pattern_size, corners, found) + vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR) + cv.drawChessboardCorners(vis, pattern_size, corners, found) path, name, ext = splitfn(fn) outfile = os.path.join(debug_dir, name + '_chess.png') - cv2.imwrite(outfile, vis) + cv.imwrite(outfile, vis) if not found: print('chessboard not found') @@ -97,7 +97,7 @@ if __name__ == '__main__': obj_points.append(pattern_points) # calculate camera distortion - rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), None, None) + rms, camera_matrix, dist_coefs, rvecs, tvecs = cv.calibrateCamera(obj_points, img_points, (w, h), None, None) print("\nRMS:", rms) print("camera matrix:\n", camera_matrix) @@ -110,20 +110,20 @@ if __name__ == '__main__': img_found = os.path.join(debug_dir, name + '_chess.png') outfile = os.path.join(debug_dir, name + '_undistorted.png') - img = cv2.imread(img_found) + img = cv.imread(img_found) if img is None: continue h, w = img.shape[:2] - newcameramtx, roi = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), 1, (w, h)) + newcameramtx, roi = cv.getOptimalNewCameraMatrix(camera_matrix, dist_coefs, (w, h), 1, (w, h)) - dst = cv2.undistort(img, camera_matrix, dist_coefs, None, newcameramtx) + dst = cv.undistort(img, camera_matrix, dist_coefs, None, newcameramtx) # crop and save the image x, y, w, h = roi dst = dst[y:y+h, x:x+w] print('Undistorted image written to: %s' % outfile) - cv2.imwrite(outfile, dst) + cv.imwrite(outfile, dst) - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/camshift.py b/samples/python/camshift.py index 48d5dae..5805512 100755 --- a/samples/python/camshift.py +++ b/samples/python/camshift.py @@ -31,7 +31,7 @@ if PY3: xrange = range import numpy as np -import cv2 +import cv2 as cv # local module import video @@ -42,8 +42,8 @@ class App(object): def __init__(self, video_src): self.cam = video.create_capture(video_src, presets['cube']) _ret, self.frame = self.cam.read() - cv2.namedWindow('camshift') - cv2.setMouseCallback('camshift', self.onmouse) + cv.namedWindow('camshift') + cv.setMouseCallback('camshift', self.onmouse) self.selection = None self.drag_start = None @@ -51,7 +51,7 @@ class App(object): self.track_window = None def onmouse(self, event, x, y, flags, param): - if event == cv2.EVENT_LBUTTONDOWN: + if event == cv.EVENT_LBUTTONDOWN: self.drag_start = (x, y) self.track_window = None if self.drag_start: @@ -60,7 +60,7 @@ class App(object): xmax = max(x, self.drag_start[0]) ymax = max(y, self.drag_start[1]) self.selection = (xmin, ymin, xmax, ymax) - if event == cv2.EVENT_LBUTTONUP: + if event == cv.EVENT_LBUTTONUP: self.drag_start = None self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin) @@ -70,52 +70,52 @@ class App(object): img = np.zeros((256, bin_count*bin_w, 3), np.uint8) for i in xrange(bin_count): h = int(self.hist[i]) - cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1) - img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR) - 
cv2.imshow('hist', img) + cv.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1) + img = cv.cvtColor(img, cv.COLOR_HSV2BGR) + cv.imshow('hist', img) def run(self): while True: _ret, self.frame = self.cam.read() vis = self.frame.copy() - hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) - mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.))) + hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV) + mask = cv.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.))) if self.selection: x0, y0, x1, y1 = self.selection hsv_roi = hsv[y0:y1, x0:x1] mask_roi = mask[y0:y1, x0:x1] - hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] ) - cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX) + hist = cv.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] ) + cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX) self.hist = hist.reshape(-1) self.show_hist() vis_roi = vis[y0:y1, x0:x1] - cv2.bitwise_not(vis_roi, vis_roi) + cv.bitwise_not(vis_roi, vis_roi) vis[mask == 0] = 0 if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0: self.selection = None - prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1) + prob = cv.calcBackProject([hsv], [0], self.hist, [0, 180], 1) prob &= mask - term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 ) - track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit) + term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 ) + track_box, self.track_window = cv.CamShift(prob, self.track_window, term_crit) if self.show_backproj: vis[:] = prob[...,np.newaxis] try: - cv2.ellipse(vis, track_box, (0, 0, 255), 2) + cv.ellipse(vis, track_box, (0, 0, 255), 2) except: print(track_box) - cv2.imshow('camshift', vis) + cv.imshow('camshift', vis) - ch = cv2.waitKey(5) + ch = cv.waitKey(5) if ch == 27: break if ch == ord('b'): self.show_backproj = not self.show_backproj - cv2.destroyAllWindows() + cv.destroyAllWindows() if __name__ == '__main__': diff --git a/samples/python/coherence.py b/samples/python/coherence.py index a1e34b8..225fc13 100755 --- a/samples/python/coherence.py +++ b/samples/python/coherence.py @@ -18,7 +18,7 @@ if PY3: xrange = range import numpy as np -import cv2 +import cv2 as cv def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4): h, w = img.shape[:2] @@ -26,19 +26,19 @@ def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4): for i in xrange(iter_n): print(i) - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - eigen = cv2.cornerEigenValsAndVecs(gray, str_sigma, 3) + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + eigen = cv.cornerEigenValsAndVecs(gray, str_sigma, 3) eigen = eigen.reshape(h, w, 3, 2) # [[e1, e2], v1, v2] x, y = eigen[:,:,1,0], eigen[:,:,1,1] - gxx = cv2.Sobel(gray, cv2.CV_32F, 2, 0, ksize=sigma) - gxy = cv2.Sobel(gray, cv2.CV_32F, 1, 1, ksize=sigma) - gyy = cv2.Sobel(gray, cv2.CV_32F, 0, 2, ksize=sigma) + gxx = cv.Sobel(gray, cv.CV_32F, 2, 0, ksize=sigma) + gxy = cv.Sobel(gray, cv.CV_32F, 1, 1, ksize=sigma) + gyy = cv.Sobel(gray, cv.CV_32F, 0, 2, ksize=sigma) gvv = x*x*gxx + 2*x*y*gxy + y*y*gyy m = gvv < 0 - ero = cv2.erode(img, None) - dil = cv2.dilate(img, None) + ero = cv.erode(img, None) + dil = cv.dilate(img, None) img1 = ero img1[m] = dil[m] img = np.uint8(img*(1.0 - blend) + img1*blend) @@ -53,33 +53,33 @@ if __name__ == '__main__': except: fn = '../data/baboon.jpg' - src = cv2.imread(fn) + src = cv.imread(fn) def nothing(*argv): pass def 
update(): - sigma = cv2.getTrackbarPos('sigma', 'control')*2+1 - str_sigma = cv2.getTrackbarPos('str_sigma', 'control')*2+1 - blend = cv2.getTrackbarPos('blend', 'control') / 10.0 + sigma = cv.getTrackbarPos('sigma', 'control')*2+1 + str_sigma = cv.getTrackbarPos('str_sigma', 'control')*2+1 + blend = cv.getTrackbarPos('blend', 'control') / 10.0 print('sigma: %d str_sigma: %d blend_coef: %f' % (sigma, str_sigma, blend)) dst = coherence_filter(src, sigma=sigma, str_sigma = str_sigma, blend = blend) - cv2.imshow('dst', dst) + cv.imshow('dst', dst) - cv2.namedWindow('control', 0) - cv2.createTrackbar('sigma', 'control', 9, 15, nothing) - cv2.createTrackbar('blend', 'control', 7, 10, nothing) - cv2.createTrackbar('str_sigma', 'control', 9, 15, nothing) + cv.namedWindow('control', 0) + cv.createTrackbar('sigma', 'control', 9, 15, nothing) + cv.createTrackbar('blend', 'control', 7, 10, nothing) + cv.createTrackbar('str_sigma', 'control', 9, 15, nothing) print('Press SPACE to update the image\n') - cv2.imshow('src', src) + cv.imshow('src', src) update() while True: - ch = cv2.waitKey() + ch = cv.waitKey() if ch == ord(' '): update() if ch == 27: break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/color_histogram.py b/samples/python/color_histogram.py index c1e5d56..d997241 100755 --- a/samples/python/color_histogram.py +++ b/samples/python/color_histogram.py @@ -9,7 +9,7 @@ Keys: ''' import numpy as np -import cv2 +import cv2 as cv # built-in modules import sys @@ -24,16 +24,16 @@ if __name__ == '__main__': hsv_map[:,:,0] = h hsv_map[:,:,1] = s hsv_map[:,:,2] = 255 - hsv_map = cv2.cvtColor(hsv_map, cv2.COLOR_HSV2BGR) - cv2.imshow('hsv_map', hsv_map) + hsv_map = cv.cvtColor(hsv_map, cv.COLOR_HSV2BGR) + cv.imshow('hsv_map', hsv_map) - cv2.namedWindow('hist', 0) + cv.namedWindow('hist', 0) hist_scale = 10 def set_scale(val): global hist_scale hist_scale = val - cv2.createTrackbar('scale', 'hist', hist_scale, 32, set_scale) + cv.createTrackbar('scale', 'hist', hist_scale, 32, set_scale) try: fn = sys.argv[1] @@ -43,20 +43,20 @@ if __name__ == '__main__': while True: flag, frame = cam.read() - cv2.imshow('camera', frame) + cv.imshow('camera', frame) - small = cv2.pyrDown(frame) + small = cv.pyrDown(frame) - hsv = cv2.cvtColor(small, cv2.COLOR_BGR2HSV) + hsv = cv.cvtColor(small, cv.COLOR_BGR2HSV) dark = hsv[...,2] < 32 hsv[dark] = 0 - h = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256]) + h = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256]) h = np.clip(h*0.005*hist_scale, 0, 1) vis = hsv_map*h[:,:,np.newaxis] / 255.0 - cv2.imshow('hist', vis) + cv.imshow('hist', vis) - ch = cv2.waitKey(1) + ch = cv.waitKey(1) if ch == 27: break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/common.py b/samples/python/common.py index 4f859c1..85cda62 100755 --- a/samples/python/common.py +++ b/samples/python/common.py @@ -13,7 +13,7 @@ if PY3: from functools import reduce import numpy as np -import cv2 +import cv2 as cv # built-in modules import os @@ -71,7 +71,7 @@ def lookat(eye, target, up = (0, 0, 1)): return R, tvec def mtx2rvec(R): - w, u, vt = cv2.SVDecomp(R - np.eye(3)) + w, u, vt = cv.SVDecomp(R - np.eye(3)) p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0]) c = np.dot(vt[0], p) s = np.dot(vt[1], p) @@ -80,8 +80,8 @@ def mtx2rvec(R): def draw_str(dst, target, s): x, y = target - cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.LINE_AA) - cv2.putText(dst, s, (x, y), 
cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.LINE_AA) + cv.putText(dst, s, (x+1, y+1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv.LINE_AA) + cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA) class Sketcher: def __init__(self, windowname, dests, colors_func): @@ -91,21 +91,21 @@ class Sketcher: self.colors_func = colors_func self.dirty = False self.show() - cv2.setMouseCallback(self.windowname, self.on_mouse) + cv.setMouseCallback(self.windowname, self.on_mouse) def show(self): - cv2.imshow(self.windowname, self.dests[0]) + cv.imshow(self.windowname, self.dests[0]) def on_mouse(self, event, x, y, flags, param): pt = (x, y) - if event == cv2.EVENT_LBUTTONDOWN: + if event == cv.EVENT_LBUTTONDOWN: self.prev_pt = pt - elif event == cv2.EVENT_LBUTTONUP: + elif event == cv.EVENT_LBUTTONUP: self.prev_pt = None - if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON: + if self.prev_pt and flags & cv.EVENT_FLAG_LBUTTON: for dst, color in zip(self.dests, self.colors_func()): - cv2.line(dst, self.prev_pt, pt, color, 5) + cv.line(dst, self.prev_pt, pt, color, 5) self.dirty = True self.prev_pt = pt self.show() @@ -140,7 +140,7 @@ def nothing(*arg, **kw): pass def clock(): - return cv2.getTickCount() / cv2.getTickFrequency() + return cv.getTickCount() / cv.getTickFrequency() @contextmanager def Timer(msg): @@ -166,16 +166,16 @@ class RectSelector: def __init__(self, win, callback): self.win = win self.callback = callback - cv2.setMouseCallback(win, self.onmouse) + cv.setMouseCallback(win, self.onmouse) self.drag_start = None self.drag_rect = None def onmouse(self, event, x, y, flags, param): x, y = np.int16([x, y]) # BUG - if event == cv2.EVENT_LBUTTONDOWN: + if event == cv.EVENT_LBUTTONDOWN: self.drag_start = (x, y) return if self.drag_start: - if flags & cv2.EVENT_FLAG_LBUTTON: + if flags & cv.EVENT_FLAG_LBUTTON: xo, yo = self.drag_start x0, y0 = np.minimum([xo, yo], [x, y]) x1, y1 = np.maximum([xo, yo], [x, y]) @@ -192,7 +192,7 @@ class RectSelector: if not self.drag_rect: return False x0, y0, x1, y1 = self.drag_rect - cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2) + cv.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2) return True @property def dragging(self): @@ -234,4 +234,4 @@ def mdot(*args): def draw_keypoints(vis, keypoints, color = (0, 255, 255)): for kp in keypoints: x, y = kp.pt - cv2.circle(vis, (int(x), int(y)), 2, color) + cv.circle(vis, (int(x), int(y)), 2, color) diff --git a/samples/python/contours.py b/samples/python/contours.py index 73b9f2f..69b46d4 100755 --- a/samples/python/contours.py +++ b/samples/python/contours.py @@ -18,7 +18,7 @@ if PY3: xrange = range import numpy as np -import cv2 +import cv2 as cv def make_image(): img = np.zeros((500, 500), np.uint8) @@ -33,19 +33,19 @@ def make_image(): c, s = np.cos(angle), np.sin(angle) x1, y1 = np.int32([dx+100+j*10-80*c, dy+100-90*s]) x2, y2 = np.int32([dx+100+j*10-30*c, dy+100-30*s]) - cv2.line(img, (x1, y1), (x2, y2), white) + cv.line(img, (x1, y1), (x2, y2), white) - cv2.ellipse( img, (dx+150, dy+100), (100,70), 0, 0, 360, white, -1 ) - cv2.ellipse( img, (dx+115, dy+70), (30,20), 0, 0, 360, black, -1 ) - cv2.ellipse( img, (dx+185, dy+70), (30,20), 0, 0, 360, black, -1 ) - cv2.ellipse( img, (dx+115, dy+70), (15,15), 0, 0, 360, white, -1 ) - cv2.ellipse( img, (dx+185, dy+70), (15,15), 0, 0, 360, white, -1 ) - cv2.ellipse( img, (dx+115, dy+70), (5,5), 0, 0, 360, black, -1 ) - cv2.ellipse( img, (dx+185, dy+70), (5,5), 0, 0, 360, black, -1 ) - 
cv2.ellipse( img, (dx+150, dy+100), (10,5), 0, 0, 360, black, -1 ) - cv2.ellipse( img, (dx+150, dy+150), (40,10), 0, 0, 360, black, -1 ) - cv2.ellipse( img, (dx+27, dy+100), (20,35), 0, 0, 360, white, -1 ) - cv2.ellipse( img, (dx+273, dy+100), (20,35), 0, 0, 360, white, -1 ) + cv.ellipse( img, (dx+150, dy+100), (100,70), 0, 0, 360, white, -1 ) + cv.ellipse( img, (dx+115, dy+70), (30,20), 0, 0, 360, black, -1 ) + cv.ellipse( img, (dx+185, dy+70), (30,20), 0, 0, 360, black, -1 ) + cv.ellipse( img, (dx+115, dy+70), (15,15), 0, 0, 360, white, -1 ) + cv.ellipse( img, (dx+185, dy+70), (15,15), 0, 0, 360, white, -1 ) + cv.ellipse( img, (dx+115, dy+70), (5,5), 0, 0, 360, black, -1 ) + cv.ellipse( img, (dx+185, dy+70), (5,5), 0, 0, 360, black, -1 ) + cv.ellipse( img, (dx+150, dy+100), (10,5), 0, 0, 360, black, -1 ) + cv.ellipse( img, (dx+150, dy+150), (40,10), 0, 0, 360, black, -1 ) + cv.ellipse( img, (dx+27, dy+100), (20,35), 0, 0, 360, white, -1 ) + cv.ellipse( img, (dx+273, dy+100), (20,35), 0, 0, 360, white, -1 ) return img if __name__ == '__main__': @@ -54,17 +54,17 @@ if __name__ == '__main__': img = make_image() h, w = img.shape[:2] - _, contours0, hierarchy = cv2.findContours( img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) - contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in contours0] + _, contours0, hierarchy = cv.findContours( img.copy(), cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) + contours = [cv.approxPolyDP(cnt, 3, True) for cnt in contours0] def update(levels): vis = np.zeros((h, w, 3), np.uint8) levels = levels - 3 - cv2.drawContours( vis, contours, (-1, 2)[levels <= 0], (128,255,255), - 3, cv2.LINE_AA, hierarchy, abs(levels) ) - cv2.imshow('contours', vis) + cv.drawContours( vis, contours, (-1, 2)[levels <= 0], (128,255,255), + 3, cv.LINE_AA, hierarchy, abs(levels) ) + cv.imshow('contours', vis) update(3) - cv2.createTrackbar( "levels+3", "contours", 3, 7, update ) - cv2.imshow('image', img) - cv2.waitKey() - cv2.destroyAllWindows() + cv.createTrackbar( "levels+3", "contours", 3, 7, update ) + cv.imshow('image', img) + cv.waitKey() + cv.destroyAllWindows() diff --git a/samples/python/deconvolution.py b/samples/python/deconvolution.py index 74f51a7..d34ad6b 100755 --- a/samples/python/deconvolution.py +++ b/samples/python/deconvolution.py @@ -34,7 +34,7 @@ Examples: from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv # local module from common import nothing @@ -42,8 +42,8 @@ from common import nothing def blur_edge(img, d=31): h, w = img.shape[:2] - img_pad = cv2.copyMakeBorder(img, d, d, d, d, cv2.BORDER_WRAP) - img_blur = cv2.GaussianBlur(img_pad, (2*d+1, 2*d+1), -1)[d:-d,d:-d] + img_pad = cv.copyMakeBorder(img, d, d, d, d, cv.BORDER_WRAP) + img_blur = cv.GaussianBlur(img_pad, (2*d+1, 2*d+1), -1)[d:-d,d:-d] y, x = np.indices((h, w)) dist = np.dstack([x, w-x-1, y, h-y-1]).min(-1) w = np.minimum(np.float32(dist)/d, 1.0) @@ -55,12 +55,12 @@ def motion_kernel(angle, d, sz=65): A = np.float32([[c, -s, 0], [s, c, 0]]) sz2 = sz // 2 A[:,2] = (sz2, sz2) - np.dot(A[:,:2], ((d-1)*0.5, 0)) - kern = cv2.warpAffine(kern, A, (sz, sz), flags=cv2.INTER_CUBIC) + kern = cv.warpAffine(kern, A, (sz, sz), flags=cv.INTER_CUBIC) return kern def defocus_kernel(d, sz=65): kern = np.zeros((sz, sz), np.uint8) - cv2.circle(kern, (sz, sz), d, 255, -1, cv2.LINE_AA, shift=1) + cv.circle(kern, (sz, sz), d, 255, -1, cv.LINE_AA, shift=1) kern = np.float32(kern) / 255.0 return kern @@ -77,52 +77,52 @@ if __name__ == '__main__': win = 'deconvolution' - img = cv2.imread(fn, 0) + 
img = cv.imread(fn, 0) if img is None: print('Failed to load fn1:', fn1) sys.exit(1) img = np.float32(img)/255.0 - cv2.imshow('input', img) + cv.imshow('input', img) img = blur_edge(img) - IMG = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT) + IMG = cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT) defocus = '--circle' in opts def update(_): - ang = np.deg2rad( cv2.getTrackbarPos('angle', win) ) - d = cv2.getTrackbarPos('d', win) - noise = 10**(-0.1*cv2.getTrackbarPos('SNR (db)', win)) + ang = np.deg2rad( cv.getTrackbarPos('angle', win) ) + d = cv.getTrackbarPos('d', win) + noise = 10**(-0.1*cv.getTrackbarPos('SNR (db)', win)) if defocus: psf = defocus_kernel(d) else: psf = motion_kernel(ang, d) - cv2.imshow('psf', psf) + cv.imshow('psf', psf) psf /= psf.sum() psf_pad = np.zeros_like(img) kh, kw = psf.shape psf_pad[:kh, :kw] = psf - PSF = cv2.dft(psf_pad, flags=cv2.DFT_COMPLEX_OUTPUT, nonzeroRows = kh) + PSF = cv.dft(psf_pad, flags=cv.DFT_COMPLEX_OUTPUT, nonzeroRows = kh) PSF2 = (PSF**2).sum(-1) iPSF = PSF / (PSF2 + noise)[...,np.newaxis] - RES = cv2.mulSpectrums(IMG, iPSF, 0) - res = cv2.idft(RES, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT ) + RES = cv.mulSpectrums(IMG, iPSF, 0) + res = cv.idft(RES, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT ) res = np.roll(res, -kh//2, 0) res = np.roll(res, -kw//2, 1) - cv2.imshow(win, res) + cv.imshow(win, res) - cv2.namedWindow(win) - cv2.namedWindow('psf', 0) - cv2.createTrackbar('angle', win, int(opts.get('--angle', 135)), 180, update) - cv2.createTrackbar('d', win, int(opts.get('--d', 22)), 50, update) - cv2.createTrackbar('SNR (db)', win, int(opts.get('--snr', 25)), 50, update) + cv.namedWindow(win) + cv.namedWindow('psf', 0) + cv.createTrackbar('angle', win, int(opts.get('--angle', 135)), 180, update) + cv.createTrackbar('d', win, int(opts.get('--d', 22)), 50, update) + cv.createTrackbar('SNR (db)', win, int(opts.get('--snr', 25)), 50, update) update(None) while True: - ch = cv2.waitKey() + ch = cv.waitKey() if ch == 27: break if ch == ord(' '): diff --git a/samples/python/dft.py b/samples/python/dft.py index 4437aad..51206cf 100755 --- a/samples/python/dft.py +++ b/samples/python/dft.py @@ -11,7 +11,7 @@ USAGE: # Python 2/3 compatibility from __future__ import print_function -import cv2 +import cv2 as cv import numpy as np import sys @@ -65,47 +65,47 @@ def shift_dft(src, dst=None): if __name__ == "__main__": if len(sys.argv) > 1: - im = cv2.imread(sys.argv[1]) + im = cv.imread(sys.argv[1]) else: - im = cv2.imread('../data/baboon.jpg') + im = cv.imread('../data/baboon.jpg') print("usage : python dft.py ") # convert to grayscale - im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) + im = cv.cvtColor(im, cv.COLOR_BGR2GRAY) h, w = im.shape[:2] realInput = im.astype(np.float64) # perform an optimally sized dft - dft_M = cv2.getOptimalDFTSize(w) - dft_N = cv2.getOptimalDFTSize(h) + dft_M = cv.getOptimalDFTSize(w) + dft_N = cv.getOptimalDFTSize(h) # copy A to dft_A and pad dft_A with zeros dft_A = np.zeros((dft_N, dft_M, 2), dtype=np.float64) dft_A[:h, :w, 0] = realInput # no need to pad bottom part of dft_A with zeros because of - # use of nonzeroRows parameter in cv2.dft() - cv2.dft(dft_A, dst=dft_A, nonzeroRows=h) + # use of nonzeroRows parameter in cv.dft() + cv.dft(dft_A, dst=dft_A, nonzeroRows=h) - cv2.imshow("win", im) + cv.imshow("win", im) # Split fourier into real and imaginary parts - image_Re, image_Im = cv2.split(dft_A) + image_Re, image_Im = cv.split(dft_A) # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2) - magnitude = cv2.sqrt(image_Re**2.0 + 
image_Im**2.0) + magnitude = cv.sqrt(image_Re**2.0 + image_Im**2.0) # Compute log(1 + Mag) - log_spectrum = cv2.log(1.0 + magnitude) + log_spectrum = cv.log(1.0 + magnitude) # Rearrange the quadrants of Fourier image so that the origin is at # the image center shift_dft(log_spectrum, log_spectrum) # normalize and display the results as rgb - cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.NORM_MINMAX) - cv2.imshow("magnitude", log_spectrum) + cv.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv.NORM_MINMAX) + cv.imshow("magnitude", log_spectrum) - cv2.waitKey(0) - cv2.destroyAllWindows() + cv.waitKey(0) + cv.destroyAllWindows() diff --git a/samples/python/digits.py b/samples/python/digits.py index bac67d6..f9f1e0b 100755 --- a/samples/python/digits.py +++ b/samples/python/digits.py @@ -30,7 +30,7 @@ from __future__ import print_function # built-in modules from multiprocessing.pool import ThreadPool -import cv2 +import cv2 as cv import numpy as np from numpy.linalg import norm @@ -55,18 +55,18 @@ def split2d(img, cell_size, flatten=True): def load_digits(fn): print('loading "%s" ...' % fn) - digits_img = cv2.imread(fn, 0) + digits_img = cv.imread(fn, 0) digits = split2d(digits_img, (SZ, SZ)) labels = np.repeat(np.arange(CLASS_N), len(digits)/CLASS_N) return digits, labels def deskew(img): - m = cv2.moments(img) + m = cv.moments(img) if abs(m['mu02']) < 1e-2: return img.copy() skew = m['mu11']/m['mu02'] M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]]) - img = cv2.warpAffine(img, M, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR) + img = cv.warpAffine(img, M, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR) return img class StatModel(object): @@ -78,10 +78,10 @@ class StatModel(object): class KNearest(StatModel): def __init__(self, k = 3): self.k = k - self.model = cv2.ml.KNearest_create() + self.model = cv.ml.KNearest_create() def train(self, samples, responses): - self.model.train(samples, cv2.ml.ROW_SAMPLE, responses) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) def predict(self, samples): _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, self.k) @@ -89,14 +89,14 @@ class KNearest(StatModel): class SVM(StatModel): def __init__(self, C = 1, gamma = 0.5): - self.model = cv2.ml.SVM_create() + self.model = cv.ml.SVM_create() self.model.setGamma(gamma) self.model.setC(C) - self.model.setKernel(cv2.ml.SVM_RBF) - self.model.setType(cv2.ml.SVM_C_SVC) + self.model.setKernel(cv.ml.SVM_RBF) + self.model.setType(cv.ml.SVM_C_SVC) def train(self, samples, responses): - self.model.train(samples, cv2.ml.ROW_SAMPLE, responses) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) def predict(self, samples): return self.model.predict(samples)[1].ravel() @@ -116,7 +116,7 @@ def evaluate_model(model, digits, samples, labels): vis = [] for img, flag in zip(digits, resp == labels): - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + img = cv.cvtColor(img, cv.COLOR_GRAY2BGR) if not flag: img[...,:2] = 0 vis.append(img) @@ -128,9 +128,9 @@ def preprocess_simple(digits): def preprocess_hog(digits): samples = [] for img in digits: - gx = cv2.Sobel(img, cv2.CV_32F, 1, 0) - gy = cv2.Sobel(img, cv2.CV_32F, 0, 1) - mag, ang = cv2.cartToPolar(gx, gy) + gx = cv.Sobel(img, cv.CV_32F, 1, 0) + gy = cv.Sobel(img, cv.CV_32F, 0, 1) + mag, ang = cv.cartToPolar(gx, gy) bin_n = 16 bin = np.int32(bin_n*ang/(2*np.pi)) bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:] @@ -163,7 +163,7 @@ if __name__ == '__main__': samples = preprocess_hog(digits2) train_n 
= int(0.9*len(samples)) - cv2.imshow('test set', mosaic(25, digits[train_n:])) + cv.imshow('test set', mosaic(25, digits[train_n:])) digits_train, digits_test = np.split(digits2, [train_n]) samples_train, samples_test = np.split(samples, [train_n]) labels_train, labels_test = np.split(labels, [train_n]) @@ -173,14 +173,14 @@ if __name__ == '__main__': model = KNearest(k=4) model.train(samples_train, labels_train) vis = evaluate_model(model, digits_test, samples_test, labels_test) - cv2.imshow('KNearest test', vis) + cv.imshow('KNearest test', vis) print('training SVM...') model = SVM(C=2.67, gamma=5.383) model.train(samples_train, labels_train) vis = evaluate_model(model, digits_test, samples_test, labels_test) - cv2.imshow('SVM test', vis) + cv.imshow('SVM test', vis) print('saving SVM as "digits_svm.dat"...') model.save('digits_svm.dat') - cv2.waitKey(0) + cv.waitKey(0) diff --git a/samples/python/digits_adjust.py b/samples/python/digits_adjust.py index 79c46d6..2e50fe8 100755 --- a/samples/python/digits_adjust.py +++ b/samples/python/digits_adjust.py @@ -22,7 +22,7 @@ if PY3: xrange = range import numpy as np -import cv2 +import cv2 as cv from multiprocessing.pool import ThreadPool from digits import * @@ -66,7 +66,7 @@ class App(object): return self._samples, self._labels def run_jobs(self, f, jobs): - pool = ThreadPool(processes=cv2.getNumberOfCPUs()) + pool = ThreadPool(processes=cv.getNumberOfCPUs()) ires = pool.imap_unordered(f, jobs) return ires diff --git a/samples/python/digits_video.py b/samples/python/digits_video.py index 9f9c42c..b2431c6 100755 --- a/samples/python/digits_video.py +++ b/samples/python/digits_video.py @@ -4,7 +4,7 @@ from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv # built-in modules import os @@ -29,19 +29,19 @@ def main(): return if True: - model = cv2.ml.SVM_load(classifier_fn) + model = cv.ml.SVM_load(classifier_fn) else: - model = cv2.ml.SVM_create() + model = cv.ml.SVM_create() model.load_(classifier_fn) #Known bug: https://github.com/opencv/opencv/issues/4969 while True: _ret, frame = cap.read() - gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) - bin = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 31, 10) - bin = cv2.medianBlur(bin, 3) - _, contours, heirs = cv2.findContours( bin.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE) + bin = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 31, 10) + bin = cv.medianBlur(bin, 3) + _, contours, heirs = cv.findContours( bin.copy(), cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE) try: heirs = heirs[0] except: @@ -51,12 +51,12 @@ def main(): _, _, _, outer_i = heir if outer_i >= 0: continue - x, y, w, h = cv2.boundingRect(cnt) + x, y, w, h = cv.boundingRect(cnt) if not (16 <= h <= 64 and w <= 1.2*h): continue pad = max(h-w, 0) x, w = x - (pad // 2), w + pad - cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0)) + cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0)) bin_roi = bin[y:,x:][:h,:w] @@ -69,33 +69,33 @@ def main(): if v_out.std() > 10.0: continue s = "%f, %f" % (abs(v_in.mean() - v_out.mean()), v_out.std()) - cv2.putText(frame, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1) + cv.putText(frame, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1) ''' s = 1.5*float(h)/SZ - m = cv2.moments(bin_roi) + m = cv.moments(bin_roi) c1 = np.float32([m['m10'], m['m01']]) / m['m00'] c0 = np.float32([SZ/2, SZ/2]) t = c1 - s*c0 A = 
np.zeros((2, 3), np.float32) A[:,:2] = np.eye(2)*s A[:,2] = t - bin_norm = cv2.warpAffine(bin_roi, A, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR) + bin_norm = cv.warpAffine(bin_roi, A, (SZ, SZ), flags=cv.WARP_INVERSE_MAP | cv.INTER_LINEAR) bin_norm = deskew(bin_norm) if x+w+SZ < frame.shape[1] and y+SZ < frame.shape[0]: frame[y:,x+w:][:SZ, :SZ] = bin_norm[...,np.newaxis] sample = preprocess_hog([bin_norm]) digit = model.predict(sample)[0] - cv2.putText(frame, '%d'%digit, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1) + cv.putText(frame, '%d'%digit, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1) - cv2.imshow('frame', frame) - cv2.imshow('bin', bin) - ch = cv2.waitKey(1) + cv.imshow('frame', frame) + cv.imshow('bin', bin) + ch = cv.waitKey(1) if ch == 27: break if __name__ == '__main__': main() - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/distrans.py b/samples/python/distrans.py index 8abe460..02a51d5 100755 --- a/samples/python/distrans.py +++ b/samples/python/distrans.py @@ -15,7 +15,7 @@ Keys: from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from common import make_cmap @@ -27,7 +27,7 @@ if __name__ == '__main__': fn = '../data/fruits.jpg' print(__doc__) - img = cv2.imread(fn, 0) + img = cv.imread(fn, 0) if img is None: print('Failed to load fn:', fn) sys.exit(1) @@ -39,27 +39,27 @@ if __name__ == '__main__': def update(dummy=None): global need_update need_update = False - thrs = cv2.getTrackbarPos('threshold', 'distrans') - mark = cv2.Canny(img, thrs, 3*thrs) - dist, labels = cv2.distanceTransformWithLabels(~mark, cv2.DIST_L2, 5) + thrs = cv.getTrackbarPos('threshold', 'distrans') + mark = cv.Canny(img, thrs, 3*thrs) + dist, labels = cv.distanceTransformWithLabels(~mark, cv.DIST_L2, 5) if voronoi: vis = cm[np.uint8(labels)] else: vis = cm[np.uint8(dist*2)] vis[mark != 0] = 255 - cv2.imshow('distrans', vis) + cv.imshow('distrans', vis) def invalidate(dummy=None): global need_update need_update = True - cv2.namedWindow('distrans') - cv2.createTrackbar('threshold', 'distrans', 60, 255, invalidate) + cv.namedWindow('distrans') + cv.createTrackbar('threshold', 'distrans', 60, 255, invalidate) update() while True: - ch = cv2.waitKey(50) + ch = cv.waitKey(50) if ch == 27: break if ch == ord('v'): @@ -68,4 +68,4 @@ if __name__ == '__main__': update() if need_update: update() - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/edge.py b/samples/python/edge.py index b597729..ae09c3c 100755 --- a/samples/python/edge.py +++ b/samples/python/edge.py @@ -13,7 +13,7 @@ Usage: # Python 2/3 compatibility from __future__ import print_function -import cv2 +import cv2 as cv import numpy as np # relative module @@ -34,22 +34,22 @@ if __name__ == '__main__': def nothing(*arg): pass - cv2.namedWindow('edge') - cv2.createTrackbar('thrs1', 'edge', 2000, 5000, nothing) - cv2.createTrackbar('thrs2', 'edge', 4000, 5000, nothing) + cv.namedWindow('edge') + cv.createTrackbar('thrs1', 'edge', 2000, 5000, nothing) + cv.createTrackbar('thrs2', 'edge', 4000, 5000, nothing) cap = video.create_capture(fn) while True: flag, img = cap.read() - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - thrs1 = cv2.getTrackbarPos('thrs1', 'edge') - thrs2 = cv2.getTrackbarPos('thrs2', 'edge') - edge = cv2.Canny(gray, thrs1, thrs2, apertureSize=5) + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + thrs1 = cv.getTrackbarPos('thrs1', 'edge') + thrs2 = cv.getTrackbarPos('thrs2', 'edge') + edge = 
cv.Canny(gray, thrs1, thrs2, apertureSize=5) vis = img.copy() vis = np.uint8(vis/2.) vis[edge != 0] = (0, 255, 0) - cv2.imshow('edge', vis) - ch = cv2.waitKey(5) + cv.imshow('edge', vis) + ch = cv.waitKey(5) if ch == 27: break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/facedetect.py b/samples/python/facedetect.py index 0f79663..4067dc8 100755 --- a/samples/python/facedetect.py +++ b/samples/python/facedetect.py @@ -11,7 +11,7 @@ USAGE: from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv # local modules from video import create_capture @@ -20,7 +20,7 @@ from common import clock, draw_str def detect(img, cascade): rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), - flags=cv2.CASCADE_SCALE_IMAGE) + flags=cv.CASCADE_SCALE_IMAGE) if len(rects) == 0: return [] rects[:,2:] += rects[:,:2] @@ -28,7 +28,7 @@ def detect(img, cascade): def draw_rects(img, rects, color): for x1, y1, x2, y2 in rects: - cv2.rectangle(img, (x1, y1), (x2, y2), color, 2) + cv.rectangle(img, (x1, y1), (x2, y2), color, 2) if __name__ == '__main__': import sys, getopt @@ -43,15 +43,15 @@ if __name__ == '__main__': cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml") nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml") - cascade = cv2.CascadeClassifier(cascade_fn) - nested = cv2.CascadeClassifier(nested_fn) + cascade = cv.CascadeClassifier(cascade_fn) + nested = cv.CascadeClassifier(nested_fn) cam = create_capture(video_src, fallback='synth:bg=../data/lena.jpg:noise=0.05') while True: ret, img = cam.read() - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - gray = cv2.equalizeHist(gray) + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + gray = cv.equalizeHist(gray) t = clock() rects = detect(gray, cascade) @@ -66,8 +66,8 @@ if __name__ == '__main__': dt = clock() - t draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000)) - cv2.imshow('facedetect', vis) + cv.imshow('facedetect', vis) - if cv2.waitKey(5) == 27: + if cv.waitKey(5) == 27: break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/feature_homography.py b/samples/python/feature_homography.py index 37f269f..80537e0 100755 --- a/samples/python/feature_homography.py +++ b/samples/python/feature_homography.py @@ -26,7 +26,7 @@ Select a textured planar object to track by drawing a box with a mouse. 
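For reference, the facedetect.py hunks above only swap the module alias; a minimal standalone sketch of the same detection calls under the new 'cv' name (cascade and image paths copied from the sample's defaults, otherwise assumptions) might look like:

import cv2 as cv

# Load the stock frontal-face cascade and a test image (paths are assumptions).
cascade = cv.CascadeClassifier('../../data/haarcascades/haarcascade_frontalface_alt.xml')
img = cv.imread('../data/lena.jpg')
gray = cv.equalizeHist(cv.cvtColor(img, cv.COLOR_BGR2GRAY))
# Same detectMultiScale parameters as the sample's detect() helper.
rects = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=4,
                                 minSize=(30, 30), flags=cv.CASCADE_SCALE_IMAGE)
for x, y, w, h in rects:
    cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv.imshow('facedetect sketch', img)
cv.waitKey(0)
cv.destroyAllWindows()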
from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv # local modules import video @@ -43,7 +43,7 @@ class App: self.paused = False self.tracker = PlaneTracker() - cv2.namedWindow('plane') + cv.namedWindow('plane') self.rect_sel = common.RectSelector('plane', self.on_rect) def on_rect(self, rect): @@ -67,20 +67,20 @@ class App: vis[:,w:] = target.image draw_keypoints(vis[:,w:], target.keypoints) x0, y0, x1, y1 = target.rect - cv2.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2) + cv.rectangle(vis, (x0+w, y0), (x1+w, y1), (0, 255, 0), 2) if playing: tracked = self.tracker.track(self.frame) if len(tracked) > 0: tracked = tracked[0] - cv2.polylines(vis, [np.int32(tracked.quad)], True, (255, 255, 255), 2) + cv.polylines(vis, [np.int32(tracked.quad)], True, (255, 255, 255), 2) for (x0, y0), (x1, y1) in zip(np.int32(tracked.p0), np.int32(tracked.p1)): - cv2.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0)) + cv.line(vis, (x0+w, y0), (x1, y1), (0, 255, 0)) draw_keypoints(vis, self.tracker.frame_points) self.rect_sel.draw(vis) - cv2.imshow('plane', vis) - ch = cv2.waitKey(1) + cv.imshow('plane', vis) + ch = cv.waitKey(1) if ch == ord(' '): self.paused = not self.paused if ch == 27: diff --git a/samples/python/find_obj.py b/samples/python/find_obj.py index 7e82bf4..12f51d9 100755 --- a/samples/python/find_obj.py +++ b/samples/python/find_obj.py @@ -18,7 +18,7 @@ USAGE from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from common import anorm, getsize FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing @@ -28,33 +28,33 @@ FLANN_INDEX_LSH = 6 def init_feature(name): chunks = name.split('-') if chunks[0] == 'sift': - detector = cv2.xfeatures2d.SIFT_create() - norm = cv2.NORM_L2 + detector = cv.xfeatures2d.SIFT_create() + norm = cv.NORM_L2 elif chunks[0] == 'surf': - detector = cv2.xfeatures2d.SURF_create(800) - norm = cv2.NORM_L2 + detector = cv.xfeatures2d.SURF_create(800) + norm = cv.NORM_L2 elif chunks[0] == 'orb': - detector = cv2.ORB_create(400) - norm = cv2.NORM_HAMMING + detector = cv.ORB_create(400) + norm = cv.NORM_HAMMING elif chunks[0] == 'akaze': - detector = cv2.AKAZE_create() - norm = cv2.NORM_HAMMING + detector = cv.AKAZE_create() + norm = cv.NORM_HAMMING elif chunks[0] == 'brisk': - detector = cv2.BRISK_create() - norm = cv2.NORM_HAMMING + detector = cv.BRISK_create() + norm = cv.NORM_HAMMING else: return None, None if 'flann' in chunks: - if norm == cv2.NORM_L2: + if norm == cv.NORM_L2: flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) else: flann_params= dict(algorithm = FLANN_INDEX_LSH, table_number = 6, # 12 key_size = 12, # 20 multi_probe_level = 1) #2 - matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329) + matcher = cv.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329) else: - matcher = cv2.BFMatcher(norm) + matcher = cv.BFMatcher(norm) return detector, matcher @@ -76,12 +76,12 @@ def explore_match(win, img1, img2, kp_pairs, status = None, H = None): vis = np.zeros((max(h1, h2), w1+w2), np.uint8) vis[:h1, :w1] = img1 vis[:h2, w1:w1+w2] = img2 - vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR) + vis = cv.cvtColor(vis, cv.COLOR_GRAY2BGR) if H is not None: corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]]) - corners = np.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) ) - cv2.polylines(vis, [corners], True, (255, 255, 255)) + corners = np.int32( cv.perspectiveTransform(corners.reshape(1, -1, 2), 
H).reshape(-1, 2) + (w1, 0) ) + cv.polylines(vis, [corners], True, (255, 255, 255)) if status is None: status = np.ones(len(kp_pairs), np.bool_) @@ -96,26 +96,26 @@ def explore_match(win, img1, img2, kp_pairs, status = None, H = None): for (x1, y1), (x2, y2), inlier in zip(p1, p2, status): if inlier: col = green - cv2.circle(vis, (x1, y1), 2, col, -1) - cv2.circle(vis, (x2, y2), 2, col, -1) + cv.circle(vis, (x1, y1), 2, col, -1) + cv.circle(vis, (x2, y2), 2, col, -1) else: col = red r = 2 thickness = 3 - cv2.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness) - cv2.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness) - cv2.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness) - cv2.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness) + cv.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness) + cv.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness) + cv.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness) + cv.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness) vis0 = vis.copy() for (x1, y1), (x2, y2), inlier in zip(p1, p2, status): if inlier: - cv2.line(vis, (x1, y1), (x2, y2), green) + cv.line(vis, (x1, y1), (x2, y2), green) - cv2.imshow(win, vis) + cv.imshow(win, vis) def onmouse(event, x, y, flags, param): cur_vis = vis - if flags & cv2.EVENT_FLAG_LBUTTON: + if flags & cv.EVENT_FLAG_LBUTTON: cur_vis = vis0.copy() r = 8 m = (anorm(np.array(p1) - (x, y)) < r) | (anorm(np.array(p2) - (x, y)) < r) @@ -124,15 +124,15 @@ def explore_match(win, img1, img2, kp_pairs, status = None, H = None): for i in idxs: (x1, y1), (x2, y2) = p1[i], p2[i] col = (red, green)[status[i]] - cv2.line(cur_vis, (x1, y1), (x2, y2), col) + cv.line(cur_vis, (x1, y1), (x2, y2), col) kp1, kp2 = kp_pairs[i] kp1s.append(kp1) kp2s.append(kp2) - cur_vis = cv2.drawKeypoints(cur_vis, kp1s, None, flags=4, color=kp_color) - cur_vis[:,w1:] = cv2.drawKeypoints(cur_vis[:,w1:], kp2s, None, flags=4, color=kp_color) + cur_vis = cv.drawKeypoints(cur_vis, kp1s, None, flags=4, color=kp_color) + cur_vis[:,w1:] = cv.drawKeypoints(cur_vis[:,w1:], kp2s, None, flags=4, color=kp_color) - cv2.imshow(win, cur_vis) - cv2.setMouseCallback(win, onmouse) + cv.imshow(win, cur_vis) + cv.setMouseCallback(win, onmouse) return vis @@ -149,8 +149,8 @@ if __name__ == '__main__': fn1 = '../data/box.png' fn2 = '../data/box_in_scene.png' - img1 = cv2.imread(fn1, 0) - img2 = cv2.imread(fn2, 0) + img1 = cv.imread(fn1, 0) + img2 = cv.imread(fn2, 0) detector, matcher = init_feature(feature_name) if img1 is None: @@ -176,7 +176,7 @@ if __name__ == '__main__': raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2 p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches) if len(p1) >= 4: - H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0) + H, status = cv.findHomography(p1, p2, cv.RANSAC, 5.0) print('%d / %d inliers/matched' % (np.sum(status), len(status))) else: H, status = None, None @@ -185,5 +185,5 @@ if __name__ == '__main__': _vis = explore_match(win, img1, img2, kp_pairs, status, H) match_and_draw('find_obj') - cv2.waitKey() - cv2.destroyAllWindows() + cv.waitKey() + cv.destroyAllWindows() diff --git a/samples/python/fitline.py b/samples/python/fitline.py index c91144c..b8d282a 100755 --- a/samples/python/fitline.py +++ b/samples/python/fitline.py @@ -4,7 +4,7 @@ Robust line fitting. ================== -Example of using cv2.fitLine function for fitting line +Example of using cv.fitLine function for fitting line to points in presence of outliers. 
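As a quick reference for the cv.fitLine call this docstring describes (the sample's interactive, trackbar-driven version follows below), a minimal sketch on synthetic points, assuming the L2 distance function and the sample's 0.01 tolerances:

import numpy as np
import cv2 as cv

# Fit a line to noisy 2D points; fitLine returns a unit direction (vx, vy)
# and a point (cx, cy) lying on the fitted line.
points = np.float32(np.random.rand(100, 2) * 100)
vx, vy, cx, cy = cv.fitLine(points, cv.DIST_L2, 0, 0.01, 0.01).ravel()
print('direction (%.3f, %.3f), point on line (%.1f, %.1f)' % (vx, vy, cx, cy))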
Usage @@ -28,7 +28,7 @@ import sys PY3 = sys.version_info[0] == 3 import numpy as np -import cv2 +import cv2 as cv # built-in modules import itertools as it @@ -55,40 +55,40 @@ else: cur_func_name = dist_func_names.next() def update(_=None): - noise = cv2.getTrackbarPos('noise', 'fit line') - n = cv2.getTrackbarPos('point n', 'fit line') - r = cv2.getTrackbarPos('outlier %', 'fit line') / 100.0 + noise = cv.getTrackbarPos('noise', 'fit line') + n = cv.getTrackbarPos('point n', 'fit line') + r = cv.getTrackbarPos('outlier %', 'fit line') / 100.0 outn = int(n*r) p0, p1 = (90, 80), (w-90, h-80) img = np.zeros((h, w, 3), np.uint8) - cv2.line(img, toint(p0), toint(p1), (0, 255, 0)) + cv.line(img, toint(p0), toint(p1), (0, 255, 0)) if n > 0: line_points = sample_line(p0, p1, n-outn, noise) outliers = np.random.rand(outn, 2) * (w, h) points = np.vstack([line_points, outliers]) for p in line_points: - cv2.circle(img, toint(p), 2, (255, 255, 255), -1) + cv.circle(img, toint(p), 2, (255, 255, 255), -1) for p in outliers: - cv2.circle(img, toint(p), 2, (64, 64, 255), -1) - func = getattr(cv2, cur_func_name) - vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01) - cv2.line(img, (int(cx-vx*w), int(cy-vy*w)), (int(cx+vx*w), int(cy+vy*w)), (0, 0, 255)) + cv.circle(img, toint(p), 2, (64, 64, 255), -1) + func = getattr(cv, cur_func_name) + vx, vy, cx, cy = cv.fitLine(np.float32(points), func, 0, 0.01, 0.01) + cv.line(img, (int(cx-vx*w), int(cy-vy*w)), (int(cx+vx*w), int(cy+vy*w)), (0, 0, 255)) draw_str(img, (20, 20), cur_func_name) - cv2.imshow('fit line', img) + cv.imshow('fit line', img) if __name__ == '__main__': print(__doc__) - cv2.namedWindow('fit line') - cv2.createTrackbar('noise', 'fit line', 3, 50, update) - cv2.createTrackbar('point n', 'fit line', 100, 500, update) - cv2.createTrackbar('outlier %', 'fit line', 30, 100, update) + cv.namedWindow('fit line') + cv.createTrackbar('noise', 'fit line', 3, 50, update) + cv.createTrackbar('point n', 'fit line', 100, 500, update) + cv.createTrackbar('outlier %', 'fit line', 30, 100, update) while True: update() - ch = cv2.waitKey(0) + ch = cv.waitKey(0) if ch == ord('f'): if PY3: cur_func_name = next(dist_func_names) diff --git a/samples/python/floodfill.py b/samples/python/floodfill.py index 1b988d3..f03beef 100755 --- a/samples/python/floodfill.py +++ b/samples/python/floodfill.py @@ -18,7 +18,7 @@ Keys: from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv if __name__ == '__main__': import sys @@ -28,7 +28,7 @@ if __name__ == '__main__': fn = '../data/fruits.jpg' print(__doc__) - img = cv2.imread(fn, True) + img = cv.imread(fn, True) if img is None: print('Failed to load image file:', fn) sys.exit(1) @@ -41,32 +41,32 @@ if __name__ == '__main__': def update(dummy=None): if seed_pt is None: - cv2.imshow('floodfill', img) + cv.imshow('floodfill', img) return flooded = img.copy() mask[:] = 0 - lo = cv2.getTrackbarPos('lo', 'floodfill') - hi = cv2.getTrackbarPos('hi', 'floodfill') + lo = cv.getTrackbarPos('lo', 'floodfill') + hi = cv.getTrackbarPos('hi', 'floodfill') flags = connectivity if fixed_range: - flags |= cv2.FLOODFILL_FIXED_RANGE - cv2.floodFill(flooded, mask, seed_pt, (255, 255, 255), (lo,)*3, (hi,)*3, flags) - cv2.circle(flooded, seed_pt, 2, (0, 0, 255), -1) - cv2.imshow('floodfill', flooded) + flags |= cv.FLOODFILL_FIXED_RANGE + cv.floodFill(flooded, mask, seed_pt, (255, 255, 255), (lo,)*3, (hi,)*3, flags) + cv.circle(flooded, seed_pt, 2, (0, 0, 255), -1) + cv.imshow('floodfill', flooded) def 
onmouse(event, x, y, flags, param): global seed_pt - if flags & cv2.EVENT_FLAG_LBUTTON: + if flags & cv.EVENT_FLAG_LBUTTON: seed_pt = x, y update() update() - cv2.setMouseCallback('floodfill', onmouse) - cv2.createTrackbar('lo', 'floodfill', 20, 255, update) - cv2.createTrackbar('hi', 'floodfill', 20, 255, update) + cv.setMouseCallback('floodfill', onmouse) + cv.createTrackbar('lo', 'floodfill', 20, 255, update) + cv.createTrackbar('hi', 'floodfill', 20, 255, update) while True: - ch = cv2.waitKey() + ch = cv.waitKey() if ch == 27: break if ch == ord('f'): @@ -77,4 +77,4 @@ if __name__ == '__main__': connectivity = 12-connectivity print('connectivity =', connectivity) update() - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/gabor_threads.py b/samples/python/gabor_threads.py index f7d62c1..5c1cf11 100755 --- a/samples/python/gabor_threads.py +++ b/samples/python/gabor_threads.py @@ -18,7 +18,7 @@ gabor_threads.py [image filename] from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from multiprocessing.pool import ThreadPool @@ -26,7 +26,7 @@ def build_filters(): filters = [] ksize = 31 for theta in np.arange(0, np.pi, np.pi / 16): - kern = cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F) + kern = cv.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv.CV_32F) kern /= 1.5*kern.sum() filters.append(kern) return filters @@ -34,14 +34,14 @@ def build_filters(): def process(img, filters): accum = np.zeros_like(img) for kern in filters: - fimg = cv2.filter2D(img, cv2.CV_8UC3, kern) + fimg = cv.filter2D(img, cv.CV_8UC3, kern) np.maximum(accum, fimg, accum) return accum def process_threaded(img, filters, threadn = 8): accum = np.zeros_like(img) def f(kern): - return cv2.filter2D(img, cv2.CV_8UC3, kern) + return cv.filter2D(img, cv.CV_8UC3, kern) pool = ThreadPool(processes=threadn) for fimg in pool.imap_unordered(f, filters): np.maximum(accum, fimg, accum) @@ -57,7 +57,7 @@ if __name__ == '__main__': except: img_fn = '../data/baboon.jpg' - img = cv2.imread(img_fn) + img = cv.imread(img_fn) if img is None: print('Failed to load image file:', img_fn) sys.exit(1) @@ -70,7 +70,7 @@ if __name__ == '__main__': res2 = process_threaded(img, filters) print('res1 == res2: ', (res1 == res2).all()) - cv2.imshow('img', img) - cv2.imshow('result', res2) - cv2.waitKey() - cv2.destroyAllWindows() + cv.imshow('img', img) + cv.imshow('result', res2) + cv.waitKey() + cv.destroyAllWindows() diff --git a/samples/python/gaussian_mix.py b/samples/python/gaussian_mix.py index e7f3e8a..a939928 100755 --- a/samples/python/gaussian_mix.py +++ b/samples/python/gaussian_mix.py @@ -10,7 +10,7 @@ if PY3: import numpy as np from numpy import random -import cv2 +import cv2 as cv def make_gaussians(cluster_n, img_size): points = [] @@ -28,10 +28,10 @@ def make_gaussians(cluster_n, img_size): def draw_gaussain(img, mean, cov, color): x, y = np.int32(mean) - w, u, _vt = cv2.SVDecomp(cov) + w, u, _vt = cv.SVDecomp(cov) ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi) s1, s2 = np.sqrt(w)*3.0 - cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.LINE_AA) + cv.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv.LINE_AA) if __name__ == '__main__': @@ -45,9 +45,9 @@ if __name__ == '__main__': points, ref_distrs = make_gaussians(cluster_n, img_size) print('EM (opencv) ...') - em = cv2.ml.EM_create() + em = cv.ml.EM_create() em.setClustersNumber(cluster_n) - em.setCovarianceMatrixType(cv2.ml.EM_COV_MAT_GENERIC) + 
em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC) em.trainEM(points) means = em.getMeans() covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232 @@ -56,14 +56,14 @@ if __name__ == '__main__': img = np.zeros((img_size, img_size, 3), np.uint8) for x, y in np.int32(points): - cv2.circle(img, (x, y), 1, (255, 255, 255), -1) + cv.circle(img, (x, y), 1, (255, 255, 255), -1) for m, cov in ref_distrs: draw_gaussain(img, m, cov, (0, 255, 0)) for m, cov in found_distrs: draw_gaussain(img, m, cov, (0, 0, 255)) - cv2.imshow('gaussian mixture', img) - ch = cv2.waitKey(0) + cv.imshow('gaussian mixture', img) + ch = cv.waitKey(0) if ch == 27: break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/grabcut.py b/samples/python/grabcut.py index 1a5c2d0..37bc2e0 100644 --- a/samples/python/grabcut.py +++ b/samples/python/grabcut.py @@ -31,7 +31,7 @@ Key 's' - To save the results from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv import sys BLUE = [255,0,0] # rectangle color @@ -58,45 +58,45 @@ def onmouse(event,x,y,flags,param): global img,img2,drawing,value,mask,rectangle,rect,rect_or_mask,ix,iy,rect_over # Draw Rectangle - if event == cv2.EVENT_RBUTTONDOWN: + if event == cv.EVENT_RBUTTONDOWN: rectangle = True ix,iy = x,y - elif event == cv2.EVENT_MOUSEMOVE: + elif event == cv.EVENT_MOUSEMOVE: if rectangle == True: img = img2.copy() - cv2.rectangle(img,(ix,iy),(x,y),BLUE,2) + cv.rectangle(img,(ix,iy),(x,y),BLUE,2) rect = (min(ix,x),min(iy,y),abs(ix-x),abs(iy-y)) rect_or_mask = 0 - elif event == cv2.EVENT_RBUTTONUP: + elif event == cv.EVENT_RBUTTONUP: rectangle = False rect_over = True - cv2.rectangle(img,(ix,iy),(x,y),BLUE,2) + cv.rectangle(img,(ix,iy),(x,y),BLUE,2) rect = (min(ix,x),min(iy,y),abs(ix-x),abs(iy-y)) rect_or_mask = 0 print(" Now press the key 'n' a few times until no further change \n") # draw touchup curves - if event == cv2.EVENT_LBUTTONDOWN: + if event == cv.EVENT_LBUTTONDOWN: if rect_over == False: print("first draw rectangle \n") else: drawing = True - cv2.circle(img,(x,y),thickness,value['color'],-1) - cv2.circle(mask,(x,y),thickness,value['val'],-1) + cv.circle(img,(x,y),thickness,value['color'],-1) + cv.circle(mask,(x,y),thickness,value['val'],-1) - elif event == cv2.EVENT_MOUSEMOVE: + elif event == cv.EVENT_MOUSEMOVE: if drawing == True: - cv2.circle(img,(x,y),thickness,value['color'],-1) - cv2.circle(mask,(x,y),thickness,value['val'],-1) + cv.circle(img,(x,y),thickness,value['color'],-1) + cv.circle(mask,(x,y),thickness,value['val'],-1) - elif event == cv2.EVENT_LBUTTONUP: + elif event == cv.EVENT_LBUTTONUP: if drawing == True: drawing = False - cv2.circle(img,(x,y),thickness,value['color'],-1) - cv2.circle(mask,(x,y),thickness,value['val'],-1) + cv.circle(img,(x,y),thickness,value['color'],-1) + cv.circle(mask,(x,y),thickness,value['val'],-1) if __name__ == '__main__': @@ -111,25 +111,25 @@ if __name__ == '__main__': print("Correct Usage: python grabcut.py \n") filename = '../data/lena.jpg' - img = cv2.imread(filename) + img = cv.imread(filename) img2 = img.copy() # a copy of original image mask = np.zeros(img.shape[:2],dtype = np.uint8) # mask initialized to PR_BG output = np.zeros(img.shape,np.uint8) # output image to be shown # input and output windows - cv2.namedWindow('output') - cv2.namedWindow('input') - cv2.setMouseCallback('input',onmouse) - cv2.moveWindow('input',img.shape[1]+10,90) + cv.namedWindow('output') + cv.namedWindow('input') + cv.setMouseCallback('input',onmouse) + 
cv.moveWindow('input',img.shape[1]+10,90) print(" Instructions: \n") print(" Draw a rectangle around the object using right mouse button \n") while(1): - cv2.imshow('output',output) - cv2.imshow('input',img) - k = cv2.waitKey(1) + cv.imshow('output',output) + cv.imshow('input',img) + k = cv.waitKey(1) # key bindings if k == 27: # esc to exit @@ -147,7 +147,7 @@ if __name__ == '__main__': elif k == ord('s'): # save image bar = np.zeros((img.shape[0],5,3),np.uint8) res = np.hstack((img2,bar,img,bar,output)) - cv2.imwrite('grabcut_output.png',res) + cv.imwrite('grabcut_output.png',res) print(" Result saved as image \n") elif k == ord('r'): # reset everything print("resetting \n") @@ -166,14 +166,14 @@ if __name__ == '__main__': if (rect_or_mask == 0): # grabcut with rect bgdmodel = np.zeros((1,65),np.float64) fgdmodel = np.zeros((1,65),np.float64) - cv2.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv2.GC_INIT_WITH_RECT) + cv.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv.GC_INIT_WITH_RECT) rect_or_mask = 1 elif rect_or_mask == 1: # grabcut with mask bgdmodel = np.zeros((1,65),np.float64) fgdmodel = np.zeros((1,65),np.float64) - cv2.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv2.GC_INIT_WITH_MASK) + cv.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv.GC_INIT_WITH_MASK) mask2 = np.where((mask==1) + (mask==3),255,0).astype('uint8') - output = cv2.bitwise_and(img2,img2,mask=mask2) + output = cv.bitwise_and(img2,img2,mask=mask2) - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/hist.py b/samples/python/hist.py index d1801c7..266fa12 100755 --- a/samples/python/hist.py +++ b/samples/python/hist.py @@ -3,7 +3,7 @@ ''' This is a sample for histogram plotting for RGB images and grayscale images for better understanding of colour distribution Benefit : Learn how to draw histogram of images - Get familier with cv2.calcHist, cv2.equalizeHist,cv2.normalize and some drawing functions + Get familier with cv.calcHist, cv.equalizeHist,cv.normalize and some drawing functions Level : Beginner or Intermediate @@ -18,7 +18,7 @@ Abid Rahman 3/14/12 debug Gary Bradski # Python 2/3 compatibility from __future__ import print_function -import cv2 +import cv2 as cv import numpy as np bins = np.arange(256).reshape(256,1) @@ -30,11 +30,11 @@ def hist_curve(im): elif im.shape[2] == 3: color = [ (255,0,0),(0,255,0),(0,0,255) ] for ch, col in enumerate(color): - hist_item = cv2.calcHist([im],[ch],None,[256],[0,256]) - cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX) + hist_item = cv.calcHist([im],[ch],None,[256],[0,256]) + cv.normalize(hist_item,hist_item,0,255,cv.NORM_MINMAX) hist=np.int32(np.around(hist_item)) pts = np.int32(np.column_stack((bins,hist))) - cv2.polylines(h,[pts],False,col) + cv.polylines(h,[pts],False,col) y=np.flipud(h) return y @@ -43,12 +43,12 @@ def hist_lines(im): if len(im.shape)!=2: print("hist_lines applicable only for grayscale images") #print("so converting image to grayscale for representation" - im = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) - hist_item = cv2.calcHist([im],[0],None,[256],[0,256]) - cv2.normalize(hist_item,hist_item,0,255,cv2.NORM_MINMAX) + im = cv.cvtColor(im,cv.COLOR_BGR2GRAY) + hist_item = cv.calcHist([im],[0],None,[256],[0,256]) + cv.normalize(hist_item,hist_item,0,255,cv.NORM_MINMAX) hist=np.int32(np.around(hist_item)) for x,y in enumerate(hist): - cv2.line(h,(x,0),(x,y),(255,255,255)) + cv.line(h,(x,0),(x,y),(255,255,255)) y = np.flipud(h) return y @@ -63,13 +63,13 @@ if __name__ == '__main__': fname = '../data/lena.jpg' print("usage : python 
hist.py ") - im = cv2.imread(fname) + im = cv.imread(fname) if im is None: print('Failed to load image file:', fname) sys.exit(1) - gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) + gray = cv.cvtColor(im,cv.COLOR_BGR2GRAY) print(''' Histogram plotting \n @@ -82,38 +82,38 @@ if __name__ == '__main__': Esc - exit \n ''') - cv2.imshow('image',im) + cv.imshow('image',im) while True: - k = cv2.waitKey(0) + k = cv.waitKey(0) if k == ord('a'): curve = hist_curve(im) - cv2.imshow('histogram',curve) - cv2.imshow('image',im) + cv.imshow('histogram',curve) + cv.imshow('image',im) print('a') elif k == ord('b'): print('b') lines = hist_lines(im) - cv2.imshow('histogram',lines) - cv2.imshow('image',gray) + cv.imshow('histogram',lines) + cv.imshow('image',gray) elif k == ord('c'): print('c') - equ = cv2.equalizeHist(gray) + equ = cv.equalizeHist(gray) lines = hist_lines(equ) - cv2.imshow('histogram',lines) - cv2.imshow('image',equ) + cv.imshow('histogram',lines) + cv.imshow('image',equ) elif k == ord('d'): print('d') curve = hist_curve(gray) - cv2.imshow('histogram',curve) - cv2.imshow('image',gray) + cv.imshow('histogram',curve) + cv.imshow('image',gray) elif k == ord('e'): print('e') - norm = cv2.normalize(gray, gray, alpha = 0,beta = 255,norm_type = cv2.NORM_MINMAX) + norm = cv.normalize(gray, gray, alpha = 0,beta = 255,norm_type = cv.NORM_MINMAX) lines = hist_lines(norm) - cv2.imshow('histogram',lines) - cv2.imshow('image',norm) + cv.imshow('histogram',lines) + cv.imshow('image',norm) elif k == 27: print('ESC') - cv2.destroyAllWindows() + cv.destroyAllWindows() break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/houghcircles.py b/samples/python/houghcircles.py index c766c1c..2c24c00 100755 --- a/samples/python/houghcircles.py +++ b/samples/python/houghcircles.py @@ -1,7 +1,7 @@ #!/usr/bin/python ''' -This example illustrates how to use cv2.HoughCircles() function. +This example illustrates how to use cv.HoughCircles() function. 
Usage: houghcircles.py [] @@ -11,7 +11,7 @@ Usage: # Python 2/3 compatibility from __future__ import print_function -import cv2 +import cv2 as cv import numpy as np import sys @@ -23,20 +23,20 @@ if __name__ == '__main__': except IndexError: fn = "../data/board.jpg" - src = cv2.imread(fn, 1) - img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) - img = cv2.medianBlur(img, 5) + src = cv.imread(fn, 1) + img = cv.cvtColor(src, cv.COLOR_BGR2GRAY) + img = cv.medianBlur(img, 5) cimg = src.copy() # numpy function - circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30) + circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30) if circles is not None: # Check if circles have been found and only then iterate over these and add them to the image a, b, c = circles.shape for i in range(b): - cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv2.LINE_AA) - cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv2.LINE_AA) # draw center of circle + cv.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv.LINE_AA) + cv.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv.LINE_AA) # draw center of circle - cv2.imshow("detected circles", cimg) + cv.imshow("detected circles", cimg) - cv2.imshow("source", src) - cv2.waitKey(0) + cv.imshow("source", src) + cv.waitKey(0) diff --git a/samples/python/houghlines.py b/samples/python/houghlines.py index efe2940..8435107 100755 --- a/samples/python/houghlines.py +++ b/samples/python/houghlines.py @@ -11,7 +11,7 @@ Usage: # Python 2/3 compatibility from __future__ import print_function -import cv2 +import cv2 as cv import numpy as np import sys import math @@ -24,18 +24,18 @@ if __name__ == '__main__': except IndexError: fn = "../data/pic1.png" - src = cv2.imread(fn) - dst = cv2.Canny(src, 50, 200) - cdst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR) + src = cv.imread(fn) + dst = cv.Canny(src, 50, 200) + cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR) if True: # HoughLinesP - lines = cv2.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10) + lines = cv.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10) a,b,c = lines.shape for i in range(a): - cv2.line(cdst, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3, cv2.LINE_AA) + cv.line(cdst, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3, cv.LINE_AA) else: # HoughLines - lines = cv2.HoughLines(dst, 1, math.pi/180.0, 50, np.array([]), 0, 0) + lines = cv.HoughLines(dst, 1, math.pi/180.0, 50, np.array([]), 0, 0) if lines is not None: a,b,c = lines.shape for i in range(a): @@ -46,9 +46,9 @@ if __name__ == '__main__': x0, y0 = a*rho, b*rho pt1 = ( int(x0+1000*(-b)), int(y0+1000*(a)) ) pt2 = ( int(x0-1000*(-b)), int(y0-1000*(a)) ) - cv2.line(cdst, pt1, pt2, (0, 0, 255), 3, cv2.LINE_AA) + cv.line(cdst, pt1, pt2, (0, 0, 255), 3, cv.LINE_AA) - cv2.imshow("detected lines", cdst) + cv.imshow("detected lines", cdst) - cv2.imshow("source", src) - cv2.waitKey(0) + cv.imshow("source", src) + cv.waitKey(0) diff --git a/samples/python/inpaint.py b/samples/python/inpaint.py index 3b738bb..fb0140c 100755 --- a/samples/python/inpaint.py +++ b/samples/python/inpaint.py @@ -19,7 +19,7 @@ Keys: from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from common import Sketcher if __name__ == '__main__': @@ -31,7 +31,7 @@ if __name__ == '__main__': print(__doc__) - img = 
cv2.imread(fn) + img = cv.imread(fn) if img is None: print('Failed to load image file:', fn) sys.exit(1) @@ -41,14 +41,14 @@ if __name__ == '__main__': sketch = Sketcher('img', [img_mark, mark], lambda : ((255, 255, 255), 255)) while True: - ch = cv2.waitKey() + ch = cv.waitKey() if ch == 27: break if ch == ord(' '): - res = cv2.inpaint(img_mark, mark, 3, cv2.INPAINT_TELEA) - cv2.imshow('inpaint', res) + res = cv.inpaint(img_mark, mark, 3, cv.INPAINT_TELEA) + cv.imshow('inpaint', res) if ch == ord('r'): img_mark[:] = img mark[:] = 0 sketch.show() - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/kalman.py b/samples/python/kalman.py index 96a6fa6..3028f54 100755 --- a/samples/python/kalman.py +++ b/samples/python/kalman.py @@ -18,7 +18,7 @@ PY3 = sys.version_info[0] == 3 if PY3: long = int -import cv2 +import cv2 as cv from math import cos, sin, sqrt import numpy as np @@ -26,11 +26,11 @@ if __name__ == "__main__": img_height = 500 img_width = 500 - kalman = cv2.KalmanFilter(2, 1, 0) + kalman = cv.KalmanFilter(2, 1, 0) code = long(-1) - cv2.namedWindow("Kalman") + cv.namedWindow("Kalman") while True: state = 0.1 * np.random.randn(2, 1) @@ -64,33 +64,33 @@ if __name__ == "__main__": # plot points def draw_cross(center, color, d): - cv2.line(img, + cv.line(img, (center[0] - d, center[1] - d), (center[0] + d, center[1] + d), - color, 1, cv2.LINE_AA, 0) - cv2.line(img, + color, 1, cv.LINE_AA, 0) + cv.line(img, (center[0] + d, center[1] - d), (center[0] - d, center[1] + d), - color, 1, cv2.LINE_AA, 0) + color, 1, cv.LINE_AA, 0) img = np.zeros((img_height, img_width, 3), np.uint8) draw_cross(np.int32(state_pt), (255, 255, 255), 3) draw_cross(np.int32(measurement_pt), (0, 0, 255), 3) draw_cross(np.int32(predict_pt), (0, 255, 0), 3) - cv2.line(img, state_pt, measurement_pt, (0, 0, 255), 3, cv2.LINE_AA, 0) - cv2.line(img, state_pt, predict_pt, (0, 255, 255), 3, cv2.LINE_AA, 0) + cv.line(img, state_pt, measurement_pt, (0, 0, 255), 3, cv.LINE_AA, 0) + cv.line(img, state_pt, predict_pt, (0, 255, 255), 3, cv.LINE_AA, 0) kalman.correct(measurement) process_noise = sqrt(kalman.processNoiseCov[0,0]) * np.random.randn(2, 1) state = np.dot(kalman.transitionMatrix, state) + process_noise - cv2.imshow("Kalman", img) + cv.imshow("Kalman", img) - code = cv2.waitKey(100) + code = cv.waitKey(100) if code != -1: break if code in [27, ord('q'), ord('Q')]: break - cv2.destroyWindow("Kalman") + cv.destroyWindow("Kalman") diff --git a/samples/python/kmeans.py b/samples/python/kmeans.py index 0fdc759..9c67b7a 100755 --- a/samples/python/kmeans.py +++ b/samples/python/kmeans.py @@ -14,7 +14,7 @@ Keyboard shortcuts: from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from gaussian_mix import make_gaussians @@ -28,23 +28,23 @@ if __name__ == '__main__': colors = np.zeros((1, cluster_n, 3), np.uint8) colors[0,:] = 255 colors[0,:,0] = np.arange(0, 180, 180.0/cluster_n) - colors = cv2.cvtColor(colors, cv2.COLOR_HSV2BGR)[0] + colors = cv.cvtColor(colors, cv.COLOR_HSV2BGR)[0] while True: print('sampling distributions...') points, _ = make_gaussians(cluster_n, img_size) - term_crit = (cv2.TERM_CRITERIA_EPS, 30, 0.1) - ret, labels, centers = cv2.kmeans(points, cluster_n, None, term_crit, 10, 0) + term_crit = (cv.TERM_CRITERIA_EPS, 30, 0.1) + ret, labels, centers = cv.kmeans(points, cluster_n, None, term_crit, 10, 0) img = np.zeros((img_size, img_size, 3), np.uint8) for (x, y), label in zip(np.int32(points), labels.ravel()): c = list(map(int, colors[label])) - 
cv2.circle(img, (x, y), 1, c, -1) + cv.circle(img, (x, y), 1, c, -1) - cv2.imshow('gaussian mixture', img) - ch = cv2.waitKey(0) + cv.imshow('gaussian mixture', img) + ch = cv.waitKey(0) if ch == 27: break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/lappyr.py b/samples/python/lappyr.py index ba9d9f9..291251a 100755 --- a/samples/python/lappyr.py +++ b/samples/python/lappyr.py @@ -21,7 +21,7 @@ if PY3: xrange = range import numpy as np -import cv2 +import cv2 as cv import video from common import nothing, getsize @@ -29,8 +29,8 @@ def build_lappyr(img, leveln=6, dtype=np.int16): img = dtype(img) levels = [] for _i in xrange(leveln-1): - next_img = cv2.pyrDown(img) - img1 = cv2.pyrUp(next_img, dstsize=getsize(img)) + next_img = cv.pyrDown(img) + img1 = cv.pyrUp(next_img, dstsize=getsize(img)) levels.append(img-img1) img = next_img levels.append(img) @@ -39,7 +39,7 @@ def build_lappyr(img, leveln=6, dtype=np.int16): def merge_lappyr(levels): img = levels[-1] for lev_img in levels[-2::-1]: - img = cv2.pyrUp(img, dstsize=getsize(lev_img)) + img = cv.pyrUp(img, dstsize=getsize(lev_img)) img += lev_img return np.uint8(np.clip(img, 0, 255)) @@ -55,20 +55,20 @@ if __name__ == '__main__': cap = video.create_capture(fn) leveln = 6 - cv2.namedWindow('level control') + cv.namedWindow('level control') for i in xrange(leveln): - cv2.createTrackbar('%d'%i, 'level control', 5, 50, nothing) + cv.createTrackbar('%d'%i, 'level control', 5, 50, nothing) while True: ret, frame = cap.read() pyr = build_lappyr(frame, leveln) for i in xrange(leveln): - v = int(cv2.getTrackbarPos('%d'%i, 'level control') / 5) + v = int(cv.getTrackbarPos('%d'%i, 'level control') / 5) pyr[i] *= v res = merge_lappyr(pyr) - cv2.imshow('laplacian pyramid filter', res) + cv.imshow('laplacian pyramid filter', res) - if cv2.waitKey(1) == 27: + if cv.waitKey(1) == 27: break diff --git a/samples/python/letter_recog.py b/samples/python/letter_recog.py index f5fbca4..d498736 100755 --- a/samples/python/letter_recog.py +++ b/samples/python/letter_recog.py @@ -29,7 +29,7 @@ USAGE: from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv def load_base(fn): a = np.loadtxt(fn, np.float32, delimiter=',', converters={ 0 : lambda ch : ord(ch)-ord('A') }) @@ -61,11 +61,11 @@ class LetterStatModel(object): class RTrees(LetterStatModel): def __init__(self): - self.model = cv2.ml.RTrees_create() + self.model = cv.ml.RTrees_create() def train(self, samples, responses): self.model.setMaxDepth(20) - self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int)) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int)) def predict(self, samples): _ret, resp = self.model.predict(samples) @@ -74,10 +74,10 @@ class RTrees(LetterStatModel): class KNearest(LetterStatModel): def __init__(self): - self.model = cv2.ml.KNearest_create() + self.model = cv.ml.KNearest_create() def train(self, samples, responses): - self.model.train(samples, cv2.ml.ROW_SAMPLE, responses) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses) def predict(self, samples): _retval, results, _neigh_resp, _dists = self.model.findNearest(samples, k = 10) @@ -86,17 +86,17 @@ class KNearest(LetterStatModel): class Boost(LetterStatModel): def __init__(self): - self.model = cv2.ml.Boost_create() + self.model = cv.ml.Boost_create() def train(self, samples, responses): _sample_n, var_n = samples.shape new_samples = self.unroll_samples(samples) new_responses = self.unroll_responses(responses) - var_types = 
np.array([cv2.ml.VAR_NUMERICAL] * var_n + [cv2.ml.VAR_CATEGORICAL, cv2.ml.VAR_CATEGORICAL], np.uint8) + var_types = np.array([cv.ml.VAR_NUMERICAL] * var_n + [cv.ml.VAR_CATEGORICAL, cv.ml.VAR_CATEGORICAL], np.uint8) self.model.setWeakCount(15) self.model.setMaxDepth(10) - self.model.train(cv2.ml.TrainData_create(new_samples, cv2.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types)) + self.model.train(cv.ml.TrainData_create(new_samples, cv.ml.ROW_SAMPLE, new_responses.astype(int), varType = var_types)) def predict(self, samples): new_samples = self.unroll_samples(samples) @@ -107,14 +107,14 @@ class Boost(LetterStatModel): class SVM(LetterStatModel): def __init__(self): - self.model = cv2.ml.SVM_create() + self.model = cv.ml.SVM_create() def train(self, samples, responses): - self.model.setType(cv2.ml.SVM_C_SVC) + self.model.setType(cv.ml.SVM_C_SVC) self.model.setC(1) - self.model.setKernel(cv2.ml.SVM_RBF) + self.model.setKernel(cv.ml.SVM_RBF) self.model.setGamma(.1) - self.model.train(samples, cv2.ml.ROW_SAMPLE, responses.astype(int)) + self.model.train(samples, cv.ml.ROW_SAMPLE, responses.astype(int)) def predict(self, samples): _ret, resp = self.model.predict(samples) @@ -123,7 +123,7 @@ class SVM(LetterStatModel): class MLP(LetterStatModel): def __init__(self): - self.model = cv2.ml.ANN_MLP_create() + self.model = cv.ml.ANN_MLP_create() def train(self, samples, responses): _sample_n, var_n = samples.shape @@ -131,13 +131,13 @@ class MLP(LetterStatModel): layer_sizes = np.int32([var_n, 100, 100, self.class_n]) self.model.setLayerSizes(layer_sizes) - self.model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP) + self.model.setTrainMethod(cv.ml.ANN_MLP_BACKPROP) self.model.setBackpropMomentumScale(0.0) self.model.setBackpropWeightScale(0.001) - self.model.setTermCriteria((cv2.TERM_CRITERIA_COUNT, 20, 0.01)) - self.model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 2, 1) + self.model.setTermCriteria((cv.TERM_CRITERIA_COUNT, 20, 0.01)) + self.model.setActivationFunction(cv.ml.ANN_MLP_SIGMOID_SYM, 2, 1) - self.model.train(samples, cv2.ml.ROW_SAMPLE, np.float32(new_responses)) + self.model.train(samples, cv.ml.ROW_SAMPLE, np.float32(new_responses)) def predict(self, samples): _ret, resp = self.model.predict(samples) @@ -184,4 +184,4 @@ if __name__ == '__main__': fn = args['--save'] print('saving model to %s ...' 
% fn) model.save(fn) - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/lk_homography.py b/samples/python/lk_homography.py index 450b0da..0da4761 100755 --- a/samples/python/lk_homography.py +++ b/samples/python/lk_homography.py @@ -24,14 +24,14 @@ r - toggle RANSAC from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv import video from common import draw_str from video import presets lk_params = dict( winSize = (19, 19), maxLevel = 2, - criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) + criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03)) feature_params = dict( maxCorners = 1000, qualityLevel = 0.01, @@ -39,8 +39,8 @@ feature_params = dict( maxCorners = 1000, blockSize = 19 ) def checkedTrace(img0, img1, p0, back_threshold = 1.0): - p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) - p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) + p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) + p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) d = abs(p0-p0r).reshape(-1, 2).max(-1) status = d < back_threshold return p1, status @@ -57,7 +57,7 @@ class App: def run(self): while True: _ret, frame = self.cam.read() - frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) vis = frame.copy() if self.p0 is not None: p2, trace_status = checkedTrace(self.gray1, frame_gray, self.p1) @@ -69,33 +69,33 @@ class App: if len(self.p0) < 4: self.p0 = None continue - H, status = cv2.findHomography(self.p0, self.p1, (0, cv2.RANSAC)[self.use_ransac], 10.0) + H, status = cv.findHomography(self.p0, self.p1, (0, cv.RANSAC)[self.use_ransac], 10.0) h, w = frame.shape[:2] - overlay = cv2.warpPerspective(self.frame0, H, (w, h)) - vis = cv2.addWeighted(vis, 0.5, overlay, 0.5, 0.0) + overlay = cv.warpPerspective(self.frame0, H, (w, h)) + vis = cv.addWeighted(vis, 0.5, overlay, 0.5, 0.0) for (x0, y0), (x1, y1), good in zip(self.p0[:,0], self.p1[:,0], status[:,0]): if good: - cv2.line(vis, (x0, y0), (x1, y1), (0, 128, 0)) - cv2.circle(vis, (x1, y1), 2, (red, green)[good], -1) + cv.line(vis, (x0, y0), (x1, y1), (0, 128, 0)) + cv.circle(vis, (x1, y1), 2, (red, green)[good], -1) draw_str(vis, (20, 20), 'track count: %d' % len(self.p1)) if self.use_ransac: draw_str(vis, (20, 40), 'RANSAC') else: - p = cv2.goodFeaturesToTrack(frame_gray, **feature_params) + p = cv.goodFeaturesToTrack(frame_gray, **feature_params) if p is not None: for x, y in p[:,0]: - cv2.circle(vis, (x, y), 2, green, -1) + cv.circle(vis, (x, y), 2, green, -1) draw_str(vis, (20, 20), 'feature count: %d' % len(p)) - cv2.imshow('lk_homography', vis) + cv.imshow('lk_homography', vis) - ch = cv2.waitKey(1) + ch = cv.waitKey(1) if ch == 27: break if ch == ord(' '): self.frame0 = frame.copy() - self.p0 = cv2.goodFeaturesToTrack(frame_gray, **feature_params) + self.p0 = cv.goodFeaturesToTrack(frame_gray, **feature_params) if self.p0 is not None: self.p1 = self.p0 self.gray0 = frame_gray @@ -114,7 +114,7 @@ def main(): print(__doc__) App(video_src).run() - cv2.destroyAllWindows() + cv.destroyAllWindows() if __name__ == '__main__': main() diff --git a/samples/python/lk_track.py b/samples/python/lk_track.py index 11cf2db..19e819f 100755 --- a/samples/python/lk_track.py +++ b/samples/python/lk_track.py @@ -22,14 +22,14 @@ ESC - exit from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv import 
video from common import anorm2, draw_str from time import clock lk_params = dict( winSize = (15, 15), maxLevel = 2, - criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) + criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03)) feature_params = dict( maxCorners = 500, qualityLevel = 0.3, @@ -47,14 +47,14 @@ class App: def run(self): while True: _ret, frame = self.cam.read() - frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY) vis = frame.copy() if len(self.tracks) > 0: img0, img1 = self.prev_gray, frame_gray p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2) - p1, _st, _err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) - p0r, _st, _err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) + p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) + p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) d = abs(p0-p0r).reshape(-1, 2).max(-1) good = d < 1 new_tracks = [] @@ -65,17 +65,17 @@ class App: if len(tr) > self.track_len: del tr[0] new_tracks.append(tr) - cv2.circle(vis, (x, y), 2, (0, 255, 0), -1) + cv.circle(vis, (x, y), 2, (0, 255, 0), -1) self.tracks = new_tracks - cv2.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0)) + cv.polylines(vis, [np.int32(tr) for tr in self.tracks], False, (0, 255, 0)) draw_str(vis, (20, 20), 'track count: %d' % len(self.tracks)) if self.frame_idx % self.detect_interval == 0: mask = np.zeros_like(frame_gray) mask[:] = 255 for x, y in [np.int32(tr[-1]) for tr in self.tracks]: - cv2.circle(mask, (x, y), 5, 0, -1) - p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params) + cv.circle(mask, (x, y), 5, 0, -1) + p = cv.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params) if p is not None: for x, y in np.float32(p).reshape(-1, 2): self.tracks.append([(x, y)]) @@ -83,9 +83,9 @@ class App: self.frame_idx += 1 self.prev_gray = frame_gray - cv2.imshow('lk_track', vis) + cv.imshow('lk_track', vis) - ch = cv2.waitKey(1) + ch = cv.waitKey(1) if ch == 27: break @@ -98,7 +98,7 @@ def main(): print(__doc__) App(video_src).run() - cv2.destroyAllWindows() + cv.destroyAllWindows() if __name__ == '__main__': main() diff --git a/samples/python/logpolar.py b/samples/python/logpolar.py index fdf03f3..1af0f11 100644 --- a/samples/python/logpolar.py +++ b/samples/python/logpolar.py @@ -13,7 +13,7 @@ Keys: # Python 2/3 compatibility from __future__ import print_function -import cv2 +import cv2 as cv if __name__ == '__main__': print(__doc__) @@ -24,16 +24,16 @@ if __name__ == '__main__': except IndexError: fn = '../data/fruits.jpg' - img = cv2.imread(fn) + img = cv.imread(fn) if img is None: print('Failed to load image file:', fn) sys.exit(1) - img2 = cv2.logPolar(img, (img.shape[0]/2, img.shape[1]/2), 40, cv2.WARP_FILL_OUTLIERS) - img3 = cv2.linearPolar(img, (img.shape[0]/2, img.shape[1]/2), 40, cv2.WARP_FILL_OUTLIERS) + img2 = cv.logPolar(img, (img.shape[0]/2, img.shape[1]/2), 40, cv.WARP_FILL_OUTLIERS) + img3 = cv.linearPolar(img, (img.shape[0]/2, img.shape[1]/2), 40, cv.WARP_FILL_OUTLIERS) - cv2.imshow('before', img) - cv2.imshow('logpolar', img2) - cv2.imshow('linearpolar', img3) + cv.imshow('before', img) + cv.imshow('logpolar', img2) + cv.imshow('linearpolar', img3) - cv2.waitKey(0) + cv.waitKey(0) diff --git a/samples/python/morphology.py b/samples/python/morphology.py index 0b62784..1d95fa9 100755 --- a/samples/python/morphology.py +++ 
b/samples/python/morphology.py @@ -18,7 +18,7 @@ import sys PY3 = sys.version_info[0] == 3 import numpy as np -import cv2 +import cv2 as cv if __name__ == '__main__': @@ -33,13 +33,13 @@ if __name__ == '__main__': except: fn = '../data/baboon.jpg' - img = cv2.imread(fn) + img = cv.imread(fn) if img is None: print('Failed to load image file:', fn) sys.exit(1) - cv2.imshow('original', img) + cv.imshow('original', img) modes = cycle(['erode/dilate', 'open/close', 'blackhat/tophat', 'gradient']) str_modes = cycle(['ellipse', 'rect', 'cross']) @@ -52,8 +52,8 @@ if __name__ == '__main__': cur_str_mode = str_modes.next() def update(dummy=None): - sz = cv2.getTrackbarPos('op/size', 'morphology') - iters = cv2.getTrackbarPos('iters', 'morphology') + sz = cv.getTrackbarPos('op/size', 'morphology') + iters = cv.getTrackbarPos('iters', 'morphology') opers = cur_mode.split('/') if len(opers) > 1: sz = sz - 10 @@ -65,21 +65,21 @@ if __name__ == '__main__': str_name = 'MORPH_' + cur_str_mode.upper() oper_name = 'MORPH_' + op.upper() - st = cv2.getStructuringElement(getattr(cv2, str_name), (sz, sz)) - res = cv2.morphologyEx(img, getattr(cv2, oper_name), st, iterations=iters) + st = cv.getStructuringElement(getattr(cv, str_name), (sz, sz)) + res = cv.morphologyEx(img, getattr(cv, oper_name), st, iterations=iters) draw_str(res, (10, 20), 'mode: ' + cur_mode) draw_str(res, (10, 40), 'operation: ' + oper_name) draw_str(res, (10, 60), 'structure: ' + str_name) draw_str(res, (10, 80), 'ksize: %d iters: %d' % (sz, iters)) - cv2.imshow('morphology', res) + cv.imshow('morphology', res) - cv2.namedWindow('morphology') - cv2.createTrackbar('op/size', 'morphology', 12, 20, update) - cv2.createTrackbar('iters', 'morphology', 1, 10, update) + cv.namedWindow('morphology') + cv.createTrackbar('op/size', 'morphology', 12, 20, update) + cv.createTrackbar('iters', 'morphology', 1, 10, update) update() while True: - ch = cv2.waitKey() + ch = cv.waitKey() if ch == 27: break if ch == ord('1'): @@ -93,4 +93,4 @@ if __name__ == '__main__': else: cur_str_mode = str_modes.next() update() - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/mosse.py b/samples/python/mosse.py index e0311bd..e11e921 100755 --- a/samples/python/mosse.py +++ b/samples/python/mosse.py @@ -30,7 +30,7 @@ if PY3: xrange = range import numpy as np -import cv2 +import cv2 as cv from common import draw_str, RectSelector import video @@ -44,7 +44,7 @@ def rnd_warp(a): T[:2, :2] += (np.random.rand(2, 2) - 0.5)*coef c = (w/2, h/2) T[:,2] = c - np.dot(T[:2, :2], c) - return cv2.warpAffine(a, T, (w, h), borderMode = cv2.BORDER_REFLECT) + return cv.warpAffine(a, T, (w, h), borderMode = cv.BORDER_REFLECT) def divSpec(A, B): Ar, Ai = A[...,0], A[...,1] @@ -58,32 +58,32 @@ eps = 1e-5 class MOSSE: def __init__(self, frame, rect): x1, y1, x2, y2 = rect - w, h = map(cv2.getOptimalDFTSize, [x2-x1, y2-y1]) + w, h = map(cv.getOptimalDFTSize, [x2-x1, y2-y1]) x1, y1 = (x1+x2-w)//2, (y1+y2-h)//2 self.pos = x, y = x1+0.5*(w-1), y1+0.5*(h-1) self.size = w, h - img = cv2.getRectSubPix(frame, (w, h), (x, y)) + img = cv.getRectSubPix(frame, (w, h), (x, y)) - self.win = cv2.createHanningWindow((w, h), cv2.CV_32F) + self.win = cv.createHanningWindow((w, h), cv.CV_32F) g = np.zeros((h, w), np.float32) g[h//2, w//2] = 1 - g = cv2.GaussianBlur(g, (-1, -1), 2.0) + g = cv.GaussianBlur(g, (-1, -1), 2.0) g /= g.max() - self.G = cv2.dft(g, flags=cv2.DFT_COMPLEX_OUTPUT) + self.G = cv.dft(g, flags=cv.DFT_COMPLEX_OUTPUT) self.H1 = np.zeros_like(self.G) self.H2 = 
np.zeros_like(self.G) for _i in xrange(128): a = self.preprocess(rnd_warp(img)) - A = cv2.dft(a, flags=cv2.DFT_COMPLEX_OUTPUT) - self.H1 += cv2.mulSpectrums(self.G, A, 0, conjB=True) - self.H2 += cv2.mulSpectrums( A, A, 0, conjB=True) + A = cv.dft(a, flags=cv.DFT_COMPLEX_OUTPUT) + self.H1 += cv.mulSpectrums(self.G, A, 0, conjB=True) + self.H2 += cv.mulSpectrums( A, A, 0, conjB=True) self.update_kernel() self.update(frame) def update(self, frame, rate = 0.125): (x, y), (w, h) = self.pos, self.size - self.last_img = img = cv2.getRectSubPix(frame, (w, h), (x, y)) + self.last_img = img = cv.getRectSubPix(frame, (w, h), (x, y)) img = self.preprocess(img) self.last_resp, (dx, dy), self.psr = self.correlate(img) self.good = self.psr > 8.0 @@ -91,19 +91,19 @@ class MOSSE: return self.pos = x+dx, y+dy - self.last_img = img = cv2.getRectSubPix(frame, (w, h), self.pos) + self.last_img = img = cv.getRectSubPix(frame, (w, h), self.pos) img = self.preprocess(img) - A = cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT) - H1 = cv2.mulSpectrums(self.G, A, 0, conjB=True) - H2 = cv2.mulSpectrums( A, A, 0, conjB=True) + A = cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT) + H1 = cv.mulSpectrums(self.G, A, 0, conjB=True) + H2 = cv.mulSpectrums( A, A, 0, conjB=True) self.H1 = self.H1 * (1.0-rate) + H1 * rate self.H2 = self.H2 * (1.0-rate) + H2 * rate self.update_kernel() @property def state_vis(self): - f = cv2.idft(self.H, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT ) + f = cv.idft(self.H, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT ) h, w = f.shape f = np.roll(f, -h//2, 0) f = np.roll(f, -w//2, 1) @@ -116,12 +116,12 @@ class MOSSE: def draw_state(self, vis): (x, y), (w, h) = self.pos, self.size x1, y1, x2, y2 = int(x-0.5*w), int(y-0.5*h), int(x+0.5*w), int(y+0.5*h) - cv2.rectangle(vis, (x1, y1), (x2, y2), (0, 0, 255)) + cv.rectangle(vis, (x1, y1), (x2, y2), (0, 0, 255)) if self.good: - cv2.circle(vis, (int(x), int(y)), 2, (0, 0, 255), -1) + cv.circle(vis, (int(x), int(y)), 2, (0, 0, 255), -1) else: - cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255)) - cv2.line(vis, (x2, y1), (x1, y2), (0, 0, 255)) + cv.line(vis, (x1, y1), (x2, y2), (0, 0, 255)) + cv.line(vis, (x2, y1), (x1, y2), (0, 0, 255)) draw_str(vis, (x1, y2+16), 'PSR: %.2f' % self.psr) def preprocess(self, img): @@ -130,12 +130,12 @@ class MOSSE: return img*self.win def correlate(self, img): - C = cv2.mulSpectrums(cv2.dft(img, flags=cv2.DFT_COMPLEX_OUTPUT), self.H, 0, conjB=True) - resp = cv2.idft(C, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT) + C = cv.mulSpectrums(cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT), self.H, 0, conjB=True) + resp = cv.idft(C, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT) h, w = resp.shape - _, mval, _, (mx, my) = cv2.minMaxLoc(resp) + _, mval, _, (mx, my) = cv.minMaxLoc(resp) side_resp = resp.copy() - cv2.rectangle(side_resp, (mx-5, my-5), (mx+5, my+5), 0, -1) + cv.rectangle(side_resp, (mx-5, my-5), (mx+5, my+5), 0, -1) smean, sstd = side_resp.mean(), side_resp.std() psr = (mval-smean) / (sstd+eps) return resp, (mx-w//2, my-h//2), psr @@ -148,13 +148,13 @@ class App: def __init__(self, video_src, paused = False): self.cap = video.create_capture(video_src) _, self.frame = self.cap.read() - cv2.imshow('frame', self.frame) + cv.imshow('frame', self.frame) self.rect_sel = RectSelector('frame', self.onrect) self.trackers = [] self.paused = paused def onrect(self, rect): - frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY) + frame_gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY) tracker = MOSSE(frame_gray, rect) self.trackers.append(tracker) @@ -164,7 +164,7 
@@ class App: ret, self.frame = self.cap.read() if not ret: break - frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY) + frame_gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY) for tracker in self.trackers: tracker.update(frame_gray) @@ -172,11 +172,11 @@ class App: for tracker in self.trackers: tracker.draw_state(vis) if len(self.trackers) > 0: - cv2.imshow('tracker state', self.trackers[-1].state_vis) + cv.imshow('tracker state', self.trackers[-1].state_vis) self.rect_sel.draw(vis) - cv2.imshow('frame', vis) - ch = cv2.waitKey(10) + cv.imshow('frame', vis) + ch = cv.waitKey(10) if ch == 27: break if ch == ord(' '): diff --git a/samples/python/mouse_and_match.py b/samples/python/mouse_and_match.py index 17ca2c9..1c86f74 100755 --- a/samples/python/mouse_and_match.py +++ b/samples/python/mouse_and_match.py @@ -15,7 +15,7 @@ Demonstrate using a mouse to interact with an image: from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv # built-in modules import os @@ -30,27 +30,27 @@ sel = (0,0,0,0) def onmouse(event, x, y, flags, param): global drag_start, sel - if event == cv2.EVENT_LBUTTONDOWN: + if event == cv.EVENT_LBUTTONDOWN: drag_start = x, y sel = 0,0,0,0 - elif event == cv2.EVENT_LBUTTONUP: + elif event == cv.EVENT_LBUTTONUP: if sel[2] > sel[0] and sel[3] > sel[1]: patch = gray[sel[1]:sel[3],sel[0]:sel[2]] - result = cv2.matchTemplate(gray,patch,cv2.TM_CCOEFF_NORMED) + result = cv.matchTemplate(gray,patch,cv.TM_CCOEFF_NORMED) result = np.abs(result)**3 - _val, result = cv2.threshold(result, 0.01, 0, cv2.THRESH_TOZERO) - result8 = cv2.normalize(result,None,0,255,cv2.NORM_MINMAX,cv2.CV_8U) - cv2.imshow("result", result8) + _val, result = cv.threshold(result, 0.01, 0, cv.THRESH_TOZERO) + result8 = cv.normalize(result,None,0,255,cv.NORM_MINMAX,cv.CV_8U) + cv.imshow("result", result8) drag_start = None elif drag_start: #print flags - if flags & cv2.EVENT_FLAG_LBUTTON: + if flags & cv.EVENT_FLAG_LBUTTON: minpos = min(drag_start[0], x), min(drag_start[1], y) maxpos = max(drag_start[0], x), max(drag_start[1], y) sel = minpos[0], minpos[1], maxpos[0], maxpos[1] - img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR) - cv2.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0,255,255), 1) - cv2.imshow("gray", img) + img = cv.cvtColor(gray, cv.COLOR_GRAY2BGR) + cv.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0,255,255), 1) + cv.imshow("gray", img) else: print("selection is complete") drag_start = None @@ -63,21 +63,21 @@ if __name__ == '__main__': args = parser.parse_args() path = args.input - cv2.namedWindow("gray",1) - cv2.setMouseCallback("gray", onmouse) + cv.namedWindow("gray",1) + cv.setMouseCallback("gray", onmouse) '''Loop through all the images in the directory''' for infile in glob.glob( os.path.join(path, '*.*') ): ext = os.path.splitext(infile)[1][1:] #get the filename extenstion if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm": print(infile) - img=cv2.imread(infile,1) + img=cv.imread(infile,1) if img is None: continue sel = (0,0,0,0) drag_start = None - gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - cv2.imshow("gray",gray) - if cv2.waitKey() == 27: + gray=cv.cvtColor(img, cv.COLOR_BGR2GRAY) + cv.imshow("gray",gray) + if cv.waitKey() == 27: break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/mser.py b/samples/python/mser.py index 1c5c69b..bf984d0 100755 --- a/samples/python/mser.py +++ b/samples/python/mser.py @@ -15,7 +15,7 @@ Keys: ''' import numpy as np -import cv2 +import cv2 as cv 
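The mser.py changes continue below into the capture loop; as a companion sketch, the same region extraction on a single still image under the 'cv' alias (the image path is an assumption) could read:

import cv2 as cv

img = cv.imread('../data/blox.jpg')
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
mser = cv.MSER_create()
regions, _bboxes = mser.detectRegions(gray)            # MSER regions as point sets
hulls = [cv.convexHull(p.reshape(-1, 1, 2)) for p in regions]
cv.polylines(img, hulls, 1, (0, 255, 0))               # outline each region's convex hull
cv.imshow('mser sketch', img)
cv.waitKey(0)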
import video import sys @@ -26,20 +26,20 @@ if __name__ == '__main__': video_src = 0 cam = video.create_capture(video_src) - mser = cv2.MSER_create() + mser = cv.MSER_create() while True: ret, img = cam.read() if ret == 0: break - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) vis = img.copy() regions, _ = mser.detectRegions(gray) - hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions] - cv2.polylines(vis, hulls, 1, (0, 255, 0)) + hulls = [cv.convexHull(p.reshape(-1, 1, 2)) for p in regions] + cv.polylines(vis, hulls, 1, (0, 255, 0)) - cv2.imshow('img', vis) - if cv2.waitKey(5) == 27: + cv.imshow('img', vis) + if cv.waitKey(5) == 27: break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/opencv_version.py b/samples/python/opencv_version.py index b26b55c..3e503a4 100644 --- a/samples/python/opencv_version.py +++ b/samples/python/opencv_version.py @@ -13,7 +13,7 @@ Usage: # Python 2/3 compatibility from __future__ import print_function -import cv2 +import cv2 as cv if __name__ == '__main__': import sys @@ -25,7 +25,7 @@ if __name__ == '__main__': param = "" if "--build" == param: - print(cv2.getBuildInformation()) + print(cv.getBuildInformation()) elif "--help" == param: print("\t--build\n\t\tprint complete build info") print("\t--help\n\t\tprint this help") diff --git a/samples/python/opt_flow.py b/samples/python/opt_flow.py index 8338dc6..c43c726 100755 --- a/samples/python/opt_flow.py +++ b/samples/python/opt_flow.py @@ -17,7 +17,7 @@ Keys: from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv import video @@ -27,10 +27,10 @@ def draw_flow(img, flow, step=16): fx, fy = flow[y,x].T lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2) lines = np.int32(lines + 0.5) - vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - cv2.polylines(vis, lines, 0, (0, 255, 0)) + vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR) + cv.polylines(vis, lines, 0, (0, 255, 0)) for (x1, y1), (_x2, _y2) in lines: - cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1) + cv.circle(vis, (x1, y1), 1, (0, 255, 0), -1) return vis @@ -43,7 +43,7 @@ def draw_hsv(flow): hsv[...,0] = ang*(180/np.pi/2) hsv[...,1] = 255 hsv[...,2] = np.minimum(v*4, 255) - bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) + bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR) return bgr @@ -52,7 +52,7 @@ def warp_flow(img, flow): flow = -flow flow[:,:,0] += np.arange(w) flow[:,:,1] += np.arange(h)[:,np.newaxis] - res = cv2.remap(img, flow, None, cv2.INTER_LINEAR) + res = cv.remap(img, flow, None, cv.INTER_LINEAR) return res if __name__ == '__main__': @@ -65,25 +65,25 @@ if __name__ == '__main__': cam = video.create_capture(fn) ret, prev = cam.read() - prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY) + prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY) show_hsv = False show_glitch = False cur_glitch = prev.copy() while True: ret, img = cam.read() - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0) + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) + flow = cv.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0) prevgray = gray - cv2.imshow('flow', draw_flow(gray, flow)) + cv.imshow('flow', draw_flow(gray, flow)) if show_hsv: - cv2.imshow('flow HSV', draw_hsv(flow)) + cv.imshow('flow HSV', draw_hsv(flow)) if show_glitch: cur_glitch = warp_flow(cur_glitch, flow) - cv2.imshow('glitch', cur_glitch) + cv.imshow('glitch', cur_glitch) - ch = cv2.waitKey(5) + ch = cv.waitKey(5) if ch == 
27: break if ch == ord('1'): @@ -94,4 +94,4 @@ if __name__ == '__main__': if show_glitch: cur_glitch = img.copy() print('glitch is', ['off', 'on'][show_glitch]) - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/peopledetect.py b/samples/python/peopledetect.py index 84db096..d0ddc1b 100755 --- a/samples/python/peopledetect.py +++ b/samples/python/peopledetect.py @@ -13,7 +13,7 @@ Press any key to continue, ESC to stop. from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv def inside(r, q): @@ -27,7 +27,7 @@ def draw_detections(img, rects, thickness = 1): # the HOG detector returns slightly larger rectangles than the real objects. # so we slightly shrink the rectangles to get a nicer output. pad_w, pad_h = int(0.15*w), int(0.05*h) - cv2.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness) + cv.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness) if __name__ == '__main__': @@ -37,15 +37,15 @@ if __name__ == '__main__': print(__doc__) - hog = cv2.HOGDescriptor() - hog.setSVMDetector( cv2.HOGDescriptor_getDefaultPeopleDetector() ) + hog = cv.HOGDescriptor() + hog.setSVMDetector( cv.HOGDescriptor_getDefaultPeopleDetector() ) default = ['../data/basketball2.png '] if len(sys.argv[1:]) == 0 else [] for fn in it.chain(*map(glob, default + sys.argv[1:])): print(fn, ' - ',) try: - img = cv2.imread(fn) + img = cv.imread(fn) if img is None: print('Failed to load image file:', fn) continue @@ -64,8 +64,8 @@ if __name__ == '__main__': draw_detections(img, found) draw_detections(img, found_filtered, 3) print('%d (%d) found' % (len(found_filtered), len(found))) - cv2.imshow('img', img) - ch = cv2.waitKey() + cv.imshow('img', img) + ch = cv.waitKey() if ch == 27: break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/plane_ar.py b/samples/python/plane_ar.py index 1fb13e2..8265248 100755 --- a/samples/python/plane_ar.py +++ b/samples/python/plane_ar.py @@ -26,7 +26,7 @@ Use 'focal' slider to adjust to camera focal length for proper video augmentatio from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv import video import common from plane_tracker import PlaneTracker @@ -48,8 +48,8 @@ class App: self.paused = False self.tracker = PlaneTracker() - cv2.namedWindow('plane') - cv2.createTrackbar('focal', 'plane', 25, 50, common.nothing) + cv.namedWindow('plane') + cv.createTrackbar('focal', 'plane', 25, 50, common.nothing) self.rect_sel = common.RectSelector('plane', self.on_rect) def on_rect(self, rect): @@ -68,14 +68,14 @@ class App: if playing: tracked = self.tracker.track(self.frame) for tr in tracked: - cv2.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2) + cv.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2) for (x, y) in np.int32(tr.p1): - cv2.circle(vis, (x, y), 2, (255, 255, 255)) + cv.circle(vis, (x, y), 2, (255, 255, 255)) self.draw_overlay(vis, tr) self.rect_sel.draw(vis) - cv2.imshow('plane', vis) - ch = cv2.waitKey(1) + cv.imshow('plane', vis) + ch = cv.waitKey(1) if ch == ord(' '): self.paused = not self.paused if ch == ord('c'): @@ -86,18 +86,18 @@ class App: def draw_overlay(self, vis, tracked): x0, y0, x1, y1 = tracked.target.rect quad_3d = np.float32([[x0, y0, 0], [x1, y0, 0], [x1, y1, 0], [x0, y1, 0]]) - fx = 0.5 + cv2.getTrackbarPos('focal', 'plane') / 50.0 + fx = 0.5 + cv.getTrackbarPos('focal', 'plane') / 50.0 h, w = vis.shape[:2] K = np.float64([[fx*w, 0, 0.5*(w-1)], [0, fx*w, 
0.5*(h-1)], [0.0,0.0, 1.0]]) dist_coef = np.zeros(4) - _ret, rvec, tvec = cv2.solvePnP(quad_3d, tracked.quad, K, dist_coef) + _ret, rvec, tvec = cv.solvePnP(quad_3d, tracked.quad, K, dist_coef) verts = ar_verts * [(x1-x0), (y1-y0), -(x1-x0)*0.3] + (x0, y0, 0) - verts = cv2.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2) + verts = cv.projectPoints(verts, rvec, tvec, K, dist_coef)[0].reshape(-1, 2) for i, j in ar_edges: (x0, y0), (x1, y1) = verts[i], verts[j] - cv2.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (255, 255, 0), 2) + cv.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (255, 255, 0), 2) if __name__ == '__main__': diff --git a/samples/python/plane_tracker.py b/samples/python/plane_tracker.py index 36d5972..ed84fb0 100755 --- a/samples/python/plane_tracker.py +++ b/samples/python/plane_tracker.py @@ -30,7 +30,7 @@ if PY3: xrange = range import numpy as np -import cv2 +import cv2 as cv # built-in modules from collections import namedtuple @@ -70,8 +70,8 @@ TrackedTarget = namedtuple('TrackedTarget', 'target, p0, p1, H, quad') class PlaneTracker: def __init__(self): - self.detector = cv2.ORB_create( nfeatures = 1000 ) - self.matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329) + self.detector = cv.ORB_create( nfeatures = 1000 ) + self.matcher = cv.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329) self.targets = [] self.frame_points = [] @@ -115,7 +115,7 @@ class PlaneTracker: p0 = [target.keypoints[m.trainIdx].pt for m in matches] p1 = [self.frame_points[m.queryIdx].pt for m in matches] p0, p1 = np.float32((p0, p1)) - H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0) + H, status = cv.findHomography(p0, p1, cv.RANSAC, 3.0) status = status.ravel() != 0 if status.sum() < MIN_MATCH_COUNT: continue @@ -123,7 +123,7 @@ class PlaneTracker: x0, y0, x1, y1 = target.rect quad = np.float32([[x0, y0], [x1, y0], [x1, y1], [x0, y1]]) - quad = cv2.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2) + quad = cv.perspectiveTransform(quad.reshape(1, -1, 2), H).reshape(-1, 2) track = TrackedTarget(target=target, p0=p0, p1=p1, H=H, quad=quad) tracked.append(track) @@ -145,7 +145,7 @@ class App: self.paused = False self.tracker = PlaneTracker() - cv2.namedWindow('plane') + cv.namedWindow('plane') self.rect_sel = common.RectSelector('plane', self.on_rect) def on_rect(self, rect): @@ -164,13 +164,13 @@ class App: if playing: tracked = self.tracker.track(self.frame) for tr in tracked: - cv2.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2) + cv.polylines(vis, [np.int32(tr.quad)], True, (255, 255, 255), 2) for (x, y) in np.int32(tr.p1): - cv2.circle(vis, (x, y), 2, (255, 255, 255)) + cv.circle(vis, (x, y), 2, (255, 255, 255)) self.rect_sel.draw(vis) - cv2.imshow('plane', vis) - ch = cv2.waitKey(1) + cv.imshow('plane', vis) + ch = cv.waitKey(1) if ch == ord(' '): self.paused = not self.paused if ch == ord('c'): diff --git a/samples/python/squares.py b/samples/python/squares.py index 967892a..90e8af3 100755 --- a/samples/python/squares.py +++ b/samples/python/squares.py @@ -14,7 +14,7 @@ if PY3: xrange = range import numpy as np -import cv2 +import cv2 as cv def angle_cos(p0, p1, p2): @@ -22,20 +22,20 @@ def angle_cos(p0, p1, p2): return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) ) def find_squares(img): - img = cv2.GaussianBlur(img, (5, 5), 0) + img = cv.GaussianBlur(img, (5, 5), 0) squares = [] - for gray in cv2.split(img): + for gray in cv.split(img): for thrs in xrange(0, 
255, 26): if thrs == 0: - bin = cv2.Canny(gray, 0, 50, apertureSize=5) - bin = cv2.dilate(bin, None) + bin = cv.Canny(gray, 0, 50, apertureSize=5) + bin = cv.dilate(bin, None) else: - _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY) - bin, contours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) + _retval, bin = cv.threshold(gray, thrs, 255, cv.THRESH_BINARY) + bin, contours, _hierarchy = cv.findContours(bin, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE) for cnt in contours: - cnt_len = cv2.arcLength(cnt, True) - cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True) - if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt): + cnt_len = cv.arcLength(cnt, True) + cnt = cv.approxPolyDP(cnt, 0.02*cnt_len, True) + if len(cnt) == 4 and cv.contourArea(cnt) > 1000 and cv.isContourConvex(cnt): cnt = cnt.reshape(-1, 2) max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)]) if max_cos < 0.1: @@ -45,11 +45,11 @@ def find_squares(img): if __name__ == '__main__': from glob import glob for fn in glob('../data/pic*.png'): - img = cv2.imread(fn) + img = cv.imread(fn) squares = find_squares(img) - cv2.drawContours( img, squares, -1, (0, 255, 0), 3 ) - cv2.imshow('squares', img) - ch = cv2.waitKey() + cv.drawContours( img, squares, -1, (0, 255, 0), 3 ) + cv.imshow('squares', img) + ch = cv.waitKey() if ch == 27: break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/stereo_match.py b/samples/python/stereo_match.py index 1f67330..5b08670 100755 --- a/samples/python/stereo_match.py +++ b/samples/python/stereo_match.py @@ -10,7 +10,7 @@ Resulting .ply file cam be easily viewed using MeshLab ( http://meshlab.sourcefo from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv ply_header = '''ply format ascii 1.0 @@ -35,14 +35,14 @@ def write_ply(fn, verts, colors): if __name__ == '__main__': print('loading images...') - imgL = cv2.pyrDown( cv2.imread('../data/aloeL.jpg') ) # downscale images for faster processing - imgR = cv2.pyrDown( cv2.imread('../data/aloeR.jpg') ) + imgL = cv.pyrDown( cv.imread('../data/aloeL.jpg') ) # downscale images for faster processing + imgR = cv.pyrDown( cv.imread('../data/aloeR.jpg') ) # disparity range is tuned for 'aloe' image pair window_size = 3 min_disp = 16 num_disp = 112-min_disp - stereo = cv2.StereoSGBM_create(minDisparity = min_disp, + stereo = cv.StereoSGBM_create(minDisparity = min_disp, numDisparities = num_disp, blockSize = 16, P1 = 8*3*window_size**2, @@ -63,8 +63,8 @@ if __name__ == '__main__': [0,-1, 0, 0.5*h], # turn points 180 deg around x-axis, [0, 0, 0, -f], # so that y-axis looks up [0, 0, 1, 0]]) - points = cv2.reprojectImageTo3D(disp, Q) - colors = cv2.cvtColor(imgL, cv2.COLOR_BGR2RGB) + points = cv.reprojectImageTo3D(disp, Q) + colors = cv.cvtColor(imgL, cv.COLOR_BGR2RGB) mask = disp > disp.min() out_points = points[mask] out_colors = colors[mask] @@ -72,7 +72,7 @@ if __name__ == '__main__': write_ply('out.ply', out_points, out_colors) print('%s saved' % 'out.ply') - cv2.imshow('left', imgL) - cv2.imshow('disparity', (disp-min_disp)/num_disp) - cv2.waitKey() - cv2.destroyAllWindows() + cv.imshow('left', imgL) + cv.imshow('disparity', (disp-min_disp)/num_disp) + cv.waitKey() + cv.destroyAllWindows() diff --git a/samples/python/texture_flow.py b/samples/python/texture_flow.py index 45c50d9..c322080 100755 --- a/samples/python/texture_flow.py +++ b/samples/python/texture_flow.py @@ -3,7 +3,7 @@ ''' Texture flow 
direction estimation. -Sample shows how cv2.cornerEigenValsAndVecs function can be used +Sample shows how cv.cornerEigenValsAndVecs function can be used to estimate image texture flow direction. Usage: @@ -14,7 +14,7 @@ Usage: from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv if __name__ == '__main__': import sys @@ -23,15 +23,15 @@ if __name__ == '__main__': except: fn = '../data/starry_night.jpg' - img = cv2.imread(fn) + img = cv.imread(fn) if img is None: print('Failed to load image file:', fn) sys.exit(1) - gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) h, w = img.shape[:2] - eigen = cv2.cornerEigenValsAndVecs(gray, 15, 3) + eigen = cv.cornerEigenValsAndVecs(gray, 15, 3) eigen = eigen.reshape(h, w, 3, 2) # [[e1, e2], v1, v2] flow = eigen[:,:,2] @@ -41,7 +41,7 @@ if __name__ == '__main__': points = np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2) for x, y in np.int32(points): vx, vy = np.int32(flow[y, x]*d) - cv2.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv2.LINE_AA) - cv2.imshow('input', img) - cv2.imshow('flow', vis) - cv2.waitKey() + cv.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv.LINE_AA) + cv.imshow('input', img) + cv.imshow('flow', vis) + cv.waitKey() diff --git a/samples/python/tst_scene_render.py b/samples/python/tst_scene_render.py index ce956f5..6955d16 100644 --- a/samples/python/tst_scene_render.py +++ b/samples/python/tst_scene_render.py @@ -7,7 +7,7 @@ from __future__ import print_function import numpy as np from numpy import pi, sin, cos -import cv2 +import cv2 as cv defaultSize = 512 @@ -87,7 +87,7 @@ class TestSceneRender(): self.currentRect = self.initialRect + np.int( 30*cos(self.time*self.speed) + 50*sin(self.time*self.speed)) if self.deformation: self.currentRect[1:3] += self.h/20*cos(self.time) - cv2.fillConvexPoly(img, self.currentRect, (0, 0, 255)) + cv.fillConvexPoly(img, self.currentRect, (0, 0, 255)) self.time += self.timeStep return img @@ -98,19 +98,19 @@ class TestSceneRender(): if __name__ == '__main__': - backGr = cv2.imread('../data/graf1.png') - fgr = cv2.imread('../data/box.png') + backGr = cv.imread('../data/graf1.png') + fgr = cv.imread('../data/box.png') render = TestSceneRender(backGr, fgr) while True: img = render.getNextFrame() - cv2.imshow('img', img) + cv.imshow('img', img) - ch = cv2.waitKey(3) + ch = cv.waitKey(3) if ch == 27: break #import os #print (os.environ['PYTHONPATH']) - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/turing.py b/samples/python/turing.py index 179f0e3..0d80cba 100755 --- a/samples/python/turing.py +++ b/samples/python/turing.py @@ -16,7 +16,7 @@ if PY3: xrange = range import numpy as np -import cv2 +import cv2 as cv from common import draw_str import getopt, sys from itertools import count @@ -37,24 +37,24 @@ if __name__ == '__main__': out = None if '-o' in args: fn = args['-o'] - out = cv2.VideoWriter(args['-o'], cv2.VideoWriter_fourcc(*'DIB '), 30.0, (w, h), False) + out = cv.VideoWriter(args['-o'], cv.VideoWriter_fourcc(*'DIB '), 30.0, (w, h), False) print('writing %s ...' 
% fn) a = np.zeros((h, w), np.float32) - cv2.randu(a, np.array([0]), np.array([1])) + cv.randu(a, np.array([0]), np.array([1])) def process_scale(a_lods, lod): - d = a_lods[lod] - cv2.pyrUp(a_lods[lod+1]) + d = a_lods[lod] - cv.pyrUp(a_lods[lod+1]) for _i in xrange(lod): - d = cv2.pyrUp(d) - v = cv2.GaussianBlur(d*d, (3, 3), 0) + d = cv.pyrUp(d) + v = cv.GaussianBlur(d*d, (3, 3), 0) return np.sign(d), v scale_num = 6 for frame_i in count(): a_lods = [a] for i in xrange(scale_num): - a_lods.append(cv2.pyrDown(a_lods[-1])) + a_lods.append(cv.pyrDown(a_lods[-1])) ms, vs = [], [] for i in xrange(1, scale_num): m, v = process_scale(a_lods, i) @@ -68,7 +68,7 @@ if __name__ == '__main__': out.write(a) vis = a.copy() draw_str(vis, (20, 20), 'frame %d' % frame_i) - cv2.imshow('a', vis) - if cv2.waitKey(5) == 27: + cv.imshow('a', vis) + if cv.waitKey(5) == 27: break - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py b/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py index e4d1afe..407cd8b 100644 --- a/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py +++ b/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py @@ -3,7 +3,7 @@ @brief Sample code that shows how to implement your own linear filters by using filter2D function """ import sys -import cv2 +import cv2 as cv import numpy as np @@ -14,7 +14,7 @@ def main(argv): imageName = argv[0] if len(argv) > 0 else "../data/lena.jpg" # Loads an image - src = cv2.imread(imageName, cv2.IMREAD_COLOR) + src = cv.imread(imageName, cv.IMREAD_COLOR) # Check if image is loaded fine if src is None: @@ -37,11 +37,11 @@ def main(argv): ## [update_kernel] ## [apply_filter] # Apply filter - dst = cv2.filter2D(src, ddepth, kernel) + dst = cv.filter2D(src, ddepth, kernel) ## [apply_filter] - cv2.imshow(window_name, dst) + cv.imshow(window_name, dst) - c = cv2.waitKey(500) + c = cv.waitKey(500) if c == 27: break diff --git a/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py b/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py index c8c472f..1f0a74c 100644 --- a/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py +++ b/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py @@ -1,5 +1,5 @@ import sys -import cv2 +import cv2 as cv import numpy as np @@ -9,7 +9,7 @@ def main(argv): filename = argv[0] if len(argv) > 0 else default_file # Loads an image - src = cv2.imread(filename, cv2.IMREAD_COLOR) + src = cv.imread(filename, cv.IMREAD_COLOR) # Check if image is loaded fine if src is None: @@ -20,17 +20,17 @@ def main(argv): ## [convert_to_gray] # Convert it to gray - gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) + gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) ## [convert_to_gray] ## [reduce_noise] # Reduce the noise to avoid false circle detection - gray = cv2.medianBlur(gray, 5) + gray = cv.medianBlur(gray, 5) ## [reduce_noise] ## [houghcircles] rows = gray.shape[0] - circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, rows / 8, + circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, rows / 8, param1=100, param2=30, minRadius=1, maxRadius=30) ## [houghcircles] @@ -41,15 +41,15 @@ def main(argv): for i in circles[0, :]: center = (i[0], i[1]) # circle center - cv2.circle(src, center, 1, (0, 100, 100), 3) + cv.circle(src, center, 1, (0, 100, 100), 3) # circle outline radius = i[2] - cv2.circle(src, center, radius, (255, 0, 255), 3) + cv.circle(src, center, radius, (255, 0, 255), 3) ## [draw] ## [display] - cv2.imshow("detected 
circles", src) - cv2.waitKey(0) + cv.imshow("detected circles", src) + cv.waitKey(0) ## [display] return 0 diff --git a/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py b/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py index 2907996..697f388 100644 --- a/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py +++ b/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py @@ -4,7 +4,7 @@ """ import sys import math -import cv2 +import cv2 as cv import numpy as np @@ -14,7 +14,7 @@ def main(argv): filename = argv[0] if len(argv) > 0 else default_file # Loads an image - src = cv2.imread(filename, cv2.IMREAD_GRAYSCALE) + src = cv.imread(filename, cv.IMREAD_GRAYSCALE) # Check if image is loaded fine if src is None: @@ -25,16 +25,16 @@ def main(argv): ## [edge_detection] # Edge detection - dst = cv2.Canny(src, 50, 200, None, 3) + dst = cv.Canny(src, 50, 200, None, 3) ## [edge_detection] # Copy edges to the images that will display the results in BGR - cdst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR) + cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR) cdstP = np.copy(cdst) ## [hough_lines] # Standard Hough Line Transform - lines = cv2.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0) + lines = cv.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0) ## [hough_lines] ## [draw_lines] # Draw the lines @@ -49,29 +49,29 @@ def main(argv): pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a))) pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a))) - cv2.line(cdst, pt1, pt2, (0,0,255), 3, cv2.LINE_AA) + cv.line(cdst, pt1, pt2, (0,0,255), 3, cv.LINE_AA) ## [draw_lines] ## [hough_lines_p] # Probabilistic Line Transform - linesP = cv2.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10) + linesP = cv.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10) ## [hough_lines_p] ## [draw_lines_p] # Draw the lines if linesP is not None: for i in range(0, len(linesP)): l = linesP[i][0] - cv2.line(cdstP, (l[0], l[1]), (l[2], l[3]), (0,0,255), 3, cv2.LINE_AA) + cv.line(cdstP, (l[0], l[1]), (l[2], l[3]), (0,0,255), 3, cv.LINE_AA) ## [draw_lines_p] ## [imshow] # Show results - cv2.imshow("Source", src) - cv2.imshow("Detected Lines (in red) - Standard Hough Line Transform", cdst) - cv2.imshow("Detected Lines (in red) - Probabilistic Line Transform", cdstP) + cv.imshow("Source", src) + cv.imshow("Detected Lines (in red) - Standard Hough Line Transform", cdst) + cv.imshow("Detected Lines (in red) - Probabilistic Line Transform", cdstP) ## [imshow] ## [exit] # Wait and Exit - cv2.waitKey() + cv.waitKey() return 0 ## [exit] diff --git a/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py b/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py index 5776e44..1cff041 100644 --- a/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py +++ b/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py @@ -3,12 +3,12 @@ @brief Sample code showing how to detect edges using the Laplace operator """ import sys -import cv2 +import cv2 as cv def main(argv): # [variables] # Declare the variables we are going to use - ddepth = cv2.CV_16S + ddepth = cv.CV_16S kernel_size = 3 window_name = "Laplace Demo" # [variables] @@ -16,7 +16,7 @@ def main(argv): # [load] imageName = argv[0] if len(argv) > 0 else "../data/lena.jpg" - src = cv2.imread(imageName, cv2.IMREAD_COLOR) # Load an image + src = cv.imread(imageName, cv.IMREAD_COLOR) # Load an image # Check if image is loaded fine if src is None: @@ -27,30 +27,30 @@ def main(argv): # [reduce_noise] # Remove noise by blurring with a Gaussian filter - src = 
cv2.GaussianBlur(src, (3, 3), 0) + src = cv.GaussianBlur(src, (3, 3), 0) # [reduce_noise] # [convert_to_gray] # Convert the image to grayscale - src_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) + src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) # [convert_to_gray] # Create Window - cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE) + cv.namedWindow(window_name, cv.WINDOW_AUTOSIZE) # [laplacian] # Apply Laplace function - dst = cv2.Laplacian(src_gray, ddepth, kernel_size) + dst = cv.Laplacian(src_gray, ddepth, kernel_size) # [laplacian] # [convert] # converting back to uint8 - abs_dst = cv2.convertScaleAbs(dst) + abs_dst = cv.convertScaleAbs(dst) # [convert] # [display] - cv2.imshow(window_name, abs_dst) - cv2.waitKey(0) + cv.imshow(window_name, abs_dst) + cv.waitKey(0) # [display] return 0 diff --git a/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py b/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py index 36b4e13..453037f 100644 --- a/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py +++ b/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py @@ -4,20 +4,20 @@ """ import sys from random import randint -import cv2 +import cv2 as cv def main(argv): ## [variables] # First we declare the variables we are going to use - borderType = cv2.BORDER_CONSTANT + borderType = cv.BORDER_CONSTANT window_name = "copyMakeBorder Demo" ## [variables] ## [load] imageName = argv[0] if len(argv) > 0 else "../data/lena.jpg" # Loads an image - src = cv2.imread(imageName, cv2.IMREAD_COLOR) + src = cv.imread(imageName, cv.IMREAD_COLOR) # Check if image is loaded fine if src is None: @@ -33,7 +33,7 @@ def main(argv): ' ** Press \'r\' to set the border to be replicated \n' ' ** Press \'ESC\' to exit the program ') ## [create_window] - cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE) + cv.namedWindow(window_name, cv.WINDOW_AUTOSIZE) ## [create_window] ## [init_arguments] # Initialize arguments for the filter @@ -47,20 +47,20 @@ def main(argv): value = [randint(0, 255), randint(0, 255), randint(0, 255)] ## [update_value] ## [copymakeborder] - dst = cv2.copyMakeBorder(src, top, bottom, left, right, borderType, None, value) + dst = cv.copyMakeBorder(src, top, bottom, left, right, borderType, None, value) ## [copymakeborder] ## [display] - cv2.imshow(window_name, dst) + cv.imshow(window_name, dst) ## [display] ## [check_keypress] - c = cv2.waitKey(500) + c = cv.waitKey(500) if c == 27: break elif c == 99: # 99 = ord('c') - borderType = cv2.BORDER_CONSTANT + borderType = cv.BORDER_CONSTANT elif c == 114: # 114 = ord('r') - borderType = cv2.BORDER_REPLICATE + borderType = cv.BORDER_REPLICATE ## [check_keypress] return 0 diff --git a/samples/python/tutorial_code/ImgTrans/SobelDemo/sobel_demo.py b/samples/python/tutorial_code/ImgTrans/SobelDemo/sobel_demo.py index 4afe3af..7c25542 100644 --- a/samples/python/tutorial_code/ImgTrans/SobelDemo/sobel_demo.py +++ b/samples/python/tutorial_code/ImgTrans/SobelDemo/sobel_demo.py @@ -3,7 +3,7 @@ @brief Sample code using Sobel and/or Scharr OpenCV functions to make a simple Edge Detector """ import sys -import cv2 +import cv2 as cv def main(argv): @@ -12,7 +12,7 @@ def main(argv): window_name = ('Sobel Demo - Simple Edge Detector') scale = 1 delta = 0 - ddepth = cv2.CV_16S + ddepth = cv.CV_16S ## [variables] ## [load] @@ -24,7 +24,7 @@ def main(argv): return -1 # Load the image - src = cv2.imread(argv[0], cv2.IMREAD_COLOR) + src = cv.imread(argv[0], cv.IMREAD_COLOR) # Check if image is loaded fine if src is None: 
@@ -34,38 +34,38 @@ def main(argv): ## [reduce_noise] # Remove noise by blurring with a Gaussian filter ( kernel size = 3 ) - src = cv2.GaussianBlur(src, (3, 3), 0) + src = cv.GaussianBlur(src, (3, 3), 0) ## [reduce_noise] ## [convert_to_gray] # Convert the image to grayscale - gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) + gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) ## [convert_to_gray] ## [sobel] # Gradient-X - # grad_x = cv2.Scharr(gray,ddepth,1,0) - grad_x = cv2.Sobel(gray, ddepth, 1, 0, ksize=3, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT) + # grad_x = cv.Scharr(gray,ddepth,1,0) + grad_x = cv.Sobel(gray, ddepth, 1, 0, ksize=3, scale=scale, delta=delta, borderType=cv.BORDER_DEFAULT) # Gradient-Y - # grad_y = cv2.Scharr(gray,ddepth,0,1) - grad_y = cv2.Sobel(gray, ddepth, 0, 1, ksize=3, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT) + # grad_y = cv.Scharr(gray,ddepth,0,1) + grad_y = cv.Sobel(gray, ddepth, 0, 1, ksize=3, scale=scale, delta=delta, borderType=cv.BORDER_DEFAULT) ## [sobel] ## [convert] # converting back to uint8 - abs_grad_x = cv2.convertScaleAbs(grad_x) - abs_grad_y = cv2.convertScaleAbs(grad_y) + abs_grad_x = cv.convertScaleAbs(grad_x) + abs_grad_y = cv.convertScaleAbs(grad_y) ## [convert] ## [blend] ## Total Gradient (approximate) - grad = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0) + grad = cv.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0) ## [blend] ## [display] - cv2.imshow(window_name, grad) - cv2.waitKey(0) + cv.imshow(window_name, grad) + cv.waitKey(0) ## [display] return 0 diff --git a/samples/python/tutorial_code/core/AddingImages/adding_images.py b/samples/python/tutorial_code/core/AddingImages/adding_images.py index 62abb3e..c81fdcd 100644 --- a/samples/python/tutorial_code/core/AddingImages/adding_images.py +++ b/samples/python/tutorial_code/core/AddingImages/adding_images.py @@ -1,7 +1,7 @@ from __future__ import print_function import sys -import cv2 +import cv2 as cv alpha = 0.5 @@ -15,8 +15,8 @@ else: if 0 <= alpha <= 1: alpha = input_alpha ## [load] -src1 = cv2.imread('../../../../data/LinuxLogo.jpg') -src2 = cv2.imread('../../../../data/WindowsLogo.jpg') +src1 = cv.imread('../../../../data/LinuxLogo.jpg') +src2 = cv.imread('../../../../data/WindowsLogo.jpg') ## [load] if src1 is None: print ("Error loading src1") @@ -26,10 +26,10 @@ elif src2 is None: exit(-1) ## [blend_images] beta = (1.0 - alpha) -dst = cv2.addWeighted(src1, alpha, src2, beta, 0.0) +dst = cv.addWeighted(src1, alpha, src2, beta, 0.0) ## [blend_images] ## [display] -cv2.imshow('dst', dst) -cv2.waitKey(0) +cv.imshow('dst', dst) +cv.waitKey(0) ## [display] -cv2.destroyAllWindows() +cv.destroyAllWindows() diff --git a/samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py b/samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py index a6f4098..ec25d64 100644 --- a/samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py +++ b/samples/python/tutorial_code/core/BasicGeometricDrawing/basic_geometric_drawing.py @@ -1,4 +1,4 @@ -import cv2 +import cv2 as cv import numpy as np W = 400 @@ -7,7 +7,7 @@ def my_ellipse(img, angle): thickness = 2 line_type = 8 - cv2.ellipse(img, + cv.ellipse(img, (W / 2, W / 2), (W / 4, W / 16), angle, @@ -22,7 +22,7 @@ def my_filled_circle(img, center): thickness = -1 line_type = 8 - cv2.circle(img, + cv.circle(img, center, W / 32, (0, 0, 255), @@ -45,16 +45,16 @@ def my_polygon(img): [W / 4, 3 * W / 8], [13 * W / 32, 3 * W / 8], [5 * W / 16, 13 * W / 16], 
[W / 4, 13 * W / 16]], np.int32) ppt = ppt.reshape((-1, 1, 2)) - cv2.fillPoly(img, [ppt], (255, 255, 255), line_type) + cv.fillPoly(img, [ppt], (255, 255, 255), line_type) # Only drawind the lines would be: - # cv2.polylines(img, [ppt], True, (255, 0, 255), line_type) + # cv.polylines(img, [ppt], True, (255, 0, 255), line_type) ## [my_polygon] ## [my_line] def my_line(img, start, end): thickness = 2 line_type = 8 - cv2.line(img, + cv.line(img, start, end, (0, 0, 0), @@ -92,7 +92,7 @@ my_filled_circle(atom_image, (W / 2, W / 2)) my_polygon(rook_image) ## [rectangle] # 2.b. Creating rectangles -cv2.rectangle(rook_image, +cv.rectangle(rook_image, (0, 7 * W / 8), (W, W), (0, 255, 255), @@ -106,10 +106,10 @@ my_line(rook_image, (W / 4, 7 * W / 8), (W / 4, W)) my_line(rook_image, (W / 2, 7 * W / 8), (W / 2, W)) my_line(rook_image, (3 * W / 4, 7 * W / 8), (3 * W / 4, W)) ## [draw_rook] -cv2.imshow(atom_window, atom_image) -cv2.moveWindow(atom_window, 0, 200) -cv2.imshow(rook_window, rook_image) -cv2.moveWindow(rook_window, W, 200) +cv.imshow(atom_window, atom_image) +cv.moveWindow(atom_window, 0, 200) +cv.imshow(rook_window, rook_image) +cv.moveWindow(rook_window, W, 200) -cv2.waitKey(0) -cv2.destroyAllWindows() +cv.waitKey(0) +cv.destroyAllWindows() diff --git a/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py b/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py index a16c9be..96535ac 100644 --- a/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py +++ b/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py @@ -1,7 +1,7 @@ from __future__ import print_function import sys -import cv2 +import cv2 as cv import numpy as np @@ -19,34 +19,34 @@ def main(argv): filename = argv[0] if len(argv) > 0 else "../../../../data/lena.jpg" - I = cv2.imread(filename, cv2.IMREAD_GRAYSCALE) + I = cv.imread(filename, cv.IMREAD_GRAYSCALE) if I is None: print('Error opening image') return -1 ## [expand] rows, cols = I.shape - m = cv2.getOptimalDFTSize( rows ) - n = cv2.getOptimalDFTSize( cols ) - padded = cv2.copyMakeBorder(I, 0, m - rows, 0, n - cols, cv2.BORDER_CONSTANT, value=[0, 0, 0]) + m = cv.getOptimalDFTSize( rows ) + n = cv.getOptimalDFTSize( cols ) + padded = cv.copyMakeBorder(I, 0, m - rows, 0, n - cols, cv.BORDER_CONSTANT, value=[0, 0, 0]) ## [expand] ## [complex_and_real] planes = [np.float32(padded), np.zeros(padded.shape, np.float32)] - complexI = cv2.merge(planes) # Add to the expanded another plane with zeros + complexI = cv.merge(planes) # Add to the expanded another plane with zeros ## [complex_and_real] ## [dft] - cv2.dft(complexI, complexI) # this way the result may fit in the source matrix + cv.dft(complexI, complexI) # this way the result may fit in the source matrix ## [dft] # compute the magnitude and switch to logarithmic scale # = > log(1 + sqrt(Re(DFT(I)) ^ 2 + Im(DFT(I)) ^ 2)) ## [magnitude] - cv2.split(complexI, planes) # planes[0] = Re(DFT(I), planes[1] = Im(DFT(I)) - cv2.magnitude(planes[0], planes[1], planes[0])# planes[0] = magnitude + cv.split(complexI, planes) # planes[0] = Re(DFT(I), planes[1] = Im(DFT(I)) + cv.magnitude(planes[0], planes[1], planes[0])# planes[0] = magnitude magI = planes[0] ## [magnitude] ## [log] matOfOnes = np.ones(magI.shape, dtype=magI.dtype) - cv2.add(matOfOnes, magI, magI) # switch to logarithmic scale - cv2.log(magI, magI) + cv.add(matOfOnes, magI, magI) # switch to logarithmic scale + cv.log(magI, 
magI) ## [log] ## [crop_rearrange] magI_rows, magI_cols = magI.shape @@ -69,12 +69,12 @@ def main(argv): magI[0:cx, cy:cy + cy] = tmp ## [crop_rearrange] ## [normalize] - cv2.normalize(magI, magI, 0, 1, cv2.NORM_MINMAX) # Transform the matrix with float values into a + cv.normalize(magI, magI, 0, 1, cv.NORM_MINMAX) # Transform the matrix with float values into a ## viewable image form(float between values 0 and 1). ## [normalize] - cv2.imshow("Input Image" , I ) # Show the result - cv2.imshow("spectrum magnitude", magI) - cv2.waitKey() + cv.imshow("Input Image" , I ) # Show the result + cv.imshow("spectrum magnitude", magI) + cv.waitKey() if __name__ == "__main__": main(sys.argv[1:]) diff --git a/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py b/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py index 21cdb2b..b151575 100644 --- a/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py +++ b/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py @@ -3,7 +3,7 @@ import sys import time import numpy as np -import cv2 +import cv2 as cv ## [basic_method] def is_grayscale(my_image): @@ -23,7 +23,7 @@ def sharpen(my_image): if is_grayscale(my_image): height, width = my_image.shape else: - my_image = cv2.cvtColor(my_image, cv2.CV_8U) + my_image = cv.cvtColor(my_image, cv.CV_8U) height, width, n_channels = my_image.shape result = np.zeros(my_image.shape, my_image.dtype) @@ -47,13 +47,13 @@ def sharpen(my_image): def main(argv): filename = "../../../../data/lena.jpg" - img_codec = cv2.IMREAD_COLOR + img_codec = cv.IMREAD_COLOR if argv: filename = sys.argv[1] if len(argv) >= 2 and sys.argv[2] == "G": - img_codec = cv2.IMREAD_GRAYSCALE + img_codec = cv.IMREAD_GRAYSCALE - src = cv2.imread(filename, img_codec) + src = cv.imread(filename, img_codec) if src is None: print("Can't open image [" + filename + "]") @@ -61,10 +61,10 @@ def main(argv): print("mat_mask_operations.py [image_path -- default ../../../../data/lena.jpg] [G -- grayscale]") return -1 - cv2.namedWindow("Input", cv2.WINDOW_AUTOSIZE) - cv2.namedWindow("Output", cv2.WINDOW_AUTOSIZE) + cv.namedWindow("Input", cv.WINDOW_AUTOSIZE) + cv.namedWindow("Output", cv.WINDOW_AUTOSIZE) - cv2.imshow("Input", src) + cv.imshow("Input", src) t = round(time.time()) dst0 = sharpen(src) @@ -72,8 +72,8 @@ def main(argv): t = (time.time() - t) / 1000 print("Hand written function time passed in seconds: %s" % t) - cv2.imshow("Output", dst0) - cv2.waitKey() + cv.imshow("Output", dst0) + cv.waitKey() t = time.time() ## [kern] @@ -82,17 +82,17 @@ def main(argv): [0, -1, 0]], np.float32) # kernel should be floating point type ## [kern] ## [filter2D] - dst1 = cv2.filter2D(src, -1, kernel) + dst1 = cv.filter2D(src, -1, kernel) # ddepth = -1, means destination image has depth same as input image ## [filter2D] t = (time.time() - t) / 1000 print("Built-in filter2D time passed in seconds: %s" % t) - cv2.imshow("Output", dst1) + cv.imshow("Output", dst1) - cv2.waitKey(0) - cv2.destroyAllWindows() + cv.waitKey(0) + cv.destroyAllWindows() return 0 diff --git a/samples/python/tutorial_code/imgProc/HitMiss/hit_miss.py b/samples/python/tutorial_code/imgProc/HitMiss/hit_miss.py index c25715c..55605ac 100644 --- a/samples/python/tutorial_code/imgProc/HitMiss/hit_miss.py +++ b/samples/python/tutorial_code/imgProc/HitMiss/hit_miss.py @@ -1,4 +1,4 @@ -import cv2 +import cv2 as cv import numpy as np input_image = np.array(( @@ -16,23 +16,23 @@ kernel = np.array(( [1, -1, 1], [0, 1, 0]), 
dtype="int") -output_image = cv2.morphologyEx(input_image, cv2.MORPH_HITMISS, kernel) +output_image = cv.morphologyEx(input_image, cv.MORPH_HITMISS, kernel) rate = 50 kernel = (kernel + 1) * 127 kernel = np.uint8(kernel) -kernel = cv2.resize(kernel, None, fx = rate, fy = rate, interpolation = cv2.INTER_NEAREST) -cv2.imshow("kernel", kernel) -cv2.moveWindow("kernel", 0, 0) +kernel = cv.resize(kernel, None, fx = rate, fy = rate, interpolation = cv.INTER_NEAREST) +cv.imshow("kernel", kernel) +cv.moveWindow("kernel", 0, 0) -input_image = cv2.resize(input_image, None, fx = rate, fy = rate, interpolation = cv2.INTER_NEAREST) -cv2.imshow("Original", input_image) -cv2.moveWindow("Original", 0, 200) +input_image = cv.resize(input_image, None, fx = rate, fy = rate, interpolation = cv.INTER_NEAREST) +cv.imshow("Original", input_image) +cv.moveWindow("Original", 0, 200) -output_image = cv2.resize(output_image, None , fx = rate, fy = rate, interpolation = cv2.INTER_NEAREST) -cv2.imshow("Hit or Miss", output_image) -cv2.moveWindow("Hit or Miss", 500, 200) +output_image = cv.resize(output_image, None , fx = rate, fy = rate, interpolation = cv.INTER_NEAREST) +cv.imshow("Hit or Miss", output_image) +cv.moveWindow("Hit or Miss", 500, 200) -cv2.waitKey(0) -cv2.destroyAllWindows() +cv.waitKey(0) +cv.destroyAllWindows() diff --git a/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py b/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py index 127345a..387e26c 100644 --- a/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py +++ b/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py @@ -1,5 +1,5 @@ import sys -import cv2 +import cv2 as cv def main(argv): @@ -14,7 +14,7 @@ def main(argv): filename = argv[0] if len(argv) > 0 else "../data/chicky_512.png" # Load the image - src = cv2.imread(filename) + src = cv.imread(filename) # Check if image is loaded fine if src is None: @@ -26,25 +26,25 @@ def main(argv): while 1: rows, cols, _channels = map(int, src.shape) ## [show_image] - cv2.imshow('Pyramids Demo', src) + cv.imshow('Pyramids Demo', src) ## [show_image] - k = cv2.waitKey(0) + k = cv.waitKey(0) if k == 27: break ## [pyrup] elif chr(k) == 'i': - src = cv2.pyrUp(src, dstsize=(2 * cols, 2 * rows)) + src = cv.pyrUp(src, dstsize=(2 * cols, 2 * rows)) print ('** Zoom In: Image x 2') ## [pyrup] ## [pyrdown] elif chr(k) == 'o': - src = cv2.pyrDown(src, dstsize=(cols // 2, rows // 2)) + src = cv.pyrDown(src, dstsize=(cols // 2, rows // 2)) print ('** Zoom Out: Image / 2') ## [pyrdown] ## [loop] - cv2.destroyAllWindows() + cv.destroyAllWindows() return 0 if __name__ == "__main__": diff --git a/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py b/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py index c20ca87..205ee6d 100644 --- a/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py +++ b/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py @@ -1,5 +1,5 @@ import sys -import cv2 +import cv2 as cv import numpy as np # Global Variables @@ -14,13 +14,13 @@ window_name = 'Smoothing Demo' def main(argv): - cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE) + cv.namedWindow(window_name, cv.WINDOW_AUTOSIZE) # Load the source image imageName = argv[0] if len(argv) > 0 else "../data/lena.jpg" global src - src = cv2.imread(imageName, 1) + src = cv.imread(imageName, 1) if src is None: print ('Error opening image') print ('Usage: smoothing.py [image_name -- default ../data/lena.jpg] \n') @@ -40,7 +40,7 @@ def main(argv): ## [blur] for i in range(1, MAX_KERNEL_LENGTH, 2): - dst 
= cv2.blur(src, (i, i)) + dst = cv.blur(src, (i, i)) if display_dst(DELAY_BLUR) != 0: return 0 ## [blur] @@ -51,7 +51,7 @@ def main(argv): ## [gaussianblur] for i in range(1, MAX_KERNEL_LENGTH, 2): - dst = cv2.GaussianBlur(src, (i, i), 0) + dst = cv.GaussianBlur(src, (i, i), 0) if display_dst(DELAY_BLUR) != 0: return 0 ## [gaussianblur] @@ -62,7 +62,7 @@ def main(argv): ## [medianblur] for i in range(1, MAX_KERNEL_LENGTH, 2): - dst = cv2.medianBlur(src, i) + dst = cv.medianBlur(src, i) if display_dst(DELAY_BLUR) != 0: return 0 ## [medianblur] @@ -74,7 +74,7 @@ def main(argv): ## [bilateralfilter] # Remember, bilateral is a bit slow, so as value go higher, it takes long time for i in range(1, MAX_KERNEL_LENGTH, 2): - dst = cv2.bilateralFilter(src, i, i * 2, i / 2) + dst = cv.bilateralFilter(src, i, i * 2, i / 2) if display_dst(DELAY_BLUR) != 0: return 0 ## [bilateralfilter] @@ -89,16 +89,16 @@ def display_caption(caption): global dst dst = np.zeros(src.shape, src.dtype) rows, cols, ch = src.shape - cv2.putText(dst, caption, + cv.putText(dst, caption, (int(cols / 4), int(rows / 2)), - cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255)) + cv.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255)) return display_dst(DELAY_CAPTION) def display_dst(delay): - cv2.imshow(window_name, dst) - c = cv2.waitKey(delay) + cv.imshow(window_name, dst) + c = cv.waitKey(delay) if c >= 0 : return -1 return 0 diff --git a/samples/python/tutorial_code/imgProc/hough_line_transform/hough_line_transform.py b/samples/python/tutorial_code/imgProc/hough_line_transform/hough_line_transform.py index 0bcf6c5..6af4e28 100644 --- a/samples/python/tutorial_code/imgProc/hough_line_transform/hough_line_transform.py +++ b/samples/python/tutorial_code/imgProc/hough_line_transform/hough_line_transform.py @@ -1,11 +1,11 @@ -import cv2 +import cv2 as cv import numpy as np -img = cv2.imread('../data/sudoku.png') -gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) -edges = cv2.Canny(gray,50,150,apertureSize = 3) +img = cv.imread('../data/sudoku.png') +gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) +edges = cv.Canny(gray,50,150,apertureSize = 3) -lines = cv2.HoughLines(edges,1,np.pi/180,200) +lines = cv.HoughLines(edges,1,np.pi/180,200) for line in lines: rho,theta = line[0] a = np.cos(theta) @@ -17,6 +17,6 @@ for line in lines: x2 = int(x0 - 1000*(-b)) y2 = int(y0 - 1000*(a)) - cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2) + cv.line(img,(x1,y1),(x2,y2),(0,0,255),2) -cv2.imwrite('houghlines3.jpg',img) +cv.imwrite('houghlines3.jpg',img) diff --git a/samples/python/tutorial_code/imgProc/hough_line_transform/probabilistic_hough_line_transform.py b/samples/python/tutorial_code/imgProc/hough_line_transform/probabilistic_hough_line_transform.py index 2d000a1..7e510db 100644 --- a/samples/python/tutorial_code/imgProc/hough_line_transform/probabilistic_hough_line_transform.py +++ b/samples/python/tutorial_code/imgProc/hough_line_transform/probabilistic_hough_line_transform.py @@ -1,12 +1,12 @@ -import cv2 +import cv2 as cv import numpy as np -img = cv2.imread('../data/sudoku.png') -gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) -edges = cv2.Canny(gray,50,150,apertureSize = 3) -lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=10) +img = cv.imread('../data/sudoku.png') +gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) +edges = cv.Canny(gray,50,150,apertureSize = 3) +lines = cv.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=10) for line in lines: x1,y1,x2,y2 = line[0] - cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2) + 
cv.line(img,(x1,y1),(x2,y2),(0,255,0),2) -cv2.imwrite('houghlines5.jpg',img) +cv.imwrite('houghlines5.jpg',img) diff --git a/samples/python/tutorial_code/imgProc/match_template/match_template.py b/samples/python/tutorial_code/imgProc/match_template/match_template.py index b90b83a..bd288d3 100644 --- a/samples/python/tutorial_code/imgProc/match_template/match_template.py +++ b/samples/python/tutorial_code/imgProc/match_template/match_template.py @@ -1,5 +1,5 @@ import sys -import cv2 +import cv2 as cv ## [global_variables] use_mask = False @@ -23,14 +23,14 @@ def main(argv): ## [load_image] global img global templ - img = cv2.imread(sys.argv[1], cv2.IMREAD_COLOR) - templ = cv2.imread(sys.argv[2], cv2.IMREAD_COLOR) + img = cv.imread(sys.argv[1], cv.IMREAD_COLOR) + templ = cv.imread(sys.argv[2], cv.IMREAD_COLOR) if (len(sys.argv) > 3): global use_mask use_mask = True global mask - mask = cv2.imread( sys.argv[3], cv2.IMREAD_COLOR ) + mask = cv.imread( sys.argv[3], cv.IMREAD_COLOR ) if ((img is None) or (templ is None) or (use_mask and (mask is None))): print 'Can\'t read one of the images' @@ -38,19 +38,19 @@ def main(argv): ## [load_image] ## [create_windows] - cv2.namedWindow( image_window, cv2.WINDOW_AUTOSIZE ) - cv2.namedWindow( result_window, cv2.WINDOW_AUTOSIZE ) + cv.namedWindow( image_window, cv.WINDOW_AUTOSIZE ) + cv.namedWindow( result_window, cv.WINDOW_AUTOSIZE ) ## [create_windows] ## [create_trackbar] trackbar_label = 'Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED' - cv2.createTrackbar( trackbar_label, image_window, match_method, max_Trackbar, MatchingMethod ) + cv.createTrackbar( trackbar_label, image_window, match_method, max_Trackbar, MatchingMethod ) ## [create_trackbar] MatchingMethod(match_method) ## [wait_key] - cv2.waitKey(0) + cv.waitKey(0) return 0 ## [wait_key] @@ -63,32 +63,32 @@ def MatchingMethod(param): img_display = img.copy() ## [copy_source] ## [match_template] - method_accepts_mask = (cv2.TM_SQDIFF == match_method or match_method == cv2.TM_CCORR_NORMED) + method_accepts_mask = (cv.TM_SQDIFF == match_method or match_method == cv.TM_CCORR_NORMED) if (use_mask and method_accepts_mask): - result = cv2.matchTemplate(img, templ, match_method, None, mask) + result = cv.matchTemplate(img, templ, match_method, None, mask) else: - result = cv2.matchTemplate(img, templ, match_method) + result = cv.matchTemplate(img, templ, match_method) ## [match_template] ## [normalize] - cv2.normalize( result, result, 0, 1, cv2.NORM_MINMAX, -1 ) + cv.normalize( result, result, 0, 1, cv.NORM_MINMAX, -1 ) ## [normalize] ## [best_match] - _minVal, _maxVal, minLoc, maxLoc = cv2.minMaxLoc(result, None) + _minVal, _maxVal, minLoc, maxLoc = cv.minMaxLoc(result, None) ## [best_match] ## [match_loc] - if (match_method == cv2.TM_SQDIFF or match_method == cv2.TM_SQDIFF_NORMED): + if (match_method == cv.TM_SQDIFF or match_method == cv.TM_SQDIFF_NORMED): matchLoc = minLoc else: matchLoc = maxLoc ## [match_loc] ## [imshow] - cv2.rectangle(img_display, matchLoc, (matchLoc[0] + templ.shape[0], matchLoc[1] + templ.shape[1]), (0,0,0), 2, 8, 0 ) - cv2.rectangle(result, matchLoc, (matchLoc[0] + templ.shape[0], matchLoc[1] + templ.shape[1]), (0,0,0), 2, 8, 0 ) - cv2.imshow(image_window, img_display) - cv2.imshow(result_window, result) + cv.rectangle(img_display, matchLoc, (matchLoc[0] + templ.shape[0], matchLoc[1] + templ.shape[1]), (0,0,0), 2, 8, 0 ) + cv.rectangle(result, matchLoc, (matchLoc[0] + templ.shape[0], matchLoc[1] + 
templ.shape[1]), (0,0,0), 2, 8, 0 ) + cv.imshow(image_window, img_display) + cv.imshow(result_window, result) ## [imshow] pass diff --git a/samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py b/samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py index 24965e5..4e9c0d4 100644 --- a/samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py +++ b/samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py @@ -4,14 +4,14 @@ """ import numpy as np import sys -import cv2 +import cv2 as cv def show_wait_destroy(winname, img): - cv2.imshow(winname, img) - cv2.moveWindow(winname, 500, 0) - cv2.waitKey(0) - cv2.destroyWindow(winname) + cv.imshow(winname, img) + cv.moveWindow(winname, 500, 0) + cv.waitKey(0) + cv.destroyWindow(winname) def main(argv): @@ -23,7 +23,7 @@ def main(argv): return -1 # Load the image - src = cv2.imread(argv[0], cv2.IMREAD_COLOR) + src = cv.imread(argv[0], cv.IMREAD_COLOR) # Check if image is loaded fine if src is None: @@ -31,13 +31,13 @@ def main(argv): return -1 # Show source image - cv2.imshow("src", src) + cv.imshow("src", src) # [load_image] # [gray] # Transform source image to gray if it is not already if len(src.shape) != 2: - gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) + gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) else: gray = src @@ -47,9 +47,9 @@ def main(argv): # [bin] # Apply adaptiveThreshold at the bitwise_not of gray, notice the ~ symbol - gray = cv2.bitwise_not(gray) - bw = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \ - cv2.THRESH_BINARY, 15, -2) + gray = cv.bitwise_not(gray) + bw = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, \ + cv.THRESH_BINARY, 15, -2) # Show binary image show_wait_destroy("binary", bw) # [bin] @@ -66,11 +66,11 @@ def main(argv): horizontal_size = cols / 30 # Create structure element for extracting horizontal lines through morphology operations - horizontalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1)) + horizontalStructure = cv.getStructuringElement(cv.MORPH_RECT, (horizontal_size, 1)) # Apply morphology operations - horizontal = cv2.erode(horizontal, horizontalStructure) - horizontal = cv2.dilate(horizontal, horizontalStructure) + horizontal = cv.erode(horizontal, horizontalStructure) + horizontal = cv.dilate(horizontal, horizontalStructure) # Show extracted horizontal lines show_wait_destroy("horizontal", horizontal) @@ -82,11 +82,11 @@ def main(argv): verticalsize = rows / 30 # Create structure element for extracting vertical lines through morphology operations - verticalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, verticalsize)) + verticalStructure = cv.getStructuringElement(cv.MORPH_RECT, (1, verticalsize)) # Apply morphology operations - vertical = cv2.erode(vertical, verticalStructure) - vertical = cv2.dilate(vertical, verticalStructure) + vertical = cv.erode(vertical, verticalStructure) + vertical = cv.dilate(vertical, verticalStructure) # Show extracted vertical lines show_wait_destroy("vertical", vertical) @@ -94,7 +94,7 @@ def main(argv): # [smooth] # Inverse vertical image - vertical = cv2.bitwise_not(vertical) + vertical = cv.bitwise_not(vertical) show_wait_destroy("vertical_bit", vertical) ''' @@ -107,20 +107,20 @@ def main(argv): ''' # Step 1 - edges = cv2.adaptiveThreshold(vertical, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \ - cv2.THRESH_BINARY, 3, -2) + edges = cv.adaptiveThreshold(vertical, 255, cv.ADAPTIVE_THRESH_MEAN_C, \ + 
cv.THRESH_BINARY, 3, -2) show_wait_destroy("edges", edges) # Step 2 kernel = np.ones((2, 2), np.uint8) - edges = cv2.dilate(edges, kernel) + edges = cv.dilate(edges, kernel) show_wait_destroy("dilate", edges) # Step 3 smooth = np.copy(vertical) # Step 4 - smooth = cv2.blur(smooth, (2, 2)) + smooth = cv.blur(smooth, (2, 2)) # Step 5 (rows, cols) = np.where(edges != 0) diff --git a/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py b/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py index 00586c8..4d089c2 100644 --- a/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py +++ b/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py @@ -1,28 +1,28 @@ -import cv2 +import cv2 as cv import numpy as np SZ=20 bin_n = 16 # Number of bins -affine_flags = cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR +affine_flags = cv.WARP_INVERSE_MAP|cv.INTER_LINEAR ## [deskew] def deskew(img): - m = cv2.moments(img) + m = cv.moments(img) if abs(m['mu02']) < 1e-2: return img.copy() skew = m['mu11']/m['mu02'] M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]]) - img = cv2.warpAffine(img,M,(SZ, SZ),flags=affine_flags) + img = cv.warpAffine(img,M,(SZ, SZ),flags=affine_flags) return img ## [deskew] ## [hog] def hog(img): - gx = cv2.Sobel(img, cv2.CV_32F, 1, 0) - gy = cv2.Sobel(img, cv2.CV_32F, 0, 1) - mag, ang = cv2.cartToPolar(gx, gy) + gx = cv.Sobel(img, cv.CV_32F, 1, 0) + gy = cv.Sobel(img, cv.CV_32F, 0, 1) + mag, ang = cv.cartToPolar(gx, gy) bins = np.int32(bin_n*ang/(2*np.pi)) # quantizing binvalues in (0...16) bin_cells = bins[:10,:10], bins[10:,:10], bins[:10,10:], bins[10:,10:] mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:] @@ -31,7 +31,7 @@ def hog(img): return hist ## [hog] -img = cv2.imread('digits.png',0) +img = cv.imread('digits.png',0) if img is None: raise Exception("we need the digits.png image from samples/data here !") @@ -49,13 +49,13 @@ hogdata = [map(hog,row) for row in deskewed] trainData = np.float32(hogdata).reshape(-1,64) responses = np.repeat(np.arange(10),250)[:,np.newaxis] -svm = cv2.ml.SVM_create() -svm.setKernel(cv2.ml.SVM_LINEAR) -svm.setType(cv2.ml.SVM_C_SVC) +svm = cv.ml.SVM_create() +svm.setKernel(cv.ml.SVM_LINEAR) +svm.setType(cv.ml.SVM_C_SVC) svm.setC(2.67) svm.setGamma(5.383) -svm.train(trainData, cv2.ml.ROW_SAMPLE, responses) +svm.train(trainData, cv.ml.ROW_SAMPLE, responses) svm.save('svm_data.dat') ###### Now testing ######################## diff --git a/samples/python/video.py b/samples/python/video.py index be15641..42a1d77 100755 --- a/samples/python/video.py +++ b/samples/python/video.py @@ -35,7 +35,7 @@ from __future__ import print_function import numpy as np from numpy import pi, sin, cos -import cv2 +import cv2 as cv # built-in modules from time import clock @@ -49,14 +49,14 @@ class VideoSynthBase(object): self.bg = None self.frame_size = (640, 480) if bg is not None: - self.bg = cv2.imread(bg, 1) + self.bg = cv.imread(bg, 1) h, w = self.bg.shape[:2] self.frame_size = (w, h) if size is not None: w, h = map(int, size.split('x')) self.frame_size = (w, h) - self.bg = cv2.resize(self.bg, self.frame_size) + self.bg = cv.resize(self.bg, self.frame_size) self.noise = float(noise) @@ -75,8 +75,8 @@ class VideoSynthBase(object): if self.noise > 0.0: noise = np.zeros((h, w, 3), np.int8) - cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) - buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3) + cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) + buf = cv.add(buf, noise, dtype=cv.CV_8UC3) return True, buf def isOpened(self): @@ -85,26 +85,26 @@ class 
VideoSynthBase(object): class Book(VideoSynthBase): def __init__(self, **kw): super(Book, self).__init__(**kw) - backGr = cv2.imread('../data/graf1.png') - fgr = cv2.imread('../data/box.png') + backGr = cv.imread('../data/graf1.png') + fgr = cv.imread('../data/box.png') self.render = TestSceneRender(backGr, fgr, speed = 1) def read(self, dst=None): noise = np.zeros(self.render.sceneBg.shape, np.int8) - cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) + cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) - return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3) + return True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3) class Cube(VideoSynthBase): def __init__(self, **kw): super(Cube, self).__init__(**kw) - self.render = TestSceneRender(cv2.imread('../data/pca_test1.jpg'), deformation = True, speed = 1) + self.render = TestSceneRender(cv.imread('../data/pca_test1.jpg'), deformation = True, speed = 1) def read(self, dst=None): noise = np.zeros(self.render.sceneBg.shape, np.int8) - cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) + cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise) - return True, cv2.add(self.render.getNextFrame(), noise, dtype=cv2.CV_8UC3) + return True, cv.add(self.render.getNextFrame(), noise, dtype=cv.CV_8UC3) class Chess(VideoSynthBase): def __init__(self, **kw): @@ -130,10 +130,10 @@ class Chess(VideoSynthBase): self.t = 0 def draw_quads(self, img, quads, color = (0, 255, 0)): - img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0] + img_quads = cv.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0] img_quads.shape = quads.shape[:2] + (2,) for q in img_quads: - cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.LINE_AA, shift=2) + cv.fillConvexPoly(img, np.int32(q*4), color, cv.LINE_AA, shift=2) def render(self, dst): t = self.t @@ -186,11 +186,11 @@ def create_capture(source = 0, fallback = presets['chess']): try: cap = Class(**params) except: pass else: - cap = cv2.VideoCapture(source) + cap = cv.VideoCapture(source) if 'size' in params: w, h = map(int, params['size'].split('x')) - cap.set(cv2.CAP_PROP_FRAME_WIDTH, w) - cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h) + cap.set(cv.CAP_PROP_FRAME_WIDTH, w) + cap.set(cv.CAP_PROP_FRAME_HEIGHT, h) if cap is None or not cap.isOpened(): print('Warning: unable to open video source: ', source) if fallback is not None: @@ -216,14 +216,14 @@ if __name__ == '__main__': for i, cap in enumerate(caps): ret, img = cap.read() imgs.append(img) - cv2.imshow('capture %d' % i, img) - ch = cv2.waitKey(1) + cv.imshow('capture %d' % i, img) + ch = cv.waitKey(1) if ch == 27: break if ch == ord(' '): for i, img in enumerate(imgs): fn = '%s/shot_%d_%03d.bmp' % (shotdir, i, shot_idx) - cv2.imwrite(fn, img) + cv.imwrite(fn, img) print(fn, 'saved') shot_idx += 1 - cv2.destroyAllWindows() + cv.destroyAllWindows() diff --git a/samples/python/video_threaded.py b/samples/python/video_threaded.py index 896a5c3..c95a5ba 100755 --- a/samples/python/video_threaded.py +++ b/samples/python/video_threaded.py @@ -19,7 +19,7 @@ Keyboard shortcuts: from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from multiprocessing.pool import ThreadPool from collections import deque @@ -50,11 +50,11 @@ if __name__ == '__main__': def process_frame(frame, t0): # some intensive computation... 
- frame = cv2.medianBlur(frame, 19) - frame = cv2.medianBlur(frame, 19) + frame = cv.medianBlur(frame, 19) + frame = cv.medianBlur(frame, 19) return frame, t0 - threadn = cv2.getNumberOfCPUs() + threadn = cv.getNumberOfCPUs() pool = ThreadPool(processes = threadn) pending = deque() @@ -70,7 +70,7 @@ if __name__ == '__main__': draw_str(res, (20, 20), "threaded : " + str(threaded_mode)) draw_str(res, (20, 40), "latency : %.1f ms" % (latency.value*1000)) draw_str(res, (20, 60), "frame interval : %.1f ms" % (frame_interval.value*1000)) - cv2.imshow('threaded video', res) + cv.imshow('threaded video', res) if len(pending) < threadn: ret, frame = cap.read() t = clock() @@ -81,9 +81,9 @@ if __name__ == '__main__': else: task = DummyTask(process_frame(frame, t)) pending.append(task) - ch = cv2.waitKey(1) + ch = cv.waitKey(1) if ch == ord(' '): threaded_mode = not threaded_mode if ch == 27: break -cv2.destroyAllWindows() +cv.destroyAllWindows() diff --git a/samples/python/video_v4l2.py b/samples/python/video_v4l2.py index 0af68e5..d506833 100644 --- a/samples/python/video_v4l2.py +++ b/samples/python/video_v4l2.py @@ -17,51 +17,51 @@ Keys: # Python 2/3 compatibility from __future__ import print_function -import cv2 +import cv2 as cv def decode_fourcc(v): v = int(v) return "".join([chr((v >> 8 * i) & 0xFF) for i in range(4)]) -font = cv2.FONT_HERSHEY_SIMPLEX +font = cv.FONT_HERSHEY_SIMPLEX color = (0, 255, 0) -cap = cv2.VideoCapture(0) -cap.set(cv2.CAP_PROP_AUTOFOCUS, False) # Known bug: https://github.com/opencv/opencv/pull/5474 +cap = cv.VideoCapture(0) +cap.set(cv.CAP_PROP_AUTOFOCUS, False) # Known bug: https://github.com/opencv/opencv/pull/5474 -cv2.namedWindow("Video") +cv.namedWindow("Video") convert_rgb = True -fps = int(cap.get(cv2.CAP_PROP_FPS)) -focus = int(min(cap.get(cv2.CAP_PROP_FOCUS) * 100, 2**31-1)) # ceil focus to C_LONG as Python3 int can go to +inf +fps = int(cap.get(cv.CAP_PROP_FPS)) +focus = int(min(cap.get(cv.CAP_PROP_FOCUS) * 100, 2**31-1)) # ceil focus to C_LONG as Python3 int can go to +inf -cv2.createTrackbar("FPS", "Video", fps, 30, lambda v: cap.set(cv2.CAP_PROP_FPS, v)) -cv2.createTrackbar("Focus", "Video", focus, 100, lambda v: cap.set(cv2.CAP_PROP_FOCUS, v / 100)) +cv.createTrackbar("FPS", "Video", fps, 30, lambda v: cap.set(cv.CAP_PROP_FPS, v)) +cv.createTrackbar("Focus", "Video", focus, 100, lambda v: cap.set(cv.CAP_PROP_FOCUS, v / 100)) while True: status, img = cap.read() - fourcc = decode_fourcc(cap.get(cv2.CAP_PROP_FOURCC)) + fourcc = decode_fourcc(cap.get(cv.CAP_PROP_FOURCC)) - fps = cap.get(cv2.CAP_PROP_FPS) + fps = cap.get(cv.CAP_PROP_FPS) - if not bool(cap.get(cv2.CAP_PROP_CONVERT_RGB)): + if not bool(cap.get(cv.CAP_PROP_CONVERT_RGB)): if fourcc == "MJPG": - img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE) + img = cv.imdecode(img, cv.IMREAD_GRAYSCALE) elif fourcc == "YUYV": - img = cv2.cvtColor(img, cv2.COLOR_YUV2GRAY_YUYV) + img = cv.cvtColor(img, cv.COLOR_YUV2GRAY_YUYV) else: print("unsupported format") break - cv2.putText(img, "Mode: {}".format(fourcc), (15, 40), font, 1.0, color) - cv2.putText(img, "FPS: {}".format(fps), (15, 80), font, 1.0, color) - cv2.imshow("Video", img) + cv.putText(img, "Mode: {}".format(fourcc), (15, 40), font, 1.0, color) + cv.putText(img, "FPS: {}".format(fps), (15, 80), font, 1.0, color) + cv.imshow("Video", img) - k = cv2.waitKey(1) + k = cv.waitKey(1) if k == 27: break elif k == ord('g'): convert_rgb = not convert_rgb - cap.set(cv2.CAP_PROP_CONVERT_RGB, convert_rgb) + cap.set(cv.CAP_PROP_CONVERT_RGB, convert_rgb) diff --git 
a/samples/python/watershed.py b/samples/python/watershed.py index 30be82c..9ca254e 100755 --- a/samples/python/watershed.py +++ b/samples/python/watershed.py @@ -26,12 +26,12 @@ Keys from __future__ import print_function import numpy as np -import cv2 +import cv2 as cv from common import Sketcher class App: def __init__(self, fn): - self.img = cv2.imread(fn) + self.img = cv.imread(fn) if self.img is None: raise Exception('Failed to load image file: %s' % fn) @@ -49,14 +49,14 @@ class App: def watershed(self): m = self.markers.copy() - cv2.watershed(self.img, m) + cv.watershed(self.img, m) overlay = self.colors[np.maximum(m, 0)] - vis = cv2.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv2.CV_8UC3) - cv2.imshow('watershed', vis) + vis = cv.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv.CV_8UC3) + cv.imshow('watershed', vis) def run(self): - while cv2.getWindowProperty('img', 0) != -1 or cv2.getWindowProperty('watershed', 0) != -1: - ch = cv2.waitKey(50) + while cv.getWindowProperty('img', 0) != -1 or cv.getWindowProperty('watershed', 0) != -1: + ch = cv.waitKey(50) if ch == 27: break if ch >= ord('1') and ch <= ord('7'): @@ -72,7 +72,7 @@ class App: self.markers[:] = 0 self.markers_vis[:] = self.img self.sketch.show() - cv2.destroyAllWindows() + cv.destroyAllWindows() if __name__ == '__main__': -- 2.7.4
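
For anyone carrying the same rename into scripts outside this patch, the pattern is simply to alias the module once at import time and route every call through the alias. The sketch below is illustrative only and is not part of the patch; the '../data/lena.jpg' path mirrors the sample data referenced above and is an assumption.

    # minimal sketch of the 'import cv2 as cv' aliasing used throughout the samples
    import cv2 as cv

    img = cv.imread('../data/lena.jpg')             # was: cv2.imread(...)
    if img is None:
        raise SystemExit('Failed to load image file')
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)      # was: cv2.cvtColor(..., cv2.COLOR_BGR2GRAY)
    cv.imshow('gray', gray)
    cv.waitKey(0)
    cv.destroyAllWindows()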