.. ocv:function:: double calibrateCamera( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags=0 )
-.. ocv:pyfunction:: cv2.calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs[, rvecs[, tvecs[, flags]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs
+.. ocv:pyfunction:: cv2.calibrateCamera(objectPoints, imagePoints, imageSize[, cameraMatrix[, distCoeffs[, rvecs[, tvecs[, flags]]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs
.. ocv:cfunction:: double cvCalibrateCamera2( const CvMat* objectPoints, const CvMat* imagePoints, const CvMat* pointCounts, CvSize imageSize, CvMat* cameraMatrix, CvMat* distCoeffs, CvMat* rvecs=NULL, CvMat* tvecs=NULL, int flags=0 )
.. ocv:function:: double stereoCalibrate( InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, Size imageSize, OutputArray R, OutputArray T, OutputArray E, OutputArray F, TermCriteria term_crit = TermCriteria(TermCriteria::COUNT+ TermCriteria::EPS, 30, 1e-6), int flags=CALIB_FIX_INTRINSIC )
-.. ocv:pyfunction:: cv2.stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize[, R[, T[, E[, F[, criteria[, flags]]]]]]) -> retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F
+.. ocv:pyfunction:: cv2.stereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize[, cameraMatrix1[, distCoeffs1[, cameraMatrix2[, distCoeffs2[, R[, T[, E[, F[, criteria[, flags]]]]]]]]]]) -> retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F
.. ocv:cfunction:: double cvStereoCalibrate( const CvMat* objectPoints, const CvMat* imagePoints1, const CvMat* imagePoints2, const CvMat* pointCounts, CvMat* cameraMatrix1, CvMat* distCoeffs1, CvMat* cameraMatrix2, CvMat* distCoeffs2, CvSize imageSize, CvMat* R, CvMat* T, CvMat* E=0, CvMat* F=0, CvTermCriteria termCrit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), int flags=CV_CALIB_FIX_INTRINSIC )
.. ocv:pyoldfunction:: cv.StereoCalibrate( objectPoints, imagePoints1, imagePoints2, pointCounts, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E=None, F=None, termCrit=(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 1e-6), flags=CV_CALIB_FIX_INTRINSIC)-> None
.. ocv:function:: void accumulate( InputArray src, InputOutputArray dst, InputArray mask=noArray() )
-.. ocv:pyfunction:: cv2.accumulate(src, dst[, mask]) -> dst
+.. ocv:pyfunction:: cv2.accumulate(src, dst[, mask]) -> None
.. ocv:cfunction:: void cvAcc( const CvArr* src, CvArr* dst, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.Acc(src, dst, mask=None)-> None
.. ocv:function:: void accumulateSquare( InputArray src, InputOutputArray dst, InputArray mask=noArray() )
-.. ocv:pyfunction:: cv2.accumulateSquare(src, dst[, mask]) -> dst
+.. ocv:pyfunction:: cv2.accumulateSquare(src, dst[, mask]) -> None
.. ocv:cfunction:: void cvSquareAcc( const CvArr* src, CvArr* dst, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.SquareAcc(src, dst, mask=None)-> None
.. ocv:function:: void accumulateProduct( InputArray src1, InputArray src2, InputOutputArray dst, InputArray mask=noArray() )
-.. ocv:pyfunction:: cv2.accumulateProduct(src1, src2, dst[, mask]) -> dst
+.. ocv:pyfunction:: cv2.accumulateProduct(src1, src2, dst[, mask]) -> None
.. ocv:cfunction:: void cvMultiplyAcc( const CvArr* src1, const CvArr* src2, CvArr* dst, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.MultiplyAcc(src1, src2, dst, mask=None)-> None
.. ocv:function:: void accumulateWeighted( InputArray src, InputOutputArray dst, double alpha, InputArray mask=noArray() )
-.. ocv:pyfunction:: cv2.accumulateWeighted(src, dst, alpha[, mask]) -> dst
+.. ocv:pyfunction:: cv2.accumulateWeighted(src, dst, alpha[, mask]) -> None
.. ocv:cfunction:: void cvRunningAvg( const CvArr* src, CvArr* dst, double alpha, const CvArr* mask=NULL )
.. ocv:pyoldfunction:: cv.RunningAvg(src, dst, alpha, mask=None)-> None
int sdepth=-1 );
//! adds image to the accumulator (dst += src). Unlike cv::add, dst and src can have different types.
-CV_EXPORTS_W void accumulate( InputArray src, CV_OUT InputOutputArray dst,
+CV_EXPORTS_W void accumulate( InputArray src, InputOutputArray dst,
InputArray mask=noArray() );
//! adds squared src image to the accumulator (dst += src*src).
-CV_EXPORTS_W void accumulateSquare( InputArray src, CV_OUT InputOutputArray dst,
+CV_EXPORTS_W void accumulateSquare( InputArray src, InputOutputArray dst,
InputArray mask=noArray() );
//! adds product of the 2 images to the accumulator (dst += src1*src2).
CV_EXPORTS_W void accumulateProduct( InputArray src1, InputArray src2,
- CV_OUT InputOutputArray dst, InputArray mask=noArray() );
+ InputOutputArray dst, InputArray mask=noArray() );
//! updates the running average (dst = dst*(1-alpha) + src*alpha)
-CV_EXPORTS_W void accumulateWeighted( InputArray src, CV_OUT InputOutputArray dst,
+CV_EXPORTS_W void accumulateWeighted( InputArray src, InputOutputArray dst,
double alpha, InputArray mask=noArray() );
//! type of the threshold operation
continue
if a.returnarg:
outlist.append((a.name, argno))
- if not a.inputarg:
+ if not a.inputarg or a.returnarg:
if a.isbig():
outarr_list.append((a.name, argno))
continue
    :param criteria: Parameter specifying the termination criteria of the iterative search algorithm (after the specified maximum number of iterations ``criteria.maxCount`` or when the search window moves by less than ``criteria.epsilon`` ).
- :param derivLambda: Relative weight of the spatial image derivatives impact to the optical flow estimation. If ``derivLambda=0`` , only the image intensity is used. If ``derivLambda=1`` , only derivatives are used. Any other values between 0 and 1 mean that both derivatives and the image intensity are used (in the corresponding proportions).
+ :param derivLambda: Not used.
:param flags: Operation flags:
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
import os\r
from common import splitfn\r
\r
\r
print 'ok'\r
\r
- camera_matrix = np.zeros((3, 3))\r
- dist_coefs = np.zeros(4)\r
- img_n = len(img_points)\r
- rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h), camera_matrix, dist_coefs)\r
+ rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h))\r
print "RMS:", rms\r
print "camera matrix:\n", camera_matrix\r
print "distortion coefficients: ", dist_coefs.ravel()\r
'''\r
\r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
\r
\r
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):\r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
from time import clock\r
import sys\r
\r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
import os\r
\r
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']\r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
from common import make_cmap\r
\r
help_message = '''USAGE: distrans.py [<image>]\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
import video\r
import sys\r
\r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
from video import create_capture\r
from common import clock, draw_str\r
\r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
from common import anorm\r
\r
help_message = '''SURF image match \r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
\r
help_message = '''USAGE: floodfill.py [<image>]\r
\r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
import video\r
from common import anorm2, draw_str\r
from time import clock\r
\r
\r
\r
-lk_params = dict( winSize = (3, 3), \r
+lk_params = dict( winSize = (21, 21), \r
maxLevel = 2, \r
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),\r
derivLambda = 0.0 ) \r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
import video\r
from common import nothing, clock, draw_str\r
\r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
\r
def detect(img, cascade):\r
rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))\r
import numpy as np\r
import math\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
import video\r
\r
help_message = '''\r
'''\r
\r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
\r
ply_header = '''ply\r
format ascii 1.0\r
'''\r
\r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
from common import draw_str\r
import getopt, sys\r
from itertools import count\r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
+import cv2.cv as cv\r
from common import Sketcher\r
\r
help_message = '''\r