From 20aca7440faad3d02fe28ff2036cf10dfc6bc4bf Mon Sep 17 00:00:00 2001 From: Vadim Pisarevsky Date: Thu, 9 Jun 2011 01:16:45 +0000 Subject: [PATCH] a lot of small corrections to bring down the number of undocumented functions, reported by the script; added em.cpp sample --- doc/check_docs.py | 23 ++- doc/check_docs_whitelist.txt | 55 ++++++ doc/conf.py | 2 +- .../camera_calibration_and_3d_reconstruction.rst | 60 ++++--- .../utility_and_system_functions_and_macros.rst | 2 +- .../camera_calibration_and_3d_reconstruction.rst | 6 +- .../gpu/doc/feature_detection_and_description.rst | 2 +- modules/gpu/doc/image_filtering.rst | 70 ++++---- modules/gpu/doc/image_processing.rst | 24 +-- modules/gpu/doc/matrix_reductions.rst | 12 +- modules/gpu/doc/object_detection.rst | 2 +- modules/gpu/doc/operations_on_matrices.rst | 18 +- modules/gpu/doc/per_element_operations.rst | 22 +-- modules/imgproc/doc/filtering.rst | 2 +- .../structural_analysis_and_shape_descriptors.rst | 66 +------- .../imgproc/include/opencv2/imgproc/imgproc.hpp | 2 +- modules/imgproc/src/histogram.cpp | 5 +- modules/ml/doc/boosting.rst | 26 +-- modules/ml/doc/decision_trees.rst | 66 ++++---- modules/ml/doc/expectation_maximization.rst | 147 +++------------- modules/ml/doc/k_nearest_neighbors.rst | 20 ++- modules/ml/doc/neural_networks.rst | 38 ++--- modules/ml/doc/normal_bayes_classifier.rst | 14 +- modules/ml/doc/random_trees.rst | 143 ++-------------- modules/ml/doc/statistical_models.rst | 18 +- modules/ml/doc/support_vector_machines.rst | 20 +-- modules/ml/src/em.cpp | 186 +-------------------- modules/objdetect/doc/cascade_classification.rst | 18 +- modules/python/src2/hdr_parser.py | 53 +++--- samples/cpp/em.cpp | 94 +++++++++++ 30 files changed, 472 insertions(+), 744 deletions(-) create mode 100644 samples/cpp/em.cpp diff --git a/doc/check_docs.py b/doc/check_docs.py index 5bbecf3..3a2bdb1 100644 --- a/doc/check_docs.py +++ b/doc/check_docs.py @@ -17,13 +17,13 @@ opencv_hdr_list = [ opencv_module_list = [ "core", -#"imgproc", -#"calib3d", -#"features2d", -#"video", -#"objdetect", -#"highgui", -#"ml" +"imgproc", +"calib3d", +"features2d", +"video", +"objdetect", +"highgui", +"ml" ] class RSTParser(object): @@ -49,6 +49,7 @@ class RSTParser(object): continue rst_decl = None if "(" in l: + l = l.replace("cv::", "") rst_decl = self.parser.parse_func_decl_no_wrap(l) fname = rst_decl[0] else: @@ -94,6 +95,9 @@ class RSTParser(object): fname = rst_decl[0] hdr_decls = self.fmap.get(fname, []) if not hdr_decls: + fname = fname.replace("cv.", "") + hdr_decls = self.fmap.get(fname, []) + if not hdr_decls: print "Documented function %s (%s) in %s:%d is not in the headers" % (fdecl, rst_decl[0].replace(".", "::"), docname, lineno) continue decl_idx = 0 @@ -103,7 +107,7 @@ class RSTParser(object): continue idx = 0 for a in hd[3]: - if a[0] != rst_decl[3][idx][0]: + if a[0] != rst_decl[3][idx][0] and a[0].replace("cv::", "") != rst_decl[3][idx][0]: break idx += 1 if idx == len(hd[3]): @@ -162,6 +166,9 @@ class RSTParser(object): for d in decls: dstr = self.decl2str(d) + # special hack for ML: skip old variants of the methods + if name == "ml" and ("CvMat" in dstr): + continue if dstr not in wlist_decls: misscount += 1 print "%s %s(%s)" % (d[1], d[0].replace(".", "::"), ", ".join([a[0] + " " + a[1] for a in d[3]])) diff --git a/doc/check_docs_whitelist.txt b/doc/check_docs_whitelist.txt index 9ddf9c3..0212cdc 100644 --- a/doc/check_docs_whitelist.txt +++ b/doc/check_docs_whitelist.txt @@ -14,6 +14,7 @@ # full declaration into the file # 
+######################################### core ##################################### cv::Mat::MSize cv::Mat::MStep cv::MatConstIterator @@ -22,5 +23,59 @@ cv::Algorithm cv::_InputArray cv::_OutputArray +######################################## imgproc ################################### +CvLSHOperations +cv::FilterEngine +cv::BaseFilter +cv::BaseRowFilter +cv::BaseColumnFilter +cv::Moments + +######################################## calib3d ################################### CvLevMarq +Mat cv::findFundamentalMat( InputArray points1, InputArray points2, OutputArray mask, int method=FM_RANSAC, double param1=3., double param2=0.99) +Mat findHomography( InputArray srcPoints, InputArray dstPoints, OutputArray mask, int method=0, double ransacReprojThreshold=3); + +########################################## ml ###################################### +CvBoostTree +CvForestTree +CvSVMKernel +CvSVMSolver +CvDTreeTrainData +CvERTreeTrainData +CvKNearest::CvKNearest +CvKNearest::clear +CvDTree::CvDTree +CvDTree::clear +CvDTree::read +CvDTree::write +CvEM::CvEM +CvEM::clear +CvEM::read +CvEM::write +CvSVM::CvSVM +CvSVM::clear +CvSVM::read +CvSVM::write +CvRTrees::CvRTrees +CvRTrees::clear +CvRTrees::read +CvRTrees::write +CvBoost::CvBoost +CvBoost::clear +CvBoost::read +CvBoost::write +CvGBTrees::CvGBTrees +CvGBTrees::clear +CvGBTrees::read +CvGBTrees::write +CvNormalBayesClassifier::CvNormalBayerClassifier +CvNormalBayesClassifier::clear +CvNormalBayesClassifier::read +CvNormalBayesClassifier::write +CvANN_MLP::CvANN_MLP +CvANN_MLP::clear +CvANN_MLP::read +CvANN_MLP::write +CvTrainTestSplit diff --git a/doc/conf.py b/doc/conf.py index fa17e7b..19fb600 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -258,7 +258,7 @@ latex_use_parts = False #latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +latex_domain_indices = True # -- Options for manual page output -------------------------------------------- diff --git a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst index 6ae7c1a..9ba293b 100644 --- a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst +++ b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst @@ -345,9 +345,7 @@ The function converts 2D or 3D points from/to homogeneous coordinates by calling decomposeProjectionMatrix ----------------------------- -.. cpp:function:: void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix, OutputArray rotMatrix, OutputArray transVect ) - -.. cpp:function:: void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix, OutputArray rotMatrix, OutputArray transVect, OutputArray rotMatrixX, OutputArray rotMatrixY, OutputArray rotMatrixZ, Vec3d& eulerAngles ) +.. cpp:function:: void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix, OutputArray rotMatrix, OutputArray transVect, OutputArray rotMatrixX=noArray(), OutputArray rotMatrixY=noArray(), OutputArray rotMatrixZ=noArray(), OutputArray eulerAngles=noArray() ) Decomposes a projection matrix into a rotation matrix and a camera matrix. @@ -365,7 +363,7 @@ decomposeProjectionMatrix :param rotMatrZ: Optional 3x3 rotation matrix around z-axis. - :param eulerAngles: Optional 3 points containing the three Euler angles of rotation. + :param eulerAngles: Optional 3-element vector containing the three Euler angles of rotation. 
The function computes a decomposition of a projection matrix into a calibration and a rotation matrix and the position of a camera. @@ -628,8 +626,6 @@ corresponding to the specified points. It can also be passed to .. index:: findHomography -.. _findHomography: - findHomography ------------------ .. cpp:function:: Mat findHomography( InputArray srcPoints, InputArray dstPoints, int method=0, double ransacReprojThreshold=3, OutputArray mask=noArray() ) @@ -707,9 +703,30 @@ See Also: :ref:`PerspectiveTransform` -.. index:: getOptimalNewCameraMatrix +.. index:: estimateAffine3D + +estimateAffine3D +-------------------- +.. cpp:function:: int estimateAffine3D(InputArray srcpt, InputArray dstpt, OutputArray out, OutputArray outliers, double ransacThreshold = 3.0, double confidence = 0.99) + + Computes an optimal affine transformation between two 3D point sets. + + :param srcpt: The first input 3D point set. + + :param dstpt: The second input 3D point set. + + :param out: Output 3D affine transformation matrix :math:`3 \times 4` . + + :param outliers: Output vector indicating which points are outliers. + + :param ransacThreshold: Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier. -.. _getOptimalNewCameraMatrix: + :param confidence: The confidence level, between 0 and 1, that the estimated transformation will have. Anything between 0.95 and 0.99 is usually good enough. Too close to 1 values can slow down the estimation too much, lower than 0.8-0.9 confidence values can result in an incorrectly estimated transformation. + +The function estimates an optimal 3D affine transformation between two 3D point sets using the RANSAC algorithm. + + +.. index:: getOptimalNewCameraMatrix getOptimalNewCameraMatrix ----------------------------- @@ -787,7 +804,7 @@ The function computes partial derivatives of the elements of the matrix product projectPoints ----------------- -.. cpp:function:: void projectPoints( InputArray objectPoints, InputArray rvec, InputArray tvec, InputArray cameraMatrix, InputArray distCoeffs, OutputArray imagePoints, OutputArray dpdrot=noArray(), OutputArray dpdt=noArray(), OutputArray dpdf=noArray(), OutputArray dpdc=noArray(), OutputArray dpddist=noArray(), double aspectRatio=0 ) +.. cpp:function:: void projectPoints( InputArray objectPoints, InputArray rvec, InputArray tvec, InputArray cameraMatrix, InputArray distCoeffs, OutputArray imagePoints, OutputArray jacobian=noArray(), double aspectRatio=0 ) Projects 3D points to an image plane. @@ -803,15 +820,9 @@ projectPoints :param imagePoints: Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or ``vector`` . - :param dpdrot: Optional 2Nx3 matrix of derivatives of image points with respect to components of the rotation vector. + :param jacobian: Optional output 2Nx(10+) jacobian matrix of derivatives of image points with respect to components of the rotation vector, translation vector, focal lengths, coordinates of the principal point and the distortion coefficients. - :param dpdt: Optional 2Nx3 matrix of derivatives of image points with respect to components of the translation vector. - - :param dpdf: Optional 2Nx2 matrix of derivatives of image points with respect to :math:`f_x` and :math:`f_y` . - - :param dpdc: Optional 2Nx2 matrix of derivatives of image points with respect to :math:`c_x` and :math:`c_y` . - - :param dpddist: Optional 2Nx4 matrix of derivatives of image points with respect to distortion coefficients. + :param aspectRatio: Optional "fixed aspect ratio" parameter. 
If the parameter is not 0, the function assumes that the aspect ratio (*fx/fy*) is fixed and correspondingly adjusts the jacobian matrix. The function computes projections of 3D points to the image plane given intrinsic and extrinsic camera @@ -837,7 +848,7 @@ By setting ``rvec=tvec=(0,0,0)`` or by setting ``cameraMatrix`` to a 3x3 identi reprojectImageTo3D ---------------------- -.. cpp:function:: void reprojectImageTo3D( InputArray disparity, OutputArray _3dImage, InputArray Q, bool handleMissingValues=false ) +.. cpp:function:: void reprojectImageTo3D( InputArray disparity, OutputArray _3dImage, InputArray Q, bool handleMissingValues=false, int depth=-1 ) Reprojects a disparity image to 3D space. @@ -849,6 +860,8 @@ reprojectImageTo3D :param handleMissingValues: Indicates, whether the function should handle missing values (i.e. points where the disparity was not computed). If ``handleMissingValues=true``, then pixels with the minimal disparity that corresponds to the outliers (see :ref:`StereoBM::operator ()` ) are transformed to 3D points with a very large Z value (currently set to 10000). + :param ddepth: The optional output array depth. If it is ``-1``, the output image will have ``CV_32F`` depth. ``ddepth`` can also be set to ``CV_16S``, ``CV_32S`` or ``CV_32F``. + The function transforms a single-channel disparity map to a 3-channel image representing a 3D surface. That is, for each pixel ``(x,y)`` andthe corresponding disparity ``d=disparity(x,y)`` , it computes: .. math:: @@ -862,13 +875,10 @@ The matrix ``Q`` can be an arbitrary .. index:: RQDecomp3x3 -.. _RQDecomp3x3: - RQDecomp3x3 --------------- -.. cpp:function:: void RQDecomp3x3( InputArray M, OutputArray R, OutputArray Q ) -.. cpp:function:: Vec3d RQDecomp3x3( InputArray M, OutputArray R, OutputArray Q, OutputArray Qx, OutputArray Qy, OutputArray Qz ) +.. cpp:function:: Vec3d RQDecomp3x3( InputArray M, OutputArray R, OutputArray Q, OutputArray Qx=noArray(), OutputArray Qy=noArray(), OutputArray Qz=noArray() ) Computes an RQ decomposition of 3x3 matrices. @@ -878,11 +888,11 @@ RQDecomp3x3 :param Q: Output 3x3 orthogonal matrix. - :param Qx: Optional 3x3 rotation matrix around x-axis. + :param Qx: Optional output 3x3 rotation matrix around x-axis. - :param Qy: Optional 3x3 rotation matrix around y-axis. + :param Qy: Optional output 3x3 rotation matrix around y-axis. - :param Qz: Optional 3x3 rotation matrix around z-axis. + :param Qz: Optional output 3x3 rotation matrix around z-axis. The function computes a RQ decomposition using the given rotations. This function is used in :ref:`DecomposeProjectionMatrix` to decompose the left 3x3 submatrix of a projection matrix into a camera and a rotation matrix. diff --git a/modules/core/doc/utility_and_system_functions_and_macros.rst b/modules/core/doc/utility_and_system_functions_and_macros.rst index add5bd6..3ce99f1 100644 --- a/modules/core/doc/utility_and_system_functions_and_macros.rst +++ b/modules/core/doc/utility_and_system_functions_and_macros.rst @@ -290,6 +290,6 @@ useOptimized ----------------- .. cpp:function:: bool useOptimized() - Returns status if the optimized code use + Returns status of the optimized code use The function returns true if the optimized code is enabled, false otherwise. 
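A minimal usage sketch for the flag above (assuming the companion ``setUseOptimized`` function from the same core module; the printed messages are purely illustrative)::

    #include <opencv2/core/core.hpp>
    #include <cstdio>

    int main()
    {
        // query whether the optimized (SSE/IPP) code paths are currently enabled
        printf("optimized: %d\n", (int)cv::useOptimized());

        // temporarily disable them, e.g. to compare against the plain C++ branches
        cv::setUseOptimized(false);
        printf("optimized: %d\n", (int)cv::useOptimized());

        cv::setUseOptimized(true);   // restore the default state
        return 0;
    }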
diff --git a/modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst b/modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst index ad93776..bb38253 100644 --- a/modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst +++ b/modules/gpu/doc/camera_calibration_and_3d_reconstruction.rst @@ -490,7 +490,7 @@ gpu::reprojectImageTo3D :param stream: Stream for the asynchronous version. -See Also: :c:cpp:func:`reprojectImageTo3D` . +See Also: :cpp:func:`reprojectImageTo3D` . .. index:: gpu::solvePnPRansac @@ -507,7 +507,7 @@ gpu::solvePnPRansac :param camera_mat: 3x3 matrix of intrinsic camera parameters. - :param dist_coef: Distortion coefficients. See :c:cpp:func:`undistortPoints` for details. + :param dist_coef: Distortion coefficients. See :cpp:func:`undistortPoints` for details. :param rvec: Output 3D rotation vector. @@ -523,5 +523,5 @@ gpu::solvePnPRansac :param inliers: Output vector of inlier indices. -See Also :c:cpp:func:`solvePnPRansac`. +See Also :cpp:func:`solvePnPRansac`. \ No newline at end of file diff --git a/modules/gpu/doc/feature_detection_and_description.rst b/modules/gpu/doc/feature_detection_and_description.rst index b89c6c5..a3516f5 100644 --- a/modules/gpu/doc/feature_detection_and_description.rst +++ b/modules/gpu/doc/feature_detection_and_description.rst @@ -195,7 +195,7 @@ gpu::BruteForceMatcher_GPU::match Finds the best match for each descriptor from a query set with train descriptors. See Also: -:c:cpp:func:`DescriptorMatcher::match` +:cpp:func:`DescriptorMatcher::match` .. index:: gpu::BruteForceMatcher_GPU::matchSingle diff --git a/modules/gpu/doc/image_filtering.rst b/modules/gpu/doc/image_filtering.rst index b6ea58a..cb5cc7d 100644 --- a/modules/gpu/doc/image_filtering.rst +++ b/modules/gpu/doc/image_filtering.rst @@ -224,7 +224,7 @@ gpu::createBoxFilter_GPU This filter does not check out-of-border accesses, so only a proper sub-matrix of a bigger matrix has to be passed to it. -See Also: :c:cpp:func:`boxFilter` +See Also: :cpp:func:`boxFilter` .. index:: gpu::boxFilter @@ -248,7 +248,7 @@ gpu::boxFilter This filter does not check out-of-border accesses, so only a proper sub-matrix of a bigger matrix has to be passed to it. -See Also: :c:cpp:func:`boxFilter` +See Also: :cpp:func:`boxFilter` .. index:: gpu::blur @@ -270,7 +270,7 @@ gpu::blur This filter does not check out-of-border accesses, so only a proper sub-matrix of a bigger matrix has to be passed to it. -See Also: :c:cpp:func:`blur`, :cpp:func:`gpu::boxFilter` +See Also: :cpp:func:`blur`, :cpp:func:`gpu::boxFilter` .. index:: gpu::createMorphologyFilter_GPU @@ -296,7 +296,7 @@ gpu::createMorphologyFilter_GPU This filter does not check out-of-border accesses, so only a proper sub-matrix of a bigger matrix has to be passed to it. -See Also: :c:cpp:func:`createMorphologyFilter` +See Also: :cpp:func:`createMorphologyFilter` .. index:: gpu::erode @@ -320,7 +320,7 @@ gpu::erode This filter does not check out-of-border accesses, so only a proper sub-matrix of a bigger matrix has to be passed to it. -See Also: :c:cpp:func:`erode` +See Also: :cpp:func:`erode` .. index:: gpu::dilate @@ -344,7 +344,7 @@ gpu::dilate This filter does not check out-of-border accesses, so only a proper sub-matrix of a bigger matrix has to be passed to it. -See Also: :c:cpp:func:`dilate` +See Also: :cpp:func:`dilate` .. index:: gpu::morphologyEx @@ -381,7 +381,7 @@ gpu::morphologyEx This filter does not check out-of-border accesses, so only a proper sub-matrix of a bigger matrix has to be passed to it. 
-See Also: :c:cpp:func:`morphologyEx` +See Also: :cpp:func:`morphologyEx` .. index:: gpu::createLinearFilter_GPU @@ -407,7 +407,7 @@ gpu::createLinearFilter_GPU This filter does not check out-of-border accesses, so only a proper sub-matrix of a bigger matrix has to be passed to it. -See Also: :c:cpp:func:`createLinearFilter` +See Also: :cpp:func:`createLinearFilter` .. index:: gpu::filter2D @@ -431,7 +431,7 @@ gpu::filter2D This filter does not check out-of-border accesses, so only a proper sub-matrix of a bigger matrix has to be passed to it. -See Also: :c:cpp:func:`filter2D` +See Also: :cpp:func:`filter2D` .. index:: gpu::Laplacian @@ -447,15 +447,15 @@ gpu::Laplacian :param ddepth: Desired depth of the destination image. It supports only the same depth as the source image depth. - :param ksize: Aperture size used to compute the second-derivative filters (see :c:cpp:func:`getDerivKernels`). It must be positive and odd. Only ``ksize`` = 1 and ``ksize`` = 3 are supported. + :param ksize: Aperture size used to compute the second-derivative filters (see :cpp:func:`getDerivKernels`). It must be positive and odd. Only ``ksize`` = 1 and ``ksize`` = 3 are supported. - :param scale: Optional scale factor for the computed Laplacian values. By default, no scaling is applied (see :c:cpp:func:`getDerivKernels` ). + :param scale: Optional scale factor for the computed Laplacian values. By default, no scaling is applied (see :cpp:func:`getDerivKernels` ). **Note:** This filter does not check out-of-border accesses, so only a proper sub-matrix of a bigger matrix has to be passed to it. -See Also: :c:cpp:func:`Laplacian`,:cpp:func:`gpu::filter2D` . +See Also: :cpp:func:`Laplacian`,:cpp:func:`gpu::filter2D` . .. index:: gpu::getLinearRowFilter_GPU @@ -473,7 +473,7 @@ gpu::getLinearRowFilter_GPU :param anchor: Anchor position within the kernel. Negative values mean that the anchor is positioned at the aperture center. - :param borderType: Pixel extrapolation method. For details, see :c:cpp:func:`borderInterpolate`. For details on limitations, see below. + :param borderType: Pixel extrapolation method. For details, see :cpp:func:`borderInterpolate`. For details on limitations, see below. There are two versions of the algorithm: NPP and OpenCV. * NPP version is called when ``srcType == CV_8UC1`` or ``srcType == CV_8UC4`` and ``bufType == srcType`` . Otherwise, the OpenCV version is called. NPP supports only ``BORDER_CONSTANT`` border type and does not check indices outside the image. @@ -497,13 +497,13 @@ gpu::getLinearColumnFilter_GPU :param anchor: Anchor position within the kernel. Negative values mean that the anchor is positioned at the aperture center. - :param borderType: Pixel extrapolation method. For details, see :c:cpp:func:`borderInterpolate` . For details on limitations, see below. + :param borderType: Pixel extrapolation method. For details, see :cpp:func:`borderInterpolate` . For details on limitations, see below. There are two versions of the algorithm: NPP and OpenCV. * NPP version is called when ``dstType == CV_8UC1`` or ``dstType == CV_8UC4`` and ``bufType == dstType`` . Otherwise, the OpenCV version is called. NPP supports only ``BORDER_CONSTANT`` border type and does not check indices outside the image. * OpenCV version supports only ``CV_32F`` buffer depth and ``BORDER_REFLECT101``, ``BORDER_REPLICATE``, and ``BORDER_CONSTANT`` border types. It checks indices outside image. 
-See Also: :cpp:func:`gpu::getLinearRowFilter_GPU`, :c:cpp:func:`createSeparableLinearFilter` +See Also: :cpp:func:`gpu::getLinearRowFilter_GPU`, :cpp:func:`createSeparableLinearFilter` .. index:: gpu::createSeparableLinearFilter_GPU @@ -521,10 +521,10 @@ gpu::createSeparableLinearFilter_GPU :param anchor: Anchor position within the kernel. Negative values mean that anchor is positioned at the aperture center. - :param rowBorderType, columnBorderType: Pixel extrapolation method in the horizontal and vertical directions For details, see :c:cpp:func:`borderInterpolate`. For details on limitations, see :cpp:func:`gpu::getLinearRowFilter_GPU`, cpp:cpp:func:`gpu::getLinearColumnFilter_GPU`. + :param rowBorderType, columnBorderType: Pixel extrapolation method in the horizontal and vertical directions For details, see :cpp:func:`borderInterpolate`. For details on limitations, see :cpp:func:`gpu::getLinearRowFilter_GPU`, cpp:cpp:func:`gpu::getLinearColumnFilter_GPU`. -See Also: :cpp:func:`gpu::getLinearRowFilter_GPU`, :cpp:func:`gpu::getLinearColumnFilter_GPU`, :c:cpp:func:`createSeparableLinearFilter` +See Also: :cpp:func:`gpu::getLinearRowFilter_GPU`, :cpp:func:`gpu::getLinearColumnFilter_GPU`, :cpp:func:`createSeparableLinearFilter` .. index:: gpu::sepFilter2D @@ -544,9 +544,9 @@ gpu::sepFilter2D :param anchor: Anchor position within the kernel. The default value ``(-1, 1)`` means that the anchor is at the kernel center. - :param rowBorderType, columnBorderType: Pixel extrapolation method. For details, see :c:cpp:func:`borderInterpolate`. + :param rowBorderType, columnBorderType: Pixel extrapolation method. For details, see :cpp:func:`borderInterpolate`. -See Also: :cpp:func:`gpu::createSeparableLinearFilter_GPU`, :c:cpp:func:`sepFilter2D` +See Also: :cpp:func:`gpu::createSeparableLinearFilter_GPU`, :cpp:func:`sepFilter2D` .. index:: gpu::createDerivFilter_GPU @@ -564,11 +564,11 @@ gpu::createDerivFilter_GPU :param dy: Derivative order in respect of y. - :param ksize: Aperture size. See :c:cpp:func:`getDerivKernels` for details. + :param ksize: Aperture size. See :cpp:func:`getDerivKernels` for details. - :param rowBorderType, columnBorderType: Pixel extrapolation method. See :c:cpp:func:`borderInterpolate` for details. + :param rowBorderType, columnBorderType: Pixel extrapolation method. See :cpp:func:`borderInterpolate` for details. -See Also: :cpp:func:`gpu::createSeparableLinearFilter_GPU`, :c:cpp:func:`createDerivFilter` +See Also: :cpp:func:`gpu::createSeparableLinearFilter_GPU`, :cpp:func:`createDerivFilter` .. index:: gpu::Sobel @@ -590,11 +590,11 @@ gpu::Sobel :param ksize: Size of the extended Sobel kernel. Possible valies are 1, 3, 5 or 7. - :param scale: Optional scale factor for the computed derivative values. By default, no scaling is applied. For details, see :c:cpp:func:`getDerivKernels` . + :param scale: Optional scale factor for the computed derivative values. By default, no scaling is applied. For details, see :cpp:func:`getDerivKernels` . - :param rowBorderType, columnBorderType: Pixel extrapolation method. See :c:cpp:func:`borderInterpolate` for details. + :param rowBorderType, columnBorderType: Pixel extrapolation method. See :cpp:func:`borderInterpolate` for details. -See Also: :cpp:func:`gpu::createSeparableLinearFilter_GPU`, :c:cpp:func:`Sobel` +See Also: :cpp:func:`gpu::createSeparableLinearFilter_GPU`, :cpp:func:`Sobel` .. index:: gpu::Scharr @@ -614,11 +614,11 @@ gpu::Scharr :param yorder: Order of the derivative y. 
- :param scale: Optional scale factor for the computed derivative values. By default, no scaling is applied. See :c:cpp:func:`getDerivKernels` for details. + :param scale: Optional scale factor for the computed derivative values. By default, no scaling is applied. See :cpp:func:`getDerivKernels` for details. - :param rowBorderType, columnBorderType: Pixel extrapolation method. For details, see :c:cpp:func:`borderInterpolate` and :c:cpp:func:`Scharr` . + :param rowBorderType, columnBorderType: Pixel extrapolation method. For details, see :cpp:func:`borderInterpolate` and :cpp:func:`Scharr` . -See Also: :cpp:func:`gpu::createSeparableLinearFilter_GPU`, :c:cpp:func:`Scharr` +See Also: :cpp:func:`gpu::createSeparableLinearFilter_GPU`, :cpp:func:`Scharr` .. index:: gpu::createGaussianFilter_GPU @@ -630,15 +630,15 @@ gpu::createGaussianFilter_GPU :param type: Source and destination image type. ``CV_8UC1``, ``CV_8UC4``, ``CV_16SC1``, ``CV_16SC2``, ``CV_32SC1``, ``CV_32FC1`` are supported. - :param ksize: Aperture size. See :c:cpp:func:`getGaussianKernel` for details. + :param ksize: Aperture size. See :cpp:func:`getGaussianKernel` for details. - :param sigmaX: Gaussian sigma in the horizontal direction. See :c:cpp:func:`getGaussianKernel` for details. + :param sigmaX: Gaussian sigma in the horizontal direction. See :cpp:func:`getGaussianKernel` for details. :param sigmaY: Gaussian sigma in the vertical direction. If 0, then :math:`\texttt{sigmaY}\leftarrow\texttt{sigmaX}` . - :param rowBorderType, columnBorderType: Border type to use. See :c:cpp:func:`borderInterpolate` for details. + :param rowBorderType, columnBorderType: Border type to use. See :cpp:func:`borderInterpolate` for details. -See Also: :cpp:func:`gpu::createSeparableLinearFilter_GPU`, :c:cpp:func:`createGaussianFilter` +See Also: :cpp:func:`gpu::createSeparableLinearFilter_GPU`, :cpp:func:`createGaussianFilter` .. index:: gpu::GaussianBlur @@ -654,11 +654,11 @@ gpu::GaussianBlur :param ksize: Gaussian kernel size. ``ksize.width`` and ``ksize.height`` can differ but they both must be positive and odd. If they are zeros, they are computed from ``sigmaX`` and ``sigmaY`` . - :param sigmaX, sigmaY: Gaussian kernel standard deviations in X and Y direction. If ``sigmaY`` is zero, it is set to be equal to ``sigmaX`` . If they are both zeros, they are computed from ``ksize.width`` and ``ksize.height``, respectively. See :c:cpp:func:`getGaussianKernel` for details. To fully control the result regardless of possible future modification of all this semantics, you are recommended to specify all of ``ksize``, ``sigmaX``, and ``sigmaY`` . + :param sigmaX, sigmaY: Gaussian kernel standard deviations in X and Y direction. If ``sigmaY`` is zero, it is set to be equal to ``sigmaX`` . If they are both zeros, they are computed from ``ksize.width`` and ``ksize.height``, respectively. See :cpp:func:`getGaussianKernel` for details. To fully control the result regardless of possible future modification of all this semantics, you are recommended to specify all of ``ksize``, ``sigmaX``, and ``sigmaY`` . - :param rowBorderType, columnBorderType: Pixel extrapolation method. See :c:cpp:func:`borderInterpolate` for details. + :param rowBorderType, columnBorderType: Pixel extrapolation method. See :cpp:func:`borderInterpolate` for details. -See Also: :cpp:func:`gpu::createGaussianFilter_GPU`, :c:cpp:func:`GaussianBlur` +See Also: :cpp:func:`gpu::createGaussianFilter_GPU`, :cpp:func:`GaussianBlur` .. 
index:: gpu::getMaxFilter_GPU diff --git a/modules/gpu/doc/image_processing.rst b/modules/gpu/doc/image_processing.rst index f903605..86cc3e9 100644 --- a/modules/gpu/doc/image_processing.rst +++ b/modules/gpu/doc/image_processing.rst @@ -42,7 +42,7 @@ gpu::meanShiftProc :param criteria: Termination criteria. See :cpp:class:`TermCriteria`. See Also: -:c:cpp:func:`gpu::meanShiftFiltering` +:cpp:func:`gpu::meanShiftFiltering` .. index:: gpu::meanShiftSegmentation @@ -81,7 +81,7 @@ gpu::integral :param sqsum: Squared integral image of the ``CV_32FC1`` type. See Also: -:c:cpp:func:`integral` +:cpp:func:`integral` .. index:: gpu::sqrIntegral @@ -128,7 +128,7 @@ gpu::cornerHarris :param borderType: Pixel extrapolation method. Only ``BORDER_REFLECT101`` and ``BORDER_REPLICATE`` are supported for now. See Also: -:c:cpp:func:`cornerHarris` +:cpp:func:`cornerHarris` .. index:: gpu::cornerMinEigenVal @@ -150,7 +150,7 @@ gpu::cornerMinEigenVal :param borderType: Pixel extrapolation method. Only ``BORDER_REFLECT101`` and ``BORDER_REPLICATE`` are supported for now. -See also: :c:cpp:func:`cornerMinEigenVal` +See also: :cpp:func:`cornerMinEigenVal` .. index:: gpu::mulSpectrums @@ -173,7 +173,7 @@ gpu::mulSpectrums Only full (not packed) ``CV_32FC2`` complex spectrums in the interleaved format are supported for now. See Also: -:c:cpp:func:`mulSpectrums` +:cpp:func:`mulSpectrums` .. index:: gpu::mulAndScaleSpectrums @@ -198,7 +198,7 @@ gpu::mulAndScaleSpectrums Only full (not packed) ``CV_32FC2`` complex spectrums in the interleaved format are supported for now. See Also: -:c:cpp:func:`mulSpectrums` +:cpp:func:`mulSpectrums` .. index:: gpu::dft @@ -237,7 +237,7 @@ gpu::dft If the source matrix is real (its type is ``CV_32FC1`` ), forward DFT is performed. The result of the DFT is packed into complex ( ``CV_32FC2`` ) matrix. So, the width of the destination matrix is ``dft_size.width / 2 + 1`` . But if the source is a single column, the height is reduced instead of the width. See Also: -:c:cpp:func:`dft` +:cpp:func:`dft` .. index:: gpu::convolve @@ -268,7 +268,7 @@ gpu::ConvolveBuf .. cpp:class:: gpu::ConvolveBuf This class provides a memory buffer for the - :c:cpp:func:`gpu::convolve` function. + :cpp:func:`gpu::convolve` function. :: struct CV_EXPORTS ConvolveBuf @@ -290,12 +290,12 @@ gpu::ConvolveBuf::ConvolveBuf .. cpp:function:: ConvolveBuf::ConvolveBuf() Constructs an empty buffer that is properly resized after the first call of the - :c:cpp:func:`convolve` function. + :cpp:func:`convolve` function. .. cpp:function:: ConvolveBuf::ConvolveBuf(Size image_size, Size templ_size) Constructs a buffer for the - :c:cpp:func:`convolve` function with respective arguments. + :cpp:func:`convolve` function with respective arguments. .. index:: gpu::matchTemplate @@ -328,7 +328,7 @@ gpu::matchTemplate * ``CV_TM_CCORR`` See Also: -:c:cpp:func:`matchTemplate` +:cpp:func:`matchTemplate` .. index:: gpu::remap @@ -354,7 +354,7 @@ gpu::remap Values of pixels with non-integer coordinates are computed using bilinear the interpolation. -See Also: :c:cpp:func:`remap` +See Also: :cpp:func:`remap` .. index:: gpu::cvtColor diff --git a/modules/gpu/doc/matrix_reductions.rst b/modules/gpu/doc/matrix_reductions.rst index a2eab9d..4d20e0a 100644 --- a/modules/gpu/doc/matrix_reductions.rst +++ b/modules/gpu/doc/matrix_reductions.rst @@ -17,7 +17,7 @@ gpu::meanStdDev :param stddev: Standard deviation value. -See Also: :c:cpp:func:`meanStdDev` +See Also: :cpp:func:`meanStdDev` .. 
index:: gpu::norm @@ -37,7 +37,7 @@ gpu::norm :param buf: Optional buffer to avoid extra memory allocations. It is resized automatically. -See Also: :c:cpp:func:`norm` +See Also: :cpp:func:`norm` .. index:: gpu::sum @@ -53,7 +53,7 @@ gpu::sum :param buf: Optional buffer to avoid extra memory allocations. It is resized automatically. -See Also: :c:cpp:func:`sum` +See Also: :cpp:func:`sum` .. index:: gpu::absSum @@ -105,7 +105,7 @@ gpu::minMax The function does not work with ``CV_64F`` images on GPUs with the compute capability < 1.3. -See Also: :c:cpp:func:`minMaxLoc` +See Also: :cpp:func:`minMaxLoc` .. index:: gpu::minMaxLoc @@ -135,7 +135,7 @@ gpu::minMaxLoc The function does not work with ``CV_64F`` images on GPU with the compute capability < 1.3. -See Also: :c:cpp:func:`minMaxLoc` +See Also: :cpp:func:`minMaxLoc` .. index:: gpu::countNonZero @@ -153,4 +153,4 @@ gpu::countNonZero The function does not work with ``CV_64F`` images on GPUs with the compute capability < 1.3. - See Also: :c:cpp:func:`countNonZero` + See Also: :cpp:func:`countNonZero` diff --git a/modules/gpu/doc/object_detection.rst b/modules/gpu/doc/object_detection.rst index 0b565a4..bb3c908 100644 --- a/modules/gpu/doc/object_detection.rst +++ b/modules/gpu/doc/object_detection.rst @@ -324,5 +324,5 @@ gpu::CascadeClassifier_GPU::detectMultiScale imshow("Faces", image_cpu); -See Also: :c:cpp:func:`CascadeClassifier::detectMultiScale` +See Also: :cpp:func:`CascadeClassifier::detectMultiScale` diff --git a/modules/gpu/doc/operations_on_matrices.rst b/modules/gpu/doc/operations_on_matrices.rst index 23f5ef3..69dc8ba 100644 --- a/modules/gpu/doc/operations_on_matrices.rst +++ b/modules/gpu/doc/operations_on_matrices.rst @@ -16,7 +16,7 @@ gpu::transpose :param dst: Destination matrix. See Also: -:c:cpp:func:`transpose` +:cpp:func:`transpose` .. index:: gpu::flip @@ -40,7 +40,7 @@ gpu::flip See Also: -:c:cpp:func:`flip` +:cpp:func:`flip` .. index:: gpu::LUT @@ -57,7 +57,7 @@ gpu::LUT :param dst: Destination matrix with the same depth as ``lut`` and the same number of channels as ``src``. -See Also: :c:cpp:func:`LUT` +See Also: :cpp:func:`LUT` .. index:: gpu::merge @@ -81,7 +81,7 @@ gpu::merge :param stream: Stream for the asynchronous version. -See Also: :c:cpp:func:`merge` +See Also: :cpp:func:`merge` .. index:: gpu::split @@ -103,7 +103,7 @@ gpu::split :param stream: Stream for the asynchronous version. -See Also: :c:cpp:func:`split` +See Also: :cpp:func:`split` .. index:: gpu::magnitude @@ -128,7 +128,7 @@ gpu::magnitude :param stream: Stream for the asynchronous version. See Also: -:c:cpp:func:`magnitude` +:cpp:func:`magnitude` .. index:: gpu::magnitudeSqr @@ -173,7 +173,7 @@ gpu::phase :param stream: Stream for the asynchronous version. See Also: -:c:cpp:func:`phase` +:cpp:func:`phase` .. index:: gpu::cartToPolar @@ -198,7 +198,7 @@ gpu::cartToPolar :param stream: Stream for the asynchronous version. See Also: -:c:cpp:func:`cartToPolar` +:cpp:func:`cartToPolar` .. index:: gpu::polarToCart @@ -223,4 +223,4 @@ gpu::polarToCart :param stream: Stream for the asynchronous version. See Also: -:c:cpp:func:`polarToCart` +:cpp:func:`polarToCart` diff --git a/modules/gpu/doc/per_element_operations.rst b/modules/gpu/doc/per_element_operations.rst index 9d4f6d7..078b18a 100644 --- a/modules/gpu/doc/per_element_operations.rst +++ b/modules/gpu/doc/per_element_operations.rst @@ -21,7 +21,7 @@ gpu::add :param dst: Destination matrix with the same size and type as ``src1``. -See Also: :c:cpp:func:`add` +See Also: :cpp:func:`add` .. 
index:: gpu::subtract @@ -39,7 +39,7 @@ gpu::subtract :param dst: Destination matrix with the same size and type as ``src1``. -See Also: :c:cpp:func:`subtract` +See Also: :cpp:func:`subtract` @@ -59,7 +59,7 @@ gpu::multiply :param dst: Destination matrix with the same size and type as ``src1``. -See Also: :c:cpp:func:`multiply` +See Also: :cpp:func:`multiply` .. index:: gpu::divide @@ -78,9 +78,9 @@ gpu::divide :param dst: Destination matrix with the same size and type as ``src1``. - This function, in contrast to :c:cpp:func:`divide`, uses a round-down rounding mode. + This function, in contrast to :cpp:func:`divide`, uses a round-down rounding mode. -See Also: :c:cpp:func:`divide` +See Also: :cpp:func:`divide` @@ -96,7 +96,7 @@ gpu::exp :param dst: Destination matrix with the same size and type as ``src``. -See Also: :c:cpp:func:`exp` +See Also: :cpp:func:`exp` @@ -112,7 +112,7 @@ gpu::log :param dst: Destination matrix with the same size and type as ``src``. -See Also: :c:cpp:func:`log` +See Also: :cpp:func:`log` @@ -132,7 +132,7 @@ gpu::absdiff :param dst: Destination matrix with the same size and type as ``src1``. -See Also: :c:cpp:func:`absdiff` +See Also: :cpp:func:`absdiff` .. index:: gpu::compare @@ -157,7 +157,7 @@ gpu::compare * **CMP_LE:** ``src1(.) <= src2(.)`` * **CMP_NE:** ``src1(.) != src2(.)`` -See Also: :c:cpp:func:`compare` +See Also: :cpp:func:`compare` .. index:: gpu::bitwise_not @@ -268,7 +268,7 @@ gpu::min :param stream: Stream for the asynchronous version. -See Also: :c:cpp:func:`min` +See Also: :cpp:func:`min` @@ -294,4 +294,4 @@ gpu::max :param stream: Stream for the asynchronous version. -See Also: :c:cpp:func:`max` +See Also: :cpp:func:`max` diff --git a/modules/imgproc/doc/filtering.rst b/modules/imgproc/doc/filtering.rst index 30c570b..770a9b0 100644 --- a/modules/imgproc/doc/filtering.rst +++ b/modules/imgproc/doc/filtering.rst @@ -697,7 +697,7 @@ createMorphologyFilter .. cpp:function:: Ptr getMorphologyColumnFilter(int op, int type, int esize, int anchor=-1) -.. cpp:function:: static inline Scalar morphologyDefaultBorderValue(){ return Scalar::all(DBL_MAX) } +.. cpp:function:: Scalar morphologyDefaultBorderValue() Creates an engine for non-separable morphological operations. diff --git a/modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst b/modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst index 7352c1d..3a50317 100644 --- a/modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst +++ b/modules/imgproc/doc/structural_analysis_and_shape_descriptors.rst @@ -107,9 +107,9 @@ See Also: findContours ---------------- -.. cpp:function:: void findContours( InputArray image, OutputArrayOfArrays contours, OutputArray hierarchy, int mode, int method, Point offset=Point()) +.. cpp:function:: void findContours( InputOutputArray image, OutputArrayOfArrays contours, OutputArray hierarchy, int mode, int method, Point offset=Point()) -.. cpp:function:: void findContours( InputArray image, OutputArrayOfArrays contours, int mode, int method, Point offset=Point()) +.. cpp:function:: void findContours( InputOutputArray image, OutputArrayOfArrays contours, int mode, int method, Point offset=Point()) Finds contours in a binary image. @@ -264,75 +264,17 @@ boundingRect The function calculates and returns the minimal up-right bounding rectangle for the specified point set. -.. index:: estimateRigidTransform - -estimateRigidTransform --------------------------- -.. 
cpp:function:: Mat estimateRigidTransform( InputArray srcpt, InputArray dstpt, bool fullAffine ) - - Computes an optimal affine transformation between two 2D point sets. - - :param srcpt: The first input 2D point set, stored in ``std::vector`` or ``Mat``. - - :param dst: The second input 2D point set of the same size and the same type as ``A`` . - - :param fullAffine: If true, the function finds an optimal affine transformation with no additional resrictions (6 degrees of freedom). Otherwise, the class of transformations to choose from is limited to combinations of translation, rotation, and uniform scaling (5 degrees of freedom). - -The function finds an optimal affine transform -:math:`[A|b]` (a -:math:`2 \times 3` floating-point matrix) that approximates best the transformation from -:math:`\texttt{srcpt}_i` to -:math:`\texttt{dstpt}_i` : - -.. math:: - - [A^*|b^*] = arg \min _{[A|b]} \sum _i \| \texttt{dstpt} _i - A { \texttt{srcpt} _i}^T - b \| ^2 - -where -:math:`[A|b]` can be either arbitrary (when ``fullAffine=true`` ) or have form - -.. math:: - - \begin{bmatrix} a_{11} & a_{12} & b_1 \\ -a_{12} & a_{11} & b_2 \end{bmatrix} - -when ``fullAffine=false`` . - -See Also: -:cpp:func:`getAffineTransform`, -:cpp:func:`getPerspectiveTransform`, -:cpp:func:`findHomography` - -.. index:: estimateAffine3D - -estimateAffine3D --------------------- -.. cpp:function:: int estimateAffine3D(InputArray srcpt, InputArray dstpt, OutputArray out, OutputArray outliers, double ransacThreshold = 3.0, double confidence = 0.99) - - Computes an optimal affine transformation between two 3D point sets. - - :param srcpt: The first input 3D point set. - - :param dstpt: The second input 3D point set. - - :param out: Output 3D affine transformation matrix :math:`3 \times 4` . - - :param outliers: Output vector indicating which points are outliers. - - :param ransacThreshold: Maximum reprojection error in the RANSAC algorithm to consider a point as an inlier. - - :param confidence: The confidence level, between 0 and 1, that the estimated transformation will have. Anything between 0.95 and 0.99 is usually good enough. Too close to 1 values can slow down the estimation too much, lower than 0.8-0.9 confidence values can result in an incorrectly estimated transformation. - -The function estimates an optimal 3D affine transformation between two 3D point sets using the RANSAC algorithm. .. index:: contourArea contourArea --------------- -.. cpp:function:: double contourArea( InputArray contour ) +.. cpp:function:: double contourArea( InputArray contour, bool oriented=false ) Calculates a contour area. :param contour: Input vector of 2d points (contour vertices), stored in ``std::vector`` or ``Mat``. + :param orientation: Oriented area flag. If it is true, the function returns a signed area value, depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can determine orientation of a contour by taking sign of the area. By default the parameter is ``false``, which means that the absolute value is returned. The function computes a contour area. Similarly to :cpp:func:`moments` , the area is computed using the Green formula. 
Thus, the returned area and the number of non-zero pixels, if you draw the contour using diff --git a/modules/imgproc/include/opencv2/imgproc/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc/imgproc.hpp index 84f3e1f..3fa5024 100644 --- a/modules/imgproc/include/opencv2/imgproc/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc/imgproc.hpp @@ -669,7 +669,7 @@ CV_EXPORTS void calcBackProject( const Mat* images, int nimages, //! computes back projection for the set of images CV_EXPORTS void calcBackProject( const Mat* images, int nimages, const int* channels, const SparseMat& hist, - Mat& backProject, const float** ranges, + OutputArray backProject, const float** ranges, double scale=1, bool uniform=true ); //! compares two histograms stored in dense arrays diff --git a/modules/imgproc/src/histogram.cpp b/modules/imgproc/src/histogram.cpp index 80f093e..7ef2588 100644 --- a/modules/imgproc/src/histogram.cpp +++ b/modules/imgproc/src/histogram.cpp @@ -1283,7 +1283,7 @@ calcSparseBackProj_8u( vector& _ptrs, const vector& _deltas, } void cv::calcBackProject( const Mat* images, int nimages, const int* channels, - const SparseMat& hist, Mat& backProject, + const SparseMat& hist, OutputArray _backProject, const float** ranges, double scale, bool uniform ) { vector ptrs; @@ -1293,7 +1293,8 @@ void cv::calcBackProject( const Mat* images, int nimages, const int* channels, int dims = hist.dims(); CV_Assert( dims > 0 ); - backProject.create( images[0].size(), images[0].depth() ); + _backProject.create( images[0].size(), images[0].depth() ); + Mat backProject = _backProject.getMat(); histPrepareImages( images, nimages, channels, backProject, dims, hist.hdr->size, ranges, uniform, ptrs, deltas, imsize, uniranges ); diff --git a/modules/ml/doc/boosting.rst b/modules/ml/doc/boosting.rst index 85b60fb..3c523f4 100644 --- a/modules/ml/doc/boosting.rst +++ b/modules/ml/doc/boosting.rst @@ -127,7 +127,7 @@ Weak tree classifier :: virtual ~CvBoostTree(); virtual bool train( CvDTreeTrainData* _train_data, - const CvMat* subsample_idx, CvBoost* ensemble ); + const Mat& subsample_idx, CvBoost* ensemble ); virtual void scale( double s ); virtual void read( CvFileStorage* fs, CvFileNode* node, CvBoost* ensemble, CvDTreeTrainData* _data ); @@ -173,21 +173,21 @@ Boosted tree classifier :: CvBoost(); virtual ~CvBoost(); - CvBoost( const CvMat* _train_data, int _tflag, - const CvMat* _responses, const CvMat* _var_idx=0, - const CvMat* _sample_idx=0, const CvMat* _var_type=0, - const CvMat* _missing_mask=0, + CvBoost( const Mat& _train_data, int _tflag, + const Mat& _responses, const Mat& _var_idx=0, + const Mat& _sample_idx=0, const Mat& _var_type=0, + const Mat& _missing_mask=0, CvBoostParams params=CvBoostParams() ); - virtual bool train( const CvMat* _train_data, int _tflag, - const CvMat* _responses, const CvMat* _var_idx=0, - const CvMat* _sample_idx=0, const CvMat* _var_type=0, - const CvMat* _missing_mask=0, + virtual bool train( const Mat& _train_data, int _tflag, + const Mat& _responses, const Mat& _var_idx=0, + const Mat& _sample_idx=0, const Mat& _var_type=0, + const Mat& _missing_mask=0, CvBoostParams params=CvBoostParams(), bool update=false ); - virtual float predict( const CvMat* _sample, const CvMat* _missing=0, - CvMat* weak_responses=0, CvSlice slice=CV_WHOLE_SEQ, + virtual float predict( const Mat& _sample, const Mat& _missing=0, + Mat& weak_responses=0, CvSlice slice=CV_WHOLE_SEQ, bool raw_mode=false ) const; virtual void prune( CvSlice slice ); @@ -221,7 +221,7 @@ Boosted tree classifier 
:: CvBoost::train -------------- -.. cpp:function:: bool CvBoost::train( const CvMat* _train_data, int _tflag, const CvMat* _responses, const CvMat* _var_idx=0, const CvMat* _sample_idx=0, const CvMat* _var_type=0, const CvMat* _missing_mask=0, CvBoostParams params=CvBoostParams(), bool update=false ) +.. cpp:function:: bool CvBoost::train( const Mat& _train_data, int _tflag, const Mat& _responses, const Mat& _var_idx=Mat(), const Mat& _sample_idx=Mat(), const Mat& _var_type=Mat(), const Mat& _missing_mask=Mat(), CvBoostParams params=CvBoostParams(), bool update=false ) Trains a boosted tree classifier. @@ -233,7 +233,7 @@ The train method follows the common template. The last parameter ``update`` spec CvBoost::predict ---------------- -.. cpp:function:: float CvBoost::predict( const CvMat* sample, const CvMat* missing=0, CvMat* weak_responses=0, CvSlice slice=CV_WHOLE_SEQ, bool raw_mode=false ) const +.. cpp:function:: float CvBoost::predict( const Mat& sample, const Mat& missing=Mat(), const Range& slice=Range::all(), bool rawMode=false, bool returnSum=false ) const Predicts a response for an input sample. diff --git a/modules/ml/doc/decision_trees.rst b/modules/ml/doc/decision_trees.rst index c8e65f0..a6d227a 100644 --- a/modules/ml/doc/decision_trees.rst +++ b/modules/ml/doc/decision_trees.rst @@ -166,27 +166,27 @@ Decision tree training data and shared data for tree ensembles :: struct CvDTreeTrainData { CvDTreeTrainData(); - CvDTreeTrainData( const CvMat* _train_data, int _tflag, - const CvMat* _responses, const CvMat* _var_idx=0, - const CvMat* _sample_idx=0, const CvMat* _var_type=0, - const CvMat* _missing_mask=0, + CvDTreeTrainData( const Mat& _train_data, int _tflag, + const Mat& _responses, const Mat& _var_idx=Mat(), + const Mat& _sample_idx=Mat(), const Mat& _var_type=Mat(), + const Mat& _missing_mask=Mat(), const CvDTreeParams& _params=CvDTreeParams(), bool _shared=false, bool _add_labels=false ); virtual ~CvDTreeTrainData(); - virtual void set_data( const CvMat* _train_data, int _tflag, - const CvMat* _responses, const CvMat* _var_idx=0, - const CvMat* _sample_idx=0, const CvMat* _var_type=0, - const CvMat* _missing_mask=0, + virtual void set_data( const Mat& _train_data, int _tflag, + const Mat& _responses, const Mat& _var_idx=Mat(), + const Mat& _sample_idx=Mat(), const Mat& _var_type=Mat(), + const Mat& _missing_mask=Mat(), const CvDTreeParams& _params=CvDTreeParams(), bool _shared=false, bool _add_labels=false, bool _update_data=false ); - virtual void get_vectors( const CvMat* _subsample_idx, + virtual void get_vectors( const Mat& _subsample_idx, float* values, uchar* missing, float* responses, bool get_class_idx=false ); - virtual CvDTreeNode* subsample_data( const CvMat* _subsample_idx ); + virtual CvDTreeNode* subsample_data( const Mat& _subsample_idx ); virtual void write_params( CvFileStorage* fs ); virtual void read_params( CvFileStorage* fs, CvFileNode* node ); @@ -226,20 +226,20 @@ Decision tree training data and shared data for tree ensembles :: int buf_count, buf_size; bool shared; - CvMat* cat_count; - CvMat* cat_ofs; - CvMat* cat_map; + Mat& cat_count; + Mat& cat_ofs; + Mat& cat_map; - CvMat* counts; - CvMat* buf; - CvMat* direction; - CvMat* split_buf; + Mat& counts; + Mat& buf; + Mat& direction; + Mat& split_buf; - CvMat* var_idx; - CvMat* var_type; // i-th element = + Mat& var_idx; + Mat& var_type; // i-th element = // k<0 - ordered // k>=0 - categorical, see k-th element of cat_* arrays - CvMat* priors; + Mat& priors; CvDTreeParams params; @@ -294,19 
+294,19 @@ Decision tree :: CvDTree(); virtual ~CvDTree(); - virtual bool train( const CvMat* _train_data, int _tflag, - const CvMat* _responses, const CvMat* _var_idx=0, - const CvMat* _sample_idx=0, const CvMat* _var_type=0, - const CvMat* _missing_mask=0, + virtual bool train( const Mat& _train_data, int _tflag, + const Mat& _responses, const Mat& _var_idx=Mat(), + const Mat& _sample_idx=Mat(), const Mat& _var_type=Mat(), + const Mat& _missing_mask=Mat(), CvDTreeParams params=CvDTreeParams() ); virtual bool train( CvDTreeTrainData* _train_data, - const CvMat* _subsample_idx ); + const Mat& _subsample_idx ); - virtual CvDTreeNode* predict( const CvMat* _sample, - const CvMat* _missing_data_mask=0, + virtual CvDTreeNode* predict( const Mat& _sample, + const Mat& _missing_data_mask=Mat(), bool raw_mode=false ) const; - virtual const CvMat* get_var_importance(); + virtual const Mat& get_var_importance(); virtual void clear(); virtual void read( CvFileStorage* fs, CvFileNode* node ); @@ -323,7 +323,7 @@ Decision tree :: protected: - virtual bool do_train( const CvMat* _subsample_idx ); + virtual bool do_train( const Mat& _subsample_idx ); virtual void try_split_node( CvDTreeNode* n ); virtual void split_node_data( CvDTreeNode* n ); @@ -359,7 +359,7 @@ Decision tree :: CvDTreeNode* root; int pruned_tree_idx; - CvMat* var_importance; + Mat& var_importance; CvDTreeTrainData* data; }; @@ -371,9 +371,9 @@ Decision tree :: CvDTree::train -------------- -.. cpp:function:: bool CvDTree::train( const CvMat* _train_data, int _tflag, const CvMat* _responses, const CvMat* _var_idx=0, const CvMat* _sample_idx=0, const CvMat* _var_type=0, const CvMat* _missing_mask=0, CvDTreeParams params=CvDTreeParams() ) +.. cpp:function:: bool CvDTree::train( const Mat& _train_data, int _tflag, const Mat& _responses, const Mat& _var_idx=Mat(), const Mat& _sample_idx=Mat(), const Mat& _var_type=Mat(), const Mat& _missing_mask=Mat(), CvDTreeParams params=CvDTreeParams() ) -.. cpp:function:: bool CvDTree::train( CvDTreeTrainData* _train_data, const CvMat* _subsample_idx ) +.. cpp:function:: bool CvDTree::train( CvDTreeTrainData* _train_data, const Mat& _subsample_idx ) Trains a decision tree. @@ -391,7 +391,7 @@ There are two ``train`` methods in ``CvDTree`` : CvDTree::predict ---------------- -.. cpp:function:: CvDTreeNode* CvDTree::predict( const CvMat* _sample, const CvMat* _missing_data_mask=0, bool raw_mode=false ) const +.. cpp:function:: CvDTreeNode* CvDTree::predict( const Mat& _sample, const Mat& _missing_data_mask=Mat(), bool raw_mode=false ) const Returns the leaf node of a decision tree corresponding to the input vector. 
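As a brief illustration of the ``Mat``-based interface documented above, here is a hedged sketch of training a tree on a toy data set and querying a prediction (the data set and the choice of default ``CvDTreeParams`` are purely illustrative)::

    #include <opencv2/core/core.hpp>
    #include <opencv2/ml/ml.hpp>
    #include <cstdio>

    int main()
    {
        // toy training set: 4 samples (rows), 2 ordered features each
        float samplesData[]   = { 1, 1,   1, 2,   6, 5,   7, 8 };
        float responsesData[] = { 0, 0, 1, 1 };
        cv::Mat samples(4, 2, CV_32FC1, samplesData);
        cv::Mat responses(4, 1, CV_32FC1, responsesData);

        CvDTree tree;
        // CV_ROW_SAMPLE: each row of "samples" is one training sample
        tree.train(samples, CV_ROW_SAMPLE, responses);

        cv::Mat query = (cv::Mat_<float>(1, 2) << 6.5f, 6.f);
        const CvDTreeNode* node = tree.predict(query);
        // the predicted response is stored in the reached node
        printf("predicted response: %f\n", node ? node->value : -1.);
        return 0;
    }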
diff --git a/modules/ml/doc/expectation_maximization.rst b/modules/ml/doc/expectation_maximization.rst index da6e33e..a35895c 100644 --- a/modules/ml/doc/expectation_maximization.rst +++ b/modules/ml/doc/expectation_maximization.rst @@ -108,8 +108,8 @@ Parameters of the EM algorithm :: CvTermCriteria _term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON), - CvMat* _probs=0, CvMat* _weights=0, - CvMat* _means=0, CvMat** _covs=0 ) : + const CvMat* _probs=0, const CvMat* _weights=0, + const CvMat* _means=0, const CvMat** _covs=0 ) : nclusters(_nclusters), cov_mat_type(_cov_mat_type), start_step(_start_step), probs(_probs), weights(_weights), means(_means), covs(_covs), @@ -149,21 +149,21 @@ EM model :: enum { START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0 }; CvEM(); - CvEM( const CvMat* samples, const CvMat* sample_idx=0, - CvEMParams params=CvEMParams(), CvMat* labels=0 ); + CvEM( const Mat& samples, const Mat& sample_idx=Mat(), + CvEMParams params=CvEMParams(), Mat* labels=0 ); virtual ~CvEM(); - virtual bool train( const CvMat* samples, const CvMat* sample_idx=0, - CvEMParams params=CvEMParams(), CvMat* labels=0 ); + virtual bool train( const Mat& samples, const Mat& sample_idx=Mat(), + CvEMParams params=CvEMParams(), Mat* labels=0 ); - virtual float predict( const CvMat* sample, CvMat* probs ) const; + virtual float predict( const Mat& sample, Mat& probs ) const; virtual void clear(); int get_nclusters() const { return params.nclusters; } - const CvMat* get_means() const { return means; } - const CvMat** get_covs() const { return covs; } - const CvMat* get_weights() const { return weights; } - const CvMat* get_probs() const { return probs; } + const Mat& get_means() const { return means; } + const Mat&* get_covs() const { return covs; } + const Mat& get_weights() const { return weights; } + const Mat& get_probs() const { return probs; } protected: @@ -173,19 +173,19 @@ EM model :: virtual double run_em( const CvVectors& train_data ); virtual void init_auto( const CvVectors& samples ); virtual void kmeans( const CvVectors& train_data, int nclusters, - CvMat* labels, CvTermCriteria criteria, - const CvMat* means ); + Mat& labels, CvTermCriteria criteria, + const Mat& means ); CvEMParams params; double log_likelihood; - CvMat* means; - CvMat** covs; - CvMat* weights; - CvMat* probs; + Mat& means; + Mat&* covs; + Mat& weights; + Mat& probs; - CvMat* log_weight_div_det; - CvMat* inv_eigen_values; - CvMat** cov_rotate_mats; + Mat& log_weight_div_det; + Mat& inv_eigen_values; + Mat&* cov_rotate_mats; }; @@ -195,7 +195,7 @@ EM model :: CvEM::train ----------- -.. cpp:function:: void CvEM::train( const CvMat* samples, const CvMat* sample_idx=0, CvEMParams params=CvEMParams(), CvMat* labels=0 ) +.. cpp:function:: void CvEM::train( const Mat& samples, const Mat& sample_idx=Mat(), CvEMParams params=CvEMParams(), Mat* labels=0 ) Estimates the Gaussian mixture parameters from a sample set. @@ -210,110 +210,7 @@ Unlike many of the ML models, EM is an unsupervised learning algorithm and it do The trained model can be used further for prediction, just like any other classifier. The trained model is similar to the :ref:`Bayes classifier`. -Example: Clustering random samples of multi-Gaussian distribution using EM :: +For example of clustering random samples of multi-Gaussian distribution using EM see em.cpp sample in OpenCV distribution. 
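In addition to the em.cpp sample referenced above, here is a minimal hedged sketch of the ``Mat``-based interface listed in the class declaration (the cluster count, covariance model, and random data are illustrative only)::

    #include <opencv2/core/core.hpp>
    #include <opencv2/ml/ml.hpp>

    int main()
    {
        // 100 random 2D points to be modelled by a 2-component Gaussian mixture
        cv::Mat samples(100, 2, CV_32FC1);
        cv::randu(samples, cv::Scalar::all(0), cv::Scalar::all(10));

        CvEMParams params;
        params.nclusters    = 2;
        params.cov_mat_type = CvEM::COV_MAT_SPHERICAL;
        params.start_step   = CvEM::START_AUTO_STEP;

        CvEM em;
        cv::Mat labels;
        em.train(samples, cv::Mat(), params, &labels);   // labels: per-sample cluster index

        cv::Mat means = em.get_means();                  // one row per mixture component
        return 0;
    }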
- #include "ml.h" - #include "highgui.h" - - int main( int argc, char** argv ) - { - const int N = 4; - const int N1 = (int)sqrt((double)N); - const CvScalar colors[] = {{0,0,255}},{{0,255,0}}, - {{0,255,255}},{{255,255,0} - ; - int i, j; - int nsamples = 100; - CvRNG rng_state = cvRNG(-1); - CvMat* samples = cvCreateMat( nsamples, 2, CV_32FC1 ); - CvMat* labels = cvCreateMat( nsamples, 1, CV_32SC1 ); - IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 ); - float _sample[2]; - CvMat sample = cvMat( 1, 2, CV_32FC1, _sample ); - CvEM em_model; - CvEMParams params; - CvMat samples_part; - - cvReshape( samples, samples, 2, 0 ); - for( i = 0; i < N; i++ ) - { - CvScalar mean, sigma; - - // form the training samples - cvGetRows( samples, &samples_part, i*nsamples/N, - (i+1)*nsamples/N ); - mean = cvScalar(((i - ((i/N1)+1.)*img->height/(N1+1)); - sigma = cvScalar(30,30); - cvRandArr( &rng_state, &samples_part, CV_RAND_NORMAL, - mean, sigma ); - } - cvReshape( samples, samples, 1, 0 ); - - // initialize model parameters - params.covs = NULL; - params.means = NULL; - params.weights = NULL; - params.probs = NULL; - params.nclusters = N; - params.cov_mat_type = CvEM::COV_MAT_SPHERICAL; - params.start_step = CvEM::START_AUTO_STEP; - params.term_crit.max_iter = 10; - params.term_crit.epsilon = 0.1; - params.term_crit.type = CV_TERMCRIT_ITER|CV_TERMCRIT_EPS; - - // cluster the data - em_model.train( samples, 0, params, labels ); - - #if 0 - // the piece of code shows how to repeatedly optimize the model - // with less-constrained parameters - //(COV_MAT_DIAGONAL instead of COV_MAT_SPHERICAL) - // when the output of the first stage is used as input for the second one. - CvEM em_model2; - params.cov_mat_type = CvEM::COV_MAT_DIAGONAL; - params.start_step = CvEM::START_E_STEP; - params.means = em_model.get_means(); - params.covs = (const CvMat**)em_model.get_covs(); - params.weights = em_model.get_weights(); - - em_model2.train( samples, 0, params, labels ); - // to use em_model2, replace em_model.predict() - // with em_model2.predict() below - #endif - // classify every image pixel - cvZero( img ); - for( i = 0; i < img->height; i++ ) - { - for( j = 0; j < img->width; j++ ) - { - CvPoint pt = cvPoint(j, i); - sample.data.fl[0] = (float)j; - sample.data.fl[1] = (float)i; - int response = cvRound(em_model.predict( &sample, NULL )); - CvScalar c = colors[response]; - - cvCircle( img, pt, 1, cvScalar(c.val[0]*0.75, - c.val[1]*0.75,c.val[2]*0.75), CV_FILLED ); - } - } - - //draw the clustered samples - for( i = 0; i < nsamples; i++ ) - { - CvPoint pt; - pt.x = cvRound(samples->data.fl[i*2]); - pt.y = cvRound(samples->data.fl[i*2+1]); - cvCircle( img, pt, 1, colors[labels->data.i[i]], CV_FILLED ); - } - - cvNamedWindow( "EM-clustering result", 1 ); - cvShowImage( "EM-clustering result", img ); - cvWaitKey(0); - - cvReleaseMat( &samples ); - cvReleaseMat( &labels ); - return 0; - } diff --git a/modules/ml/doc/k_nearest_neighbors.rst b/modules/ml/doc/k_nearest_neighbors.rst index 7767100..a06e351 100644 --- a/modules/ml/doc/k_nearest_neighbors.rst +++ b/modules/ml/doc/k_nearest_neighbors.rst @@ -22,15 +22,15 @@ K-Nearest Neighbors model :: CvKNearest(); virtual ~CvKNearest(); - CvKNearest( const CvMat* _train_data, const CvMat* _responses, - const CvMat* _sample_idx=0, bool _is_regression=false, int max_k=32 ); + CvKNearest( const Mat& _train_data, const Mat& _responses, + const Mat& _sample_idx=Mat(), bool _is_regression=false, int max_k=32 ); - virtual bool train( const CvMat* _train_data, const CvMat* 
_responses, - const CvMat* _sample_idx=0, bool is_regression=false, + virtual bool train( const Mat& _train_data, const Mat& _responses, + const Mat& _sample_idx=Mat(), bool is_regression=false, int _max_k=32, bool _update_base=false ); - virtual float find_nearest( const CvMat* _samples, int k, CvMat* results, - const float** neighbors=0, CvMat* neighbor_responses=0, CvMat* dist=0 ) const; + virtual float find_nearest( const Mat& _samples, int k, Mat* results=0, + const float** neighbors=0, Mat* neighbor_responses=0, Mat* dist=0 ) const; virtual void clear(); int get_max_k() const; @@ -49,7 +49,7 @@ K-Nearest Neighbors model :: CvKNearest::train ----------------- -.. cpp:function:: bool CvKNearest::train( const CvMat* _train_data, const CvMat* _responses, const CvMat* _sample_idx=0, bool is_regression=false, int _max_k=32, bool _update_base=false ) +.. cpp:function:: bool CvKNearest::train( const Mat& _train_data, const Mat& _responses, const Mat& _sample_idx=Mat(), bool is_regression=false, int _max_k=32, bool _update_base=false ) Trains the model. @@ -70,7 +70,7 @@ The parameter ``_update_base`` specifies whether the model is trained from scrat CvKNearest::find_nearest ------------------------ -.. cpp:function:: float CvKNearest::find_nearest( const CvMat* _samples, int k, CvMat* results=0, const float** neighbors=0, CvMat* neighbor_responses=0, CvMat* dist=0 ) const +.. cpp:function:: float CvKNearest::find_nearest( const Mat& _samples, int k, Mat* results=0, const float** neighbors=0, Mat* neighbor_responses=0, Mat* dist=0 ) const Finds the neighbors for input vectors. @@ -85,7 +85,9 @@ For a custom classification/regression prediction, the method can optionally ret For each input vector, the neighbors are sorted by their distances to the vector. -If only a single input vector is passed, all output matrices are optional and the predicted value is returned by the method. :: +If only a single input vector is passed, all output matrices are optional and the predicted value is returned by the method. + +The sample below (currently using the obsolete ``CvMat`` structures) demonstrates the use of the k-nearest classifier for 2D point classification :: #include "ml.h" #include "highgui.h" diff --git a/modules/ml/doc/neural_networks.rst b/modules/ml/doc/neural_networks.rst index 477c5b1..7e6fb8a 100644 --- a/modules/ml/doc/neural_networks.rst +++ b/modules/ml/doc/neural_networks.rst @@ -142,23 +142,23 @@ MLP model :: { public: CvANN_MLP(); - CvANN_MLP( const CvMat* _layer_sizes, + CvANN_MLP( const Mat& _layer_sizes, int _activ_func=SIGMOID_SYM, double _f_param1=0, double _f_param2=0 ); virtual ~CvANN_MLP(); - virtual void create( const CvMat* _layer_sizes, + virtual void create( const Mat& _layer_sizes, int _activ_func=SIGMOID_SYM, double _f_param1=0, double _f_param2=0 ); - virtual int train( const CvMat* _inputs, const CvMat* _outputs, - const CvMat* _sample_weights, - const CvMat* _sample_idx=0, + virtual int train( const Mat& _inputs, const Mat& _outputs, + const Mat& _sample_weights, + const Mat& _sample_idx=Mat(), CvANN_MLP_TrainParams _params = CvANN_MLP_TrainParams(), int flags=0 ); - virtual float predict( const CvMat* _inputs, - CvMat* _outputs ) const; + virtual float predict( const Mat& _inputs, + Mat& _outputs ) const; virtual void clear(); @@ -172,12 +172,12 @@ MLP model :: virtual void write( CvFileStorage* storage, const char* name ); int get_layer_count() { return layer_sizes ? 
layer_sizes->cols : 0; } - const CvMat* get_layer_sizes() { return layer_sizes; } + const Mat& get_layer_sizes() { return layer_sizes; } protected: - virtual bool prepare_to_train( const CvMat* _inputs, const CvMat* _outputs, - const CvMat* _sample_weights, const CvMat* _sample_idx, + virtual bool prepare_to_train( const Mat& _inputs, const Mat& _outputs, + const Mat& _sample_weights, const Mat& _sample_idx, CvANN_MLP_TrainParams _params, CvVectors* _ivecs, CvVectors* _ovecs, double** _sw, int _flags ); @@ -189,23 +189,23 @@ MLP model :: virtual int train_rprop( CvVectors _ivecs, CvVectors _ovecs, const double* _sw ); - virtual void calc_activ_func( CvMat* xf, const double* bias ) const; - virtual void calc_activ_func_deriv( CvMat* xf, CvMat* deriv, + virtual void calc_activ_func( Mat& xf, const double* bias ) const; + virtual void calc_activ_func_deriv( Mat& xf, Mat& deriv, const double* bias ) const; virtual void set_activ_func( int _activ_func=SIGMOID_SYM, double _f_param1=0, double _f_param2=0 ); virtual void init_weights(); - virtual void scale_input( const CvMat* _src, CvMat* _dst ) const; - virtual void scale_output( const CvMat* _src, CvMat* _dst ) const; + virtual void scale_input( const Mat& _src, Mat& _dst ) const; + virtual void scale_output( const Mat& _src, Mat& _dst ) const; virtual void calc_input_scale( const CvVectors* vecs, int flags ); virtual void calc_output_scale( const CvVectors* vecs, int flags ); virtual void write_params( CvFileStorage* fs ); virtual void read_params( CvFileStorage* fs, CvFileNode* node ); - CvMat* layer_sizes; - CvMat* wbuf; - CvMat* sample_weights; + Mat& layer_sizes; + Mat& wbuf; + Mat& sample_weights; double** weights; double f_param1, f_param2; double min_val, max_val, min_val1, max_val1; @@ -225,7 +225,7 @@ Unlike many other models in ML that are constructed and trained at once, in the CvANN_MLP::create ----------------- -.. cpp:function:: void CvANN_MLP::create( const CvMat* _layer_sizes, int _activ_func=SIGMOID_SYM, double _f_param1=0, double _f_param2=0 ) +.. cpp:function:: void CvANN_MLP::create( const Mat& _layer_sizes, int _activ_func=SIGMOID_SYM, double _f_param1=0, double _f_param2=0 ) Constructs MLP with the specified topology. @@ -243,7 +243,7 @@ The method creates an MLP network with the specified topology and assigns the sa CvANN_MLP::train ---------------- -.. cpp:function:: int CvANN_MLP::train( const CvMat* _inputs, const CvMat* _outputs, const CvMat* _sample_weights, const CvMat* _sample_idx=0, CvANN_MLP_TrainParams _params = CvANN_MLP_TrainParams(), int flags=0 ) +.. cpp:function:: int CvANN_MLP::train( const Mat& _inputs, const Mat& _outputs, const Mat& _sample_weights, const Mat& _sample_idx=Mat(), CvANN_MLP_TrainParams _params = CvANN_MLP_TrainParams(), int flags=0 ) Trains/updates MLP. 
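As an illustration of the ``create``/``train``/``predict`` sequence documented above, here is a minimal sketch using the Mat-based signatures from this patch; the 2-5-1 topology and the XOR-style data set are placeholders rather than a recommended configuration ::

    #include "opencv2/core/core.hpp"
    #include "opencv2/ml/ml.hpp"

    using namespace cv;

    int main()
    {
        // 2 inputs -> one hidden layer of 5 neurons -> 1 output
        Mat layer_sizes = (Mat_<int>(1, 3) << 2, 5, 1);

        CvANN_MLP mlp;
        mlp.create( layer_sizes, CvANN_MLP::SIGMOID_SYM );

        // tiny XOR-like training set (illustrative only)
        Mat inputs  = (Mat_<float>(4, 2) << 0,0,  0,1,  1,0,  1,1);
        Mat outputs = (Mat_<float>(4, 1) << 0,    1,    1,    0);

        // equal weight for every training sample
        mlp.train( inputs, outputs, Mat::ones(4, 1, CV_32FC1) );

        Mat result;
        mlp.predict( inputs, result );  // result: 4x1 matrix of network responses
        return 0;
    }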
diff --git a/modules/ml/doc/normal_bayes_classifier.rst b/modules/ml/doc/normal_bayes_classifier.rst index 971a705..664646d 100644 --- a/modules/ml/doc/normal_bayes_classifier.rst +++ b/modules/ml/doc/normal_bayes_classifier.rst @@ -21,13 +21,13 @@ Bayes classifier for normally distributed data :: CvNormalBayesClassifier(); virtual ~CvNormalBayesClassifier(); - CvNormalBayesClassifier( const CvMat* _train_data, const CvMat* _responses, - const CvMat* _var_idx=0, const CvMat* _sample_idx=0 ); + CvNormalBayesClassifier( const Mat& _train_data, const Mat& _responses, + const Mat& _var_idx=Mat(), const Mat& _sample_idx=Mat() ); - virtual bool train( const CvMat* _train_data, const CvMat* _responses, - const CvMat* _var_idx = 0, const CvMat* _sample_idx=0, bool update=false ); + virtual bool train( const Mat& _train_data, const Mat& _responses, + const Mat& _var_idx=Mat(), const Mat& _sample_idx=Mat(), bool update=false ); - virtual float predict( const CvMat* _samples, CvMat* results=0 ) const; + virtual float predict( const Mat& _samples, Mat* results=0 ) const; virtual void clear(); virtual void save( const char* filename, const char* name=0 ); @@ -46,7 +46,7 @@ Bayes classifier for normally distributed data :: CvNormalBayesClassifier::train ------------------------------ -.. cpp:function:: bool CvNormalBayesClassifier::train( const CvMat* _train_data, const CvMat* _responses, const CvMat* _var_idx =0, const CvMat* _sample_idx=0, bool update=false ) +.. cpp:function:: bool CvNormalBayesClassifier::train( const Mat& _train_data, const Mat& _responses, const Mat& _var_idx =Mat(), const Mat& _sample_idx=Mat(), bool update=false ) Trains the model. @@ -65,7 +65,7 @@ In addition, there is an ``update`` flag that identifies whether the model shoul CvNormalBayesClassifier::predict -------------------------------- -.. cpp:function:: float CvNormalBayesClassifier::predict( const CvMat* samples, CvMat* results=0 ) const +.. cpp:function:: float CvNormalBayesClassifier::predict( const Mat& samples, Mat* results=0 ) const Predicts the response for sample(s). diff --git a/modules/ml/doc/random_trees.rst b/modules/ml/doc/random_trees.rst index 8877a04..ebf876b 100644 --- a/modules/ml/doc/random_trees.rst +++ b/modules/ml/doc/random_trees.rst @@ -95,23 +95,23 @@ Random trees :: public: CvRTrees(); virtual ~CvRTrees(); - virtual bool train( const CvMat* _train_data, int _tflag, - const CvMat* _responses, const CvMat* _var_idx=0, - const CvMat* _sample_idx=0, const CvMat* _var_type=0, - const CvMat* _missing_mask=0, + virtual bool train( const Mat& _train_data, int _tflag, + const Mat& _responses, const Mat& _var_idx=Mat(), + const Mat& _sample_idx=Mat(), const Mat& _var_type=Mat(), + const Mat& _missing_mask=Mat(), CvRTParams params=CvRTParams() ); - virtual float predict( const CvMat* sample, const CvMat* missing = 0 ) + virtual float predict( const Mat& sample, const Mat& missing = 0 ) const; virtual void clear(); - virtual const CvMat* get_var_importance(); - virtual float get_proximity( const CvMat* sample_1, const CvMat* sample_2 ) + virtual const Mat& get_var_importance(); + virtual float get_proximity( const Mat& sample_1, const Mat& sample_2 ) const; virtual void read( CvFileStorage* fs, CvFileNode* node ); virtual void write( CvFileStorage* fs, const char* name ); - CvMat* get_active_var_mask(); + Mat& get_active_var_mask(); CvRNG* get_rng(); int get_tree_count() const; @@ -136,7 +136,7 @@ Random trees :: CvRTrees::train --------------- -.. 
cpp:function:: bool CvRTrees::train( const CvMat* train_data, int tflag, const CvMat* responses, const CvMat* comp_idx=0, const CvMat* sample_idx=0, const CvMat* var_type=0, const CvMat* missing_mask=0, CvRTParams params=CvRTParams() )
+.. cpp:function:: bool CvRTrees::train( const Mat& train_data, int tflag, const Mat& responses, const Mat& comp_idx=Mat(), const Mat& sample_idx=Mat(), const Mat& var_type=Mat(), const Mat& missing_mask=Mat(), CvRTParams params=CvRTParams() )
 
     Trains the Random Tree model.
 
@@ -149,7 +149,7 @@ The method ``CvRTrees::train`` is very similar to the first form of ``CvDTree::t
 
 CvRTrees::predict
 -----------------
-.. cpp:function:: double CvRTrees::predict( const CvMat* sample, const CvMat* missing=0 ) const
+.. cpp:function:: float CvRTrees::predict( const Mat& sample, const Mat& missing=Mat() ) const
 
     Predicts the output for an input sample.
 
@@ -161,7 +161,7 @@ The input parameters of the prediction method are the same as in ``CvDTree::pred
 
 CvRTrees::get_var_importance
 ----------------------------
-.. cpp:function:: const CvMat* CvRTrees::get_var_importance() const
+.. cpp:function:: const Mat& CvRTrees::get_var_importance() const
 
     Retrieves the variable importance array.
 
@@ -173,127 +173,10 @@ The method returns the variable importance vector, computed at the training stag
 
 CvRTrees::get_proximity
 -----------------------
-.. cpp:function:: float CvRTrees::get_proximity( const CvMat* sample_1, const CvMat* sample_2 ) const
+.. cpp:function:: float CvRTrees::get_proximity( const Mat& sample_1, const Mat& sample_2 ) const
 
     Retrieves the proximity measure between two training samples.
 
 The method returns the proximity measure between any two samples: the fraction of trees in the ensemble in which the two samples fall into the same leaf node.
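Equivalently, with :math:`T` denoting the number of trees in the ensemble and :math:`\mathrm{leaf}_t(s)` the terminal node reached by sample :math:`s` in tree :math:`t` (notation introduced here only for illustration):

.. math::

    \mathrm{proximity}(s_1, s_2) = \frac{1}{T} \sum_{t=1}^{T} \left[\, \mathrm{leaf}_t(s_1) = \mathrm{leaf}_t(s_2) \,\right]

For instance, if 73 out of 100 trees send both samples to the same leaf, the returned value is 0.73.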
-Example: Prediction of mushroom goodness using the random-tree classifier :: - - #include - #include - #include - #include "ml.h" - - int main( void ) - { - CvStatModel* cls = NULL; - CvFileStorage* storage = cvOpenFileStorage( "Mushroom.xml", - NULL,CV_STORAGE_READ ); - CvMat* data = (CvMat*)cvReadByName(storage, NULL, "sample", 0 ); - CvMat train_data, test_data; - CvMat response; - CvMat* missed = NULL; - CvMat* comp_idx = NULL; - CvMat* sample_idx = NULL; - CvMat* type_mask = NULL; - int resp_col = 0; - int i,j; - CvRTreesParams params; - CvTreeClassifierTrainParams cart_params; - const int ntrain_samples = 1000; - const int ntest_samples = 1000; - const int nvars = 23; - - if(data == NULL || data->cols != nvars) - { - puts("Error in source data"); - return -1; - } - - cvGetSubRect( data, &train_data, cvRect(0, 0, nvars, ntrain_samples) ); - cvGetSubRect( data, &test_data, cvRect(0, ntrain_samples, nvars, - ntrain_samples + ntest_samples) ); - - resp_col = 0; - cvGetCol( &train_data, &response, resp_col); - - /* create missed variable matrix */ - missed = cvCreateMat(train_data.rows, train_data.cols, CV_8UC1); - for( i = 0; i < train_data.rows; i++ ) - for( j = 0; j < train_data.cols; j++ ) - CV_MAT_ELEM(*missed,uchar,i,j) - = (uchar)(CV_MAT_ELEM(train_data,float,i,j) < 0); - - /* create comp_idx vector */ - comp_idx = cvCreateMat(1, train_data.cols-1, CV_32SC1); - for( i = 0; i < train_data.cols; i++ ) - { - if(iresp_col)CV_MAT_ELEM(*comp_idx,int,0,i-1) = i; - } - - /* create sample_idx vector */ - sample_idx = cvCreateMat(1, train_data.rows, CV_32SC1); - for( j = i = 0; i < train_data.rows; i++ ) - { - if(CV_MAT_ELEM(response,float,i,0) < 0) continue; - CV_MAT_ELEM(*sample_idx,int,0,j) = i; - j++; - } - sample_idx->cols = j; - - /* create type mask */ - type_mask = cvCreateMat(1, train_data.cols+1, CV_8UC1); - cvSet( type_mask, cvRealScalar(CV_VAR_CATEGORICAL), 0); - - // initialize training parameters - cvSetDefaultParamTreeClassifier((CvStatModelParams*)&cart_params); - cart_params.wrong_feature_as_unknown = 1; - params.tree_params = &cart_params; - params.term_crit.max_iter = 50; - params.term_crit.epsilon = 0.1; - params.term_crit.type = CV_TERMCRIT_ITER|CV_TERMCRIT_EPS; - - puts("Random forest results"); - cls = cvCreateRTreesClassifier( &train_data, - CV_ROW_SAMPLE, - &response, - (CvStatModelParams*)& - params, - comp_idx, - sample_idx, - type_mask, - missed ); - if( cls ) - { - CvMat sample = cvMat( 1, nvars, CV_32FC1, test_data.data.fl ); - CvMat test_resp; - int wrong = 0, total = 0; - cvGetCol( &test_data, &test_resp, resp_col); - for( i = 0; i < ntest_samples; i++, sample.data.fl += nvars ) - { - if( CV_MAT_ELEM(test_resp,float,i,0) >= 0 ) - { - float resp = cls->predict( cls, &sample, NULL ); - wrong += (fabs(resp-response.data.fl[i]) > 1e-3 ) ? 1 : 0; - total++; - } - } - printf( "Test set error = - } - else - puts("Error forest creation"); - - cvReleaseMat(&missed); - cvReleaseMat(&sample_idx); - cvReleaseMat(&comp_idx); - cvReleaseMat(&type_mask); - cvReleaseMat(&data); - cvReleaseStatModel(&cls); - cvReleaseFileStorage(&storage); - return 0; - } - - +For the random trees usage example, please, see letter_recog.cpp sample in OpenCV distribution. 
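A shorter, self-contained sketch of the calls documented above is given here; it follows the Mat-based signatures quoted in this section, and the synthetic regression data and the default ``CvRTParams`` are placeholders only ::

    #include "opencv2/core/core.hpp"
    #include "opencv2/ml/ml.hpp"
    #include <cstdio>

    using namespace cv;

    int main()
    {
        // synthetic regression problem: 100 samples, 4 ordered features (illustrative only)
        Mat train_data( 100, 4, CV_32FC1 );
        randu( train_data, Scalar(0), Scalar(1) );
        Mat responses = train_data.col(0) * 2.0f;  // target = 2 * first feature

        CvRTrees rtree;
        rtree.train( train_data, CV_ROW_SAMPLE, responses,
                     Mat(), Mat(), Mat(), Mat(), CvRTParams() );

        float prediction = rtree.predict( train_data.row(0) );
        float prox = rtree.get_proximity( train_data.row(0), train_data.row(1) );
        printf( "prediction: %f  proximity(0,1): %f  trees: %d\n",
                prediction, prox, rtree.get_tree_count() );
        return 0;
    }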
diff --git a/modules/ml/doc/statistical_models.rst b/modules/ml/doc/statistical_models.rst index da0a2fc..ca5b1f9 100644 --- a/modules/ml/doc/statistical_models.rst +++ b/modules/ml/doc/statistical_models.rst @@ -15,20 +15,20 @@ Base class for statistical models in ML :: { public: /* CvStatModel(); */ - /* CvStatModel( const CvMat* train_data ... ); */ + /* CvStatModel( const Mat& train_data ... ); */ virtual ~CvStatModel(); virtual void clear()=0; - /* virtual bool train( const CvMat* train_data, [int tflag,] ..., const - CvMat* responses, ..., - [const CvMat* var_idx,] ..., [const CvMat* sample_idx,] ... - [const CvMat* var_type,] ..., [const CvMat* missing_mask,] + /* virtual bool train( const Mat& train_data, [int tflag,] ..., const + Mat& responses, ..., + [const Mat& var_idx,] ..., [const Mat& sample_idx,] ... + [const Mat& var_type,] ..., [const Mat& missing_mask,] ... )=0; */ - /* virtual float predict( const CvMat* sample ... ) const=0; */ + /* virtual float predict( const Mat& sample ... ) const=0; */ virtual void save( const char* filename, const char* name=0 )=0; virtual void load( const char* filename, const char* name=0 )=0; @@ -58,7 +58,7 @@ Each statistical model class in ML has a default constructor without parameters. CvStatModel::CvStatModel(...) ----------------------------- -.. cpp:function:: CvStatModel::CvStatModel( const CvMat* train_data ... ) +.. cpp:function:: CvStatModel::CvStatModel( const Mat& train_data ... ) Serves as a training constructor. @@ -161,7 +161,7 @@ The previous model state is cleared by ``clear()`` . CvStatModel::train ------------------ -.. cpp:function:: bool CvStatMode::train( const CvMat* train_data, [int tflag,] ..., const CvMat* responses, ..., [const CvMat* var_idx,] ..., [const CvMat* sample_idx,] ... [const CvMat* var_type,] ..., [const CvMat* missing_mask,] ... ) +.. cpp:function:: bool CvStatMode::train( const Mat& train_data, [int tflag,] ..., const Mat& responses, ..., [const Mat& var_idx,] ..., [const Mat& sample_idx,] ... [const Mat& var_type,] ..., [const Mat& missing_mask,] ... ) Trains the model. @@ -193,7 +193,7 @@ Usually, the previous model state is cleared by ``clear()`` before running the t CvStatModel::predict -------------------- -.. cpp:function:: float CvStatMode::predict( const CvMat* sample[, ] ) const +.. cpp:function:: float CvStatMode::predict( const Mat& sample[, ] ) const Predicts the response for a sample. 
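To make the shared train/save/load/predict contract concrete, the following sketch runs the full cycle with the normal Bayes classifier documented earlier; the file name ``bayes_model.xml`` and the synthetic two-class data are placeholders ::

    #include "opencv2/core/core.hpp"
    #include "opencv2/ml/ml.hpp"
    #include <cstdio>

    using namespace cv;

    int main()
    {
        // two Gaussian blobs, one per class (illustrative data only)
        Mat train_data( 200, 2, CV_32FC1 ), responses( 200, 1, CV_32FC1 );
        randn( train_data.rowRange(0, 100),   Scalar::all(100), Scalar::all(20) );
        randn( train_data.rowRange(100, 200), Scalar::all(200), Scalar::all(20) );
        responses.rowRange(0, 100)   = Scalar(0);  // class 0
        responses.rowRange(100, 200) = Scalar(1);  // class 1

        // the common CvStatModel workflow: train, save, load, predict
        CvNormalBayesClassifier model;
        model.train( train_data, responses );
        model.save( "bayes_model.xml" );

        CvNormalBayesClassifier loaded;
        loaded.load( "bayes_model.xml" );
        printf( "predicted class: %d\n", cvRound( loaded.predict( train_data.row(0) ) ) );
        return 0;
    }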
diff --git a/modules/ml/doc/support_vector_machines.rst b/modules/ml/doc/support_vector_machines.rst index 298e4f5..0ff13ba 100644 --- a/modules/ml/doc/support_vector_machines.rst +++ b/modules/ml/doc/support_vector_machines.rst @@ -46,16 +46,16 @@ Support Vector Machines :: CvSVM(); virtual ~CvSVM(); - CvSVM( const CvMat* _train_data, const CvMat* _responses, - const CvMat* _var_idx=0, const CvMat* _sample_idx=0, + CvSVM( const Mat& _train_data, const Mat& _responses, + const Mat& _var_idx=Mat(), const Mat& _sample_idx=Mat(), CvSVMParams _params=CvSVMParams() ); - virtual bool train( const CvMat* _train_data, const CvMat* _responses, - const CvMat* _var_idx=0, const CvMat* _sample_idx=0, + virtual bool train( const Mat& _train_data, const Mat& _responses, + const Mat& _var_idx=Mat(), const Mat& _sample_idx=Mat(), CvSVMParams _params=CvSVMParams() ); - virtual bool train_auto( const CvMat* _train_data, const CvMat* _responses, - const CvMat* _var_idx, const CvMat* _sample_idx, CvSVMParams _params, + virtual bool train_auto( const Mat& _train_data, const Mat& _responses, + const Mat& _var_idx, const Mat& _sample_idx, CvSVMParams _params, int k_fold = 10, CvParamGrid C_grid = get_default_grid(CvSVM::C), CvParamGrid gamma_grid = get_default_grid(CvSVM::GAMMA), @@ -64,7 +64,7 @@ Support Vector Machines :: CvParamGrid coef_grid = get_default_grid(CvSVM::COEF), CvParamGrid degree_grid = get_default_grid(CvSVM::DEGREE) ); - virtual float predict( const CvMat* _sample ) const; + virtual float predict( const Mat& _sample ) const; virtual int get_support_vector_count() const; virtual const float* get_support_vector(int i) const; virtual CvSVMParams get_params() const { return params; }; @@ -100,7 +100,7 @@ SVM training parameters :: CvSVMParams( int _svm_type, int _kernel_type, double _degree, double _gamma, double _coef0, double _C, double _nu, double _p, - CvMat* _class_weights, CvTermCriteria _term_crit ); + const CvMat* _class_weights, CvTermCriteria _term_crit ); int svm_type; int kernel_type; @@ -125,7 +125,7 @@ The structure must be initialized and passed to the training method of CvSVM::train ------------ -.. cpp:function:: bool CvSVM::train( const CvMat* _train_data, const CvMat* _responses, const CvMat* _var_idx=0, const CvMat* _sample_idx=0, CvSVMParams _params=CvSVMParams() ) +.. cpp:function:: bool CvSVM::train( const Mat& _train_data, const Mat& _responses, const Mat& _var_idx=Mat(), const Mat& _sample_idx=Mat(), CvSVMParams _params=CvSVMParams() ) Trains SVM. @@ -145,7 +145,7 @@ All the other parameters are gathered in the CvSVM::train_auto ----------------- -.. cpp:function:: train_auto( const CvMat* _train_data, const CvMat* _responses, const CvMat* _var_idx, const CvMat* _sample_idx, CvSVMParams params, int k_fold = 10, CvParamGrid C_grid = get_default_grid(CvSVM::C), CvParamGrid gamma_grid = get_default_grid(CvSVM::GAMMA), CvParamGrid p_grid = get_default_grid(CvSVM::P), CvParamGrid nu_grid = get_default_grid(CvSVM::NU), CvParamGrid coef_grid = get_default_grid(CvSVM::COEF), CvParamGrid degree_grid = get_default_grid(CvSVM::DEGREE) ) +.. 
cpp:function:: train_auto( const Mat& _train_data, const Mat& _responses, const Mat& _var_idx, const Mat& _sample_idx, CvSVMParams params, int k_fold = 10, CvParamGrid C_grid = get_default_grid(CvSVM::C), CvParamGrid gamma_grid = get_default_grid(CvSVM::GAMMA), CvParamGrid p_grid = get_default_grid(CvSVM::P), CvParamGrid nu_grid = get_default_grid(CvSVM::NU), CvParamGrid coef_grid = get_default_grid(CvSVM::COEF), CvParamGrid degree_grid = get_default_grid(CvSVM::DEGREE) ) Trains SVM with optimal parameters. diff --git a/modules/ml/src/em.cpp b/modules/ml/src/em.cpp index a8a223f..63d833a 100644 --- a/modules/ml/src/em.cpp +++ b/modules/ml/src/em.cpp @@ -849,187 +849,13 @@ void CvEM::init_auto( const CvVectors& train_data ) void CvEM::kmeans( const CvVectors& train_data, int nclusters, CvMat* labels, CvTermCriteria termcrit, const CvMat* centers0 ) { - CvMat* centers = 0; - CvMat* old_centers = 0; - CvMat* counters = 0; - - CV_FUNCNAME( "CvEM::kmeans" ); - - __BEGIN__; - - cv::RNG rng(0xFFFFFFFF); - int i, j, k, nsamples, dims; - int iter = 0; - double max_dist = DBL_MAX; - - termcrit = cvCheckTermCriteria( termcrit, 1e-6, 100 ); - termcrit.epsilon *= termcrit.epsilon; - nsamples = train_data.count; - dims = train_data.dims; - nclusters = MIN( nclusters, nsamples ); - - CV_CALL( centers = cvCreateMat( nclusters, dims, CV_64FC1 )); - CV_CALL( old_centers = cvCreateMat( nclusters, dims, CV_64FC1 )); - CV_CALL( counters = cvCreateMat( 1, nclusters, CV_32SC1 )); - cvZero( old_centers ); - - if( centers0 ) - { - CV_CALL( cvConvert( centers0, centers )); - } - else - { - for( i = 0; i < nsamples; i++ ) - labels->data.i[i] = i*nclusters/nsamples; - cvRandShuffle( labels, &rng.state ); - } - - for( ;; ) - { - CvMat* temp; - - if( iter > 0 || centers0 ) - { - for( i = 0; i < nsamples; i++ ) - { - const float* s = train_data.data.fl[i]; - int k_best = 0; - double min_dist = DBL_MAX; - - for( k = 0; k < nclusters; k++ ) - { - const double* c = (double*)(centers->data.ptr + k*centers->step); - double dist = 0; - - for( j = 0; j <= dims - 4; j += 4 ) - { - double t0 = c[j] - s[j]; - double t1 = c[j+1] - s[j+1]; - dist += t0*t0 + t1*t1; - t0 = c[j+2] - s[j+2]; - t1 = c[j+3] - s[j+3]; - dist += t0*t0 + t1*t1; - } - - for( ; j < dims; j++ ) - { - double t = c[j] - s[j]; - dist += t*t; - } - - if( min_dist > dist ) - { - min_dist = dist; - k_best = k; - } - } - - labels->data.i[i] = k_best; - } - } - - if( ++iter > termcrit.max_iter ) - break; - - CV_SWAP( centers, old_centers, temp ); - cvZero( centers ); - cvZero( counters ); - - // update centers - for( i = 0; i < nsamples; i++ ) - { - const float* s = train_data.data.fl[i]; - k = labels->data.i[i]; - double* c = (double*)(centers->data.ptr + k*centers->step); - - for( j = 0; j <= dims - 4; j += 4 ) - { - double t0 = c[j] + s[j]; - double t1 = c[j+1] + s[j+1]; - - c[j] = t0; - c[j+1] = t1; - - t0 = c[j+2] + s[j+2]; - t1 = c[j+3] + s[j+3]; - - c[j+2] = t0; - c[j+3] = t1; - } - for( ; j < dims; j++ ) - c[j] += s[j]; - counters->data.i[k]++; - } - - if( iter > 1 ) - max_dist = 0; - - for( k = 0; k < nclusters; k++ ) - { - double* c = (double*)(centers->data.ptr + k*centers->step); - if( counters->data.i[k] != 0 ) - { - double scale = 1./counters->data.i[k]; - for( j = 0; j < dims; j++ ) - c[j] *= scale; - } - else - { - const float* s; - for( j = 0; j < 10; j++ ) - { - i = rng(nsamples); - if( counters->data.i[labels->data.i[i]] > 1 ) - break; - } - s = train_data.data.fl[i]; - for( j = 0; j < dims; j++ ) - c[j] = s[j]; - } - - if( iter > 1 ) - { - double 
dist = 0;
-                const double* c_o = (double*)(old_centers->data.ptr + k*old_centers->step);
-                for( j = 0; j < dims; j++ )
-                {
-                    double t = c[j] - c_o[j];
-                    dist += t*t;
-                }
-                if( max_dist < dist )
-                    max_dist = dist;
-            }
-        }
-
-        if( max_dist < termcrit.epsilon )
-            break;
-    }
-
-    cvZero( counters );
+    int i, nsamples = train_data.count, dims = train_data.dims;
+    cv::Ptr<CvMat> temp_mat = cvCreateMat(nsamples, dims, CV_32F);
+
     for( i = 0; i < nsamples; i++ )
-        counters->data.i[labels->data.i[i]]++;
-
-    // ensure that we do not have empty clusters
-    for( k = 0; k < nclusters; k++ )
-        if( counters->data.i[k] == 0 )
-            for(;;)
-            {
-                i = rng(nsamples);
-                j = labels->data.i[i];
-                if( counters->data.i[j] > 1 )
-                {
-                    labels->data.i[i] = k;
-                    counters->data.i[j]--;
-                    counters->data.i[k]++;
-                    break;
-                }
-            }
-
-    __END__;
-
-    cvReleaseMat( &centers );
-    cvReleaseMat( &old_centers );
-    cvReleaseMat( &counters );
+        memcpy( temp_mat->data.ptr + temp_mat->step*i, train_data.data.fl[i], dims*sizeof(float));
+
+    cvKMeans2(temp_mat, nclusters, labels, termcrit, 10);
 }
 
diff --git a/modules/objdetect/doc/cascade_classification.rst b/modules/objdetect/doc/cascade_classification.rst
index 9d0702b..0f7d7aa 100644
--- a/modules/objdetect/doc/cascade_classification.rst
+++ b/modules/objdetect/doc/cascade_classification.rst
@@ -34,7 +34,7 @@ Base class for computing feature values in cascade classifiers ::
 
 FeatureEvaluator::read
 --------------------------
-.. cpp:function:: bool FeatureEvaluator::read(const FileNode\& node)
+.. cpp:function:: bool FeatureEvaluator::read(const FileNode& node)
 
     Reads parameters of features from the ``FileStorage`` node.
 
@@ -60,7 +60,7 @@ FeatureEvaluator::getFeatureType
 
 FeatureEvaluator::setImage
 ------------------------------
-.. cpp:function:: bool FeatureEvaluator::setImage(const Mat\& img, Size origWinSize)
+.. cpp:function:: bool FeatureEvaluator::setImage(const Mat& img, Size origWinSize)
 
     Sets an image where the features are computed.
 
@@ -193,7 +193,7 @@ The cascade classifier class for object detection ::
 
 CascadeClassifier::CascadeClassifier
 ----------------------------------------
-.. cpp:function:: CascadeClassifier::CascadeClassifier(const string\& filename)
+.. cpp:function:: CascadeClassifier::CascadeClassifier(const string& filename)
 
     Loads a classifier from a file.
 
@@ -211,7 +211,7 @@ CascadeClassifier::empty
 
 CascadeClassifier::load
 ---------------------------
-.. cpp:function:: bool CascadeClassifier::load(const string\& filename)
+.. cpp:function:: bool CascadeClassifier::load(const string& filename)
 
     Loads a classifier from a file. The previous content is destroyed.
 
@@ -221,7 +221,7 @@ CascadeClassifier::load
 
 CascadeClassifier::read
 ---------------------------
-.. cpp:function:: bool CascadeClassifier::read(const FileNode\& node)
+.. cpp:function:: bool CascadeClassifier::read(const FileNode& node)
 
     Reads a classifier from a FileStorage node. The file may contain a new cascade classifier (trained by the traincascade application) only.
 
@@ -229,7 +229,7 @@ CascadeClassifier::read
 
 CascadeClassifier::detectMultiScale
 ---------------------------------------
-.. cpp:function:: void CascadeClassifier::detectMultiScale( const Mat\& image, vector<Rect>\& objects, double scaleFactor=1.1, int minNeighbors=3, int flags=0, Size minSize=Size())
+.. cpp:function:: void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& objects, double scaleFactor=1.1, int minNeighbors=3, int flags=0, Size minSize=Size())
 
     Detects objects of different sizes in the input image.
The detected objects are returned as a list of rectangles. @@ -249,7 +249,7 @@ CascadeClassifier::detectMultiScale CascadeClassifier::setImage ------------------------------- -.. cpp:function:: bool CascadeClassifier::setImage( Ptr\& feval, const Mat\& image ) +.. cpp:function:: bool CascadeClassifier::setImage( Ptr& feval, const Mat& image ) Sets an image for detection, which is called by ``detectMultiScale`` at each image level. @@ -261,7 +261,7 @@ CascadeClassifier::setImage CascadeClassifier::runAt ---------------------------- -.. cpp:function:: int CascadeClassifier::runAt( Ptr\& feval, Point pt ) +.. cpp:function:: int CascadeClassifier::runAt( Ptr& feval, Point pt ) Runs the detector at the specified point. Use ``setImage`` to set the image that the detector is working with. @@ -276,7 +276,7 @@ Otherwise, it returns ``si``, which is an index of the stage that first predicte groupRectangles ------------------- -.. cpp:function:: void groupRectangles(vector\& rectList, int groupThreshold, double eps=0.2) +.. cpp:function:: void groupRectangles(vector& rectList, int groupThreshold, double eps=0.2) Groups the object candidate rectangles. diff --git a/modules/python/src2/hdr_parser.py b/modules/python/src2/hdr_parser.py index b5a84a6..c0bfdaf 100755 --- a/modules/python/src2/hdr_parser.py +++ b/modules/python/src2/hdr_parser.py @@ -270,30 +270,41 @@ class CppHeaderParser(object): if fname.endswith("operator"): fname += "()" apos = fdecl.find("(", apos+1) - args0 = fdecl[apos+1:fdecl.rfind(")")].strip().split(",") - - args = [] - narg = "" - for arg in args0: - narg += arg.strip() - balance_paren = narg.count("(") - narg.count(")") - balance_angle = narg.count("<") - narg.count(">") - if balance_paren == 0 and balance_angle == 0: - args.append(narg.strip()) - narg = "" + fname = "cv." 
+ fname.replace("::", ".") decl = [fname, rettype, [], []] + args0str = fdecl[apos+1:fdecl.rfind(")")].strip() - for arg in args: - dfpos = arg.find("=") - defval = "" - if dfpos >= 0: - defval = arg[dfpos+1:].strip() - arg = arg[:dfpos].strip() - pos = arg.rfind(" ") - aname = arg[pos+1:] - atype = arg[:pos] - decl[3].append([atype, aname, defval, []]) + if args0str != "": + args0 = args0str.split(",") + + args = [] + narg = "" + for arg in args0: + narg += arg.strip() + balance_paren = narg.count("(") - narg.count(")") + balance_angle = narg.count("<") - narg.count(">") + if balance_paren == 0 and balance_angle == 0: + args.append(narg.strip()) + narg = "" + + for arg in args: + dfpos = arg.find("=") + defval = "" + if dfpos >= 0: + defval = arg[dfpos+1:].strip() + arg = arg[:dfpos].strip() + pos = arg.rfind(" ") + if pos >= 0: + aname = arg[pos+1:].strip() + atype = arg[:pos].strip() + if aname.endswith("&") or aname.endswith("*") or (aname in ["int", "string", "Mat"]): + atype = (atype + " " + aname).strip() + aname = "param" + else: + atype = arg + aname = "param" + decl[3].append([atype, aname, defval, []]) return decl diff --git a/samples/cpp/em.cpp b/samples/cpp/em.cpp new file mode 100644 index 0000000..468993f --- /dev/null +++ b/samples/cpp/em.cpp @@ -0,0 +1,94 @@ +#include "opencv2/ml/ml.hpp" +#include "opencv2/highgui/highgui.hpp" + +using namespace cv; + +int main( int argc, char** argv ) +{ + const int N = 4; + const int N1 = (int)sqrt((double)N); + const Scalar colors[] = + { + Scalar(0,0,255), Scalar(0,255,0), + Scalar(0,255,255),Scalar(255,255,0) + }; + + int i, j; + int nsamples = 100; + Mat samples( nsamples, 2, CV_32FC1 ); + Mat labels; + Mat img = Mat::zeros( Size( 500, 500 ), CV_8UC3 ); + Mat sample( 1, 2, CV_32FC1 ); + CvEM em_model; + CvEMParams params; + + samples = samples.reshape(2, 0); + for( i = 0; i < N; i++ ) + { + // form the training samples + Mat samples_part = samples.rowRange(i*nsamples/N, (i+1)*nsamples/N ); + + Scalar mean(((i%N1)+1)*img.rows/(N1+1), + ((i/N1)+1)*img.rows/(N1+1)); + Scalar sigma(30,30); + randn( samples_part, mean, sigma ); + } + samples = samples.reshape(1, 0); + + // initialize model parameters + params.covs = NULL; + params.means = NULL; + params.weights = NULL; + params.probs = NULL; + params.nclusters = N; + params.cov_mat_type = CvEM::COV_MAT_SPHERICAL; + params.start_step = CvEM::START_AUTO_STEP; + params.term_crit.max_iter = 300; + params.term_crit.epsilon = 0.1; + params.term_crit.type = CV_TERMCRIT_ITER|CV_TERMCRIT_EPS; + + // cluster the data + em_model.train( samples, Mat(), params, &labels ); + +#if 0 + // the piece of code shows how to repeatedly optimize the model + // with less-constrained parameters + //(COV_MAT_DIAGONAL instead of COV_MAT_SPHERICAL) + // when the output of the first stage is used as input for the second one. 
+    CvEM em_model2;
+    params.cov_mat_type = CvEM::COV_MAT_DIAGONAL;
+    params.start_step = CvEM::START_E_STEP;
+    params.means = em_model.get_means();
+    params.covs = (const CvMat**)em_model.get_covs();
+    params.weights = em_model.get_weights();
+
+    em_model2.train( samples, Mat(), params, &labels );
+    // to use em_model2, replace em_model.predict()
+    // with em_model2.predict() below
+#endif
+    // classify every image pixel
+    for( i = 0; i < img.rows; i++ )
+    {
+        for( j = 0; j < img.cols; j++ )
+        {
+            sample.at<float>(0) = (float)j;
+            sample.at<float>(1) = (float)i;
+            int response = cvRound(em_model.predict( sample ));
+            Scalar c = colors[response];
+
+            circle( img, Point(j, i), 1, c*0.75, CV_FILLED );
+        }
+    }
+
+    //draw the clustered samples
+    for( i = 0; i < nsamples; i++ )
+    {
+        Point pt(cvRound(samples.at<float>(i, 0)), cvRound(samples.at<float>(i, 1)));
+        circle( img, pt, 1, colors[labels.at<int>(i)], CV_FILLED );
+    }
+
+    imshow( "EM-clustering result", img );
+    waitKey(0);
+
+    return 0;
+}
-- 
2.7.4