From 78329b0dfe865c2eaeb6f8d398b369d22c34942a Mon Sep 17 00:00:00 2001
From: Andrey Kamaev
Date: Mon, 28 May 2012 14:36:15 +0000
Subject: [PATCH] New bunch of documentation fixes

---
 doc/check_docs2.py                                  | 76 +++++++++----
 .../camera_calibration_and_3d_reconstruction.rst    | 113 --------------------
 modules/contrib/doc/contrib.rst                     | 5 +
 modules/contrib/doc/stereo.rst                      | 117 +++++++++++++++++++++
 modules/core/doc/old_basic_structures.rst           | 2 +-
 modules/core/include/opencv2/core/types_c.h         | 2 +-
 .../doc/common_interfaces_of_feature_detectors.rst  | 6 +-
 modules/gpu/doc/video.rst                           | 60 +++++------
 .../common_interfaces_of_descriptor_extractors.rst  | 2 +-
 modules/ml/doc/expectation_maximization.rst         | 1 +
 .../include/opencv2/objdetect/objdetect.hpp         | 90 ++++++++--------
 modules/python/src2/hdr_parser.py                   | 33 ++++--
 modules/stitching/doc/exposure_compensation.rst     | 22 ++--
 modules/stitching/doc/motion_estimation.rst         | 1 -
 modules/stitching/doc/warpers.rst                   | 26 ++---
 .../include/opencv2/stitching/detail/warpers.hpp    | 44 ++++----
 16 files changed, 328 insertions(+), 272 deletions(-)
 create mode 100644 modules/contrib/doc/stereo.rst

diff --git a/doc/check_docs2.py b/doc/check_docs2.py
index f1ddf7e..7a97e38 100644
--- a/doc/check_docs2.py
+++ b/doc/check_docs2.py
@@ -20,10 +20,29 @@ ERROR_006_INVALIDPYOLDDOC = 6
 ERROR_007_INVALIDPYDOC = 7
 ERROR_008_CFUNCISNOTGLOBAL = 8
 ERROR_009_OVERLOADNOTFOUND = 9
+ERROR_010_UNKNOWNCLASS = 10
+ERROR_011_UNKNOWNFUNC = 11
 do_python_crosscheck = True
 errors_disabled = [ERROR_004_MISSEDNAMESPACE]
+doc_signatures_whitelist = [
+# templates
+"Matx", "Vec", "SparseMat_", "Scalar_", "Mat_", "Ptr", "Size_", "Point_", "Rect_", "Point3_",
+"DataType", "detail::RotationWarperBase", "flann::Index_", "CalonderDescriptorExtractor",
+# the following classes reside in core but documented in gpu.
It's no good +"gpu::DevMem2D_", "gpu::PtrStep_", "gpu::PtrElemStep_", +# these are even non-template +"gpu::DeviceInfo", "gpu::GpuMat", "gpu::TargetArchs", "gpu::FeatureSet", +# black boxes +"CvArr", "CvFileStorage"] + +synonims = { + "StarDetector" : ["StarFeatureDetector"], + "MSER" : ["MserFeatureDetector"], + "GFTTDetector" : ["GoodFeaturesToTrackDetector"] +} + if do_python_crosscheck: try: import cv2 @@ -185,24 +204,28 @@ def process_module(module, path): if name.startswith("cv."): name = name[3:] name = name.replace(".", "::") - doc = rst.get(name) - if not doc: - #TODO: class is not documented - continue - doc[DOCUMENTED_MARKER] = True - # verify class marker - if not doc.get("isclass"): - logerror(ERROR_001_NOTACLASS, "class " + name + " is not marked as \"class\" in documentation", doc) - else: - # verify base - signature = doc.get("class", "") - signature = signature.replace(", public ", " ").replace(" public ", " ") - signature = signature.replace(", protected ", " ").replace(" protected ", " ") - signature = signature.replace(", private ", " ").replace(" private ", " ") - signature = ("class " + signature).strip() - hdrsignature = (cl[0] + " " + cl[1]).replace("class cv.", "class ").replace(".", "::").strip() - if signature != hdrsignature: - logerror(ERROR_003_INCORRECTBASE, "invalid base class documentation\ndocumented: " + signature + "\nactual: " + hdrsignature, doc) + sns = synonims.get(name, []) + sns.append(name) + for name in sns: + doc = rst.get(name) + if not doc: + #TODO: class is not documented + continue + doc[DOCUMENTED_MARKER] = True + # verify class marker + if not doc.get("isclass"): + logerror(ERROR_001_NOTACLASS, "class " + name + " is not marked as \"class\" in documentation", doc) + else: + # verify base + signature = doc.get("class", "") + signature = signature.replace(", public ", " ").replace(" public ", " ") + signature = signature.replace(", protected ", " ").replace(" protected ", " ") + signature = signature.replace(", private ", " ").replace(" private ", " ") + signature = ("class " + signature).strip() + #hdrsignature = (cl[0] + " " + cl[1]).replace("class cv.", "class ").replace(".", "::").strip() + hdrsignature = ("class " + name + " " + cl[1]).replace(".", "::").strip() + if signature != hdrsignature: + logerror(ERROR_003_INCORRECTBASE, "invalid base class documentation\ndocumented: " + signature + "\nactual: " + hdrsignature, doc) # process structs for st in structs: @@ -231,6 +254,7 @@ def process_module(module, path): hdrsignature = (st[0] + " " + st[1]).replace("struct cv.", "struct ").replace(".", "::").strip() if signature != hdrsignature: logerror(ERROR_003_INCORRECTBASE, "invalid base struct documentation\ndocumented: " + signature + "\nactual: " + hdrsignature, doc) + print st, doc # process functions and methods flookup = {} @@ -362,7 +386,6 @@ def process_module(module, path): else: signature.append(DOCUMENTED_MARKER) - #build dictionary for functions lookup # verify C/C++ signatures for name, doc in rst.iteritems(): decls = doc.get("decls") @@ -399,10 +422,17 @@ def process_module(module, path): if signature[-1] != DOCUMENTED_MARKER: candidates = "\n\t".join([formatSignature(f[3]) for f in fd]) logerror(ERROR_009_OVERLOADNOTFOUND, signature[0] + " function " + signature[2][0].replace(".","::") + " is documented but misses in headers (" + error + ").\nDocumented as:\n\t" + signature[1] + "\nCandidates are:\n\t" + candidates, doc) - #print hdrlist - #for d in decls: - # print d - #print rstparser.definitions + + # verify that all 
signatures was found in the library headers + for name, doc in rst.iteritems(): + # if doc.get(DOCUMENTED_MARKER, False): + # continue # this class/struct was found + if not doc.get(DOCUMENTED_MARKER, False) and (doc.get("isclass", False) or doc.get("isstruct", False)): + if name in doc_signatures_whitelist: + continue + logerror(ERROR_010_UNKNOWNCLASS, "class/struct " + name + " is mentioned in documentation but is not found in OpenCV headers", doc) + #for signature in decls: + # end of process_module if __name__ == "__main__": if len(sys.argv) < 2: diff --git a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst index 8f04eb6..77067e9 100644 --- a/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst +++ b/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.rst @@ -1231,119 +1231,6 @@ The method executes the SGBM algorithm on a rectified stereo pair. See ``stereo_ .. note:: The method is not constant, so you should not use the same ``StereoSGBM`` instance from different threads simultaneously. -StereoVar ----------- - -.. ocv:class:: StereoVar - -Class for computing stereo correspondence using the variational matching algorithm :: - - class StereoVar - { - StereoVar(); - StereoVar( int levels, double pyrScale, - int nIt, int minDisp, int maxDisp, - int poly_n, double poly_sigma, float fi, - float lambda, int penalization, int cycle, - int flags); - virtual ~StereoVar(); - - virtual void operator()(InputArray left, InputArray right, OutputArray disp); - - int levels; - double pyrScale; - int nIt; - int minDisp; - int maxDisp; - int poly_n; - double poly_sigma; - float fi; - float lambda; - int penalization; - int cycle; - int flags; - - ... - }; - -The class implements the modified S. G. Kosov algorithm [Publication] that differs from the original one as follows: - - * The automatic initialization of method's parameters is added. - - * The method of Smart Iteration Distribution (SID) is implemented. - - * The support of Multi-Level Adaptation Technique (MLAT) is not included. - - * The method of dynamic adaptation of method's parameters is not included. - -StereoVar::StereoVar --------------------------- - -.. ocv:function:: StereoVar::StereoVar() - -.. ocv:function:: StereoVar::StereoVar( int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags ) - - The constructor - - :param levels: The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used. This parameter is ignored if flag USE_AUTO_PARAMS is set. - - :param pyrScale: Specifies the image scale (<1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is twice smaller than the previous. (This parameter is ignored if flag USE_AUTO_PARAMS is set). - - :param nIt: The number of iterations the algorithm does at each pyramid level. (If the flag USE_SMART_ID is set, the number of iterations will be redistributed in such a way, that more iterations will be done on more coarser levels.) - - :param minDisp: Minimum possible disparity value. Could be negative in case the left and right input images change places. - - :param maxDisp: Maximum possible disparity value. - - :param poly_n: Size of the pixel neighbourhood used to find polynomial expansion in each pixel. 
The larger values mean that the image will be approximated with smoother surfaces, yielding more robust algorithm and more blurred motion field. Typically, poly_n = 3, 5 or 7 - - :param poly_sigma: Standard deviation of the Gaussian that is used to smooth derivatives that are used as a basis for the polynomial expansion. For poly_n=5 you can set poly_sigma=1.1 , for poly_n=7 a good value would be poly_sigma=1.5 - - :param fi: The smoothness parameter, ot the weight coefficient for the smoothness term. - - :param lambda: The threshold parameter for edge-preserving smoothness. (This parameter is ignored if PENALIZATION_CHARBONNIER or PENALIZATION_PERONA_MALIK is used.) - - :param penalization: Possible values: PENALIZATION_TICHONOV - linear smoothness; PENALIZATION_CHARBONNIER - non-linear edge preserving smoothness; PENALIZATION_PERONA_MALIK - non-linear edge-enhancing smoothness. (This parameter is ignored if flag USE_AUTO_PARAMS is set). - - :param cycle: Type of the multigrid cycle. Possible values: CYCLE_O and CYCLE_V for null- and v-cycles respectively. (This parameter is ignored if flag USE_AUTO_PARAMS is set). - - :param flags: The operation flags; can be a combination of the following: - - * USE_INITIAL_DISPARITY: Use the input flow as the initial flow approximation. - - * USE_EQUALIZE_HIST: Use the histogram equalization in the pre-processing phase. - - * USE_SMART_ID: Use the smart iteration distribution (SID). - - * USE_AUTO_PARAMS: Allow the method to initialize the main parameters. - - * USE_MEDIAN_FILTERING: Use the median filer of the solution in the post processing phase. - -The first constructor initializes ``StereoVar`` with all the default parameters. So, you only have to set ``StereoVar::maxDisp`` and / or ``StereoVar::minDisp`` at minimum. The second constructor enables you to set each parameter to a custom value. - - - -StereoVar::operator () ------------------------ - -.. ocv:function:: void StereoVar::operator()(InputArray left, InputArray right, OutputArray disp) - - Computes disparity using the variational algorithm for a rectified stereo pair. - - :param left: Left 8-bit single-channel or 3-channel image. - - :param right: Right image of the same size and the same type as the left one. - - :param disp: Output disparity map. It is a 8-bit signed single-channel image of the same size as the input image. - -The method executes the variational algorithm on a rectified stereo pair. See ``stereo_match.cpp`` OpenCV sample on how to prepare images and call the method. - -**Note**: - -The method is not constant, so you should not use the same ``StereoVar`` instance from different threads simultaneously. - - - stereoCalibrate ------------------- Calibrates the stereo camera. diff --git a/modules/contrib/doc/contrib.rst b/modules/contrib/doc/contrib.rst index 1bb31ec..1e81b2e 100644 --- a/modules/contrib/doc/contrib.rst +++ b/modules/contrib/doc/contrib.rst @@ -3,3 +3,8 @@ contrib. Contributed/Experimental Stuff *************************************** The module contains some recently added functionality that has not been stabilized, or functionality that is considered optional. + +.. toctree:: + :maxdepth: 2 + + stereo \ No newline at end of file diff --git a/modules/contrib/doc/stereo.rst b/modules/contrib/doc/stereo.rst new file mode 100644 index 0000000..d5f2d00 --- /dev/null +++ b/modules/contrib/doc/stereo.rst @@ -0,0 +1,117 @@ +Stereo Correspondence +======================================== + +.. highlight:: cpp + +StereoVar +---------- + +.. 
ocv:class:: StereoVar + +Class for computing stereo correspondence using the variational matching algorithm :: + + class StereoVar + { + StereoVar(); + StereoVar( int levels, double pyrScale, + int nIt, int minDisp, int maxDisp, + int poly_n, double poly_sigma, float fi, + float lambda, int penalization, int cycle, + int flags); + virtual ~StereoVar(); + + virtual void operator()(InputArray left, InputArray right, OutputArray disp); + + int levels; + double pyrScale; + int nIt; + int minDisp; + int maxDisp; + int poly_n; + double poly_sigma; + float fi; + float lambda; + int penalization; + int cycle; + int flags; + + ... + }; + +The class implements the modified S. G. Kosov algorithm [Publication] that differs from the original one as follows: + + * The automatic initialization of method's parameters is added. + + * The method of Smart Iteration Distribution (SID) is implemented. + + * The support of Multi-Level Adaptation Technique (MLAT) is not included. + + * The method of dynamic adaptation of method's parameters is not included. + +StereoVar::StereoVar +-------------------------- + +.. ocv:function:: StereoVar::StereoVar() + +.. ocv:function:: StereoVar::StereoVar( int levels, double pyrScale, int nIt, int minDisp, int maxDisp, int poly_n, double poly_sigma, float fi, float lambda, int penalization, int cycle, int flags ) + + The constructor + + :param levels: The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used. This parameter is ignored if flag USE_AUTO_PARAMS is set. + + :param pyrScale: Specifies the image scale (<1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is twice smaller than the previous. (This parameter is ignored if flag USE_AUTO_PARAMS is set). + + :param nIt: The number of iterations the algorithm does at each pyramid level. (If the flag USE_SMART_ID is set, the number of iterations will be redistributed in such a way, that more iterations will be done on more coarser levels.) + + :param minDisp: Minimum possible disparity value. Could be negative in case the left and right input images change places. + + :param maxDisp: Maximum possible disparity value. + + :param poly_n: Size of the pixel neighbourhood used to find polynomial expansion in each pixel. The larger values mean that the image will be approximated with smoother surfaces, yielding more robust algorithm and more blurred motion field. Typically, poly_n = 3, 5 or 7 + + :param poly_sigma: Standard deviation of the Gaussian that is used to smooth derivatives that are used as a basis for the polynomial expansion. For poly_n=5 you can set poly_sigma=1.1 , for poly_n=7 a good value would be poly_sigma=1.5 + + :param fi: The smoothness parameter, ot the weight coefficient for the smoothness term. + + :param lambda: The threshold parameter for edge-preserving smoothness. (This parameter is ignored if PENALIZATION_CHARBONNIER or PENALIZATION_PERONA_MALIK is used.) + + :param penalization: Possible values: PENALIZATION_TICHONOV - linear smoothness; PENALIZATION_CHARBONNIER - non-linear edge preserving smoothness; PENALIZATION_PERONA_MALIK - non-linear edge-enhancing smoothness. (This parameter is ignored if flag USE_AUTO_PARAMS is set). + + :param cycle: Type of the multigrid cycle. Possible values: CYCLE_O and CYCLE_V for null- and v-cycles respectively. (This parameter is ignored if flag USE_AUTO_PARAMS is set). 
+ + :param flags: The operation flags; can be a combination of the following: + + * USE_INITIAL_DISPARITY: Use the input flow as the initial flow approximation. + + * USE_EQUALIZE_HIST: Use the histogram equalization in the pre-processing phase. + + * USE_SMART_ID: Use the smart iteration distribution (SID). + + * USE_AUTO_PARAMS: Allow the method to initialize the main parameters. + + * USE_MEDIAN_FILTERING: Use the median filer of the solution in the post processing phase. + +The first constructor initializes ``StereoVar`` with all the default parameters. So, you only have to set ``StereoVar::maxDisp`` and / or ``StereoVar::minDisp`` at minimum. The second constructor enables you to set each parameter to a custom value. + + + +StereoVar::operator () +----------------------- + +.. ocv:function:: void StereoVar::operator()( const Mat& left, const Mat& right, Mat& disp ) + + Computes disparity using the variational algorithm for a rectified stereo pair. + + :param left: Left 8-bit single-channel or 3-channel image. + + :param right: Right image of the same size and the same type as the left one. + + :param disp: Output disparity map. It is a 8-bit signed single-channel image of the same size as the input image. + +The method executes the variational algorithm on a rectified stereo pair. See ``stereo_match.cpp`` OpenCV sample on how to prepare images and call the method. + +**Note**: + +The method is not constant, so you should not use the same ``StereoVar`` instance from different threads simultaneously. + + diff --git a/modules/core/doc/old_basic_structures.rst b/modules/core/doc/old_basic_structures.rst index c2db139..f1ce6e8 100644 --- a/modules/core/doc/old_basic_structures.rst +++ b/modules/core/doc/old_basic_structures.rst @@ -197,7 +197,7 @@ Stores coordinates of a rectangle. CvBox2D ------- +------- .. ocv:struct:: CvBox2D diff --git a/modules/core/include/opencv2/core/types_c.h b/modules/core/include/opencv2/core/types_c.h index 8c4dfcd..a2bab32 100644 --- a/modules/core/include/opencv2/core/types_c.h +++ b/modules/core/include/opencv2/core/types_c.h @@ -1130,7 +1130,7 @@ CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z ) /******************************** CvSize's & CvBox **************************************/ -typedef struct +typedef struct CvSize { int width; int height; diff --git a/modules/features2d/doc/common_interfaces_of_feature_detectors.rst b/modules/features2d/doc/common_interfaces_of_feature_detectors.rst index 0449887..b2dd055 100644 --- a/modules/features2d/doc/common_interfaces_of_feature_detectors.rst +++ b/modules/features2d/doc/common_interfaces_of_feature_detectors.rst @@ -173,7 +173,7 @@ Wrapping class for feature detection using the GoodFeaturesToTrackDetector --------------------------- -.. ocv:class:: GoodFeaturesToTrackDetector +.. ocv:class:: GoodFeaturesToTrackDetector : public FeatureDetector Wrapping class for feature detection using the :ocv:func:`goodFeaturesToTrack` function. :: @@ -211,7 +211,7 @@ Wrapping class for feature detection using the MserFeatureDetector ------------------- -.. ocv:class:: MserFeatureDetector +.. ocv:class:: MserFeatureDetector : public FeatureDetector Wrapping class for feature detection using the :ocv:class:`MSER` class. :: @@ -233,7 +233,7 @@ Wrapping class for feature detection using the StarFeatureDetector ------------------- -.. ocv:class:: StarFeatureDetector +.. ocv:class:: StarFeatureDetector : public FeatureDetector Wrapping class for feature detection using the :ocv:class:`StarDetector` class. 
:: diff --git a/modules/gpu/doc/video.rst b/modules/gpu/doc/video.rst index 3827f71..e5c1c63 100644 --- a/modules/gpu/doc/video.rst +++ b/modules/gpu/doc/video.rst @@ -310,11 +310,11 @@ gpu::VideoWriter_GPU::VideoWriter_GPU ------------------------------------- Constructors. -.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(); -.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(const std::string& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR); -.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(const std::string& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR); -.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR); -.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR); +.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU() +.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(const std::string& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR) +.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(const std::string& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR) +.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR) +.. ocv:function:: gpu::VideoWriter_GPU::VideoWriter_GPU(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR) :param fileName: Name of the output video file. Only AVI file format is supported. @@ -336,10 +336,10 @@ gpu::VideoWriter_GPU::open -------------------------- Initializes or reinitializes video writer. -.. ocv:function:: void gpu::VideoWriter_GPU::open(const std::string& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR); -.. ocv:function:: void gpu::VideoWriter_GPU::open(const std::string& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR); -.. ocv:function:: void gpu::VideoWriter_GPU::open(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR); -.. ocv:function:: void gpu::VideoWriter_GPU::open(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR); +.. ocv:function:: void gpu::VideoWriter_GPU::open(const std::string& fileName, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR) +.. ocv:function:: void gpu::VideoWriter_GPU::open(const std::string& fileName, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR) +.. ocv:function:: void gpu::VideoWriter_GPU::open(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, SurfaceFormat format = SF_BGR) +.. ocv:function:: void gpu::VideoWriter_GPU::open(const cv::Ptr& encoderCallback, cv::Size frameSize, double fps, const EncoderParams& params, SurfaceFormat format = SF_BGR) The method opens video writer. Parameters are the same as in the constructor :ocv:func:`gpu::VideoWriter_GPU::VideoWriter_GPU` . The method throws :ocv:class:`Exception` if error occurs. @@ -349,7 +349,7 @@ gpu::VideoWriter_GPU::isOpened ------------------------------ Returns true if video writer has been successfully initialized. -.. 
ocv:function:: bool gpu::VideoWriter_GPU::isOpened() const; +.. ocv:function:: bool gpu::VideoWriter_GPU::isOpened() const @@ -357,7 +357,7 @@ gpu::VideoWriter_GPU::close --------------------------- Releases the video writer. -.. ocv:function:: void gpu::VideoWriter_GPU::close(); +.. ocv:function:: void gpu::VideoWriter_GPU::close() @@ -365,7 +365,7 @@ gpu::VideoWriter_GPU::write --------------------------- Writes the next video frame. -.. ocv:function:: void gpu::VideoWriter_GPU::write(const cv::gpu::GpuMat& image, bool lastFrame = false); +.. ocv:function:: void gpu::VideoWriter_GPU::write(const cv::gpu::GpuMat& image, bool lastFrame = false) :param image: The written frame. @@ -416,7 +416,7 @@ gpu::VideoWriter_GPU::EncoderParams::EncoderParams -------------------------------------------------- Constructors. -.. ocv:function:: gpu::VideoWriter_GPU::EncoderParams::EncoderParams(); +.. ocv:function:: gpu::VideoWriter_GPU::EncoderParams::EncoderParams() .. ocv:function:: gpu::VideoWriter_GPU::EncoderParams::EncoderParams(const std::string& configFile) :param configFile: Config file name. @@ -519,7 +519,6 @@ Class for reading video from files. gpu::VideoReader_GPU::Codec --------------------------- -.. ocv:class:: gpu::VideoReader_GPU::Codec Video codecs supported by ocv:class:`gpu::VideoReader_GPU` . :: @@ -545,7 +544,6 @@ Video codecs supported by ocv:class:`gpu::VideoReader_GPU` . :: gpu::VideoReader_GPU::ChromaFormat ---------------------------------- -.. ocv:class:: gpu::VideoReader_GPU::ChromaFormat Chroma formats supported by ocv:class:`gpu::VideoReader_GPU` . :: @@ -579,9 +577,9 @@ gpu::VideoReader_GPU::VideoReader_GPU ------------------------------------- Constructors. -.. ocv:function:: gpu::VideoReader_GPU::VideoReader_GPU(); -.. ocv:function:: gpu::VideoReader_GPU::VideoReader_GPU(const std::string& filename); -.. ocv:function:: gpu::VideoReader_GPU::VideoReader_GPU(const cv::Ptr& source); +.. ocv:function:: gpu::VideoReader_GPU::VideoReader_GPU() +.. ocv:function:: gpu::VideoReader_GPU::VideoReader_GPU(const std::string& filename) +.. ocv:function:: gpu::VideoReader_GPU::VideoReader_GPU(const cv::Ptr& source) :param filename: Name of the input video file. @@ -595,8 +593,8 @@ gpu::VideoReader_GPU::open -------------------------- Initializes or reinitializes video reader. -.. ocv:function:: void gpu::VideoReader_GPU::open(const std::string& filename); -.. ocv:function:: void gpu::VideoReader_GPU::open(const cv::Ptr& source); +.. ocv:function:: void gpu::VideoReader_GPU::open(const std::string& filename) +.. ocv:function:: void gpu::VideoReader_GPU::open(const cv::Ptr& source) The method opens video reader. Parameters are the same as in the constructor :ocv:func:`gpu::VideoReader_GPU::VideoReader_GPU` . The method throws :ocv:class:`Exception` if error occurs. @@ -606,7 +604,7 @@ gpu::VideoReader_GPU::isOpened ------------------------------ Returns true if video reader has been successfully initialized. -.. ocv:function:: bool gpu::VideoReader_GPU::isOpened() const; +.. ocv:function:: bool gpu::VideoReader_GPU::isOpened() const @@ -614,7 +612,7 @@ gpu::VideoReader_GPU::close --------------------------- Releases the video reader. -.. ocv:function:: void gpu::VideoReader_GPU::close(); +.. ocv:function:: void gpu::VideoReader_GPU::close() @@ -622,7 +620,7 @@ gpu::VideoReader_GPU::read -------------------------- Grabs, decodes and returns the next video frame. -.. ocv:function:: bool gpu::VideoReader_GPU::read(GpuMat& image); +.. 
ocv:function:: bool gpu::VideoReader_GPU::read(GpuMat& image) If no frames has been grabbed (there are no more frames in video file), the methods return ``false`` . The method throws :ocv:class:`Exception` if error occurs. @@ -632,7 +630,7 @@ gpu::VideoReader_GPU::format ---------------------------- Returns information about video file format. -.. ocv:function:: FormatInfo gpu::VideoReader_GPU::format() const; +.. ocv:function:: FormatInfo gpu::VideoReader_GPU::format() const The method throws :ocv:class:`Exception` if video reader wasn't initialized. @@ -642,7 +640,7 @@ gpu::VideoReader_GPU::dumpFormat ---------------------------- Dump information about video file format to specified stream. -.. ocv:function:: void gpu::VideoReader_GPU::dumpFormat(std::ostream& st); +.. ocv:function:: void gpu::VideoReader_GPU::dumpFormat(std::ostream& st) :param st: Output stream. @@ -680,7 +678,7 @@ gpu::VideoReader_GPU::VideoSource::format ----------------------------------------- Returns information about video file format. -.. ocv:function:: virtual FormatInfo gpu::VideoReader_GPU::VideoSource::format() const = 0; +.. ocv:function:: virtual FormatInfo gpu::VideoReader_GPU::VideoSource::format() const = 0 @@ -688,7 +686,7 @@ gpu::VideoReader_GPU::VideoSource::start ---------------------------------------- Starts processing. -.. ocv:function:: virtual void gpu::VideoReader_GPU::VideoSource::start() = 0; +.. ocv:function:: virtual void gpu::VideoReader_GPU::VideoSource::start() = 0 Implementation must create own thread with video processing and call periodic :ocv:func:`gpu::VideoReader_GPU::VideoSource::parseVideoData` . @@ -698,7 +696,7 @@ gpu::VideoReader_GPU::VideoSource::stop --------------------------------------- Stops processing. -.. ocv:function:: virtual void gpu::VideoReader_GPU::VideoSource::stop() = 0; +.. ocv:function:: virtual void gpu::VideoReader_GPU::VideoSource::stop() = 0 @@ -706,7 +704,7 @@ gpu::VideoReader_GPU::VideoSource::isStarted -------------------------------------------- Returns ``true`` if processing was successfully started. -.. ocv:function:: virtual bool gpu::VideoReader_GPU::VideoSource::isStarted() const = 0; +.. ocv:function:: virtual bool gpu::VideoReader_GPU::VideoSource::isStarted() const = 0 @@ -714,7 +712,7 @@ gpu::VideoReader_GPU::VideoSource::hasError ------------------------------------------- Returns ``true`` if error occured during processing. -.. ocv:function:: virtual bool gpu::VideoReader_GPU::VideoSource::hasError() const = 0; +.. ocv:function:: virtual bool gpu::VideoReader_GPU::VideoSource::hasError() const = 0 @@ -722,7 +720,7 @@ gpu::VideoReader_GPU::VideoSource::parseVideoData ------------------------------------------------- Parse next video frame. Implementation must call this method after new frame was grabbed. -.. ocv:function:: bool gpu::VideoReader_GPU::VideoSource::parseVideoData(const unsigned char* data, size_t size, bool endOfStream = false); +.. ocv:function:: bool gpu::VideoReader_GPU::VideoSource::parseVideoData(const unsigned char* data, size_t size, bool endOfStream = false) :param data: Pointer to frame data. Can be ``NULL`` if ``endOfStream`` if ``true`` . 
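
The video.rst hunks above only drop the trailing semicolons that the ``ocv:function`` directive does not expect; the documented ``gpu::VideoReader_GPU`` interface itself is unchanged. For orientation, a minimal decode loop assembled only from the signatures documented in this file could look like the following sketch (the input file name is a placeholder, and the class requires a CUDA-enabled OpenCV build) ::

    #include <iostream>
    #include <opencv2/gpu/gpu.hpp>

    int main()
    {
        cv::gpu::VideoReader_GPU reader("video.avi");   // placeholder file name
        if (!reader.isOpened())
            return -1;

        reader.dumpFormat(std::cout);                   // dump codec / chroma format info

        cv::gpu::GpuMat d_frame;
        cv::Mat frame;
        while (reader.read(d_frame))                    // returns false once the file is exhausted
            d_frame.download(frame);                    // copy the decoded frame to host memory

        reader.close();
        return 0;
    }

The matching ``gpu::VideoWriter_GPU`` calls documented above (``open``, ``write``, ``close``) follow the same open/process/close pattern.
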
diff --git a/modules/legacy/doc/common_interfaces_of_descriptor_extractors.rst b/modules/legacy/doc/common_interfaces_of_descriptor_extractors.rst index 2e7520c2..dc5d81d 100644 --- a/modules/legacy/doc/common_interfaces_of_descriptor_extractors.rst +++ b/modules/legacy/doc/common_interfaces_of_descriptor_extractors.rst @@ -13,7 +13,7 @@ descriptor extractors inherit the CalonderDescriptorExtractor --------------------------- -.. ocv:class:: CalonderDescriptorExtractor +.. ocv:class:: CalonderDescriptorExtractor : public DescriptorExtractor Wrapping class for computing descriptors by using the :ocv:class:`RTreeClassifier` class. :: diff --git a/modules/ml/doc/expectation_maximization.rst b/modules/ml/doc/expectation_maximization.rst index 4bfacce..216ff51 100644 --- a/modules/ml/doc/expectation_maximization.rst +++ b/modules/ml/doc/expectation_maximization.rst @@ -206,4 +206,5 @@ See :ocv:func:`Algorithm::get` and :ocv:func:`Algorithm::set`. The following par * ``"weights"`` *(read-only)* * ``"means"`` *(read-only)* * ``"covs"`` *(read-only)* + .. diff --git a/modules/objdetect/include/opencv2/objdetect/objdetect.hpp b/modules/objdetect/include/opencv2/objdetect/objdetect.hpp index 963f7dc..1b87943 100644 --- a/modules/objdetect/include/opencv2/objdetect/objdetect.hpp +++ b/modules/objdetect/include/opencv2/objdetect/objdetect.hpp @@ -137,7 +137,7 @@ CVAPI(void) cvReleaseHaarClassifierCascade( CvHaarClassifierCascade** cascade ); CVAPI(CvSeq*) cvHaarDetectObjects( const CvArr* image, - CvHaarClassifierCascade* cascade, CvMemStorage* storage, + CvHaarClassifierCascade* cascade, CvMemStorage* storage, double scale_factor CV_DEFAULT(1.1), int min_neighbors CV_DEFAULT(3), int flags CV_DEFAULT(0), CvSize min_size CV_DEFAULT(cvSize(0,0)), CvSize max_size CV_DEFAULT(cvSize(0,0))); @@ -160,7 +160,7 @@ CVAPI(int) cvRunHaarClassifierCascade( const CvHaarClassifierCascade* cascade, // Structure describes the position of the filter in the feature pyramid // l - level in the feature pyramid // (x, y) - coordinate in level l -typedef struct +typedef struct CvLSVMFilterPosition { int x; int y; @@ -174,14 +174,14 @@ typedef struct // penaltyFunction - vector describes penalty function (d_i in the paper) // pf[0] * x + pf[1] * y + pf[2] * x^2 + pf[3] * y^2 // FILTER DESCRIPTION -// Rectangular map (sizeX x sizeY), +// Rectangular map (sizeX x sizeY), // every cell stores feature vector (dimension = p) // H - matrix of feature vectors -// to set and get feature vectors (i,j) +// to set and get feature vectors (i,j) // used formula H[(j * sizeX + i) * p + k], where // k - component of feature vector in cell (i, j) // END OF FILTER DESCRIPTION -typedef struct{ +typedef struct CvLSVMFilterObject{ CvLSVMFilterPosition V; float fineFunction[4]; int sizeX; @@ -192,7 +192,7 @@ typedef struct{ // data type: STRUCT CvLatentSvmDetector // structure contains internal representation of trained Latent SVM detector -// num_filters - total number of filters (root plus part) in model +// num_filters - total number of filters (root plus part) in model // num_components - number of components in model // num_part_filters - array containing number of part filters for each component // filters - root and part filters for all model components @@ -210,9 +210,9 @@ typedef struct CvLatentSvmDetector CvLatentSvmDetector; // data type: STRUCT CvObjectDetection -// structure contains the bounding box and confidence level for detected object +// structure contains the bounding box and confidence level for detected object // rect - 
bounding box for a detected object -// score - confidence level +// score - confidence level typedef struct CvObjectDetection { CvRect rect; @@ -247,28 +247,28 @@ CVAPI(CvLatentSvmDetector*) cvLoadLatentSvmDetector(const char* filename); CVAPI(void) cvReleaseLatentSvmDetector(CvLatentSvmDetector** detector); /* -// find rectangular regions in the given image that are likely +// find rectangular regions in the given image that are likely // to contain objects and corresponding confidence levels // // API -// CvSeq* cvLatentSvmDetectObjects(const IplImage* image, -// CvLatentSvmDetector* detector, -// CvMemStorage* storage, +// CvSeq* cvLatentSvmDetectObjects(const IplImage* image, +// CvLatentSvmDetector* detector, +// CvMemStorage* storage, // float overlap_threshold = 0.5f, // int numThreads = -1); // INPUT // image - image to detect objects in // detector - Latent SVM detector in internal representation -// storage - memory storage to store the resultant sequence +// storage - memory storage to store the resultant sequence // of the object candidate rectangles -// overlap_threshold - threshold for the non-maximum suppression algorithm +// overlap_threshold - threshold for the non-maximum suppression algorithm = 0.5f [here will be the reference to original paper] // OUTPUT // sequence of detected objects (bounding boxes and confidence levels stored in CvObjectDetection structures) */ -CVAPI(CvSeq*) cvLatentSvmDetectObjects(IplImage* image, - CvLatentSvmDetector* detector, - CvMemStorage* storage, +CVAPI(CvSeq*) cvLatentSvmDetectObjects(IplImage* image, + CvLatentSvmDetector* detector, + CvMemStorage* storage, float overlap_threshold CV_DEFAULT(0.5f), int numThreads CV_DEFAULT(-1)); @@ -285,7 +285,7 @@ CV_EXPORTS CvSeq* cvHaarDetectObjectsForROC( const CvArr* image, namespace cv { - + ///////////////////////////// Object Detection //////////////////////////// /* @@ -330,22 +330,22 @@ private: CV_EXPORTS void groupRectangles(CV_OUT CV_IN_OUT vector& rectList, int groupThreshold, double eps=0.2); CV_EXPORTS_W void groupRectangles(CV_OUT CV_IN_OUT vector& rectList, CV_OUT vector& weights, int groupThreshold, double eps=0.2); CV_EXPORTS void groupRectangles( vector& rectList, int groupThreshold, double eps, vector* weights, vector* levelWeights ); -CV_EXPORTS void groupRectangles(vector& rectList, vector& rejectLevels, +CV_EXPORTS void groupRectangles(vector& rectList, vector& rejectLevels, vector& levelWeights, int groupThreshold, double eps=0.2); -CV_EXPORTS void groupRectangles_meanshift(vector& rectList, vector& foundWeights, vector& foundScales, +CV_EXPORTS void groupRectangles_meanshift(vector& rectList, vector& foundWeights, vector& foundScales, double detectThreshold = 0.0, Size winDetSize = Size(64, 128)); - + class CV_EXPORTS FeatureEvaluator { -public: +public: enum { HAAR = 0, LBP = 1, HOG = 2 }; virtual ~FeatureEvaluator(); virtual bool read(const FileNode& node); virtual Ptr clone() const; virtual int getFeatureType() const; - + virtual bool setImage(const Mat& img, Size origWinSize); virtual bool setWindow(Point p); @@ -371,7 +371,7 @@ public: CV_WRAP CascadeClassifier(); CV_WRAP CascadeClassifier( const string& filename ); virtual ~CascadeClassifier(); - + CV_WRAP virtual bool empty() const; CV_WRAP bool load( const string& filename ); virtual bool read( const FileNode& node ); @@ -475,7 +475,7 @@ public: class CV_EXPORTS MaskGenerator { public: - virtual ~MaskGenerator() {} + virtual ~MaskGenerator() {} virtual cv::Mat generateMask(const cv::Mat& src)=0; virtual void 
initializeMask(const cv::Mat& /*src*/) {}; }; @@ -488,7 +488,7 @@ protected: Ptr maskGenerator; }; - + //////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector ////////////// struct CV_EXPORTS_W HOGDescriptor @@ -496,13 +496,13 @@ struct CV_EXPORTS_W HOGDescriptor public: enum { L2Hys=0 }; enum { DEFAULT_NLEVELS=64 }; - + CV_WRAP HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8), cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1), - histogramNormType(HOGDescriptor::L2Hys), L2HysThreshold(0.2), gammaCorrection(true), + histogramNormType(HOGDescriptor::L2Hys), L2HysThreshold(0.2), gammaCorrection(true), nlevels(HOGDescriptor::DEFAULT_NLEVELS) {} - + CV_WRAP HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride, Size _cellSize, int _nbins, int _derivAperture=1, double _winSigma=-1, int _histogramNormType=HOGDescriptor::L2Hys, @@ -513,28 +513,28 @@ public: histogramNormType(_histogramNormType), L2HysThreshold(_L2HysThreshold), gammaCorrection(_gammaCorrection), nlevels(_nlevels) {} - + CV_WRAP HOGDescriptor(const String& filename) { load(filename); } - + HOGDescriptor(const HOGDescriptor& d) { d.copyTo(*this); } - + virtual ~HOGDescriptor() {} - + CV_WRAP size_t getDescriptorSize() const; CV_WRAP bool checkDetectorSize() const; CV_WRAP double getWinSigma() const; - + CV_WRAP virtual void setSVMDetector(InputArray _svmdetector); - + virtual bool read(FileNode& fn); virtual void write(FileStorage& fs, const String& objname) const; - + CV_WRAP virtual bool load(const String& filename, const String& objname=String()); CV_WRAP virtual void save(const String& filename, const String& objname=String()) const; virtual void copyTo(HOGDescriptor& c) const; @@ -544,9 +544,9 @@ public: Size winStride=Size(), Size padding=Size(), const vector& locations=vector()) const; //with found weights output - CV_WRAP virtual void detect(const Mat& img, CV_OUT vector& foundLocations, + CV_WRAP virtual void detect(const Mat& img, CV_OUT vector& foundLocations, CV_OUT vector& weights, - double hitThreshold=0, Size winStride=Size(), + double hitThreshold=0, Size winStride=Size(), Size padding=Size(), const vector& searchLocations=vector()) const; //without found weights output @@ -555,22 +555,22 @@ public: Size padding=Size(), const vector& searchLocations=vector()) const; //with result weights output - CV_WRAP virtual void detectMultiScale(const Mat& img, CV_OUT vector& foundLocations, - CV_OUT vector& foundWeights, double hitThreshold=0, - Size winStride=Size(), Size padding=Size(), double scale=1.05, + CV_WRAP virtual void detectMultiScale(const Mat& img, CV_OUT vector& foundLocations, + CV_OUT vector& foundWeights, double hitThreshold=0, + Size winStride=Size(), Size padding=Size(), double scale=1.05, double finalThreshold=2.0,bool useMeanshiftGrouping = false) const; //without found weights output - virtual void detectMultiScale(const Mat& img, CV_OUT vector& foundLocations, + virtual void detectMultiScale(const Mat& img, CV_OUT vector& foundLocations, double hitThreshold=0, Size winStride=Size(), - Size padding=Size(), double scale=1.05, + Size padding=Size(), double scale=1.05, double finalThreshold=2.0, bool useMeanshiftGrouping = false) const; CV_WRAP virtual void computeGradient(const Mat& img, CV_OUT Mat& grad, CV_OUT Mat& angleOfs, Size paddingTL=Size(), Size paddingBR=Size()) const; - + CV_WRAP static vector getDefaultPeopleDetector(); CV_WRAP static vector getDaimlerPeopleDetector(); - + CV_PROP Size winSize; CV_PROP Size blockSize; CV_PROP 
Size blockStride; diff --git a/modules/python/src2/hdr_parser.py b/modules/python/src2/hdr_parser.py index b205ede..1dc9a54 100755 --- a/modules/python/src2/hdr_parser.py +++ b/modules/python/src2/hdr_parser.py @@ -609,15 +609,34 @@ class CppHeaderParser(object): return stmt_type, "", False, None if end_token == "{": + if not self.wrap_mode and stmt.startswith("typedef struct"): + stmt_type = "struct" + try: + classname, bases, modlist = self.parse_class_decl(stmt[len("typedef "):]) + except: + print "Error at %s:%d" % (self.hname, self.lineno) + exit(1) + if classname.startswith("_Ipl"): + classname = classname[1:] + decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, []] + if bases: + decl[1] = ": " + " ".join(bases) + return stmt_type, classname, True, decl + if stmt.startswith("class") or stmt.startswith("struct"): stmt_type = stmt.split()[0] - classname, bases, modlist = self.parse_class_decl(stmt) - decl = [] - if ("CV_EXPORTS_W" in stmt) or ("CV_EXPORTS_AS" in stmt) or (not self.wrap_mode):# and ("CV_EXPORTS" in stmt)): - decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, []] - if bases: - decl[1] = ": " + " ".join(bases) - return stmt_type, classname, True, decl + if stmt.strip() != stmt_type: + try: + classname, bases, modlist = self.parse_class_decl(stmt) + except: + print "Error at %s:%d" % (self.hname, self.lineno) + exit(1) + decl = [] + if ("CV_EXPORTS_W" in stmt) or ("CV_EXPORTS_AS" in stmt) or (not self.wrap_mode):# and ("CV_EXPORTS" in stmt)): + decl = [stmt_type + " " + self.get_dotted_name(classname), "", modlist, []] + if bases: + decl[1] = ": " + " ".join(bases) + return stmt_type, classname, True, decl if stmt.startswith("enum"): return "enum", "", True, None diff --git a/modules/stitching/doc/exposure_compensation.rst b/modules/stitching/doc/exposure_compensation.rst index 83acd1a..3e4e203 100644 --- a/modules/stitching/doc/exposure_compensation.rst +++ b/modules/stitching/doc/exposure_compensation.rst @@ -3,9 +3,9 @@ Exposure Compensation .. highlight:: cpp -detail::ExposureCompensation +detail::ExposureCompensator ---------------------------- -.. ocv:class:: detail::ExposureCompensation +.. ocv:class:: detail::ExposureCompensator Base class for all exposure compensators. :: @@ -24,12 +24,12 @@ Base class for all exposure compensators. :: virtual void apply(int index, Point corner, Mat &image, const Mat &mask) = 0; }; -detail::ExposureCompensation::feed +detail::ExposureCompensator::feed ---------------------------------- -.. ocv:function:: void detail::ExposureCompensation::feed(const std::vector &corners, const std::vector &images, const std::vector &masks) +.. ocv:function:: void detail::ExposureCompensator::feed(const std::vector &corners, const std::vector &images, const std::vector &masks) -.. ocv:function:: void detail::ExposureCompensation::feed(const std::vector &corners, const std::vector &images, const std::vector > &masks) +.. ocv:function:: void detail::ExposureCompensator::feed(const std::vector &corners, const std::vector &images, const std::vector > &masks) :param corners: Source image top-left corners @@ -37,12 +37,12 @@ detail::ExposureCompensation::feed :param masks: Image masks to update (second value in pair specifies the value which should be used to detect where image is) -detil::ExposureCompensation::apply +detil::ExposureCompensator::apply ---------------------------------- Compensate exposure in the specified image. -.. 
ocv:function:: void detail::ExposureCompensation::apply(int index, Point corner, Mat &image, const Mat &mask) +.. ocv:function:: void detail::ExposureCompensator::apply(int index, Point corner, Mat &image, const Mat &mask) :param index: Image index @@ -66,7 +66,7 @@ Stub exposure compensator which does nothing. :: void apply(int /*index*/, Point /*corner*/, Mat &/*image*/, const Mat &/*mask*/) {}; }; -.. seealso:: :ocv:class:`detail::ExposureCompensation` +.. seealso:: :ocv:class:`detail::ExposureCompensator` detail::GainCompensator ----------------------- @@ -86,7 +86,7 @@ Exposure compensator which tries to remove exposure related artifacts by adjusti /* hidden */ }; -.. seealso:: :ocv:class:`detail::ExposureCompensation` +.. seealso:: :ocv:class:`detail::ExposureCompensator` detail::BlocksGainCompensator ----------------------------- @@ -97,7 +97,7 @@ Exposure compensator which tries to remove exposure related artifacts by adjusti class CV_EXPORTS BlocksGainCompensator : public ExposureCompensator { public: - BlocksGainCompensator(int bl_width = 32, int bl_height = 32) + BlocksGainCompensator(int bl_width = 32, int bl_height = 32) : bl_width_(bl_width), bl_height_(bl_height) {} void feed(const std::vector &corners, const std::vector &images, const std::vector > &masks); @@ -107,5 +107,5 @@ Exposure compensator which tries to remove exposure related artifacts by adjusti /* hidden */ }; -.. seealso:: :ocv:class:`detail::ExposureCompensation` +.. seealso:: :ocv:class:`detail::ExposureCompensator` diff --git a/modules/stitching/doc/motion_estimation.rst b/modules/stitching/doc/motion_estimation.rst index 0d35f72..a58230e 100644 --- a/modules/stitching/doc/motion_estimation.rst +++ b/modules/stitching/doc/motion_estimation.rst @@ -221,7 +221,6 @@ Implementation of the camera parameters refinement algorithm which minimizes sum detail::WaveCorrectKind ----------------------- -.. ocv:class:: detail::WaveCorrectKind Wave correction kind. :: diff --git a/modules/stitching/doc/warpers.rst b/modules/stitching/doc/warpers.rst index 9db717a..410a4ad 100644 --- a/modules/stitching/doc/warpers.rst +++ b/modules/stitching/doc/warpers.rst @@ -20,7 +20,7 @@ Rotation-only model image warper interface. :: virtual Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, Mat &dst) = 0; - + virtual void warpBackward(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, Size dst_size, Mat &dst) = 0; @@ -35,7 +35,7 @@ Projects the image point. .. ocv:function:: Point2f detail::RotationWarper::warpPoint(const Point2f &pt, const Mat &K, const Mat &R) :param pt: Source point - + :param K: Camera intrinsic parameters :param R: Camera rotation matrix @@ -50,7 +50,7 @@ Builds the projection maps according to the given camera data. .. ocv:function:: Rect detail::RotationWarper::buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap) :param src_size: Source image size - + :param K: Camera intrinsic parameters :param R: Camera rotation matrix @@ -69,7 +69,7 @@ Projects the image. .. ocv:function:: Point detal::RotationWarper::warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, Mat &dst) :param src: Source image - + :param K: Camera intrinsic parameters :param R: Camera rotation matrix @@ -109,7 +109,7 @@ detail::RotationWarper::warpRoi .. 
ocv:function:: Rect detail::RotationWarper::warpRoi(Size src_size, const Mat &K, const Mat &R) :param src_size: Source image bounding box - + :param K: Camera intrinsic parameters :param R: Camera rotation matrix @@ -124,8 +124,8 @@ Base class for warping logic implementation. :: struct CV_EXPORTS ProjectorBase { - void setCameraParams(const Mat &K = Mat::eye(3, 3, CV_32F), - const Mat &R = Mat::eye(3, 3, CV_32F), + void setCameraParams(const Mat &K = Mat::eye(3, 3, CV_32F), + const Mat &R = Mat::eye(3, 3, CV_32F), const Mat &T = Mat::zeros(3, 1, CV_32F)); float scale; @@ -144,10 +144,10 @@ Base class for rotation-based warper using a `detail::ProjectorBase`_ derived cl template class CV_EXPORTS RotationWarperBase : public RotationWarper - { + { public: Point2f warpPoint(const Point2f &pt, const Mat &K, const Mat &R); - + Rect buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap); Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, @@ -232,13 +232,13 @@ Construct an instance of the spherical warper class. .. ocv:function:: void detail::SphericalWarper::SphericalWarper(float scale) :param scale: Projected image scale multiplier - + detail::CylindricalWarper ------------------------- .. ocv:class:: detail::CylindricalWarper : public RotationWarperBase Warper that maps an image onto the x*x + z*z = 1 cylinder. :: - + class CV_EXPORTS CylindricalWarper : public RotationWarperBase { public: @@ -246,8 +246,8 @@ Warper that maps an image onto the x*x + z*z = 1 cylinder. :: protected: void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) - { - RotationWarperBase::detectResultRoiByBorder(src_size, dst_tl, dst_br); + { + RotationWarperBase::detectResultRoiByBorder(src_size, dst_tl, dst_br); } }; diff --git a/modules/stitching/include/opencv2/stitching/detail/warpers.hpp b/modules/stitching/include/opencv2/stitching/detail/warpers.hpp index 091b4fd..645049c 100644 --- a/modules/stitching/include/opencv2/stitching/detail/warpers.hpp +++ b/modules/stitching/include/opencv2/stitching/detail/warpers.hpp @@ -75,8 +75,8 @@ public: struct CV_EXPORTS ProjectorBase { - void setCameraParams(const Mat &K = Mat::eye(3, 3, CV_32F), - const Mat &R = Mat::eye(3, 3, CV_32F), + void setCameraParams(const Mat &K = Mat::eye(3, 3, CV_32F), + const Mat &R = Mat::eye(3, 3, CV_32F), const Mat &T = Mat::zeros(3, 1, CV_32F)); float scale; @@ -90,10 +90,10 @@ struct CV_EXPORTS ProjectorBase template class CV_EXPORTS RotationWarperBase : public RotationWarper -{ +{ public: Point2f warpPoint(const Point2f &pt, const Mat &K, const Mat &R); - + Rect buildMaps(Size src_size, const Mat &K, const Mat &R, Mat &xmap, Mat &ymap); Point warp(const Mat &src, const Mat &K, const Mat &R, int interp_mode, int border_mode, @@ -179,8 +179,8 @@ public: protected: void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) - { - RotationWarperBase::detectResultRoiByBorder(src_size, dst_tl, dst_br); + { + RotationWarperBase::detectResultRoiByBorder(src_size, dst_tl, dst_br); } }; @@ -225,11 +225,11 @@ struct CV_EXPORTS CompressedRectilinearProjector : ProjectorBase class CV_EXPORTS CompressedRectilinearWarper : public RotationWarperBase { public: - CompressedRectilinearWarper(float scale, float A = 1, float B = 1) - { + CompressedRectilinearWarper(float scale, float A = 1, float B = 1) + { projector_.a = A; projector_.b = B; - projector_.scale = scale; + projector_.scale = scale; } }; @@ -246,11 +246,11 @@ struct CV_EXPORTS CompressedRectilinearPortraitProjector : 
ProjectorBase class CV_EXPORTS CompressedRectilinearPortraitWarper : public RotationWarperBase { public: - CompressedRectilinearPortraitWarper(float scale, float A = 1, float B = 1) - { + CompressedRectilinearPortraitWarper(float scale, float A = 1, float B = 1) + { projector_.a = A; projector_.b = B; - projector_.scale = scale; + projector_.scale = scale; } }; @@ -267,11 +267,11 @@ struct CV_EXPORTS PaniniProjector : ProjectorBase class CV_EXPORTS PaniniWarper : public RotationWarperBase { public: - PaniniWarper(float scale, float A = 1, float B = 1) - { + PaniniWarper(float scale, float A = 1, float B = 1) + { projector_.a = A; projector_.b = B; - projector_.scale = scale; + projector_.scale = scale; } }; @@ -288,11 +288,11 @@ struct CV_EXPORTS PaniniPortraitProjector : ProjectorBase class CV_EXPORTS PaniniPortraitWarper : public RotationWarperBase { public: - PaniniPortraitWarper(float scale, float A = 1, float B = 1) - { + PaniniPortraitWarper(float scale, float A = 1, float B = 1) + { projector_.a = A; projector_.b = B; - projector_.scale = scale; + projector_.scale = scale; } }; @@ -478,8 +478,8 @@ public: protected: void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) - { - RotationWarperBase::detectResultRoiByBorder(src_size, dst_tl, dst_br); + { + RotationWarperBase::detectResultRoiByBorder(src_size, dst_tl, dst_br); } }; @@ -497,8 +497,8 @@ public: protected: void detectResultRoi(Size src_size, Point &dst_tl, Point &dst_br) - { - RotationWarperBase::detectResultRoiByBorder(src_size, dst_tl, dst_br); + { + RotationWarperBase::detectResultRoiByBorder(src_size, dst_tl, dst_br); } }; -- 2.7.4
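
The warpers.rst and warpers.hpp hunks above are whitespace and cross-reference fixes; the rotation-warper interface they describe is unchanged. As a reading aid, here is a small usage sketch built only from the members documented in warpers.rst (the intrinsics ``K``, the rotation ``R``, and the scale value are illustrative placeholders, not defaults taken from the library) ::

    #include <opencv2/opencv.hpp>
    #include <opencv2/stitching/detail/warpers.hpp>

    int main()
    {
        cv::Mat src = cv::imread("image.jpg");                // placeholder input image
        cv::Mat K = (cv::Mat_<float>(3, 3) << 800, 0, 320,
                                                0, 800, 240,
                                                0,   0,   1); // placeholder camera intrinsics
        cv::Mat R = cv::Mat::eye(3, 3, CV_32F);               // placeholder camera rotation

        cv::detail::SphericalWarper warper(800.f);            // scale: projected image scale multiplier

        cv::Mat dst;
        cv::Point tl = warper.warp(src, K, R, cv::INTER_LINEAR,
                                   cv::BORDER_REFLECT, dst);  // returns the top-left corner of dst

        cv::Rect roi = warper.warpRoi(src.size(), K, R);      // bounding box of the projection only
        (void)tl; (void)roi;
        return 0;
    }

In the stitching pipeline the scale passed to the warper is commonly derived from the estimated camera focal lengths; here it is simply a made-up value.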