include(cmake/OpenCVDetectOpenCL.cmake)
endif()
+# --- Matlab/Octave ---
+include(cmake/OpenCVFindMatlab.cmake)
+
# ----------------------------------------------------------------------------
+# Add CUDA libraries (needed for apps/tools, samples)
+# ----------------------------------------------------------------------------
+if(HAVE_CUDA)
+  set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
+  if(HAVE_CUBLAS)
+    set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} ${CUDA_cublas_LIBRARY})
+  endif()
+  if(HAVE_CUFFT)
+    set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} ${CUDA_cufft_LIBRARY})
+  endif()
+endif()
+# ----------------------------------------------------------------------------
# Solution folders:
# ----------------------------------------------------------------------------
if(ENABLE_SOLUTION_FOLDERS)
+add_definitions(-D__OPENCV_BUILD=1)
+ link_libraries(${OPENCV_LINKER_LIBS})
add_subdirectory(haartraining)
add_subdirectory(traincascade)
- ocv_define_module(contrib opencv_imgproc opencv_calib3d opencv_ml opencv_video opencv_objdetect OPTIONAL opencv_highgui)
-ocv_define_module(contrib opencv_imgproc opencv_calib3d opencv_features2d opencv_ml opencv_video opencv_objdetect OPTIONAL opencv_highgui opencv_nonfree)
+ocv_define_module(contrib opencv_imgproc opencv_calib3d opencv_ml opencv_video opencv_objdetect OPTIONAL opencv_highgui opencv_nonfree)
#include "precomp.hpp"
#include <stdio.h>
#include <iostream>
-#include "opencv2/calib3d/calib3d.hpp"
+#include "opencv2/calib3d.hpp"
#include "opencv2/contrib/hybridtracker.hpp"
+#ifdef HAVE_OPENCV_NONFREE
+#include "opencv2/nonfree/nonfree.hpp"
+
+static bool makeUseOfNonfree = initModule_nonfree();
+#endif
+
using namespace cv;
CvFeatureTracker::CvFeatureTracker(CvFeatureTrackerParams _params) :
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_IMGPROC_HPP__
+#define __OPENCV_IMGPROC_HPP__
+
+#include "opencv2/core.hpp"
+
+/*! \namespace cv
+ Namespace where all the C++ OpenCV functionality resides
+ */
+namespace cv
+{
+
+//! type of the kernel
+enum { KERNEL_GENERAL      = 0, // the kernel is generic. No particular symmetry or other properties.
+ KERNEL_SYMMETRICAL = 1, // kernel[i] == kernel[ksize-i-1] , and the anchor is at the center
+ KERNEL_ASYMMETRICAL = 2, // kernel[i] == -kernel[ksize-i-1] , and the anchor is at the center
+ KERNEL_SMOOTH = 4, // all the kernel elements are non-negative and summed to 1
+ KERNEL_INTEGER = 8 // all the kernel coefficients are integer numbers
+ };
+
+//! type of morphological operation
+enum { MORPH_ERODE = 0,
+ MORPH_DILATE = 1,
+ MORPH_OPEN = 2,
+ MORPH_CLOSE = 3,
+ MORPH_GRADIENT = 4,
+ MORPH_TOPHAT = 5,
+ MORPH_BLACKHAT = 6
+ };
+
+//! shape of the structuring element
+enum { MORPH_RECT = 0,
+ MORPH_CROSS = 1,
+ MORPH_ELLIPSE = 2
+ };
+
+//! interpolation algorithm
+enum { INTER_NEAREST = 0, //!< nearest neighbor interpolation
+ INTER_LINEAR = 1, //!< bilinear interpolation
+ INTER_CUBIC = 2, //!< bicubic interpolation
+ INTER_AREA = 3, //!< area-based (or super) interpolation
+ INTER_LANCZOS4 = 4, //!< Lanczos interpolation over 8x8 neighborhood
+
+ INTER_MAX = 7, //!< mask for interpolation codes
+ WARP_INVERSE_MAP = 16
+ };
+
+enum { INTER_BITS = 5,
+ INTER_BITS2 = INTER_BITS * 2,
+ INTER_TAB_SIZE = 1 << INTER_BITS,
+ INTER_TAB_SIZE2 = INTER_TAB_SIZE * INTER_TAB_SIZE
+ };
+
+//! Distance types for Distance Transform and M-estimators
+enum { DIST_USER    = -1,  // User defined distance
+       DIST_L1      = 1,   // distance = |x1-x2| + |y1-y2|
+       DIST_L2      = 2,   // the simple Euclidean distance
+       DIST_C       = 3,   // distance = max(|x1-x2|,|y1-y2|)
+       DIST_L12     = 4,   // L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)
+       DIST_FAIR    = 5,   // distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998
+       DIST_WELSCH  = 6,   // distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846
+       DIST_HUBER   = 7    // distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345
+};
+
+//! Mask size for distance transform
+enum { DIST_MASK_3 = 3,
+ DIST_MASK_5 = 5,
+ DIST_MASK_PRECISE = 0
+ };
+
+//! type of the threshold operation
+enum { THRESH_BINARY = 0, // value = value > threshold ? max_value : 0
+ THRESH_BINARY_INV = 1, // value = value > threshold ? 0 : max_value
+ THRESH_TRUNC = 2, // value = value > threshold ? threshold : value
+ THRESH_TOZERO = 3, // value = value > threshold ? value : 0
+ THRESH_TOZERO_INV = 4, // value = value > threshold ? 0 : value
+ THRESH_MASK = 7,
+ THRESH_OTSU = 8 // use Otsu algorithm to choose the optimal threshold value
+ };
+
+//! adaptive threshold algorithm
+enum { ADAPTIVE_THRESH_MEAN_C = 0,
+ ADAPTIVE_THRESH_GAUSSIAN_C = 1
+ };
+
+enum { PROJ_SPHERICAL_ORTHO = 0,
+ PROJ_SPHERICAL_EQRECT = 1
+ };
+
+//! class of the pixel in GrabCut algorithm
+enum { GC_BGD = 0, //!< background
+ GC_FGD = 1, //!< foreground
+ GC_PR_BGD = 2, //!< most probably background
+ GC_PR_FGD = 3 //!< most probably foreground
+ };
+
+//! GrabCut algorithm flags
+enum { GC_INIT_WITH_RECT = 0,
+ GC_INIT_WITH_MASK = 1,
+ GC_EVAL = 2
+};
+
+//! distanceTransform algorithm flags
+enum { DIST_LABEL_CCOMP = 0,
+ DIST_LABEL_PIXEL = 1
+ };
+
+//! floodfill algorithm flags
+enum { FLOODFILL_FIXED_RANGE = 1 << 16,
+ FLOODFILL_MASK_ONLY = 1 << 17
+ };
+
+//! type of the template matching operation
+enum { TM_SQDIFF = 0,
+ TM_SQDIFF_NORMED = 1,
+ TM_CCORR = 2,
+ TM_CCORR_NORMED = 3,
+ TM_CCOEFF = 4,
+ TM_CCOEFF_NORMED = 5
+ };
+
+//! connected components algorithm output formats
+enum { CC_STAT_LEFT = 0,
+ CC_STAT_TOP = 1,
+ CC_STAT_WIDTH = 2,
+ CC_STAT_HEIGHT = 3,
+ CC_STAT_AREA = 4,
+ CC_STAT_MAX = 5
+ };
+
+//! mode of the contour retrieval algorithm
+enum { RETR_EXTERNAL = 0, //!< retrieve only the most external (top-level) contours
+ RETR_LIST = 1, //!< retrieve all the contours without any hierarchical information
+ RETR_CCOMP = 2, //!< retrieve the connected components (that can possibly be nested)
+ RETR_TREE = 3, //!< retrieve all the contours and the whole hierarchy
+ RETR_FLOODFILL = 4
+ };
+
+//! the contour approximation algorithm
+enum { CHAIN_APPROX_NONE = 1,
+ CHAIN_APPROX_SIMPLE = 2,
+ CHAIN_APPROX_TC89_L1 = 3,
+ CHAIN_APPROX_TC89_KCOS = 4
+ };
+
+//! Variants of a Hough transform
+enum { HOUGH_STANDARD = 0,
+ HOUGH_PROBABILISTIC = 1,
+ HOUGH_MULTI_SCALE = 2,
+ HOUGH_GRADIENT = 3
+ };
+
+//! Variants of Line Segment Detector
+enum { LSD_REFINE_NONE = 0,
+ LSD_REFINE_STD = 1,
+ LSD_REFINE_ADV = 2
+ };
+
+//! Histogram comparison methods
+enum { HISTCMP_CORREL = 0,
+ HISTCMP_CHISQR = 1,
+ HISTCMP_INTERSECT = 2,
+ HISTCMP_BHATTACHARYYA = 3,
+ HISTCMP_HELLINGER = HISTCMP_BHATTACHARYYA
+ };
+
+//! the color conversion code
+enum { COLOR_BGR2BGRA = 0,
+ COLOR_RGB2RGBA = COLOR_BGR2BGRA,
+
+ COLOR_BGRA2BGR = 1,
+ COLOR_RGBA2RGB = COLOR_BGRA2BGR,
+
+ COLOR_BGR2RGBA = 2,
+ COLOR_RGB2BGRA = COLOR_BGR2RGBA,
+
+ COLOR_RGBA2BGR = 3,
+ COLOR_BGRA2RGB = COLOR_RGBA2BGR,
+
+ COLOR_BGR2RGB = 4,
+ COLOR_RGB2BGR = COLOR_BGR2RGB,
+
+ COLOR_BGRA2RGBA = 5,
+ COLOR_RGBA2BGRA = COLOR_BGRA2RGBA,
+
+ COLOR_BGR2GRAY = 6,
+ COLOR_RGB2GRAY = 7,
+ COLOR_GRAY2BGR = 8,
+ COLOR_GRAY2RGB = COLOR_GRAY2BGR,
+ COLOR_GRAY2BGRA = 9,
+ COLOR_GRAY2RGBA = COLOR_GRAY2BGRA,
+ COLOR_BGRA2GRAY = 10,
+ COLOR_RGBA2GRAY = 11,
+
+ COLOR_BGR2BGR565 = 12,
+ COLOR_RGB2BGR565 = 13,
+ COLOR_BGR5652BGR = 14,
+ COLOR_BGR5652RGB = 15,
+ COLOR_BGRA2BGR565 = 16,
+ COLOR_RGBA2BGR565 = 17,
+ COLOR_BGR5652BGRA = 18,
+ COLOR_BGR5652RGBA = 19,
+
+ COLOR_GRAY2BGR565 = 20,
+ COLOR_BGR5652GRAY = 21,
+
+ COLOR_BGR2BGR555 = 22,
+ COLOR_RGB2BGR555 = 23,
+ COLOR_BGR5552BGR = 24,
+ COLOR_BGR5552RGB = 25,
+ COLOR_BGRA2BGR555 = 26,
+ COLOR_RGBA2BGR555 = 27,
+ COLOR_BGR5552BGRA = 28,
+ COLOR_BGR5552RGBA = 29,
+
+ COLOR_GRAY2BGR555 = 30,
+ COLOR_BGR5552GRAY = 31,
+
+ COLOR_BGR2XYZ = 32,
+ COLOR_RGB2XYZ = 33,
+ COLOR_XYZ2BGR = 34,
+ COLOR_XYZ2RGB = 35,
+
+ COLOR_BGR2YCrCb = 36,
+ COLOR_RGB2YCrCb = 37,
+ COLOR_YCrCb2BGR = 38,
+ COLOR_YCrCb2RGB = 39,
+
+ COLOR_BGR2HSV = 40,
+ COLOR_RGB2HSV = 41,
+
+ COLOR_BGR2Lab = 44,
+ COLOR_RGB2Lab = 45,
+
+ COLOR_BGR2Luv = 50,
+ COLOR_RGB2Luv = 51,
+ COLOR_BGR2HLS = 52,
+ COLOR_RGB2HLS = 53,
+
+ COLOR_HSV2BGR = 54,
+ COLOR_HSV2RGB = 55,
+
+ COLOR_Lab2BGR = 56,
+ COLOR_Lab2RGB = 57,
+ COLOR_Luv2BGR = 58,
+ COLOR_Luv2RGB = 59,
+ COLOR_HLS2BGR = 60,
+ COLOR_HLS2RGB = 61,
+
+ COLOR_BGR2HSV_FULL = 66,
+ COLOR_RGB2HSV_FULL = 67,
+ COLOR_BGR2HLS_FULL = 68,
+ COLOR_RGB2HLS_FULL = 69,
+
+ COLOR_HSV2BGR_FULL = 70,
+ COLOR_HSV2RGB_FULL = 71,
+ COLOR_HLS2BGR_FULL = 72,
+ COLOR_HLS2RGB_FULL = 73,
+
+ COLOR_LBGR2Lab = 74,
+ COLOR_LRGB2Lab = 75,
+ COLOR_LBGR2Luv = 76,
+ COLOR_LRGB2Luv = 77,
+
+ COLOR_Lab2LBGR = 78,
+ COLOR_Lab2LRGB = 79,
+ COLOR_Luv2LBGR = 80,
+ COLOR_Luv2LRGB = 81,
+
+ COLOR_BGR2YUV = 82,
+ COLOR_RGB2YUV = 83,
+ COLOR_YUV2BGR = 84,
+ COLOR_YUV2RGB = 85,
+
+ // YUV 4:2:0 family to RGB
+ COLOR_YUV2RGB_NV12 = 90,
+ COLOR_YUV2BGR_NV12 = 91,
+ COLOR_YUV2RGB_NV21 = 92,
+ COLOR_YUV2BGR_NV21 = 93,
+ COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21,
+ COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21,
+
+ COLOR_YUV2RGBA_NV12 = 94,
+ COLOR_YUV2BGRA_NV12 = 95,
+ COLOR_YUV2RGBA_NV21 = 96,
+ COLOR_YUV2BGRA_NV21 = 97,
+ COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21,
+ COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21,
+
+ COLOR_YUV2RGB_YV12 = 98,
+ COLOR_YUV2BGR_YV12 = 99,
+ COLOR_YUV2RGB_IYUV = 100,
+ COLOR_YUV2BGR_IYUV = 101,
+ COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV,
+ COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV,
+ COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12,
+ COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12,
+
+ COLOR_YUV2RGBA_YV12 = 102,
+ COLOR_YUV2BGRA_YV12 = 103,
+ COLOR_YUV2RGBA_IYUV = 104,
+ COLOR_YUV2BGRA_IYUV = 105,
+ COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV,
+ COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV,
+ COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12,
+ COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12,
+
+ COLOR_YUV2GRAY_420 = 106,
+ COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420,
+ COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420,
+ COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420,
+ COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420,
+ COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420,
+ COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420,
+ COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420,
+
+ // YUV 4:2:2 family to RGB
+ COLOR_YUV2RGB_UYVY = 107,
+ COLOR_YUV2BGR_UYVY = 108,
+ //COLOR_YUV2RGB_VYUY = 109,
+ //COLOR_YUV2BGR_VYUY = 110,
+ COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY,
+ COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY,
+ COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY,
+ COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY,
+
+ COLOR_YUV2RGBA_UYVY = 111,
+ COLOR_YUV2BGRA_UYVY = 112,
+ //COLOR_YUV2RGBA_VYUY = 113,
+ //COLOR_YUV2BGRA_VYUY = 114,
+ COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY,
+ COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY,
+ COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY,
+ COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY,
+
+ COLOR_YUV2RGB_YUY2 = 115,
+ COLOR_YUV2BGR_YUY2 = 116,
+ COLOR_YUV2RGB_YVYU = 117,
+ COLOR_YUV2BGR_YVYU = 118,
+ COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2,
+ COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2,
+ COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2,
+ COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2,
+
+ COLOR_YUV2RGBA_YUY2 = 119,
+ COLOR_YUV2BGRA_YUY2 = 120,
+ COLOR_YUV2RGBA_YVYU = 121,
+ COLOR_YUV2BGRA_YVYU = 122,
+ COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2,
+ COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2,
+ COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2,
+ COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2,
+
+ COLOR_YUV2GRAY_UYVY = 123,
+ COLOR_YUV2GRAY_YUY2 = 124,
+ //CV_YUV2GRAY_VYUY = CV_YUV2GRAY_UYVY,
+ COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY,
+ COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY,
+ COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2,
+ COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2,
+ COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2,
+
+ // alpha premultiplication
+ COLOR_RGBA2mRGBA = 125,
+ COLOR_mRGBA2RGBA = 126,
+
+ // RGB to YUV 4:2:0 family
+ COLOR_RGB2YUV_I420 = 127,
+ COLOR_BGR2YUV_I420 = 128,
+ COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420,
+ COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420,
+
+ COLOR_RGBA2YUV_I420 = 129,
+ COLOR_BGRA2YUV_I420 = 130,
+ COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420,
+ COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420,
+ COLOR_RGB2YUV_YV12 = 131,
+ COLOR_BGR2YUV_YV12 = 132,
+ COLOR_RGBA2YUV_YV12 = 133,
+ COLOR_BGRA2YUV_YV12 = 134,
+
+ // Demosaicing
+ COLOR_BayerBG2BGR = 46,
+ COLOR_BayerGB2BGR = 47,
+ COLOR_BayerRG2BGR = 48,
+ COLOR_BayerGR2BGR = 49,
+
+ COLOR_BayerBG2RGB = COLOR_BayerRG2BGR,
+ COLOR_BayerGB2RGB = COLOR_BayerGR2BGR,
+ COLOR_BayerRG2RGB = COLOR_BayerBG2BGR,
+ COLOR_BayerGR2RGB = COLOR_BayerGB2BGR,
+
+ COLOR_BayerBG2GRAY = 86,
+ COLOR_BayerGB2GRAY = 87,
+ COLOR_BayerRG2GRAY = 88,
+ COLOR_BayerGR2GRAY = 89,
+
+ // Demosaicing using Variable Number of Gradients
+ COLOR_BayerBG2BGR_VNG = 62,
+ COLOR_BayerGB2BGR_VNG = 63,
+ COLOR_BayerRG2BGR_VNG = 64,
+ COLOR_BayerGR2BGR_VNG = 65,
+
+ COLOR_BayerBG2RGB_VNG = COLOR_BayerRG2BGR_VNG,
+ COLOR_BayerGB2RGB_VNG = COLOR_BayerGR2BGR_VNG,
+ COLOR_BayerRG2RGB_VNG = COLOR_BayerBG2BGR_VNG,
+ COLOR_BayerGR2RGB_VNG = COLOR_BayerGB2BGR_VNG,
+
+ // Edge-Aware Demosaicing
+ COLOR_BayerBG2BGR_EA = 135,
+ COLOR_BayerGB2BGR_EA = 136,
+ COLOR_BayerRG2BGR_EA = 137,
+ COLOR_BayerGR2BGR_EA = 138,
+
+ COLOR_BayerBG2RGB_EA = COLOR_BayerRG2BGR_EA,
+ COLOR_BayerGB2RGB_EA = COLOR_BayerGR2BGR_EA,
+ COLOR_BayerRG2RGB_EA = COLOR_BayerBG2BGR_EA,
+ COLOR_BayerGR2RGB_EA = COLOR_BayerGB2BGR_EA,
+
+
+ COLOR_COLORCVT_MAX = 139
+};
+
+//! types of intersection between rectangles
+enum { INTERSECT_NONE = 0,
+ INTERSECT_PARTIAL = 1,
+ INTERSECT_FULL = 2
+ };
+
+/*!
+ The Base Class for 1D or Row-wise Filters
+
+ This is the base class for linear or non-linear filters that process 1D data.
+ In particular, such filters are used for the "horizontal" filtering parts in separable filters.
+
+ Several functions in OpenCV return Ptr<BaseRowFilter> for the specific types of filters,
+ and those pointers can be used directly or within cv::FilterEngine.
+*/
+class CV_EXPORTS BaseRowFilter
+{
+public:
+ //! the default constructor
+ BaseRowFilter();
+ //! the destructor
+ virtual ~BaseRowFilter();
+    //! the filtering operator. Must be overridden in the derived classes. The horizontal border interpolation is done outside of the class.
+ virtual void operator()(const uchar* src, uchar* dst, int width, int cn) = 0;
+
+ int ksize;
+ int anchor;
+};
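+
+/* A minimal sketch (an illustrative assumption, not part of this interface) of a
+   custom row filter derived from BaseRowFilter: it simply copies the source row,
+   assuming 8-bit elements. Real row filters, such as those returned by
+   getLinearRowFilter(), perform the actual horizontal convolution here.
+
+       struct CopyRowFilter : public BaseRowFilter
+       {
+           CopyRowFilter() { ksize = 1; anchor = 0; }
+           void operator()(const uchar* src, uchar* dst, int width, int cn)
+           {
+               for( int i = 0; i < width*cn; i++ )
+                   dst[i] = src[i];
+           }
+       };
+*/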
+
+
+/*!
+ The Base Class for Column-wise Filters
+
+ This is the base class for linear or non-linear filters that process columns of 2D arrays.
+ Such filters are used for the "vertical" filtering parts in separable filters.
+
+ Several functions in OpenCV return Ptr<BaseColumnFilter> for the specific types of filters,
+ and those pointers can be used directly or within cv::FilterEngine.
+
+ Unlike cv::BaseRowFilter, cv::BaseColumnFilter may have some context information,
+ i.e. box filter keeps the sliding sum of elements. To reset the state BaseColumnFilter::reset()
+ must be called (e.g. the method is called by cv::FilterEngine)
+ */
+class CV_EXPORTS BaseColumnFilter
+{
+public:
+ //! the default constructor
+ BaseColumnFilter();
+ //! the destructor
+ virtual ~BaseColumnFilter();
+    //! the filtering operator. Must be overridden in the derived classes. The vertical border interpolation is done outside of the class.
+ virtual void operator()(const uchar** src, uchar* dst, int dststep, int dstcount, int width) = 0;
+ //! resets the internal buffers, if any
+ virtual void reset();
+
+ int ksize;
+ int anchor;
+};
+
+
+/*!
+ The Base Class for Non-Separable 2D Filters.
+
+ This is the base class for linear or non-linear 2D filters.
+
+ Several functions in OpenCV return Ptr<BaseFilter> for the specific types of filters,
+ and those pointers can be used directly or within cv::FilterEngine.
+
+ Similar to cv::BaseColumnFilter, the class may have some context information,
+ that should be reset using BaseFilter::reset() method before processing the new array.
+*/
+class CV_EXPORTS BaseFilter
+{
+public:
+ //! the default constructor
+ BaseFilter();
+ //! the destructor
+ virtual ~BaseFilter();
+ //! the filtering operator. The horizontal and the vertical border interpolation is done outside of the class.
+ virtual void operator()(const uchar** src, uchar* dst, int dststep, int dstcount, int width, int cn) = 0;
+ //! resets the internal buffers, if any
+ virtual void reset();
+
+ Size ksize;
+ Point anchor;
+};
+
+
+/*!
+ The Main Class for Image Filtering.
+
+ The class can be used to apply an arbitrary filtering operation to an image.
+ It contains all the necessary intermediate buffers, it computes extrapolated values
+ of the "virtual" pixels outside of the image etc.
+ Pointers to the initialized cv::FilterEngine instances
+ are returned by various OpenCV functions, such as cv::createSeparableLinearFilter(),
+ cv::createLinearFilter(), cv::createGaussianFilter(), cv::createDerivFilter(),
+ cv::createBoxFilter() and cv::createMorphologyFilter().
+
+ Using the class you can process large images by parts and build complex pipelines
+ that include filtering as some of the stages. If all you need is to apply some pre-defined
+ filtering operation, you may use cv::filter2D(), cv::erode(), cv::dilate() etc.
+ functions that create FilterEngine internally.
+
+ Here is an example of how to use the class to implement the Laplacian operator, which is the sum of
+ second-order derivatives. A more complex variant supporting different types is implemented in cv::Laplacian().
+
+ \code
+ void laplace_f(const Mat& src, Mat& dst)
+ {
+ CV_Assert( src.type() == CV_32F );
+ // make sure the destination array has the proper size and type
+ dst.create(src.size(), src.type());
+
+ // get the derivative and smooth kernels for d2I/dx2.
+ // for d2I/dy2 we could use the same kernels, just swapped
+     int ksize = 3, ktype = CV_32F, borderType = BORDER_DEFAULT; // example parameter values
+     Mat kd, ks;
+     getSobelKernels( kd, ks, 2, 0, ksize, false, ktype );
+
+ // let's process 10 source rows at once
+ int DELTA = std::min(10, src.rows);
+ Ptr<FilterEngine> Fxx = createSeparableLinearFilter(src.type(),
+ dst.type(), kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() );
+ Ptr<FilterEngine> Fyy = createSeparableLinearFilter(src.type(),
+ dst.type(), ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() );
+
+ int y = Fxx->start(src), dsty = 0, dy = 0;
+ Fyy->start(src);
+ const uchar* sptr = src.data + y*src.step;
+
+ // allocate the buffers for the spatial image derivatives;
+ // the buffers need to have more than DELTA rows, because at the
+ // last iteration the output may take max(kd.rows-1,ks.rows-1)
+ // rows more than the input.
+ Mat Ixx( DELTA + kd.rows - 1, src.cols, dst.type() );
+ Mat Iyy( DELTA + kd.rows - 1, src.cols, dst.type() );
+
+ // inside the loop we always pass DELTA rows to the filter
+     // (note that the "proceed" method takes care of possible overflow, since
+ // it was given the actual image height in the "start" method)
+ // on output we can get:
+ // * < DELTA rows (the initial buffer accumulation stage)
+ // * = DELTA rows (settled state in the middle)
+ // * > DELTA rows (then the input image is over, but we generate
+ // "virtual" rows using the border mode and filter them)
+ // this variable number of output rows is dy.
+ // dsty is the current output row.
+ // sptr is the pointer to the first input row in the portion to process
+ for( ; dsty < dst.rows; sptr += DELTA*src.step, dsty += dy )
+ {
+ Fxx->proceed( sptr, (int)src.step, DELTA, Ixx.data, (int)Ixx.step );
+         dy = Fyy->proceed( sptr, (int)src.step, DELTA, Iyy.data, (int)Iyy.step );
+ if( dy > 0 )
+ {
+ Mat dstripe = dst.rowRange(dsty, dsty + dy);
+ add(Ixx.rowRange(0, dy), Iyy.rowRange(0, dy), dstripe);
+ }
+ }
+ }
+ \endcode
+*/
+class CV_EXPORTS FilterEngine
+{
+public:
+ //! the default constructor
+ FilterEngine();
+ //! the full constructor. Either _filter2D or both _rowFilter and _columnFilter must be non-empty.
+ FilterEngine(const Ptr<BaseFilter>& _filter2D,
+ const Ptr<BaseRowFilter>& _rowFilter,
+ const Ptr<BaseColumnFilter>& _columnFilter,
+ int srcType, int dstType, int bufType,
+ int _rowBorderType = BORDER_REPLICATE,
+ int _columnBorderType = -1,
+ const Scalar& _borderValue = Scalar());
+ //! the destructor
+ virtual ~FilterEngine();
+ //! reinitializes the engine. The previously assigned filters are released.
+ void init(const Ptr<BaseFilter>& _filter2D,
+ const Ptr<BaseRowFilter>& _rowFilter,
+ const Ptr<BaseColumnFilter>& _columnFilter,
+ int srcType, int dstType, int bufType,
+ int _rowBorderType = BORDER_REPLICATE,
+ int _columnBorderType = -1,
+ const Scalar& _borderValue = Scalar());
+ //! starts filtering of the specified ROI of an image of size wholeSize.
+ virtual int start(Size wholeSize, Rect roi, int maxBufRows = -1);
+ //! starts filtering of the specified ROI of the specified image.
+ virtual int start(const Mat& src, const Rect& srcRoi = Rect(0,0,-1,-1),
+ bool isolated = false, int maxBufRows = -1);
+ //! processes the next srcCount rows of the image.
+ virtual int proceed(const uchar* src, int srcStep, int srcCount,
+ uchar* dst, int dstStep);
+ //! applies filter to the specified ROI of the image. if srcRoi=(0,0,-1,-1), the whole image is filtered.
+ virtual void apply( const Mat& src, Mat& dst,
+ const Rect& srcRoi = Rect(0,0,-1,-1),
+ Point dstOfs = Point(0,0),
+ bool isolated = false);
+ //! returns true if the filter is separable
+ bool isSeparable() const { return !filter2D; }
+    //! returns the number of input rows still to be processed and output rows still to be produced, respectively
+ int remainingInputRows() const;
+ int remainingOutputRows() const;
+
+ int srcType;
+ int dstType;
+ int bufType;
+ Size ksize;
+ Point anchor;
+ int maxWidth;
+ Size wholeSize;
+ Rect roi;
+ int dx1;
+ int dx2;
+ int rowBorderType;
+ int columnBorderType;
+ std::vector<int> borderTab;
+ int borderElemSize;
+ std::vector<uchar> ringBuf;
+ std::vector<uchar> srcRow;
+ std::vector<uchar> constBorderValue;
+ std::vector<uchar> constBorderRow;
+ int bufStep;
+ int startY;
+ int startY0;
+ int endY;
+ int rowCount;
+ int dstY;
+ std::vector<uchar*> rows;
+
+ Ptr<BaseFilter> filter2D;
+ Ptr<BaseRowFilter> rowFilter;
+ Ptr<BaseColumnFilter> columnFilter;
+};
+
+
+//! finds arbitrary template in the grayscale image using Generalized Hough Transform
+class CV_EXPORTS GeneralizedHough : public Algorithm
+{
+public:
+ //! set template to search
+ virtual void setTemplate(InputArray templ, Point templCenter = Point(-1, -1)) = 0;
+ virtual void setTemplate(InputArray edges, InputArray dx, InputArray dy, Point templCenter = Point(-1, -1)) = 0;
+
+ //! find template on image
+ virtual void detect(InputArray image, OutputArray positions, OutputArray votes = noArray()) = 0;
+ virtual void detect(InputArray edges, InputArray dx, InputArray dy, OutputArray positions, OutputArray votes = noArray()) = 0;
+
+ //! Canny low threshold.
+ virtual void setCannyLowThresh(int cannyLowThresh) = 0;
+ virtual int getCannyLowThresh() const = 0;
+
+ //! Canny high threshold.
+ virtual void setCannyHighThresh(int cannyHighThresh) = 0;
+ virtual int getCannyHighThresh() const = 0;
+
+ //! Minimum distance between the centers of the detected objects.
+ virtual void setMinDist(double minDist) = 0;
+ virtual double getMinDist() const = 0;
+
+ //! Inverse ratio of the accumulator resolution to the image resolution.
+ virtual void setDp(double dp) = 0;
+ virtual double getDp() const = 0;
+
+ //! Maximal size of inner buffers.
+ virtual void setMaxBufferSize(int maxBufferSize) = 0;
+ virtual int getMaxBufferSize() const = 0;
+};
+
+//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122.
+//! Detects position only without translation and rotation
+class CV_EXPORTS GeneralizedHoughBallard : public GeneralizedHough
+{
+public:
+ //! R-Table levels.
+ virtual void setLevels(int levels) = 0;
+ virtual int getLevels() const = 0;
+
+ //! The accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected.
+ virtual void setVotesThreshold(int votesThreshold) = 0;
+ virtual int getVotesThreshold() const = 0;
+};
+
+//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038.
+//! Detects position, translation and rotation
+class CV_EXPORTS GeneralizedHoughGuil : public GeneralizedHough
+{
+public:
+ //! Angle difference in degrees between two points in feature.
+ virtual void setXi(double xi) = 0;
+ virtual double getXi() const = 0;
+
+ //! Feature table levels.
+ virtual void setLevels(int levels) = 0;
+ virtual int getLevels() const = 0;
+
+    //! Maximal difference between angles that are treated as equal.
+ virtual void setAngleEpsilon(double angleEpsilon) = 0;
+ virtual double getAngleEpsilon() const = 0;
+
+ //! Minimal rotation angle to detect in degrees.
+ virtual void setMinAngle(double minAngle) = 0;
+ virtual double getMinAngle() const = 0;
+
+ //! Maximal rotation angle to detect in degrees.
+ virtual void setMaxAngle(double maxAngle) = 0;
+ virtual double getMaxAngle() const = 0;
+
+ //! Angle step in degrees.
+ virtual void setAngleStep(double angleStep) = 0;
+ virtual double getAngleStep() const = 0;
+
+ //! Angle votes threshold.
+ virtual void setAngleThresh(int angleThresh) = 0;
+ virtual int getAngleThresh() const = 0;
+
+ //! Minimal scale to detect.
+ virtual void setMinScale(double minScale) = 0;
+ virtual double getMinScale() const = 0;
+
+ //! Maximal scale to detect.
+ virtual void setMaxScale(double maxScale) = 0;
+ virtual double getMaxScale() const = 0;
+
+ //! Scale step.
+ virtual void setScaleStep(double scaleStep) = 0;
+ virtual double getScaleStep() const = 0;
+
+ //! Scale votes threshold.
+ virtual void setScaleThresh(int scaleThresh) = 0;
+ virtual int getScaleThresh() const = 0;
+
+ //! Position votes threshold.
+ virtual void setPosThresh(int posThresh) = 0;
+ virtual int getPosThresh() const = 0;
+};
+
+
+class CV_EXPORTS_W CLAHE : public Algorithm
+{
+public:
+ CV_WRAP virtual void apply(InputArray src, OutputArray dst) = 0;
+
+ CV_WRAP virtual void setClipLimit(double clipLimit) = 0;
+ CV_WRAP virtual double getClipLimit() const = 0;
+
+ CV_WRAP virtual void setTilesGridSize(Size tileGridSize) = 0;
+ CV_WRAP virtual Size getTilesGridSize() const = 0;
+
+ CV_WRAP virtual void collectGarbage() = 0;
+};
+
+
+class CV_EXPORTS_W Subdiv2D
+{
+public:
+ enum { PTLOC_ERROR = -2,
+ PTLOC_OUTSIDE_RECT = -1,
+ PTLOC_INSIDE = 0,
+ PTLOC_VERTEX = 1,
+ PTLOC_ON_EDGE = 2
+ };
+
+ enum { NEXT_AROUND_ORG = 0x00,
+ NEXT_AROUND_DST = 0x22,
+ PREV_AROUND_ORG = 0x11,
+ PREV_AROUND_DST = 0x33,
+ NEXT_AROUND_LEFT = 0x13,
+ NEXT_AROUND_RIGHT = 0x31,
+ PREV_AROUND_LEFT = 0x20,
+ PREV_AROUND_RIGHT = 0x02
+ };
+
+ CV_WRAP Subdiv2D();
+ CV_WRAP Subdiv2D(Rect rect);
+ CV_WRAP void initDelaunay(Rect rect);
+
+ CV_WRAP int insert(Point2f pt);
+ CV_WRAP void insert(const std::vector<Point2f>& ptvec);
+ CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex);
+
+ CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt = 0);
+ CV_WRAP void getEdgeList(CV_OUT std::vector<Vec4f>& edgeList) const;
+ CV_WRAP void getTriangleList(CV_OUT std::vector<Vec6f>& triangleList) const;
+ CV_WRAP void getVoronoiFacetList(const std::vector<int>& idx, CV_OUT std::vector<std::vector<Point2f> >& facetList,
+ CV_OUT std::vector<Point2f>& facetCenters);
+
+ CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge = 0) const;
+
+ CV_WRAP int getEdge( int edge, int nextEdgeType ) const;
+ CV_WRAP int nextEdge(int edge) const;
+ CV_WRAP int rotateEdge(int edge, int rotate) const;
+ CV_WRAP int symEdge(int edge) const;
+ CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt = 0) const;
+ CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt = 0) const;
+
+protected:
+ int newEdge();
+ void deleteEdge(int edge);
+ int newPoint(Point2f pt, bool isvirtual, int firstEdge = 0);
+ void deletePoint(int vtx);
+ void setEdgePoints( int edge, int orgPt, int dstPt );
+ void splice( int edgeA, int edgeB );
+ int connectEdges( int edgeA, int edgeB );
+ void swapEdges( int edge );
+ int isRightOf(Point2f pt, int edge) const;
+ void calcVoronoi();
+ void clearVoronoi();
+ void checkSubdiv() const;
+
+ struct CV_EXPORTS Vertex
+ {
+ Vertex();
+ Vertex(Point2f pt, bool _isvirtual, int _firstEdge=0);
+ bool isvirtual() const;
+ bool isfree() const;
+
+ int firstEdge;
+ int type;
+ Point2f pt;
+ };
+
+ struct CV_EXPORTS QuadEdge
+ {
+ QuadEdge();
+ QuadEdge(int edgeidx);
+ bool isfree() const;
+
+ int next[4];
+ int pt[4];
+ };
+
+ std::vector<Vertex> vtx;
+ std::vector<QuadEdge> qedges;
+ int freeQEdge;
+ int freePoint;
+ bool validGeometry;
+
+ int recentEdge;
+ Point2f topLeft;
+ Point2f bottomRight;
+};
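+
+/* A short usage sketch (illustrative only): build a Delaunay triangulation of a
+   few points and read back the triangles. The point coordinates below are
+   arbitrary assumptions.
+
+       Subdiv2D subdiv(Rect(0, 0, 640, 480));
+       subdiv.insert(Point2f(100.f, 100.f));
+       subdiv.insert(Point2f(300.f, 200.f));
+       subdiv.insert(Point2f(500.f, 400.f));
+
+       std::vector<Vec6f> triangles;
+       subdiv.getTriangleList(triangles);  // each Vec6f is (x1,y1, x2,y2, x3,y3)
+*/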
+
+class CV_EXPORTS_W LineSegmentDetector : public Algorithm
+{
+public:
+/**
+ * Detect lines in the input image.
+ *
+ * @param _image A grayscale(CV_8UC1) input image.
+ * If only a roi needs to be selected, use
+ * lsd_ptr->detect(image(roi), ..., lines);
+ * lines += Scalar(roi.x, roi.y, roi.x, roi.y);
+ * @param _lines Return: A vector of Vec4i elements specifying the beginning and ending point of a line,
+ * where Vec4i is (x1, y1, x2, y2); point 1 is the start, point 2 is the end.
+ * Returned lines are strictly oriented depending on the gradient.
+ * @param width Return: Vector of widths of the regions where the lines are found, i.e. the width of each line.
+ * @param prec Return: Vector of precisions with which the lines are found.
+ * @param nfa Return: Vector containing the number of false alarms in the line region, with a precision of 10%.
+ * The bigger the value, the better the detection (the scale is logarithmic):
+ * * -1 corresponds to 10 mean false alarms
+ * * 0 corresponds to 1 mean false alarm
+ * * 1 corresponds to 0.1 mean false alarms
+ * This vector is calculated _only_ when the detector was created with LSD_REFINE_ADV.
+ */
+ CV_WRAP virtual void detect(InputArray _image, OutputArray _lines,
+ OutputArray width = noArray(), OutputArray prec = noArray(),
+ OutputArray nfa = noArray()) = 0;
+
+/**
+ * Draw lines on the given canvas.
+ *
+ * @param image The image, where lines will be drawn.
+ * Should have the size of the image, where the lines were found
+ * @param lines The lines that need to be drawn
+ */
+ CV_WRAP virtual void drawSegments(InputOutputArray _image, InputArray lines) = 0;
+
+/**
+ * Draw both vectors on the image canvas. Uses blue for lines 1 and red for lines 2.
+ *
+ * @param size The size of the image, where lines were found.
+ * @param lines1 The first lines that need to be drawn. Color - Blue.
+ * @param lines2 The second lines that need to be drawn. Color - Red.
+ * @param image Optional image, where lines will be drawn.
+ * Should have the size of the image, where the lines were found
+ * @return The number of mismatching pixels between lines1 and lines2.
+ */
+ CV_WRAP virtual int compareSegments(const Size& size, InputArray lines1, InputArray lines2, InputOutputArray _image = noArray()) = 0;
+
+ virtual ~LineSegmentDetector() {};
+};
+
+//! Returns a pointer to a LineSegmentDetector class.
+CV_EXPORTS_W Ptr<LineSegmentDetector> createLineSegmentDetector(
+ int _refine = LSD_REFINE_STD, double _scale = 0.8,
+ double _sigma_scale = 0.6, double _quant = 2.0, double _ang_th = 22.5,
+ double _log_eps = 0, double _density_th = 0.7, int _n_bins = 1024);
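+
+/* A minimal usage sketch (assuming an 8-bit single-channel image `gray` is
+   already available; loading it is outside the scope of this header):
+
+       Ptr<LineSegmentDetector> lsd = createLineSegmentDetector(LSD_REFINE_STD);
+       std::vector<Vec4i> lines;
+       lsd->detect(gray, lines);        // each Vec4i is (x1, y1, x2, y2)
+       lsd->drawSegments(gray, lines);  // overlays the found segments
+*/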
+
+//! returns type (one of KERNEL_*) of 1D or 2D kernel specified by its coefficients.
+CV_EXPORTS int getKernelType(InputArray kernel, Point anchor);
+
+//! returns the primitive row filter with the specified kernel
+CV_EXPORTS Ptr<BaseRowFilter> getLinearRowFilter(int srcType, int bufType,
+ InputArray kernel, int anchor,
+ int symmetryType);
+
+//! returns the primitive column filter with the specified kernel
+CV_EXPORTS Ptr<BaseColumnFilter> getLinearColumnFilter(int bufType, int dstType,
+ InputArray kernel, int anchor,
+ int symmetryType, double delta = 0,
+ int bits = 0);
+
+//! returns 2D filter with the specified kernel
+CV_EXPORTS Ptr<BaseFilter> getLinearFilter(int srcType, int dstType,
+ InputArray kernel,
+ Point anchor = Point(-1,-1),
+ double delta = 0, int bits = 0);
+
+//! returns the separable linear filter engine
+CV_EXPORTS Ptr<FilterEngine> createSeparableLinearFilter(int srcType, int dstType,
+ InputArray rowKernel, InputArray columnKernel,
+ Point anchor = Point(-1,-1), double delta = 0,
+ int rowBorderType = BORDER_DEFAULT,
+ int columnBorderType = -1,
+ const Scalar& borderValue = Scalar());
+
+//! returns the non-separable linear filter engine
+CV_EXPORTS Ptr<FilterEngine> createLinearFilter(int srcType, int dstType,
+ InputArray kernel, Point _anchor = Point(-1,-1),
+ double delta = 0, int rowBorderType = BORDER_DEFAULT,
+ int columnBorderType = -1, const Scalar& borderValue = Scalar());
+
+//! returns the Gaussian kernel with the specified parameters
+CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype = CV_64F );
+
+//! returns the Gaussian filter engine
+CV_EXPORTS Ptr<FilterEngine> createGaussianFilter( int type, Size ksize,
+ double sigma1, double sigma2 = 0,
+ int borderType = BORDER_DEFAULT);
+
+//! initializes kernels of the generalized Sobel operator
+CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky,
+ int dx, int dy, int ksize,
+ bool normalize = false, int ktype = CV_32F );
+
+//! returns filter engine for the generalized Sobel operator
+CV_EXPORTS Ptr<FilterEngine> createDerivFilter( int srcType, int dstType,
+ int dx, int dy, int ksize,
+ int borderType = BORDER_DEFAULT );
+
+//! returns horizontal 1D box filter
+CV_EXPORTS Ptr<BaseRowFilter> getRowSumFilter(int srcType, int sumType,
+ int ksize, int anchor = -1);
+
+//! returns vertical 1D box filter
+CV_EXPORTS Ptr<BaseColumnFilter> getColumnSumFilter( int sumType, int dstType,
+ int ksize, int anchor = -1,
+ double scale = 1);
+//! returns box filter engine
+CV_EXPORTS Ptr<FilterEngine> createBoxFilter( int srcType, int dstType, Size ksize,
+ Point anchor = Point(-1,-1),
+ bool normalize = true,
+ int borderType = BORDER_DEFAULT);
+
+//! returns the Gabor kernel with the specified parameters
+CV_EXPORTS_W Mat getGaborKernel( Size ksize, double sigma, double theta, double lambd,
+ double gamma, double psi = CV_PI*0.5, int ktype = CV_64F );
+
+//! returns horizontal 1D morphological filter
+CV_EXPORTS Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int ksize, int anchor = -1);
+
+//! returns vertical 1D morphological filter
+CV_EXPORTS Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int ksize, int anchor = -1);
+
+//! returns 2D morphological filter
+CV_EXPORTS Ptr<BaseFilter> getMorphologyFilter(int op, int type, InputArray kernel,
+ Point anchor = Point(-1,-1));
+
+//! returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation.
+static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); }
+
+//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported.
+CV_EXPORTS Ptr<FilterEngine> createMorphologyFilter(int op, int type, InputArray kernel,
+ Point anchor = Point(-1,-1), int rowBorderType = BORDER_CONSTANT,
+ int columnBorderType = -1, const Scalar& borderValue = morphologyDefaultBorderValue());
+
+//! returns structuring element of the specified shape and size
+CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1));
+
+//! smooths the image using median filter.
+CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize );
+
+//! smooths the image using Gaussian filter.
+CV_EXPORTS_W void GaussianBlur( InputArray src, OutputArray dst, Size ksize,
+ double sigmaX, double sigmaY = 0,
+ int borderType = BORDER_DEFAULT );
+
+//! smooths the image using bilateral filter
+CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d,
+ double sigmaColor, double sigmaSpace,
+ int borderType = BORDER_DEFAULT );
+
+//! smooths the image using adaptive bilateral filter
+CV_EXPORTS_W void adaptiveBilateralFilter( InputArray src, OutputArray dst, Size ksize,
+ double sigmaSpace, Point anchor=Point(-1, -1),
+ int borderType=BORDER_DEFAULT );
+
+//! smooths the image using the box filter. Each pixel is processed in O(1) time
+CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth,
+ Size ksize, Point anchor = Point(-1,-1),
+ bool normalize = true,
+ int borderType = BORDER_DEFAULT );
+
+//! a synonym for normalized box filter
+CV_EXPORTS_W void blur( InputArray src, OutputArray dst,
+ Size ksize, Point anchor = Point(-1,-1),
+ int borderType = BORDER_DEFAULT );
+
+//! applies non-separable 2D linear filter to the image
+CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth,
+ InputArray kernel, Point anchor = Point(-1,-1),
+ double delta = 0, int borderType = BORDER_DEFAULT );
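+
+/* Illustrative sketch: applying a hand-built 3x3 sharpening kernel with
+   filter2D(). The kernel values are an arbitrary example; ddepth = -1 keeps the
+   depth of the source image `src`.
+
+       Mat kernel = (Mat_<float>(3,3) <<  0, -1,  0,
+                                         -1,  5, -1,
+                                          0, -1,  0);
+       Mat dst;
+       filter2D(src, dst, -1, kernel);
+*/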
+
+//! applies separable 2D linear filter to the image
+CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth,
+ InputArray kernelX, InputArray kernelY,
+ Point anchor = Point(-1,-1),
+ double delta = 0, int borderType = BORDER_DEFAULT );
+
+//! applies generalized Sobel operator to the image
+CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth,
+ int dx, int dy, int ksize = 3,
+ double scale = 1, double delta = 0,
+ int borderType = BORDER_DEFAULT );
+
+//! applies the vertical or horizontal Scharr operator to the image
+CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth,
+ int dx, int dy, double scale = 1, double delta = 0,
+ int borderType = BORDER_DEFAULT );
+
+//! applies Laplacian operator to the image
+CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth,
+ int ksize = 1, double scale = 1, double delta = 0,
+ int borderType = BORDER_DEFAULT );
+
+//! applies Canny edge detector and produces the edge map.
+CV_EXPORTS_W void Canny( InputArray image, OutputArray edges,
+ double threshold1, double threshold2,
+ int apertureSize = 3, bool L2gradient = false );
+
+//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria
+CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst,
+ int blockSize, int ksize = 3,
+ int borderType = BORDER_DEFAULT );
+
+//! computes Harris cornerness criteria at each image pixel
+CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize,
+ int ksize, double k,
+ int borderType = BORDER_DEFAULT );
+
+//! computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix.
+CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst,
+ int blockSize, int ksize,
+ int borderType = BORDER_DEFAULT );
+
+//! computes another complex cornerness criteria at each pixel
+CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize,
+ int borderType = BORDER_DEFAULT );
+
+//! adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria
+CV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners,
+ Size winSize, Size zeroZone,
+ TermCriteria criteria );
+
+//! finds the strong enough corners where the cornerMinEigenVal() or cornerHarris() report the local maxima
+CV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners,
+ int maxCorners, double qualityLevel, double minDistance,
+ InputArray mask = noArray(), int blockSize = 3,
+ bool useHarrisDetector = false, double k = 0.04 );
+
+//! finds lines in the black-n-white image using the standard or pyramid Hough transform
+CV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines,
+ double rho, double theta, int threshold,
+ double srn = 0, double stn = 0 );
+
+//! finds line segments in the black-n-white image using probabilistic Hough transform
+CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines,
+ double rho, double theta, int threshold,
+ double minLineLength = 0, double maxLineGap = 0 );
+
+//! finds circles in the grayscale image using 2+1 gradient Hough transform
+CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles,
+ int method, double dp, double minDist,
+ double param1 = 100, double param2 = 100,
+ int minRadius = 0, int maxRadius = 0 );
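+
+/* A usage sketch (parameter values are illustrative assumptions): `gray` is an
+   8-bit single-channel image, ideally blurred beforehand to reduce false
+   detections.
+
+       std::vector<Vec3f> circles;
+       HoughCircles(gray, circles, HOUGH_GRADIENT, 1, gray.rows/8,
+                    100,   // param1: upper Canny threshold
+                    30);   // param2: accumulator threshold for circle centers
+       // each Vec3f is (center_x, center_y, radius)
+*/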
+
+//! erodes the image (applies the local minimum operator)
+CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel,
+ Point anchor = Point(-1,-1), int iterations = 1,
+ int borderType = BORDER_CONSTANT,
+ const Scalar& borderValue = morphologyDefaultBorderValue() );
+
+//! dilates the image (applies the local maximum operator)
+CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel,
+ Point anchor = Point(-1,-1), int iterations = 1,
+ int borderType = BORDER_CONSTANT,
+ const Scalar& borderValue = morphologyDefaultBorderValue() );
+
+//! applies an advanced morphological operation to the image
+CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst,
+ int op, InputArray kernel,
+ Point anchor = Point(-1,-1), int iterations = 1,
+ int borderType = BORDER_CONSTANT,
+ const Scalar& borderValue = morphologyDefaultBorderValue() );
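+
+/* Sketch: morphological opening (erosion followed by dilation) of a binary
+   image `bw` with an elliptical structuring element. The 5x5 element size is an
+   arbitrary assumption.
+
+       Mat element = getStructuringElement(MORPH_ELLIPSE, Size(5, 5));
+       Mat opened;
+       morphologyEx(bw, opened, MORPH_OPEN, element);
+*/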
+
+//! resizes the image
+CV_EXPORTS_W void resize( InputArray src, OutputArray dst,
+ Size dsize, double fx = 0, double fy = 0,
+ int interpolation = INTER_LINEAR );
+
+//! warps the image using affine transformation
+CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst,
+ InputArray M, Size dsize,
+ int flags = INTER_LINEAR,
+ int borderMode = BORDER_CONSTANT,
+ const Scalar& borderValue = Scalar());
+
+//! warps the image using perspective transformation
+CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst,
+ InputArray M, Size dsize,
+ int flags = INTER_LINEAR,
+ int borderMode = BORDER_CONSTANT,
+ const Scalar& borderValue = Scalar());
+
+//! warps the image using the precomputed maps. The maps are stored in either floating-point or integer fixed-point format
+CV_EXPORTS_W void remap( InputArray src, OutputArray dst,
+ InputArray map1, InputArray map2,
+ int interpolation, int borderMode = BORDER_CONSTANT,
+ const Scalar& borderValue = Scalar());
+
+//! converts maps for remap from floating-point to fixed-point format or backwards
+CV_EXPORTS_W void convertMaps( InputArray map1, InputArray map2,
+ OutputArray dstmap1, OutputArray dstmap2,
+ int dstmap1type, bool nninterpolation = false );
+
+//! returns 2x3 affine transformation matrix for the planar rotation.
+CV_EXPORTS_W Mat getRotationMatrix2D( Point2f center, double angle, double scale );
+
+//! returns 3x3 perspective transformation for the corresponding 4 point pairs.
+CV_EXPORTS Mat getPerspectiveTransform( const Point2f src[], const Point2f dst[] );
+
+//! returns 2x3 affine transformation for the corresponding 3 point pairs.
+CV_EXPORTS Mat getAffineTransform( const Point2f src[], const Point2f dst[] );
+
+//! computes 2x3 affine transformation matrix that is inverse to the specified 2x3 affine transformation.
+CV_EXPORTS_W void invertAffineTransform( InputArray M, OutputArray iM );
+
+CV_EXPORTS_W Mat getPerspectiveTransform( InputArray src, InputArray dst );
+
+CV_EXPORTS_W Mat getAffineTransform( InputArray src, InputArray dst );
+
+//! extracts rectangle from the image at sub-pixel location
+CV_EXPORTS_W void getRectSubPix( InputArray image, Size patchSize,
+ Point2f center, OutputArray patch, int patchType = -1 );
+
+//! computes the integral image
+CV_EXPORTS_W void integral( InputArray src, OutputArray sum, int sdepth = -1 );
+
+//! computes the integral image and integral for the squared image
+CV_EXPORTS_AS(integral2) void integral( InputArray src, OutputArray sum,
+ OutputArray sqsum, int sdepth = -1 );
+
+//! computes the integral image, integral for the squared image and the tilted integral image
+CV_EXPORTS_AS(integral3) void integral( InputArray src, OutputArray sum,
+ OutputArray sqsum, OutputArray tilted,
+ int sdepth = -1 );
+
+//! adds image to the accumulator (dst += src). Unlike cv::add, dst and src can have different types.
+CV_EXPORTS_W void accumulate( InputArray src, InputOutputArray dst,
+ InputArray mask = noArray() );
+
+//! adds squared src image to the accumulator (dst += src*src).
+CV_EXPORTS_W void accumulateSquare( InputArray src, InputOutputArray dst,
+ InputArray mask = noArray() );
+//! adds product of the 2 images to the accumulator (dst += src1*src2).
+CV_EXPORTS_W void accumulateProduct( InputArray src1, InputArray src2,
+ InputOutputArray dst, InputArray mask=noArray() );
+
+//! updates the running average (dst = dst*(1-alpha) + src*alpha)
+CV_EXPORTS_W void accumulateWeighted( InputArray src, InputOutputArray dst,
+ double alpha, InputArray mask = noArray() );
+
+CV_EXPORTS_W Point2d phaseCorrelate(InputArray src1, InputArray src2,
+ InputArray window = noArray(), CV_OUT double* response = 0);
+
+CV_EXPORTS_W void createHanningWindow(OutputArray dst, Size winSize, int type);
+
+//! applies fixed threshold to the image
+CV_EXPORTS_W double threshold( InputArray src, OutputArray dst,
+ double thresh, double maxval, int type );
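+
+/* Sketch: global binarization of an 8-bit grayscale image `gray`. With
+   THRESH_OTSU the `thresh` argument is ignored and the optimal value is
+   computed from the image histogram and returned.
+
+       Mat bw;
+       double used = threshold(gray, bw, 0, 255, THRESH_BINARY | THRESH_OTSU);
+*/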
+
+
+//! applies variable (adaptive) threshold to the image
+CV_EXPORTS_W void adaptiveThreshold( InputArray src, OutputArray dst,
+ double maxValue, int adaptiveMethod,
+ int thresholdType, int blockSize, double C );
+
+//! smooths and downsamples the image
+CV_EXPORTS_W void pyrDown( InputArray src, OutputArray dst,
+ const Size& dstsize = Size(), int borderType = BORDER_DEFAULT );
+
+//! upsamples and smoothes the image
+CV_EXPORTS_W void pyrUp( InputArray src, OutputArray dst,
+ const Size& dstsize = Size(), int borderType = BORDER_DEFAULT );
+
+//! builds the gaussian pyramid using pyrDown() as a basic operation
+CV_EXPORTS void buildPyramid( InputArray src, OutputArrayOfArrays dst,
+ int maxlevel, int borderType = BORDER_DEFAULT );
+
+//! corrects lens distortion for the given camera matrix and distortion coefficients
+CV_EXPORTS_W void undistort( InputArray src, OutputArray dst,
+ InputArray cameraMatrix,
+ InputArray distCoeffs,
+ InputArray newCameraMatrix = noArray() );
+
+//! initializes maps for cv::remap() to correct lens distortion and optionally rectify the image
+CV_EXPORTS_W void initUndistortRectifyMap( InputArray cameraMatrix, InputArray distCoeffs,
+ InputArray R, InputArray newCameraMatrix,
+ Size size, int m1type, OutputArray map1, OutputArray map2 );
+
+//! initializes maps for cv::remap() for wide-angle
+CV_EXPORTS_W float initWideAngleProjMap( InputArray cameraMatrix, InputArray distCoeffs,
+ Size imageSize, int destImageWidth,
+ int m1type, OutputArray map1, OutputArray map2,
+ int projType = PROJ_SPHERICAL_EQRECT, double alpha = 0);
+
+//! returns the default new camera matrix (by default it is the same as cameraMatrix unless centerPrincipalPoint=true)
+CV_EXPORTS_W Mat getDefaultNewCameraMatrix( InputArray cameraMatrix, Size imgsize = Size(),
+ bool centerPrincipalPoint = false );
+
+//! returns points' coordinates after lens distortion correction
+CV_EXPORTS_W void undistortPoints( InputArray src, OutputArray dst,
+ InputArray cameraMatrix, InputArray distCoeffs,
+ InputArray R = noArray(), InputArray P = noArray());
+
+//! computes the joint dense histogram for a set of images.
+CV_EXPORTS void calcHist( const Mat* images, int nimages,
+ const int* channels, InputArray mask,
+ OutputArray hist, int dims, const int* histSize,
+ const float** ranges, bool uniform = true, bool accumulate = false );
+
+//! computes the joint sparse histogram for a set of images.
+CV_EXPORTS void calcHist( const Mat* images, int nimages,
+ const int* channels, InputArray mask,
+ SparseMat& hist, int dims,
+ const int* histSize, const float** ranges,
+ bool uniform = true, bool accumulate = false );
+
+CV_EXPORTS_W void calcHist( InputArrayOfArrays images,
+ const std::vector<int>& channels,
+ InputArray mask, OutputArray hist,
+ const std::vector<int>& histSize,
+ const std::vector<float>& ranges,
+ bool accumulate = false );
+
+//! computes back projection for the set of images
+CV_EXPORTS void calcBackProject( const Mat* images, int nimages,
+ const int* channels, InputArray hist,
+ OutputArray backProject, const float** ranges,
+ double scale = 1, bool uniform = true );
+
+//! computes back projection for the set of images
+CV_EXPORTS void calcBackProject( const Mat* images, int nimages,
+ const int* channels, const SparseMat& hist,
+ OutputArray backProject, const float** ranges,
+ double scale = 1, bool uniform = true );
+
+CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const std::vector<int>& channels,
+ InputArray hist, OutputArray dst,
+ const std::vector<float>& ranges,
+ double scale );
+
+//! compares two histograms stored in dense arrays
+CV_EXPORTS_W double compareHist( InputArray H1, InputArray H2, int method );
+
+//! compares two histograms stored in sparse arrays
+CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method );
+
+//! normalizes the grayscale image brightness and contrast by normalizing its histogram
+CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst );
+
+CV_EXPORTS float EMD( InputArray signature1, InputArray signature2,
+ int distType, InputArray cost=noArray(),
+ float* lowerBound = 0, OutputArray flow = noArray() );
+
+//! segments the image using watershed algorithm
+CV_EXPORTS_W void watershed( InputArray image, InputOutputArray markers );
+
+//! filters image using meanshift algorithm
+CV_EXPORTS_W void pyrMeanShiftFiltering( InputArray src, OutputArray dst,
+ double sp, double sr, int maxLevel = 1,
+ TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) );
+
+//! segments the image using GrabCut algorithm
+CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect,
+ InputOutputArray bgdModel, InputOutputArray fgdModel,
+ int iterCount, int mode = GC_EVAL );
+
+
+//! builds the discrete Voronoi diagram
+CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst,
+ OutputArray labels, int distanceType, int maskSize,
+ int labelType = DIST_LABEL_CCOMP );
+
+//! computes the distance transform map
+CV_EXPORTS_W void distanceTransform( InputArray src, OutputArray dst,
+ int distanceType, int maskSize );
+
+
+//! fills the semi-uniform image region starting from the specified seed point
+CV_EXPORTS int floodFill( InputOutputArray image,
+ Point seedPoint, Scalar newVal, CV_OUT Rect* rect = 0,
+ Scalar loDiff = Scalar(), Scalar upDiff = Scalar(),
+ int flags = 4 );
+
+//! fills the semi-uniform image region and/or the mask starting from the specified seed point
+CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask,
+ Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0,
+ Scalar loDiff = Scalar(), Scalar upDiff = Scalar(),
+ int flags = 4 );
+
+//! converts image from one color space to another
+CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn = 0 );
+
+// main function for all demosaicing processes
+CV_EXPORTS_W void demosaicing(InputArray _src, OutputArray _dst, int code, int dcn = 0);
+
+//! computes moments of the rasterized shape or a vector of points
+CV_EXPORTS_W Moments moments( InputArray array, bool binaryImage = false );
+
+//! computes 7 Hu invariants from the moments
+CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] );
+
+CV_EXPORTS_W void HuMoments( const Moments& m, OutputArray hu );
+
+//! computes the proximity map for the raster template and the image where the template is searched for
+CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ,
+ OutputArray result, int method );
+
+
+// computes the connected components labeled image of boolean image ``image``
+// with 4 or 8 way connectivity - returns N, the total
+// number of labels [0, N-1] where 0 represents the background label.
+// ltype specifies the output label image type, an important
+// consideration based on the total number of labels or
+// alternatively the total number of pixels in the source image.
+CV_EXPORTS_W int connectedComponents(InputArray image, OutputArray labels,
+ int connectivity = 8, int ltype = CV_32S);
+
+CV_EXPORTS_W int connectedComponentsWithStats(InputArray image, OutputArray labels,
+ OutputArray stats, OutputArray centroids,
+ int connectivity = 8, int ltype = CV_32S);
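+
+/* Sketch: labeling a binary image `bw` and reading the per-component
+   statistics. The area cutoff of 100 pixels is an arbitrary assumption.
+
+       Mat labels, stats, centroids;
+       int n = connectedComponentsWithStats(bw, labels, stats, centroids);
+       for( int i = 1; i < n; i++ )          // label 0 is the background
+       {
+           int area = stats.at<int>(i, CC_STAT_AREA);
+           if( area < 100 )
+               continue;                     // skip small blobs
+       }
+*/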
+
+
+//! retrieves contours and the hierarchical information from black-n-white image.
+CV_EXPORTS_W void findContours( InputOutputArray image, OutputArrayOfArrays contours,
+ OutputArray hierarchy, int mode,
+ int method, Point offset = Point());
+
+//! retrieves contours from black-n-white image.
+CV_EXPORTS void findContours( InputOutputArray image, OutputArrayOfArrays contours,
+ int mode, int method, Point offset = Point());
+
+//! approximates contour or a curve using Douglas-Peucker algorithm
+CV_EXPORTS_W void approxPolyDP( InputArray curve,
+ OutputArray approxCurve,
+ double epsilon, bool closed );
+
+//! computes the contour perimeter (closed=true) or a curve length
+CV_EXPORTS_W double arcLength( InputArray curve, bool closed );
+
+//! computes the bounding rectangle for a contour
+CV_EXPORTS_W Rect boundingRect( InputArray points );
+
+//! computes the contour area
+CV_EXPORTS_W double contourArea( InputArray contour, bool oriented = false );
+
+//! computes the minimal rotated rectangle for a set of points
+CV_EXPORTS_W RotatedRect minAreaRect( InputArray points );
+
+//! computes boxpoints
+CV_EXPORTS_W void boxPoints(RotatedRect box, OutputArray points);
+
+//! computes the minimal enclosing circle for a set of points
+CV_EXPORTS_W void minEnclosingCircle( InputArray points,
+ CV_OUT Point2f& center, CV_OUT float& radius );
+
+//! computes the minimal enclosing triangle for a set of points and returns its area
+CV_EXPORTS_W double minEnclosingTriangle( InputArray points, CV_OUT OutputArray triangle );
+
+//! matches two contours using one of the available algorithms
+CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2,
+ int method, double parameter );
+
+//! computes convex hull for a set of 2D points.
+CV_EXPORTS_W void convexHull( InputArray points, OutputArray hull,
+ bool clockwise = false, bool returnPoints = true );
+
+//! computes the contour convexity defects
+CV_EXPORTS_W void convexityDefects( InputArray contour, InputArray convexhull, OutputArray convexityDefects );
+
+//! returns true if the contour is convex. Does not support contours with self-intersection
+CV_EXPORTS_W bool isContourConvex( InputArray contour );
+
+//! finds intersection of two convex polygons
+CV_EXPORTS_W float intersectConvexConvex( InputArray _p1, InputArray _p2,
+ OutputArray _p12, bool handleNested = true );
+
+//! fits ellipse to the set of 2D points
+CV_EXPORTS_W RotatedRect fitEllipse( InputArray points );
+
+//! fits line to the set of 2D points using M-estimator algorithm
+CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType,
+ double param, double reps, double aeps );
+
+//! checks if the point is inside the contour. Optionally computes the signed distance from the point to the contour boundary
+CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist );
+
+//! computes whether two rotated rectangles intersect and returns the vertices of the intersecting region
+CV_EXPORTS_W int rotatedRectangleIntersection( const RotatedRect& rect1, const RotatedRect& rect2, OutputArray intersectingRegion );
+
+CV_EXPORTS_W Ptr<CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));
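+
+// Minimal usage sketch (illustrative; assumes the CLAHE interface exposes apply()):
+//   Ptr<CLAHE> clahe = createCLAHE(2.0, Size(8, 8));
+//   clahe->apply(graySrc, dst);   // graySrc: 8-bit single-channel image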
+
+//! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122.
+//! Detects position only, without translation and rotation
+CV_EXPORTS Ptr<GeneralizedHoughBallard> createGeneralizedHoughBallard();
+
+//! Guil, N., González-Linares, J.M. and Zapata, E.L. (1999). Bidimensional shape detection using an invariant approach. Pattern Recognition 32 (6): 1025-1038.
+//! Detects position, translation and rotation
+CV_EXPORTS Ptr<GeneralizedHoughGuil> createGeneralizedHoughGuil();
+
+} // cv
+
+#endif
version_suffix = ''.join( (epoch, major, minor) )
self.classes[class_name].imports.add("java.lang.String")
self.java_code[class_name]["j_code"].write("""
- public static final String VERSION = "%(v)s", NATIVE_LIBRARY_NAME = "opencv_java%(vs)s";
- public static final int VERSION_EPOCH = %(ep)s;
- public static final int VERSION_MAJOR = %(ma)s;
- public static final int VERSION_MINOR = %(mi)s;
- public static final int VERSION_REVISION = %(re)s;
- public static final String VERSION_STATUS = "%(st)s";
+ // these constants are wrapped inside functions to prevent inlining
+ private static String getVersion() { return "%(v)s"; }
+ private static String getNativeLibraryName() { return "opencv_java%(vs)s"; }
+ private static int getVersionEpoch() { return %(ep)s; }
+ private static int getVersionMajor() { return %(ma)s; }
+ private static int getVersionMinor() { return %(mi)s; }
+ private static int getVersionRevision() { return %(re)s; }
++ private static String getVersionStatus() { return "%(st)s"; }
+
+ public static final String VERSION = getVersion();
+ public static final String NATIVE_LIBRARY_NAME = getNativeLibraryName();
+ public static final int VERSION_EPOCH = getVersionEpoch();
+ public static final int VERSION_MAJOR = getVersionMajor();
+ public static final int VERSION_MINOR = getVersionMinor();
+ public static final int VERSION_REVISION = getVersionRevision();
-""" % { 'v' : version_str, 'vs' : version_suffix, 'ep' : epoch, 'ma' : major, 'mi' : minor, 're' : revision } )
++ public static final String VERSION_STATUS = getVersionStatus();
+""" % { 'v' : version_str, 'vs' : version_suffix, 'ep' : epoch, 'ma' : major, 'mi' : minor, 're' : revision, 'st': status } )
def add_class(self, decl):
--- /dev/null
-
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
+// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
+// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_OCL_HPP__
+#define __OPENCV_OCL_HPP__
+
+#include <memory>
+#include <vector>
+
+#include "opencv2/core.hpp"
+#include "opencv2/imgproc.hpp"
+#include "opencv2/objdetect.hpp"
+#include "opencv2/ml.hpp"
+
+namespace cv
+{
+ namespace ocl
+ {
+ enum DeviceType
+ {
+ CVCL_DEVICE_TYPE_DEFAULT = (1 << 0),
+ CVCL_DEVICE_TYPE_CPU = (1 << 1),
+ CVCL_DEVICE_TYPE_GPU = (1 << 2),
+ CVCL_DEVICE_TYPE_ACCELERATOR = (1 << 3),
+ //CVCL_DEVICE_TYPE_CUSTOM = (1 << 4)
+ CVCL_DEVICE_TYPE_ALL = 0xFFFFFFFF
+ };
+
+ enum DevMemRW
+ {
+ DEVICE_MEM_R_W = 0,
+ DEVICE_MEM_R_ONLY,
+ DEVICE_MEM_W_ONLY
+ };
+
+ enum DevMemType
+ {
+ DEVICE_MEM_DEFAULT = 0,
+ DEVICE_MEM_AHP, //alloc host pointer
+ DEVICE_MEM_UHP, //use host pointer
+ DEVICE_MEM_CHP, //copy host pointer
+ DEVICE_MEM_PM //persistent memory
+ };
+
+ // these classes contain OpenCL runtime information
+
+ struct PlatformInfo;
+
+ struct DeviceInfo
+ {
+ public:
+ int _id; // reserved, don't use it
+
+ DeviceType deviceType;
+ std::string deviceProfile;
+ std::string deviceVersion;
+ std::string deviceName;
+ std::string deviceVendor;
+ int deviceVendorId;
+ std::string deviceDriverVersion;
+ std::string deviceExtensions;
+
+ size_t maxWorkGroupSize;
+ std::vector<size_t> maxWorkItemSizes;
+ int maxComputeUnits;
+ size_t localMemorySize;
+ size_t maxMemAllocSize;
+
+ int deviceVersionMajor;
+ int deviceVersionMinor;
+
+ bool haveDoubleSupport;
+ bool isUnifiedMemory; // true means integrated GPU, otherwise false
+
+ std::string compilationExtraOptions;
+
+ const PlatformInfo* platform;
+
+ DeviceInfo();
+ };
+
+ struct PlatformInfo
+ {
+ int _id; // reserved, don't use it
+
+ std::string platformProfile;
+ std::string platformVersion;
+ std::string platformName;
+ std::string platformVendor;
+ std::string platformExtensons;
+
+ int platformVersionMajor;
+ int platformVersionMinor;
+
+ std::vector<const DeviceInfo*> devices;
+
+ PlatformInfo();
+ };
+
+ //////////////////////////////// Initialization & Info ////////////////////////
+ typedef std::vector<const PlatformInfo*> PlatformsInfo;
+
+ CV_EXPORTS int getOpenCLPlatforms(PlatformsInfo& platforms);
+
+ typedef std::vector<const DeviceInfo*> DevicesInfo;
+
+ CV_EXPORTS int getOpenCLDevices(DevicesInfo& devices, int deviceType = CVCL_DEVICE_TYPE_GPU,
+ const PlatformInfo* platform = NULL);
+
+ // set device you want to use
+ CV_EXPORTS void setDevice(const DeviceInfo* info);
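+
+ // Typical initialization sketch (illustrative only):
+ //   DevicesInfo devices;
+ //   getOpenCLDevices(devices, CVCL_DEVICE_TYPE_GPU);
+ //   if (!devices.empty())
+ //       setDevice(devices[0]);   // select the first GPU that was found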
+
+ enum FEATURE_TYPE
+ {
+ FEATURE_CL_DOUBLE = 1,
+ FEATURE_CL_UNIFIED_MEM,
+ FEATURE_CL_VER_1_2
+ };
+
+ // Represents OpenCL context, interface
+ class CV_EXPORTS Context
+ {
+ protected:
+ Context() { }
+ ~Context() { }
+ public:
+ static Context *getContext();
+
+ bool supportsFeature(FEATURE_TYPE featureType) const;
+ const DeviceInfo& getDeviceInfo() const;
+
+ const void* getOpenCLContextPtr() const;
+ const void* getOpenCLCommandQueuePtr() const;
+ const void* getOpenCLDeviceIDPtr() const;
+ };
+
+ inline const void *getClContextPtr()
+ {
+ return Context::getContext()->getOpenCLContextPtr();
+ }
+
+ inline const void *getClCommandQueuePtr()
+ {
+ return Context::getContext()->getOpenCLCommandQueuePtr();
+ }
+
+ CV_EXPORTS bool supportsFeature(FEATURE_TYPE featureType);
+
+ CV_EXPORTS void finish();
+
+ enum BINARY_CACHE_MODE
+ {
+ CACHE_NONE = 0, // do not cache OpenCL binary
+ CACHE_DEBUG = 0x1 << 0, // cache OpenCL binary when built in debug mode
+ CACHE_RELEASE = 0x1 << 1, // default behavior, only cache when built in release mode
+ CACHE_ALL = CACHE_DEBUG | CACHE_RELEASE, // cache opencl binary
+ };
+ //! Enable or disable OpenCL program binary caching onto local disk
+ // After a program (*.cl files in opencl/ folder) is built at runtime, we allow the
+ // compiled OpenCL program to be cached to the path automatically as "path/*.clb"
+ // binary file, which will be reused when the OpenCV executable is started again.
+ //
+ // This feature is enabled by default.
+ CV_EXPORTS void setBinaryDiskCache(int mode = CACHE_RELEASE, cv::String path = "./");
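+ // Example (sketch, path is illustrative): cache compiled kernels in both debug and release builds:
+ //   setBinaryDiskCache(CACHE_ALL, "./kernel-cache/");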
+
+ //! sets where the binary cache is to be saved
+ CV_EXPORTS void setBinaryPath(const char *path);
+
+ struct ProgramSource
+ {
+ const char* name;
+ const char* programStr;
+ const char* programHash;
+
+ // Cache in memory by name (should be unique). Caching on disk disabled.
+ inline ProgramSource(const char* _name, const char* _programStr)
+ : name(_name), programStr(_programStr), programHash(NULL)
+ {
+ }
+
+ // Cache in memory by name (should be unique). Caching on disk uses programHash mark.
+ inline ProgramSource(const char* _name, const char* _programStr, const char* _programHash)
+ : name(_name), programStr(_programStr), programHash(_programHash)
+ {
+ }
+ };
+
+ //! Calls OpenCL kernel. Pass globalThreads = NULL, and cleanUp = true, to finally clean-up without executing.
+ //! Deprecated, will be replaced
+ CV_EXPORTS void openCLExecuteKernelInterop(Context *clCxt,
+ const cv::ocl::ProgramSource& source, String kernelName,
+ size_t globalThreads[3], size_t localThreads[3],
+ std::vector< std::pair<size_t, const void *> > &args,
+ int channels, int depth, const char *build_options);
+
+ class CV_EXPORTS oclMatExpr;
+ //////////////////////////////// oclMat ////////////////////////////////
+ class CV_EXPORTS oclMat
+ {
+ public:
+ //! default constructor
+ oclMat();
+ //! constructs oclMatrix of the specified size and type (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.)
+ oclMat(int rows, int cols, int type);
+ oclMat(Size size, int type);
+ //! constructs oclMatrix and fills it with the specified value _s.
+ oclMat(int rows, int cols, int type, const Scalar &s);
+ oclMat(Size size, int type, const Scalar &s);
+ //! copy constructor
+ oclMat(const oclMat &m);
+
+ //! constructor for oclMatrix headers pointing to user-allocated data
+ oclMat(int rows, int cols, int type, void *data, size_t step = Mat::AUTO_STEP);
+ oclMat(Size size, int type, void *data, size_t step = Mat::AUTO_STEP);
+
+ //! creates a matrix header for a part of the bigger matrix
+ oclMat(const oclMat &m, const Range &rowRange, const Range &colRange);
+ oclMat(const oclMat &m, const Rect &roi);
+
+ //! builds oclMat from Mat. Performs blocking upload to device.
+ explicit oclMat (const Mat &m);
+
+ //! destructor - calls release()
+ ~oclMat();
+
+ //! assignment operators
+ oclMat &operator = (const oclMat &m);
+ //! assignment operator. Performs blocking upload to device.
+ oclMat &operator = (const Mat &m);
+ oclMat &operator = (const oclMatExpr& expr);
+
+ //! performs blocking upload of data to oclMat.
+ void upload(const cv::Mat &m);
+
+
+ //! downloads data from device to host memory. Blocking calls.
+ operator Mat() const;
+ void download(cv::Mat &m) const;
+
+ //! convert to _InputArray
+ operator _InputArray();
+
+ //! convert to _OutputArray
+ operator _OutputArray();
+
+ //! returns a new oclMatrix header for the specified row
+ oclMat row(int y) const;
+ //! returns a new oclMatrix header for the specified column
+ oclMat col(int x) const;
+ //! ... for the specified row span
+ oclMat rowRange(int startrow, int endrow) const;
+ oclMat rowRange(const Range &r) const;
+ //! ... for the specified column span
+ oclMat colRange(int startcol, int endcol) const;
+ oclMat colRange(const Range &r) const;
+
+ //! returns deep copy of the oclMatrix, i.e. the data is copied
+ oclMat clone() const;
+
+ //! copies those oclMatrix elements to "m" that are marked with non-zero mask elements.
+ // It calls m.create(this->size(), this->type()).
+ // It supports any data type
+ void copyTo( oclMat &m, const oclMat &mask = oclMat()) const;
+
+ //! converts oclMatrix to another datatype with optional scaling. See cvConvertScale.
+ //It supports 8UC1 8UC4 32SC1 32SC4 32FC1 32FC4
+ void convertTo( oclMat &m, int rtype, double alpha = 1, double beta = 0 ) const;
+
+ void assignTo( oclMat &m, int type = -1 ) const;
+
+ //! sets every oclMatrix element to s
+ //It supports 8UC1 8UC4 32SC1 32SC4 32FC1 32FC4
+ oclMat& operator = (const Scalar &s);
+ //! sets some of the oclMatrix elements to s, according to the mask
+ //It supports 8UC1 8UC4 32SC1 32SC4 32FC1 32FC4
+ oclMat& setTo(const Scalar &s, const oclMat &mask = oclMat());
+ //! creates alternative oclMatrix header for the same data, with different
+ // number of channels and/or different number of rows. see cvReshape.
+ oclMat reshape(int cn, int rows = 0) const;
+
+ //! allocates new oclMatrix data unless the oclMatrix already has specified size and type.
+ // previous data is unreferenced if needed.
+ void create(int rows, int cols, int type);
+ void create(Size size, int type);
+
+ //! allocates new oclMatrix with specified device memory type.
+ void createEx(int rows, int cols, int type,
+ DevMemRW rw_type, DevMemType mem_type);
+ void createEx(Size size, int type, DevMemRW rw_type,
+ DevMemType mem_type);
+
+ //! decreases reference counter;
+ // deallocate the data when reference counter reaches 0.
+ void release();
+
+ //! swaps with other smart pointer
+ void swap(oclMat &mat);
+
+ //! locates oclMatrix header within a parent oclMatrix. See below
+ void locateROI( Size &wholeSize, Point &ofs ) const;
+ //! moves/resizes the current oclMatrix ROI inside the parent oclMatrix.
+ oclMat& adjustROI( int dtop, int dbottom, int dleft, int dright );
+ //! extracts a rectangular sub-oclMatrix
+ // (this is a generalized form of row, rowRange etc.)
+ oclMat operator()( Range rowRange, Range colRange ) const;
+ oclMat operator()( const Rect &roi ) const;
+
+ oclMat& operator+=( const oclMat& m );
+ oclMat& operator-=( const oclMat& m );
+ oclMat& operator*=( const oclMat& m );
+ oclMat& operator/=( const oclMat& m );
+
+ //! returns true if the oclMatrix data is continuous
+ // (i.e. when there are no gaps between successive rows).
+ // similar to CV_IS_MAT_CONT(cvmat->type)
+ bool isContinuous() const;
+ //! returns element size in bytes,
+ // similar to CV_ELEM_SIZE(cvMat->type)
+ size_t elemSize() const;
+ //! returns the size of element channel in bytes.
+ size_t elemSize1() const;
+ //! returns element type, similar to CV_MAT_TYPE(cvMat->type)
+ int type() const;
+ //! returns element type; e.g. for an 8UC3 matrix it returns 8UC4, because in ocl
+ //! a 3-channel element actually occupies 4-channel space
+ int ocltype() const;
+ //! returns element type, similar to CV_MAT_DEPTH(cvMat->type)
+ int depth() const;
+ //! returns element type, similar to CV_MAT_CN(cvMat->type)
+ int channels() const;
+ //! returns the number of channels; returns 4 for a 3-channel element,
+ //! because a 3-channel element actually occupies 4-channel space
+ int oclchannels() const;
+ //! returns step/elemSize1()
+ size_t step1() const;
+ //! returns oclMatrix size:
+ // width == number of columns, height == number of rows
+ Size size() const;
+ //! returns true if oclMatrix data is NULL
+ bool empty() const;
+
+ //! returns pointer to y-th row
+ uchar* ptr(int y = 0);
+ const uchar *ptr(int y = 0) const;
+
+ //! template version of the above method
+ template<typename _Tp> _Tp *ptr(int y = 0);
+ template<typename _Tp> const _Tp *ptr(int y = 0) const;
+
+ //! matrix transposition
+ oclMat t() const;
+
+ /*! includes several bit-fields:
+ - the magic signature
+ - continuity flag
+ - depth
+ - number of channels
+ */
+ int flags;
+ //! the number of rows and columns
+ int rows, cols;
+ //! a distance between successive rows in bytes; includes the gap if any
+ size_t step;
+ //! pointer to the data(OCL memory object)
+ uchar *data;
+
+ //! pointer to the reference counter;
+ // when oclMatrix points to user-allocated data, the pointer is NULL
+ int *refcount;
+
+ //! helper fields used in locateROI and adjustROI
+ //datastart and dataend are not used in current version
+ uchar *datastart;
+ uchar *dataend;
+
+ //! OpenCL context associated with the oclMat object.
+ Context *clCxt; // TODO clCtx
+ // offset used to handle the ROI, in bytes
+ int offset;
+ // wholerows and wholecols describe the whole matrix; datastart and dataend are no longer used
+ int wholerows;
+ int wholecols;
+ };
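+
+ // Host <-> device round trip (illustrative sketch; "h_src" is any supported cv::Mat):
+ //   Mat h_src = ..., h_dst;
+ //   oclMat d_src(h_src), d_dst;                        // blocking upload to the device
+ //   threshold(d_src, d_dst, 128, 255, THRESH_BINARY);  // any ocl routine declared below
+ //   d_dst.download(h_dst);                             // blocking download back to the host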
+
+ // convert InputArray/OutputArray to oclMat references
+ CV_EXPORTS oclMat& getOclMatRef(InputArray src);
+ CV_EXPORTS oclMat& getOclMatRef(OutputArray src);
+
+ ///////////////////// mat split and merge /////////////////////////////////
+ //! Compose a multi-channel array from several single-channel arrays
+ // Support all types
+ CV_EXPORTS void merge(const oclMat *src, size_t n, oclMat &dst);
+ CV_EXPORTS void merge(const std::vector<oclMat> &src, oclMat &dst);
+
+ //! Divides multi-channel array into several single-channel arrays
+ // Support all types
+ CV_EXPORTS void split(const oclMat &src, oclMat *dst);
+ CV_EXPORTS void split(const oclMat &src, std::vector<oclMat> &dst);
+
+ ////////////////////////////// Arithmetics ///////////////////////////////////
+
+ //! adds one matrix to another with scale (dst = src1 * alpha + src2 * beta + gama)
+ // supports all data types
+ CV_EXPORTS void addWeighted(const oclMat &src1, double alpha, const oclMat &src2, double beta, double gama, oclMat &dst);
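+ // e.g. (sketch) a 50/50 blend on the device: addWeighted(d_a, 0.5, d_b, 0.5, 0.0, d_blend);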
+
+ //! adds one matrix to another (dst = src1 + src2)
+ // supports all data types
+ CV_EXPORTS void add(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask = oclMat());
+ //! adds scalar to a matrix (dst = src1 + s)
+ // supports all data types
+ CV_EXPORTS void add(const oclMat &src1, const Scalar &s, oclMat &dst, const oclMat &mask = oclMat());
+
+ //! subtracts one matrix from another (dst = src1 - src2)
+ // supports all data types
+ CV_EXPORTS void subtract(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask = oclMat());
+ //! subtracts scalar from a matrix (dst = src1 - s)
+ // supports all data types
+ CV_EXPORTS void subtract(const oclMat &src1, const Scalar &s, oclMat &dst, const oclMat &mask = oclMat());
+
+ //! computes element-wise product of the two arrays (dst = src1 * scale * src2)
+ // supports all data types
+ CV_EXPORTS void multiply(const oclMat &src1, const oclMat &src2, oclMat &dst, double scale = 1);
+ //! multiplies matrix to a number (dst = scalar * src)
+ // supports all data types
+ CV_EXPORTS void multiply(double scalar, const oclMat &src, oclMat &dst);
+
+ //! computes element-wise quotient of the two arrays (dst = src1 * scale / src2)
+ // supports all data types
+ CV_EXPORTS void divide(const oclMat &src1, const oclMat &src2, oclMat &dst, double scale = 1);
+ //! computes element-wise quotient of the two arrays (dst = scale / src)
+ // supports all data types
+ CV_EXPORTS void divide(double scale, const oclMat &src1, oclMat &dst);
+
+ //! computes element-wise minimum of the two arrays (dst = min(src1, src2))
+ // supports all data types
+ CV_EXPORTS void min(const oclMat &src1, const oclMat &src2, oclMat &dst);
+
+ //! computes element-wise maximum of the two arrays (dst = max(src1, src2))
+ // supports all data types
+ CV_EXPORTS void max(const oclMat &src1, const oclMat &src2, oclMat &dst);
+
+ //! compares elements of two arrays (dst = src1 <cmpop> src2)
+ // supports all data types
+ CV_EXPORTS void compare(const oclMat &src1, const oclMat &src2, oclMat &dst, int cmpop);
+
+ //! transposes the matrix
+ // supports all data types
+ CV_EXPORTS void transpose(const oclMat &src, oclMat &dst);
+
+ //! computes element-wise absolute values of an array (dst = abs(src))
+ // supports all data types
+ CV_EXPORTS void abs(const oclMat &src, oclMat &dst);
+
+ //! computes element-wise absolute difference of two arrays (dst = abs(src1 - src2))
+ // supports all data types
+ CV_EXPORTS void absdiff(const oclMat &src1, const oclMat &src2, oclMat &dst);
+ //! computes element-wise absolute difference of array and scalar (dst = abs(src1 - s))
+ // supports all data types
+ CV_EXPORTS void absdiff(const oclMat &src1, const Scalar &s, oclMat &dst);
+
+ //! computes mean value and standard deviation of all or selected array elements
+ // supports all data types
+ CV_EXPORTS void meanStdDev(const oclMat &mtx, Scalar &mean, Scalar &stddev);
+
+ //! computes norm of array
+ // supports NORM_INF, NORM_L1, NORM_L2
+ // supports all data types
+ CV_EXPORTS double norm(const oclMat &src1, int normType = NORM_L2);
+
+ //! computes norm of the difference between two arrays
+ // supports NORM_INF, NORM_L1, NORM_L2
+ // supports all data types
+ CV_EXPORTS double norm(const oclMat &src1, const oclMat &src2, int normType = NORM_L2);
+
+ //! reverses the order of the rows, columns or both in a matrix
+ // supports all types
+ CV_EXPORTS void flip(const oclMat &src, oclMat &dst, int flipCode);
+
+ //! computes sum of array elements
+ // support all types
+ CV_EXPORTS Scalar sum(const oclMat &m);
+ CV_EXPORTS Scalar absSum(const oclMat &m);
+ CV_EXPORTS Scalar sqrSum(const oclMat &m);
+
+ //! finds global minimum and maximum array elements and returns their values
+ // support all C1 types
+ CV_EXPORTS void minMax(const oclMat &src, double *minVal, double *maxVal = 0, const oclMat &mask = oclMat());
+
+ //! finds global minimum and maximum array elements and returns their values with locations
+ // support all C1 types
+ CV_EXPORTS void minMaxLoc(const oclMat &src, double *minVal, double *maxVal = 0, Point *minLoc = 0, Point *maxLoc = 0,
+ const oclMat &mask = oclMat());
+
+ //! counts non-zero array elements
+ // support all types
+ CV_EXPORTS int countNonZero(const oclMat &src);
+
+ //! transforms 8-bit unsigned integers using lookup table: dst(i)=lut(src(i))
+ // destination array will have the same depth as lut and the same number of channels as source
+ //It supports 8UC1 8UC4 only
+ CV_EXPORTS void LUT(const oclMat &src, const oclMat &lut, oclMat &dst);
+
+ //! only 8UC1 and 256 bins are supported now
+ CV_EXPORTS void calcHist(const oclMat &mat_src, oclMat &mat_hist);
+ //! only 8UC1 and 256 bins are supported now
+ CV_EXPORTS void equalizeHist(const oclMat &mat_src, oclMat &mat_dst);
+
+ //! only 8UC1 is supported now
+ CV_EXPORTS Ptr<cv::CLAHE> createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8));
+
+ //! bilateralFilter
+ // supports 8UC1 8UC4
+ CV_EXPORTS void bilateralFilter(const oclMat& src, oclMat& dst, int d, double sigmaColor, double sigmaSpace, int borderType=BORDER_DEFAULT);
+
+ //! Applies an adaptive bilateral filter to the input image
+ // This is not truly a bilateral filter. Instead of using user-provided fixed parameters,
+ // the function calculates a constant for each window based on the local standard deviation,
+ // and uses this constant to do the filtering.
+ // supports 8UC1, 8UC3
+ CV_EXPORTS void adaptiveBilateralFilter(const oclMat& src, oclMat& dst, Size ksize, double sigmaSpace, Point anchor = Point(-1, -1), int borderType=BORDER_DEFAULT);
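+ // e.g. (sketch, parameter values are illustrative): adaptiveBilateralFilter(d_src, d_dst, Size(9, 9), 15.0);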
+
+ //! computes exponent of each matrix element (dst = e**src)
+ // supports only CV_32FC1, CV_64FC1 type
+ CV_EXPORTS void exp(const oclMat &src, oclMat &dst);
+
+ //! computes natural logarithm of absolute value of each matrix element: dst = log(abs(src))
+ // supports only CV_32FC1, CV_64FC1 type
+ CV_EXPORTS void log(const oclMat &src, oclMat &dst);
+
+ //! computes magnitude of each (x(i), y(i)) vector
+ // supports only CV_32F, CV_64F type
+ CV_EXPORTS void magnitude(const oclMat &x, const oclMat &y, oclMat &magnitude);
+
+ //! computes angle (angle(i)) of each (x(i), y(i)) vector
+ // supports only CV_32F, CV_64F type
+ CV_EXPORTS void phase(const oclMat &x, const oclMat &y, oclMat &angle, bool angleInDegrees = false);
+
+ //! the function raises every element of the input array to p
+ // support only CV_32F, CV_64F type
+ CV_EXPORTS void pow(const oclMat &x, double p, oclMat &y);
+
+ //! converts Cartesian coordinates to polar
+ // supports only CV_32F CV_64F type
+ CV_EXPORTS void cartToPolar(const oclMat &x, const oclMat &y, oclMat &magnitude, oclMat &angle, bool angleInDegrees = false);
+
+ //! converts polar coordinates to Cartesian
+ // supports only CV_32F CV_64F type
+ CV_EXPORTS void polarToCart(const oclMat &magnitude, const oclMat &angle, oclMat &x, oclMat &y, bool angleInDegrees = false);
+
+ //! performs per-element bit-wise inversion
+ // supports all types
+ CV_EXPORTS void bitwise_not(const oclMat &src, oclMat &dst);
+
+ //! calculates per-element bit-wise disjunction of two arrays
+ // supports all types
+ CV_EXPORTS void bitwise_or(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask = oclMat());
+ CV_EXPORTS void bitwise_or(const oclMat &src1, const Scalar &s, oclMat &dst, const oclMat &mask = oclMat());
+
+ //! calculates per-element bit-wise conjunction of two arrays
+ // supports all types
+ CV_EXPORTS void bitwise_and(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask = oclMat());
+ CV_EXPORTS void bitwise_and(const oclMat &src1, const Scalar &s, oclMat &dst, const oclMat &mask = oclMat());
+
+ //! calculates per-element bit-wise "exclusive or" operation
+ // supports all types
+ CV_EXPORTS void bitwise_xor(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask = oclMat());
+ CV_EXPORTS void bitwise_xor(const oclMat &src1, const Scalar &s, oclMat &dst, const oclMat &mask = oclMat());
+
+ //! Logical operators
+ CV_EXPORTS oclMat operator ~ (const oclMat &);
+ CV_EXPORTS oclMat operator | (const oclMat &, const oclMat &);
+ CV_EXPORTS oclMat operator & (const oclMat &, const oclMat &);
+ CV_EXPORTS oclMat operator ^ (const oclMat &, const oclMat &);
+
+
+ //! Mathematics operators
+ CV_EXPORTS oclMatExpr operator + (const oclMat &src1, const oclMat &src2);
+ CV_EXPORTS oclMatExpr operator - (const oclMat &src1, const oclMat &src2);
+ CV_EXPORTS oclMatExpr operator * (const oclMat &src1, const oclMat &src2);
+ CV_EXPORTS oclMatExpr operator / (const oclMat &src1, const oclMat &src2);
+
+ struct CV_EXPORTS ConvolveBuf
+ {
+ Size result_size;
+ Size block_size;
+ Size user_block_size;
+ Size dft_size;
+
+ oclMat image_spect, templ_spect, result_spect;
+ oclMat image_block, templ_block, result_data;
+
+ void create(Size image_size, Size templ_size);
+ static Size estimateBlockSize(Size result_size, Size templ_size);
+ };
+
+ //! computes convolution of two images, may use discrete Fourier transform
+ // support only CV_32FC1 type
+ CV_EXPORTS void convolve(const oclMat &image, const oclMat &temp1, oclMat &result, bool ccorr = false);
+ CV_EXPORTS void convolve(const oclMat &image, const oclMat &temp1, oclMat &result, bool ccorr, ConvolveBuf& buf);
+
+ //! Performs a per-element multiplication of two Fourier spectrums.
+ //! Only full (not packed) CV_32FC2 complex spectrums in the interleaved format are supported for now.
+ //! support only CV_32FC2 type
+ CV_EXPORTS void mulSpectrums(const oclMat &a, const oclMat &b, oclMat &c, int flags, float scale, bool conjB = false);
+
+ CV_EXPORTS void cvtColor(const oclMat &src, oclMat &dst, int code, int dcn = 0);
+
+ //! initializes a scaled identity matrix
+ CV_EXPORTS void setIdentity(oclMat& src, const Scalar & val = Scalar(1));
+
+ //////////////////////////////// Filter Engine ////////////////////////////////
+
+ /*!
+ The Base Class for 1D or Row-wise Filters
+
+ This is the base class for linear or non-linear filters that process 1D data.
+ In particular, such filters are used for the "horizontal" filtering parts in separable filters.
+ */
+ class CV_EXPORTS BaseRowFilter_GPU
+ {
+ public:
+ BaseRowFilter_GPU(int ksize_, int anchor_, int bordertype_) : ksize(ksize_), anchor(anchor_), bordertype(bordertype_) {}
+ virtual ~BaseRowFilter_GPU() {}
+ virtual void operator()(const oclMat &src, oclMat &dst) = 0;
+ int ksize, anchor, bordertype;
+ };
+
+ /*!
+ The Base Class for Column-wise Filters
+
+ This is the base class for linear or non-linear filters that process columns of 2D arrays.
+ Such filters are used for the "vertical" filtering parts in separable filters.
+ */
+ class CV_EXPORTS BaseColumnFilter_GPU
+ {
+ public:
+ BaseColumnFilter_GPU(int ksize_, int anchor_, int bordertype_) : ksize(ksize_), anchor(anchor_), bordertype(bordertype_) {}
+ virtual ~BaseColumnFilter_GPU() {}
+ virtual void operator()(const oclMat &src, oclMat &dst) = 0;
+ int ksize, anchor, bordertype;
+ };
+
+ /*!
+ The Base Class for Non-Separable 2D Filters.
+
+ This is the base class for linear or non-linear 2D filters.
+ */
+ class CV_EXPORTS BaseFilter_GPU
+ {
+ public:
+ BaseFilter_GPU(const Size &ksize_, const Point &anchor_, const int &borderType_)
+ : ksize(ksize_), anchor(anchor_), borderType(borderType_) {}
+ virtual ~BaseFilter_GPU() {}
+ virtual void operator()(const oclMat &src, oclMat &dst) = 0;
+ Size ksize;
+ Point anchor;
+ int borderType;
+ };
+
+ /*!
+ The Base Class for Filter Engine.
+
+ The class can be used to apply an arbitrary filtering operation to an image.
+ It contains all the necessary intermediate buffers.
+ */
+ class CV_EXPORTS FilterEngine_GPU
+ {
+ public:
+ virtual ~FilterEngine_GPU() {}
+
+ virtual void apply(const oclMat &src, oclMat &dst, Rect roi = Rect(0, 0, -1, -1)) = 0;
+ };
+
+ //! returns the non-separable filter engine with the specified filter
+ CV_EXPORTS Ptr<FilterEngine_GPU> createFilter2D_GPU(const Ptr<BaseFilter_GPU> filter2D);
+
+ //! returns the primitive row filter with the specified kernel
+ CV_EXPORTS Ptr<BaseRowFilter_GPU> getLinearRowFilter_GPU(int srcType, int bufType, const Mat &rowKernel,
+ int anchor = -1, int bordertype = BORDER_DEFAULT);
+
+ //! returns the primitive column filter with the specified kernel
+ CV_EXPORTS Ptr<BaseColumnFilter_GPU> getLinearColumnFilter_GPU(int bufType, int dstType, const Mat &columnKernel,
+ int anchor = -1, int bordertype = BORDER_DEFAULT, double delta = 0.0);
+
+ //! returns the separable linear filter engine
+ CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableLinearFilter_GPU(int srcType, int dstType, const Mat &rowKernel,
+ const Mat &columnKernel, const Point &anchor = Point(-1, -1), double delta = 0.0, int bordertype = BORDER_DEFAULT);
+
+ //! returns the separable filter engine with the specified filters
+ CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableFilter_GPU(const Ptr<BaseRowFilter_GPU> &rowFilter,
+ const Ptr<BaseColumnFilter_GPU> &columnFilter);
+
+ //! returns the Gaussian filter engine
+ CV_EXPORTS Ptr<FilterEngine_GPU> createGaussianFilter_GPU(int type, Size ksize, double sigma1, double sigma2 = 0, int bordertype = BORDER_DEFAULT);
+
+ //! returns filter engine for the generalized Sobel operator
+ CV_EXPORTS Ptr<FilterEngine_GPU> createDerivFilter_GPU( int srcType, int dstType, int dx, int dy, int ksize, int borderType = BORDER_DEFAULT );
+
+ //! applies Laplacian operator to the image
+ // supports only ksize = 1 and ksize = 3 8UC1 8UC4 32FC1 32FC4 data type
+ CV_EXPORTS void Laplacian(const oclMat &src, oclMat &dst, int ddepth, int ksize = 1, double scale = 1);
+
+ //! returns 2D box filter
+ // supports CV_8UC1 and CV_8UC4 source type, dst type must be the same as source type
+ CV_EXPORTS Ptr<BaseFilter_GPU> getBoxFilter_GPU(int srcType, int dstType,
+ const Size &ksize, Point anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
+
+ //! returns box filter engine
+ CV_EXPORTS Ptr<FilterEngine_GPU> createBoxFilter_GPU(int srcType, int dstType, const Size &ksize,
+ const Point &anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
+
+ //! returns 2D filter with the specified kernel
+ // supports CV_8UC1 and CV_8UC4 types
+ CV_EXPORTS Ptr<BaseFilter_GPU> getLinearFilter_GPU(int srcType, int dstType, const Mat &kernel, const Size &ksize,
+ const Point &anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
+
+ //! returns the non-separable linear filter engine
+ CV_EXPORTS Ptr<FilterEngine_GPU> createLinearFilter_GPU(int srcType, int dstType, const Mat &kernel,
+ const Point &anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
+
+ //! smooths the image using the normalized box filter
+ // supports data type: CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4
+ // supports border type: BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT,BORDER_REFLECT_101,BORDER_WRAP
+ CV_EXPORTS void boxFilter(const oclMat &src, oclMat &dst, int ddepth, Size ksize,
+ Point anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
+
+ //! returns 2D morphological filter
+ //! only MORPH_ERODE and MORPH_DILATE are supported
+ // supports CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4 types
+ // kernel must have CV_8UC1 type, one row, and cols == ksize.width * ksize.height
+ CV_EXPORTS Ptr<BaseFilter_GPU> getMorphologyFilter_GPU(int op, int type, const Mat &kernel, const Size &ksize,
+ Point anchor = Point(-1, -1));
+
+ //! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported.
+ CV_EXPORTS Ptr<FilterEngine_GPU> createMorphologyFilter_GPU(int op, int type, const Mat &kernel,
+ const Point &anchor = Point(-1, -1), int iterations = 1);
+
+ //! a synonym for normalized box filter
+ // supports data type: CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4
+ // supports border type: BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT,BORDER_REFLECT_101
+ static inline void blur(const oclMat &src, oclMat &dst, Size ksize, Point anchor = Point(-1, -1),
+ int borderType = BORDER_CONSTANT)
+ {
+ boxFilter(src, dst, -1, ksize, anchor, borderType);
+ }
+
+ //! applies non-separable 2D linear filter to the image
+ // Note: at the moment this function only works when the anchor point is at the kernel center
+ // and the kernel size is either 3x3 or 5x5; otherwise it will fail to produce a valid result
+ CV_EXPORTS void filter2D(const oclMat &src, oclMat &dst, int ddepth, const Mat &kernel,
+ Point anchor = Point(-1, -1), int borderType = BORDER_DEFAULT);
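+ // Sketch (illustrative; kernel limited to 3x3 or 5x5 as noted above):
+ //   Mat kern = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0); // simple sharpening kernel
+ //   filter2D(d_src, d_dst, -1, kern);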
+
+ //! applies separable 2D linear filter to the image
+ CV_EXPORTS void sepFilter2D(const oclMat &src, oclMat &dst, int ddepth, const Mat &kernelX, const Mat &kernelY,
+ Point anchor = Point(-1, -1), double delta = 0.0, int bordertype = BORDER_DEFAULT);
+
+ //! applies generalized Sobel operator to the image
+ // dst.type must equal src.type
+ // supports data type: CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4
+ // supports border type: BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT,BORDER_REFLECT_101
+ CV_EXPORTS void Sobel(const oclMat &src, oclMat &dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1, double delta = 0.0, int bordertype = BORDER_DEFAULT);
+
+ //! applies the vertical or horizontal Scharr operator to the image
+ // dst.type must equal src.type
+ // supports data type: CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4
+ // supports border type: BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT,BORDER_REFLECT_101
+ CV_EXPORTS void Scharr(const oclMat &src, oclMat &dst, int ddepth, int dx, int dy, double scale = 1, double delta = 0.0, int bordertype = BORDER_DEFAULT);
+
+ //! smooths the image using Gaussian filter.
+ // dst.type must equal src.type
+ // supports data type: CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4
+ // supports border type: BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT,BORDER_REFLECT_101
+ CV_EXPORTS void GaussianBlur(const oclMat &src, oclMat &dst, Size ksize, double sigma1, double sigma2 = 0, int bordertype = BORDER_DEFAULT);
+
+ //! erodes the image (applies the local minimum operator)
+ // supports data type: CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4
+ CV_EXPORTS void erode( const oclMat &src, oclMat &dst, const Mat &kernel, Point anchor = Point(-1, -1), int iterations = 1,
+ int borderType = BORDER_CONSTANT, const Scalar &borderValue = morphologyDefaultBorderValue());
+
+ //! dilates the image (applies the local maximum operator)
+ // supports data type: CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4
+ CV_EXPORTS void dilate( const oclMat &src, oclMat &dst, const Mat &kernel, Point anchor = Point(-1, -1), int iterations = 1,
+ int borderType = BORDER_CONSTANT, const Scalar &borderValue = morphologyDefaultBorderValue());
+
+ //! applies an advanced morphological operation to the image
+ CV_EXPORTS void morphologyEx( const oclMat &src, oclMat &dst, int op, const Mat &kernel, Point anchor = Point(-1, -1), int iterations = 1,
+ int borderType = BORDER_CONSTANT, const Scalar &borderValue = morphologyDefaultBorderValue());
+
+ ////////////////////////////// Image processing //////////////////////////////
+ //! Does mean shift filtering on GPU.
+ CV_EXPORTS void meanShiftFiltering(const oclMat &src, oclMat &dst, int sp, int sr,
+ TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));
+
+ //! Does mean shift procedure on GPU.
+ CV_EXPORTS void meanShiftProc(const oclMat &src, oclMat &dstr, oclMat &dstsp, int sp, int sr,
+ TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));
+
+ //! Does mean shift segmentation with elimination of small regions.
+ CV_EXPORTS void meanShiftSegmentation(const oclMat &src, Mat &dst, int sp, int sr, int minsize,
+ TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));
+
+ //! applies fixed threshold to the image.
+ // supports CV_8UC1 and CV_32FC1 data type
+ // supports threshold type: THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, THRESH_TOZERO_INV
+ CV_EXPORTS double threshold(const oclMat &src, oclMat &dst, double thresh, double maxVal, int type = THRESH_TRUNC);
+
+ //! resizes the image
+ // Supports INTER_NEAREST, INTER_LINEAR
+ // supports CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4 types
+ CV_EXPORTS void resize(const oclMat &src, oclMat &dst, Size dsize, double fx = 0, double fy = 0, int interpolation = INTER_LINEAR);
+
+ //! Applies a generic geometrical transformation to an image.
+
+ // Supports INTER_NEAREST, INTER_LINEAR.
-
+ // Map1 supports CV_16SC2, CV_32FC2 types.
-
+ // Src supports CV_8UC1, CV_8UC2, CV_8UC4.
- // The source 1- or 4-channel image. When m is 3 or 5, the image depth should be CV 8U or CV 32F.
+ CV_EXPORTS void remap(const oclMat &src, oclMat &dst, oclMat &map1, oclMat &map2, int interpolation, int bordertype, const Scalar &value = Scalar());
+
+ //! copies 2D array to a larger destination array and pads borders with user-specifiable constant
+ // supports CV_8UC1, CV_8UC4, CV_32SC1 types
+ CV_EXPORTS void copyMakeBorder(const oclMat &src, oclMat &dst, int top, int bottom, int left, int right, int boardtype, const Scalar &value = Scalar());
+
+ //! Smoothes image using median filter
++ // The source 1- or 4-channel image. m should be 3 or 5, the image depth should be CV_8U or CV_32F.
+ CV_EXPORTS void medianFilter(const oclMat &src, oclMat &dst, int m);
+
+ //! warps the image using affine transformation
+ // Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
+ // supports CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4 types
+ CV_EXPORTS void warpAffine(const oclMat &src, oclMat &dst, const Mat &M, Size dsize, int flags = INTER_LINEAR);
+
+ //! warps the image using perspective transformation
+ // Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
+ // supports CV_8UC1, CV_8UC4, CV_32FC1 and CV_32FC4 types
+ CV_EXPORTS void warpPerspective(const oclMat &src, oclMat &dst, const Mat &M, Size dsize, int flags = INTER_LINEAR);
+
+ //! computes the integral image and integral for the squared image
+ // sum will have CV_32S type, sqsum will have CV_32F type
+ // supports only CV_8UC1 source type
+ CV_EXPORTS void integral(const oclMat &src, oclMat &sum, oclMat &sqsum);
+ CV_EXPORTS void integral(const oclMat &src, oclMat &sum);
+ CV_EXPORTS void cornerHarris(const oclMat &src, oclMat &dst, int blockSize, int ksize, double k, int bordertype = cv::BORDER_DEFAULT);
+ CV_EXPORTS void cornerHarris_dxdy(const oclMat &src, oclMat &dst, oclMat &Dx, oclMat &Dy,
+ int blockSize, int ksize, double k, int bordertype = cv::BORDER_DEFAULT);
+ CV_EXPORTS void cornerMinEigenVal(const oclMat &src, oclMat &dst, int blockSize, int ksize, int bordertype = cv::BORDER_DEFAULT);
+ CV_EXPORTS void cornerMinEigenVal_dxdy(const oclMat &src, oclMat &dst, oclMat &Dx, oclMat &Dy,
+ int blockSize, int ksize, int bordertype = cv::BORDER_DEFAULT);
+
+
+ /////////////////////////////////// ML ///////////////////////////////////////////
+
+ //! Computes the closest center for each line in source and labels it with the center's index
+ // supports CV_32FC1/CV_32FC2/CV_32FC4 data type
+ CV_EXPORTS void distanceToCenters(oclMat &dists, oclMat &labels, const oclMat &src, const oclMat ¢ers);
+
+ //!Does k-means procedure on GPU
+ // supports CV_32FC1/CV_32FC2/CV_32FC4 data type
+ CV_EXPORTS double kmeans(const oclMat &src, int K, oclMat &bestLabels,
+ TermCriteria criteria, int attemps, int flags, oclMat ¢ers);
+
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ ///////////////////////////////////////////CascadeClassifier//////////////////////////////////////////////////////////////////
+ ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ class CV_EXPORTS OclCascadeClassifier : public cv::CascadeClassifier
+ {
+ public:
+ void detectMultiScale(oclMat &image, CV_OUT std::vector<cv::Rect>& faces,
+ double scaleFactor = 1.1, int minNeighbors = 3, int flags = 0,
+ Size minSize = Size(), Size maxSize = Size());
+ };
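+
+ // Detection sketch (illustrative; load() is inherited from cv::CascadeClassifier):
+ //   OclCascadeClassifier cascade;
+ //   cascade.load("haarcascade_frontalface_alt.xml"); // any Haar cascade file
+ //   std::vector<Rect> faces;
+ //   cascade.detectMultiScale(d_gray, faces, 1.1, 3);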
+
+ /////////////////////////////// Pyramid /////////////////////////////////////
+ CV_EXPORTS void pyrDown(const oclMat &src, oclMat &dst);
+
+ //! upsamples the source image and then smoothes it
+ CV_EXPORTS void pyrUp(const oclMat &src, oclMat &dst);
+
+ //! performs linear blending of two images
+ //! to avoid accuracy errors the sum of weights shouldn't be very close to zero
+ // supports only CV_8UC1 source type
+ CV_EXPORTS void blendLinear(const oclMat &img1, const oclMat &img2, const oclMat &weights1, const oclMat &weights2, oclMat &result);
+
+ //! computes vertical sum, supports only CV_32FC1 images
+ CV_EXPORTS void columnSum(const oclMat &src, oclMat &sum);
+
+ ///////////////////////////////////////// match_template /////////////////////////////////////////////////////////////
+ struct CV_EXPORTS MatchTemplateBuf
+ {
+ Size user_block_size;
+ oclMat imagef, templf;
+ std::vector<oclMat> images;
+ std::vector<oclMat> image_sums;
+ std::vector<oclMat> image_sqsums;
+ };
+
+ //! computes the proximity map for the raster template and the image where the template is searched for
+ // Supports TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED for type 8UC1 and 8UC4
+ // Supports TM_SQDIFF, TM_CCORR for type 32FC1 and 32FC4
+ CV_EXPORTS void matchTemplate(const oclMat &image, const oclMat &templ, oclMat &result, int method);
+
+ //! computes the proximity map for the raster template and the image where the template is searched for
+ // Supports TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED for type 8UC1 and 8UC4
+ // Supports TM_SQDIFF, TM_CCORR for type 32FC1 and 32FC4
+ CV_EXPORTS void matchTemplate(const oclMat &image, const oclMat &templ, oclMat &result, int method, MatchTemplateBuf &buf);
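+
+ // e.g. (sketch) normalized cross-correlation on the device:
+ //   oclMat d_img(img), d_templ(templ), d_result;
+ //   matchTemplate(d_img, d_templ, d_result, TM_CCORR_NORMED);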
+
+
+
+ ///////////////////////////////////////////// Canny /////////////////////////////////////////////
+ struct CV_EXPORTS CannyBuf;
+
+ //! compute edges of the input image using Canny operator
+ // Support CV_8UC1 only
+ CV_EXPORTS void Canny(const oclMat &image, oclMat &edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false);
+ CV_EXPORTS void Canny(const oclMat &image, CannyBuf &buf, oclMat &edges, double low_thresh, double high_thresh, int apperture_size = 3, bool L2gradient = false);
+ CV_EXPORTS void Canny(const oclMat &dx, const oclMat &dy, oclMat &edges, double low_thresh, double high_thresh, bool L2gradient = false);
+ CV_EXPORTS void Canny(const oclMat &dx, const oclMat &dy, CannyBuf &buf, oclMat &edges, double low_thresh, double high_thresh, bool L2gradient = false);
+
+ struct CV_EXPORTS CannyBuf
+ {
+ CannyBuf() : counter(NULL) {}
+ ~CannyBuf()
+ {
+ release();
+ }
+ explicit CannyBuf(const Size &image_size, int apperture_size = 3) : counter(NULL)
+ {
+ create(image_size, apperture_size);
+ }
+ CannyBuf(const oclMat &dx_, const oclMat &dy_);
+ void create(const Size &image_size, int apperture_size = 3);
+ void release();
+
+ oclMat dx, dy;
+ oclMat dx_buf, dy_buf;
+ oclMat magBuf, mapBuf;
+ oclMat trackBuf1, trackBuf2;
+ void *counter;
+ Ptr<FilterEngine_GPU> filterDX, filterDY;
+ };
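+
+ // Sketch: reuse a CannyBuf across same-sized frames to avoid reallocations (illustrative):
+ //   CannyBuf buf(frame.size());
+ //   Canny(d_frame, buf, d_edges, 50.0, 150.0);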
+
+ ///////////////////////////////////////// Hough Transform /////////////////////////////////////////
+ //! HoughCircles
+ struct HoughCirclesBuf
+ {
+ oclMat edges;
+ oclMat accum;
+ oclMat srcPoints;
+ oclMat centers;
+ CannyBuf cannyBuf;
+ };
+
+ CV_EXPORTS void HoughCircles(const oclMat& src, oclMat& circles, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096);
+ CV_EXPORTS void HoughCircles(const oclMat& src, oclMat& circles, HoughCirclesBuf& buf, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096);
+ CV_EXPORTS void HoughCirclesDownload(const oclMat& d_circles, OutputArray h_circles);
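+
+ // Sketch (illustrative; assumes the HOUGH_GRADIENT constant from imgproc, parameter values are examples):
+ //   oclMat d_circles;
+ //   HoughCircles(d_src, d_circles, HOUGH_GRADIENT, 1.0f, 20.0f, 100, 30, 10, 50);
+ //   std::vector<Vec3f> circles;
+ //   HoughCirclesDownload(d_circles, circles);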
+
+
+ ///////////////////////////////////////// clAmdFft related /////////////////////////////////////////
+ //! Performs a forward or inverse discrete Fourier transform (1D or 2D) of floating point matrix.
+ //! Param dft_size is the size of DFT transform.
+ //!
+ //! For complex-to-real transform it is assumed that the source matrix is packed in CLFFT's format.
+ // supports src types CV_32FC1, CV_32FC2
+ // supports flags: DFT_INVERSE, DFT_REAL_OUTPUT, DFT_COMPLEX_OUTPUT, DFT_ROWS
+ // dft_size is the size of the original input, which is used for the transformation from complex to real.
+ // dft_size must be a product of powers of 2, 3 and 5
+ // real to complex dft requires at least v1.8 of clAmdFft
+ // real to complex dft output is not the same as the cpu version
+ // real to complex and complex to real do not support DFT_ROWS
+ CV_EXPORTS void dft(const oclMat &src, oclMat &dst, Size dft_size = Size(), int flags = 0);
+
+ //! implements generalized matrix product algorithm GEMM from BLAS
+ // The functionality requires clAmdBlas library
+ // only support type CV_32FC1
+ // flag GEMM_3_T is not supported
+ CV_EXPORTS void gemm(const oclMat &src1, const oclMat &src2, double alpha,
+ const oclMat &src3, double beta, oclMat &dst, int flags = 0);
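+ // e.g. (sketch) d_dst = 1.0 * d_src1 * d_src2 (CV_32FC1 matrices, clAmdBlas required):
+ //   gemm(d_src1, d_src2, 1.0, oclMat(), 0.0, d_dst);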
+
+ //////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector //////////////
+
+ struct CV_EXPORTS HOGDescriptor
+ {
+ enum { DEFAULT_WIN_SIGMA = -1 };
+ enum { DEFAULT_NLEVELS = 64 };
+ enum { DESCR_FORMAT_ROW_BY_ROW, DESCR_FORMAT_COL_BY_COL };
+
+ HOGDescriptor(Size win_size = Size(64, 128), Size block_size = Size(16, 16),
+ Size block_stride = Size(8, 8), Size cell_size = Size(8, 8),
+ int nbins = 9, double win_sigma = DEFAULT_WIN_SIGMA,
+ double threshold_L2hys = 0.2, bool gamma_correction = true,
+ int nlevels = DEFAULT_NLEVELS);
+
+ size_t getDescriptorSize() const;
+ size_t getBlockHistogramSize() const;
+
+ void setSVMDetector(const std::vector<float> &detector);
+
+ static std::vector<float> getDefaultPeopleDetector();
+ static std::vector<float> getPeopleDetector48x96();
+ static std::vector<float> getPeopleDetector64x128();
+
+ void detect(const oclMat &img, std::vector<Point> &found_locations,
+ double hit_threshold = 0, Size win_stride = Size(),
+ Size padding = Size());
+
+ void detectMultiScale(const oclMat &img, std::vector<Rect> &found_locations,
+ double hit_threshold = 0, Size win_stride = Size(),
+ Size padding = Size(), double scale0 = 1.05,
+ int group_threshold = 2);
+
+ void getDescriptors(const oclMat &img, Size win_stride,
+ oclMat &descriptors,
+ int descr_format = DESCR_FORMAT_COL_BY_COL);
+
+ Size win_size;
+ Size block_size;
+ Size block_stride;
+ Size cell_size;
+ int nbins;
+ double win_sigma;
+ double threshold_L2hys;
+ bool gamma_correction;
+ int nlevels;
+
+ protected:
+ // initialize buffers; only needs to be done once in case of multiscale detection
+ void init_buffer(const oclMat &img, Size win_stride);
+
+ void computeBlockHistograms(const oclMat &img);
+ void computeGradient(const oclMat &img, oclMat &grad, oclMat &qangle);
+
+ double getWinSigma() const;
+ bool checkDetectorSize() const;
+
+ static int numPartsWithin(int size, int part_size, int stride);
+ static Size numPartsWithin(Size size, Size part_size, Size stride);
+
+ // Coefficients of the separating plane
+ float free_coef;
+ oclMat detector;
+
+ // Results of the last classification step
+ oclMat labels;
+ Mat labels_host;
+
+ // Results of the last histogram evaluation step
+ oclMat block_hists;
+
+ // Gradient computation results
+ oclMat grad, qangle;
+
+ // scaled image
+ oclMat image_scale;
+
+ // effective size of input image (might be different from original size after scaling)
+ Size effect_size;
+ };
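+
+ // Pedestrian detection sketch (illustrative; uses only members declared above, "d_gray8u" is a CV_8UC1 oclMat):
+ //   HOGDescriptor hog;
+ //   hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
+ //   std::vector<Rect> found;
+ //   hog.detectMultiScale(d_gray8u, found);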
+
+
+ ////////////////////////feature2d_ocl/////////////////
+ /****************************************************************************************\
+ * Distance *
+ \****************************************************************************************/
+ template<typename T>
+ struct CV_EXPORTS Accumulator
+ {
+ typedef T Type;
+ };
+ template<> struct Accumulator<unsigned char>
+ {
+ typedef float Type;
+ };
+ template<> struct Accumulator<unsigned short>
+ {
+ typedef float Type;
+ };
+ template<> struct Accumulator<char>
+ {
+ typedef float Type;
+ };
+ template<> struct Accumulator<short>
+ {
+ typedef float Type;
+ };
+
+ /*
+ * Manhattan distance (city block distance) functor
+ */
+ template<class T>
+ struct CV_EXPORTS L1
+ {
+ enum { normType = NORM_L1 };
+ typedef T ValueType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ ResultType operator()( const T *a, const T *b, int size ) const
+ {
+ return normL1<ValueType, ResultType>(a, b, size);
+ }
+ };
+
+ /*
+ * Euclidean distance functor
+ */
+ template<class T>
+ struct CV_EXPORTS L2
+ {
+ enum { normType = NORM_L2 };
+ typedef T ValueType;
+ typedef typename Accumulator<T>::Type ResultType;
+
+ ResultType operator()( const T *a, const T *b, int size ) const
+ {
+ return (ResultType)std::sqrt((double)normL2Sqr<ValueType, ResultType>(a, b, size));
+ }
+ };
+
+ /*
+ * Hamming distance functor - counts the bit differences between two strings - useful for the BRIEF descriptor
+ * bit count of A exclusive-OR'ed with B
+ */
+ struct CV_EXPORTS Hamming
+ {
+ enum { normType = NORM_HAMMING };
+ typedef unsigned char ValueType;
+ typedef int ResultType;
+
+ /** this will count the bits in a ^ b
+ */
+ ResultType operator()( const unsigned char *a, const unsigned char *b, int size ) const
+ {
+ return normHamming(a, b, size);
+ }
+ };
+
+ ////////////////////////////////// BruteForceMatcher //////////////////////////////////
+
+ class CV_EXPORTS BruteForceMatcher_OCL_base
+ {
+ public:
+ enum DistType {L1Dist = 0, L2Dist, HammingDist};
+ explicit BruteForceMatcher_OCL_base(DistType distType = L2Dist);
+
+ // Add descriptors to train descriptor collection
+ void add(const std::vector<oclMat> &descCollection);
+
+ // Get train descriptors collection
+ const std::vector<oclMat> &getTrainDescriptors() const;
+
+ // Clear train descriptors collection
+ void clear();
+
+ // Returns true if there are no train descriptors in the collection
+ bool empty() const;
+
+ // Return true if the matcher supports mask in match methods
+ bool isMaskSupported() const;
+
+ // Find one best match for each query descriptor
+ void matchSingle(const oclMat &query, const oclMat &train,
+ oclMat &trainIdx, oclMat &distance,
+ const oclMat &mask = oclMat());
+
+ // Download trainIdx and distance and convert it to CPU vector with DMatch
+ static void matchDownload(const oclMat &trainIdx, const oclMat &distance, std::vector<DMatch> &matches);
+ // Convert trainIdx and distance to vector with DMatch
+ static void matchConvert(const Mat &trainIdx, const Mat &distance, std::vector<DMatch> &matches);
+
+ // Find one best match for each query descriptor
+ void match(const oclMat &query, const oclMat &train, std::vector<DMatch> &matches, const oclMat &mask = oclMat());
+
+ // Makes a gpu collection of train descriptors and masks in a suitable format for the matchCollection function
+ void makeGpuCollection(oclMat &trainCollection, oclMat &maskCollection, const std::vector<oclMat> &masks = std::vector<oclMat>());
+
+ // Find one best match from train collection for each query descriptor
+ void matchCollection(const oclMat &query, const oclMat &trainCollection,
+ oclMat &trainIdx, oclMat &imgIdx, oclMat &distance,
+ const oclMat &masks = oclMat());
+
+ // Download trainIdx, imgIdx and distance and convert it to vector with DMatch
+ static void matchDownload(const oclMat &trainIdx, const oclMat &imgIdx, const oclMat &distance, std::vector<DMatch> &matches);
+ // Convert trainIdx, imgIdx and distance to vector with DMatch
+ static void matchConvert(const Mat &trainIdx, const Mat &imgIdx, const Mat &distance, std::vector<DMatch> &matches);
+
+ // Find one best match from train collection for each query descriptor.
+ void match(const oclMat &query, std::vector<DMatch> &matches, const std::vector<oclMat> &masks = std::vector<oclMat>());
+
+ // Find k best matches for each query descriptor (in increasing order of distances)
+ void knnMatchSingle(const oclMat &query, const oclMat &train,
+ oclMat &trainIdx, oclMat &distance, oclMat &allDist, int k,
+ const oclMat &mask = oclMat());
+
+ // Download trainIdx and distance and convert it to vector with DMatch
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ static void knnMatchDownload(const oclMat &trainIdx, const oclMat &distance,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+ // Convert trainIdx and distance to vector with DMatch
+ static void knnMatchConvert(const Mat &trainIdx, const Mat &distance,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+
+ // Find k best matches for each query descriptor (in increasing order of distances).
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ void knnMatch(const oclMat &query, const oclMat &train,
+ std::vector< std::vector<DMatch> > &matches, int k, const oclMat &mask = oclMat(),
+ bool compactResult = false);
+
+ // Find k best matches from train collection for each query descriptor (in increasing order of distances)
+ void knnMatch2Collection(const oclMat &query, const oclMat &trainCollection,
+ oclMat &trainIdx, oclMat &imgIdx, oclMat &distance,
+ const oclMat &maskCollection = oclMat());
+
+ // Download trainIdx and distance and convert it to vector with DMatch
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ static void knnMatch2Download(const oclMat &trainIdx, const oclMat &imgIdx, const oclMat &distance,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+ // Convert trainIdx and distance to vector with DMatch
+ static void knnMatch2Convert(const Mat &trainIdx, const Mat &imgIdx, const Mat &distance,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+
+ // Find k best matches for each query descriptor (in increasing order of distances).
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ void knnMatch(const oclMat &query, std::vector< std::vector<DMatch> > &matches, int k,
+ const std::vector<oclMat> &masks = std::vector<oclMat>(), bool compactResult = false);
+
+ // Find best matches for each query descriptor which have distance less than maxDistance.
+ // nMatches.at<int>(0, queryIdx) will contain the match count for queryIdx.
+ // Note that nMatches can be greater than trainIdx.cols - it means that the matcher did not find all matches
+ // because it did not have enough memory.
+ // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nTrain / 100), 10);
+ // otherwise the user can pass their own allocated trainIdx and distance with size nQuery x nMaxMatches.
+ // Matches are not sorted.
+ void radiusMatchSingle(const oclMat &query, const oclMat &train,
+ oclMat &trainIdx, oclMat &distance, oclMat &nMatches, float maxDistance,
+ const oclMat &mask = oclMat());
+
+ // Download trainIdx, nMatches and distance and convert them to a vector of DMatch.
+ // matches will be sorted in increasing order of distances.
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ static void radiusMatchDownload(const oclMat &trainIdx, const oclMat &distance, const oclMat &nMatches,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+ // Convert trainIdx, nMatches and distance to a vector of DMatch.
+ static void radiusMatchConvert(const Mat &trainIdx, const Mat &distance, const Mat &nMatches,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+
+ // Find best matches for each query descriptor which have distance less than maxDistance
+ // (in increasing order of distances).
+ void radiusMatch(const oclMat &query, const oclMat &train,
+ std::vector< std::vector<DMatch> > &matches, float maxDistance,
+ const oclMat &mask = oclMat(), bool compactResult = false);
+
+ // Find best matches for each query descriptor which have distance less than maxDistance.
+ // If trainIdx is empty, then trainIdx and distance will be created with size nQuery x max((nQuery / 100), 10);
+ // otherwise the user can pass their own allocated trainIdx and distance with size nQuery x nMaxMatches.
+ // Matches are not sorted.
+ void radiusMatchCollection(const oclMat &query, oclMat &trainIdx, oclMat &imgIdx, oclMat &distance, oclMat &nMatches, float maxDistance,
+ const std::vector<oclMat> &masks = std::vector<oclMat>());
+
+ // Download trainIdx, imgIdx, nMatches and distance and convert them to a vector of DMatch.
+ // matches will be sorted in increasing order of distances.
+ // compactResult is used when mask is not empty. If compactResult is false matches
+ // vector will have the same size as queryDescriptors rows. If compactResult is true
+ // matches vector will not contain matches for fully masked out query descriptors.
+ static void radiusMatchDownload(const oclMat &trainIdx, const oclMat &imgIdx, const oclMat &distance, const oclMat &nMatches,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+ // Convert trainIdx, imgIdx, nMatches and distance to a vector of DMatch.
+ static void radiusMatchConvert(const Mat &trainIdx, const Mat &imgIdx, const Mat &distance, const Mat &nMatches,
+ std::vector< std::vector<DMatch> > &matches, bool compactResult = false);
+
+ // Find best matches from train collection for each query descriptor which have distance less than
+ // maxDistance (in increasing order of distances).
+ void radiusMatch(const oclMat &query, std::vector< std::vector<DMatch> > &matches, float maxDistance,
+ const std::vector<oclMat> &masks = std::vector<oclMat>(), bool compactResult = false);
+
+ DistType distType;
+
+ private:
+ std::vector<oclMat> trainDescCollection;
+ };
+
+ template <class Distance>
+ class CV_EXPORTS BruteForceMatcher_OCL;
+
+ template <typename T>
+ class CV_EXPORTS BruteForceMatcher_OCL< L1<T> > : public BruteForceMatcher_OCL_base
+ {
+ public:
+ explicit BruteForceMatcher_OCL() : BruteForceMatcher_OCL_base(L1Dist) {}
+ explicit BruteForceMatcher_OCL(L1<T> /*d*/) : BruteForceMatcher_OCL_base(L1Dist) {}
+ };
+ template <typename T>
+ class CV_EXPORTS BruteForceMatcher_OCL< L2<T> > : public BruteForceMatcher_OCL_base
+ {
+ public:
+ explicit BruteForceMatcher_OCL() : BruteForceMatcher_OCL_base(L2Dist) {}
+ explicit BruteForceMatcher_OCL(L2<T> /*d*/) : BruteForceMatcher_OCL_base(L2Dist) {}
+ };
+ template <> class CV_EXPORTS BruteForceMatcher_OCL< Hamming > : public BruteForceMatcher_OCL_base
+ {
+ public:
+ explicit BruteForceMatcher_OCL() : BruteForceMatcher_OCL_base(HammingDist) {}
+ explicit BruteForceMatcher_OCL(Hamming /*d*/) : BruteForceMatcher_OCL_base(HammingDist) {}
+ };
+
+ class CV_EXPORTS BFMatcher_OCL : public BruteForceMatcher_OCL_base
+ {
+ public:
+ explicit BFMatcher_OCL(int norm = NORM_L2) : BruteForceMatcher_OCL_base(norm == NORM_L1 ? L1Dist : norm == NORM_L2 ? L2Dist : HammingDist) {}
+ };
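+
+ // Usage sketch (illustrative only): one-best matching of two descriptor sets with the
+ // OCL brute-force matcher. It assumes `queryDesc` and `trainDesc` are CV_32F descriptor
+ // matrices (one descriptor per row) already computed on the host.
+ //
+ //   cv::ocl::oclMat d_query(queryDesc), d_train(trainDesc);   // upload to the device
+ //   cv::ocl::BFMatcher_OCL matcher(cv::NORM_L2);
+ //   std::vector<cv::DMatch> matches;
+ //   matcher.match(d_query, d_train, matches);                 // one DMatch per query row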
+
+ class CV_EXPORTS GoodFeaturesToTrackDetector_OCL
+ {
+ public:
+ explicit GoodFeaturesToTrackDetector_OCL(int maxCorners = 1000, double qualityLevel = 0.01, double minDistance = 0.0,
+ int blockSize = 3, bool useHarrisDetector = false, double harrisK = 0.04);
+
+ //! returns a 1-row matrix of type CV_32FC2
+ void operator ()(const oclMat& image, oclMat& corners, const oclMat& mask = oclMat());
+ //! download points of type Point2f to a vector. the vector's content will be erased
+ void downloadPoints(const oclMat &points, std::vector<Point2f> &points_v);
+
+ int maxCorners;
+ double qualityLevel;
+ double minDistance;
+
+ int blockSize;
+ bool useHarrisDetector;
+ double harrisK;
+ void releaseMemory()
+ {
+ Dx_.release();
+ Dy_.release();
+ eig_.release();
+ minMaxbuf_.release();
+ tmpCorners_.release();
+ }
+ private:
+ oclMat Dx_;
+ oclMat Dy_;
+ oclMat eig_;
+ oclMat minMaxbuf_;
+ oclMat tmpCorners_;
+ };
+
+ inline GoodFeaturesToTrackDetector_OCL::GoodFeaturesToTrackDetector_OCL(int maxCorners_, double qualityLevel_, double minDistance_,
+ int blockSize_, bool useHarrisDetector_, double harrisK_)
+ {
+ maxCorners = maxCorners_;
+ qualityLevel = qualityLevel_;
+ minDistance = minDistance_;
+ blockSize = blockSize_;
+ useHarrisDetector = useHarrisDetector_;
+ harrisK = harrisK_;
+ }
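+
+ // Usage sketch (illustrative only): detect up to 500 corners on a grayscale frame that
+ // has already been uploaded to an oclMat `d_gray`.
+ //
+ //   cv::ocl::GoodFeaturesToTrackDetector_OCL gftt(500, 0.01, 10.0);
+ //   cv::ocl::oclMat d_corners;
+ //   gftt(d_gray, d_corners);                       // 1 x N matrix of CV_32FC2 points
+ //   std::vector<cv::Point2f> corners;
+ //   gftt.downloadPoints(d_corners, corners);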
+
+ /////////////////////////////// PyrLKOpticalFlow /////////////////////////////////////
+
+ class CV_EXPORTS PyrLKOpticalFlow
+ {
+ public:
+ PyrLKOpticalFlow()
+ {
+ winSize = Size(21, 21);
+ maxLevel = 3;
+ iters = 30;
+ derivLambda = 0.5;
+ useInitialFlow = false;
+ minEigThreshold = 1e-4f;
+ getMinEigenVals = false;
+ isDeviceArch11_ = false;
+ }
+
+ void sparse(const oclMat &prevImg, const oclMat &nextImg, const oclMat &prevPts, oclMat &nextPts,
+ oclMat &status, oclMat *err = 0);
+
+ void dense(const oclMat &prevImg, const oclMat &nextImg, oclMat &u, oclMat &v, oclMat *err = 0);
+
+ Size winSize;
+ int maxLevel;
+ int iters;
+ double derivLambda;
+ bool useInitialFlow;
+ float minEigThreshold;
+ bool getMinEigenVals;
+
+ void releaseMemory()
+ {
+ dx_calcBuf_.release();
+ dy_calcBuf_.release();
+
+ prevPyr_.clear();
+ nextPyr_.clear();
+
+ dx_buf_.release();
+ dy_buf_.release();
+ }
+
+ private:
+ void calcSharrDeriv(const oclMat &src, oclMat &dx, oclMat &dy);
+
+ void buildImagePyramid(const oclMat &img0, std::vector<oclMat> &pyr, bool withBorder);
+
+ oclMat dx_calcBuf_;
+ oclMat dy_calcBuf_;
+
+ std::vector<oclMat> prevPyr_;
+ std::vector<oclMat> nextPyr_;
+
+ oclMat dx_buf_;
+ oclMat dy_buf_;
+
+ oclMat uPyr_[2];
+ oclMat vPyr_[2];
+
+ bool isDeviceArch11_;
+ };
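+
+ // Usage sketch (illustrative only): track previously detected points from `d_prev` to
+ // `d_next` (8-bit grayscale oclMat frames); `d_prevPts` is assumed to be a 1 x N CV_32FC2
+ // oclMat of point coordinates such as the one produced by GoodFeaturesToTrackDetector_OCL.
+ //
+ //   cv::ocl::PyrLKOpticalFlow lk;
+ //   lk.winSize  = cv::Size(21, 21);
+ //   lk.maxLevel = 3;
+ //   cv::ocl::oclMat d_nextPts, d_status;
+ //   lk.sparse(d_prev, d_next, d_prevPts, d_nextPts, d_status);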
+
+ class CV_EXPORTS FarnebackOpticalFlow
+ {
+ public:
+ FarnebackOpticalFlow();
+
+ int numLevels;
+ double pyrScale;
+ bool fastPyramids;
+ int winSize;
+ int numIters;
+ int polyN;
+ double polySigma;
+ int flags;
+
+ void operator ()(const oclMat &frame0, const oclMat &frame1, oclMat &flowx, oclMat &flowy);
+
+ void releaseMemory();
+
+ private:
+ void prepareGaussian(
+ int n, double sigma, float *g, float *xg, float *xxg,
+ double &ig11, double &ig03, double &ig33, double &ig55);
+
+ void setPolynomialExpansionConsts(int n, double sigma);
+
+ void updateFlow_boxFilter(
+ const oclMat& R0, const oclMat& R1, oclMat& flowx, oclMat &flowy,
+ oclMat& M, oclMat &bufM, int blockSize, bool updateMatrices);
+
+ void updateFlow_gaussianBlur(
+ const oclMat& R0, const oclMat& R1, oclMat& flowx, oclMat& flowy,
+ oclMat& M, oclMat &bufM, int blockSize, bool updateMatrices);
+
+ oclMat frames_[2];
+ oclMat pyrLevel_[2], M_, bufM_, R_[2], blurredFrame_[2];
+ std::vector<oclMat> pyramid0_, pyramid1_;
+ };
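+
+ // Usage sketch (illustrative only): dense Farneback flow between two 8-bit grayscale
+ // frames already on the device; the x and y flow components are returned separately.
+ //
+ //   cv::ocl::FarnebackOpticalFlow farn;
+ //   farn.numLevels = 5;
+ //   farn.winSize   = 13;
+ //   cv::ocl::oclMat d_flowx, d_flowy;
+ //   farn(d_prev, d_next, d_flowx, d_flowy);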
+
+ //////////////// build warping maps ////////////////////
+ //! builds plane warping maps
+ CV_EXPORTS void buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat &R, const Mat &T, float scale, oclMat &map_x, oclMat &map_y);
+ //! builds cylindrical warping maps
+ CV_EXPORTS void buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat &R, float scale, oclMat &map_x, oclMat &map_y);
+ //! builds spherical warping maps
+ CV_EXPORTS void buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat &R, float scale, oclMat &map_x, oclMat &map_y);
+ //! builds Affine warping maps
+ CV_EXPORTS void buildWarpAffineMaps(const Mat &M, bool inverse, Size dsize, oclMat &xmap, oclMat &ymap);
+
+ //! builds Perspective warping maps
+ CV_EXPORTS void buildWarpPerspectiveMaps(const Mat &M, bool inverse, Size dsize, oclMat &xmap, oclMat &ymap);
+
+ ///////////////////////////////////// interpolate frames //////////////////////////////////////////////
+ //! Interpolate frames (images) using provided optical flow (displacement field).
+ //! frame0 - frame 0 (32-bit floating point images, single channel)
+ //! frame1 - frame 1 (the same type and size)
+ //! fu - forward horizontal displacement
+ //! fv - forward vertical displacement
+ //! bu - backward horizontal displacement
+ //! bv - backward vertical displacement
+ //! pos - new frame position
+ //! newFrame - new frame
+ //! buf - temporary buffer; it will have size width x 6*height and type CV_32FC1, and contains 6 oclMat:
+ //! occlusion masks 0, occlusion masks 1,
+ //! interpolated forward flow 0, interpolated forward flow 1,
+ //! interpolated backward flow 0, interpolated backward flow 1
+ //!
+ CV_EXPORTS void interpolateFrames(const oclMat &frame0, const oclMat &frame1,
+ const oclMat &fu, const oclMat &fv,
+ const oclMat &bu, const oclMat &bv,
+ float pos, oclMat &newFrame, oclMat &buf);
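+
+ // Usage sketch (illustrative only): synthesize the frame halfway between frame0 and
+ // frame1 from precomputed forward (fu, fv) and backward (bu, bv) flow fields, e.g. the
+ // output of one of the optical flow classes above.
+ //
+ //   cv::ocl::oclMat d_newFrame, d_buf;   // buf is (re)allocated by the call
+ //   cv::ocl::interpolateFrames(d_f0, d_f1, d_fu, d_fv, d_bu, d_bv, 0.5f, d_newFrame, d_buf);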
+
+ //! computes moments of the rasterized shape or a vector of points
+ CV_EXPORTS Moments ocl_moments(InputArray _array, bool binaryImage);
+
+ class CV_EXPORTS StereoBM_OCL
+ {
+ public:
+ enum { BASIC_PRESET = 0, PREFILTER_XSOBEL = 1 };
+
+ enum { DEFAULT_NDISP = 64, DEFAULT_WINSZ = 19 };
+
+ //! the default constructor
+ StereoBM_OCL();
+ //! the full constructor taking the camera-specific preset, number of disparities and the SAD window size. ndisparities must be a multiple of 8.
+ StereoBM_OCL(int preset, int ndisparities = DEFAULT_NDISP, int winSize = DEFAULT_WINSZ);
+
+ //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair
+ //! Output disparity has CV_8U type.
+ void operator() ( const oclMat &left, const oclMat &right, oclMat &disparity);
+
+ //! Some heuristics that try to estimate
+ // whether the current GPU will be faster than the CPU for this algorithm.
+ // It queries the current active device.
+ static bool checkIfGpuCallReasonable();
+
+ int preset;
+ int ndisp;
+ int winSize;
+
+ // If avergeTexThreshold == 0 => post-processing is disabled
+ // If avergeTexThreshold != 0 then disparity is set to 0 at each point (x,y) where for the left image
+ // SumOfHorizontalGradientsInWindow(x, y, winSize) < (winSize * winSize) * avergeTexThreshold
+ // i.e. the input left image is low-textured.
+ float avergeTexThreshold;
+ private:
+ oclMat minSSD, leBuf, riBuf;
+ };
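+
+ // Usage sketch (illustrative only): block-matching disparity for an already rectified
+ // 8-bit grayscale pair `d_left` / `d_right`.
+ //
+ //   cv::ocl::StereoBM_OCL bm(cv::ocl::StereoBM_OCL::BASIC_PRESET, 64, 19);
+ //   cv::ocl::oclMat d_disp;               // CV_8U disparity map
+ //   bm(d_left, d_right, d_disp);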
+
+ class CV_EXPORTS StereoBeliefPropagation
+ {
+ public:
+ enum { DEFAULT_NDISP = 64 };
+ enum { DEFAULT_ITERS = 5 };
+ enum { DEFAULT_LEVELS = 5 };
+ static void estimateRecommendedParams(int width, int height, int &ndisp, int &iters, int &levels);
+ explicit StereoBeliefPropagation(int ndisp = DEFAULT_NDISP,
+ int iters = DEFAULT_ITERS,
+ int levels = DEFAULT_LEVELS,
+ int msg_type = CV_16S);
+ StereoBeliefPropagation(int ndisp, int iters, int levels,
+ float max_data_term, float data_weight,
+ float max_disc_term, float disc_single_jump,
+ int msg_type = CV_32F);
+ void operator()(const oclMat &left, const oclMat &right, oclMat &disparity);
+ void operator()(const oclMat &data, oclMat &disparity);
+ int ndisp;
+ int iters;
+ int levels;
+ float max_data_term;
+ float data_weight;
+ float max_disc_term;
+ float disc_single_jump;
+ int msg_type;
+ private:
+ oclMat u, d, l, r, u2, d2, l2, r2;
+ std::vector<oclMat> datas;
+ oclMat out;
+ };
+
+ class CV_EXPORTS StereoConstantSpaceBP
+ {
+ public:
+ enum { DEFAULT_NDISP = 128 };
+ enum { DEFAULT_ITERS = 8 };
+ enum { DEFAULT_LEVELS = 4 };
+ enum { DEFAULT_NR_PLANE = 4 };
+ static void estimateRecommendedParams(int width, int height, int &ndisp, int &iters, int &levels, int &nr_plane);
+ explicit StereoConstantSpaceBP(
+ int ndisp = DEFAULT_NDISP,
+ int iters = DEFAULT_ITERS,
+ int levels = DEFAULT_LEVELS,
+ int nr_plane = DEFAULT_NR_PLANE,
+ int msg_type = CV_32F);
+ StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane,
+ float max_data_term, float data_weight, float max_disc_term, float disc_single_jump,
+ int min_disp_th = 0,
+ int msg_type = CV_32F);
+ void operator()(const oclMat &left, const oclMat &right, oclMat &disparity);
+ int ndisp;
+ int iters;
+ int levels;
+ int nr_plane;
+ float max_data_term;
+ float data_weight;
+ float max_disc_term;
+ float disc_single_jump;
+ int min_disp_th;
+ int msg_type;
+ bool use_local_init_data_cost;
+ private:
+ oclMat u[2], d[2], l[2], r[2];
+ oclMat disp_selected_pyr[2];
+ oclMat data_cost;
+ oclMat data_cost_selected;
+ oclMat temp;
+ oclMat out;
+ };
+
+ // Implementation of the Zach, Pock and Bischof Dual TV-L1 Optical Flow method
+ //
+ // see reference:
+ // [1] C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
+ // [2] Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
+ class CV_EXPORTS OpticalFlowDual_TVL1_OCL
+ {
+ public:
+ OpticalFlowDual_TVL1_OCL();
+
+ void operator ()(const oclMat& I0, const oclMat& I1, oclMat& flowx, oclMat& flowy);
+
+ void collectGarbage();
+
+ /**
+ * Time step of the numerical scheme.
+ */
+ double tau;
+
+ /**
+ * Weight parameter for the data term, attachment parameter.
+ * This is the most relevant parameter, which determines the smoothness of the output.
+ * The smaller this parameter is, the smoother the solutions we obtain.
+ * It depends on the range of motions of the images, so its value should be adapted to each image sequence.
+ */
+ double lambda;
+
+ /**
+ * Weight parameter for (u - v)^2, tightness parameter.
+ * It serves as a link between the attachment and the regularization terms.
+ * In theory, it should have a small value in order to maintain both parts in correspondence.
+ * The method is stable for a large range of values of this parameter.
+ */
+ double theta;
+
+ /**
+ * Number of scales used to create the pyramid of images.
+ */
+ int nscales;
+
+ /**
+ * Number of warpings per scale.
+ * Represents the number of times that I1(x+u0) and grad( I1(x+u0) ) are computed per scale.
+ * This is a parameter that assures the stability of the method.
+ * It also affects the running time, so it is a compromise between speed and accuracy.
+ */
+ int warps;
+
+ /**
+ * Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time.
+ * A small value will yield more accurate solutions at the expense of a slower convergence.
+ */
+ double epsilon;
+
+ /**
+ * Stopping criterion iterations number used in the numerical scheme.
+ */
+ int iterations;
+
+ bool useInitialFlow;
+
+ private:
+ void procOneScale(const oclMat& I0, const oclMat& I1, oclMat& u1, oclMat& u2);
+
+ std::vector<oclMat> I0s;
+ std::vector<oclMat> I1s;
+ std::vector<oclMat> u1s;
+ std::vector<oclMat> u2s;
+
+ oclMat I1x_buf;
+ oclMat I1y_buf;
+
+ oclMat I1w_buf;
+ oclMat I1wx_buf;
+ oclMat I1wy_buf;
+
+ oclMat grad_buf;
+ oclMat rho_c_buf;
+
+ oclMat p11_buf;
+ oclMat p12_buf;
+ oclMat p21_buf;
+ oclMat p22_buf;
+
+ oclMat diff_buf;
+ oclMat norm_buf;
+ };
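+
+ // Usage sketch (illustrative only): dense TV-L1 flow between two single-channel frames
+ // `d_I0` and `d_I1` already uploaded to the device.
+ //
+ //   cv::ocl::OpticalFlowDual_TVL1_OCL tvl1;
+ //   cv::ocl::oclMat d_flowx, d_flowy;
+ //   tvl1(d_I0, d_I1, d_flowx, d_flowy);
+ //   tvl1.collectGarbage();                // release the internal buffers when done
+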
+ // current supported sorting methods
+ enum
+ {
+ SORT_BITONIC, // only supports power-of-2 buffer sizes
+ SORT_SELECTION, // cannot sort duplicate keys
+ SORT_MERGE,
+ SORT_RADIX // only supports signed int/float keys (CV_32S/CV_32F)
+ };
+ //! Returns the sorted result of all the elements in input based on equivalent keys.
+ //
+ // The element unit in the values to be sorted is determined from the data type,
+ // i.e., a CV_32FC2 input {a1a2, b1b2} will be considered as two elements, regardless of its
+ // matrix dimensions.
+ // Both keys and values are sorted in place.
+ // Keys must be a single-channel oclMat.
+ //
+ // Example:
+ // input -
+ // keys = {2, 3, 1} (CV_8UC1)
+ // values = {10,5, 4,3, 6,2} (CV_8UC2)
+ // sortByKey(keys, values, SORT_SELECTION, false);
+ // output -
+ // keys = {1, 2, 3} (CV_8UC1)
+ // values = {6,2, 10,5, 4,3} (CV_8UC2)
+ CV_EXPORTS void sortByKey(oclMat& keys, oclMat& values, int method, bool isGreaterThan = false);
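+
+ // Usage sketch (illustrative only), mirroring the example above: sort value pairs by
+ // their keys on the device, assuming `hostKeys` / `hostValues` are the CV_8UC1 / CV_8UC2
+ // matrices from that example.
+ //
+ //   cv::ocl::oclMat keys(hostKeys), values(hostValues);
+ //   cv::ocl::sortByKey(keys, values, cv::ocl::SORT_MERGE);   // ascending by default
+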
+ /*!Base class for MOG and MOG2!*/
+ class CV_EXPORTS BackgroundSubtractor
+ {
+ public:
+ //! the virtual destructor
+ virtual ~BackgroundSubtractor();
+ //! the update operator that takes the next video frame and returns the current foreground mask as an 8-bit binary image.
+ virtual void operator()(const oclMat& image, oclMat& fgmask, float learningRate);
+
+ //! computes a background image
+ virtual void getBackgroundImage(oclMat& backgroundImage) const = 0;
+ };
+ /*!
+ Gaussian Mixture-based Background/Foreground Segmentation Algorithm
+
+ The class implements the following algorithm:
+ "An improved adaptive background mixture model for real-time tracking with shadow detection"
+ P. KadewTraKuPong and R. Bowden,
+ Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001."
+ http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
+ */
+ class CV_EXPORTS MOG: public cv::ocl::BackgroundSubtractor
+ {
+ public:
+ //! the default constructor
+ MOG(int nmixtures = -1);
+
+ //! re-initialization method
+ void initialize(Size frameSize, int frameType);
+
+ //! the update operator
+ void operator()(const oclMat& frame, oclMat& fgmask, float learningRate = 0.f);
+
+ //! computes a background image which is the mean of all background Gaussians
+ void getBackgroundImage(oclMat& backgroundImage) const;
+
+ //! releases all inner buffers
+ void release();
+
+ int history;
+ float varThreshold;
+ float backgroundRatio;
+ float noiseSigma;
+
+ private:
+ int nmixtures_;
+
+ Size frameSize_;
+ int frameType_;
+ int nframes_;
+
+ oclMat weight_;
+ oclMat sortKey_;
+ oclMat mean_;
+ oclMat var_;
+ };
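+
+ // Usage sketch (illustrative only): per-frame foreground segmentation with the OCL MOG
+ // model (MOG2 below is used the same way); `d_frame` is assumed to be the current video
+ // frame on the device.
+ //
+ //   cv::ocl::MOG mog;
+ //   cv::ocl::oclMat d_fgmask, d_background;
+ //   mog(d_frame, d_fgmask, 0.01f);          // update the model, get the 8-bit foreground mask
+ //   mog.getBackgroundImage(d_background);   // mean of the background Gaussians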
+
+ /*!
+ The class implements the following algorithm:
+ "Improved adaptive Gausian mixture model for background subtraction"
+ Z.Zivkovic
+ International Conference Pattern Recognition, UK, August, 2004.
+ http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
+ */
+ class CV_EXPORTS MOG2: public cv::ocl::BackgroundSubtractor
+ {
+ public:
+ //! the default constructor
+ MOG2(int nmixtures = -1);
+
+ //! re-initialization method
+ void initialize(Size frameSize, int frameType);
+
+ //! the update operator
+ void operator()(const oclMat& frame, oclMat& fgmask, float learningRate = -1.0f);
+
+ //! computes a background image which is the mean of all background Gaussians
+ void getBackgroundImage(oclMat& backgroundImage) const;
+
+ //! releases all inner buffers
+ void release();
+
+ // parameters
+ // you should call initialize after parameters changes
+
+ int history;
+
+ //! here it is the maximum allowed number of mixture components.
+ //! Actual number is determined dynamically per pixel
+ float varThreshold;
+ // threshold on the squared Mahalanobis distance to decide if it is well described
+ // by the background model or not. Related to Cthr from the paper.
+ // This does not influence the update of the background. A typical value could be 4 sigma
+ // and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
+
+ /////////////////////////
+ // less important parameters - things you might change but be careful
+ ////////////////////////
+
+ float backgroundRatio;
+ // corresponds to fTB=1-cf from the paper
+ // TB - threshold when the component becomes significant enough to be included into
+ // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.9.
+ // For alpha=0.001 it means that the mode should exist for approximately 105 frames before
+ // it is considered foreground.
+ // float noiseSigma;
+ float varThresholdGen;
+
+ //corresponds to Tg - threshold on the squared Mahalan. dist. to decide
+ //when a sample is close to the existing components. If it is not close
+ //to any, a new component will be generated. I use 3 sigma => Tg=3*3=9.
+ //A smaller Tg leads to more generated components, and a higher Tg might
+ //lead to a small number of components, but they can grow too large.
+ float fVarInit;
+ float fVarMin;
+ float fVarMax;
+
+ //initial variance for the newly generated components.
+ //It will influence the speed of adaptation. A good guess should be made.
+ //A simple way is to estimate the typical standard deviation from the images.
+ //I used here 10 as a reasonable value
+ // min and max can be used to further control the variance
+ float fCT; //CT - complexity reduction prior
+ //this is related to the number of samples needed to accept that a component
+ //actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
+ //the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
+
+ //shadow detection parameters
+ bool bShadowDetection; //default 1 - do shadow detection
+ unsigned char nShadowDetection; //do shadow detection - insert this value as the detection result - 127 default value
+ float fTau;
+ // Tau - shadow threshold. A shadow is detected if the pixel is a darker
+ //version of the background. Tau is a threshold on how much darker the shadow can be.
+ //Tau = 0.5 means that if a pixel is more than 2 times darker, then it is not a shadow.
+ //See: Prati, Mikic, Trivedi, Cucchiara, "Detecting Moving Shadows...", IEEE PAMI, 2003.
+
+ private:
+ int nmixtures_;
+
+ Size frameSize_;
+ int frameType_;
+ int nframes_;
+
+ oclMat weight_;
+ oclMat variance_;
+ oclMat mean_;
+
+ oclMat bgmodelUsedModes_; //keep track of number of modes per pixel
+ };
+
+ /*!***************Kalman Filter*************!*/
+ class CV_EXPORTS KalmanFilter
+ {
+ public:
+ KalmanFilter();
+ //! the full constructor taking the dimensionality of the state, of the measurement and of the control vector
+ KalmanFilter(int dynamParams, int measureParams, int controlParams=0, int type=CV_32F);
+ //! re-initializes Kalman filter. The previous content is destroyed.
+ void init(int dynamParams, int measureParams, int controlParams=0, int type=CV_32F);
+
+ const oclMat& predict(const oclMat& control=oclMat());
+ const oclMat& correct(const oclMat& measurement);
+
+ oclMat statePre; //!< predicted state (x'(k)): x(k)=A*x(k-1)+B*u(k)
+ oclMat statePost; //!< corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k))
+ oclMat transitionMatrix; //!< state transition matrix (A)
+ oclMat controlMatrix; //!< control matrix (B) (not used if there is no control)
+ oclMat measurementMatrix; //!< measurement matrix (H)
+ oclMat processNoiseCov; //!< process noise covariance matrix (Q)
+ oclMat measurementNoiseCov;//!< measurement noise covariance matrix (R)
+ oclMat errorCovPre; //!< priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q
+ oclMat gain; //!< Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)
+ oclMat errorCovPost; //!< posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k)
+ private:
+ oclMat temp1;
+ oclMat temp2;
+ oclMat temp3;
+ oclMat temp4;
+ oclMat temp5;
+ };
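+
+ // Usage sketch (illustrative only): a constant-velocity 2D tracker (4 state variables,
+ // 2 measured ones); `A` and `H` are assumed host CV_32F matrices and `d_measurement`
+ // a 2x1 CV_32F oclMat prepared per frame.
+ //
+ //   cv::ocl::KalmanFilter kf(4, 2);
+ //   kf.transitionMatrix.upload(A);          // 4x4 state transition matrix
+ //   kf.measurementMatrix.upload(H);         // 2x4 measurement matrix
+ //   const cv::ocl::oclMat& prediction = kf.predict();
+ //   const cv::ocl::oclMat& corrected  = kf.correct(d_measurement);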
+
+ /*!***************K Nearest Neighbour*************!*/
+ class CV_EXPORTS KNearestNeighbour: public CvKNearest
+ {
+ public:
+ KNearestNeighbour();
+ ~KNearestNeighbour();
+
+ bool train(const Mat& trainData, Mat& labels, Mat& sampleIdx = Mat().setTo(Scalar::all(0)),
+ bool isRegression = false, int max_k = 32, bool updateBase = false);
+
+ void clear();
+
+ void find_nearest(const oclMat& samples, int k, oclMat& lables);
+
+ private:
+ oclMat samples_ocl;
+ };
+
+ /*!*************** SVM *************!*/
+ class CV_EXPORTS CvSVM_OCL : public CvSVM
+ {
+ public:
+ CvSVM_OCL();
+
+ CvSVM_OCL(const cv::Mat& trainData, const cv::Mat& responses,
+ const cv::Mat& varIdx=cv::Mat(), const cv::Mat& sampleIdx=cv::Mat(),
+ CvSVMParams params=CvSVMParams());
+ CV_WRAP float predict( const int row_index, Mat& src, bool returnDFVal=false ) const;
+ CV_WRAP void predict( cv::InputArray samples, cv::OutputArray results ) const;
+ CV_WRAP float predict( const cv::Mat& sample, bool returnDFVal=false ) const;
+ float predict( const CvMat* samples, CV_OUT CvMat* results ) const;
+
+ protected:
+ float predict( const int row_index, int row_len, Mat& src, bool returnDFVal=false ) const;
+ void create_kernel();
+ void create_solver();
+ };
+
+ /*!*************** END *************!*/
+ }
+}
+#if defined _MSC_VER && _MSC_VER >= 1200
+# pragma warning( push)
+# pragma warning( disable: 4267)
+#endif
+#include "opencv2/ocl/matrix_operations.hpp"
+#if defined _MSC_VER && _MSC_VER >= 1200
+# pragma warning( pop)
+#endif
+
+#endif /* __OPENCV_OCL_HPP__ */
void cv::ocl::columnSum(const oclMat &src, oclMat &dst)
{
CV_Assert(src.type() == CV_32FC1);
-
dst.create(src.size(), src.type());
- Context *clCxt = src.clCxt;
-
- const String kernelName = "columnSum";
+ int src_step = src.step / src.elemSize(), src_offset = src.offset / src.elemSize();
+ int dst_step = dst.step / dst.elemSize(), dst_offset = dst.offset / dst.elemSize();
- std::vector< pair<size_t, const void *> > args;
- args.push_back( make_pair( sizeof(cl_mem), (void *)&src.data));
- args.push_back( make_pair( sizeof(cl_mem), (void *)&dst.data));
- args.push_back( make_pair( sizeof(cl_int), (void *)&src.cols));
- args.push_back( make_pair( sizeof(cl_int), (void *)&src.rows));
- args.push_back( make_pair( sizeof(cl_int), (void *)&src_step));
- args.push_back( make_pair( sizeof(cl_int), (void *)&dst_step));
- args.push_back( make_pair( sizeof(cl_int), (void *)&src_offset));
- args.push_back( make_pair( sizeof(cl_int), (void *)&dst_offset));
+ std::vector< std::pair<size_t, const void *> > args;
-
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&src.data));
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&dst.data));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&src.cols));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&src.rows));
- args.push_back( std::make_pair( sizeof(cl_int), (void *)&src.step));
- args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst.step));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&src_step));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst_step));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&src_offset));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst_offset));
size_t globalThreads[3] = {dst.cols, 1, 1};
size_t localThreads[3] = {256, 1, 1};
char compile_option[128];
sprintf(compile_option, "-D RADIUSX=%d -D RADIUSY=%d -D LSIZE0=%d -D LSIZE1=%d -D ERODE %s %s",
anchor.x, anchor.y, (int)localThreads[0], (int)localThreads[1],
- s, rectKernel?"-D RECTKERNEL":"");
-
- vector< pair<size_t, const void *> > args;
- args.push_back(make_pair(sizeof(cl_mem), (void *)&src.data));
- args.push_back(make_pair(sizeof(cl_mem), (void *)&dst.data));
- args.push_back(make_pair(sizeof(cl_int), (void *)&srcOffset_x));
- args.push_back(make_pair(sizeof(cl_int), (void *)&srcOffset_y));
- args.push_back(make_pair(sizeof(cl_int), (void *)&src.cols));
- args.push_back(make_pair(sizeof(cl_int), (void *)&src.rows));
- args.push_back(make_pair(sizeof(cl_int), (void *)&srcStep));
- args.push_back(make_pair(sizeof(cl_int), (void *)&dstStep));
- args.push_back(make_pair(sizeof(cl_mem), (void *)&mat_kernel.data));
- args.push_back(make_pair(sizeof(cl_int), (void *)&src.wholecols));
- args.push_back(make_pair(sizeof(cl_int), (void *)&src.wholerows));
- args.push_back(make_pair(sizeof(cl_int), (void *)&dstOffset));
+ rectKernel?"-D RECTKERNEL":"",
+ s);
++
+ std::vector< std::pair<size_t, const void *> > args;
+ args.push_back(std::make_pair(sizeof(cl_mem), (void *)&src.data));
+ args.push_back(std::make_pair(sizeof(cl_mem), (void *)&dst.data));
+ args.push_back(std::make_pair(sizeof(cl_int), (void *)&srcOffset_x));
+ args.push_back(std::make_pair(sizeof(cl_int), (void *)&srcOffset_y));
+ args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.cols));
+ args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.rows));
+ args.push_back(std::make_pair(sizeof(cl_int), (void *)&srcStep));
+ args.push_back(std::make_pair(sizeof(cl_int), (void *)&dstStep));
+ args.push_back(std::make_pair(sizeof(cl_mem), (void *)&mat_kernel.data));
+ args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.wholecols));
+ args.push_back(std::make_pair(sizeof(cl_int), (void *)&src.wholerows));
+ args.push_back(std::make_pair(sizeof(cl_int), (void *)&dstOffset));
+
openCLExecuteKernel(clCxt, &filtering_morph, kernelName, globalThreads, localThreads, args, -1, -1, compile_option);
}
Context *clCxt = src.clCxt;
int filterWidth = ksize.width;
- bool ksize_3x3 = filterWidth == 3 && src.type() != CV_32FC4; // CV_32FC4 is not tuned up with filter2d_3x3 kernel
+ bool ksize_3x3 = filterWidth == 3 && src.type() != CV_32FC4 && src.type() != CV_32FC3; // CV_32FC4 is not tuned up with filter2d_3x3 kernel
- string kernelName = ksize_3x3 ? "filter2D_3x3" : "filter2D";
+ String kernelName = ksize_3x3 ? "filter2D_3x3" : "filter2D";
size_t src_offset_x = (src.offset % src.step) / src.elemSize();
size_t src_offset_y = src.offset / src.step;
dst.create(map1.size(), src.type());
- String kernelName;
+ const char * const typeMap[] = { "uchar", "char", "ushort", "short", "int", "float", "double" };
+ const char * const channelMap[] = { "", "", "2", "4", "4" };
+ const char * const interMap[] = { "INTER_NEAREST", "INTER_LINEAR", "INTER_CUBIC", "INTER_LINEAR", "INTER_LANCZOS" };
+ const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP",
+ "BORDER_REFLECT_101", "BORDER_TRANSPARENT" };
- string kernelName = "remap";
++ String kernelName = "remap";
if ( map1.type() == CV_32FC2 && !map2.data )
- {
- if (interpolation == INTER_LINEAR && borderType == BORDER_CONSTANT)
- kernelName = "remapLNFConstant";
- else if (interpolation == INTER_NEAREST && borderType == BORDER_CONSTANT)
- kernelName = "remapNNFConstant";
- }
- kernelName += "_32FC2";
++ kernelName = kernelName + "_32FC2";
else if (map1.type() == CV_16SC2 && !map2.data)
- {
- if (interpolation == INTER_LINEAR && borderType == BORDER_CONSTANT)
- kernelName = "remapLNSConstant";
- else if (interpolation == INTER_NEAREST && borderType == BORDER_CONSTANT)
- kernelName = "remapNNSConstant";
-
- }
- kernelName += "_16SC2";
++ kernelName = kernelName + "_16SC2";
else if (map1.type() == CV_32FC1 && map2.type() == CV_32FC1)
- {
- if (interpolation == INTER_LINEAR && borderType == BORDER_CONSTANT)
- kernelName = "remapLNF1Constant";
- else if (interpolation == INTER_NEAREST && borderType == BORDER_CONSTANT)
- kernelName = "remapNNF1Constant";
- }
- kernelName += "_2_32FC1";
++ kernelName = kernelName + "_2_32FC1";
+ else
- CV_Error(CV_StsBadArg, "Unsupported map types");
++ CV_Error(Error::StsBadArg, "Unsupported map types");
- size_t blkSizeX = 16, blkSizeY = 16;
- size_t glbSizeX;
- int cols = dst.cols;
- if (src.type() == CV_8UC1)
- {
- cols = (dst.cols + dst.offset % 4 + 3) / 4;
- glbSizeX = cols % blkSizeX == 0 ? cols : (cols / blkSizeX + 1) * blkSizeX;
+ int ocn = dst.oclchannels();
+ size_t localThreads[3] = { 16, 16, 1};
+ size_t globalThreads[3] = { dst.cols, dst.rows, 1};
- }
- else if (src.type() == CV_32FC1 && interpolation == INTER_LINEAR)
+ Mat scalar(1, 1, CV_MAKE_TYPE(dst.depth(), ocn), borderValue);
- std::string buildOptions = format("-D %s -D %s -D T=%s%s", interMap[interpolation],
- borderMap[borderType], typeMap[src.depth()], channelMap[ocn]);
++ String buildOptions = format("-D %s -D %s -D T=%s%s", interMap[interpolation],
++ borderMap[borderType], typeMap[src.depth()], channelMap[ocn]);
+
+ if (interpolation != INTER_NEAREST)
{
- cols = (dst.cols + (dst.offset >> 2) % 4 + 3) / 4;
- glbSizeX = cols % blkSizeX == 0 ? cols : (cols / blkSizeX + 1) * blkSizeX;
+ int wdepth = std::max(CV_32F, dst.depth());
+ if (!supportsDouble)
+ wdepth = std::min(CV_32F, wdepth);
+
- buildOptions += format(" -D WT=%s%s -D convertToT=convert_%s%s%s -D convertToWT=convert_%s%s"
- " -D convertToWT2=convert_%s2 -D WT2=%s2",
- typeMap[wdepth], channelMap[ocn],
- typeMap[src.depth()], channelMap[ocn], src.depth() < CV_32F ? "_sat_rte" : "",
- typeMap[wdepth], channelMap[ocn],
- typeMap[wdepth], typeMap[wdepth]);
++ buildOptions = buildOptions
++ + format(" -D WT=%s%s -D convertToT=convert_%s%s%s -D convertToWT=convert_%s%s"
++ " -D convertToWT2=convert_%s2 -D WT2=%s2",
++ typeMap[wdepth], channelMap[ocn],
++ typeMap[src.depth()], channelMap[ocn], src.depth() < CV_32F ? "_sat_rte" : "",
++ typeMap[wdepth], channelMap[ocn],
++ typeMap[wdepth], typeMap[wdepth]);
}
- else
- glbSizeX = dst.cols % blkSizeX == 0 ? dst.cols : (dst.cols / blkSizeX + 1) * blkSizeX;
- size_t glbSizeY = dst.rows % blkSizeY == 0 ? dst.rows : (dst.rows / blkSizeY + 1) * blkSizeY;
- size_t globalThreads[3] = {glbSizeX, glbSizeY, 1};
- size_t localThreads[3] = {blkSizeX, blkSizeY, 1};
+ int src_step = src.step / src.elemSize(), src_offset = src.offset / src.elemSize();
+ int map1_step = map1.step / map1.elemSize(), map1_offset = map1.offset / map1.elemSize();
+ int map2_step = map2.step / map2.elemSize(), map2_offset = map2.offset / map2.elemSize();
+ int dst_step = dst.step / dst.elemSize(), dst_offset = dst.offset / dst.elemSize();
- float borderFloat[4] = {(float)borderValue[0], (float)borderValue[1], (float)borderValue[2], (float)borderValue[3]};
- vector< pair<size_t, const void *> > args;
- args.push_back( make_pair(sizeof(cl_mem), (void *)&src.data));
- args.push_back( make_pair(sizeof(cl_mem), (void *)&dst.data));
- args.push_back( make_pair(sizeof(cl_mem), (void *)&map1.data));
+ std::vector< std::pair<size_t, const void *> > args;
- if (map1.channels() == 2)
- {
- args.push_back( std::make_pair(sizeof(cl_mem), (void *)&dst.data));
- args.push_back( std::make_pair(sizeof(cl_mem), (void *)&src.data));
- args.push_back( std::make_pair(sizeof(cl_mem), (void *)&map1.data));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&dst.offset));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&src.offset));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&map1.offset));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&dst.step));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&src.step));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&map1.step));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&src.cols));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&src.rows));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&dst.cols));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&dst.rows));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&map1.cols));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&map1.rows));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&cols));
- float borderFloat[4] = {(float)borderValue[0], (float)borderValue[1], (float)borderValue[2], (float)borderValue[3]};
-
- if (src.clCxt->supportsFeature(FEATURE_CL_DOUBLE))
- args.push_back( std::make_pair(sizeof(cl_double4), (void *)&borderValue));
- else
- args.push_back( std::make_pair(sizeof(cl_float4), (void *)&borderFloat));
- }
- if (map1.channels() == 1)
- {
- args.push_back( std::make_pair(sizeof(cl_mem), (void *)&dst.data));
- args.push_back( std::make_pair(sizeof(cl_mem), (void *)&src.data));
- args.push_back( std::make_pair(sizeof(cl_mem), (void *)&map1.data));
++ args.push_back( std::make_pair(sizeof(cl_mem), (void *)&src.data));
++ args.push_back( std::make_pair(sizeof(cl_mem), (void *)&dst.data));
++ args.push_back( std::make_pair(sizeof(cl_mem), (void *)&map1.data));
+ if (!map2.empty())
- args.push_back( make_pair(sizeof(cl_mem), (void *)&map2.data));
- args.push_back( make_pair(sizeof(cl_int), (void *)&src_offset));
- args.push_back( make_pair(sizeof(cl_int), (void *)&dst_offset));
- args.push_back( make_pair(sizeof(cl_int), (void *)&map1_offset));
+ args.push_back( std::make_pair(sizeof(cl_mem), (void *)&map2.data));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&dst.offset));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&src.offset));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&map1.offset));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&dst.step));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&src.step));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&map1.step));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&src.cols));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&src.rows));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&dst.cols));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&dst.rows));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&map1.cols));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&map1.rows));
- args.push_back( std::make_pair(sizeof(cl_int), (void *)&cols));
- if (src.clCxt->supportsFeature(FEATURE_CL_DOUBLE))
- args.push_back( std::make_pair(sizeof(cl_double4), (void *)&borderValue));
- else
- args.push_back( std::make_pair(sizeof(cl_float4), (void *)&borderFloat));
- }
- openCLExecuteKernel(clCxt, &imgproc_remap, kernelName, globalThreads, localThreads, args, src.oclchannels(), src.depth());
++ args.push_back( std::make_pair(sizeof(cl_int), (void *)&src_offset));
++ args.push_back( std::make_pair(sizeof(cl_int), (void *)&dst_offset));
++ args.push_back( std::make_pair(sizeof(cl_int), (void *)&map1_offset));
+ if (!map2.empty())
- args.push_back( make_pair(sizeof(cl_int), (void *)&map2_offset));
- args.push_back( make_pair(sizeof(cl_int), (void *)&src_step));
- args.push_back( make_pair(sizeof(cl_int), (void *)&dst_step));
- args.push_back( make_pair(sizeof(cl_int), (void *)&map1_step));
++ args.push_back( std::make_pair(sizeof(cl_int), (void *)&map2_offset));
++ args.push_back( std::make_pair(sizeof(cl_int), (void *)&src_step));
++ args.push_back( std::make_pair(sizeof(cl_int), (void *)&dst_step));
++ args.push_back( std::make_pair(sizeof(cl_int), (void *)&map1_step));
+ if (!map2.empty())
- args.push_back( make_pair(sizeof(cl_int), (void *)&map2_step));
- args.push_back( make_pair(sizeof(cl_int), (void *)&src.cols));
- args.push_back( make_pair(sizeof(cl_int), (void *)&src.rows));
- args.push_back( make_pair(sizeof(cl_int), (void *)&dst.cols));
- args.push_back( make_pair(sizeof(cl_int), (void *)&dst.rows));
- args.push_back( make_pair(scalar.elemSize(), (void *)scalar.data));
++ args.push_back( std::make_pair(sizeof(cl_int), (void *)&map2_step));
++ args.push_back( std::make_pair(sizeof(cl_int), (void *)&src.cols));
++ args.push_back( std::make_pair(sizeof(cl_int), (void *)&src.rows));
++ args.push_back( std::make_pair(sizeof(cl_int), (void *)&dst.cols));
++ args.push_back( std::make_pair(sizeof(cl_int), (void *)&dst.rows));
++ args.push_back( std::make_pair(scalar.elemSize(), (void *)scalar.data));
+
+ openCLExecuteKernel(clCxt, &imgproc_remap, kernelName, globalThreads, localThreads, args, -1, -1, buildOptions.c_str());
}
////////////////////////////////////////////////////////////////////////////////////////////
void copyMakeBorder(const oclMat &src, oclMat &dst, int top, int bottom, int left, int right, int bordertype, const Scalar &scalar)
{
- CV_Assert(top >= 0 && bottom >= 0 && left >= 0 && right >= 0);
- if ((dst.cols != dst.wholecols) || (dst.rows != dst.wholerows)) //has roi
+ if (!src.clCxt->supportsFeature(FEATURE_CL_DOUBLE) && src.depth() == CV_64F)
{
- if (((bordertype & cv::BORDER_ISOLATED) == 0) &&
- (bordertype != cv::BORDER_CONSTANT) &&
- (bordertype != cv::BORDER_REPLICATE))
- {
- CV_Error(Error::StsBadArg, "Unsupported border type");
- }
- CV_Error(CV_OpenCLDoubleNotSupported, "Selected device does not support double");
++ CV_Error(Error::OpenCLDoubleNotSupported, "Selected device does not support double");
+ return;
}
+ oclMat _src = src;
+
+ CV_Assert(top >= 0 && bottom >= 0 && left >= 0 && right >= 0);
+
+ if( _src.offset != 0 && (bordertype & BORDER_ISOLATED) == 0 )
+ {
+ Size wholeSize;
+ Point ofs;
+ _src.locateROI(wholeSize, ofs);
+ int dtop = std::min(ofs.y, top);
+ int dbottom = std::min(wholeSize.height - _src.rows - ofs.y, bottom);
+ int dleft = std::min(ofs.x, left);
+ int dright = std::min(wholeSize.width - _src.cols - ofs.x, right);
+ _src.adjustROI(dtop, dbottom, dleft, dright);
+ top -= dtop;
+ left -= dleft;
+ bottom -= dbottom;
+ right -= dright;
+ }
bordertype &= ~cv::BORDER_ISOLATED;
+
+ // TODO: need to remove these conditions and fix the code
if (bordertype == cv::BORDER_REFLECT || bordertype == cv::BORDER_WRAP)
{
- CV_Assert((src.cols >= left) && (src.cols >= right) && (src.rows >= top) && (src.rows >= bottom));
+ CV_Assert((_src.cols >= left) && (_src.cols >= right) && (_src.rows >= top) && (_src.rows >= bottom));
}
else if (bordertype == cv::BORDER_REFLECT_101)
{
break;
if (bordertype_index == sizeof(__bordertype) / sizeof(int))
- CV_Error(Error::StsBadArg, "unsupported border type");
- CV_Error(CV_StsBadArg, "Unsupported border type");
++ CV_Error(Error::StsBadArg, "Unsupported border type");
- string kernelName = "copymakeborder";
+ String kernelName = "copymakeborder";
size_t localThreads[3] = {16, 16, 1};
size_t globalThreads[3] = { dst.cols, dst.rows, 1 };
- vector< pair<size_t, const void *> > args;
- args.push_back( make_pair( sizeof(cl_mem), (void *)&_src.data));
- args.push_back( make_pair( sizeof(cl_mem), (void *)&dst.data));
- args.push_back( make_pair( sizeof(cl_int), (void *)&dst.cols));
- args.push_back( make_pair( sizeof(cl_int), (void *)&dst.rows));
- args.push_back( make_pair( sizeof(cl_int), (void *)&_src.cols));
- args.push_back( make_pair( sizeof(cl_int), (void *)&_src.rows));
- args.push_back( make_pair( sizeof(cl_int), (void *)&srcStep));
- args.push_back( make_pair( sizeof(cl_int), (void *)&srcOffset));
- args.push_back( make_pair( sizeof(cl_int), (void *)&dstStep));
- args.push_back( make_pair( sizeof(cl_int), (void *)&dstOffset));
- args.push_back( make_pair( sizeof(cl_int), (void *)&top));
- args.push_back( make_pair( sizeof(cl_int), (void *)&left));
+ std::vector< std::pair<size_t, const void *> > args;
- args.push_back( std::make_pair( sizeof(cl_mem), (void *)&src.data));
++ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&_src.data));
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&dst.data));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst.cols));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst.rows));
- args.push_back( std::make_pair( sizeof(cl_int), (void *)&src.cols));
- args.push_back( std::make_pair( sizeof(cl_int), (void *)&src.rows));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&_src.cols));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&_src.rows));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&srcStep));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&srcOffset));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&dstStep));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&dstOffset));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&top));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&left));
const char * const typeMap[] = { "uchar", "char", "ushort", "short", "int", "float", "double" };
const char * const channelMap[] = { "", "", "2", "4", "4" };
if ((dst.type() == CV_8UC1) && ((dst.offset & 3) == 0) && ((dst.cols & 3) == 0))
{
kernelName = "bilateral2";
- globalThreads[0] = dst.cols / 4;
+ globalThreads[0] = dst.cols >> 2;
}
- vector<pair<size_t , const void *> > args;
- args.push_back( make_pair( sizeof(cl_mem), (void *)&dst.data ));
- args.push_back( make_pair( sizeof(cl_mem), (void *)&temp.data ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&dst.rows ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&dst.cols ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&maxk ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&radius ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&dst_step_in_pixel ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&dst_offset_in_pixel ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&temp_step_in_pixel ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&temp.rows ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&temp.cols ));
- args.push_back( make_pair( sizeof(cl_mem), (void *)&oclcolor_weight.data ));
- args.push_back( make_pair( sizeof(cl_mem), (void *)&oclspace_weight.data ));
- args.push_back( make_pair( sizeof(cl_mem), (void *)&oclspace_ofs.data ));
+ std::vector<std::pair<size_t , const void *> > args;
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&dst.data ));
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&temp.data ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst.rows ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst.cols ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&maxk ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&radius ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst_step_in_pixel ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst_offset_in_pixel ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&temp_step_in_pixel ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&temp.rows ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&temp.cols ));
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&oclcolor_weight.data ));
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&oclspace_weight.data ));
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&oclspace_ofs.data ));
+
openCLExecuteKernel(src.clCxt, &imgproc_bilateral, kernelName, globalThreads, localThreads, args, dst.oclchannels(), dst.depth());
}
+
void bilateralFilter(const oclMat &src, oclMat &dst, int radius, double sigmaclr, double sigmaspc, int borderType)
{
dst.create( src.size(), src.type() );
if ( src.depth() == CV_8U )
oclbilateralFilter_8u( src, dst, radius, sigmaclr, sigmaspc, borderType );
else
- CV_Error(Error::StsUnsupportedFormat, "Bilateral filtering is only implemented for 8uimages");
- CV_Error( CV_StsUnsupportedFormat, "Bilateral filtering is only implemented for CV_8U images" );
++ CV_Error(Error::StsUnsupportedFormat, "Bilateral filtering is only implemented for CV_8U images");
}
}
}
+//////////////////////////////////mulSpectrums////////////////////////////////////////////////////
+void cv::ocl::mulSpectrums(const oclMat &a, const oclMat &b, oclMat &c, int /*flags*/, float scale, bool conjB)
+{
+ CV_Assert(a.type() == CV_32FC2);
+ CV_Assert(b.type() == CV_32FC2);
+
+ c.create(a.size(), CV_32FC2);
+
+ size_t lt[3] = { 16, 16, 1 };
+ size_t gt[3] = { a.cols, a.rows, 1 };
+
+ String kernelName = conjB ? "mulAndScaleSpectrumsKernel_CONJ":"mulAndScaleSpectrumsKernel";
+
+ std::vector<std::pair<size_t , const void *> > args;
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&a.data ));
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&b.data ));
+ args.push_back( std::make_pair( sizeof(cl_float), (void *)&scale));
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&c.data ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&a.cols ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&a.rows));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&a.step ));
+
+ Context *clCxt = Context::getContext();
+ openCLExecuteKernel(clCxt, &imgproc_mulAndScaleSpectrums, kernelName, gt, lt, args, -1, -1);
+}
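+
+// Usage sketch (illustrative only): multiply two packed complex spectra (CV_32FC2)
+// element-wise, conjugating `b`, e.g. as one step of an FFT-based cross-correlation.
+//
+//   cv::ocl::oclMat d_spec;
+//   cv::ocl::mulSpectrums(d_a, d_b, d_spec, 0 /*flags, unused*/, 1.f, true /*conjB*/);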
//////////////////////////////////convolve////////////////////////////////////////////////////
+// ported from CUDA module
+void cv::ocl::ConvolveBuf::create(Size image_size, Size templ_size)
+{
+ result_size = Size(image_size.width - templ_size.width + 1,
+ image_size.height - templ_size.height + 1);
+
+ block_size = user_block_size;
+ if (user_block_size.width == 0 || user_block_size.height == 0)
+ block_size = estimateBlockSize(result_size, templ_size);
+
+ dft_size.width = 1 << int(ceil(std::log(block_size.width + templ_size.width - 1.) / std::log(2.)));
+ dft_size.height = 1 << int(ceil(std::log(block_size.height + templ_size.height - 1.) / std::log(2.)));
+
+ // CUFFT has hard-coded kernels for power-of-2 sizes (up to 8192),
+ // see CUDA Toolkit 4.1 CUFFT Library Programming Guide
+ //if (dft_size.width > 8192)
+ dft_size.width = getOptimalDFTSize(block_size.width + templ_size.width - 1.);
+ //if (dft_size.height > 8192)
+ dft_size.height = getOptimalDFTSize(block_size.height + templ_size.height - 1.);
+
+ // To avoid wasting time doing small DFTs
+ dft_size.width = std::max(dft_size.width, 512);
+ dft_size.height = std::max(dft_size.height, 512);
+
+ image_block.create(dft_size, CV_32F);
+ templ_block.create(dft_size, CV_32F);
+ result_data.create(dft_size, CV_32F);
+
+ //spect_len = dft_size.height * (dft_size.width / 2 + 1);
+ image_spect.create(dft_size.height, dft_size.width / 2 + 1, CV_32FC2);
+ templ_spect.create(dft_size.height, dft_size.width / 2 + 1, CV_32FC2);
+ result_spect.create(dft_size.height, dft_size.width / 2 + 1, CV_32FC2);
+
+ // Use maximum result matrix block size for the estimated DFT block size
+ block_size.width = std::min(dft_size.width - templ_size.width + 1, result_size.width);
+ block_size.height = std::min(dft_size.height - templ_size.height + 1, result_size.height);
+}
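+
+// Worked example (illustrative only): for a 640x480 image and a 31x31 template the result
+// is 610x450 and estimateBlockSize() picks a 204x150 block. getOptimalDFTSize(204+31-1)=240
+// and getOptimalDFTSize(150+31-1)=180, but both are then raised to the 512 floor, so each
+// block is transformed with a 512x512 DFT and block_size ends up as 482x450.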
+
+Size cv::ocl::ConvolveBuf::estimateBlockSize(Size result_size, Size /*templ_size*/)
+{
+ int width = (result_size.width + 2) / 3;
+ int height = (result_size.height + 2) / 3;
+ width = std::min(width, result_size.width);
+ height = std::min(height, result_size.height);
+ return Size(width, height);
+}
+
+static void convolve_run_fft(const oclMat &image, const oclMat &templ, oclMat &result, bool ccorr, ConvolveBuf& buf)
+{
+#if defined HAVE_CLAMDFFT
+ CV_Assert(image.type() == CV_32F);
+ CV_Assert(templ.type() == CV_32F);
+
+ buf.create(image.size(), templ.size());
+ result.create(buf.result_size, CV_32F);
+
+ Size& block_size = buf.block_size;
+ Size& dft_size = buf.dft_size;
+
+ oclMat& image_block = buf.image_block;
+ oclMat& templ_block = buf.templ_block;
+ oclMat& result_data = buf.result_data;
+
+ oclMat& image_spect = buf.image_spect;
+ oclMat& templ_spect = buf.templ_spect;
+ oclMat& result_spect = buf.result_spect;
+
+ oclMat templ_roi = templ;
+ copyMakeBorder(templ_roi, templ_block, 0, templ_block.rows - templ_roi.rows, 0,
+ templ_block.cols - templ_roi.cols, 0, Scalar());
+
+ cv::ocl::dft(templ_block, templ_spect, dft_size);
+
+ // Process all blocks of the result matrix
+ for (int y = 0; y < result.rows; y += block_size.height)
+ {
+ for (int x = 0; x < result.cols; x += block_size.width)
+ {
+ Size image_roi_size(std::min(x + dft_size.width, image.cols) - x,
+ std::min(y + dft_size.height, image.rows) - y);
+ Rect roi0(x, y, image_roi_size.width, image_roi_size.height);
+
+ oclMat image_roi(image, roi0);
+
+ copyMakeBorder(image_roi, image_block, 0, image_block.rows - image_roi.rows,
+ 0, image_block.cols - image_roi.cols, 0, Scalar());
+
+ cv::ocl::dft(image_block, image_spect, dft_size);
+
+ mulSpectrums(image_spect, templ_spect, result_spect, 0,
+ 1.f / dft_size.area(), ccorr);
+
+ cv::ocl::dft(result_spect, result_data, dft_size, cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT);
+
+ Size result_roi_size(std::min(x + block_size.width, result.cols) - x,
+ std::min(y + block_size.height, result.rows) - y);
+
+ Rect roi1(x, y, result_roi_size.width, result_roi_size.height);
+ Rect roi2(0, 0, result_roi_size.width, result_roi_size.height);
+
+ oclMat result_roi(result, roi1);
+ oclMat result_block(result_data, roi2);
+
+ result_block.copyTo(result_roi);
+ }
+ }
-static void convolve_run(const oclMat &src, const oclMat &temp1, oclMat &dst, string kernelName, const cv::ocl::ProgramEntry* source)
+#else
+ CV_Error(Error::OpenCLNoAMDBlasFft, "OpenCL DFT is not implemented");
+#define UNUSED(x) (void)(x);
+ UNUSED(image) UNUSED(templ) UNUSED(result) UNUSED(ccorr) UNUSED(buf)
+#undef UNUSED
+#endif
+}
+
+static void convolve_run(const oclMat &src, const oclMat &temp1, oclMat &dst, String kernelName, const cv::ocl::ProgramEntry* source)
{
+ CV_Assert(src.depth() == CV_32FC1);
+ CV_Assert(temp1.depth() == CV_32F);
+ CV_Assert(temp1.cols <= 17 && temp1.rows <= 17);
+
dst.create(src.size(), src.type());
- Context *clCxt = src.clCxt;
- int channels = dst.oclchannels();
- int depth = dst.depth();
-
- size_t vector_length = 1;
- int offset_cols = ((dst.offset % dst.step) / dst.elemSize1()) & (vector_length - 1);
- int cols = divUp(dst.cols * channels + offset_cols, vector_length);
- int rows = dst.rows;
-
+ CV_Assert(src.cols == dst.cols && src.rows == dst.rows);
+ CV_Assert(src.type() == dst.type());
+
size_t localThreads[3] = { 16, 16, 1 };
- size_t globalThreads[3] = { cols, rows, 1 };
+ size_t globalThreads[3] = { dst.cols, dst.rows, 1 };
+
+ int src_step = src.step / src.elemSize(), src_offset = src.offset / src.elemSize();
+ int dst_step = dst.step / dst.elemSize(), dst_offset = dst.offset / dst.elemSize();
+ int temp1_step = temp1.step / temp1.elemSize(), temp1_offset = temp1.offset / temp1.elemSize();
- vector<pair<size_t , const void *> > args;
- args.push_back( make_pair( sizeof(cl_mem), (void *)&src.data ));
- args.push_back( make_pair( sizeof(cl_mem), (void *)&temp1.data ));
- args.push_back( make_pair( sizeof(cl_mem), (void *)&dst.data ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&src.rows ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&src.cols ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&src_step ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&dst_step ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&temp1_step ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&temp1.rows ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&temp1.cols ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&src_offset ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&dst_offset ));
- args.push_back( make_pair( sizeof(cl_int), (void *)&temp1_offset ));
+ std::vector<std::pair<size_t , const void *> > args;
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&src.data ));
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&temp1.data ));
+ args.push_back( std::make_pair( sizeof(cl_mem), (void *)&dst.data ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&src.rows ));
- args.push_back( std::make_pair( sizeof(cl_int), (void *)&cols ));
- args.push_back( std::make_pair( sizeof(cl_int), (void *)&src.step ));
- args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst.step ));
- args.push_back( std::make_pair( sizeof(cl_int), (void *)&temp1.step ));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&src.cols ));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&src_step ));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst_step ));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&temp1_step ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&temp1.rows ));
+ args.push_back( std::make_pair( sizeof(cl_int), (void *)&temp1.cols ));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&src_offset ));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&dst_offset ));
++ args.push_back( std::make_pair( sizeof(cl_int), (void *)&temp1_offset ));
- openCLExecuteKernel(clCxt, source, kernelName, globalThreads, localThreads, args, -1, depth);
+ openCLExecuteKernel(src.clCxt, source, kernelName, globalThreads, localThreads, args, -1, dst.depth());
}
-void cv::ocl::convolve(const oclMat &x, const oclMat &t, oclMat &y)
+
+void cv::ocl::convolve(const oclMat &x, const oclMat &t, oclMat &y, bool ccorr)
{
- CV_Assert(x.depth() == CV_32F && t.depth() == CV_32F);
- CV_Assert(t.cols <= 17 && t.rows <= 17);
-
+ CV_Assert(x.depth() == CV_32F);
+ CV_Assert(t.depth() == CV_32F);
y.create(x.size(), x.type());
-
- convolve_run(x, t, y, "convolve", &imgproc_convolve);
+ if(t.cols > 17 || t.rows > 17)
+ {
+ ConvolveBuf buf;
+ convolve_run_fft(x, t, y, ccorr, buf);
+ }
+ else
+ {
+ CV_Assert(ccorr == false);
+ convolve_run(x, t, y, "convolve", &imgproc_convolve);
+ }
+}
+void cv::ocl::convolve(const oclMat &image, const oclMat &templ, oclMat &result, bool ccorr, ConvolveBuf& buf)
+{
+ result.create(image.size(), image.type());
+ convolve_run_fft(image, templ, result, ccorr, buf);
}
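For reference, a minimal usage sketch of the updated cv::ocl::convolve API (illustrative only; the header path, the presence of an OpenCL device, and the availability of the AMD clFFT backend for the FFT path are assumptions; inputs must be single-channel CV_32F):

#include "opencv2/core.hpp"
#include "opencv2/ocl/ocl.hpp" // header name may differ between branches

static void convolve_example(const cv::Mat& image, const cv::Mat& kernel, cv::Mat& result)
{
    CV_Assert(image.type() == CV_32FC1 && kernel.type() == CV_32FC1);

    cv::ocl::oclMat d_image(image), d_kernel(kernel), d_result;

    // kernels up to 17x17 take the direct OpenCL kernel; ccorr must be false there
    cv::ocl::convolve(d_image, d_kernel, d_result, false);

    // the ConvolveBuf overload always uses the FFT path (requires AMD clFFT); the
    // buffer can be reused across calls with same-sized inputs
    cv::ocl::ConvolveBuf buf;
    cv::ocl::convolve(d_image, d_kernel, d_result, true, buf);

    d_result.download(result);
}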
#include "opencv2/ocl/private/opencl_dumpinfo.hpp"
- CV_TEST_MAIN(".", dumpOpenCLDevice())
+ int LOOP_TIMES = 1;
+
+ void readLoopTimes(int argc, char ** argv)
+ {
+ const char * const command_line_keys =
- "{ |test_loop_times |1 |count of iterations per each test}"
- "{h |help |false |print help info}";
++ "{ test_loop_times |1 |count of iterations per each test}"
++ "{h help |false |print help info}";
+
+ cv::CommandLineParser parser(argc, argv, command_line_keys);
- if (parser.get<bool>("help"))
++ if (parser.has("help"))
+ {
+ std::cout << "\nAvailable options besides google test option: \n";
- parser.printParams();
++ parser.printMessage();
+ }
+
+ LOOP_TIMES = parser.get<int>("test_loop_times");
+ CV_Assert(LOOP_TIMES > 0);
+ }
+
+ CV_TEST_MAIN(".", dumpOpenCLDevice(),
+ readLoopTimes(argc, argv))
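The key string above uses the newer cv::CommandLineParser syntax; a small standalone sketch of the same parsing pattern (file layout and output messages are illustrative, not part of the patch):

#include <iostream>
#include "opencv2/core/utility.hpp"

int main(int argc, char** argv)
{
    const char* keys =
        "{ test_loop_times | 1     | number of iterations per test }"
        "{ h help          | false | print help info }";

    cv::CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    int loopTimes = parser.get<int>("test_loop_times");
    CV_Assert(loopTimes > 0);
    std::cout << "test_loop_times = " << loopTimes << std::endl;
    return 0;
}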
//M*/
#include "test_precomp.hpp"
+
+using namespace cv;
+
#ifdef HAVE_OPENCL
- //#define MAT_DEBUG
- #ifdef MAT_DEBUG
- #define MAT_DIFF(mat, mat2)\
- {\
- for(int i = 0; i < mat.rows; i ++)\
- {\
- for(int j = 0; j < mat.cols; j ++)\
- {\
- cv::Vec4b s = mat.at<cv::Vec4b>(i, j);\
- cv::Vec4b s2 = mat2.at<cv::Vec4b>(i, j);\
- if(s != s2) printf("*");\
- else printf(".");\
- }\
- puts("\n");\
- }\
- }
- #else
- #define MAT_DIFF(mat, mat2)
- #endif
-
-
namespace
{
+ using namespace testing;
///////////////////////////////////////////////////////////////////////////////////////////////////////
// cvtColor
};
#define CVTCODE(name) cv::COLOR_ ## name
- #define OCL_TEST_P_CVTCOLOR(name) OCL_TEST_P(CvtColor, name)\
- {\
- cv::Mat src = img;\
- cv::ocl::oclMat ocl_img, dst;\
- ocl_img.upload(img);\
- cv::ocl::cvtColor(ocl_img, dst, CVTCODE(name));\
- cv::Mat dst_gold;\
- cv::cvtColor(src, dst_gold, CVTCODE(name));\
- cv::Mat dst_mat;\
- dst.download(dst_mat);\
- EXPECT_MAT_NEAR(dst_gold, dst_mat, 1e-5);\
+
+ OCL_TEST_P(CvtColor, RGB2GRAY)
+ {
+ doTest(3, 1, CVTCODE(RGB2GRAY));
}
+ OCL_TEST_P(CvtColor, GRAY2RGB)
+ {
+ doTest(1, 3, CVTCODE(GRAY2RGB));
+ };
- //add new ones here using macro
- OCL_TEST_P_CVTCOLOR(RGB2GRAY)
- OCL_TEST_P_CVTCOLOR(BGR2GRAY)
- OCL_TEST_P_CVTCOLOR(RGBA2GRAY)
- OCL_TEST_P_CVTCOLOR(BGRA2GRAY)
+ OCL_TEST_P(CvtColor, BGR2GRAY)
+ {
+ doTest(3, 1, CVTCODE(BGR2GRAY));
+ }
+ OCL_TEST_P(CvtColor, GRAY2BGR)
+ {
+ doTest(1, 3, CVTCODE(GRAY2BGR));
+ };
- OCL_TEST_P_CVTCOLOR(RGB2YUV)
- OCL_TEST_P_CVTCOLOR(BGR2YUV)
- OCL_TEST_P_CVTCOLOR(YUV2RGB)
- OCL_TEST_P_CVTCOLOR(YUV2BGR)
- OCL_TEST_P_CVTCOLOR(RGB2YCrCb)
- OCL_TEST_P_CVTCOLOR(BGR2YCrCb)
+ OCL_TEST_P(CvtColor, RGBA2GRAY)
+ {
+ doTest(3, 1, CVTCODE(RGBA2GRAY));
+ }
+ OCL_TEST_P(CvtColor, GRAY2RGBA)
+ {
+ doTest(1, 3, CVTCODE(GRAY2RGBA));
+ };
- PARAM_TEST_CASE(CvtColor_Gray2RGB, cv::Size, MatDepth, int)
+ OCL_TEST_P(CvtColor, BGRA2GRAY)
{
- cv::Size size;
- int code;
- int depth;
- cv::Mat img;
+ doTest(3, 1, CVTCODE(BGRA2GRAY));
+ }
+ OCL_TEST_P(CvtColor, GRAY2BGRA)
+ {
+ doTest(1, 3, CVTCODE(GRAY2BGRA));
+ };
- virtual void SetUp()
+ OCL_TEST_P(CvtColor, RGB2YUV)
+ {
+ doTest(3, 3, CVTCODE(RGB2YUV));
+ }
+ OCL_TEST_P(CvtColor, BGR2YUV)
+ {
+ doTest(3, 3, CVTCODE(BGR2YUV));
+ }
+ OCL_TEST_P(CvtColor, YUV2RGB)
+ {
+ doTest(3, 3, CVTCODE(YUV2RGB));
+ }
+ OCL_TEST_P(CvtColor, YUV2BGR)
+ {
+ doTest(3, 3, CVTCODE(YUV2BGR));
+ }
+ OCL_TEST_P(CvtColor, RGB2YCrCb)
+ {
+ doTest(3, 3, CVTCODE(RGB2YCrCb));
+ }
+ OCL_TEST_P(CvtColor, BGR2YCrCb)
+ {
+ doTest(3, 3, CVTCODE(BGR2YCrCb));
+ }
+
+ struct CvtColor_YUV420 : CvtColor
+ {
+ void random_roi(int channelsIn, int channelsOut)
{
- size = GET_PARAM(0);
- depth = GET_PARAM(1);
- code = GET_PARAM(2);
- img = randomMat(size, CV_MAKETYPE(depth, 1), 0.0, depth == CV_32F ? 1.0 : 255.0);
+ const int srcType = CV_MAKE_TYPE(depth, channelsIn);
+ const int dstType = CV_MAKE_TYPE(depth, channelsOut);
+
+ Size roiSize = randomSize(1, MAX_VALUE);
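+ // keep the width even and the height divisible by 3, as required by the NV12 (YUV420) source layout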
+ roiSize.width *= 2;
+ roiSize.height *= 3;
+ Border srcBorder = randomBorder(0, use_roi ? MAX_VALUE : 0);
+ randomSubMat(src1, src1_roi, roiSize, srcBorder, srcType, 2, 100);
+
+ Border dst1Border = randomBorder(0, use_roi ? MAX_VALUE : 0);
+ randomSubMat(dst1, dst1_roi, roiSize, dst1Border, dstType, 5, 16);
+
+ generateOclMat(gsrc1_whole, gsrc1_roi, src1, roiSize, srcBorder);
+ generateOclMat(gdst1_whole, gdst1_roi, dst1, roiSize, dst1Border);
}
};
- OCL_TEST_P(CvtColor_Gray2RGB, Accuracy)
- {
- cv::Mat src = img;
- cv::ocl::oclMat ocl_img, dst;
- ocl_img.upload(src);
- cv::ocl::cvtColor(ocl_img, dst, code);
- cv::Mat dst_gold;
- cv::cvtColor(src, dst_gold, code);
- cv::Mat dst_mat;
- dst.download(dst_mat);
- EXPECT_MAT_NEAR(dst_gold, dst_mat, 1e-5);
- }
- doTest(1, 4, CV_YUV2RGBA_NV12);
+ OCL_TEST_P(CvtColor_YUV420, YUV2RGBA_NV12)
+ {
++ doTest(1, 4, COLOR_YUV2RGBA_NV12);
+ };
- PARAM_TEST_CASE(CvtColor_YUV420, cv::Size, int)
+ OCL_TEST_P(CvtColor_YUV420, YUV2BGRA_NV12)
{
- cv::Size size;
- int code;
- doTest(1, 4, CV_YUV2BGRA_NV12);
++ doTest(1, 4, COLOR_YUV2BGRA_NV12);
+ };
- cv::Mat img;
+ OCL_TEST_P(CvtColor_YUV420, YUV2RGB_NV12)
+ {
- doTest(1, 3, CV_YUV2RGB_NV12);
++ doTest(1, 3, COLOR_YUV2RGB_NV12);
+ };
- virtual void SetUp()
- {
- size = GET_PARAM(0);
- code = GET_PARAM(1);
- img = randomMat(size, CV_8UC1, 0.0, 255.0);
- }
+ OCL_TEST_P(CvtColor_YUV420, YUV2BGR_NV12)
+ {
- doTest(1, 3, CV_YUV2BGR_NV12);
++ doTest(1, 3, COLOR_YUV2BGR_NV12);
};
- OCL_TEST_P(CvtColor_YUV420, Accuracy)
- {
- cv::Mat src = img;
- cv::ocl::oclMat ocl_img, dst;
- ocl_img.upload(src);
- cv::ocl::cvtColor(ocl_img, dst, code);
- cv::Mat dst_gold;
- cv::cvtColor(src, dst_gold, code);
- cv::Mat dst_mat;
- dst.download(dst_mat);
- MAT_DIFF(dst_mat, dst_gold);
- EXPECT_MAT_NEAR(dst_gold, dst_mat, 1e-5);
- }
- INSTANTIATE_TEST_CASE_P(OCL_ImgProc, CvtColor, testing::Combine(
- DIFFERENT_SIZES,
- testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32F))
- ));
-
- INSTANTIATE_TEST_CASE_P(OCL_ImgProc, CvtColor_YUV420, testing::Combine(
- testing::Values(cv::Size(128, 45), cv::Size(46, 132), cv::Size(1024, 1023)),
- testing::Values((int)COLOR_YUV2RGBA_NV12, (int)COLOR_YUV2BGRA_NV12, (int)COLOR_YUV2RGB_NV12, (int)COLOR_YUV2BGR_NV12)
- ));
-
- INSTANTIATE_TEST_CASE_P(OCL_ImgProc, CvtColor_Gray2RGB, testing::Combine(
- DIFFERENT_SIZES,
- testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32F)),
- testing::Values((int)COLOR_GRAY2BGR, (int)COLOR_GRAY2BGRA, (int)COLOR_GRAY2RGB, (int)COLOR_GRAY2RGBA)
- ));
+ INSTANTIATE_TEST_CASE_P(OCL_ImgProc, CvtColor,
+ testing::Combine(
+ testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32F)),
+ Bool()
+ )
+ );
+
+ INSTANTIATE_TEST_CASE_P(OCL_ImgProc, CvtColor_YUV420,
+ testing::Combine(
+ testing::Values(MatDepth(CV_8U)),
+ Bool()
+ )
+ );
+
}
#endif
#ifndef __OPENCV_TEST_UTILITY_HPP__
#define __OPENCV_TEST_UTILITY_HPP__
+#include "opencv2/core.hpp"
+
- #define LOOP_TIMES 1
+ extern int LOOP_TIMES;
#define MWIDTH 256
#define MHEIGHT 256