/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_GPU_HPP__
#define __OPENCV_GPU_HPP__

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/gpu/devmem2d.hpp"
#include "opencv2/features2d/features2d.hpp"
//////////////////////////////// Initialization & Info ////////////////////////

//! This is the only function that does not throw an exception if the library is compiled without CUDA.
CV_EXPORTS int getCudaEnabledDeviceCount();

//! The functions below throw cv::Exception if the library is compiled without CUDA.
CV_EXPORTS void setDevice(int device);
CV_EXPORTS int getDevice();
enum FeatureSet
{
    FEATURE_SET_COMPUTE_10 = 10,
    FEATURE_SET_COMPUTE_11 = 11,
    FEATURE_SET_COMPUTE_12 = 12,
    FEATURE_SET_COMPUTE_13 = 13,
    FEATURE_SET_COMPUTE_20 = 20,
    FEATURE_SET_COMPUTE_21 = 21,
    GLOBAL_ATOMICS = FEATURE_SET_COMPUTE_11,
    NATIVE_DOUBLE = FEATURE_SET_COMPUTE_13
};
// Gives information about what GPU archs this OpenCV GPU module was
// compiled for
class CV_EXPORTS TargetArchs
{
public:
    static bool builtWith(FeatureSet feature_set);
    static bool has(int major, int minor);
    static bool hasPtx(int major, int minor);
    static bool hasBin(int major, int minor);
    static bool hasEqualOrLessPtx(int major, int minor);
    static bool hasEqualOrGreater(int major, int minor);
    static bool hasEqualOrGreaterPtx(int major, int minor);
    static bool hasEqualOrGreaterBin(int major, int minor);
private:
    TargetArchs();
};
// Gives information about the given GPU
class CV_EXPORTS DeviceInfo
{
public:
    // Creates DeviceInfo object for the current GPU
    DeviceInfo() : device_id_(getDevice()) { query(); }

    // Creates DeviceInfo object for the given GPU
    DeviceInfo(int device_id) : device_id_(device_id) { query(); }

    string name() const { return name_; }

    // Return compute capability versions
    int majorVersion() const { return majorVersion_; }
    int minorVersion() const { return minorVersion_; }

    int multiProcessorCount() const { return multi_processor_count_; }

    size_t freeMemory() const;
    size_t totalMemory() const;

    // Checks whether device supports the given feature
    bool supports(FeatureSet feature_set) const;

    // Checks whether the GPU module can be run on the given device
    bool isCompatible() const;

private:
    void query();
    void queryMemory(size_t& free_memory, size_t& total_memory) const;

    int device_id_;

    string name_;
    int multi_processor_count_;
    int majorVersion_;
    int minorVersion_;
};
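// A minimal usage sketch for the device-management API above (a sketch only;
// assumes at least one CUDA-capable device and <iostream> for the output):
//
//     int count = cv::gpu::getCudaEnabledDeviceCount();
//     for (int i = 0; i < count; ++i)
//     {
//         cv::gpu::DeviceInfo info(i);
//         if (info.isCompatible())
//             std::cout << info.name() << ": "
//                       << info.freeMemory() / (1 << 20) << " MB free\n";
//     }
//     cv::gpu::setDevice(0); // make device 0 current for subsequent GPU calls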
/////////////////////////// Multi GPU Manager //////////////////////////////

// Provides functionality for working with many GPUs
class CV_EXPORTS MultiGpuManager
{
public:
    MultiGpuManager();
    ~MultiGpuManager();

    // Must be called before any other GPU calls
    void init();

    // Makes the given GPU active
    void gpuOn(int gpu_id);

    // Finishes the piece of work on the current GPU
    void gpuOff();

    static const int BAD_GPU_ID = -1;

private:
    void operator=(const MultiGpuManager&);
    MultiGpuManager(const MultiGpuManager&);
};
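// A hypothetical round-trip with the manager above, following the comments on
// init()/gpuOn()/gpuOff() (a sketch only; the work issued between gpuOn() and
// gpuOff() is up to the caller):
//
//     cv::gpu::MultiGpuManager mgr;
//     mgr.init();
//     mgr.gpuOn(0);
//     // ... issue GPU work for device 0 ...
//     mgr.gpuOff();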
//////////////////////////////// Error handling ////////////////////////

CV_EXPORTS void error(const char *error_string, const char *file, const int line, const char *func);
CV_EXPORTS void nppError(int err, const char *file, const int line, const char *func);
//////////////////////////////// GpuMat ////////////////////////////////

//! Smart pointer for GPU memory with reference counting. Its interface is mostly similar to cv::Mat.
class CV_EXPORTS GpuMat
{
public:
    //! default constructor
    GpuMat();

    //! constructs GpuMat of the specified size and type (type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.)
    GpuMat(int rows, int cols, int type);
    GpuMat(Size size, int type);

    //! constructs GpuMat and fills it with the specified value s
    GpuMat(int rows, int cols, int type, const Scalar& s);
    GpuMat(Size size, int type, const Scalar& s);

    //! copy constructor
    GpuMat(const GpuMat& m);

    //! constructor for GpuMat headers pointing to user-allocated data
    GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP);
    GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP);

    //! creates a matrix header for a part of the bigger matrix
    GpuMat(const GpuMat& m, const Range& rowRange, const Range& colRange);
    GpuMat(const GpuMat& m, const Rect& roi);

    //! builds GpuMat from Mat. Performs blocking upload to device.
    explicit GpuMat(const Mat& m);

    //! destructor - calls release()
    ~GpuMat();

    //! assignment operators
    GpuMat& operator = (const GpuMat& m);
    //! assignment operator. Performs blocking upload to device.
    GpuMat& operator = (const Mat& m);

    //! returns lightweight DevMem2D_ structure for passing to nvcc-compiled code.
    // Contains just image size, data ptr and step.
    template <class T> operator DevMem2D_<T>() const;
    template <class T> operator PtrStep_<T>() const;

    //! performs blocking upload of data to GpuMat
    void upload(const cv::Mat& m);

    //! asynchronous upload
    void upload(const CudaMem& m, Stream& stream);

    //! downloads data from device to host memory. Blocking calls.
    operator Mat() const;
    void download(cv::Mat& m) const;

    //! asynchronous download
    void download(CudaMem& m, Stream& stream) const;

    //! returns a new GpuMat header for the specified row
    GpuMat row(int y) const;
    //! returns a new GpuMat header for the specified column
    GpuMat col(int x) const;
    //! ... for the specified row span
    GpuMat rowRange(int startrow, int endrow) const;
    GpuMat rowRange(const Range& r) const;
    //! ... for the specified column span
    GpuMat colRange(int startcol, int endcol) const;
    GpuMat colRange(const Range& r) const;

    //! returns deep copy of the GpuMat, i.e. the data is copied
    GpuMat clone() const;
    //! copies the GpuMat content to "m".
    // It calls m.create(this->size(), this->type()).
    void copyTo( GpuMat& m ) const;
    //! copies those GpuMat elements to "m" that are marked with non-zero mask elements
    void copyTo( GpuMat& m, const GpuMat& mask ) const;
    //! converts GpuMat to another datatype with optional scaling. See cvConvertScale.
    void convertTo( GpuMat& m, int rtype, double alpha=1, double beta=0 ) const;

    void assignTo( GpuMat& m, int type=-1 ) const;

    //! sets every GpuMat element to s
    GpuMat& operator = (const Scalar& s);
    //! sets some of the GpuMat elements to s, according to the mask
    GpuMat& setTo(const Scalar& s, const GpuMat& mask = GpuMat());
    //! creates an alternative GpuMat header for the same data, with a different
    // number of channels and/or a different number of rows. See cvReshape.
    GpuMat reshape(int cn, int rows = 0) const;

    //! allocates new GpuMat data unless the GpuMat already has the specified size and type.
    // previous data is unreferenced if needed.
    void create(int rows, int cols, int type);
    void create(Size size, int type);
    //! decreases the reference counter;
    // deallocates the data when the reference counter reaches 0.
    void release();

    //! swaps with other smart pointer
    void swap(GpuMat& mat);

    //! locates GpuMat header within a parent GpuMat. See below
    void locateROI( Size& wholeSize, Point& ofs ) const;
    //! moves/resizes the current GpuMat ROI inside the parent GpuMat
    GpuMat& adjustROI( int dtop, int dbottom, int dleft, int dright );
    //! extracts a rectangular sub-GpuMat
    // (this is a generalized form of row, rowRange etc.)
    GpuMat operator()( Range rowRange, Range colRange ) const;
    GpuMat operator()( const Rect& roi ) const;

    //! returns true iff the GpuMat data is continuous
    // (i.e. when there are no gaps between successive rows),
    // similar to CV_IS_MAT_CONT(cvmat->type)
    bool isContinuous() const;
    //! returns element size in bytes,
    // similar to CV_ELEM_SIZE(cvmat->type)
    size_t elemSize() const;
    //! returns the size of element channel in bytes
    size_t elemSize1() const;
    //! returns element type, similar to CV_MAT_TYPE(cvmat->type)
    int type() const;
    //! returns element depth, similar to CV_MAT_DEPTH(cvmat->type)
    int depth() const;
    //! returns number of channels, similar to CV_MAT_CN(cvmat->type)
    int channels() const;
    //! returns step/elemSize1()
    size_t step1() const;
    //! returns GpuMat size:
    // width == number of columns, height == number of rows
    Size size() const;
    //! returns true if the GpuMat data is NULL
    bool empty() const;

    //! returns pointer to y-th row
    uchar* ptr(int y = 0);
    const uchar* ptr(int y = 0) const;

    //! template version of the above method
    template<typename _Tp> _Tp* ptr(int y = 0);
    template<typename _Tp> const _Tp* ptr(int y = 0) const;

    //! matrix transposition
    GpuMat t() const;

    /*! includes several bit-fields:
        - the magic signature
        - continuity flag
        - depth
        - number of channels
    */
    int flags;
    //! the number of rows and columns
    int rows, cols;
    //! a distance between successive rows in bytes; includes the gap if any
    size_t step;
    //! pointer to the data
    uchar* data;

    //! pointer to the reference counter;
    // when GpuMat points to user-allocated data, the pointer is NULL
    int* refcount;

    //! helper fields used in locateROI and adjustROI
    uchar* datastart;
    uchar* dataend;
};
//#define TemplatedGpuMat // experimental for now; do not use
#ifdef TemplatedGpuMat
    #include "GpuMat_BetaDeprecated.hpp"
#endif
//! Creates a continuous GPU matrix
CV_EXPORTS void createContinuous(int rows, int cols, int type, GpuMat& m);

//! Ensures that the size of the given matrix is not less than (rows, cols)
//! and that the matrix type matches the specified one
CV_EXPORTS void ensureSizeIsEnough(int rows, int cols, int type, GpuMat& m);
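// A minimal host/device round-trip sketch for GpuMat (assumes the library was
// built with CUDA; h_img stands for any non-empty cv::Mat, e.g. from imread):
//
//     cv::gpu::GpuMat d_img;
//     d_img.upload(h_img);                                    // blocking host -> device copy
//     cv::gpu::GpuMat d_roi = d_img(cv::Rect(0, 0, 64, 64));  // header only, no data copy
//     cv::Mat h_back;
//     d_img.download(h_back);                                 // blocking device -> host copy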
//////////////////////////////// CudaMem ////////////////////////////////
// CudaMem is a limited cv::Mat with page-locked memory allocation.
// Page-locked memory is only needed for asynchronous and faster copying to GPU.
// It is convertible to a cv::Mat header without reference counting,
// so you can use it with other OpenCV functions.

class CV_EXPORTS CudaMem
{
public:
    enum { ALLOC_PAGE_LOCKED = 1, ALLOC_ZEROCOPY = 2, ALLOC_WRITE_COMBINED = 4 };

    CudaMem();
    CudaMem(const CudaMem& m);

    CudaMem(int rows, int cols, int type, int alloc_type = ALLOC_PAGE_LOCKED);
    CudaMem(Size size, int type, int alloc_type = ALLOC_PAGE_LOCKED);

    //! creates from cv::Mat, copying the data
    explicit CudaMem(const Mat& m, int alloc_type = ALLOC_PAGE_LOCKED);

    ~CudaMem();

    CudaMem& operator = (const CudaMem& m);

    //! returns deep copy of the matrix, i.e. the data is copied
    CudaMem clone() const;

    //! allocates new matrix data unless the matrix already has the specified size and type
    void create(int rows, int cols, int type, int alloc_type = ALLOC_PAGE_LOCKED);
    void create(Size size, int type, int alloc_type = ALLOC_PAGE_LOCKED);

    //! decrements reference counter and releases memory if needed
    void release();

    //! returns matrix header with disabled reference counting for CudaMem data
    Mat createMatHeader() const;
    operator Mat() const;

    //! maps host memory into device address space and returns GpuMat header for it. Throws exception if not supported by hardware.
    GpuMat createGpuMatHeader() const;
    operator GpuMat() const;

    //! returns whether host memory can be mapped to GPU address space
    static bool canMapHostMemory();

    // Please see cv::Mat for descriptions
    bool isContinuous() const;
    size_t elemSize() const;
    size_t elemSize1() const;
    int type() const;
    int depth() const;
    int channels() const;
    size_t step1() const;
    Size size() const;
    bool empty() const;

    // Please see cv::Mat for descriptions
    int flags;
    int rows, cols;
    size_t step;

    uchar* data;
    int* refcount;

    uchar* datastart;
    uchar* dataend;

    int alloc_type;
};
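// A sketch of page-locked allocation feeding an asynchronous upload (assumes
// a CUDA device; see the Stream class below for the async API):
//
//     cv::gpu::CudaMem pinned(480, 640, CV_8UC1, cv::gpu::CudaMem::ALLOC_PAGE_LOCKED);
//     cv::Mat h_frame = pinned;              // Mat header over the pinned buffer
//     // ... fill h_frame ...
//     cv::gpu::GpuMat d_frame;
//     cv::gpu::Stream stream;
//     stream.enqueueUpload(pinned, d_frame); // async, since the memory is page-locked
//     stream.waitForCompletion();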
//////////////////////////////// CudaStream ////////////////////////////////
// Encapsulates a CUDA stream. Provides interface for asynchronous copying.
// Passed to each function that supports asynchronous kernel execution.
// Reference counting is enabled.

class CV_EXPORTS Stream
{
public:
    Stream();
    ~Stream();

    Stream(const Stream&);
    Stream& operator=(const Stream&);

    bool queryIfComplete();
    void waitForCompletion();

    //! downloads asynchronously.
    // Warning! cv::Mat must point to page locked memory (i.e. to CudaMem data or to its subMat)
    void enqueueDownload(const GpuMat& src, CudaMem& dst);
    void enqueueDownload(const GpuMat& src, Mat& dst);

    //! uploads asynchronously.
    // Warning! cv::Mat must point to page locked memory (i.e. to CudaMem data or to its ROI)
    void enqueueUpload(const CudaMem& src, GpuMat& dst);
    void enqueueUpload(const Mat& src, GpuMat& dst);

    void enqueueCopy(const GpuMat& src, GpuMat& dst);

    void enqueueMemSet(GpuMat& src, Scalar val);
    void enqueueMemSet(GpuMat& src, Scalar val, const GpuMat& mask);

    // converts matrix type, e.g. from float to uchar depending on type
    void enqueueConvert(const GpuMat& src, GpuMat& dst, int type, double a = 1, double b = 0);

private:
    struct Impl;
    Impl *impl;

    friend struct StreamAccessor;
};
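// A minimal asynchronous pipeline sketch with the Stream above ("pinned" and
// "pinned_out" stand for CudaMem buffers as in the CudaMem example; pinned_out
// must match d_dst in size and type):
//
//     cv::gpu::Stream stream;
//     cv::gpu::GpuMat d_src, d_dst;
//     stream.enqueueUpload(pinned, d_src);
//     stream.enqueueConvert(d_src, d_dst, CV_32F, 1.0 / 255.0);
//     stream.enqueueDownload(d_dst, pinned_out);
//     if (!stream.queryIfComplete())
//         stream.waitForCompletion();        // block until all queued work is done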
////////////////////////////// Arithmetics ///////////////////////////////////

//! transposes the matrix
//! supports matrices with element size of 1, 4 and 8 bytes (CV_8UC1, CV_8UC4, CV_16UC2, CV_32FC1, etc)
CV_EXPORTS void transpose(const GpuMat& src1, GpuMat& dst);

//! reverses the order of the rows, columns or both in a matrix
//! supports CV_8UC1, CV_8UC4 types
CV_EXPORTS void flip(const GpuMat& a, GpuMat& b, int flipCode);

//! transforms 8-bit unsigned integers using lookup table: dst(i)=lut(src(i))
//! destination array will have the same depth as lut and the same number of channels as source
//! supports CV_8UC1, CV_8UC3 types
CV_EXPORTS void LUT(const GpuMat& src, const Mat& lut, GpuMat& dst);

//! makes multi-channel array out of several single-channel arrays
CV_EXPORTS void merge(const GpuMat* src, size_t n, GpuMat& dst);

//! makes multi-channel array out of several single-channel arrays
CV_EXPORTS void merge(const vector<GpuMat>& src, GpuMat& dst);

//! makes multi-channel array out of several single-channel arrays (async version)
CV_EXPORTS void merge(const GpuMat* src, size_t n, GpuMat& dst, const Stream& stream);

//! makes multi-channel array out of several single-channel arrays (async version)
CV_EXPORTS void merge(const vector<GpuMat>& src, GpuMat& dst, const Stream& stream);

//! copies each plane of a multi-channel array to a dedicated array
CV_EXPORTS void split(const GpuMat& src, GpuMat* dst);

//! copies each plane of a multi-channel array to a dedicated array
CV_EXPORTS void split(const GpuMat& src, vector<GpuMat>& dst);

//! copies each plane of a multi-channel array to a dedicated array (async version)
CV_EXPORTS void split(const GpuMat& src, GpuMat* dst, const Stream& stream);

//! copies each plane of a multi-channel array to a dedicated array (async version)
CV_EXPORTS void split(const GpuMat& src, vector<GpuMat>& dst, const Stream& stream);

//! computes magnitude of each complex (x(i).re, x(i).im) vector
//! supports only CV_32FC2 type
CV_EXPORTS void magnitude(const GpuMat& x, GpuMat& magnitude);

//! computes squared magnitude of each complex (x(i).re, x(i).im) vector
//! supports only CV_32FC2 type
CV_EXPORTS void magnitudeSqr(const GpuMat& x, GpuMat& magnitude);

//! computes magnitude of each (x(i), y(i)) vector
//! supports only floating-point source
CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude);
//! async version
CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, const Stream& stream);

//! computes squared magnitude of each (x(i), y(i)) vector
//! supports only floating-point source
CV_EXPORTS void magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude);
//! async version
CV_EXPORTS void magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, const Stream& stream);

//! computes angle (angle(i)) of each (x(i), y(i)) vector
//! supports only floating-point source
CV_EXPORTS void phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees = false);
//! async version
CV_EXPORTS void phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees, const Stream& stream);

//! converts Cartesian coordinates to polar
//! supports only floating-point source
CV_EXPORTS void cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees = false);
//! async version
CV_EXPORTS void cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees, const Stream& stream);

//! converts polar coordinates to Cartesian
//! supports only floating-point source
CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees = false);
//! async version
CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees, const Stream& stream);
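// A sketch converting a gradient field to magnitude/angle with the functions
// above (assumes d_dx and d_dy are CV_32FC1 GpuMats of equal size):
//
//     cv::gpu::GpuMat d_mag, d_angle;
//     cv::gpu::cartToPolar(d_dx, d_dy, d_mag, d_angle, true); // angles in degrees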
//////////////////////////// Per-element operations ////////////////////////////////////

//! adds one matrix to another (c = a + b)
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types
CV_EXPORTS void add(const GpuMat& a, const GpuMat& b, GpuMat& c);
//! adds scalar to a matrix (c = a + s)
//! supports CV_32FC1 and CV_32FC2 types
CV_EXPORTS void add(const GpuMat& a, const Scalar& sc, GpuMat& c);

//! subtracts one matrix from another (c = a - b)
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types
CV_EXPORTS void subtract(const GpuMat& a, const GpuMat& b, GpuMat& c);
//! subtracts scalar from a matrix (c = a - s)
//! supports CV_32FC1 and CV_32FC2 types
CV_EXPORTS void subtract(const GpuMat& a, const Scalar& sc, GpuMat& c);

//! computes element-wise product of the two arrays (c = a * b)
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types
CV_EXPORTS void multiply(const GpuMat& a, const GpuMat& b, GpuMat& c);
//! multiplies a matrix by a scalar (c = a * s)
//! supports CV_32FC1 and CV_32FC2 types
CV_EXPORTS void multiply(const GpuMat& a, const Scalar& sc, GpuMat& c);

//! computes element-wise quotient of the two arrays (c = a / b)
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types
CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c);
//! computes element-wise quotient of matrix and scalar (c = a / s)
//! supports CV_32FC1 and CV_32FC2 types
CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c);

//! computes exponent of each matrix element (b = e**a)
//! supports only CV_32FC1 type
CV_EXPORTS void exp(const GpuMat& a, GpuMat& b);

//! computes natural logarithm of absolute value of each matrix element: b = log(abs(a))
//! supports only CV_32FC1 type
CV_EXPORTS void log(const GpuMat& a, GpuMat& b);

//! computes element-wise absolute difference of two arrays (c = abs(a - b))
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types
CV_EXPORTS void absdiff(const GpuMat& a, const GpuMat& b, GpuMat& c);
//! computes element-wise absolute difference of array and scalar (c = abs(a - s))
//! supports only CV_32FC1 type
CV_EXPORTS void absdiff(const GpuMat& a, const Scalar& s, GpuMat& c);

//! compares elements of two arrays (c = a <cmpop> b)
//! supports CV_8UC4, CV_32FC1 types
CV_EXPORTS void compare(const GpuMat& a, const GpuMat& b, GpuMat& c, int cmpop);

//! performs per-element bit-wise inversion
CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask=GpuMat());
//! async version
CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask, const Stream& stream);

//! calculates per-element bit-wise disjunction of two arrays
CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat());
//! async version
CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, const Stream& stream);

//! calculates per-element bit-wise conjunction of two arrays
CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat());
//! async version
CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, const Stream& stream);

//! calculates per-element bit-wise "exclusive or" operation
CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat());
//! async version
CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, const Stream& stream);

//! computes per-element minimum of two arrays (dst = min(src1, src2))
CV_EXPORTS void min(const GpuMat& src1, const GpuMat& src2, GpuMat& dst);
//! async version
CV_EXPORTS void min(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream);

//! computes per-element minimum of array and scalar (dst = min(src1, src2))
CV_EXPORTS void min(const GpuMat& src1, double src2, GpuMat& dst);
//! async version
CV_EXPORTS void min(const GpuMat& src1, double src2, GpuMat& dst, const Stream& stream);

//! computes per-element maximum of two arrays (dst = max(src1, src2))
CV_EXPORTS void max(const GpuMat& src1, const GpuMat& src2, GpuMat& dst);
//! async version
CV_EXPORTS void max(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream);

//! computes per-element maximum of array and scalar (dst = max(src1, src2))
CV_EXPORTS void max(const GpuMat& src1, double src2, GpuMat& dst);
//! async version
CV_EXPORTS void max(const GpuMat& src1, double src2, GpuMat& dst, const Stream& stream);
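// A short sketch of chained per-element operations (assumes d_a and d_b are
// CV_32FC1 GpuMats of equal size):
//
//     cv::gpu::GpuMat d_sum, d_diff, d_mask;
//     cv::gpu::add(d_a, d_b, d_sum);
//     cv::gpu::absdiff(d_a, d_b, d_diff);
//     cv::gpu::compare(d_diff, d_sum, d_mask, cv::CMP_GT);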
////////////////////////////// Image processing //////////////////////////////

//! DST[x,y] = SRC[xmap[x,y], ymap[x,y]] with bilinear interpolation.
//! supports CV_8UC1, CV_8UC3 source types and CV_32FC1 map type
CV_EXPORTS void remap(const GpuMat& src, GpuMat& dst, const GpuMat& xmap, const GpuMat& ymap);

//! Does mean shift filtering on GPU.
CV_EXPORTS void meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr,
    TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));

//! Does mean shift procedure on GPU.
CV_EXPORTS void meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr,
    TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));

//! Does mean shift segmentation with elimination of small regions.
CV_EXPORTS void meanShiftSegmentation(const GpuMat& src, Mat& dst, int sp, int sr, int minsize,
    TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, 1));

//! Does coloring of disparity image: [0..ndisp) -> [0..240, 1, 1] in HSV.
//! Supported types of input disparity: CV_8U, CV_16S.
//! Output disparity has CV_8UC4 type in BGRA format (alpha = 255).
CV_EXPORTS void drawColorDisp(const GpuMat& src_disp, GpuMat& dst_disp, int ndisp);
//! async version
CV_EXPORTS void drawColorDisp(const GpuMat& src_disp, GpuMat& dst_disp, int ndisp, const Stream& stream);

//! Reprojects disparity image to 3D space.
//! Supports CV_8U and CV_16S types of input disparity.
//! The output is a 4-channel floating-point (CV_32FC4) matrix.
//! Each element of this matrix will contain the 3D coordinates of the point (x,y,z,1), computed from the disparity map.
//! Q is the 4x4 perspective transformation matrix that can be obtained with cvStereoRectify.
CV_EXPORTS void reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q);
//! async version
CV_EXPORTS void reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, const Stream& stream);
//! converts image from one color space to another
CV_EXPORTS void cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn = 0);
//! async version
CV_EXPORTS void cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn, const Stream& stream);

//! applies fixed threshold to the image
CV_EXPORTS double threshold(const GpuMat& src, GpuMat& dst, double thresh, double maxval, int type);
//! async version
CV_EXPORTS double threshold(const GpuMat& src, GpuMat& dst, double thresh, double maxval, int type, const Stream& stream);

//! resizes the image
//! supports INTER_NEAREST, INTER_LINEAR
//! supports CV_8UC1, CV_8UC4 types
CV_EXPORTS void resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx=0, double fy=0, int interpolation = INTER_LINEAR);

//! warps the image using affine transformation
//! supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
CV_EXPORTS void warpAffine(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR);

//! warps the image using perspective transformation
//! supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
CV_EXPORTS void warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR);

//! rotates an 8-bit single- or four-channel image
//! supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC
//! supports CV_8UC1, CV_8UC4 types
CV_EXPORTS void rotate(const GpuMat& src, GpuMat& dst, Size dsize, double angle, double xShift = 0, double yShift = 0, int interpolation = INTER_LINEAR);

//! copies 2D array to a larger destination array and pads borders with user-specifiable constant
//! supports CV_8UC1, CV_8UC4, CV_32SC1 and CV_32FC1 types
CV_EXPORTS void copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, const Scalar& value = Scalar());
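// A geometric-preprocessing sketch with the functions above (assumes d_img is
// a CV_8UC1 GpuMat):
//
//     cv::gpu::GpuMat d_small, d_rot;
//     cv::gpu::resize(d_img, d_small, cv::Size(), 0.5, 0.5, cv::INTER_LINEAR);
//     cv::Mat M = cv::getRotationMatrix2D(
//         cv::Point2f(d_img.cols * 0.25f, d_img.rows * 0.25f), 30.0, 1.0);
//     cv::gpu::warpAffine(d_small, d_rot, M, d_small.size());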
//! computes the integral image
//! sum will have CV_32S type, but will contain unsigned int values
//! supports only CV_8UC1 source type
CV_EXPORTS void integral(const GpuMat& src, GpuMat& sum);

//! buffered version
CV_EXPORTS void integralBuffered(const GpuMat& src, GpuMat& sum, GpuMat& buffer);

//! computes the integral image and integral for the squared image
//! sum will have CV_32S type, sqsum - CV_32F type
//! supports only CV_8UC1 source type
CV_EXPORTS void integral(const GpuMat& src, GpuMat& sum, GpuMat& sqsum);

//! computes squared integral image
//! result matrix will have 64F type, but will contain 64U values
//! supports source images of 8UC1 type only
CV_EXPORTS void sqrIntegral(const GpuMat& src, GpuMat& sqsum);

//! computes vertical sum, supports only CV_32FC1 images
CV_EXPORTS void columnSum(const GpuMat& src, GpuMat& sum);

//! computes the standard deviation of integral images
//! supports only CV_32SC1 source type and CV_32FC1 sqr type
//! output will have CV_32FC1 type
CV_EXPORTS void rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, const Rect& rect);
// applies Canny edge detector and produces the edge map
// (disabled until a crash is fixed)
//CV_EXPORTS void Canny(const GpuMat& image, GpuMat& edges, double threshold1, double threshold2, int apertureSize = 3);
//CV_EXPORTS void Canny(const GpuMat& image, GpuMat& edges, GpuMat& buffer, double threshold1, double threshold2, int apertureSize = 3);
//CV_EXPORTS void Canny(const GpuMat& srcDx, const GpuMat& srcDy, GpuMat& edges, double threshold1, double threshold2, int apertureSize = 3);
//CV_EXPORTS void Canny(const GpuMat& srcDx, const GpuMat& srcDy, GpuMat& edges, GpuMat& buffer, double threshold1, double threshold2, int apertureSize = 3);
//! computes Harris cornerness criteria at each image pixel
CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType=BORDER_REFLECT101);

//! computes minimum eigen value of 2x2 derivative covariance matrix at each pixel - the cornerness criteria
CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType=BORDER_REFLECT101);

//! performs per-element multiplication of two full (not packed) Fourier spectrums
//! supports 32FC2 matrices only (interleaved format)
CV_EXPORTS void mulSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB=false);

//! performs per-element multiplication of two full (not packed) Fourier spectrums
//! supports 32FC2 matrices only (interleaved format)
CV_EXPORTS void mulAndScaleSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags,
    float scale, bool conjB=false);
//! Performs a forward or inverse discrete Fourier transform (1D or 2D) of a floating point matrix.
//! Param dft_size is the size of the DFT transform.
//!
//! If the source matrix is not continuous, an additional copy will be done,
//! so to avoid copying ensure the source matrix is continuous. If you want to use a
//! preallocated output, ensure it is continuous too; otherwise it will be reallocated.
//!
//! Being implemented via the CUFFT real-to-complex transform, the result contains only
//! non-redundant values in CUFFT's format. The result as a full complex matrix for such
//! a transform cannot be retrieved.
//!
//! For the complex-to-real transform it is assumed that the source matrix is packed in CUFFT's format.
CV_EXPORTS void dft(const GpuMat& src, GpuMat& dst, Size dft_size, int flags=0);

//! computes convolution (or cross-correlation) of two images using discrete Fourier transform
//! supports source images of 32FC1 type only
//! result matrix will have 32FC1 type
CV_EXPORTS void convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result,
    bool ccorr = false);

struct CV_EXPORTS ConvolveBuf;

//! buffered version
CV_EXPORTS void convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result,
    bool ccorr, ConvolveBuf& buf);

struct CV_EXPORTS ConvolveBuf
{
    ConvolveBuf() {}
    ConvolveBuf(Size image_size, Size templ_size)
        { create(image_size, templ_size); }
    void create(Size image_size, Size templ_size);

private:
    static Size estimateBlockSize(Size result_size, Size templ_size);
    friend void convolve(const GpuMat&, const GpuMat&, GpuMat&, bool, ConvolveBuf&);

    Size result_size;
    Size block_size;
    Size dft_size;

    GpuMat image_spect, templ_spect, result_spect;
    GpuMat image_block, templ_block, result_data;
};
//! computes the proximity map for the raster template and the image where the template is searched for
CV_EXPORTS void matchTemplate(const GpuMat& image, const GpuMat& templ, GpuMat& result, int method);

//! downsamples image
CV_EXPORTS void downsample(const GpuMat& src, GpuMat& dst, int k=2);

//! performs linear blending of two images
//! to avoid accuracy errors the sum of weights should not be very close to zero
CV_EXPORTS void blendLinear(const GpuMat& img1, const GpuMat& img2, const GpuMat& weights1, const GpuMat& weights2,
    GpuMat& result);
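// A template-matching sketch (assumes d_scene and d_patch are CV_32FC1
// GpuMats; minMaxLoc is declared in the reductions section below):
//
//     cv::gpu::GpuMat d_response;
//     cv::gpu::matchTemplate(d_scene, d_patch, d_response, CV_TM_CCORR);
//     double minVal, maxVal; cv::Point minLoc, maxLoc;
//     cv::gpu::minMaxLoc(d_response, &minVal, &maxVal, &minLoc, &maxLoc);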
////////////////////////////// Matrix reductions //////////////////////////////

//! computes mean value and standard deviation of all or selected array elements
//! supports only CV_8UC1 type
CV_EXPORTS void meanStdDev(const GpuMat& mtx, Scalar& mean, Scalar& stddev);

//! computes norm of array
//! supports NORM_INF, NORM_L1, NORM_L2
//! supports all matrices except 64F
CV_EXPORTS double norm(const GpuMat& src1, int normType=NORM_L2);

//! computes norm of array (buffered version)
//! supports NORM_INF, NORM_L1, NORM_L2
//! supports all matrices except 64F
CV_EXPORTS double norm(const GpuMat& src1, int normType, GpuMat& buf);

//! computes norm of the difference between two arrays
//! supports NORM_INF, NORM_L1, NORM_L2
//! supports only CV_8UC1 type
CV_EXPORTS double norm(const GpuMat& src1, const GpuMat& src2, int normType=NORM_L2);

//! computes sum of array elements
//! supports only single channel images
CV_EXPORTS Scalar sum(const GpuMat& src);

//! computes sum of array elements (buffered version)
//! supports only single channel images
CV_EXPORTS Scalar sum(const GpuMat& src, GpuMat& buf);

//! computes sum of absolute values of array elements
//! supports only single channel images
CV_EXPORTS Scalar absSum(const GpuMat& src);

//! computes sum of absolute values of array elements (buffered version)
//! supports only single channel images
CV_EXPORTS Scalar absSum(const GpuMat& src, GpuMat& buf);

//! computes squared sum of array elements
//! supports only single channel images
CV_EXPORTS Scalar sqrSum(const GpuMat& src);

//! computes squared sum of array elements (buffered version)
//! supports only single channel images
CV_EXPORTS Scalar sqrSum(const GpuMat& src, GpuMat& buf);

//! finds global minimum and maximum array elements and returns their values
CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal=0, const GpuMat& mask=GpuMat());

//! finds global minimum and maximum array elements and returns their values (buffered version)
CV_EXPORTS void minMax(const GpuMat& src, double* minVal, double* maxVal, const GpuMat& mask, GpuMat& buf);

//! finds global minimum and maximum array elements and returns their values with locations
CV_EXPORTS void minMaxLoc(const GpuMat& src, double* minVal, double* maxVal=0, Point* minLoc=0, Point* maxLoc=0,
    const GpuMat& mask=GpuMat());

//! finds global minimum and maximum array elements and returns their values with locations (buffered version)
CV_EXPORTS void minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc,
    const GpuMat& mask, GpuMat& valbuf, GpuMat& locbuf);

//! counts non-zero array elements
CV_EXPORTS int countNonZero(const GpuMat& src);

//! counts non-zero array elements (buffered version)
CV_EXPORTS int countNonZero(const GpuMat& src, GpuMat& buf);
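// A reductions sketch (assumes d_img is a CV_8UC1 GpuMat; the buffered
// overloads above let callers reuse scratch memory across repeated calls):
//
//     double mn, mx;
//     cv::gpu::minMax(d_img, &mn, &mx);
//     cv::Scalar total = cv::gpu::sum(d_img);
//     int nonzero = cv::gpu::countNonZero(d_img);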
///////////////////////////// Calibration 3D //////////////////////////////////

CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
    GpuMat& dst);
//! async version
CV_EXPORTS void transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
    GpuMat& dst, const Stream& stream);

CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
    const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst);
//! async version
CV_EXPORTS void projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec,
    const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst,
    const Stream& stream);

CV_EXPORTS void solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
    const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess=false,
    int num_iters=100, float max_dist=8.0, int min_inlier_count=100,
    vector<int>* inliers=NULL);
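// A pose-estimation sketch with solvePnPRansac above (assumes object is a 1xN
// CV_32FC3 Mat of 3D points, image the matching 1xN CV_32FC2 projections, and
// camera_mat/dist_coef the usual calibration matrices):
//
//     cv::Mat rvec, tvec;
//     std::vector<int> inliers;
//     cv::gpu::solvePnPRansac(object, image, camera_mat, dist_coef,
//                             rvec, tvec, false, 100, 8.0f, 100, &inliers);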
//////////////////////////////// Filter Engine ////////////////////////////////

/*!
The Base Class for 1D or Row-wise Filters

This is the base class for linear or non-linear filters that process 1D data.
In particular, such filters are used for the "horizontal" filtering parts in separable filters.
*/
class CV_EXPORTS BaseRowFilter_GPU
{
public:
    BaseRowFilter_GPU(int ksize_, int anchor_) : ksize(ksize_), anchor(anchor_) {}
    virtual ~BaseRowFilter_GPU() {}
    virtual void operator()(const GpuMat& src, GpuMat& dst) = 0;
    int ksize, anchor;
};

/*!
The Base Class for Column-wise Filters

This is the base class for linear or non-linear filters that process columns of 2D arrays.
Such filters are used for the "vertical" filtering parts in separable filters.
*/
class CV_EXPORTS BaseColumnFilter_GPU
{
public:
    BaseColumnFilter_GPU(int ksize_, int anchor_) : ksize(ksize_), anchor(anchor_) {}
    virtual ~BaseColumnFilter_GPU() {}
    virtual void operator()(const GpuMat& src, GpuMat& dst) = 0;
    int ksize, anchor;
};

/*!
The Base Class for Non-Separable 2D Filters.

This is the base class for linear or non-linear 2D filters.
*/
class CV_EXPORTS BaseFilter_GPU
{
public:
    BaseFilter_GPU(const Size& ksize_, const Point& anchor_) : ksize(ksize_), anchor(anchor_) {}
    virtual ~BaseFilter_GPU() {}
    virtual void operator()(const GpuMat& src, GpuMat& dst) = 0;
    Size ksize;
    Point anchor;
};

/*!
The Base Class for Filter Engine.

The class can be used to apply an arbitrary filtering operation to an image.
It contains all the necessary intermediate buffers.
*/
class CV_EXPORTS FilterEngine_GPU
{
public:
    virtual ~FilterEngine_GPU() {}

    virtual void apply(const GpuMat& src, GpuMat& dst, Rect roi = Rect(0,0,-1,-1)) = 0;
};
//! returns the non-separable filter engine with the specified filter
CV_EXPORTS Ptr<FilterEngine_GPU> createFilter2D_GPU(const Ptr<BaseFilter_GPU>& filter2D, int srcType, int dstType);

//! returns the separable filter engine with the specified filters
CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableFilter_GPU(const Ptr<BaseRowFilter_GPU>& rowFilter,
    const Ptr<BaseColumnFilter_GPU>& columnFilter, int srcType, int bufType, int dstType);

//! returns horizontal 1D box filter
//! supports only CV_8UC1 source type and CV_32FC1 sum type
CV_EXPORTS Ptr<BaseRowFilter_GPU> getRowSumFilter_GPU(int srcType, int sumType, int ksize, int anchor = -1);

//! returns vertical 1D box filter
//! supports only CV_8UC1 sum type and CV_32FC1 dst type
CV_EXPORTS Ptr<BaseColumnFilter_GPU> getColumnSumFilter_GPU(int sumType, int dstType, int ksize, int anchor = -1);

//! returns 2D box filter
//! supports CV_8UC1 and CV_8UC4 source types, dst type must be the same as source type
CV_EXPORTS Ptr<BaseFilter_GPU> getBoxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1, -1));

//! returns box filter engine
CV_EXPORTS Ptr<FilterEngine_GPU> createBoxFilter_GPU(int srcType, int dstType, const Size& ksize,
    const Point& anchor = Point(-1,-1));

//! returns 2D morphological filter
//! only MORPH_ERODE and MORPH_DILATE are supported
//! supports CV_8UC1 and CV_8UC4 types
//! kernel must have CV_8UC1 type, one row, and cols == ksize.width * ksize.height
CV_EXPORTS Ptr<BaseFilter_GPU> getMorphologyFilter_GPU(int op, int type, const Mat& kernel, const Size& ksize,
    Point anchor=Point(-1,-1));

//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported.
CV_EXPORTS Ptr<FilterEngine_GPU> createMorphologyFilter_GPU(int op, int type, const Mat& kernel,
    const Point& anchor = Point(-1,-1), int iterations = 1);

//! returns 2D filter with the specified kernel
//! supports CV_8UC1 and CV_8UC4 types
CV_EXPORTS Ptr<BaseFilter_GPU> getLinearFilter_GPU(int srcType, int dstType, const Mat& kernel, const Size& ksize,
    Point anchor = Point(-1, -1));

//! returns the non-separable linear filter engine
CV_EXPORTS Ptr<FilterEngine_GPU> createLinearFilter_GPU(int srcType, int dstType, const Mat& kernel,
    const Point& anchor = Point(-1,-1));

//! returns the primitive row filter with the specified kernel.
//! supports only CV_8UC1, CV_8UC4, CV_16SC1, CV_16SC2, CV_32SC1, CV_32FC1 source types.
//! there are two versions of the algorithm: NPP and OpenCV.
//! the NPP version is called when srcType == CV_8UC1 or srcType == CV_8UC4 and bufType == srcType;
//! otherwise the OpenCV version is called.
//! NPP supports only the BORDER_CONSTANT border type.
//! the OpenCV version supports only CV_32F as buffer depth and
//! the BORDER_REFLECT101, BORDER_REPLICATE and BORDER_CONSTANT border types.
CV_EXPORTS Ptr<BaseRowFilter_GPU> getLinearRowFilter_GPU(int srcType, int bufType, const Mat& rowKernel,
    int anchor = -1, int borderType = BORDER_CONSTANT);

//! returns the primitive column filter with the specified kernel.
//! supports only CV_8UC1, CV_8UC4, CV_16SC1, CV_16SC2, CV_32SC1, CV_32FC1 dst types.
//! there are two versions of the algorithm: NPP and OpenCV.
//! the NPP version is called when dstType == CV_8UC1 or dstType == CV_8UC4 and bufType == dstType;
//! otherwise the OpenCV version is called.
//! NPP supports only the BORDER_CONSTANT border type.
//! the OpenCV version supports only CV_32F as buffer depth and
//! the BORDER_REFLECT101, BORDER_REPLICATE and BORDER_CONSTANT border types.
CV_EXPORTS Ptr<BaseColumnFilter_GPU> getLinearColumnFilter_GPU(int bufType, int dstType, const Mat& columnKernel,
    int anchor = -1, int borderType = BORDER_CONSTANT);

//! returns the separable linear filter engine
CV_EXPORTS Ptr<FilterEngine_GPU> createSeparableLinearFilter_GPU(int srcType, int dstType, const Mat& rowKernel,
    const Mat& columnKernel, const Point& anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT,
    int columnBorderType = -1);

//! returns filter engine for the generalized Sobel operator
CV_EXPORTS Ptr<FilterEngine_GPU> createDerivFilter_GPU(int srcType, int dstType, int dx, int dy, int ksize,
    int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);

//! returns the Gaussian filter engine
CV_EXPORTS Ptr<FilterEngine_GPU> createGaussianFilter_GPU(int type, Size ksize, double sigma1, double sigma2 = 0,
    int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);

//! returns maximum filter
CV_EXPORTS Ptr<BaseFilter_GPU> getMaxFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1,-1));

//! returns minimum filter
CV_EXPORTS Ptr<BaseFilter_GPU> getMinFilter_GPU(int srcType, int dstType, const Size& ksize, Point anchor = Point(-1,-1));
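// A sketch of reusing a filter engine across frames (assumes a sequence of
// CV_8UC4 frames of fixed size; reusing the engine avoids per-call setup):
//
//     cv::Ptr<cv::gpu::FilterEngine_GPU> gauss =
//         cv::gpu::createGaussianFilter_GPU(CV_8UC4, cv::Size(5, 5), 1.5);
//     for (;;)
//     {
//         // ... upload next frame into d_frame ...
//         gauss->apply(d_frame, d_blurred);
//     }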
//! smooths the image using the normalized box filter
//! supports CV_8UC1, CV_8UC4 types
CV_EXPORTS void boxFilter(const GpuMat& src, GpuMat& dst, int ddepth, Size ksize, Point anchor = Point(-1,-1));

//! a synonym for normalized box filter
static inline void blur(const GpuMat& src, GpuMat& dst, Size ksize, Point anchor = Point(-1,-1)) { boxFilter(src, dst, -1, ksize, anchor); }

//! erodes the image (applies the local minimum operator)
CV_EXPORTS void erode(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1);

//! dilates the image (applies the local maximum operator)
CV_EXPORTS void dilate(const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1);

//! applies an advanced morphological operation to the image
CV_EXPORTS void morphologyEx(const GpuMat& src, GpuMat& dst, int op, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1);

//! applies non-separable 2D linear filter to the image
CV_EXPORTS void filter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernel, Point anchor=Point(-1,-1));

//! applies separable 2D linear filter to the image
CV_EXPORTS void sepFilter2D(const GpuMat& src, GpuMat& dst, int ddepth, const Mat& kernelX, const Mat& kernelY,
    Point anchor = Point(-1,-1), int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);

//! applies generalized Sobel operator to the image
CV_EXPORTS void Sobel(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, int ksize = 3, double scale = 1,
    int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);

//! applies the vertical or horizontal Scharr operator to the image
CV_EXPORTS void Scharr(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, double scale = 1,
    int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);

//! smooths the image using a Gaussian filter
CV_EXPORTS void GaussianBlur(const GpuMat& src, GpuMat& dst, Size ksize, double sigma1, double sigma2 = 0,
    int rowBorderType = BORDER_DEFAULT, int columnBorderType = -1);

//! applies Laplacian operator to the image
//! supports only ksize = 1 and ksize = 3
CV_EXPORTS void Laplacian(const GpuMat& src, GpuMat& dst, int ddepth, int ksize = 1, double scale = 1);
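// A gradient sketch combining the filters above with cartToPolar from the
// arithmetics section (assumes d_gray is a CV_8UC1 GpuMat):
//
//     cv::gpu::GpuMat d_dx, d_dy;
//     cv::gpu::Sobel(d_gray, d_dx, CV_32F, 1, 0);
//     cv::gpu::Sobel(d_gray, d_dy, CV_32F, 0, 1);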
//////////////////////////////// Image Labeling ////////////////////////////////

//! performs labeling via graph cuts
CV_EXPORTS void graphcut(GpuMat& terminals, GpuMat& leftTransp, GpuMat& rightTransp, GpuMat& top, GpuMat& bottom, GpuMat& labels, GpuMat& buf);
////////////////////////////////// Histograms //////////////////////////////////

//! Compute levels with even distribution. levels will have 1 row, nLevels cols and CV_32SC1 type.
CV_EXPORTS void evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel);
//! Calculates histogram with evenly distributed bins for single channel source.
//! Supports CV_8UC1, CV_16UC1 and CV_16SC1 source types.
//! Output hist will have one row, histSize cols and CV_32SC1 type.
CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel);
//! Calculates histogram with evenly distributed bins for four-channel source.
//! All channels of source are processed separately.
//! Supports CV_8UC4, CV_16UC4 and CV_16SC4 source types.
//! Output hist[i] will have one row, histSize[i] cols and CV_32SC1 type.
CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4]);
//! Calculates histogram with bins determined by levels array.
//! levels must have one row and CV_32SC1 type if source has integer type, or CV_32FC1 otherwise.
//! Supports CV_8UC1, CV_16UC1, CV_16SC1 and CV_32FC1 source types.
//! Output hist will have one row, (levels.cols-1) cols and CV_32SC1 type.
CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels);
//! Calculates histogram with bins determined by levels array.
//! All levels must have one row and CV_32SC1 type if source has integer type, or CV_32FC1 otherwise.
//! All channels of source are processed separately.
//! Supports CV_8UC4, CV_16UC4, CV_16SC4 and CV_32FC4 source types.
//! Output hist[i] will have one row, (levels[i].cols-1) cols and CV_32SC1 type.
CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4]);
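// An 8-bit histogram sketch with histEven above (assumes d_gray is CV_8UC1;
// the result is a 1x256 CV_32SC1 row vector):
//
//     cv::gpu::GpuMat d_hist;
//     cv::gpu::histEven(d_gray, d_hist, 256, 0, 256);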
//////////////////////////////// StereoBM_GPU ////////////////////////////////

class CV_EXPORTS StereoBM_GPU
{
public:
    enum { BASIC_PRESET = 0, PREFILTER_XSOBEL = 1 };

    enum { DEFAULT_NDISP = 64, DEFAULT_WINSZ = 19 };

    //! the default constructor
    StereoBM_GPU();
    //! the full constructor taking the camera-specific preset, number of disparities and the SAD window size. ndisparities must be a multiple of 8.
    StereoBM_GPU(int preset, int ndisparities = DEFAULT_NDISP, int winSize = DEFAULT_WINSZ);

    //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair.
    //! Output disparity has CV_8U type.
    void operator() (const GpuMat& left, const GpuMat& right, GpuMat& disparity);
    //! async version
    void operator() (const GpuMat& left, const GpuMat& right, GpuMat& disparity, const Stream& stream);

    //! A heuristic that tries to estimate
    // whether the current GPU will be faster than the CPU for this algorithm.
    // It queries the current active device.
    static bool checkIfGpuCallReasonable();

    int preset;
    int ndisp;
    int winSize;

    // If avergeTexThreshold == 0 => post-processing is disabled.
    // If avergeTexThreshold != 0 then disparity is set to 0 in each point (x, y) where for the left image
    // SumOfHorizontalGradientsInWindow(x, y, winSize) < (winSize * winSize) * avergeTexThreshold,
    // i.e. the input left image is low-textured.
    float avergeTexThreshold;

private:
    GpuMat minSSD, leBuf, riBuf;
};
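// A block-matching sketch (assumes d_left and d_right are rectified CV_8UC1
// images of equal size):
//
//     cv::gpu::StereoBM_GPU bm(cv::gpu::StereoBM_GPU::PREFILTER_XSOBEL, 64, 19);
//     cv::gpu::GpuMat d_disp, d_color;
//     bm(d_left, d_right, d_disp);            // CV_8U disparity
//     cv::gpu::drawColorDisp(d_disp, d_color, 64);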
////////////////////////// StereoBeliefPropagation ///////////////////////////
// "Efficient Belief Propagation for Early Vision"
// P. Felzenszwalb, D. Huttenlocher

class CV_EXPORTS StereoBeliefPropagation
{
public:
    enum { DEFAULT_NDISP = 64 };
    enum { DEFAULT_ITERS = 5 };
    enum { DEFAULT_LEVELS = 5 };

    static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels);

    //! the default constructor
    explicit StereoBeliefPropagation(int ndisp = DEFAULT_NDISP,
        int iters = DEFAULT_ITERS,
        int levels = DEFAULT_LEVELS,
        int msg_type = CV_32F);

    //! the full constructor taking the number of disparities, number of BP iterations on each level,
    //! number of levels, truncation of data cost, data weight,
    //! truncation of discontinuity cost and discontinuity single jump.
    //! DataTerm = data_weight * min(fabs(I2-I1), max_data_term)
    //! DiscTerm = min(disc_single_jump * fabs(f1-f2), max_disc_term)
    //! please see the paper for more details
    StereoBeliefPropagation(int ndisp, int iters, int levels,
        float max_data_term, float data_weight,
        float max_disc_term, float disc_single_jump,
        int msg_type = CV_32F);

    //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair;
    //! if disparity is empty the output type will be CV_16S, else the output type will be disparity.type().
    void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity);
    //! async version
    void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream);

    //! version for user specified data term
    void operator()(const GpuMat& data, GpuMat& disparity);
    void operator()(const GpuMat& data, GpuMat& disparity, Stream& stream);

    int ndisp;
    int iters;
    int levels;

    float max_data_term;
    float data_weight;
    float max_disc_term;
    float disc_single_jump;

    int msg_type;
private:
    GpuMat u, d, l, r, u2, d2, l2, r2;
    std::vector<GpuMat> datas;
    GpuMat out;
};
/////////////////////////// StereoConstantSpaceBP ///////////////////////////
// "A Constant-Space Belief Propagation Algorithm for Stereo Matching"
// Qingxiong Yang, Liang Wang, Narendra Ahuja
// http://vision.ai.uiuc.edu/~qyang6/

class CV_EXPORTS StereoConstantSpaceBP
{
public:
    enum { DEFAULT_NDISP = 128 };
    enum { DEFAULT_ITERS = 8 };
    enum { DEFAULT_LEVELS = 4 };
    enum { DEFAULT_NR_PLANE = 4 };

    static void estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels, int& nr_plane);

    //! the default constructor
    explicit StereoConstantSpaceBP(int ndisp = DEFAULT_NDISP,
        int iters = DEFAULT_ITERS,
        int levels = DEFAULT_LEVELS,
        int nr_plane = DEFAULT_NR_PLANE,
        int msg_type = CV_32F);

    //! the full constructor taking the number of disparities, number of BP iterations on each level,
    //! number of levels, number of active disparities on the first level, truncation of data cost, data weight,
    //! truncation of discontinuity cost, discontinuity single jump and minimum disparity threshold
    StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane,
        float max_data_term, float data_weight, float max_disc_term, float disc_single_jump,
        int min_disp_th = 0,
        int msg_type = CV_32F);

    //! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair;
    //! if disparity is empty the output type will be CV_16S, else the output type will be disparity.type().
    void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity);
    //! async version
    void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream);

    int ndisp;
    int iters;
    int levels;
    int nr_plane;

    float max_data_term;
    float data_weight;
    float max_disc_term;
    float disc_single_jump;

    int min_disp_th;
    int msg_type;

    bool use_local_init_data_cost;
private:
    GpuMat u[2], d[2], l[2], r[2];
    GpuMat disp_selected_pyr[2];

    GpuMat data_cost;
    GpuMat data_cost_selected;

    GpuMat out;
};
/////////////////////////// DisparityBilateralFilter ///////////////////////////
// Disparity map refinement using joint bilateral filtering given a single color image.
// Qingxiong Yang, Liang Wang, Narendra Ahuja
// http://vision.ai.uiuc.edu/~qyang6/

class CV_EXPORTS DisparityBilateralFilter
{
public:
    enum { DEFAULT_NDISP = 64 };
    enum { DEFAULT_RADIUS = 3 };
    enum { DEFAULT_ITERS = 1 };

    //! the default constructor
    explicit DisparityBilateralFilter(int ndisp = DEFAULT_NDISP, int radius = DEFAULT_RADIUS, int iters = DEFAULT_ITERS);

    //! the full constructor taking the number of disparities, filter radius,
    //! number of iterations, truncation of data continuity, truncation of disparity continuity
    //! and filter range sigma
    DisparityBilateralFilter(int ndisp, int radius, int iters, float edge_threshold, float max_disc_threshold, float sigma_range);

    //! the disparity map refinement operator. Refines the disparity map using joint bilateral filtering given a single color image.
    //! disparity must have CV_8U or CV_16S type, image must have CV_8UC1 or CV_8UC3 type.
    void operator()(const GpuMat& disparity, const GpuMat& image, GpuMat& dst);
    //! async version
    void operator()(const GpuMat& disparity, const GpuMat& image, GpuMat& dst, Stream& stream);

private:
    int ndisp;
    int radius;
    int iters;

    float edge_threshold;
    float max_disc_threshold;
    float sigma_range;

    GpuMat table_color;
    GpuMat table_space;
};
//////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector //////////////

struct CV_EXPORTS HOGDescriptor
{
    enum { DEFAULT_WIN_SIGMA = -1 };
    enum { DEFAULT_NLEVELS = 64 };
    enum { DESCR_FORMAT_ROW_BY_ROW, DESCR_FORMAT_COL_BY_COL };

    HOGDescriptor(Size win_size=Size(64, 128), Size block_size=Size(16, 16),
                  Size block_stride=Size(8, 8), Size cell_size=Size(8, 8),
                  int nbins=9, double win_sigma=DEFAULT_WIN_SIGMA,
                  double threshold_L2hys=0.2, bool gamma_correction=true,
                  int nlevels=DEFAULT_NLEVELS);
\r
    size_t getDescriptorSize() const;
    size_t getBlockHistogramSize() const;

    void setSVMDetector(const vector<float>& detector);

    static vector<float> getDefaultPeopleDetector();
    static vector<float> getPeopleDetector48x96();
    static vector<float> getPeopleDetector64x128();

    void detect(const GpuMat& img, vector<Point>& found_locations,
                double hit_threshold=0, Size win_stride=Size(),
                Size padding=Size());

    void detectMultiScale(const GpuMat& img, vector<Rect>& found_locations,
                          double hit_threshold=0, Size win_stride=Size(),
                          Size padding=Size(), double scale0=1.05,
                          int group_threshold=2);

    void getDescriptors(const GpuMat& img, Size win_stride,
                        GpuMat& descriptors,
                        int descr_format=DESCR_FORMAT_COL_BY_COL);
\r
    Size win_size;
    Size block_size;
    Size block_stride;
    Size cell_size;
    int nbins;
    double win_sigma;
    double threshold_L2hys;
    bool gamma_correction;
    int nlevels;

protected:
    void computeBlockHistograms(const GpuMat& img);
    void computeGradient(const GpuMat& img, GpuMat& grad, GpuMat& qangle);

    double getWinSigma() const;
    bool checkDetectorSize() const;

    static int numPartsWithin(int size, int part_size, int stride);
    static Size numPartsWithin(Size size, Size part_size, Size stride);

    // Coefficients of the separating plane
    float free_coef;
    GpuMat detector;

    // Results of the last classification step
    GpuMat labels, labels_buf;

    // Results of the last histogram evaluation step
    GpuMat block_hists, block_hists_buf;

    // Gradient computation results
    GpuMat grad, qangle, grad_buf, qangle_buf;

    // Returns a sub-buffer with the required size; reallocates the buffer if necessary.
    static GpuMat getBuffer(const Size& sz, int type, GpuMat& buf);
    static GpuMat getBuffer(int rows, int cols, int type, GpuMat& buf);

    std::vector<GpuMat> image_scales;
};
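
// A minimal usage sketch (illustrative only). It assumes an image "street.png" exists
// on disk; grayscale input is used here:
//
//     HOGDescriptor hog;
//     hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
//     GpuMat d_img(imread("street.png", 0));
//     vector<Rect> people;
//     hog.detectMultiScale(d_img, people);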
\r
////////////////////////////////// BruteForceMatcher //////////////////////////////////

class CV_EXPORTS BruteForceMatcher_GPU_base
{
public:
    enum DistType { L1Dist = 0, L2Dist };

    explicit BruteForceMatcher_GPU_base(DistType distType = L2Dist);
\r
    // Add descriptors to the train descriptor collection.
    void add(const std::vector<GpuMat>& descCollection);

    // Get the train descriptor collection.
    const std::vector<GpuMat>& getTrainDescriptors() const;

    // Clear the train descriptor collection.
    void clear();

    // Return true if there are no train descriptors in the collection.
    bool empty() const;

    // Return true if the matcher supports masks in the match methods.
    bool isMaskSupported() const;
\r
    // Find the single best match for each query descriptor.
    // trainIdx.at<int>(0, queryIdx) will contain the best train index for queryIdx,
    // distance.at<float>(0, queryIdx) will contain the corresponding distance.
    void matchSingle(const GpuMat& queryDescs, const GpuMat& trainDescs,
                     GpuMat& trainIdx, GpuMat& distance,
                     const GpuMat& mask = GpuMat());

    // Download trainIdx and distance to a CPU vector of DMatch.
    static void matchDownload(const GpuMat& trainIdx, const GpuMat& distance, std::vector<DMatch>& matches);

    // Find the single best match for each query descriptor.
    void match(const GpuMat& queryDescs, const GpuMat& trainDescs, std::vector<DMatch>& matches,
               const GpuMat& mask = GpuMat());

    // Make a GPU collection of train descriptors and masks in the format suitable for the matchCollection function.
    void makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection,
                           const std::vector<GpuMat>& masks = std::vector<GpuMat>());

    // Find the single best match from the train collection for each query descriptor.
    // trainIdx.at<int>(0, queryIdx) will contain the best train index for queryIdx,
    // imgIdx.at<int>(0, queryIdx) will contain the best image index for queryIdx,
    // distance.at<float>(0, queryIdx) will contain the corresponding distance.
    void matchCollection(const GpuMat& queryDescs, const GpuMat& trainCollection,
                         GpuMat& trainIdx, GpuMat& imgIdx, GpuMat& distance,
                         const GpuMat& maskCollection);

    // Download trainIdx, imgIdx and distance to a CPU vector of DMatch.
    static void matchDownload(const GpuMat& trainIdx, const GpuMat& imgIdx, const GpuMat& distance,
                              std::vector<DMatch>& matches);

    // Find the single best match from the train collection for each query descriptor.
    void match(const GpuMat& queryDescs, std::vector<DMatch>& matches,
               const std::vector<GpuMat>& masks = std::vector<GpuMat>());
\r
    // Find the k best matches for each query descriptor (in increasing order of distance).
    // trainIdx.at<int>(queryIdx, i) will contain the index of the i-th best train descriptor (i < k),
    // distance.at<float>(queryIdx, i) will contain the corresponding distance.
    // allDist is a buffer that stores all distances between query and train descriptors;
    // it has size (nQuery, nTrain) and type CV_32F.
    // allDist.at<float>(queryIdx, trainIdx) will contain FLT_MAX if trainIdx is one of the k best,
    // otherwise it will contain the distance between the queryIdx and trainIdx descriptors.
    void knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
                  GpuMat& trainIdx, GpuMat& distance, GpuMat& allDist, int k, const GpuMat& mask = GpuMat());

    // Download trainIdx and distance to a CPU vector of DMatch.
    // compactResult is used when the mask is not empty. If compactResult is false, the matches
    // vector will have the same size as the number of query descriptor rows. If compactResult is true,
    // the matches vector will not contain matches for fully masked-out query descriptors.
    static void knnMatchDownload(const GpuMat& trainIdx, const GpuMat& distance,
                                 std::vector< std::vector<DMatch> >& matches, bool compactResult = false);

    // Find the k best matches for each query descriptor (in increasing order of distance).
    // compactResult is used when the mask is not empty. If compactResult is false, the matches
    // vector will have the same size as the number of query descriptor rows. If compactResult is true,
    // the matches vector will not contain matches for fully masked-out query descriptors.
    void knnMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
                  std::vector< std::vector<DMatch> >& matches, int k, const GpuMat& mask = GpuMat(),
                  bool compactResult = false);

    // Find the k best matches from the train collection for each query descriptor (in increasing order of distance).
    // compactResult is used when the masks are not empty. If compactResult is false, the matches
    // vector will have the same size as the number of query descriptor rows. If compactResult is true,
    // the matches vector will not contain matches for fully masked-out query descriptors.
    void knnMatch(const GpuMat& queryDescs, std::vector< std::vector<DMatch> >& matches, int knn,
                  const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);
\r
    // Find the best matches for each query descriptor that have distance less than maxDistance.
    // nMatches.at<unsigned int>(0, queryIdx) will contain the match count for queryIdx.
    // Beware: nMatches can be greater than trainIdx.cols, which means the matcher did not find
    // all matches because it did not have enough memory.
    // trainIdx.at<int>(queryIdx, i) will contain the i-th train index (i < min(nMatches.at<unsigned int>(0, queryIdx), trainIdx.cols)),
    // distance.at<float>(queryIdx, i) will contain the i-th distance (same bound on i).
    // If trainIdx is empty, trainIdx and distance will be created with size nQuery x nTrain;
    // otherwise the user can pass pre-allocated trainIdx and distance with size nQuery x nMaxMatches.
    // The matches are not sorted.
    void radiusMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
                     GpuMat& trainIdx, GpuMat& nMatches, GpuMat& distance, float maxDistance,
                     const GpuMat& mask = GpuMat());

    // Download trainIdx, nMatches and distance to a CPU vector of DMatch.
    // The matches will be sorted in increasing order of distance.
    // compactResult is used when the mask is not empty. If compactResult is false, the matches
    // vector will have the same size as the number of query descriptor rows. If compactResult is true,
    // the matches vector will not contain matches for fully masked-out query descriptors.
    static void radiusMatchDownload(const GpuMat& trainIdx, const GpuMat& nMatches, const GpuMat& distance,
                                    std::vector< std::vector<DMatch> >& matches, bool compactResult = false);

    // Find the best matches for each query descriptor that have distance less than maxDistance
    // (in increasing order of distance).
    void radiusMatch(const GpuMat& queryDescs, const GpuMat& trainDescs,
                     std::vector< std::vector<DMatch> >& matches, float maxDistance,
                     const GpuMat& mask = GpuMat(), bool compactResult = false);

    // Find the best matches from the train collection for each query descriptor that have
    // distance less than maxDistance (in increasing order of distance).
    void radiusMatch(const GpuMat& queryDescs, std::vector< std::vector<DMatch> >& matches, float maxDistance,
                     const std::vector<GpuMat>& masks = std::vector<GpuMat>(), bool compactResult = false);

private:
    DistType distType;

    std::vector<GpuMat> trainDescCollection;
};
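
// A minimal sketch of the two-step GPU path (illustrative only). d_query and d_train
// are assumed to be CV_32F descriptor matrices already uploaded to the device:
//
//     BruteForceMatcher_GPU_base matcher(BruteForceMatcher_GPU_base::L2Dist);
//     GpuMat trainIdx, distance;
//     matcher.matchSingle(d_query, d_train, trainIdx, distance);
//     std::vector<DMatch> matches;
//     BruteForceMatcher_GPU_base::matchDownload(trainIdx, distance, matches);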
\r
template <class Distance>
class CV_EXPORTS BruteForceMatcher_GPU;

template <typename T>
class CV_EXPORTS BruteForceMatcher_GPU< L1<T> > : public BruteForceMatcher_GPU_base
{
public:
    explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(L1Dist) {}
    explicit BruteForceMatcher_GPU(L1<T> /*d*/) : BruteForceMatcher_GPU_base(L1Dist) {}
};

template <typename T>
class CV_EXPORTS BruteForceMatcher_GPU< L2<T> > : public BruteForceMatcher_GPU_base
{
public:
    explicit BruteForceMatcher_GPU() : BruteForceMatcher_GPU_base(L2Dist) {}
    explicit BruteForceMatcher_GPU(L2<T> /*d*/) : BruteForceMatcher_GPU_base(L2Dist) {}
};
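
// The distance type can also be picked through the template front-end
// (illustrative only; uses the same assumed d_query/d_train as above):
//
//     BruteForceMatcher_GPU< L2<float> > matcher;
//     std::vector<DMatch> matches;
//     matcher.match(d_query, d_train, matches);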
\r
////////////////////////////////// CascadeClassifier_GPU //////////////////////////////////////////
// The cascade classifier class for object detection.
class CV_EXPORTS CascadeClassifier_GPU
{
public:
    CascadeClassifier_GPU();
    CascadeClassifier_GPU(const string& filename);
    ~CascadeClassifier_GPU();

    bool empty() const;
    bool load(const string& filename);
    void release();

    /* returns the number of detected objects */
    int detectMultiScale(const GpuMat& image, GpuMat& objectsBuf, double scaleFactor=1.2, int minNeighbors=4, Size minSize=Size());

    bool findLargestObject;
    bool visualizeInPlace;

    Size getClassifierSize() const;

private:
    struct CascadeClassifierImpl;
    CascadeClassifierImpl* impl;
};
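
// A minimal usage sketch (illustrative only). It assumes a trained cascade file and an
// 8-bit grayscale input image are available on disk:
//
//     CascadeClassifier_GPU cascade("haarcascade_frontalface_alt.xml");
//     GpuMat d_img(imread("group.png", 0));
//     GpuMat objBuf;
//     int n = cascade.detectMultiScale(d_img, objBuf);
//     Mat objHost;
//     objBuf.colRange(0, n).download(objHost);   // the first n elements are Rects
//     const Rect* faces = objHost.ptr<Rect>();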
\r
////////////////////////////////// SURF //////////////////////////////////////////

class CV_EXPORTS SURF_GPU : public CvSURFParams
{
public:
    //! the default constructor
    SURF_GPU();

    //! the full constructor taking all the necessary parameters
    explicit SURF_GPU(double _hessianThreshold, int _nOctaves=4,
                      int _nOctaveLayers=2, bool _extended=false, float _keypointsRatio=0.01f, bool _upright = false);
\r
    //! returns the descriptor size in floats (64 or 128)
    int descriptorSize() const;

    //! upload host keypoints to device memory
    void uploadKeypoints(const vector<KeyPoint>& keypoints, GpuMat& keypointsGPU);
    //! download keypoints from device to host memory
    void downloadKeypoints(const GpuMat& keypointsGPU, vector<KeyPoint>& keypoints);

    //! download descriptors from device to host memory
    void downloadDescriptors(const GpuMat& descriptorsGPU, vector<float>& descriptors);

    //! finds the keypoints using the fast hessian detector used in SURF
    //! supports CV_8UC1 images
    //! keypoints will have 1 row and type CV_32FC(6)
    //! keypoints.at<float[6]>(0, i) contains the i-th keypoint
    //! format: (x, y, laplacian, size, dir, hessian)
    void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints);
    //! finds the keypoints and computes their descriptors.
    //! Optionally it can compute descriptors for user-provided keypoints and recompute the keypoints' direction.
    void operator()(const GpuMat& img, const GpuMat& mask, GpuMat& keypoints, GpuMat& descriptors,
                    bool useProvidedKeypoints = false);

    void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints);
    void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, GpuMat& descriptors,
                    bool useProvidedKeypoints = false);

    void operator()(const GpuMat& img, const GpuMat& mask, std::vector<KeyPoint>& keypoints, std::vector<float>& descriptors,
                    bool useProvidedKeypoints = false);
\r
    //! max keypoints = min(keypointsRatio * img.size().area(), 65535)
    float keypointsRatio;

    GpuMat sum, mask1, maskSum, intBuffer;

    GpuMat det, trace;

    GpuMat maxPosBuffer;
    GpuMat featuresBuffer;
    GpuMat keypointsBuffer;
};
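
// A minimal usage sketch (illustrative only). It assumes an 8-bit grayscale image
// "scene.png" is available on disk:
//
//     SURF_GPU surf;
//     GpuMat d_img(imread("scene.png", 0));
//     GpuMat d_keypoints, d_descriptors;
//     surf(d_img, GpuMat(), d_keypoints, d_descriptors);   // empty GpuMat() = no mask
//     vector<KeyPoint> keypoints;
//     surf.downloadKeypoints(d_keypoints, keypoints);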
\r
//! Speckle filtering - filters small connected components on a disparity image.
//! It sets pixel (x,y) to newVal if it corresponds to a small CC with size < maxSpeckleSize.
//! The threshold for the border between CCs is diffThreshold.
CV_EXPORTS void filterSpeckles(Mat& img, uchar newVal, int maxSpeckleSize, uchar diffThreshold, Mat& buf);
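
// A minimal usage sketch (illustrative only). The uchar parameters suggest an 8-bit
// disparity image; disp8u is assumed to be such a map, e.g. a CV_16S result from the
// stereo matchers above converted to CV_8U:
//
//     Mat buf;                              // work buffer, reallocated as needed
//     filterSpeckles(disp8u, 0, 100, 2, buf);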
\r
} // namespace gpu
} // namespace cv

#include "opencv2/gpu/matrix_operations.hpp"

#endif /* __OPENCV_GPU_HPP__ */