/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"

using namespace cv;
using namespace cv::gpu;

/* stub for the deprecated constructor */
cv::gpu::CannyBuf::CannyBuf(const GpuMat&, const GpuMat&) { }
#if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)

void cv::gpu::meanShiftFiltering(const GpuMat&, GpuMat&, int, int, TermCriteria, Stream&) { throw_nogpu(); }
void cv::gpu::meanShiftProc(const GpuMat&, GpuMat&, GpuMat&, int, int, TermCriteria, Stream&) { throw_nogpu(); }
void cv::gpu::drawColorDisp(const GpuMat&, GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::reprojectImageTo3D(const GpuMat&, GpuMat&, const Mat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::copyMakeBorder(const GpuMat&, GpuMat&, int, int, int, int, int, const Scalar&, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpPlaneMaps(Size, Rect, const Mat&, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpCylindricalMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpSphericalMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::rotate(const GpuMat&, GpuMat&, Size, double, double, double, int, Stream&) { throw_nogpu(); }
void cv::gpu::integral(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::integralBuffered(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::sqrIntegral(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::columnSum(const GpuMat&, GpuMat&) { throw_nogpu(); }
void cv::gpu::rectStdDev(const GpuMat&, const GpuMat&, GpuMat&, const Rect&, Stream&) { throw_nogpu(); }
void cv::gpu::evenLevels(GpuMat&, int, int, int) { throw_nogpu(); }
void cv::gpu::histEven(const GpuMat&, GpuMat&, int, int, int, Stream&) { throw_nogpu(); }
void cv::gpu::histEven(const GpuMat&, GpuMat&, GpuMat&, int, int, int, Stream&) { throw_nogpu(); }
void cv::gpu::histEven(const GpuMat&, GpuMat*, int*, int*, int*, Stream&) { throw_nogpu(); }
void cv::gpu::histEven(const GpuMat&, GpuMat*, GpuMat&, int*, int*, int*, Stream&) { throw_nogpu(); }
void cv::gpu::histRange(const GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::histRange(const GpuMat&, GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::histRange(const GpuMat&, GpuMat*, const GpuMat*, Stream&) { throw_nogpu(); }
void cv::gpu::histRange(const GpuMat&, GpuMat*, const GpuMat*, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::calcHist(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::calcHist(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, int, int, double, int) { throw_nogpu(); }
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, double, int) { throw_nogpu(); }
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, double, int, Stream&) { throw_nogpu(); }
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, int, int, int) { throw_nogpu(); }
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, int) { throw_nogpu(); }
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, int, Stream&) { throw_nogpu(); }
void cv::gpu::mulSpectrums(const GpuMat&, const GpuMat&, GpuMat&, int, bool, Stream&) { throw_nogpu(); }
void cv::gpu::mulAndScaleSpectrums(const GpuMat&, const GpuMat&, GpuMat&, int, float, bool, Stream&) { throw_nogpu(); }
void cv::gpu::dft(const GpuMat&, GpuMat&, Size, int, Stream&) { throw_nogpu(); }
void cv::gpu::ConvolveBuf::create(Size, Size) { throw_nogpu(); }
void cv::gpu::convolve(const GpuMat&, const GpuMat&, GpuMat&, bool) { throw_nogpu(); }
void cv::gpu::convolve(const GpuMat&, const GpuMat&, GpuMat&, bool, ConvolveBuf&, Stream&) { throw_nogpu(); }
void cv::gpu::Canny(const GpuMat&, GpuMat&, double, double, int, bool) { throw_nogpu(); }
void cv::gpu::Canny(const GpuMat&, CannyBuf&, GpuMat&, double, double, int, bool) { throw_nogpu(); }
void cv::gpu::Canny(const GpuMat&, const GpuMat&, GpuMat&, double, double, bool) { throw_nogpu(); }
void cv::gpu::Canny(const GpuMat&, const GpuMat&, CannyBuf&, GpuMat&, double, double, bool) { throw_nogpu(); }
void cv::gpu::CannyBuf::create(const Size&, int) { throw_nogpu(); }
void cv::gpu::CannyBuf::release() { throw_nogpu(); }
cv::Ptr<cv::gpu::CLAHE> cv::gpu::createCLAHE(double, cv::Size) { throw_nogpu(); return cv::Ptr<cv::gpu::CLAHE>(); }

#else /* !defined (HAVE_CUDA) */
////////////////////////////////////////////////////////////////////////
// meanShiftFiltering_GPU

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        void meanShiftFiltering_gpu(const PtrStepSzb& src, PtrStepSzb dst, int sp, int sr, int maxIter, float eps, cudaStream_t stream);
    }
}}}

void cv::gpu::meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr, TermCriteria criteria, Stream& stream)
{
    using namespace ::cv::gpu::device::imgproc;

    if( src.empty() )
        CV_Error( CV_StsBadArg, "The input image is empty" );

    if( src.depth() != CV_8U || src.channels() != 4 )
        CV_Error( CV_StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );

    dst.create( src.size(), CV_8UC4 );

    if( !(criteria.type & TermCriteria::MAX_ITER) )
        criteria.maxCount = 5;

    int maxIter = std::min(std::max(criteria.maxCount, 1), 100);

    float eps;
    if( !(criteria.type & TermCriteria::EPS) )
        eps = 1.f;
    else
        eps = (float)std::max(criteria.epsilon, 0.0);

    meanShiftFiltering_gpu(src, dst, sp, sr, maxIter, eps, StreamAccessor::getStream(stream));
}
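// Illustrative usage sketch (not part of the library; the file name and the
// spatial/color radii are hypothetical). The kernel accepts 8-bit 4-channel
// images only, hence the BGR -> BGRA conversion:
//
//     #include <opencv2/gpu/gpu.hpp>
//     #include <opencv2/imgproc/imgproc.hpp>
//     #include <opencv2/highgui/highgui.hpp>
//
//     cv::Mat bgr = cv::imread("input.png");
//     cv::Mat bgra;
//     cv::cvtColor(bgr, bgra, CV_BGR2BGRA);
//     cv::gpu::GpuMat d_src(bgra), d_dst;
//     cv::gpu::meanShiftFiltering(d_src, d_dst, 20, 30);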
////////////////////////////////////////////////////////////////////////
// meanShiftProc_GPU

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        void meanShiftProc_gpu(const PtrStepSzb& src, PtrStepSzb dstr, PtrStepSzb dstsp, int sp, int sr, int maxIter, float eps, cudaStream_t stream);
    }
}}}

void cv::gpu::meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr, TermCriteria criteria, Stream& stream)
{
    using namespace ::cv::gpu::device::imgproc;

    if( src.empty() )
        CV_Error( CV_StsBadArg, "The input image is empty" );

    if( src.depth() != CV_8U || src.channels() != 4 )
        CV_Error( CV_StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );

    dstr.create( src.size(), CV_8UC4 );
    dstsp.create( src.size(), CV_16SC2 );

    if( !(criteria.type & TermCriteria::MAX_ITER) )
        criteria.maxCount = 5;

    int maxIter = std::min(std::max(criteria.maxCount, 1), 100);

    float eps;
    if( !(criteria.type & TermCriteria::EPS) )
        eps = 1.f;
    else
        eps = (float)std::max(criteria.epsilon, 0.0);

    meanShiftProc_gpu(src, dstr, dstsp, sp, sr, maxIter, eps, StreamAccessor::getStream(stream));
}
////////////////////////////////////////////////////////////////////////
// drawColorDisp

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        void drawColorDisp_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int ndisp, const cudaStream_t& stream);
        void drawColorDisp_gpu(const PtrStepSz<short>& src, const PtrStepSzb& dst, int ndisp, const cudaStream_t& stream);
    }
}}}

namespace
{
    template <typename T>
    void drawColorDisp_caller(const GpuMat& src, GpuMat& dst, int ndisp, const cudaStream_t& stream)
    {
        using namespace ::cv::gpu::device::imgproc;

        dst.create(src.size(), CV_8UC4);

        drawColorDisp_gpu((PtrStepSz<T>)src, dst, ndisp, stream);
    }

    typedef void (*drawColorDisp_caller_t)(const GpuMat& src, GpuMat& dst, int ndisp, const cudaStream_t& stream);

    const drawColorDisp_caller_t drawColorDisp_callers[] = {drawColorDisp_caller<unsigned char>, 0, 0, drawColorDisp_caller<short>, 0, 0, 0, 0};
}

void cv::gpu::drawColorDisp(const GpuMat& src, GpuMat& dst, int ndisp, Stream& stream)
{
    CV_Assert(src.type() == CV_8U || src.type() == CV_16S);

    drawColorDisp_callers[src.type()](src, dst, ndisp, StreamAccessor::getStream(stream));
}
////////////////////////////////////////////////////////////////////////
// reprojectImageTo3D

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        template <typename T, typename D>
        void reprojectImageTo3D_gpu(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, cudaStream_t stream);
    }
}}}

void cv::gpu::reprojectImageTo3D(const GpuMat& disp, GpuMat& xyz, const Mat& Q, int dst_cn, Stream& stream)
{
    using namespace cv::gpu::device::imgproc;

    typedef void (*func_t)(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, cudaStream_t stream);
    static const func_t funcs[2][4] =
    {
        {reprojectImageTo3D_gpu<uchar, float3>, 0, 0, reprojectImageTo3D_gpu<short, float3>},
        {reprojectImageTo3D_gpu<uchar, float4>, 0, 0, reprojectImageTo3D_gpu<short, float4>}
    };

    CV_Assert(disp.type() == CV_8U || disp.type() == CV_16S);
    CV_Assert(Q.type() == CV_32F && Q.rows == 4 && Q.cols == 4 && Q.isContinuous());
    CV_Assert(dst_cn == 3 || dst_cn == 4);

    xyz.create(disp.size(), CV_MAKE_TYPE(CV_32F, dst_cn));

    funcs[dst_cn == 4][disp.type()](disp, xyz, Q.ptr<float>(), StreamAccessor::getStream(stream));
}
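// Illustrative usage sketch (hypothetical inputs; Q would normally come from
// cv::stereoRectify and the disparity from a GPU stereo matcher):
//
//     cv::gpu::GpuMat d_disp(disp8u), d_xyz;             // disp8u: CV_8UC1 disparity
//     cv::gpu::reprojectImageTo3D(d_disp, d_xyz, Q, 4);  // Q: 4x4 CV_32F; output CV_32FC4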
////////////////////////////////////////////////////////////////////////
// copyMakeBorder

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        template <typename T, int cn> void copyMakeBorder_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const T* borderValue, cudaStream_t stream);
    }
}}}

namespace
{
    template <typename T, int cn> void copyMakeBorder_caller(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderType, const Scalar& value, cudaStream_t stream)
    {
        using namespace ::cv::gpu::device::imgproc;

        Scalar_<T> val(saturate_cast<T>(value[0]), saturate_cast<T>(value[1]), saturate_cast<T>(value[2]), saturate_cast<T>(value[3]));

        copyMakeBorder_gpu<T, cn>(src, dst, top, left, borderType, val.val, stream);
    }
}

#if defined __GNUC__ && __GNUC__ > 2 && __GNUC_MINOR__ > 4
typedef Npp32s __attribute__((__may_alias__)) Npp32s_a;
#else
typedef Npp32s Npp32s_a;
#endif

void cv::gpu::copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, int borderType, const Scalar& value, Stream& s)
{
    CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);
    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT || borderType == BORDER_REFLECT || borderType == BORDER_WRAP);

    dst.create(src.rows + top + bottom, src.cols + left + right, src.type());

    cudaStream_t stream = StreamAccessor::getStream(s);

    if (borderType == BORDER_CONSTANT && (src.type() == CV_8UC1 || src.type() == CV_8UC4 || src.type() == CV_32SC1 || src.type() == CV_32FC1))
    {
        NppiSize srcsz;
        srcsz.width  = src.cols;
        srcsz.height = src.rows;

        NppiSize dstsz;
        dstsz.width  = dst.cols;
        dstsz.height = dst.rows;

        NppStreamHandler h(stream);

        switch (src.type())
        {
        case CV_8UC1:
            {
                Npp8u nVal = saturate_cast<Npp8u>(value[0]);
                nppSafeCall( nppiCopyConstBorder_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step), srcsz,
                    dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
                break;
            }
        case CV_8UC4:
            {
                Npp8u nVal[] = {saturate_cast<Npp8u>(value[0]), saturate_cast<Npp8u>(value[1]), saturate_cast<Npp8u>(value[2]), saturate_cast<Npp8u>(value[3])};
                nppSafeCall( nppiCopyConstBorder_8u_C4R(src.ptr<Npp8u>(), static_cast<int>(src.step), srcsz,
                    dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
                break;
            }
        case CV_32SC1:
            {
                Npp32s nVal = saturate_cast<Npp32s>(value[0]);
                nppSafeCall( nppiCopyConstBorder_32s_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), srcsz,
                    dst.ptr<Npp32s>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
                break;
            }
        case CV_32FC1:
            {
                Npp32f val = saturate_cast<Npp32f>(value[0]);
                Npp32s nVal = *(reinterpret_cast<Npp32s_a*>(&val));
                nppSafeCall( nppiCopyConstBorder_32s_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), srcsz,
                    dst.ptr<Npp32s>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
                break;
            }
        }

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
    else
    {
        typedef void (*caller_t)(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderType, const Scalar& value, cudaStream_t stream);
        static const caller_t callers[6][4] =
        {
            {   copyMakeBorder_caller<uchar, 1>  ,    copyMakeBorder_caller<uchar, 2>   ,    copyMakeBorder_caller<uchar, 3>  ,    copyMakeBorder_caller<uchar, 4>},
            {0/*copyMakeBorder_caller<schar, 1>*/, 0/*copyMakeBorder_caller<schar, 2>*/ , 0/*copyMakeBorder_caller<schar, 3>*/, 0/*copyMakeBorder_caller<schar, 4>*/},
            {   copyMakeBorder_caller<ushort, 1> , 0/*copyMakeBorder_caller<ushort, 2>*/,    copyMakeBorder_caller<ushort, 3> ,    copyMakeBorder_caller<ushort, 4>},
            {   copyMakeBorder_caller<short, 1>  , 0/*copyMakeBorder_caller<short, 2>*/ ,    copyMakeBorder_caller<short, 3>  ,    copyMakeBorder_caller<short, 4>},
            {0/*copyMakeBorder_caller<int, 1>*/  , 0/*copyMakeBorder_caller<int, 2>*/   , 0/*copyMakeBorder_caller<int, 3>*/  , 0/*copyMakeBorder_caller<int, 4>*/},
            {   copyMakeBorder_caller<float, 1>  , 0/*copyMakeBorder_caller<float, 2>*/ ,    copyMakeBorder_caller<float, 3>  ,    copyMakeBorder_caller<float, 4>}
        };

        caller_t func = callers[src.depth()][src.channels() - 1];
        CV_Assert(func != 0);

        int gpuBorderType;
        CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));

        func(src, dst, top, left, gpuBorderType, value, stream);
    }
}
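// Illustrative usage sketch (hypothetical input; pads 10 black pixels on every
// side, which takes the NPP constant-border fast path for CV_8UC1):
//
//     cv::gpu::GpuMat d_src(gray), d_padded;   // gray: CV_8UC1 host image
//     cv::gpu::copyMakeBorder(d_src, d_padded, 10, 10, 10, 10,
//                             cv::BORDER_CONSTANT, cv::Scalar::all(0));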
//////////////////////////////////////////////////////////////////////////////
// buildWarpPlaneMaps

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        void buildWarpPlaneMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
                                const float k_rinv[9], const float r_kinv[9], const float t[3], float scale,
                                cudaStream_t stream);
    }
}}}

void cv::gpu::buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat& K, const Mat& R, const Mat& T,
                                 float scale, GpuMat& map_x, GpuMat& map_y, Stream& stream)
{
    (void)src_size;
    using namespace ::cv::gpu::device::imgproc;

    CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);
    CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);
    CV_Assert((T.size() == Size(3,1) || T.size() == Size(1,3)) && T.type() == CV_32F && T.isContinuous());

    Mat K_Rinv = K * R.t();
    Mat R_Kinv = R * K.inv();
    CV_Assert(K_Rinv.isContinuous());
    CV_Assert(R_Kinv.isContinuous());

    map_x.create(dst_roi.size(), CV_32F);
    map_y.create(dst_roi.size(), CV_32F);
    device::imgproc::buildWarpPlaneMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),
                                        T.ptr<float>(), scale, StreamAccessor::getStream(stream));
}
//////////////////////////////////////////////////////////////////////////////
// buildWarpCylindricalMaps

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        void buildWarpCylindricalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
                                      const float k_rinv[9], const float r_kinv[9], float scale,
                                      cudaStream_t stream);
    }
}}}

void cv::gpu::buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat& K, const Mat& R, float scale,
                                       GpuMat& map_x, GpuMat& map_y, Stream& stream)
{
    (void)src_size;
    using namespace ::cv::gpu::device::imgproc;

    CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);
    CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);

    Mat K_Rinv = K * R.t();
    Mat R_Kinv = R * K.inv();
    CV_Assert(K_Rinv.isContinuous());
    CV_Assert(R_Kinv.isContinuous());

    map_x.create(dst_roi.size(), CV_32F);
    map_y.create(dst_roi.size(), CV_32F);
    device::imgproc::buildWarpCylindricalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(), scale, StreamAccessor::getStream(stream));
}
//////////////////////////////////////////////////////////////////////////////
// buildWarpSphericalMaps

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        void buildWarpSphericalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
                                    const float k_rinv[9], const float r_kinv[9], float scale,
                                    cudaStream_t stream);
    }
}}}

void cv::gpu::buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat& K, const Mat& R, float scale,
                                     GpuMat& map_x, GpuMat& map_y, Stream& stream)
{
    (void)src_size;
    using namespace ::cv::gpu::device::imgproc;

    CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);
    CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);

    Mat K_Rinv = K * R.t();
    Mat R_Kinv = R * K.inv();
    CV_Assert(K_Rinv.isContinuous());
    CV_Assert(R_Kinv.isContinuous());

    map_x.create(dst_roi.size(), CV_32F);
    map_y.create(dst_roi.size(), CV_32F);
    device::imgproc::buildWarpSphericalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(), scale, StreamAccessor::getStream(stream));
}
////////////////////////////////////////////////////////////////////////
// rotate

namespace
{
    template<int DEPTH> struct NppTypeTraits;
    template<> struct NppTypeTraits<CV_8U>  { typedef Npp8u  npp_t; };
    template<> struct NppTypeTraits<CV_8S>  { typedef Npp8s  npp_t; };
    template<> struct NppTypeTraits<CV_16U> { typedef Npp16u npp_t; };
    template<> struct NppTypeTraits<CV_16S> { typedef Npp16s npp_t; };
    template<> struct NppTypeTraits<CV_32S> { typedef Npp32s npp_t; };
    template<> struct NppTypeTraits<CV_32F> { typedef Npp32f npp_t; };
    template<> struct NppTypeTraits<CV_64F> { typedef Npp64f npp_t; };

    template <int DEPTH> struct NppRotateFunc
    {
        typedef typename NppTypeTraits<DEPTH>::npp_t npp_t;

        typedef NppStatus (*func_t)(const npp_t* pSrc, NppiSize oSrcSize, int nSrcStep, NppiRect oSrcROI,
                                    npp_t* pDst, int nDstStep, NppiRect oDstROI,
                                    double nAngle, double nShiftX, double nShiftY, int eInterpolation);
    };

    template <int DEPTH, typename NppRotateFunc<DEPTH>::func_t func> struct NppRotate
    {
        typedef typename NppRotateFunc<DEPTH>::npp_t npp_t;

        static void call(const GpuMat& src, GpuMat& dst, Size dsize, double angle, double xShift, double yShift, int interpolation, cudaStream_t stream)
        {
            (void)dsize;
            static const int npp_inter[] = {NPPI_INTER_NN, NPPI_INTER_LINEAR, NPPI_INTER_CUBIC};

            NppStreamHandler h(stream);

            NppiSize srcsz;
            srcsz.height = src.rows;
            srcsz.width = src.cols;

            NppiRect srcroi;
            srcroi.x = srcroi.y = 0;
            srcroi.height = src.rows;
            srcroi.width = src.cols;

            NppiRect dstroi;
            dstroi.x = dstroi.y = 0;
            dstroi.height = dst.rows;
            dstroi.width = dst.cols;

            nppSafeCall( func(src.ptr<npp_t>(), srcsz, static_cast<int>(src.step), srcroi,
                dst.ptr<npp_t>(), static_cast<int>(dst.step), dstroi, angle, xShift, yShift, npp_inter[interpolation]) );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    };
}
void cv::gpu::rotate(const GpuMat& src, GpuMat& dst, Size dsize, double angle, double xShift, double yShift, int interpolation, Stream& stream)
{
    typedef void (*func_t)(const GpuMat& src, GpuMat& dst, Size dsize, double angle, double xShift, double yShift, int interpolation, cudaStream_t stream);

    static const func_t funcs[6][4] =
    {
        {NppRotate<CV_8U, nppiRotate_8u_C1R>::call, 0, NppRotate<CV_8U, nppiRotate_8u_C3R>::call, NppRotate<CV_8U, nppiRotate_8u_C4R>::call},
        {0, 0, 0, 0},
        {NppRotate<CV_16U, nppiRotate_16u_C1R>::call, 0, NppRotate<CV_16U, nppiRotate_16u_C3R>::call, NppRotate<CV_16U, nppiRotate_16u_C4R>::call},
        {0, 0, 0, 0},
        {0, 0, 0, 0},
        {NppRotate<CV_32F, nppiRotate_32f_C1R>::call, 0, NppRotate<CV_32F, nppiRotate_32f_C3R>::call, NppRotate<CV_32F, nppiRotate_32f_C4R>::call}
    };

    CV_Assert(src.depth() == CV_8U || src.depth() == CV_16U || src.depth() == CV_32F);
    CV_Assert(src.channels() == 1 || src.channels() == 3 || src.channels() == 4);
    CV_Assert(interpolation == INTER_NEAREST || interpolation == INTER_LINEAR || interpolation == INTER_CUBIC);

    dst.create(dsize, src.type());
    dst.setTo(Scalar::all(0));

    funcs[src.depth()][src.channels() - 1](src, dst, dsize, angle, xShift, yShift, interpolation, StreamAccessor::getStream(stream));
}
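// Illustrative usage sketch (hypothetical input; rotates 30 degrees into a
// same-sized destination, with a shift to keep the result roughly centered):
//
//     cv::gpu::GpuMat d_src(img), d_rot;   // img: 1/3/4-channel CV_8U, CV_16U or CV_32F
//     cv::gpu::rotate(d_src, d_rot, d_src.size(), 30.0,
//                     d_src.cols * 0.5, d_src.rows * 0.5, cv::INTER_LINEAR);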
////////////////////////////////////////////////////////////////////////
// integral

void cv::gpu::integral(const GpuMat& src, GpuMat& sum, Stream& s)
{
    GpuMat buffer;
    integralBuffered(src, sum, buffer, s);
}

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        void shfl_integral_gpu(const PtrStepSzb& img, PtrStepSz<unsigned int> integral, cudaStream_t stream);
    }
}}}

void cv::gpu::integralBuffered(const GpuMat& src, GpuMat& sum, GpuMat& buffer, Stream& s)
{
    CV_Assert(src.type() == CV_8UC1);

    cudaStream_t stream = StreamAccessor::getStream(s);

    cv::Size whole;
    cv::Point offset;

    src.locateROI(whole, offset);

    if (deviceSupports(WARP_SHUFFLE_FUNCTIONS) && src.cols <= 2048
        && offset.x % 16 == 0 && ((src.cols + 63) / 64) * 64 <= (static_cast<int>(src.step) - offset.x))
    {
        ensureSizeIsEnough(((src.rows + 7) / 8) * 8, ((src.cols + 63) / 64) * 64, CV_32SC1, buffer);

        cv::gpu::device::imgproc::shfl_integral_gpu(src, buffer, stream);

        sum.create(src.rows + 1, src.cols + 1, CV_32SC1);
        if (s)
            s.enqueueMemSet(sum, Scalar::all(0));
        else
            sum.setTo(Scalar::all(0));

        GpuMat inner = sum(Rect(1, 1, src.cols, src.rows));
        GpuMat res = buffer(Rect(0, 0, src.cols, src.rows));

        if (s)
            s.enqueueCopy(res, inner);
        else
            res.copyTo(inner);
    }
    else
    {
        sum.create(src.rows + 1, src.cols + 1, CV_32SC1);

        NcvSize32u roiSize;
        roiSize.width = src.cols;
        roiSize.height = src.rows;

        cudaDeviceProp prop;
        cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );

        Ncv32u bufSize;
        ncvSafeCall( nppiStIntegralGetSize_8u32u(roiSize, &bufSize, prop) );
        ensureSizeIsEnough(1, bufSize, CV_8UC1, buffer);

        NppStStreamHandler h(stream);

        ncvSafeCall( nppiStIntegral_8u32u_C1R(const_cast<Ncv8u*>(src.ptr<Ncv8u>()), static_cast<int>(src.step),
            sum.ptr<Ncv32u>(), static_cast<int>(sum.step), roiSize, buffer.ptr<Ncv8u>(), bufSize, prop) );

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
}
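// Illustrative usage sketch (hypothetical input). Passing an explicit scratch
// buffer via integralBuffered avoids reallocation across repeated calls:
//
//     cv::gpu::GpuMat d_src(gray), d_sum, buffer;   // gray: CV_8UC1
//     cv::gpu::integral(d_src, d_sum);              // d_sum: (rows+1)x(cols+1) CV_32SC1
//     cv::gpu::integralBuffered(d_src, d_sum, buffer);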
//////////////////////////////////////////////////////////////////////////////
// sqrIntegral

void cv::gpu::sqrIntegral(const GpuMat& src, GpuMat& sqsum, Stream& s)
{
    CV_Assert(src.type() == CV_8U);

    NcvSize32u roiSize;
    roiSize.width = src.cols;
    roiSize.height = src.rows;

    cudaDeviceProp prop;
    cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );

    Ncv32u bufSize;
    ncvSafeCall( nppiStSqrIntegralGetSize_8u64u(roiSize, &bufSize, prop) );
    GpuMat buf(1, bufSize, CV_8U);

    cudaStream_t stream = StreamAccessor::getStream(s);

    NppStStreamHandler h(stream);

    sqsum.create(src.rows + 1, src.cols + 1, CV_64F);
    ncvSafeCall( nppiStSqrIntegral_8u64u_C1R(const_cast<Ncv8u*>(src.ptr<Ncv8u>(0)), static_cast<int>(src.step),
        sqsum.ptr<Ncv64u>(0), static_cast<int>(sqsum.step), roiSize, buf.ptr<Ncv8u>(0), bufSize, prop) );

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////////////
// columnSum

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        void columnSum_32F(const PtrStepSzb src, const PtrStepSzb dst);
    }
}}}

void cv::gpu::columnSum(const GpuMat& src, GpuMat& dst)
{
    using namespace ::cv::gpu::device::imgproc;

    CV_Assert(src.type() == CV_32F);

    dst.create(src.size(), CV_32F);

    device::imgproc::columnSum_32F(src, dst);
}
void cv::gpu::rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, const Rect& rect, Stream& s)
{
    CV_Assert(src.type() == CV_32SC1 && sqr.type() == CV_64FC1);

    dst.create(src.size(), CV_32FC1);

    NppiSize sz;
    sz.width = src.cols;
    sz.height = src.rows;

    NppiRect nppRect;
    nppRect.height = rect.height;
    nppRect.width = rect.width;
    nppRect.x = rect.x;
    nppRect.y = rect.y;

    cudaStream_t stream = StreamAccessor::getStream(s);

    NppStreamHandler h(stream);

    nppSafeCall( nppiRectStdDev_32s32f_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), sqr.ptr<Npp64f>(), static_cast<int>(sqr.step),
        dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz, nppRect) );

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Histograms

namespace
{
    typedef NppStatus (*get_buf_size_c1_t)(NppiSize oSizeROI, int nLevels, int* hpBufferSize);
    typedef NppStatus (*get_buf_size_c4_t)(NppiSize oSizeROI, int nLevels[], int* hpBufferSize);

    template<int SDEPTH> struct NppHistogramEvenFuncC1
    {
        typedef typename NppTypeTraits<SDEPTH>::npp_t src_t;

        typedef NppStatus (*func_ptr)(const src_t* pSrc, int nSrcStep, NppiSize oSizeROI, Npp32s * pHist,
            int nLevels, Npp32s nLowerLevel, Npp32s nUpperLevel, Npp8u * pBuffer);
    };
    template<int SDEPTH> struct NppHistogramEvenFuncC4
    {
        typedef typename NppTypeTraits<SDEPTH>::npp_t src_t;

        typedef NppStatus (*func_ptr)(const src_t* pSrc, int nSrcStep, NppiSize oSizeROI,
            Npp32s * pHist[4], int nLevels[4], Npp32s nLowerLevel[4], Npp32s nUpperLevel[4], Npp8u * pBuffer);
    };
    template<int SDEPTH, typename NppHistogramEvenFuncC1<SDEPTH>::func_ptr func, get_buf_size_c1_t get_buf_size>
    struct NppHistogramEvenC1
    {
        typedef typename NppHistogramEvenFuncC1<SDEPTH>::src_t src_t;

        static void hist(const GpuMat& src, GpuMat& hist, GpuMat& buffer, int histSize, int lowerLevel, int upperLevel, cudaStream_t stream)
        {
            int levels = histSize + 1;
            hist.create(1, histSize, CV_32S);

            NppiSize sz;
            sz.width = src.cols;
            sz.height = src.rows;

            int buf_size;
            get_buf_size(sz, levels, &buf_size);

            ensureSizeIsEnough(1, buf_size, CV_8U, buffer);

            NppStreamHandler h(stream);

            nppSafeCall( func(src.ptr<src_t>(), static_cast<int>(src.step), sz, hist.ptr<Npp32s>(), levels,
                lowerLevel, upperLevel, buffer.ptr<Npp8u>()) );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    };
    template<int SDEPTH, typename NppHistogramEvenFuncC4<SDEPTH>::func_ptr func, get_buf_size_c4_t get_buf_size>
    struct NppHistogramEvenC4
    {
        typedef typename NppHistogramEvenFuncC4<SDEPTH>::src_t src_t;

        static void hist(const GpuMat& src, GpuMat hist[4], GpuMat& buffer, int histSize[4], int lowerLevel[4], int upperLevel[4], cudaStream_t stream)
        {
            int levels[] = {histSize[0] + 1, histSize[1] + 1, histSize[2] + 1, histSize[3] + 1};
            hist[0].create(1, histSize[0], CV_32S);
            hist[1].create(1, histSize[1], CV_32S);
            hist[2].create(1, histSize[2], CV_32S);
            hist[3].create(1, histSize[3], CV_32S);

            NppiSize sz;
            sz.width = src.cols;
            sz.height = src.rows;

            Npp32s* pHist[] = {hist[0].ptr<Npp32s>(), hist[1].ptr<Npp32s>(), hist[2].ptr<Npp32s>(), hist[3].ptr<Npp32s>()};

            int buf_size;
            get_buf_size(sz, levels, &buf_size);

            ensureSizeIsEnough(1, buf_size, CV_8U, buffer);

            NppStreamHandler h(stream);

            nppSafeCall( func(src.ptr<src_t>(), static_cast<int>(src.step), sz, pHist, levels, lowerLevel, upperLevel, buffer.ptr<Npp8u>()) );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    };
    template<int SDEPTH> struct NppHistogramRangeFuncC1
    {
        typedef typename NppTypeTraits<SDEPTH>::npp_t src_t;
        typedef Npp32s level_t;
        enum {LEVEL_TYPE_CODE=CV_32SC1};

        typedef NppStatus (*func_ptr)(const src_t* pSrc, int nSrcStep, NppiSize oSizeROI, Npp32s* pHist,
            const Npp32s* pLevels, int nLevels, Npp8u* pBuffer);
    };
    template<> struct NppHistogramRangeFuncC1<CV_32F>
    {
        typedef Npp32f src_t;
        typedef Npp32f level_t;
        enum {LEVEL_TYPE_CODE=CV_32FC1};

        typedef NppStatus (*func_ptr)(const Npp32f* pSrc, int nSrcStep, NppiSize oSizeROI, Npp32s* pHist,
            const Npp32f* pLevels, int nLevels, Npp8u* pBuffer);
    };
    template<int SDEPTH> struct NppHistogramRangeFuncC4
    {
        typedef typename NppTypeTraits<SDEPTH>::npp_t src_t;
        typedef Npp32s level_t;
        enum {LEVEL_TYPE_CODE=CV_32SC1};

        typedef NppStatus (*func_ptr)(const src_t* pSrc, int nSrcStep, NppiSize oSizeROI, Npp32s* pHist[4],
            const Npp32s* pLevels[4], int nLevels[4], Npp8u* pBuffer);
    };
    template<> struct NppHistogramRangeFuncC4<CV_32F>
    {
        typedef Npp32f src_t;
        typedef Npp32f level_t;
        enum {LEVEL_TYPE_CODE=CV_32FC1};

        typedef NppStatus (*func_ptr)(const Npp32f* pSrc, int nSrcStep, NppiSize oSizeROI, Npp32s* pHist[4],
            const Npp32f* pLevels[4], int nLevels[4], Npp8u* pBuffer);
    };
    template<int SDEPTH, typename NppHistogramRangeFuncC1<SDEPTH>::func_ptr func, get_buf_size_c1_t get_buf_size>
    struct NppHistogramRangeC1
    {
        typedef typename NppHistogramRangeFuncC1<SDEPTH>::src_t src_t;
        typedef typename NppHistogramRangeFuncC1<SDEPTH>::level_t level_t;
        enum {LEVEL_TYPE_CODE=NppHistogramRangeFuncC1<SDEPTH>::LEVEL_TYPE_CODE};

        static void hist(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buffer, cudaStream_t stream)
        {
            CV_Assert(levels.type() == LEVEL_TYPE_CODE && levels.rows == 1);

            hist.create(1, levels.cols - 1, CV_32S);

            NppiSize sz;
            sz.width = src.cols;
            sz.height = src.rows;

            int buf_size;
            get_buf_size(sz, levels.cols, &buf_size);

            ensureSizeIsEnough(1, buf_size, CV_8U, buffer);

            NppStreamHandler h(stream);

            nppSafeCall( func(src.ptr<src_t>(), static_cast<int>(src.step), sz, hist.ptr<Npp32s>(), levels.ptr<level_t>(), levels.cols, buffer.ptr<Npp8u>()) );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    };
    template<int SDEPTH, typename NppHistogramRangeFuncC4<SDEPTH>::func_ptr func, get_buf_size_c4_t get_buf_size>
    struct NppHistogramRangeC4
    {
        typedef typename NppHistogramRangeFuncC4<SDEPTH>::src_t src_t;
        typedef typename NppHistogramRangeFuncC1<SDEPTH>::level_t level_t;
        enum {LEVEL_TYPE_CODE=NppHistogramRangeFuncC1<SDEPTH>::LEVEL_TYPE_CODE};

        static void hist(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buffer, cudaStream_t stream)
        {
            CV_Assert(levels[0].type() == LEVEL_TYPE_CODE && levels[0].rows == 1);
            CV_Assert(levels[1].type() == LEVEL_TYPE_CODE && levels[1].rows == 1);
            CV_Assert(levels[2].type() == LEVEL_TYPE_CODE && levels[2].rows == 1);
            CV_Assert(levels[3].type() == LEVEL_TYPE_CODE && levels[3].rows == 1);

            hist[0].create(1, levels[0].cols - 1, CV_32S);
            hist[1].create(1, levels[1].cols - 1, CV_32S);
            hist[2].create(1, levels[2].cols - 1, CV_32S);
            hist[3].create(1, levels[3].cols - 1, CV_32S);

            Npp32s* pHist[] = {hist[0].ptr<Npp32s>(), hist[1].ptr<Npp32s>(), hist[2].ptr<Npp32s>(), hist[3].ptr<Npp32s>()};
            int nLevels[] = {levels[0].cols, levels[1].cols, levels[2].cols, levels[3].cols};
            const level_t* pLevels[] = {levels[0].ptr<level_t>(), levels[1].ptr<level_t>(), levels[2].ptr<level_t>(), levels[3].ptr<level_t>()};

            NppiSize sz;
            sz.width = src.cols;
            sz.height = src.rows;

            int buf_size;
            get_buf_size(sz, nLevels, &buf_size);

            ensureSizeIsEnough(1, buf_size, CV_8U, buffer);

            NppStreamHandler h(stream);

            nppSafeCall( func(src.ptr<src_t>(), static_cast<int>(src.step), sz, pHist, pLevels, nLevels, buffer.ptr<Npp8u>()) );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    };
}
void cv::gpu::evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel)
{
    Mat host_levels(1, nLevels, CV_32SC1);
    nppSafeCall( nppiEvenLevelsHost_32s(host_levels.ptr<Npp32s>(), nLevels, lowerLevel, upperLevel) );
    levels.upload(host_levels);
}

void cv::gpu::histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, Stream& stream)
{
    GpuMat buf;
    histEven(src, hist, buf, histSize, lowerLevel, upperLevel, stream);
}
namespace hist
{
    void histEven8u(PtrStepSzb src, int* hist, int binCount, int lowerLevel, int upperLevel, cudaStream_t stream);
}

namespace
{
    void histEven8u(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, cudaStream_t stream)
    {
        hist.create(1, histSize, CV_32S);
        cudaSafeCall( cudaMemsetAsync(hist.data, 0, histSize * sizeof(int), stream) );
        hist::histEven8u(src, hist.ptr<int>(), histSize, lowerLevel, upperLevel, stream);
    }
}
void cv::gpu::histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream)
{
    CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 );

    typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, GpuMat& buf, int levels, int lowerLevel, int upperLevel, cudaStream_t stream);
    static const hist_t hist_callers[] =
    {
        NppHistogramEvenC1<CV_8U , nppiHistogramEven_8u_C1R , nppiHistogramEvenGetBufferSize_8u_C1R >::hist,
        0,
        NppHistogramEvenC1<CV_16U, nppiHistogramEven_16u_C1R, nppiHistogramEvenGetBufferSize_16u_C1R>::hist,
        NppHistogramEvenC1<CV_16S, nppiHistogramEven_16s_C1R, nppiHistogramEvenGetBufferSize_16s_C1R>::hist
    };

    if (src.depth() == CV_8U && deviceSupports(FEATURE_SET_COMPUTE_30))
    {
        histEven8u(src, hist, histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));
        return;
    }

    hist_callers[src.depth()](src, hist, buf, histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));
}
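// Illustrative usage sketch (hypothetical input; 256 even bins over [0, 256)):
//
//     cv::gpu::GpuMat d_src(gray), d_hist;            // gray: CV_8UC1
//     cv::gpu::histEven(d_src, d_hist, 256, 0, 256);
//     cv::Mat h;
//     d_hist.download(h);                             // h: 1x256, CV_32S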
void cv::gpu::histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream)
{
    GpuMat buf;
    histEven(src, hist, buf, histSize, lowerLevel, upperLevel, stream);
}

void cv::gpu::histEven(const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream)
{
    CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4 || src.type() == CV_16SC4 );

    typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], GpuMat& buf, int levels[4], int lowerLevel[4], int upperLevel[4], cudaStream_t stream);
    static const hist_t hist_callers[] =
    {
        NppHistogramEvenC4<CV_8U , nppiHistogramEven_8u_C4R , nppiHistogramEvenGetBufferSize_8u_C4R >::hist,
        0,
        NppHistogramEvenC4<CV_16U, nppiHistogramEven_16u_C4R, nppiHistogramEvenGetBufferSize_16u_C4R>::hist,
        NppHistogramEvenC4<CV_16S, nppiHistogramEven_16s_C4R, nppiHistogramEvenGetBufferSize_16s_C4R>::hist
    };

    hist_callers[src.depth()](src, hist, buf, histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));
}
void cv::gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, Stream& stream)
{
    GpuMat buf;
    histRange(src, hist, levels, buf, stream);
}

void cv::gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream)
{
    CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 || src.type() == CV_32FC1);

    typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, cudaStream_t stream);
    static const hist_t hist_callers[] =
    {
        NppHistogramRangeC1<CV_8U , nppiHistogramRange_8u_C1R , nppiHistogramRangeGetBufferSize_8u_C1R >::hist,
        0,
        NppHistogramRangeC1<CV_16U, nppiHistogramRange_16u_C1R, nppiHistogramRangeGetBufferSize_16u_C1R>::hist,
        NppHistogramRangeC1<CV_16S, nppiHistogramRange_16s_C1R, nppiHistogramRangeGetBufferSize_16s_C1R>::hist,
        0,
        NppHistogramRangeC1<CV_32F, nppiHistogramRange_32f_C1R, nppiHistogramRangeGetBufferSize_32f_C1R>::hist
    };

    hist_callers[src.depth()](src, hist, levels, buf, StreamAccessor::getStream(stream));
}
void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], Stream& stream)
{
    GpuMat buf;
    histRange(src, hist, levels, buf, stream);
}

void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buf, Stream& stream)
{
    CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4 || src.type() == CV_16SC4 || src.type() == CV_32FC4);

    typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buf, cudaStream_t stream);
    static const hist_t hist_callers[] =
    {
        NppHistogramRangeC4<CV_8U , nppiHistogramRange_8u_C4R , nppiHistogramRangeGetBufferSize_8u_C4R >::hist,
        0,
        NppHistogramRangeC4<CV_16U, nppiHistogramRange_16u_C4R, nppiHistogramRangeGetBufferSize_16u_C4R>::hist,
        NppHistogramRangeC4<CV_16S, nppiHistogramRange_16s_C4R, nppiHistogramRangeGetBufferSize_16s_C4R>::hist,
        0,
        NppHistogramRangeC4<CV_32F, nppiHistogramRange_32f_C4R, nppiHistogramRangeGetBufferSize_32f_C4R>::hist
    };

    hist_callers[src.depth()](src, hist, levels, buf, StreamAccessor::getStream(stream));
}
namespace hist
{
    void histogram256(PtrStepSzb src, int* hist, cudaStream_t stream);
    void equalizeHist(PtrStepSzb src, PtrStepSzb dst, const int* lut, cudaStream_t stream);
}

void cv::gpu::calcHist(const GpuMat& src, GpuMat& hist, Stream& stream)
{
    CV_Assert(src.type() == CV_8UC1);

    hist.create(1, 256, CV_32SC1);
    hist.setTo(Scalar::all(0));

    hist::histogram256(src, hist.ptr<int>(), StreamAccessor::getStream(stream));
}

void cv::gpu::calcHist(const GpuMat& src, GpuMat& hist, GpuMat& buf, Stream& stream)
{
    (void)buf;
    calcHist(src, hist, stream);
}
void cv::gpu::equalizeHist(const GpuMat& src, GpuMat& dst, Stream& stream)
{
    GpuMat hist;
    GpuMat buf;
    equalizeHist(src, dst, hist, buf, stream);
}

void cv::gpu::equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, Stream& stream)
{
    GpuMat buf;
    equalizeHist(src, dst, hist, buf, stream);
}

void cv::gpu::equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, GpuMat& buf, Stream& s)
{
    CV_Assert(src.type() == CV_8UC1);

    dst.create(src.size(), src.type());

    int intBufSize;
    nppSafeCall( nppsIntegralGetBufferSize_32s(256, &intBufSize) );

    ensureSizeIsEnough(1, intBufSize + 256 * sizeof(int), CV_8UC1, buf);

    GpuMat intBuf(1, intBufSize, CV_8UC1, buf.ptr());
    GpuMat lut(1, 256, CV_32S, buf.ptr() + intBufSize);

    calcHist(src, hist, s);

    cudaStream_t stream = StreamAccessor::getStream(s);

    NppStreamHandler h(stream);

    nppSafeCall( nppsIntegral_32s(hist.ptr<Npp32s>(), lut.ptr<Npp32s>(), 256, intBuf.ptr<Npp8u>()) );

    hist::equalizeHist(src, dst, lut.ptr<int>(), stream);
}
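// Illustrative usage sketch (hypothetical input; hist and buf can be reused
// across calls to avoid reallocation):
//
//     cv::gpu::GpuMat d_src(gray), d_dst, hist, buf;   // gray: CV_8UC1
//     cv::gpu::equalizeHist(d_src, d_dst, hist, buf);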
////////////////////////////////////////////////////////////////////////
// cornerHarris & cornerMinEigenVal

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        void cornerHarris_gpu(int block_size, float k, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream);
        void cornerMinEigenVal_gpu(int block_size, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream);
    }
}}}

namespace
{
    void extractCovData(const GpuMat& src, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, int borderType, Stream& stream)
    {
        double scale = static_cast<double>(1 << ((ksize > 0 ? ksize : 3) - 1)) * blockSize;

        if (ksize < 0)
            scale *= 2.;

        if (src.depth() == CV_8U)
            scale *= 255.;

        scale = 1./scale;

        Dx.create(src.size(), CV_32F);
        Dy.create(src.size(), CV_32F);

        if (ksize > 0)
        {
            Sobel(src, Dx, CV_32F, 1, 0, buf, ksize, scale, borderType, -1, stream);
            Sobel(src, Dy, CV_32F, 0, 1, buf, ksize, scale, borderType, -1, stream);
        }
        else
        {
            Scharr(src, Dx, CV_32F, 1, 0, buf, scale, borderType, -1, stream);
            Scharr(src, Dy, CV_32F, 0, 1, buf, scale, borderType, -1, stream);
        }
    }
}
void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType)
{
    GpuMat Dx, Dy;
    cornerHarris(src, dst, Dx, Dy, blockSize, ksize, k, borderType);
}

void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, double k, int borderType)
{
    GpuMat buf;
    cornerHarris(src, dst, Dx, Dy, buf, blockSize, ksize, k, borderType);
}

void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, double k, int borderType, Stream& stream)
{
    using namespace cv::gpu::device::imgproc;

    CV_Assert(borderType == cv::BORDER_REFLECT101 || borderType == cv::BORDER_REPLICATE || borderType == cv::BORDER_REFLECT);

    int gpuBorderType;
    CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));

    extractCovData(src, Dx, Dy, buf, blockSize, ksize, borderType, stream);

    dst.create(src.size(), CV_32F);

    cornerHarris_gpu(blockSize, static_cast<float>(k), Dx, Dy, dst, gpuBorderType, StreamAccessor::getStream(stream));
}
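// Illustrative usage sketch (hypothetical input and parameters):
//
//     cv::gpu::GpuMat d_src(gray), d_resp;    // gray: CV_8UC1 or CV_32FC1
//     cv::gpu::cornerHarris(d_src, d_resp, 5, 3, 0.04);   // blockSize=5, Sobel ksize=3, k=0.04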
void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType)
{
    GpuMat Dx, Dy;
    cornerMinEigenVal(src, dst, Dx, Dy, blockSize, ksize, borderType);
}

void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType)
{
    GpuMat buf;
    cornerMinEigenVal(src, dst, Dx, Dy, buf, blockSize, ksize, borderType);
}

void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, GpuMat& buf, int blockSize, int ksize, int borderType, Stream& stream)
{
    using namespace ::cv::gpu::device::imgproc;

    CV_Assert(borderType == cv::BORDER_REFLECT101 || borderType == cv::BORDER_REPLICATE || borderType == cv::BORDER_REFLECT);

    int gpuBorderType;
    CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));

    extractCovData(src, Dx, Dy, buf, blockSize, ksize, borderType, stream);

    dst.create(src.size(), CV_32F);

    cornerMinEigenVal_gpu(blockSize, Dx, Dy, dst, gpuBorderType, StreamAccessor::getStream(stream));
}
//////////////////////////////////////////////////////////////////////////////
// mulSpectrums

#ifdef HAVE_CUFFT

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        void mulSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream);

        void mulSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream);
    }
}}}

#endif

void cv::gpu::mulSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, bool conjB, Stream& stream)
{
    (void) flags;

#ifndef HAVE_CUFFT
    (void) a;
    (void) b;
    (void) c;
    (void) conjB;
    (void) stream;
    throw_nogpu();
#else
    using namespace ::cv::gpu::device::imgproc;

    typedef void (*Caller)(const PtrStep<cufftComplex>, const PtrStep<cufftComplex>, PtrStepSz<cufftComplex>, cudaStream_t stream);

    static Caller callers[] = { device::imgproc::mulSpectrums, device::imgproc::mulSpectrums_CONJ };

    CV_Assert(a.type() == b.type() && a.type() == CV_32FC2);
    CV_Assert(a.size() == b.size());

    c.create(a.size(), CV_32FC2);

    Caller caller = callers[(int)conjB];
    caller(a, b, c, StreamAccessor::getStream(stream));
#endif
}
//////////////////////////////////////////////////////////////////////////////
// mulAndScaleSpectrums

#ifdef HAVE_CUFFT

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        void mulAndScaleSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream);

        void mulAndScaleSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream);
    }
}}}

#endif

void cv::gpu::mulAndScaleSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c, int flags, float scale, bool conjB, Stream& stream)
{
    (void) flags;

#ifndef HAVE_CUFFT
    (void) a;
    (void) b;
    (void) c;
    (void) scale;
    (void) conjB;
    (void) stream;
    throw_nogpu();
#else
    using namespace ::cv::gpu::device::imgproc;

    typedef void (*Caller)(const PtrStep<cufftComplex>, const PtrStep<cufftComplex>, float scale, PtrStepSz<cufftComplex>, cudaStream_t stream);
    static Caller callers[] = { device::imgproc::mulAndScaleSpectrums, device::imgproc::mulAndScaleSpectrums_CONJ };

    CV_Assert(a.type() == b.type() && a.type() == CV_32FC2);
    CV_Assert(a.size() == b.size());

    c.create(a.size(), CV_32FC2);

    Caller caller = callers[(int)conjB];
    caller(a, b, scale, c, StreamAccessor::getStream(stream));
#endif
}
//////////////////////////////////////////////////////////////////////////////
// dft

void cv::gpu::dft(const GpuMat& src, GpuMat& dst, Size dft_size, int flags, Stream& stream)
{
#ifndef HAVE_CUFFT
    OPENCV_GPU_UNUSED(src);
    OPENCV_GPU_UNUSED(dst);
    OPENCV_GPU_UNUSED(dft_size);
    OPENCV_GPU_UNUSED(flags);
    OPENCV_GPU_UNUSED(stream);
    throw_nogpu();
#else
    CV_Assert(src.type() == CV_32F || src.type() == CV_32FC2);

    // We don't support unpacked output (in the case of real input)
    CV_Assert(!(flags & DFT_COMPLEX_OUTPUT));

    bool is_1d_input = (dft_size.height == 1) || (dft_size.width == 1);
    int is_row_dft = flags & DFT_ROWS;
    int is_scaled_dft = flags & DFT_SCALE;
    int is_inverse = flags & DFT_INVERSE;
    bool is_complex_input = src.channels() == 2;
    bool is_complex_output = !(flags & DFT_REAL_OUTPUT);

    // We don't support real-to-real transform
    CV_Assert(is_complex_input || is_complex_output);

    GpuMat src_data;

    // Make sure here we work with the continuous input,
    // as CUFFT can't handle gaps
    src_data = src;
    createContinuous(src.rows, src.cols, src.type(), src_data);
    if (src_data.data != src.data)
        src.copyTo(src_data);

    Size dft_size_opt = dft_size;
    if (is_1d_input && !is_row_dft)
    {
        // If the source matrix is single column handle it as single row
        dft_size_opt.width = std::max(dft_size.width, dft_size.height);
        dft_size_opt.height = std::min(dft_size.width, dft_size.height);
    }

    cufftType dft_type = CUFFT_R2C;
    if (is_complex_input)
        dft_type = is_complex_output ? CUFFT_C2C : CUFFT_C2R;

    CV_Assert(dft_size_opt.width > 1);

    cufftHandle plan;
    if (is_1d_input || is_row_dft)
        cufftSafeCall( cufftPlan1d(&plan, dft_size_opt.width, dft_type, dft_size_opt.height) );
    else
        cufftSafeCall( cufftPlan2d(&plan, dft_size_opt.height, dft_size_opt.width, dft_type) );

    cufftSafeCall( cufftSetStream(plan, StreamAccessor::getStream(stream)) );

    if (is_complex_input)
    {
        if (is_complex_output)
        {
            createContinuous(dft_size, CV_32FC2, dst);
            cufftSafeCall(cufftExecC2C(
                    plan, src_data.ptr<cufftComplex>(), dst.ptr<cufftComplex>(),
                    is_inverse ? CUFFT_INVERSE : CUFFT_FORWARD));
        }
        else
        {
            createContinuous(dft_size, CV_32F, dst);
            cufftSafeCall(cufftExecC2R(
                    plan, src_data.ptr<cufftComplex>(), dst.ptr<cufftReal>()));
        }
    }
    else
    {
        // We could swap dft_size for efficiency. Here we must reflect it
        if (dft_size == dft_size_opt)
            createContinuous(Size(dft_size.width / 2 + 1, dft_size.height), CV_32FC2, dst);
        else
            createContinuous(Size(dft_size.width, dft_size.height / 2 + 1), CV_32FC2, dst);

        cufftSafeCall(cufftExecR2C(
                plan, src_data.ptr<cufftReal>(), dst.ptr<cufftComplex>()));
    }

    cufftSafeCall(cufftDestroy(plan));

    if (is_scaled_dft)
        multiply(dst, Scalar::all(1. / dft_size.area()), dst, 1, -1, stream);
#endif
}
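// Illustrative usage sketch (hypothetical input; the forward real-to-complex
// transform yields a packed spectrum of width/2 + 1 complex columns):
//
//     cv::gpu::GpuMat d_src(img32f), d_freq, d_back;   // img32f: CV_32FC1
//     cv::gpu::dft(d_src, d_freq, d_src.size(), 0);
//     cv::gpu::dft(d_freq, d_back, d_src.size(),
//                  cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);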
//////////////////////////////////////////////////////////////////////////////
// convolve

void cv::gpu::ConvolveBuf::create(Size image_size, Size templ_size)
{
    result_size = Size(image_size.width - templ_size.width + 1,
                       image_size.height - templ_size.height + 1);

    block_size = user_block_size;
    if (user_block_size.width == 0 || user_block_size.height == 0)
        block_size = estimateBlockSize(result_size, templ_size);

    dft_size.width  = 1 << int(ceil(std::log(block_size.width + templ_size.width - 1.) / std::log(2.)));
    dft_size.height = 1 << int(ceil(std::log(block_size.height + templ_size.height - 1.) / std::log(2.)));

    // CUFFT has hard-coded kernels for power-of-2 sizes (up to 8192),
    // see CUDA Toolkit 4.1 CUFFT Library Programming Guide
    if (dft_size.width > 8192)
        dft_size.width = getOptimalDFTSize(block_size.width + templ_size.width - 1);
    if (dft_size.height > 8192)
        dft_size.height = getOptimalDFTSize(block_size.height + templ_size.height - 1);

    // To avoid wasting time doing small DFTs
    dft_size.width = std::max(dft_size.width, 512);
    dft_size.height = std::max(dft_size.height, 512);

    createContinuous(dft_size, CV_32F, image_block);
    createContinuous(dft_size, CV_32F, templ_block);
    createContinuous(dft_size, CV_32F, result_data);

    spect_len = dft_size.height * (dft_size.width / 2 + 1);
    createContinuous(1, spect_len, CV_32FC2, image_spect);
    createContinuous(1, spect_len, CV_32FC2, templ_spect);
    createContinuous(1, spect_len, CV_32FC2, result_spect);

    // Use maximum result matrix block size for the estimated DFT block size
    block_size.width = std::min(dft_size.width - templ_size.width + 1, result_size.width);
    block_size.height = std::min(dft_size.height - templ_size.height + 1, result_size.height);
}

Size cv::gpu::ConvolveBuf::estimateBlockSize(Size result_size, Size /*templ_size*/)
{
    int width = (result_size.width + 2) / 3;
    int height = (result_size.height + 2) / 3;
    width = std::min(width, result_size.width);
    height = std::min(height, result_size.height);
    return Size(width, height);
}
void cv::gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr)
{
    ConvolveBuf buf;
    convolve(image, templ, result, ccorr, buf);
}

void cv::gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result, bool ccorr, ConvolveBuf& buf, Stream& stream)
{
    using namespace ::cv::gpu::device::imgproc;

#ifndef HAVE_CUFFT
    throw_nogpu();
#else
    StaticAssert<sizeof(float) == sizeof(cufftReal)>::check();
    StaticAssert<sizeof(float) * 2 == sizeof(cufftComplex)>::check();

    CV_Assert(image.type() == CV_32F);
    CV_Assert(templ.type() == CV_32F);

    buf.create(image.size(), templ.size());
    result.create(buf.result_size, CV_32F);

    Size& block_size = buf.block_size;
    Size& dft_size = buf.dft_size;

    GpuMat& image_block = buf.image_block;
    GpuMat& templ_block = buf.templ_block;
    GpuMat& result_data = buf.result_data;

    GpuMat& image_spect = buf.image_spect;
    GpuMat& templ_spect = buf.templ_spect;
    GpuMat& result_spect = buf.result_spect;

    cufftHandle planR2C, planC2R;
    cufftSafeCall(cufftPlan2d(&planC2R, dft_size.height, dft_size.width, CUFFT_C2R));
    cufftSafeCall(cufftPlan2d(&planR2C, dft_size.height, dft_size.width, CUFFT_R2C));

    cufftSafeCall( cufftSetStream(planR2C, StreamAccessor::getStream(stream)) );
    cufftSafeCall( cufftSetStream(planC2R, StreamAccessor::getStream(stream)) );

    GpuMat templ_roi(templ.size(), CV_32F, templ.data, templ.step);
    copyMakeBorder(templ_roi, templ_block, 0, templ_block.rows - templ_roi.rows, 0,
                   templ_block.cols - templ_roi.cols, 0, Scalar(), stream);

    cufftSafeCall(cufftExecR2C(planR2C, templ_block.ptr<cufftReal>(),
                               templ_spect.ptr<cufftComplex>()));

    // Process all blocks of the result matrix
    for (int y = 0; y < result.rows; y += block_size.height)
    {
        for (int x = 0; x < result.cols; x += block_size.width)
        {
            Size image_roi_size(std::min(x + dft_size.width, image.cols) - x,
                                std::min(y + dft_size.height, image.rows) - y);
            GpuMat image_roi(image_roi_size, CV_32F, (void*)(image.ptr<float>(y) + x),
                             image.step);
            copyMakeBorder(image_roi, image_block, 0, image_block.rows - image_roi.rows,
                           0, image_block.cols - image_roi.cols, 0, Scalar(), stream);

            cufftSafeCall(cufftExecR2C(planR2C, image_block.ptr<cufftReal>(),
                                       image_spect.ptr<cufftComplex>()));
            mulAndScaleSpectrums(image_spect, templ_spect, result_spect, 0,
                                 1.f / dft_size.area(), ccorr, stream);
            cufftSafeCall(cufftExecC2R(planC2R, result_spect.ptr<cufftComplex>(),
                                       result_data.ptr<cufftReal>()));

            Size result_roi_size(std::min(x + block_size.width, result.cols) - x,
                                 std::min(y + block_size.height, result.rows) - y);
            GpuMat result_roi(result_roi_size, result.type(),
                              (void*)(result.ptr<float>(y) + x), result.step);
            GpuMat result_block(result_roi_size, result_data.type(),
                                result_data.ptr(), result_data.step);

            if (stream)
                stream.enqueueCopy(result_block, result_roi);
            else
                result_block.copyTo(result_roi);
        }
    }

    cufftSafeCall(cufftDestroy(planR2C));
    cufftSafeCall(cufftDestroy(planC2R));
#endif
}
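// Illustrative usage sketch (hypothetical inputs; passing a ConvolveBuf lets
// repeated calls reuse the FFT workspaces):
//
//     cv::gpu::GpuMat d_image(img32f), d_templ(templ32f), d_result;
//     cv::gpu::convolve(d_image, d_templ, d_result);        // convolution
//     cv::gpu::convolve(d_image, d_templ, d_result, true);  // cross-correlation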
//////////////////////////////////////////////////////////////////////////////
// Canny

void cv::gpu::CannyBuf::create(const Size& image_size, int apperture_size)
{
    if (apperture_size > 0)
    {
        ensureSizeIsEnough(image_size, CV_32SC1, dx);
        ensureSizeIsEnough(image_size, CV_32SC1, dy);

        if (apperture_size != 3)
        {
            filterDX = createDerivFilter_GPU(CV_8UC1, CV_32S, 1, 0, apperture_size, BORDER_REPLICATE);
            filterDY = createDerivFilter_GPU(CV_8UC1, CV_32S, 0, 1, apperture_size, BORDER_REPLICATE);
        }
    }

    ensureSizeIsEnough(image_size, CV_32FC1, mag);
    ensureSizeIsEnough(image_size, CV_32SC1, map);

    ensureSizeIsEnough(1, image_size.area(), CV_16UC2, st1);
    ensureSizeIsEnough(1, image_size.area(), CV_16UC2, st2);
}

void cv::gpu::CannyBuf::release()
{
    dx.release();
    dy.release();
    mag.release();
    map.release();
    st1.release();
    st2.release();
}

namespace canny
{
    void calcMagnitude(PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad);
    void calcMagnitude(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad);

    void calcMap(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, PtrStepSzi map, float low_thresh, float high_thresh);

    void edgesHysteresisLocal(PtrStepSzi map, ushort2* st1);

    void edgesHysteresisGlobal(PtrStepSzi map, ushort2* st1, ushort2* st2);

    void getEdges(PtrStepSzi map, PtrStepSzb dst);
}

namespace
{
    void CannyCaller(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& dst, float low_thresh, float high_thresh)
    {
        using namespace canny;

        buf.map.setTo(Scalar::all(0));
        calcMap(dx, dy, buf.mag, buf.map, low_thresh, high_thresh);

        edgesHysteresisLocal(buf.map, buf.st1.ptr<ushort2>());

        edgesHysteresisGlobal(buf.map, buf.st1.ptr<ushort2>(), buf.st2.ptr<ushort2>());

        getEdges(buf.map, dst);
    }
}
void cv::gpu::Canny(const GpuMat& src, GpuMat& dst, double low_thresh, double high_thresh, int apperture_size, bool L2gradient)
{
    CannyBuf buf;
    Canny(src, buf, dst, low_thresh, high_thresh, apperture_size, L2gradient);
}

void cv::gpu::Canny(const GpuMat& src, CannyBuf& buf, GpuMat& dst, double low_thresh, double high_thresh, int apperture_size, bool L2gradient)
{
    using namespace canny;

    CV_Assert(src.type() == CV_8UC1);

    if (!deviceSupports(SHARED_ATOMICS))
        CV_Error(CV_StsNotImplemented, "The device doesn't support shared atomics");

    if( low_thresh > high_thresh )
        std::swap( low_thresh, high_thresh);

    dst.create(src.size(), CV_8U);
    buf.create(src.size(), apperture_size);

    if (apperture_size == 3)
    {
        Size wholeSize;
        Point ofs;
        src.locateROI(wholeSize, ofs);
        GpuMat srcWhole(wholeSize, src.type(), src.datastart, src.step);

        calcMagnitude(srcWhole, ofs.x, ofs.y, buf.dx, buf.dy, buf.mag, L2gradient);
    }
    else
    {
        buf.filterDX->apply(src, buf.dx, Rect(0, 0, src.cols, src.rows));
        buf.filterDY->apply(src, buf.dy, Rect(0, 0, src.cols, src.rows));

        calcMagnitude(buf.dx, buf.dy, buf.mag, L2gradient);
    }

    CannyCaller(buf.dx, buf.dy, buf, dst, static_cast<float>(low_thresh), static_cast<float>(high_thresh));
}
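// Illustrative usage sketch (hypothetical input and thresholds; a CannyBuf
// lets repeated calls reuse the intermediate buffers):
//
//     cv::gpu::GpuMat d_src(gray), d_edges;   // gray: CV_8UC1
//     cv::gpu::CannyBuf buf;
//     cv::gpu::Canny(d_src, buf, d_edges, 50.0, 100.0);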
void cv::gpu::Canny(const GpuMat& dx, const GpuMat& dy, GpuMat& dst, double low_thresh, double high_thresh, bool L2gradient)
{
    CannyBuf buf;
    Canny(dx, dy, buf, dst, low_thresh, high_thresh, L2gradient);
}

void cv::gpu::Canny(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& dst, double low_thresh, double high_thresh, bool L2gradient)
{
    using namespace canny;

    CV_Assert(TargetArchs::builtWith(SHARED_ATOMICS) && DeviceInfo().supports(SHARED_ATOMICS));
    CV_Assert(dx.type() == CV_32SC1 && dy.type() == CV_32SC1 && dx.size() == dy.size());

    if( low_thresh > high_thresh )
        std::swap( low_thresh, high_thresh);

    dst.create(dx.size(), CV_8U);
    buf.create(dx.size(), -1);

    calcMagnitude(dx, dy, buf.mag, L2gradient);

    CannyCaller(dx, dy, buf, dst, static_cast<float>(low_thresh), static_cast<float>(high_thresh));
}
////////////////////////////////////////////////////////////////////////
// CLAHE

namespace clahe
{
    void calcLut(PtrStepSzb src, PtrStepb lut, int tilesX, int tilesY, int2 tileSize, int clipLimit, float lutScale, cudaStream_t stream);
    void transform(PtrStepSzb src, PtrStepSzb dst, PtrStepb lut, int tilesX, int tilesY, int2 tileSize, cudaStream_t stream);
}

namespace
{
    class CLAHE_Impl : public cv::gpu::CLAHE
    {
    public:
        CLAHE_Impl(double clipLimit = 40.0, int tilesX = 8, int tilesY = 8);

        cv::AlgorithmInfo* info() const;

        void apply(cv::InputArray src, cv::OutputArray dst);
        void apply(InputArray src, OutputArray dst, Stream& stream);

        void setClipLimit(double clipLimit);
        double getClipLimit() const;

        void setTilesGridSize(cv::Size tileGridSize);
        cv::Size getTilesGridSize() const;

        void collectGarbage();

    private:
        double clipLimit_;
        int tilesX_;
        int tilesY_;

        GpuMat srcExt_;
        GpuMat lut_;
    };

    CLAHE_Impl::CLAHE_Impl(double clipLimit, int tilesX, int tilesY) :
        clipLimit_(clipLimit), tilesX_(tilesX), tilesY_(tilesY)
    {
    }

    CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE_GPU",
        obj.info()->addParam(obj, "clipLimit", obj.clipLimit_);
        obj.info()->addParam(obj, "tilesX", obj.tilesX_);
        obj.info()->addParam(obj, "tilesY", obj.tilesY_))
    void CLAHE_Impl::apply(cv::InputArray _src, cv::OutputArray _dst)
    {
        apply(_src, _dst, Stream::Null());
    }

    void CLAHE_Impl::apply(InputArray _src, OutputArray _dst, Stream& s)
    {
        GpuMat src = _src.getGpuMat();

        CV_Assert( src.type() == CV_8UC1 );

        _dst.create( src.size(), src.type() );
        GpuMat dst = _dst.getGpuMat();

        const int histSize = 256;

        ensureSizeIsEnough(tilesX_ * tilesY_, histSize, CV_8UC1, lut_);

        cudaStream_t stream = StreamAccessor::getStream(s);

        cv::Size tileSize;
        GpuMat srcForLut;

        if (src.cols % tilesX_ == 0 && src.rows % tilesY_ == 0)
        {
            tileSize = cv::Size(src.cols / tilesX_, src.rows / tilesY_);
            srcForLut = src;
        }
        else
        {
            cv::gpu::copyMakeBorder(src, srcExt_, 0, tilesY_ - (src.rows % tilesY_), 0, tilesX_ - (src.cols % tilesX_), cv::BORDER_REFLECT_101, cv::Scalar(), s);

            tileSize = cv::Size(srcExt_.cols / tilesX_, srcExt_.rows / tilesY_);
            srcForLut = srcExt_;
        }

        const int tileSizeTotal = tileSize.area();
        const float lutScale = static_cast<float>(histSize - 1) / tileSizeTotal;

        int clipLimit = 0;
        if (clipLimit_ > 0.0)
        {
            clipLimit = static_cast<int>(clipLimit_ * tileSizeTotal / histSize);
            clipLimit = std::max(clipLimit, 1);
        }

        clahe::calcLut(srcForLut, lut_, tilesX_, tilesY_, make_int2(tileSize.width, tileSize.height), clipLimit, lutScale, stream);

        clahe::transform(src, dst, lut_, tilesX_, tilesY_, make_int2(tileSize.width, tileSize.height), stream);
    }
    void CLAHE_Impl::setClipLimit(double clipLimit)
    {
        clipLimit_ = clipLimit;
    }

    double CLAHE_Impl::getClipLimit() const
    {
        return clipLimit_;
    }

    void CLAHE_Impl::setTilesGridSize(cv::Size tileGridSize)
    {
        tilesX_ = tileGridSize.width;
        tilesY_ = tileGridSize.height;
    }

    cv::Size CLAHE_Impl::getTilesGridSize() const
    {
        return cv::Size(tilesX_, tilesY_);
    }

    void CLAHE_Impl::collectGarbage()
    {
        srcExt_.release();
        lut_.release();
    }
}

cv::Ptr<cv::gpu::CLAHE> cv::gpu::createCLAHE(double clipLimit, cv::Size tileGridSize)
{
    return new CLAHE_Impl(clipLimit, tileGridSize.width, tileGridSize.height);
}
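// Illustrative usage sketch (hypothetical input; the clip limit and grid size
// shown are the defaults):
//
//     cv::Ptr<cv::gpu::CLAHE> clahe = cv::gpu::createCLAHE(40.0, cv::Size(8, 8));
//     cv::gpu::GpuMat d_src(gray), d_dst;   // gray: CV_8UC1
//     clahe->apply(d_src, d_dst);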
#endif /* !defined (HAVE_CUDA) */