/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"

using namespace cv;
using namespace cv::gpu;

#if !defined (HAVE_CUDA)

void cv::gpu::remap(const GpuMat&, GpuMat&, const GpuMat&, const GpuMat&, int, int, const Scalar&, Stream&) { throw_nogpu(); }
void cv::gpu::meanShiftFiltering(const GpuMat&, GpuMat&, int, int, TermCriteria) { throw_nogpu(); }
void cv::gpu::meanShiftProc(const GpuMat&, GpuMat&, GpuMat&, int, int, TermCriteria) { throw_nogpu(); }
void cv::gpu::drawColorDisp(const GpuMat&, GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::reprojectImageTo3D(const GpuMat&, GpuMat&, const Mat&, Stream&) { throw_nogpu(); }
void cv::gpu::resize(const GpuMat&, GpuMat&, Size, double, double, int, Stream&) { throw_nogpu(); }
void cv::gpu::copyMakeBorder(const GpuMat&, GpuMat&, int, int, int, int, int, const Scalar&, Stream&) { throw_nogpu(); }
void cv::gpu::warpAffine(const GpuMat&, GpuMat&, const Mat&, Size, int, Stream&) { throw_nogpu(); }
void cv::gpu::warpPerspective(const GpuMat&, GpuMat&, const Mat&, Size, int, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpPlaneMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpCylindricalMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::buildWarpSphericalMaps(Size, Rect, const Mat&, const Mat&, float, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::rotate(const GpuMat&, GpuMat&, Size, double, double, double, int, Stream&) { throw_nogpu(); }
void cv::gpu::integral(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::integralBuffered(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::integral(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::sqrIntegral(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::columnSum(const GpuMat&, GpuMat&) { throw_nogpu(); }
void cv::gpu::rectStdDev(const GpuMat&, const GpuMat&, GpuMat&, const Rect&, Stream&) { throw_nogpu(); }
void cv::gpu::evenLevels(GpuMat&, int, int, int) { throw_nogpu(); }
void cv::gpu::histEven(const GpuMat&, GpuMat&, int, int, int, Stream&) { throw_nogpu(); }
void cv::gpu::histEven(const GpuMat&, GpuMat&, GpuMat&, int, int, int, Stream&) { throw_nogpu(); }
void cv::gpu::histEven(const GpuMat&, GpuMat*, int*, int*, int*, Stream&) { throw_nogpu(); }
void cv::gpu::histEven(const GpuMat&, GpuMat*, GpuMat&, int*, int*, int*, Stream&) { throw_nogpu(); }
void cv::gpu::histRange(const GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::histRange(const GpuMat&, GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::histRange(const GpuMat&, GpuMat*, const GpuMat*, Stream&) { throw_nogpu(); }
void cv::gpu::histRange(const GpuMat&, GpuMat*, const GpuMat*, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::calcHist(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::calcHist(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, int, int, double, int) { throw_nogpu(); }
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, double, int) { throw_nogpu(); }
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, int, int, int) { throw_nogpu(); }
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, int) { throw_nogpu(); }
void cv::gpu::mulSpectrums(const GpuMat&, const GpuMat&, GpuMat&, int, bool) { throw_nogpu(); }
void cv::gpu::mulAndScaleSpectrums(const GpuMat&, const GpuMat&, GpuMat&, int, float, bool) { throw_nogpu(); }
void cv::gpu::dft(const GpuMat&, GpuMat&, Size, int) { throw_nogpu(); }
void cv::gpu::ConvolveBuf::create(Size, Size) { throw_nogpu(); }
void cv::gpu::convolve(const GpuMat&, const GpuMat&, GpuMat&, bool) { throw_nogpu(); }
void cv::gpu::convolve(const GpuMat&, const GpuMat&, GpuMat&, bool, ConvolveBuf&) { throw_nogpu(); }
void cv::gpu::pyrDown(const GpuMat&, GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::pyrUp(const GpuMat&, GpuMat&, int, Stream&) { throw_nogpu(); }
void cv::gpu::Canny(const GpuMat&, GpuMat&, double, double, int, bool) { throw_nogpu(); }
void cv::gpu::Canny(const GpuMat&, CannyBuf&, GpuMat&, double, double, int, bool) { throw_nogpu(); }
void cv::gpu::Canny(const GpuMat&, const GpuMat&, GpuMat&, double, double, bool) { throw_nogpu(); }
void cv::gpu::Canny(const GpuMat&, const GpuMat&, CannyBuf&, GpuMat&, double, double, bool) { throw_nogpu(); }
cv::gpu::CannyBuf::CannyBuf(const GpuMat&, const GpuMat&) { throw_nogpu(); }
void cv::gpu::CannyBuf::create(const Size&, int) { throw_nogpu(); }
void cv::gpu::CannyBuf::release() { throw_nogpu(); }

#else /* !defined (HAVE_CUDA) */
////////////////////////////////////////////////////////////////////////
// remap

namespace cv { namespace gpu { namespace imgproc
{
    template <typename T> void remap_gpu(const DevMem2D& src, const DevMem2Df& xmap, const DevMem2Df& ymap, const DevMem2D& dst,
        int interpolation, int borderMode, const float* borderValue, cudaStream_t stream);
}}}

void cv::gpu::remap(const GpuMat& src, GpuMat& dst, const GpuMat& xmap, const GpuMat& ymap, int interpolation, int borderMode, const Scalar& borderValue, Stream& stream)
{
    using namespace cv::gpu::imgproc;

    typedef void (*caller_t)(const DevMem2D& src, const DevMem2Df& xmap, const DevMem2Df& ymap, const DevMem2D& dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream);
    static const caller_t callers[6][4] =
    {
        {remap_gpu<uchar>, 0/*remap_gpu<uchar2>*/, remap_gpu<uchar3>, remap_gpu<uchar4>},
        {0/*remap_gpu<schar>*/, 0/*remap_gpu<char2>*/, 0/*remap_gpu<char3>*/, 0/*remap_gpu<char4>*/},
        {remap_gpu<ushort>, 0/*remap_gpu<ushort2>*/, remap_gpu<ushort3>, remap_gpu<ushort4>},
        {remap_gpu<short>, 0/*remap_gpu<short2>*/, remap_gpu<short3>, remap_gpu<short4>},
        {0/*remap_gpu<int>*/, 0/*remap_gpu<int2>*/, 0/*remap_gpu<int3>*/, 0/*remap_gpu<int4>*/},
        {remap_gpu<float>, 0/*remap_gpu<float2>*/, remap_gpu<float3>, remap_gpu<float4>}
    };

    CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);
    CV_Assert(xmap.type() == CV_32F && ymap.type() == CV_32F && xmap.size() == ymap.size());

    caller_t func = callers[src.depth()][src.channels() - 1];
    CV_Assert(func != 0);

    CV_Assert(interpolation == INTER_NEAREST || interpolation == INTER_LINEAR || interpolation == INTER_CUBIC);

    CV_Assert(borderMode == BORDER_REFLECT101 || borderMode == BORDER_REPLICATE || borderMode == BORDER_CONSTANT || borderMode == BORDER_REFLECT || borderMode == BORDER_WRAP);
    int gpuBorderType;
    CV_Assert(tryConvertToGpuBorderType(borderMode, gpuBorderType));

    dst.create(xmap.size(), src.type());

    Scalar_<float> borderValueFloat;
    borderValueFloat = borderValue;

    func(src, xmap, ymap, dst, interpolation, gpuBorderType, borderValueFloat.val, StreamAccessor::getStream(stream));
}
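
// Usage sketch (illustrative comment, not part of the build). Assuming a CUDA
// device and the header's default borderValue/stream arguments, remap() consumes
// per-pixel CV_32F coordinate maps; h_src is a hypothetical host cv::Mat:
//
//     cv::Mat mapx(h_src.size(), CV_32F), mapy(h_src.size(), CV_32F);
//     // ... fill mapx/mapy with source coordinates for every dst pixel ...
//     cv::gpu::GpuMat d_src(h_src), d_dst, d_mapx(mapx), d_mapy(mapy);
//     cv::gpu::remap(d_src, d_dst, d_mapx, d_mapy, cv::INTER_LINEAR, cv::BORDER_CONSTANT);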
////////////////////////////////////////////////////////////////////////
// meanShiftFiltering_GPU

namespace cv { namespace gpu { namespace imgproc
{
    extern "C" void meanShiftFiltering_gpu(const DevMem2D& src, DevMem2D dst, int sp, int sr, int maxIter, float eps);
}}}

void cv::gpu::meanShiftFiltering(const GpuMat& src, GpuMat& dst, int sp, int sr, TermCriteria criteria)
{
    if( src.empty() )
        CV_Error( CV_StsBadArg, "The input image is empty" );

    if( src.depth() != CV_8U || src.channels() != 4 )
        CV_Error( CV_StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );

    dst.create( src.size(), CV_8UC4 );

    if( !(criteria.type & TermCriteria::MAX_ITER) )
        criteria.maxCount = 5;

    int maxIter = std::min(std::max(criteria.maxCount, 1), 100);

    if( !(criteria.type & TermCriteria::EPS) )
        criteria.epsilon = 1.f;

    float eps = (float)std::max(criteria.epsilon, 0.0);

    imgproc::meanShiftFiltering_gpu(src, dst, sp, sr, maxIter, eps);
}
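
// Usage sketch (illustrative, not part of the build). meanShiftFiltering()
// accepts only 8-bit, 4-channel input, so a BGR image is converted first;
// the header's default termination criteria are assumed:
//
//     cv::gpu::GpuMat d_bgr(h_bgr), d_bgra, d_filtered;
//     cv::gpu::cvtColor(d_bgr, d_bgra, CV_BGR2BGRA);
//     cv::gpu::meanShiftFiltering(d_bgra, d_filtered, 20 /*sp*/, 20 /*sr*/);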
////////////////////////////////////////////////////////////////////////
// meanShiftProc_GPU

namespace cv { namespace gpu { namespace imgproc
{
    extern "C" void meanShiftProc_gpu(const DevMem2D& src, DevMem2D dstr, DevMem2D dstsp, int sp, int sr, int maxIter, float eps);
}}}

void cv::gpu::meanShiftProc(const GpuMat& src, GpuMat& dstr, GpuMat& dstsp, int sp, int sr, TermCriteria criteria)
{
    if( src.empty() )
        CV_Error( CV_StsBadArg, "The input image is empty" );

    if( src.depth() != CV_8U || src.channels() != 4 )
        CV_Error( CV_StsUnsupportedFormat, "Only 8-bit, 4-channel images are supported" );

    dstr.create( src.size(), CV_8UC4 );
    dstsp.create( src.size(), CV_16SC2 );

    if( !(criteria.type & TermCriteria::MAX_ITER) )
        criteria.maxCount = 5;

    int maxIter = std::min(std::max(criteria.maxCount, 1), 100);

    if( !(criteria.type & TermCriteria::EPS) )
        criteria.epsilon = 1.f;

    float eps = (float)std::max(criteria.epsilon, 0.0);

    imgproc::meanShiftProc_gpu(src, dstr, dstsp, sp, sr, maxIter, eps);
}
////////////////////////////////////////////////////////////////////////
// drawColorDisp

namespace cv { namespace gpu { namespace imgproc
{
    void drawColorDisp_gpu(const DevMem2D& src, const DevMem2D& dst, int ndisp, const cudaStream_t& stream);
    void drawColorDisp_gpu(const DevMem2D_<short>& src, const DevMem2D& dst, int ndisp, const cudaStream_t& stream);
}}}

namespace
{
    template <typename T>
    void drawColorDisp_caller(const GpuMat& src, GpuMat& dst, int ndisp, const cudaStream_t& stream)
    {
        dst.create(src.size(), CV_8UC4);

        imgproc::drawColorDisp_gpu((DevMem2D_<T>)src, dst, ndisp, stream);
    }

    typedef void (*drawColorDisp_caller_t)(const GpuMat& src, GpuMat& dst, int ndisp, const cudaStream_t& stream);

    const drawColorDisp_caller_t drawColorDisp_callers[] = {drawColorDisp_caller<unsigned char>, 0, 0, drawColorDisp_caller<short>, 0, 0, 0, 0};
}

void cv::gpu::drawColorDisp(const GpuMat& src, GpuMat& dst, int ndisp, Stream& stream)
{
    CV_Assert(src.type() == CV_8U || src.type() == CV_16S);

    drawColorDisp_callers[src.type()](src, dst, ndisp, StreamAccessor::getStream(stream));
}
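
// Usage sketch (illustrative, not part of the build). d_disp stands for a
// hypothetical CV_8U or CV_16S disparity map, e.g. from cv::gpu::StereoBM_GPU:
//
//     cv::gpu::GpuMat d_color;
//     cv::gpu::drawColorDisp(d_disp, d_color, 64 /*ndisp*/);   // d_color is CV_8UC4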
////////////////////////////////////////////////////////////////////////
// reprojectImageTo3D

namespace cv { namespace gpu { namespace imgproc
{
    void reprojectImageTo3D_gpu(const DevMem2D& disp, const DevMem2Df& xyzw, const float* q, const cudaStream_t& stream);
    void reprojectImageTo3D_gpu(const DevMem2D_<short>& disp, const DevMem2Df& xyzw, const float* q, const cudaStream_t& stream);
}}}

namespace
{
    template <typename T>
    void reprojectImageTo3D_caller(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, const cudaStream_t& stream)
    {
        xyzw.create(disp.rows, disp.cols, CV_32FC4);
        imgproc::reprojectImageTo3D_gpu((DevMem2D_<T>)disp, xyzw, Q.ptr<float>(), stream);
    }

    typedef void (*reprojectImageTo3D_caller_t)(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, const cudaStream_t& stream);

    const reprojectImageTo3D_caller_t reprojectImageTo3D_callers[] = {reprojectImageTo3D_caller<unsigned char>, 0, 0, reprojectImageTo3D_caller<short>, 0, 0, 0, 0};
}

void cv::gpu::reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, Stream& stream)
{
    CV_Assert((disp.type() == CV_8U || disp.type() == CV_16S) && Q.type() == CV_32F && Q.rows == 4 && Q.cols == 4);

    reprojectImageTo3D_callers[disp.type()](disp, xyzw, Q, StreamAccessor::getStream(stream));
}
////////////////////////////////////////////////////////////////////////
// resize

namespace cv { namespace gpu { namespace imgproc
{
    template <typename T> void resize_gpu(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
}}}

void cv::gpu::resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx, double fy, int interpolation, Stream& s)
{
    CV_Assert( src.depth() <= CV_32F && src.channels() <= 4 );
    CV_Assert( interpolation == INTER_NEAREST || interpolation == INTER_LINEAR || interpolation == INTER_CUBIC );
    CV_Assert( !(dsize == Size()) || (fx > 0 && fy > 0) );

    if( dsize == Size() )
    {
        dsize = Size(saturate_cast<int>(src.cols * fx), saturate_cast<int>(src.rows * fy));
    }
    else
    {
        fx = (double)dsize.width / src.cols;
        fy = (double)dsize.height / src.rows;
    }

    dst.create(dsize, src.type());

    if (dsize == src.size())
    {
        if (s)
            s.enqueueCopy(src, dst);
        else
            src.copyTo(dst);
        return;
    }

    cudaStream_t stream = StreamAccessor::getStream(s);

    if ((src.type() == CV_8UC1 || src.type() == CV_8UC4) && (interpolation == INTER_NEAREST || interpolation == INTER_LINEAR))
    {
        static const int npp_inter[] = {NPPI_INTER_NN, NPPI_INTER_LINEAR, NPPI_INTER_CUBIC, 0, NPPI_INTER_LANCZOS};

        NppiSize srcsz;
        srcsz.width = src.cols;
        srcsz.height = src.rows;

        NppiRect srcrect;
        srcrect.x = srcrect.y = 0;
        srcrect.width = src.cols;
        srcrect.height = src.rows;

        NppiSize dstsz;
        dstsz.width = dst.cols;
        dstsz.height = dst.rows;

        NppStreamHandler h(stream);

        if (src.type() == CV_8UC1)
        {
            nppSafeCall( nppiResize_8u_C1R(src.ptr<Npp8u>(), srcsz, static_cast<int>(src.step), srcrect,
                dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstsz, fx, fy, npp_inter[interpolation]) );
        }
        else
        {
            nppSafeCall( nppiResize_8u_C4R(src.ptr<Npp8u>(), srcsz, static_cast<int>(src.step), srcrect,
                dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstsz, fx, fy, npp_inter[interpolation]) );
        }

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
    else
    {
        using namespace cv::gpu::imgproc;

        typedef void (*caller_t)(const DevMem2D& src, float fx, float fy, const DevMem2D& dst, int interpolation, cudaStream_t stream);
        static const caller_t callers[6][4] =
        {
            {resize_gpu<uchar>, 0/*resize_gpu<uchar2>*/, resize_gpu<uchar3>, resize_gpu<uchar4>},
            {0/*resize_gpu<schar>*/, 0/*resize_gpu<char2>*/, 0/*resize_gpu<char3>*/, 0/*resize_gpu<char4>*/},
            {resize_gpu<ushort>, 0/*resize_gpu<ushort2>*/, resize_gpu<ushort3>, resize_gpu<ushort4>},
            {resize_gpu<short>, 0/*resize_gpu<short2>*/, resize_gpu<short3>, resize_gpu<short4>},
            {0/*resize_gpu<int>*/, 0/*resize_gpu<int2>*/, 0/*resize_gpu<int3>*/, 0/*resize_gpu<int4>*/},
            {resize_gpu<float>, 0/*resize_gpu<float2>*/, resize_gpu<float3>, resize_gpu<float4>}
        };

        callers[src.depth()][src.channels() - 1](src, static_cast<float>(fx), static_cast<float>(fy), dst, interpolation, stream);
    }
}
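
// Usage sketch (illustrative, not part of the build). Halving an 8-bit image;
// CV_8UC1/CV_8UC4 with INTER_NEAREST or INTER_LINEAR takes the NPP path above,
// everything else falls through to the custom kernels:
//
//     cv::gpu::GpuMat d_src(h_img), d_half;
//     cv::gpu::resize(d_src, d_half, cv::Size(), 0.5, 0.5, cv::INTER_LINEAR);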
////////////////////////////////////////////////////////////////////////
// copyMakeBorder

namespace cv { namespace gpu { namespace imgproc
{
    template <typename T, int cn> void copyMakeBorder_gpu(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderMode, const T* borderValue, cudaStream_t stream);
}}}

namespace
{
    template <typename T, int cn> void copyMakeBorder_caller(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderType, const Scalar& value, cudaStream_t stream)
    {
        Scalar_<T> val(saturate_cast<T>(value[0]), saturate_cast<T>(value[1]), saturate_cast<T>(value[2]), saturate_cast<T>(value[3]));

        imgproc::copyMakeBorder_gpu<T, cn>(src, dst, top, left, borderType, val.val, stream);
    }
}

void cv::gpu::copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, int borderType, const Scalar& value, Stream& s)
{
    CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);
    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT || borderType == BORDER_REFLECT || borderType == BORDER_WRAP);

    dst.create(src.rows + top + bottom, src.cols + left + right, src.type());

    cudaStream_t stream = StreamAccessor::getStream(s);

    if (borderType == BORDER_CONSTANT && (src.type() == CV_8UC1 || src.type() == CV_8UC4 || src.type() == CV_32SC1 || src.type() == CV_32FC1))
    {
        NppiSize srcsz;
        srcsz.width = src.cols;
        srcsz.height = src.rows;

        NppiSize dstsz;
        dstsz.width = dst.cols;
        dstsz.height = dst.rows;

        NppStreamHandler h(stream);

        switch (src.type())
        {
        case CV_8UC1:
            {
                Npp8u nVal = saturate_cast<Npp8u>(value[0]);
                nppSafeCall( nppiCopyConstBorder_8u_C1R(src.ptr<Npp8u>(), static_cast<int>(src.step), srcsz,
                    dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
                break;
            }
        case CV_8UC4:
            {
                Npp8u nVal[] = {saturate_cast<Npp8u>(value[0]), saturate_cast<Npp8u>(value[1]), saturate_cast<Npp8u>(value[2]), saturate_cast<Npp8u>(value[3])};
                nppSafeCall( nppiCopyConstBorder_8u_C4R(src.ptr<Npp8u>(), static_cast<int>(src.step), srcsz,
                    dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
                break;
            }
        case CV_32SC1:
            {
                Npp32s nVal = saturate_cast<Npp32s>(value[0]);
                nppSafeCall( nppiCopyConstBorder_32s_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), srcsz,
                    dst.ptr<Npp32s>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
                break;
            }
        case CV_32FC1:
            {
                Npp32f val = saturate_cast<Npp32f>(value[0]);
                Npp32s nVal = *(reinterpret_cast<Npp32s*>(&val));
                nppSafeCall( nppiCopyConstBorder_32s_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), srcsz,
                    dst.ptr<Npp32s>(), static_cast<int>(dst.step), dstsz, top, left, nVal) );
                break;
            }
        }

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
    else
    {
        typedef void (*caller_t)(const DevMem2D& src, const DevMem2D& dst, int top, int left, int borderType, const Scalar& value, cudaStream_t stream);
        static const caller_t callers[6][4] =
        {
            { copyMakeBorder_caller<uchar, 1> , 0/*copyMakeBorder_caller<uchar, 2>*/ , copyMakeBorder_caller<uchar, 3> , copyMakeBorder_caller<uchar, 4>},
            {0/*copyMakeBorder_caller<schar, 1>*/, 0/*copyMakeBorder_caller<schar, 2>*/ , 0/*copyMakeBorder_caller<schar, 3>*/, 0/*copyMakeBorder_caller<schar, 4>*/},
            { copyMakeBorder_caller<ushort, 1> , 0/*copyMakeBorder_caller<ushort, 2>*/, copyMakeBorder_caller<ushort, 3> , copyMakeBorder_caller<ushort, 4>},
            { copyMakeBorder_caller<short, 1> , 0/*copyMakeBorder_caller<short, 2>*/ , copyMakeBorder_caller<short, 3> , copyMakeBorder_caller<short, 4>},
            {0/*copyMakeBorder_caller<int, 1>*/ , 0/*copyMakeBorder_caller<int, 2>*/ , 0/*copyMakeBorder_caller<int, 3>*/ , 0/*copyMakeBorder_caller<int, 4>*/},
            { copyMakeBorder_caller<float, 1> , 0/*copyMakeBorder_caller<float, 2>*/ , copyMakeBorder_caller<float, 3> , copyMakeBorder_caller<float, 4>}
        };

        caller_t func = callers[src.depth()][src.channels() - 1];
        CV_Assert(func != 0);

        int gpuBorderType;
        CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));

        func(src, dst, top, left, gpuBorderType, value, stream);
    }
}
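
// Usage sketch (illustrative, not part of the build). Padding an image by 16
// pixels on every side with a constant black border:
//
//     cv::gpu::GpuMat d_src(h_img), d_padded;
//     cv::gpu::copyMakeBorder(d_src, d_padded, 16, 16, 16, 16, cv::BORDER_CONSTANT, cv::Scalar::all(0));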
////////////////////////////////////////////////////////////////////////
// warp

namespace
{
    typedef NppStatus (*npp_warp_8u_t)(const Npp8u* pSrc, NppiSize srcSize, int srcStep, NppiRect srcRoi, Npp8u* pDst,
                                       int dstStep, NppiRect dstRoi, const double coeffs[][3],
                                       int interpolation);
    typedef NppStatus (*npp_warp_16u_t)(const Npp16u* pSrc, NppiSize srcSize, int srcStep, NppiRect srcRoi, Npp16u* pDst,
                                        int dstStep, NppiRect dstRoi, const double coeffs[][3],
                                        int interpolation);
    typedef NppStatus (*npp_warp_32s_t)(const Npp32s* pSrc, NppiSize srcSize, int srcStep, NppiRect srcRoi, Npp32s* pDst,
                                        int dstStep, NppiRect dstRoi, const double coeffs[][3],
                                        int interpolation);
    typedef NppStatus (*npp_warp_32f_t)(const Npp32f* pSrc, NppiSize srcSize, int srcStep, NppiRect srcRoi, Npp32f* pDst,
                                        int dstStep, NppiRect dstRoi, const double coeffs[][3],
                                        int interpolation);

    void nppWarpCaller(const GpuMat& src, GpuMat& dst, double coeffs[][3], const Size& dsize, int flags,
                       npp_warp_8u_t npp_warp_8u[][2], npp_warp_16u_t npp_warp_16u[][2],
                       npp_warp_32s_t npp_warp_32s[][2], npp_warp_32f_t npp_warp_32f[][2], cudaStream_t stream)
    {
        static const int npp_inter[] = {NPPI_INTER_NN, NPPI_INTER_LINEAR, NPPI_INTER_CUBIC};

        int interpolation = flags & INTER_MAX;

        CV_Assert((src.depth() == CV_8U || src.depth() == CV_16U || src.depth() == CV_32S || src.depth() == CV_32F) && src.channels() != 2);
        CV_Assert(interpolation == INTER_NEAREST || interpolation == INTER_LINEAR || interpolation == INTER_CUBIC);

        dst.create(dsize, src.type());

        NppiSize srcsz;
        srcsz.height = src.rows;
        srcsz.width = src.cols;

        NppiRect srcroi;
        srcroi.x = srcroi.y = 0;
        srcroi.height = src.rows;
        srcroi.width = src.cols;

        NppiRect dstroi;
        dstroi.x = dstroi.y = 0;
        dstroi.height = dst.rows;
        dstroi.width = dst.cols;

        int warpInd = (flags & WARP_INVERSE_MAP) >> 4;

        NppStreamHandler h(stream);

        switch (src.depth())
        {
        case CV_8U:
            nppSafeCall( npp_warp_8u[src.channels()][warpInd](src.ptr<Npp8u>(), srcsz, static_cast<int>(src.step), srcroi,
                dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstroi, coeffs, npp_inter[interpolation]) );
            break;
        case CV_16U:
            nppSafeCall( npp_warp_16u[src.channels()][warpInd](src.ptr<Npp16u>(), srcsz, static_cast<int>(src.step), srcroi,
                dst.ptr<Npp16u>(), static_cast<int>(dst.step), dstroi, coeffs, npp_inter[interpolation]) );
            break;
        case CV_32S:
            nppSafeCall( npp_warp_32s[src.channels()][warpInd](src.ptr<Npp32s>(), srcsz, static_cast<int>(src.step), srcroi,
                dst.ptr<Npp32s>(), static_cast<int>(dst.step), dstroi, coeffs, npp_inter[interpolation]) );
            break;
        case CV_32F:
            nppSafeCall( npp_warp_32f[src.channels()][warpInd](src.ptr<Npp32f>(), srcsz, static_cast<int>(src.step), srcroi,
                dst.ptr<Npp32f>(), static_cast<int>(dst.step), dstroi, coeffs, npp_inter[interpolation]) );
            break;
        default:
            CV_Assert(!"Unsupported source type");
        }

        if (stream == 0)
            cudaSafeCall( cudaDeviceSynchronize() );
    }
}

void cv::gpu::warpAffine(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags, Stream& s)
{
    static npp_warp_8u_t npp_warpAffine_8u[][2] =
    {
        {0, 0},
        {nppiWarpAffine_8u_C1R, nppiWarpAffineBack_8u_C1R},
        {0, 0},
        {nppiWarpAffine_8u_C3R, nppiWarpAffineBack_8u_C3R},
        {nppiWarpAffine_8u_C4R, nppiWarpAffineBack_8u_C4R}
    };
    static npp_warp_16u_t npp_warpAffine_16u[][2] =
    {
        {0, 0},
        {nppiWarpAffine_16u_C1R, nppiWarpAffineBack_16u_C1R},
        {0, 0},
        {nppiWarpAffine_16u_C3R, nppiWarpAffineBack_16u_C3R},
        {nppiWarpAffine_16u_C4R, nppiWarpAffineBack_16u_C4R}
    };
    static npp_warp_32s_t npp_warpAffine_32s[][2] =
    {
        {0, 0},
        {nppiWarpAffine_32s_C1R, nppiWarpAffineBack_32s_C1R},
        {0, 0},
        {nppiWarpAffine_32s_C3R, nppiWarpAffineBack_32s_C3R},
        {nppiWarpAffine_32s_C4R, nppiWarpAffineBack_32s_C4R}
    };
    static npp_warp_32f_t npp_warpAffine_32f[][2] =
    {
        {0, 0},
        {nppiWarpAffine_32f_C1R, nppiWarpAffineBack_32f_C1R},
        {0, 0},
        {nppiWarpAffine_32f_C3R, nppiWarpAffineBack_32f_C3R},
        {nppiWarpAffine_32f_C4R, nppiWarpAffineBack_32f_C4R}
    };

    CV_Assert(M.rows == 2 && M.cols == 3);

    double coeffs[2][3];
    Mat coeffsMat(2, 3, CV_64F, (void*)coeffs);
    M.convertTo(coeffsMat, coeffsMat.type());

    nppWarpCaller(src, dst, coeffs, dsize, flags, npp_warpAffine_8u, npp_warpAffine_16u, npp_warpAffine_32s, npp_warpAffine_32f, StreamAccessor::getStream(s));
}
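
// Usage sketch (illustrative, not part of the build). Rotating an image about
// its center with a host-side 2x3 affine matrix:
//
//     cv::Mat M = cv::getRotationMatrix2D(cv::Point2f(w * 0.5f, h * 0.5f), 30.0, 1.0);
//     cv::gpu::GpuMat d_src(h_img), d_dst;
//     cv::gpu::warpAffine(d_src, d_dst, M, d_src.size(), cv::INTER_LINEAR);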
void cv::gpu::warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags, Stream& s)
{
    static npp_warp_8u_t npp_warpPerspective_8u[][2] =
    {
        {0, 0},
        {nppiWarpPerspective_8u_C1R, nppiWarpPerspectiveBack_8u_C1R},
        {0, 0},
        {nppiWarpPerspective_8u_C3R, nppiWarpPerspectiveBack_8u_C3R},
        {nppiWarpPerspective_8u_C4R, nppiWarpPerspectiveBack_8u_C4R}
    };
    static npp_warp_16u_t npp_warpPerspective_16u[][2] =
    {
        {0, 0},
        {nppiWarpPerspective_16u_C1R, nppiWarpPerspectiveBack_16u_C1R},
        {0, 0},
        {nppiWarpPerspective_16u_C3R, nppiWarpPerspectiveBack_16u_C3R},
        {nppiWarpPerspective_16u_C4R, nppiWarpPerspectiveBack_16u_C4R}
    };
    static npp_warp_32s_t npp_warpPerspective_32s[][2] =
    {
        {0, 0},
        {nppiWarpPerspective_32s_C1R, nppiWarpPerspectiveBack_32s_C1R},
        {0, 0},
        {nppiWarpPerspective_32s_C3R, nppiWarpPerspectiveBack_32s_C3R},
        {nppiWarpPerspective_32s_C4R, nppiWarpPerspectiveBack_32s_C4R}
    };
    static npp_warp_32f_t npp_warpPerspective_32f[][2] =
    {
        {0, 0},
        {nppiWarpPerspective_32f_C1R, nppiWarpPerspectiveBack_32f_C1R},
        {0, 0},
        {nppiWarpPerspective_32f_C3R, nppiWarpPerspectiveBack_32f_C3R},
        {nppiWarpPerspective_32f_C4R, nppiWarpPerspectiveBack_32f_C4R}
    };

    CV_Assert(M.rows == 3 && M.cols == 3);

    double coeffs[3][3];
    Mat coeffsMat(3, 3, CV_64F, (void*)coeffs);
    M.convertTo(coeffsMat, coeffsMat.type());

    nppWarpCaller(src, dst, coeffs, dsize, flags, npp_warpPerspective_8u, npp_warpPerspective_16u, npp_warpPerspective_32s, npp_warpPerspective_32f, StreamAccessor::getStream(s));
}
//////////////////////////////////////////////////////////////////////////////
// buildWarpPlaneMaps

namespace cv { namespace gpu { namespace imgproc
{
    void buildWarpPlaneMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,
                            const float k_rinv[9], const float r_kinv[9], float scale,
                            cudaStream_t stream);
}}}

void cv::gpu::buildWarpPlaneMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,
                                 GpuMat& map_x, GpuMat& map_y, Stream& stream)
{
    CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);
    CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);

    Mat K_Rinv = K * R.t();
    Mat R_Kinv = R * K.inv();
    CV_Assert(K_Rinv.isContinuous());
    CV_Assert(R_Kinv.isContinuous());

    map_x.create(dst_roi.size(), CV_32F);
    map_y.create(dst_roi.size(), CV_32F);
    imgproc::buildWarpPlaneMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),
                                scale, StreamAccessor::getStream(stream));
}
//////////////////////////////////////////////////////////////////////////////
// buildWarpCylindricalMaps

namespace cv { namespace gpu { namespace imgproc
{
    void buildWarpCylindricalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,
                                  const float k_rinv[9], const float r_kinv[9], float scale,
                                  cudaStream_t stream);
}}}

void cv::gpu::buildWarpCylindricalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,
                                       GpuMat& map_x, GpuMat& map_y, Stream& stream)
{
    CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);
    CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);

    Mat K_Rinv = K * R.t();
    Mat R_Kinv = R * K.inv();
    CV_Assert(K_Rinv.isContinuous());
    CV_Assert(R_Kinv.isContinuous());

    map_x.create(dst_roi.size(), CV_32F);
    map_y.create(dst_roi.size(), CV_32F);
    imgproc::buildWarpCylindricalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),
                                      scale, StreamAccessor::getStream(stream));
}
//////////////////////////////////////////////////////////////////////////////
// buildWarpSphericalMaps

namespace cv { namespace gpu { namespace imgproc
{
    void buildWarpSphericalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,
                                const float k_rinv[9], const float r_kinv[9], float scale,
                                cudaStream_t stream);
}}}

void cv::gpu::buildWarpSphericalMaps(Size src_size, Rect dst_roi, const Mat &K, const Mat& R, float scale,
                                     GpuMat& map_x, GpuMat& map_y, Stream& stream)
{
    CV_Assert(K.size() == Size(3,3) && K.type() == CV_32F);
    CV_Assert(R.size() == Size(3,3) && R.type() == CV_32F);

    Mat K_Rinv = K * R.t();
    Mat R_Kinv = R * K.inv();
    CV_Assert(K_Rinv.isContinuous());
    CV_Assert(R_Kinv.isContinuous());

    map_x.create(dst_roi.size(), CV_32F);
    map_y.create(dst_roi.size(), CV_32F);
    imgproc::buildWarpSphericalMaps(dst_roi.tl().x, dst_roi.tl().y, map_x, map_y, K_Rinv.ptr<float>(), R_Kinv.ptr<float>(),
                                    scale, StreamAccessor::getStream(stream));
}
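
// Usage sketch (illustrative, not part of the build). The buildWarp*Maps
// functions emit CV_32F coordinate maps for stitching-style warps, which are
// then fed to gpu::remap(); K and R are hypothetical 3x3 CV_32F matrices:
//
//     cv::gpu::GpuMat mapx, mapy, d_warped;
//     cv::gpu::buildWarpSphericalMaps(src.size(), dst_roi, K, R, scale, mapx, mapy);
//     cv::gpu::remap(d_src, d_warped, mapx, mapy, cv::INTER_LINEAR);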
////////////////////////////////////////////////////////////////////////
// rotate

void cv::gpu::rotate(const GpuMat& src, GpuMat& dst, Size dsize, double angle, double xShift, double yShift, int interpolation, Stream& s)
{
    static const int npp_inter[] = {NPPI_INTER_NN, NPPI_INTER_LINEAR, NPPI_INTER_CUBIC};

    CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC4);
    CV_Assert(interpolation == INTER_NEAREST || interpolation == INTER_LINEAR || interpolation == INTER_CUBIC);

    dst.create(dsize, src.type());

    NppiSize srcsz;
    srcsz.height = src.rows;
    srcsz.width = src.cols;

    NppiRect srcroi;
    srcroi.x = srcroi.y = 0;
    srcroi.height = src.rows;
    srcroi.width = src.cols;

    NppiRect dstroi;
    dstroi.x = dstroi.y = 0;
    dstroi.height = dst.rows;
    dstroi.width = dst.cols;

    cudaStream_t stream = StreamAccessor::getStream(s);

    NppStreamHandler h(stream);

    if (src.type() == CV_8UC1)
    {
        nppSafeCall( nppiRotate_8u_C1R(src.ptr<Npp8u>(), srcsz, static_cast<int>(src.step), srcroi,
            dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstroi, angle, xShift, yShift, npp_inter[interpolation]) );
    }
    else
    {
        nppSafeCall( nppiRotate_8u_C4R(src.ptr<Npp8u>(), srcsz, static_cast<int>(src.step), srcroi,
            dst.ptr<Npp8u>(), static_cast<int>(dst.step), dstroi, angle, xShift, yShift, npp_inter[interpolation]) );
    }

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
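
// Usage sketch (illustrative, not part of the build). Unlike warpAffine(), the
// rotation is given directly in degrees, plus a shift applied to the result:
//
//     cv::gpu::GpuMat d_src(h_img), d_rot;
//     cv::gpu::rotate(d_src, d_rot, d_src.size(), 45.0, 0.0, 0.0, cv::INTER_LINEAR);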
////////////////////////////////////////////////////////////////////////
// integral

void cv::gpu::integral(const GpuMat& src, GpuMat& sum, Stream& s)
{
    GpuMat buffer;
    integralBuffered(src, sum, buffer, s);
}

void cv::gpu::integralBuffered(const GpuMat& src, GpuMat& sum, GpuMat& buffer, Stream& s)
{
    CV_Assert(src.type() == CV_8UC1);

    sum.create(src.rows + 1, src.cols + 1, CV_32S);

    NcvSize32u roiSize;
    roiSize.width = src.cols;
    roiSize.height = src.rows;

    cudaDeviceProp prop;
    cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );

    Ncv32u bufSize;
    nppSafeCall( nppiStIntegralGetSize_8u32u(roiSize, &bufSize, prop) );
    ensureSizeIsEnough(1, bufSize, CV_8UC1, buffer);

    cudaStream_t stream = StreamAccessor::getStream(s);

    NppStStreamHandler h(stream);

    nppSafeCall( nppiStIntegral_8u32u_C1R(const_cast<Ncv8u*>(src.ptr<Ncv8u>()), static_cast<int>(src.step),
        sum.ptr<Ncv32u>(), static_cast<int>(sum.step), roiSize, buffer.ptr<Ncv8u>(), bufSize, prop) );

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}

void cv::gpu::integral(const GpuMat& src, GpuMat& sum, GpuMat& sqsum, Stream& s)
{
    CV_Assert(src.type() == CV_8UC1);

    int width = src.cols + 1, height = src.rows + 1;

    sum.create(height, width, CV_32S);
    sqsum.create(height, width, CV_32F);

    NppiSize sz;
    sz.width = src.cols;
    sz.height = src.rows;

    cudaStream_t stream = StreamAccessor::getStream(s);

    NppStreamHandler h(stream);

    nppSafeCall( nppiSqrIntegral_8u32s32f_C1R(const_cast<Npp8u*>(src.ptr<Npp8u>()), static_cast<int>(src.step),
        sum.ptr<Npp32s>(), static_cast<int>(sum.step), sqsum.ptr<Npp32f>(), static_cast<int>(sqsum.step), sz, 0, 0.0f, height) );

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
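
// Usage sketch (illustrative, not part of the build). The integral image is
// (rows + 1) x (cols + 1), CV_32S, with a zero first row and column, so a box
// sum over [x0,x1) x [y0,y1) is S(y1,x1) - S(y0,x1) - S(y1,x0) + S(y0,x0):
//
//     cv::gpu::GpuMat d_src(h_gray), d_sum;
//     cv::gpu::integral(d_src, d_sum);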
//////////////////////////////////////////////////////////////////////////////
// sqrIntegral

void cv::gpu::sqrIntegral(const GpuMat& src, GpuMat& sqsum, Stream& s)
{
    CV_Assert(src.type() == CV_8U);

    NcvSize32u roiSize;
    roiSize.width = src.cols;
    roiSize.height = src.rows;

    cudaDeviceProp prop;
    cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );

    Ncv32u bufSize;
    nppSafeCall(nppiStSqrIntegralGetSize_8u64u(roiSize, &bufSize, prop));
    GpuMat buf(1, bufSize, CV_8U);

    cudaStream_t stream = StreamAccessor::getStream(s);

    NppStStreamHandler h(stream);

    sqsum.create(src.rows + 1, src.cols + 1, CV_64F);
    nppSafeCall(nppiStSqrIntegral_8u64u_C1R(const_cast<Ncv8u*>(src.ptr<Ncv8u>(0)), static_cast<int>(src.step),
        sqsum.ptr<Ncv64u>(0), static_cast<int>(sqsum.step), roiSize, buf.ptr<Ncv8u>(0), bufSize, prop));

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
//////////////////////////////////////////////////////////////////////////////
// columnSum

namespace cv { namespace gpu { namespace imgproc
{
    void columnSum_32F(const DevMem2D src, const DevMem2D dst);
}}}

void cv::gpu::columnSum(const GpuMat& src, GpuMat& dst)
{
    CV_Assert(src.type() == CV_32F);

    dst.create(src.size(), CV_32F);
    imgproc::columnSum_32F(src, dst);
}

void cv::gpu::rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, const Rect& rect, Stream& s)
{
    CV_Assert(src.type() == CV_32SC1 && sqr.type() == CV_32FC1);

    dst.create(src.size(), CV_32FC1);

    NppiSize sz;
    sz.width = src.cols;
    sz.height = src.rows;

    NppiRect nppRect;
    nppRect.height = rect.height;
    nppRect.width = rect.width;
    nppRect.x = rect.x;
    nppRect.y = rect.y;

    cudaStream_t stream = StreamAccessor::getStream(s);

    NppStreamHandler h(stream);

    nppSafeCall( nppiRectStdDev_32s32f_C1R(src.ptr<Npp32s>(), static_cast<int>(src.step), sqr.ptr<Npp32f>(), static_cast<int>(sqr.step),
        dst.ptr<Npp32f>(), static_cast<int>(dst.step), sz, nppRect) );

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Histogram

namespace
{
    template<int n> struct NPPTypeTraits;
    template<> struct NPPTypeTraits<CV_8U>  { typedef Npp8u npp_type; };
    template<> struct NPPTypeTraits<CV_16U> { typedef Npp16u npp_type; };
    template<> struct NPPTypeTraits<CV_16S> { typedef Npp16s npp_type; };
    template<> struct NPPTypeTraits<CV_32F> { typedef Npp32f npp_type; };

    typedef NppStatus (*get_buf_size_c1_t)(NppiSize oSizeROI, int nLevels, int* hpBufferSize);
    typedef NppStatus (*get_buf_size_c4_t)(NppiSize oSizeROI, int nLevels[], int* hpBufferSize);

    template<int SDEPTH> struct NppHistogramEvenFuncC1
    {
        typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;

        typedef NppStatus (*func_ptr)(const src_t* pSrc, int nSrcStep, NppiSize oSizeROI, Npp32s * pHist,
            int nLevels, Npp32s nLowerLevel, Npp32s nUpperLevel, Npp8u * pBuffer);
    };
    template<int SDEPTH> struct NppHistogramEvenFuncC4
    {
        typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;

        typedef NppStatus (*func_ptr)(const src_t* pSrc, int nSrcStep, NppiSize oSizeROI,
            Npp32s * pHist[4], int nLevels[4], Npp32s nLowerLevel[4], Npp32s nUpperLevel[4], Npp8u * pBuffer);
    };

    template<int SDEPTH, typename NppHistogramEvenFuncC1<SDEPTH>::func_ptr func, get_buf_size_c1_t get_buf_size>
    struct NppHistogramEvenC1
    {
        typedef typename NppHistogramEvenFuncC1<SDEPTH>::src_t src_t;

        static void hist(const GpuMat& src, GpuMat& hist, GpuMat& buffer, int histSize, int lowerLevel, int upperLevel, cudaStream_t stream)
        {
            int levels = histSize + 1;
            hist.create(1, histSize, CV_32S);

            NppiSize sz;
            sz.width = src.cols;
            sz.height = src.rows;

            int buf_size;
            get_buf_size(sz, levels, &buf_size);

            ensureSizeIsEnough(1, buf_size, CV_8U, buffer);

            NppStreamHandler h(stream);

            nppSafeCall( func(src.ptr<src_t>(), static_cast<int>(src.step), sz, hist.ptr<Npp32s>(), levels,
                lowerLevel, upperLevel, buffer.ptr<Npp8u>()) );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    };
    template<int SDEPTH, typename NppHistogramEvenFuncC4<SDEPTH>::func_ptr func, get_buf_size_c4_t get_buf_size>
    struct NppHistogramEvenC4
    {
        typedef typename NppHistogramEvenFuncC4<SDEPTH>::src_t src_t;

        static void hist(const GpuMat& src, GpuMat hist[4], GpuMat& buffer, int histSize[4], int lowerLevel[4], int upperLevel[4], cudaStream_t stream)
        {
            int levels[] = {histSize[0] + 1, histSize[1] + 1, histSize[2] + 1, histSize[3] + 1};
            hist[0].create(1, histSize[0], CV_32S);
            hist[1].create(1, histSize[1], CV_32S);
            hist[2].create(1, histSize[2], CV_32S);
            hist[3].create(1, histSize[3], CV_32S);

            NppiSize sz;
            sz.width = src.cols;
            sz.height = src.rows;

            Npp32s* pHist[] = {hist[0].ptr<Npp32s>(), hist[1].ptr<Npp32s>(), hist[2].ptr<Npp32s>(), hist[3].ptr<Npp32s>()};

            int buf_size;
            get_buf_size(sz, levels, &buf_size);

            ensureSizeIsEnough(1, buf_size, CV_8U, buffer);

            NppStreamHandler h(stream);

            nppSafeCall( func(src.ptr<src_t>(), static_cast<int>(src.step), sz, pHist, levels, lowerLevel, upperLevel, buffer.ptr<Npp8u>()) );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    };

    template<int SDEPTH> struct NppHistogramRangeFuncC1
    {
        typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
        typedef Npp32s level_t;
        enum {LEVEL_TYPE_CODE=CV_32SC1};

        typedef NppStatus (*func_ptr)(const src_t* pSrc, int nSrcStep, NppiSize oSizeROI, Npp32s* pHist,
            const Npp32s* pLevels, int nLevels, Npp8u* pBuffer);
    };
    template<> struct NppHistogramRangeFuncC1<CV_32F>
    {
        typedef Npp32f src_t;
        typedef Npp32f level_t;
        enum {LEVEL_TYPE_CODE=CV_32FC1};

        typedef NppStatus (*func_ptr)(const Npp32f* pSrc, int nSrcStep, NppiSize oSizeROI, Npp32s* pHist,
            const Npp32f* pLevels, int nLevels, Npp8u* pBuffer);
    };
    template<int SDEPTH> struct NppHistogramRangeFuncC4
    {
        typedef typename NPPTypeTraits<SDEPTH>::npp_type src_t;
        typedef Npp32s level_t;
        enum {LEVEL_TYPE_CODE=CV_32SC1};

        typedef NppStatus (*func_ptr)(const src_t* pSrc, int nSrcStep, NppiSize oSizeROI, Npp32s* pHist[4],
            const Npp32s* pLevels[4], int nLevels[4], Npp8u* pBuffer);
    };
    template<> struct NppHistogramRangeFuncC4<CV_32F>
    {
        typedef Npp32f src_t;
        typedef Npp32f level_t;
        enum {LEVEL_TYPE_CODE=CV_32FC1};

        typedef NppStatus (*func_ptr)(const Npp32f* pSrc, int nSrcStep, NppiSize oSizeROI, Npp32s* pHist[4],
            const Npp32f* pLevels[4], int nLevels[4], Npp8u* pBuffer);
    };

    template<int SDEPTH, typename NppHistogramRangeFuncC1<SDEPTH>::func_ptr func, get_buf_size_c1_t get_buf_size>
    struct NppHistogramRangeC1
    {
        typedef typename NppHistogramRangeFuncC1<SDEPTH>::src_t src_t;
        typedef typename NppHistogramRangeFuncC1<SDEPTH>::level_t level_t;
        enum {LEVEL_TYPE_CODE=NppHistogramRangeFuncC1<SDEPTH>::LEVEL_TYPE_CODE};

        static void hist(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buffer, cudaStream_t stream)
        {
            CV_Assert(levels.type() == LEVEL_TYPE_CODE && levels.rows == 1);

            hist.create(1, levels.cols - 1, CV_32S);

            NppiSize sz;
            sz.width = src.cols;
            sz.height = src.rows;

            int buf_size;
            get_buf_size(sz, levels.cols, &buf_size);

            ensureSizeIsEnough(1, buf_size, CV_8U, buffer);

            NppStreamHandler h(stream);

            nppSafeCall( func(src.ptr<src_t>(), static_cast<int>(src.step), sz, hist.ptr<Npp32s>(), levels.ptr<level_t>(), levels.cols, buffer.ptr<Npp8u>()) );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    };
    template<int SDEPTH, typename NppHistogramRangeFuncC4<SDEPTH>::func_ptr func, get_buf_size_c4_t get_buf_size>
    struct NppHistogramRangeC4
    {
        typedef typename NppHistogramRangeFuncC4<SDEPTH>::src_t src_t;
        typedef typename NppHistogramRangeFuncC1<SDEPTH>::level_t level_t;
        enum {LEVEL_TYPE_CODE=NppHistogramRangeFuncC1<SDEPTH>::LEVEL_TYPE_CODE};

        static void hist(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buffer, cudaStream_t stream)
        {
            CV_Assert(levels[0].type() == LEVEL_TYPE_CODE && levels[0].rows == 1);
            CV_Assert(levels[1].type() == LEVEL_TYPE_CODE && levels[1].rows == 1);
            CV_Assert(levels[2].type() == LEVEL_TYPE_CODE && levels[2].rows == 1);
            CV_Assert(levels[3].type() == LEVEL_TYPE_CODE && levels[3].rows == 1);

            hist[0].create(1, levels[0].cols - 1, CV_32S);
            hist[1].create(1, levels[1].cols - 1, CV_32S);
            hist[2].create(1, levels[2].cols - 1, CV_32S);
            hist[3].create(1, levels[3].cols - 1, CV_32S);

            Npp32s* pHist[] = {hist[0].ptr<Npp32s>(), hist[1].ptr<Npp32s>(), hist[2].ptr<Npp32s>(), hist[3].ptr<Npp32s>()};
            int nLevels[] = {levels[0].cols, levels[1].cols, levels[2].cols, levels[3].cols};
            const level_t* pLevels[] = {levels[0].ptr<level_t>(), levels[1].ptr<level_t>(), levels[2].ptr<level_t>(), levels[3].ptr<level_t>()};

            NppiSize sz;
            sz.width = src.cols;
            sz.height = src.rows;

            int buf_size;
            get_buf_size(sz, nLevels, &buf_size);

            ensureSizeIsEnough(1, buf_size, CV_8U, buffer);

            NppStreamHandler h(stream);

            nppSafeCall( func(src.ptr<src_t>(), static_cast<int>(src.step), sz, pHist, pLevels, nLevels, buffer.ptr<Npp8u>()) );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }
    };
}
void cv::gpu::evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel)
{
    Mat host_levels(1, nLevels, CV_32SC1);
    nppSafeCall( nppiEvenLevelsHost_32s(host_levels.ptr<Npp32s>(), nLevels, lowerLevel, upperLevel) );
    levels.upload(host_levels);
}

void cv::gpu::histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, Stream& stream)
{
    GpuMat buf;
    histEven(src, hist, buf, histSize, lowerLevel, upperLevel, stream);
}

void cv::gpu::histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream)
{
    CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1);

    typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, GpuMat& buf, int levels, int lowerLevel, int upperLevel, cudaStream_t stream);
    static const hist_t hist_callers[] =
    {
        NppHistogramEvenC1<CV_8U , nppiHistogramEven_8u_C1R , nppiHistogramEvenGetBufferSize_8u_C1R >::hist,
        0,
        NppHistogramEvenC1<CV_16U, nppiHistogramEven_16u_C1R, nppiHistogramEvenGetBufferSize_16u_C1R>::hist,
        NppHistogramEvenC1<CV_16S, nppiHistogramEven_16s_C1R, nppiHistogramEvenGetBufferSize_16s_C1R>::hist
    };

    hist_callers[src.depth()](src, hist, buf, histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));
}
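
// Usage sketch (illustrative, not part of the build). A 256-bin histogram of an
// 8-bit image over the full value range:
//
//     cv::gpu::GpuMat d_src(h_gray), d_hist;
//     cv::gpu::histEven(d_src, d_hist, 256, 0, 256);
//     cv::Mat h_hist;
//     d_hist.download(h_hist);   // 1 x 256, CV_32S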
void cv::gpu::histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream)
{
    GpuMat buf;
    histEven(src, hist, buf, histSize, lowerLevel, upperLevel, stream);
}

void cv::gpu::histEven(const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream)
{
    CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4 || src.type() == CV_16SC4);

    typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], GpuMat& buf, int levels[4], int lowerLevel[4], int upperLevel[4], cudaStream_t stream);
    static const hist_t hist_callers[] =
    {
        NppHistogramEvenC4<CV_8U , nppiHistogramEven_8u_C4R , nppiHistogramEvenGetBufferSize_8u_C4R >::hist,
        0,
        NppHistogramEvenC4<CV_16U, nppiHistogramEven_16u_C4R, nppiHistogramEvenGetBufferSize_16u_C4R>::hist,
        NppHistogramEvenC4<CV_16S, nppiHistogramEven_16s_C4R, nppiHistogramEvenGetBufferSize_16s_C4R>::hist
    };

    hist_callers[src.depth()](src, hist, buf, histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));
}

void cv::gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, Stream& stream)
{
    GpuMat buf;
    histRange(src, hist, levels, buf, stream);
}

void cv::gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream)
{
    CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 || src.type() == CV_32FC1);

    typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, cudaStream_t stream);
    static const hist_t hist_callers[] =
    {
        NppHistogramRangeC1<CV_8U , nppiHistogramRange_8u_C1R , nppiHistogramRangeGetBufferSize_8u_C1R >::hist,
        0,
        NppHistogramRangeC1<CV_16U, nppiHistogramRange_16u_C1R, nppiHistogramRangeGetBufferSize_16u_C1R>::hist,
        NppHistogramRangeC1<CV_16S, nppiHistogramRange_16s_C1R, nppiHistogramRangeGetBufferSize_16s_C1R>::hist,
        0,
        NppHistogramRangeC1<CV_32F, nppiHistogramRange_32f_C1R, nppiHistogramRangeGetBufferSize_32f_C1R>::hist
    };

    hist_callers[src.depth()](src, hist, levels, buf, StreamAccessor::getStream(stream));
}

void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], Stream& stream)
{
    GpuMat buf;
    histRange(src, hist, levels, buf, stream);
}

void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buf, Stream& stream)
{
    CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4 || src.type() == CV_16SC4 || src.type() == CV_32FC4);

    typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buf, cudaStream_t stream);
    static const hist_t hist_callers[] =
    {
        NppHistogramRangeC4<CV_8U , nppiHistogramRange_8u_C4R , nppiHistogramRangeGetBufferSize_8u_C4R >::hist,
        0,
        NppHistogramRangeC4<CV_16U, nppiHistogramRange_16u_C4R, nppiHistogramRangeGetBufferSize_16u_C4R>::hist,
        NppHistogramRangeC4<CV_16S, nppiHistogramRange_16s_C4R, nppiHistogramRangeGetBufferSize_16s_C4R>::hist,
        0,
        NppHistogramRangeC4<CV_32F, nppiHistogramRange_32f_C4R, nppiHistogramRangeGetBufferSize_32f_C4R>::hist
    };

    hist_callers[src.depth()](src, hist, levels, buf, StreamAccessor::getStream(stream));
}
namespace cv { namespace gpu { namespace histograms
{
    void histogram256_gpu(DevMem2D src, int* hist, unsigned int* buf, cudaStream_t stream);

    const int PARTIAL_HISTOGRAM256_COUNT = 240;
    const int HISTOGRAM256_BIN_COUNT = 256;
}}}

void cv::gpu::calcHist(const GpuMat& src, GpuMat& hist, Stream& stream)
{
    GpuMat buf;
    calcHist(src, hist, buf, stream);
}

void cv::gpu::calcHist(const GpuMat& src, GpuMat& hist, GpuMat& buf, Stream& stream)
{
    using namespace cv::gpu::histograms;

    CV_Assert(src.type() == CV_8UC1);

    hist.create(1, 256, CV_32SC1);

    ensureSizeIsEnough(1, PARTIAL_HISTOGRAM256_COUNT * HISTOGRAM256_BIN_COUNT, CV_32SC1, buf);

    histogram256_gpu(src, hist.ptr<int>(), buf.ptr<unsigned int>(), StreamAccessor::getStream(stream));
}

void cv::gpu::equalizeHist(const GpuMat& src, GpuMat& dst, Stream& stream)
{
    GpuMat hist;
    GpuMat buf;
    equalizeHist(src, dst, hist, buf, stream);
}

void cv::gpu::equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, Stream& stream)
{
    GpuMat buf;
    equalizeHist(src, dst, hist, buf, stream);
}

namespace cv { namespace gpu { namespace histograms
{
    void equalizeHist_gpu(DevMem2D src, DevMem2D dst, const int* lut, cudaStream_t stream);
}}}

void cv::gpu::equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, GpuMat& buf, Stream& s)
{
    using namespace cv::gpu::histograms;

    CV_Assert(src.type() == CV_8UC1);

    dst.create(src.size(), src.type());

    int intBufSize;
    nppSafeCall( nppsIntegralGetBufferSize_32s(256, &intBufSize) );

    int bufSize = static_cast<int>(std::max(256 * 240 * sizeof(int), intBufSize + 256 * sizeof(int)));

    ensureSizeIsEnough(1, bufSize, CV_8UC1, buf);

    GpuMat histBuf(1, 256 * 240, CV_32SC1, buf.ptr());
    GpuMat intBuf(1, intBufSize, CV_8UC1, buf.ptr());
    GpuMat lut(1, 256, CV_32S, buf.ptr() + intBufSize);

    calcHist(src, hist, histBuf, s);

    cudaStream_t stream = StreamAccessor::getStream(s);

    NppStreamHandler h(stream);

    nppSafeCall( nppsIntegral_32s(hist.ptr<Npp32s>(), lut.ptr<Npp32s>(), 256, intBuf.ptr<Npp8u>()) );

    if (stream == 0)
        cudaSafeCall( cudaDeviceSynchronize() );

    equalizeHist_gpu(src, dst, lut.ptr<int>(), stream);
}
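
// Usage sketch (illustrative, not part of the build). Histogram equalization of
// an 8-bit grayscale image, reusing the intermediate buffers across frames:
//
//     cv::gpu::GpuMat d_src(h_gray), d_eq, hist, buf;
//     cv::gpu::equalizeHist(d_src, d_eq, hist, buf);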
////////////////////////////////////////////////////////////////////////
// cornerHarris & minEigenVal

namespace cv { namespace gpu { namespace imgproc {

    void extractCovData_caller(const DevMem2Df Dx, const DevMem2Df Dy, PtrStepf dst);
    void cornerHarris_caller(const int block_size, const float k, const DevMem2D Dx, const DevMem2D Dy, DevMem2D dst, int border_type);
    void cornerMinEigenVal_caller(const int block_size, const DevMem2D Dx, const DevMem2D Dy, DevMem2D dst, int border_type);
}}}

namespace
{
    template <typename T>
    void extractCovData(const GpuMat& src, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType)
    {
        double scale = (double)(1 << ((ksize > 0 ? ksize : 3) - 1)) * blockSize;
        if (ksize < 0)
            scale *= 2.;
        if (src.depth() == CV_8U)
            scale *= 255.;
        scale = 1. / scale;

        Dx.create(src.size(), CV_32F);
        Dy.create(src.size(), CV_32F);

        if (ksize > 0)
        {
            Sobel(src, Dx, CV_32F, 1, 0, ksize, scale, borderType);
            Sobel(src, Dy, CV_32F, 0, 1, ksize, scale, borderType);
        }
        else
        {
            Scharr(src, Dx, CV_32F, 1, 0, scale, borderType);
            Scharr(src, Dy, CV_32F, 0, 1, scale, borderType);
        }
    }

    void extractCovData(const GpuMat& src, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType)
    {
        switch (src.type())
        {
        case CV_8U:
            extractCovData<unsigned char>(src, Dx, Dy, blockSize, ksize, borderType);
            break;
        case CV_32F:
            extractCovData<float>(src, Dx, Dy, blockSize, ksize, borderType);
            break;
        default:
            CV_Error(CV_StsBadArg, "extractCovData: unsupported type of the source matrix");
        }
    }

} // Anonymous namespace
bool cv::gpu::tryConvertToGpuBorderType(int cpuBorderType, int& gpuBorderType)
{
    switch (cpuBorderType)
    {
    case cv::BORDER_REFLECT101:
        gpuBorderType = cv::gpu::BORDER_REFLECT101_GPU;
        return true;
    case cv::BORDER_REPLICATE:
        gpuBorderType = cv::gpu::BORDER_REPLICATE_GPU;
        return true;
    case cv::BORDER_CONSTANT:
        gpuBorderType = cv::gpu::BORDER_CONSTANT_GPU;
        return true;
    case cv::BORDER_REFLECT:
        gpuBorderType = cv::gpu::BORDER_REFLECT_GPU;
        return true;
    case cv::BORDER_WRAP:
        gpuBorderType = cv::gpu::BORDER_WRAP_GPU;
        return true;
    default:
        return false;
    }
}
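
// Usage sketch (illustrative, not part of the build). Callers translate OpenCV
// border flags to the GPU-side enumeration before launching kernels, asserting
// on unsupported modes:
//
//     int gpuBorderType;
//     CV_Assert(cv::gpu::tryConvertToGpuBorderType(cv::BORDER_REPLICATE, gpuBorderType));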
void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType)
{
    GpuMat Dx, Dy;
    cornerHarris(src, dst, Dx, Dy, blockSize, ksize, k, borderType);
}

void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, double k, int borderType)
{
    CV_Assert(borderType == cv::BORDER_REFLECT101 ||
              borderType == cv::BORDER_REPLICATE);

    int gpuBorderType;
    CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));

    extractCovData(src, Dx, Dy, blockSize, ksize, borderType);
    dst.create(src.size(), CV_32F);
    imgproc::cornerHarris_caller(blockSize, (float)k, Dx, Dy, dst, gpuBorderType);
}
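
// Usage sketch (illustrative, not part of the build). Harris response for a
// single-channel 8-bit or float image, with the header's default border mode:
//
//     cv::gpu::GpuMat d_src(h_gray), d_resp;
//     cv::gpu::cornerHarris(d_src, d_resp, 5 /*blockSize*/, 3 /*ksize*/, 0.04 /*k*/);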
void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType)
{
    GpuMat Dx, Dy;
    cornerMinEigenVal(src, dst, Dx, Dy, blockSize, ksize, borderType);
}

void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType)
{
    CV_Assert(borderType == cv::BORDER_REFLECT101 ||
              borderType == cv::BORDER_REPLICATE);

    int gpuBorderType;
    CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));

    extractCovData(src, Dx, Dy, blockSize, ksize, borderType);
    dst.create(src.size(), CV_32F);
    imgproc::cornerMinEigenVal_caller(blockSize, Dx, Dy, dst, gpuBorderType);
}
//////////////////////////////////////////////////////////////////////////////
// mulSpectrums

namespace cv { namespace gpu { namespace imgproc
{
    void mulSpectrums(const PtrStep_<cufftComplex> a, const PtrStep_<cufftComplex> b,
                      DevMem2D_<cufftComplex> c);

    void mulSpectrums_CONJ(const PtrStep_<cufftComplex> a, const PtrStep_<cufftComplex> b,
                           DevMem2D_<cufftComplex> c);
}}}

void cv::gpu::mulSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c,
                           int flags, bool conjB)
{
    typedef void (*Caller)(const PtrStep_<cufftComplex>, const PtrStep_<cufftComplex>,
                           DevMem2D_<cufftComplex>);
    static Caller callers[] = { imgproc::mulSpectrums,
                                imgproc::mulSpectrums_CONJ };

    CV_Assert(a.type() == b.type() && a.type() == CV_32FC2);
    CV_Assert(a.size() == b.size());

    c.create(a.size(), CV_32FC2);

    Caller caller = callers[(int)conjB];
    caller(a, b, c);
}

//////////////////////////////////////////////////////////////////////////////
// mulAndScaleSpectrums

namespace cv { namespace gpu { namespace imgproc
{
    void mulAndScaleSpectrums(const PtrStep_<cufftComplex> a, const PtrStep_<cufftComplex> b,
                              float scale, DevMem2D_<cufftComplex> c);

    void mulAndScaleSpectrums_CONJ(const PtrStep_<cufftComplex> a, const PtrStep_<cufftComplex> b,
                                   float scale, DevMem2D_<cufftComplex> c);
}}}

void cv::gpu::mulAndScaleSpectrums(const GpuMat& a, const GpuMat& b, GpuMat& c,
                                   int flags, float scale, bool conjB)
{
    typedef void (*Caller)(const PtrStep_<cufftComplex>, const PtrStep_<cufftComplex>,
                           float scale, DevMem2D_<cufftComplex>);
    static Caller callers[] = { imgproc::mulAndScaleSpectrums,
                                imgproc::mulAndScaleSpectrums_CONJ };

    CV_Assert(a.type() == b.type() && a.type() == CV_32FC2);
    CV_Assert(a.size() == b.size());

    c.create(a.size(), CV_32FC2);

    Caller caller = callers[(int)conjB];
    caller(a, b, scale, c);
}
//////////////////////////////////////////////////////////////////////////////
// dft

void cv::gpu::dft(const GpuMat& src, GpuMat& dst, Size dft_size, int flags)
{
    CV_Assert(src.type() == CV_32F || src.type() == CV_32FC2);

    // We don't support unpacked output (in the case of real input)
    CV_Assert(!(flags & DFT_COMPLEX_OUTPUT));

    bool is_1d_input = (dft_size.height == 1) || (dft_size.width == 1);
    int is_row_dft = flags & DFT_ROWS;
    int is_scaled_dft = flags & DFT_SCALE;
    int is_inverse = flags & DFT_INVERSE;
    bool is_complex_input = src.channels() == 2;
    bool is_complex_output = !(flags & DFT_REAL_OUTPUT);

    // We don't support real-to-real transform
    CV_Assert(is_complex_input || is_complex_output);

    // Make sure we work with a continuous input,
    // as CUFFT can't handle gaps
    GpuMat src_data = src;
    createContinuous(src.rows, src.cols, src.type(), src_data);
    if (src_data.data != src.data)
        src.copyTo(src_data);

    Size dft_size_opt = dft_size;
    if (is_1d_input && !is_row_dft)
    {
        // If the source matrix is a single column, handle it as a single row
        dft_size_opt.width = std::max(dft_size.width, dft_size.height);
        dft_size_opt.height = std::min(dft_size.width, dft_size.height);
    }

    cufftType dft_type = CUFFT_R2C;
    if (is_complex_input)
        dft_type = is_complex_output ? CUFFT_C2C : CUFFT_C2R;

    CV_Assert(dft_size_opt.width > 1);

    cufftHandle plan;
    if (is_1d_input || is_row_dft)
        cufftSafeCall(cufftPlan1d(&plan, dft_size_opt.width, dft_type, dft_size_opt.height));
    else
        cufftSafeCall(cufftPlan2d(&plan, dft_size_opt.height, dft_size_opt.width, dft_type));

    if (is_complex_input)
    {
        if (is_complex_output)
        {
            createContinuous(dft_size, CV_32FC2, dst);
            cufftSafeCall(cufftExecC2C(
                    plan, src_data.ptr<cufftComplex>(), dst.ptr<cufftComplex>(),
                    is_inverse ? CUFFT_INVERSE : CUFFT_FORWARD));
        }
        else
        {
            createContinuous(dft_size, CV_32F, dst);
            cufftSafeCall(cufftExecC2R(
                    plan, src_data.ptr<cufftComplex>(), dst.ptr<cufftReal>()));
        }
    }
    else
    {
        // dft_size may have been swapped above for a 1D column transform;
        // the packed output shape must reflect that
        if (dft_size == dft_size_opt)
            createContinuous(Size(dft_size.width / 2 + 1, dft_size.height), CV_32FC2, dst);
        else
            createContinuous(Size(dft_size.width, dft_size.height / 2 + 1), CV_32FC2, dst);

        cufftSafeCall(cufftExecR2C(
                plan, src_data.ptr<cufftReal>(), dst.ptr<cufftComplex>()));
    }

    cufftSafeCall(cufftDestroy(plan));

    if (is_scaled_dft)
        multiply(dst, Scalar::all(1. / dft_size.area()), dst);
}
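// Usage sketch (hypothetical sizes): a real-to-complex round trip. The forward
// pass stores the packed half-spectrum, (w/2 + 1) x h, CV_32FC2; the inverse
// pass requests real output and rescales explicitly via DFT_SCALE.
//   cv::gpu::GpuMat img(480, 640, CV_32F), spect, back;
//   cv::gpu::dft(img, spect, img.size());                            // R2C
//   cv::gpu::dft(spect, back, img.size(),
//                DFT_INVERSE | DFT_REAL_OUTPUT | DFT_SCALE);         // C2R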
\r
//////////////////////////////////////////////////////////////////////////////
// convolve

void cv::gpu::ConvolveBuf::create(Size image_size, Size templ_size)
{
    result_size = Size(image_size.width - templ_size.width + 1,
                       image_size.height - templ_size.height + 1);
    block_size = estimateBlockSize(result_size, templ_size);

    dft_size.width = getOptimalDFTSize(block_size.width + templ_size.width - 1);
    dft_size.height = getOptimalDFTSize(block_size.height + templ_size.height - 1);
    createContinuous(dft_size, CV_32F, image_block);
    createContinuous(dft_size, CV_32F, templ_block);
    createContinuous(dft_size, CV_32F, result_data);

    spect_len = dft_size.height * (dft_size.width / 2 + 1);
    createContinuous(1, spect_len, CV_32FC2, image_spect);
    createContinuous(1, spect_len, CV_32FC2, templ_spect);
    createContinuous(1, spect_len, CV_32FC2, result_spect);

    // Clip the estimated block size to what the chosen DFT size can hold
    block_size.width = std::min(dft_size.width - templ_size.width + 1, result_size.width);
    block_size.height = std::min(dft_size.height - templ_size.height + 1, result_size.height);
}

Size cv::gpu::ConvolveBuf::estimateBlockSize(Size result_size, Size templ_size)
{
    int scale = 40;
    Size bsize_min(1024, 1024);

    // Check whether we use a Fermi-generation or newer GPU
    if (DeviceInfo().majorVersion() >= 2)
    {
        bsize_min.width = 2048;
        bsize_min.height = 2048;
    }

    Size bsize(std::max(templ_size.width * scale, bsize_min.width),
               std::max(templ_size.height * scale, bsize_min.height));

    bsize.width = std::min(bsize.width, result_size.width);
    bsize.height = std::min(bsize.height, result_size.height);
    return bsize;
}
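// Sizing example (illustrative numbers, not from the code): a 64x64 template
// with a 1500x1500 result on a Fermi-class device gives
// bsize = max(64 * 40, 2048) = 2560 per side, clipped to 1500x1500, so the
// whole result fits in a single DFT block of getOptimalDFTSize(1500 + 63).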
\r
void cv::gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result,
                       bool ccorr)
{
    ConvolveBuf buf;
    convolve(image, templ, result, ccorr, buf);
}

void cv::gpu::convolve(const GpuMat& image, const GpuMat& templ, GpuMat& result,
                       bool ccorr, ConvolveBuf& buf)
{
    StaticAssert<sizeof(float) == sizeof(cufftReal)>::check();
    StaticAssert<sizeof(float) * 2 == sizeof(cufftComplex)>::check();

    CV_Assert(image.type() == CV_32F);
    CV_Assert(templ.type() == CV_32F);

    buf.create(image.size(), templ.size());
    result.create(buf.result_size, CV_32F);

    Size& block_size = buf.block_size;
    Size& dft_size = buf.dft_size;

    GpuMat& image_block = buf.image_block;
    GpuMat& templ_block = buf.templ_block;
    GpuMat& result_data = buf.result_data;

    GpuMat& image_spect = buf.image_spect;
    GpuMat& templ_spect = buf.templ_spect;
    GpuMat& result_spect = buf.result_spect;

    cufftHandle planR2C, planC2R;
    cufftSafeCall(cufftPlan2d(&planC2R, dft_size.height, dft_size.width, CUFFT_C2R));
    cufftSafeCall(cufftPlan2d(&planR2C, dft_size.height, dft_size.width, CUFFT_R2C));

    // Pad the template once; its spectrum is reused for every block
    GpuMat templ_roi(templ.size(), CV_32F, templ.data, templ.step);
    copyMakeBorder(templ_roi, templ_block, 0, templ_block.rows - templ_roi.rows, 0,
                   templ_block.cols - templ_roi.cols, 0);

    cufftSafeCall(cufftExecR2C(planR2C, templ_block.ptr<cufftReal>(),
                               templ_spect.ptr<cufftComplex>()));

    // Process all blocks of the result matrix
    for (int y = 0; y < result.rows; y += block_size.height)
    {
        for (int x = 0; x < result.cols; x += block_size.width)
        {
            Size image_roi_size(std::min(x + dft_size.width, image.cols) - x,
                                std::min(y + dft_size.height, image.rows) - y);
            GpuMat image_roi(image_roi_size, CV_32F, (void*)(image.ptr<float>(y) + x),
                             image.step);
            copyMakeBorder(image_roi, image_block, 0, image_block.rows - image_roi.rows,
                           0, image_block.cols - image_roi.cols, 0);

            cufftSafeCall(cufftExecR2C(planR2C, image_block.ptr<cufftReal>(),
                                       image_spect.ptr<cufftComplex>()));
            mulAndScaleSpectrums(image_spect, templ_spect, result_spect, 0,
                                 1.f / dft_size.area(), ccorr);
            cufftSafeCall(cufftExecC2R(planC2R, result_spect.ptr<cufftComplex>(),
                                       result_data.ptr<cufftReal>()));

            Size result_roi_size(std::min(x + block_size.width, result.cols) - x,
                                 std::min(y + block_size.height, result.rows) - y);
            GpuMat result_roi(result_roi_size, result.type(),
                              (void*)(result.ptr<float>(y) + x), result.step);
            GpuMat result_block(result_roi_size, result_data.type(),
                                result_data.ptr(), result_data.step);
            result_block.copyTo(result_roi);
        }
    }

    cufftSafeCall(cufftDestroy(planR2C));
    cufftSafeCall(cufftDestroy(planC2R));
}
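// Usage sketch (hypothetical sizes): FFT-based cross-correlation of a float
// image with a template, e.g. as the core of template matching.
//   cv::gpu::GpuMat img(1024, 1024, CV_32F), templ(64, 64, CV_32F), resp;
//   cv::gpu::convolve(img, templ, resp, true);  // ccorr = true -> correlation
//   // resp has size (1024 - 64 + 1) x (1024 - 64 + 1), type CV_32F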
\r
//////////////////////////////////////////////////////////////////////////////
// pyrDown

namespace cv { namespace gpu { namespace imgproc
{
    template <typename T, int cn> void pyrDown_gpu(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
}}}

void cv::gpu::pyrDown(const GpuMat& src, GpuMat& dst, int borderType, Stream& stream)
{
    using namespace cv::gpu::imgproc;

    typedef void (*func_t)(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);

    static const func_t funcs[6][4] =
    {
        {pyrDown_gpu<uchar, 1>, pyrDown_gpu<uchar, 2>, pyrDown_gpu<uchar, 3>, pyrDown_gpu<uchar, 4>},
        {pyrDown_gpu<schar, 1>, pyrDown_gpu<schar, 2>, pyrDown_gpu<schar, 3>, pyrDown_gpu<schar, 4>},
        {pyrDown_gpu<ushort, 1>, pyrDown_gpu<ushort, 2>, pyrDown_gpu<ushort, 3>, pyrDown_gpu<ushort, 4>},
        {pyrDown_gpu<short, 1>, pyrDown_gpu<short, 2>, pyrDown_gpu<short, 3>, pyrDown_gpu<short, 4>},
        {pyrDown_gpu<int, 1>, pyrDown_gpu<int, 2>, pyrDown_gpu<int, 3>, pyrDown_gpu<int, 4>},
        {pyrDown_gpu<float, 1>, pyrDown_gpu<float, 2>, pyrDown_gpu<float, 3>, pyrDown_gpu<float, 4>},
    };

    CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);

    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT || borderType == BORDER_REFLECT || borderType == BORDER_WRAP);
    int gpuBorderType;
    CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));

    dst.create((src.rows + 1) / 2, (src.cols + 1) / 2, src.type());

    funcs[src.depth()][src.channels() - 1](src, dst, gpuBorderType, StreamAccessor::getStream(stream));
}
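// Usage sketch (hypothetical): one pyramid level on a user stream.
//   cv::gpu::GpuMat level0, level1;
//   cv::gpu::Stream s;
//   cv::gpu::pyrDown(level0, level1, cv::BORDER_REFLECT101, s);
//   // level1 is ((rows + 1) / 2) x ((cols + 1) / 2)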
\r
//////////////////////////////////////////////////////////////////////////////
// pyrUp

namespace cv { namespace gpu { namespace imgproc
{
    template <typename T, int cn> void pyrUp_gpu(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);
}}}

void cv::gpu::pyrUp(const GpuMat& src, GpuMat& dst, int borderType, Stream& stream)
{
    using namespace cv::gpu::imgproc;

    typedef void (*func_t)(const DevMem2D& src, const DevMem2D& dst, int borderType, cudaStream_t stream);

    static const func_t funcs[6][4] =
    {
        {pyrUp_gpu<uchar, 1>, pyrUp_gpu<uchar, 2>, pyrUp_gpu<uchar, 3>, pyrUp_gpu<uchar, 4>},
        {pyrUp_gpu<schar, 1>, pyrUp_gpu<schar, 2>, pyrUp_gpu<schar, 3>, pyrUp_gpu<schar, 4>},
        {pyrUp_gpu<ushort, 1>, pyrUp_gpu<ushort, 2>, pyrUp_gpu<ushort, 3>, pyrUp_gpu<ushort, 4>},
        {pyrUp_gpu<short, 1>, pyrUp_gpu<short, 2>, pyrUp_gpu<short, 3>, pyrUp_gpu<short, 4>},
        {pyrUp_gpu<int, 1>, pyrUp_gpu<int, 2>, pyrUp_gpu<int, 3>, pyrUp_gpu<int, 4>},
        {pyrUp_gpu<float, 1>, pyrUp_gpu<float, 2>, pyrUp_gpu<float, 3>, pyrUp_gpu<float, 4>},
    };

    CV_Assert(src.depth() <= CV_32F && src.channels() <= 4);

    CV_Assert(borderType == BORDER_REFLECT101 || borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT || borderType == BORDER_REFLECT || borderType == BORDER_WRAP);
    int gpuBorderType;
    CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));

    dst.create(src.rows * 2, src.cols * 2, src.type());

    funcs[src.depth()][src.channels() - 1](src, dst, gpuBorderType, StreamAccessor::getStream(stream));
}
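// Usage sketch (hypothetical): pyrUp doubles both dimensions, so for
// even-sized inputs pyrUp(pyrDown(x)) restores the original size.
//   cv::gpu::GpuMat half, restored;
//   cv::gpu::Stream s;
//   cv::gpu::pyrUp(half, restored, cv::BORDER_REFLECT101, s);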
\r
//////////////////////////////////////////////////////////////////////////////
// Canny

cv::gpu::CannyBuf::CannyBuf(const GpuMat& dx_, const GpuMat& dy_) : dx(dx_), dy(dy_)
{
    CV_Assert(dx_.type() == CV_32SC1 && dy_.type() == CV_32SC1 && dx_.size() == dy_.size());

    create(dx_.size(), -1);
}

void cv::gpu::CannyBuf::create(const Size& image_size, int apperture_size)
{
    ensureSizeIsEnough(image_size, CV_32SC1, dx);
    ensureSizeIsEnough(image_size, CV_32SC1, dy);

    if (apperture_size == 3)
    {
        ensureSizeIsEnough(image_size, CV_32SC1, dx_buf);
        ensureSizeIsEnough(image_size, CV_32SC1, dy_buf);
    }
    else if (apperture_size > 0)
    {
        if (!filterDX)
            filterDX = createDerivFilter_GPU(CV_8UC1, CV_32S, 1, 0, apperture_size, BORDER_REPLICATE);
        if (!filterDY)
            filterDY = createDerivFilter_GPU(CV_8UC1, CV_32S, 0, 1, apperture_size, BORDER_REPLICATE);
    }

    ensureSizeIsEnough(image_size.height + 2, image_size.width + 2, CV_32FC1, edgeBuf);

    ensureSizeIsEnough(1, image_size.width * image_size.height, CV_16UC2, trackBuf1);
    ensureSizeIsEnough(1, image_size.width * image_size.height, CV_16UC2, trackBuf2);
}
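// Reuse sketch (hypothetical): constructing a CannyBuf once and passing it to
// repeated Canny() calls below avoids reallocating the intermediate buffers
// for every frame of the same size.
//   cv::gpu::CannyBuf buf(frameSize, 3);
//   cv::gpu::Canny(frame, buf, edges, 50.0, 150.0, 3, false);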
\r
void cv::gpu::CannyBuf::release()
{
    dx.release();
    dy.release();
    dx_buf.release();
    dy_buf.release();
    edgeBuf.release();
    trackBuf1.release();
    trackBuf2.release();
}
namespace cv { namespace gpu { namespace canny
{
    void calcSobelRowPass_gpu(PtrStep src, PtrStepi dx_buf, PtrStepi dy_buf, int rows, int cols);

    void calcMagnitude_gpu(PtrStepi dx_buf, PtrStepi dy_buf, PtrStepi dx, PtrStepi dy, PtrStepf mag, int rows, int cols, bool L2Grad);
    void calcMagnitude_gpu(PtrStepi dx, PtrStepi dy, PtrStepf mag, int rows, int cols, bool L2Grad);

    void calcMap_gpu(PtrStepi dx, PtrStepi dy, PtrStepf mag, PtrStepi map, int rows, int cols, float low_thresh, float high_thresh);

    void edgesHysteresisLocal_gpu(PtrStepi map, ushort2* st1, int rows, int cols);

    void edgesHysteresisGlobal_gpu(PtrStepi map, ushort2* st1, ushort2* st2, int rows, int cols);

    void getEdges_gpu(PtrStepi map, PtrStep dst, int rows, int cols);
}}}
namespace
{
    void CannyCaller(CannyBuf& buf, GpuMat& dst, float low_thresh, float high_thresh)
    {
        using namespace cv::gpu::canny;

        // edgeBuf serves as both the magnitude input and the label map output
        calcMap_gpu(buf.dx, buf.dy, buf.edgeBuf, buf.edgeBuf, dst.rows, dst.cols, low_thresh, high_thresh);

        edgesHysteresisLocal_gpu(buf.edgeBuf, buf.trackBuf1.ptr<ushort2>(), dst.rows, dst.cols);

        edgesHysteresisGlobal_gpu(buf.edgeBuf, buf.trackBuf1.ptr<ushort2>(), buf.trackBuf2.ptr<ushort2>(), dst.rows, dst.cols);

        getEdges_gpu(buf.edgeBuf, dst, dst.rows, dst.cols);
    }
}
void cv::gpu::Canny(const GpuMat& src, GpuMat& dst, double low_thresh, double high_thresh, int apperture_size, bool L2gradient)
{
    CannyBuf buf(src.size(), apperture_size);
    Canny(src, buf, dst, low_thresh, high_thresh, apperture_size, L2gradient);
}

void cv::gpu::Canny(const GpuMat& src, CannyBuf& buf, GpuMat& dst, double low_thresh, double high_thresh, int apperture_size, bool L2gradient)
{
    using namespace cv::gpu::canny;

    CV_Assert(TargetArchs::builtWith(SHARED_ATOMICS) && DeviceInfo().supports(SHARED_ATOMICS));
    CV_Assert(src.type() == CV_8UC1);

    if (low_thresh > high_thresh)
        std::swap(low_thresh, high_thresh);

    dst.create(src.size(), CV_8U);
    dst.setTo(Scalar::all(0));

    buf.create(src.size(), apperture_size);
    buf.edgeBuf.setTo(Scalar::all(0));

    if (apperture_size == 3)
    {
        calcSobelRowPass_gpu(src, buf.dx_buf, buf.dy_buf, src.rows, src.cols);

        calcMagnitude_gpu(buf.dx_buf, buf.dy_buf, buf.dx, buf.dy, buf.edgeBuf, src.rows, src.cols, L2gradient);
    }
    else
    {
        buf.filterDX->apply(src, buf.dx, Rect(0, 0, src.cols, src.rows));
        buf.filterDY->apply(src, buf.dy, Rect(0, 0, src.cols, src.rows));

        calcMagnitude_gpu(buf.dx, buf.dy, buf.edgeBuf, src.rows, src.cols, L2gradient);
    }

    CannyCaller(buf, dst, static_cast<float>(low_thresh), static_cast<float>(high_thresh));
}

void cv::gpu::Canny(const GpuMat& dx, const GpuMat& dy, GpuMat& dst, double low_thresh, double high_thresh, bool L2gradient)
{
    CannyBuf buf(dx, dy);
    Canny(dx, dy, buf, dst, low_thresh, high_thresh, L2gradient);
}

void cv::gpu::Canny(const GpuMat& dx, const GpuMat& dy, CannyBuf& buf, GpuMat& dst, double low_thresh, double high_thresh, bool L2gradient)
{
    using namespace cv::gpu::canny;

    CV_Assert(TargetArchs::builtWith(SHARED_ATOMICS) && DeviceInfo().supports(SHARED_ATOMICS));
    CV_Assert(dx.type() == CV_32SC1 && dy.type() == CV_32SC1 && dx.size() == dy.size());

    if (low_thresh > high_thresh)
        std::swap(low_thresh, high_thresh);

    dst.create(dx.size(), CV_8U);
    dst.setTo(Scalar::all(0));

    buf.dx = dx; buf.dy = dy;
    buf.create(dx.size(), -1);
    buf.edgeBuf.setTo(Scalar::all(0));

    calcMagnitude_gpu(dx, dy, buf.edgeBuf, dx.rows, dx.cols, L2gradient);

    CannyCaller(buf, dst, static_cast<float>(low_thresh), static_cast<float>(high_thresh));
}
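// Usage sketch (hypothetical): feeding precomputed CV_32SC1 Sobel derivatives
// to the dx/dy overload above.
//   cv::gpu::GpuMat dxMap, dyMap, edges;   // both CV_32SC1, same size
//   cv::gpu::Canny(dxMap, dyMap, edges, 50.0, 150.0, false);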
\r
#endif /* !defined (HAVE_CUDA) */