/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "internal_shared.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"

#include <cufft.h> // cufftComplex; also pulls in cuComplex.h for cuCmulf/cuConjf

namespace cv { namespace gpu { namespace device
{
    namespace imgproc
    {
        /////////////////////////////////// MeanShiftfiltering ///////////////////////////////////////////////

        texture<uchar4, 2> tex_meanshift;

        __device__ short2 do_mean_shift(int x0, int y0, unsigned char* out,
                                        size_t out_step, int cols, int rows,
                                        int sp, int sr, int maxIter, float eps)
        {
            int isr2 = sr * sr;
            uchar4 c = tex2D(tex_meanshift, x0, y0);

            // iterate meanshift procedure
            for (int iter = 0; iter < maxIter; iter++)
            {
                int count = 0;
                int s0 = 0, s1 = 0, s2 = 0, sx = 0, sy = 0;

                //mean shift: process pixels in window (p-sigmaSp)x(p+sigmaSp)
                int minx = x0 - sp;
                int miny = y0 - sp;
                int maxx = x0 + sp;
                int maxy = y0 + sp;

                for (int y = miny; y <= maxy; y++)
                {
                    int rowCount = 0;

                    for (int x = minx; x <= maxx; x++)
                    {
                        uchar4 t = tex2D(tex_meanshift, x, y);

                        int norm2 = (t.x - c.x) * (t.x - c.x) + (t.y - c.y) * (t.y - c.y) + (t.z - c.z) * (t.z - c.z);
                        if (norm2 <= isr2) // accumulate only pixels within the color radius
                        {
                            s0 += t.x; s1 += t.y; s2 += t.z;
                            sx += x; rowCount++;
                        }
                    }
                    count += rowCount;
                    sy += y * rowCount;
                }

                if (count == 0)
                    break;

                float icount = 1.f / count;
                int x1 = __float2int_rz(sx*icount);
                int y1 = __float2int_rz(sy*icount);
                s0 = __float2int_rz(s0*icount);
                s1 = __float2int_rz(s1*icount);
                s2 = __float2int_rz(s2*icount);

                int norm2 = (s0 - c.x) * (s0 - c.x) + (s1 - c.y) * (s1 - c.y) + (s2 - c.z) * (s2 - c.z);

                bool stopFlag = (x0 == x1 && y0 == y1) || (::abs(x1 - x0) + ::abs(y1 - y0) + norm2 <= eps);

                x0 = x1; y0 = y1;
                c.x = s0; c.y = s1; c.z = s2;

                if (stopFlag)
                    break;
            }

            int base = (blockIdx.y * blockDim.y + threadIdx.y) * out_step + (blockIdx.x * blockDim.x + threadIdx.x) * 4 * sizeof(uchar);
            *(uchar4*)(out + base) = c;

            return make_short2((short)x0, (short)y0);
        }

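        // A note on the loop above: each iteration computes the mean position and
        // mean color of the pixels inside the (2*sp+1) x (2*sp+1) spatial window
        // whose squared color distance to the current center is at most sr*sr,
        //
        //   x1 = round(sum(x_i)/count),  y1 = round(sum(y_i)/count),
        //
        // then re-centers the window there and stops once the spatial shift plus
        // the squared color change falls below eps (or maxIter is reached).
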
        __global__ void meanshift_kernel(unsigned char* out, size_t out_step, int cols, int rows, int sp, int sr, int maxIter, float eps)
        {
            int x0 = blockIdx.x * blockDim.x + threadIdx.x;
            int y0 = blockIdx.y * blockDim.y + threadIdx.y;

            if (x0 < cols && y0 < rows)
                do_mean_shift(x0, y0, out, out_step, cols, rows, sp, sr, maxIter, eps);
        }

        __global__ void meanshiftproc_kernel(unsigned char* outr, size_t outrstep,
                                             unsigned char* outsp, size_t outspstep,
                                             int cols, int rows,
                                             int sp, int sr, int maxIter, float eps)
        {
            int x0 = blockIdx.x * blockDim.x + threadIdx.x;
            int y0 = blockIdx.y * blockDim.y + threadIdx.y;

            if (x0 < cols && y0 < rows)
            {
                int basesp = (blockIdx.y * blockDim.y + threadIdx.y) * outspstep + (blockIdx.x * blockDim.x + threadIdx.x) * 2 * sizeof(short);
                *(short2*)(outsp + basesp) = do_mean_shift(x0, y0, outr, outrstep, cols, rows, sp, sr, maxIter, eps);
            }
        }

        void meanShiftFiltering_gpu(const DevMem2Db& src, DevMem2Db dst, int sp, int sr, int maxIter, float eps, cudaStream_t stream)
        {
            dim3 grid(1, 1, 1);
            dim3 threads(32, 8, 1);
            grid.x = divUp(src.cols, threads.x);
            grid.y = divUp(src.rows, threads.y);

            cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
            cudaSafeCall( cudaBindTexture2D( 0, tex_meanshift, src.data, desc, src.cols, src.rows, src.step ) );

            meanshift_kernel<<< grid, threads, 0, stream >>>( dst.data, dst.step, dst.cols, dst.rows, sp, sr, maxIter, eps );
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );

            //cudaSafeCall( cudaUnbindTexture( tex_meanshift ) );
        }

        void meanShiftProc_gpu(const DevMem2Db& src, DevMem2Db dstr, DevMem2Db dstsp, int sp, int sr, int maxIter, float eps, cudaStream_t stream)
        {
            dim3 grid(1, 1, 1);
            dim3 threads(32, 8, 1);
            grid.x = divUp(src.cols, threads.x);
            grid.y = divUp(src.rows, threads.y);

            cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
            cudaSafeCall( cudaBindTexture2D( 0, tex_meanshift, src.data, desc, src.cols, src.rows, src.step ) );

            meanshiftproc_kernel<<< grid, threads, 0, stream >>>( dstr.data, dstr.step, dstsp.data, dstsp.step, dstr.cols, dstr.rows, sp, sr, maxIter, eps );
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );

            //cudaSafeCall( cudaUnbindTexture( tex_meanshift ) );
        }

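        // Host-side usage sketch (illustrative only, not part of the original file):
        // given an 8UC4 image already uploaded to the GPU, the wrappers above are
        // driven roughly like this, where d_src/d_dst are DevMem2Db views of device
        // memory of identical size:
        //
        //   meanShiftFiltering_gpu(d_src, d_dst, /*sp=*/10, /*sr=*/10,
        //                          /*maxIter=*/5, /*eps=*/1.f, /*stream=*/0);
        //
        // meanShiftProc_gpu additionally fills dstsp (a 16SC2 map) with the final
        // window center of every pixel.
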
        /////////////////////////////////// drawColorDisp ///////////////////////////////////////////////

        template <typename T>
        __device__ unsigned int cvtPixel(T d, int ndisp, float S = 1, float V = 1)
        {
            unsigned int H = ((ndisp - d) * 240) / ndisp;

            unsigned int hi = (H / 60) % 6;
            float f = H / 60.f - H / 60;
            float p = V * (1 - S);
            float q = V * (1 - f * S);
            float t = V * (1 - (1 - f) * S);

            float3 res; // res.x = B, res.y = G, res.z = R

            if (hi == 0) //R = V, G = t, B = p
            {
                res.x = p; res.y = t; res.z = V;
            }
            if (hi == 1) // R = q, G = V, B = p
            {
                res.x = p; res.y = V; res.z = q;
            }
            if (hi == 2) // R = p, G = V, B = t
            {
                res.x = t; res.y = V; res.z = p;
            }
            if (hi == 3) // R = p, G = q, B = V
            {
                res.x = V; res.y = q; res.z = p;
            }
            if (hi == 4) // R = t, G = p, B = V
            {
                res.x = V; res.y = p; res.z = t;
            }
            if (hi == 5) // R = V, G = p, B = q
            {
                res.x = q; res.y = p; res.z = V;
            }

            const unsigned int b = (unsigned int)(::max(0.f, ::min(res.x, 1.f)) * 255.f);
            const unsigned int g = (unsigned int)(::max(0.f, ::min(res.y, 1.f)) * 255.f);
            const unsigned int r = (unsigned int)(::max(0.f, ::min(res.z, 1.f)) * 255.f);
            const unsigned int a = 255U;

            return (a << 24) + (r << 16) + (g << 8) + b;
        }

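        // cvtPixel maps a disparity d to the hue H = 240*(ndisp - d)/ndisp, so small
        // disparities render blue and large ones red, then converts HSV(H, S, V) to
        // RGB via the standard sextant decomposition above and packs the result as
        // a 32-bit (a << 24 | r << 16 | g << 8 | b) value.
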
        __global__ void drawColorDisp(uchar* disp, size_t disp_step, uchar* out_image, size_t out_step, int width, int height, int ndisp)
        {
            const int x = (blockIdx.x * blockDim.x + threadIdx.x) << 2;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < width && y < height)
            {
                uchar4 d4 = *(uchar4*)(disp + y * disp_step + x);

                uint4 res;
                res.x = cvtPixel(d4.x, ndisp);
                res.y = cvtPixel(d4.y, ndisp);
                res.z = cvtPixel(d4.z, ndisp);
                res.w = cvtPixel(d4.w, ndisp);

                uint4* line = (uint4*)(out_image + y * out_step);
                line[x >> 2] = res;
            }
        }

        __global__ void drawColorDisp(short* disp, size_t disp_step, uchar* out_image, size_t out_step, int width, int height, int ndisp)
        {
            const int x = (blockIdx.x * blockDim.x + threadIdx.x) << 1;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < width && y < height)
            {
                short2 d2 = *(short2*)(disp + y * disp_step + x);

                uint2 res;
                res.x = cvtPixel(d2.x, ndisp);
                res.y = cvtPixel(d2.y, ndisp);

                uint2* line = (uint2*)(out_image + y * out_step);
                line[x >> 1] = res;
            }
        }

        void drawColorDisp_gpu(const DevMem2Db& src, const DevMem2Db& dst, int ndisp, const cudaStream_t& stream)
        {
            dim3 threads(16, 16, 1);
            dim3 grid(1, 1, 1);
            grid.x = divUp(src.cols, threads.x << 2);
            grid.y = divUp(src.rows, threads.y);

            drawColorDisp<<<grid, threads, 0, stream>>>(src.data, src.step, dst.data, dst.step, src.cols, src.rows, ndisp);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        void drawColorDisp_gpu(const DevMem2D_<short>& src, const DevMem2Db& dst, int ndisp, const cudaStream_t& stream)
        {
            dim3 threads(32, 8, 1);
            dim3 grid(1, 1, 1);
            grid.x = divUp(src.cols, threads.x << 1);
            grid.y = divUp(src.rows, threads.y);

            drawColorDisp<<<grid, threads, 0, stream>>>(src.data, src.step / sizeof(short), dst.data, dst.step, src.cols, src.rows, ndisp);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

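        // Both kernels are vectorized along x: the 8-bit kernel colors four
        // disparities per thread (uchar4 load, uint4 store) and the 16-bit kernel
        // two (short2 load, uint2 store), which is why the grids above divide
        // src.cols by threads.x << 2 and threads.x << 1 respectively.
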
        /////////////////////////////////// reprojectImageTo3D ///////////////////////////////////////////////

        __constant__ float cq[16];

        template <typename T, typename D>
        __global__ void reprojectImageTo3D(const DevMem2D_<T> disp, PtrStep<D> xyz)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (y >= disp.rows || x >= disp.cols)
                return;

            const float qx = x * cq[ 0] + y * cq[ 1] + cq[ 3];
            const float qy = x * cq[ 4] + y * cq[ 5] + cq[ 7];
            const float qz = x * cq[ 8] + y * cq[ 9] + cq[11];
            const float qw = x * cq[12] + y * cq[13] + cq[15];

            const T d = disp(y, x);

            const float iW = 1.f / (qw + cq[14] * d);

            D v = VecTraits<D>::all(1.0f);
            v.x = (qx + cq[2] * d) * iW;
            v.y = (qy + cq[6] * d) * iW;
            v.z = (qz + cq[10] * d) * iW;

            xyz(y, x) = v;
        }

        template <typename T, typename D>
        void reprojectImageTo3D_gpu(const DevMem2Db disp, DevMem2Db xyz, const float* q, cudaStream_t stream)
        {
            dim3 block(32, 8);
            dim3 grid(divUp(disp.cols, block.x), divUp(disp.rows, block.y));

            cudaSafeCall( cudaMemcpyToSymbol(cq, q, 16 * sizeof(float)) );

            reprojectImageTo3D<T, D><<<grid, block, 0, stream>>>((DevMem2D_<T>)disp, (DevMem2D_<D>)xyz);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        template void reprojectImageTo3D_gpu<uchar, float3>(const DevMem2Db disp, DevMem2Db xyz, const float* q, cudaStream_t stream);
        template void reprojectImageTo3D_gpu<uchar, float4>(const DevMem2Db disp, DevMem2Db xyz, const float* q, cudaStream_t stream);
        template void reprojectImageTo3D_gpu<short, float3>(const DevMem2Db disp, DevMem2Db xyz, const float* q, cudaStream_t stream);
        template void reprojectImageTo3D_gpu<short, float4>(const DevMem2Db disp, DevMem2Db xyz, const float* q, cudaStream_t stream);

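        // The kernel evaluates the usual homogeneous reprojection with the 4x4
        // matrix Q stored row-major in cq:
        //
        //   [X Y Z W]^T = Q * [x y d 1]^T,   v = (X/W, Y/W, Z/W),
        //
        // where qx..qw are the disparity-independent parts of X..W and the
        // cq[2], cq[6], cq[10], cq[14] terms contribute the d-dependent parts.
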
        /////////////////////////////////////////// Corner Harris /////////////////////////////////////////////////

        texture<float, cudaTextureType2D, cudaReadModeElementType> harrisDxTex(0, cudaFilterModePoint, cudaAddressModeClamp);
        texture<float, cudaTextureType2D, cudaReadModeElementType> harrisDyTex(0, cudaFilterModePoint, cudaAddressModeClamp);

        __global__ void cornerHarris_kernel(const int block_size, const float k, DevMem2Df dst)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < dst.cols && y < dst.rows)
            {
                float a = 0.f;
                float b = 0.f;
                float c = 0.f;

                const int ibegin = y - (block_size / 2);
                const int jbegin = x - (block_size / 2);
                const int iend = ibegin + block_size;
                const int jend = jbegin + block_size;

                for (int i = ibegin; i < iend; ++i)
                {
                    for (int j = jbegin; j < jend; ++j)
                    {
                        float dx = tex2D(harrisDxTex, j, i);
                        float dy = tex2D(harrisDyTex, j, i);

                        a += dx * dx;
                        b += dx * dy;
                        c += dy * dy;
                    }
                }

                dst(y, x) = a * c - b * b - k * (a + c) * (a + c);
            }
        }

        template <typename BR, typename BC>
        __global__ void cornerHarris_kernel(const int block_size, const float k, DevMem2Df dst, const BR border_row, const BC border_col)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < dst.cols && y < dst.rows)
            {
                float a = 0.f;
                float b = 0.f;
                float c = 0.f;

                const int ibegin = y - (block_size / 2);
                const int jbegin = x - (block_size / 2);
                const int iend = ibegin + block_size;
                const int jend = jbegin + block_size;

                for (int i = ibegin; i < iend; ++i)
                {
                    const int y = border_col.idx_row(i);

                    for (int j = jbegin; j < jend; ++j)
                    {
                        const int x = border_row.idx_col(j);

                        float dx = tex2D(harrisDxTex, x, y);
                        float dy = tex2D(harrisDyTex, x, y);

                        a += dx * dx;
                        b += dx * dy;
                        c += dy * dy;
                    }
                }

                dst(y, x) = a * c - b * b - k * (a + c) * (a + c);
            }
        }

        void cornerHarris_gpu(int block_size, float k, DevMem2Df Dx, DevMem2Df Dy, DevMem2Df dst, int border_type, cudaStream_t stream)
        {
            dim3 block(32, 8);
            dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y));

            bindTexture(&harrisDxTex, Dx);
            bindTexture(&harrisDyTex, Dy);

            switch (border_type)
            {
            case BORDER_REFLECT101_GPU:
                cornerHarris_kernel<<<grid, block, 0, stream>>>(block_size, k, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows));
                break;

            case BORDER_REFLECT_GPU:
                cornerHarris_kernel<<<grid, block, 0, stream>>>(block_size, k, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows));
                break;

            case BORDER_REPLICATE_GPU:
                cornerHarris_kernel<<<grid, block, 0, stream>>>(block_size, k, dst);
                break;
            }

            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

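        // Each thread accumulates the gradient covariance matrix M = [a b; b c]
        // over a block_size x block_size window around its pixel and writes the
        // Harris response R = det(M) - k*trace(M)^2 = a*c - b*b - k*(a + c)^2.
        // The replicate-border case relies on the textures' cudaAddressModeClamp;
        // the other border modes remap indices through border_row/border_col.
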
        /////////////////////////////////////////// Corner Min Eigen Val /////////////////////////////////////////////////

        texture<float, cudaTextureType2D, cudaReadModeElementType> minEigenValDxTex(0, cudaFilterModePoint, cudaAddressModeClamp);
        texture<float, cudaTextureType2D, cudaReadModeElementType> minEigenValDyTex(0, cudaFilterModePoint, cudaAddressModeClamp);

        __global__ void cornerMinEigenVal_kernel(const int block_size, DevMem2Df dst)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < dst.cols && y < dst.rows)
            {
                float a = 0.f;
                float b = 0.f;
                float c = 0.f;

                const int ibegin = y - (block_size / 2);
                const int jbegin = x - (block_size / 2);
                const int iend = ibegin + block_size;
                const int jend = jbegin + block_size;

                for (int i = ibegin; i < iend; ++i)
                {
                    for (int j = jbegin; j < jend; ++j)
                    {
                        float dx = tex2D(minEigenValDxTex, j, i);
                        float dy = tex2D(minEigenValDyTex, j, i);

                        a += dx * dx;
                        b += dx * dy;
                        c += dy * dy;
                    }
                }

                a *= 0.5f;
                c *= 0.5f;
                dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b);
            }
        }

        template <typename BR, typename BC>
        __global__ void cornerMinEigenVal_kernel(const int block_size, DevMem2Df dst, const BR border_row, const BC border_col)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < dst.cols && y < dst.rows)
            {
                float a = 0.f;
                float b = 0.f;
                float c = 0.f;

                const int ibegin = y - (block_size / 2);
                const int jbegin = x - (block_size / 2);
                const int iend = ibegin + block_size;
                const int jend = jbegin + block_size;

                for (int i = ibegin; i < iend; ++i)
                {
                    int y = border_col.idx_row(i);

                    for (int j = jbegin; j < jend; ++j)
                    {
                        int x = border_row.idx_col(j);

                        float dx = tex2D(minEigenValDxTex, x, y);
                        float dy = tex2D(minEigenValDyTex, x, y);

                        a += dx * dx;
                        b += dx * dy;
                        c += dy * dy;
                    }
                }

                a *= 0.5f;
                c *= 0.5f;
                dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b);
            }
        }

        void cornerMinEigenVal_gpu(int block_size, DevMem2Df Dx, DevMem2Df Dy, DevMem2Df dst, int border_type, cudaStream_t stream)
        {
            dim3 block(32, 8);
            dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y));

            bindTexture(&minEigenValDxTex, Dx);
            bindTexture(&minEigenValDyTex, Dy);

            switch (border_type)
            {
            case BORDER_REFLECT101_GPU:
                cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows));
                break;

            case BORDER_REFLECT_GPU:
                cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows));
                break;

            case BORDER_REPLICATE_GPU:
                cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst);
                break;
            }

            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall(cudaDeviceSynchronize());
        }

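        // With a and c halved after accumulation, the value written is exactly the
        // smaller eigenvalue of the gradient covariance matrix:
        //
        //   lambda_min = (a + c) - sqrt((a - c)^2 + b^2)
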
        ////////////////////////////// Column Sum //////////////////////////////////////

        __global__ void column_sumKernel_32F(int cols, int rows, const PtrStepb src, const PtrStepb dst)
        {
            int x = blockIdx.x * blockDim.x + threadIdx.x;

            if (x < cols)
            {
                const unsigned char* src_data = src.data + x * sizeof(float);
                unsigned char* dst_data = dst.data + x * sizeof(float);

                float sum = 0.f;

                for (int y = 0; y < rows; ++y)
                {
                    sum += *(const float*)src_data;
                    *(float*)dst_data = sum;
                    src_data += src.step;
                    dst_data += dst.step;
                }
            }
        }

        void columnSum_32F(const DevMem2Db src, const DevMem2Db dst)
        {
            dim3 threads(256); // one thread per column; assumed block size, the original value was not preserved
            dim3 grid(divUp(src.cols, threads.x));

            column_sumKernel_32F<<<grid, threads>>>(src.cols, src.rows, src, dst);
            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }

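        // columnSum_32F assigns one thread per column and computes an inclusive
        // prefix sum down that column: dst(y, x) = src(0, x) + ... + src(y, x).
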
        //////////////////////////////////////////////////////////////////////////
        // mulSpectrums

        __global__ void mulSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, DevMem2D_<cufftComplex> c)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < c.cols && y < c.rows)
            {
                c.ptr(y)[x] = cuCmulf(a.ptr(y)[x], b.ptr(y)[x]);
            }
        }

        void mulSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, DevMem2D_<cufftComplex> c, cudaStream_t stream)
        {
            dim3 threads(256); // assumed block shape (threads.y == 1); original value not preserved
            dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y));

            mulSpectrumsKernel<<<grid, threads, 0, stream>>>(a, b, c);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        //////////////////////////////////////////////////////////////////////////
        // mulSpectrums_CONJ

        __global__ void mulSpectrumsKernel_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, DevMem2D_<cufftComplex> c)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < c.cols && y < c.rows)
            {
                c.ptr(y)[x] = cuCmulf(a.ptr(y)[x], cuConjf(b.ptr(y)[x]));
            }
        }

        void mulSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, DevMem2D_<cufftComplex> c, cudaStream_t stream)
        {
            dim3 threads(256); // assumed block shape, as above
            dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y));

            mulSpectrumsKernel_CONJ<<<grid, threads, 0, stream>>>(a, b, c);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        //////////////////////////////////////////////////////////////////////////
        // mulAndScaleSpectrums

        __global__ void mulAndScaleSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, DevMem2D_<cufftComplex> c)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < c.cols && y < c.rows)
            {
                cufftComplex v = cuCmulf(a.ptr(y)[x], b.ptr(y)[x]);
                c.ptr(y)[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale);
            }
        }

        void mulAndScaleSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, DevMem2D_<cufftComplex> c, cudaStream_t stream)
        {
            dim3 threads(256); // assumed block shape, as above
            dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y));

            mulAndScaleSpectrumsKernel<<<grid, threads, 0, stream>>>(a, b, scale, c);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

        //////////////////////////////////////////////////////////////////////////
        // mulAndScaleSpectrums_CONJ

        __global__ void mulAndScaleSpectrumsKernel_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, DevMem2D_<cufftComplex> c)
        {
            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x < c.cols && y < c.rows)
            {
                cufftComplex v = cuCmulf(a.ptr(y)[x], cuConjf(b.ptr(y)[x]));
                c.ptr(y)[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale);
            }
        }

        void mulAndScaleSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, DevMem2D_<cufftComplex> c, cudaStream_t stream)
        {
            dim3 threads(256); // assumed block shape, as above
            dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y));

            mulAndScaleSpectrumsKernel_CONJ<<<grid, threads, 0, stream>>>(a, b, scale, c);
            cudaSafeCall( cudaGetLastError() );

            if (stream == 0)
                cudaSafeCall( cudaDeviceSynchronize() );
        }

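        // All four kernels above are per-element products in the frequency domain:
        // cuCmulf computes (a.x + i*a.y)(b.x + i*b.y) and cuConjf flips the sign of
        // the imaginary part, so the _CONJ variants compute a * conj(b), turning a
        // convolution into a correlation. The scaled variants are typically used to
        // fold the 1/(rows*cols) normalization of an inverse cuFFT pass into the
        // multiply instead of launching a separate scaling kernel.
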
        //////////////////////////////////////////////////////////////////////////
        // buildWarpMaps

        // TODO use intrinsics like __sinf and so on

        namespace build_warp_maps
        {
            __constant__ float ck_rinv[9];
            __constant__ float cr_kinv[9];
            __constant__ float ct[3];
            __constant__ float cscale;
        }

        class PlaneMapper
        {
        public:
            static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
            {
                using namespace build_warp_maps;

                float x_ = u / cscale - ct[0];
                float y_ = v / cscale - ct[1];

                float z;
                x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * (1 - ct[2]);
                y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * (1 - ct[2]);
                z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * (1 - ct[2]);

                x /= z;
                y /= z;
            }
        };

        class CylindricalMapper
        {
        public:
            static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
            {
                using namespace build_warp_maps;

                u /= cscale;
                float x_ = ::sinf(u);
                float y_ = v / cscale;
                float z_ = ::cosf(u);

                float z;
                x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
                y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
                z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;

                if (z > 0) { x /= z; y /= z; }
                else x = y = -1;
            }
        };

        class SphericalMapper
        {
        public:
            static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
            {
                using namespace build_warp_maps;

                u /= cscale;
                v /= cscale;

                float sinv = ::sinf(v);
                float x_ = sinv * ::sinf(u);
                float y_ = -::cosf(v);
                float z_ = sinv * ::cosf(u);

                float z;
                x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
                y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
                z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;

                if (z > 0) { x /= z; y /= z; }
                else x = y = -1;
            }
        };

        template <typename Mapper>
        __global__ void buildWarpMapsKernel(int tl_u, int tl_v, int cols, int rows,
                                            PtrStepf map_x, PtrStepf map_y)
        {
            int du = blockIdx.x * blockDim.x + threadIdx.x;
            int dv = blockIdx.y * blockDim.y + threadIdx.y;
            if (du < cols && dv < rows)
            {
                float u = tl_u + du;
                float v = tl_v + dv;
                float x, y;
                Mapper::mapBackward(u, v, x, y);
                map_x.ptr(dv)[du] = x;
                map_y.ptr(dv)[du] = y;
            }
        }

        void buildWarpPlaneMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,
                                const float k_rinv[9], const float r_kinv[9], const float t[3],
                                float scale, cudaStream_t stream)
        {
            cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
            cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
            cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ct, t, 3*sizeof(float)));
            cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));

            int cols = map_x.cols;
            int rows = map_x.rows;

            dim3 threads(32, 8);
            dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));

            buildWarpMapsKernel<PlaneMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
            cudaSafeCall(cudaGetLastError());

            cudaSafeCall(cudaDeviceSynchronize());
        }

        void buildWarpCylindricalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,
                                      const float k_rinv[9], const float r_kinv[9], float scale,
                                      cudaStream_t stream)
        {
            cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
            cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
            cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));

            int cols = map_x.cols;
            int rows = map_x.rows;

            dim3 threads(32, 8);
            dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));

            buildWarpMapsKernel<CylindricalMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
            cudaSafeCall(cudaGetLastError());

            cudaSafeCall(cudaDeviceSynchronize());
        }

        void buildWarpSphericalMaps(int tl_u, int tl_v, DevMem2Df map_x, DevMem2Df map_y,
                                    const float k_rinv[9], const float r_kinv[9], float scale,
                                    cudaStream_t stream)
        {
            cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
            cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
            cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));

            int cols = map_x.cols;
            int rows = map_x.rows;

            dim3 threads(32, 8);
            dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));

            buildWarpMapsKernel<SphericalMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
            cudaSafeCall(cudaGetLastError());

            cudaSafeCall(cudaDeviceSynchronize());
        }

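        // The three builders fill backward maps suitable for remap(): for each
        // destination pixel (u, v), offset by the tile's top-left corner
        // (tl_u, tl_v), the mapper unprojects the warped-surface point and applies
        // K*R^-1 (ck_rinv) to obtain the source coordinates (x, y). In the
        // reconstruction above, cylindrical/spherical points behind the camera
        // (z <= 0) map to (-1, -1), which remap treats as out of range.
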
        //////////////////////////////////////////////////////////////////////////
        // filter2D

        #define FILTER2D_MAX_KERNEL_SIZE 16

        __constant__ float c_filter2DKernel[FILTER2D_MAX_KERNEL_SIZE * FILTER2D_MAX_KERNEL_SIZE];

        template <class SrcT, typename D>
        __global__ void filter2D(const SrcT src, DevMem2D_<D> dst, const int kWidth, const int kHeight, const int anchorX, const int anchorY)
        {
            typedef typename TypeVec<float, VecTraits<D>::cn>::vec_type sum_t;

            const int x = blockIdx.x * blockDim.x + threadIdx.x;
            const int y = blockIdx.y * blockDim.y + threadIdx.y;

            if (x >= dst.cols || y >= dst.rows)
                return;

            sum_t res = VecTraits<sum_t>::all(0);
            int kInd = 0;

            for (int i = 0; i < kHeight; ++i)
            {
                for (int j = 0; j < kWidth; ++j)
                    res = res + src(y - anchorY + i, x - anchorX + j) * c_filter2DKernel[kInd++];
            }

            dst(y, x) = saturate_cast<D>(res);
        }

        template <typename T, typename D, template <typename> class Brd> struct Filter2DCaller;

        #define IMPLEMENT_FILTER2D_TEX_READER(type) \
            texture< type , cudaTextureType2D, cudaReadModeElementType> tex_filter2D_ ## type (0, cudaFilterModePoint, cudaAddressModeClamp); \
            struct tex_filter2D_ ## type ## _reader \
            { \
                typedef type elem_type; \
                typedef int index_type; \
                const int xoff; \
                const int yoff; \
                tex_filter2D_ ## type ## _reader (int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \
                __device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
                { \
                    return tex2D(tex_filter2D_ ## type , x + xoff, y + yoff); \
                } \
            }; \
            template <typename D, template <typename> class Brd> struct Filter2DCaller< type , D, Brd> \
            { \
                static void call(const DevMem2D_< type > srcWhole, int xoff, int yoff, DevMem2D_<D> dst, \
                    int kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, cudaStream_t stream) \
                { \
                    typedef typename TypeVec<float, VecTraits< type >::cn>::vec_type work_type; \
                    dim3 block(16, 16); \
                    dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
                    bindTexture(&tex_filter2D_ ## type , srcWhole); \
                    tex_filter2D_ ## type ## _reader texSrc(xoff, yoff); \
                    Brd<work_type> brd(dst.rows, dst.cols, VecTraits<work_type>::make(borderValue)); \
                    BorderReader< tex_filter2D_ ## type ## _reader, Brd<work_type> > brdSrc(texSrc, brd); \
                    filter2D<<<grid, block, 0, stream>>>(brdSrc, dst, kWidth, kHeight, anchorX, anchorY); \
                    cudaSafeCall( cudaGetLastError() ); \
                    if (stream == 0) \
                        cudaSafeCall( cudaDeviceSynchronize() ); \
                } \
            };

        IMPLEMENT_FILTER2D_TEX_READER(uchar);
        IMPLEMENT_FILTER2D_TEX_READER(uchar4);

        IMPLEMENT_FILTER2D_TEX_READER(ushort);
        IMPLEMENT_FILTER2D_TEX_READER(ushort4);

        IMPLEMENT_FILTER2D_TEX_READER(float);
        IMPLEMENT_FILTER2D_TEX_READER(float4);

        #undef IMPLEMENT_FILTER2D_TEX_READER

        template <typename T, typename D>
        void filter2D_gpu(DevMem2Db srcWhole, int ofsX, int ofsY, DevMem2Db dst,
                          int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel,
                          int borderMode, const float* borderValue, cudaStream_t stream)
        {
            typedef void (*func_t)(const DevMem2D_<T> srcWhole, int xoff, int yoff, DevMem2D_<D> dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, cudaStream_t stream);
            static const func_t funcs[] =
            {
                Filter2DCaller<T, D, BrdReflect101>::call,
                Filter2DCaller<T, D, BrdReplicate>::call,
                Filter2DCaller<T, D, BrdConstant>::call,
                Filter2DCaller<T, D, BrdReflect>::call,
                Filter2DCaller<T, D, BrdWrap>::call
            };

            cudaSafeCall(cudaMemcpyToSymbol(c_filter2DKernel, kernel, kWidth * kHeight * sizeof(float), 0, cudaMemcpyDeviceToDevice) );

            funcs[borderMode](static_cast< DevMem2D_<T> >(srcWhole), ofsX, ofsY, static_cast< DevMem2D_<D> >(dst), kWidth, kHeight, anchorX, anchorY, borderValue, stream);
        }

        template void filter2D_gpu<uchar, uchar>(DevMem2Db srcWhole, int ofsX, int ofsY, DevMem2Db dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
        template void filter2D_gpu<uchar4, uchar4>(DevMem2Db srcWhole, int ofsX, int ofsY, DevMem2Db dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
        template void filter2D_gpu<ushort, ushort>(DevMem2Db srcWhole, int ofsX, int ofsY, DevMem2Db dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
        template void filter2D_gpu<ushort4, ushort4>(DevMem2Db srcWhole, int ofsX, int ofsY, DevMem2Db dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
        template void filter2D_gpu<float, float>(DevMem2Db srcWhole, int ofsX, int ofsY, DevMem2Db dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
        template void filter2D_gpu<float4, float4>(DevMem2Db srcWhole, int ofsX, int ofsY, DevMem2Db dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);

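        // Note that borderMode indexes funcs[] directly, so callers must use the
        // ordering fixed above: 0 = reflect101, 1 = replicate, 2 = constant,
        // 3 = reflect, 4 = wrap.
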
    } // namespace imgproc
}}} // namespace cv { namespace gpu { namespace device