1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #if !defined CUDA_DISABLER
45 #include "internal_shared.hpp"
46 #include "opencv2/gpu/device/vec_traits.hpp"
47 #include "opencv2/gpu/device/vec_math.hpp"
48 #include "opencv2/gpu/device/saturate_cast.hpp"
49 #include "opencv2/gpu/device/border_interpolate.hpp"
51 namespace cv { namespace gpu { namespace device
55 /////////////////////////////////// MeanShiftfiltering ///////////////////////////////////////////////
// 2D texture through which the 8UC4 source image is read during mean shift.
// Bound by meanShiftFiltering_gpu / meanShiftProc_gpu before each launch.
57 texture<uchar4, 2> tex_meanshift;

// Iterative mean-shift for the pixel at (x0, y0): repeatedly averages the
// spatial/color neighborhood (spatial window radius sp, color radius sr) until
// convergence (eps) or maxIter iterations, writes the converged color into
// `out` (byte pitch out_step, one uchar4 per pixel) and returns the converged
// spatial coordinate.
// NOTE(review): this listing elides several original lines — the isr2 setup,
// the minx/miny/maxx/maxy window bounds, the norm2<=isr2 color test, the
// count/icount accumulators, and the stopFlag break are not visible here.
59 __device__ short2 do_mean_shift(int x0, int y0, unsigned char* out,
60 size_t out_step, int cols, int rows,
61 int sp, int sr, int maxIter, float eps)
// Current (shifting) color sample; seeded from the starting pixel.
64 uchar4 c = tex2D(tex_meanshift, x0, y0 );
66 // iterate meanshift procedure
67 for( int iter = 0; iter < maxIter; iter++ )
// Accumulators: s0/s1/s2 sum the channel values, sx/sy the coordinates.
70 int s0 = 0, s1 = 0, s2 = 0, sx = 0, sy = 0;
73 //mean shift: process pixels in window (p-sigmaSp)x(p+sigmaSp)
79 for( int y = miny; y <= maxy; y++)
82 for( int x = minx; x <= maxx; x++ )
84 uchar4 t = tex2D( tex_meanshift, x, y );
// Squared color distance between the window sample and the current mean.
86 int norm2 = (t.x - c.x) * (t.x - c.x) + (t.y - c.y) * (t.y - c.y) + (t.z - c.z) * (t.z - c.z);
89 s0 += t.x; s1 += t.y; s2 += t.z;
// New window center and mean color (icount = 1/count; round toward zero).
101 int x1 = __float2int_rz(sx*icount);
102 int y1 = __float2int_rz(sy*icount);
103 s0 = __float2int_rz(s0*icount);
104 s1 = __float2int_rz(s1*icount);
105 s2 = __float2int_rz(s2*icount);
// How far the mean moved in color space this iteration.
107 int norm2 = (s0 - c.x) * (s0 - c.x) + (s1 - c.y) * (s1 - c.y) + (s2 - c.z) * (s2 - c.z);
// Stop when position is unchanged or the combined spatial+color shift <= eps.
109 bool stopFlag = (x0 == x1 && y0 == y1) || (::abs(x1-x0) + ::abs(y1-y0) + norm2 <= eps);
112 c.x = s0; c.y = s1; c.z = s2;
// Store the converged color at this thread's output pixel.
// NOTE(review): `base` is computed in int; could overflow for very large
// pitches/images — confirm against callers before relying on big inputs.
118 int base = (blockIdx.y * blockDim.y + threadIdx.y) * out_step + (blockIdx.x * blockDim.x + threadIdx.x) * 4 * sizeof(uchar);
119 *(uchar4*)(out + base) = c;
121 return make_short2((short)x0, (short)y0);
// One thread per pixel: run mean-shift filtering for (x0, y0) and let
// do_mean_shift write the converged color into `out`.
124 __global__ void meanshift_kernel(unsigned char* out, size_t out_step, int cols, int rows, int sp, int sr, int maxIter, float eps )
126 int x0 = blockIdx.x * blockDim.x + threadIdx.x;
127 int y0 = blockIdx.y * blockDim.y + threadIdx.y;
// Guard against the grid overhang at the right/bottom edges.
129 if( x0 < cols && y0 < rows )
130 do_mean_shift(x0, y0, out, out_step, cols, rows, sp, sr, maxIter, eps);
// Mean-shift "proc" variant: besides the filtered color image (outr), also
// records the converged spatial position of each pixel as a short2 in outsp.
// NOTE(review): the `int cols, int rows,` parameter line (original line 135)
// is elided from this listing; the body references cols/rows.
133 __global__ void meanshiftproc_kernel(unsigned char* outr, size_t outrstep,
134 unsigned char* outsp, size_t outspstep,
136 int sp, int sr, int maxIter, float eps)
138 int x0 = blockIdx.x * blockDim.x + threadIdx.x;
139 int y0 = blockIdx.y * blockDim.y + threadIdx.y;
141 if( x0 < cols && y0 < rows )
// Byte offset of this pixel's short2 slot in the position map.
143 int basesp = (blockIdx.y * blockDim.y + threadIdx.y) * outspstep + (blockIdx.x * blockDim.x + threadIdx.x) * 2 * sizeof(short);
144 *(short2*)(outsp + basesp) = do_mean_shift(x0, y0, outr, outrstep, cols, rows, sp, sr, maxIter, eps);
// Host wrapper: bind the 8UC4 source to tex_meanshift and launch
// meanshift_kernel over dst. Synchronizes when no stream is given.
// NOTE(review): the `dim3 grid(...)` declaration is elided from this listing,
// and the sync below is presumably guarded by `if (stream == 0)` on an elided
// line — confirm against the full file.
148 void meanShiftFiltering_gpu(const PtrStepSzb& src, PtrStepSzb dst, int sp, int sr, int maxIter, float eps, cudaStream_t stream)
151 dim3 threads(32, 8, 1);
152 grid.x = divUp(src.cols, threads.x);
153 grid.y = divUp(src.rows, threads.y);
155 cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
156 cudaSafeCall( cudaBindTexture2D( 0, tex_meanshift, src.data, desc, src.cols, src.rows, src.step ) );
158 meanshift_kernel<<< grid, threads, 0, stream >>>( dst.data, dst.step, dst.cols, dst.rows, sp, sr, maxIter, eps );
159 cudaSafeCall( cudaGetLastError() );
162 cudaSafeCall( cudaDeviceSynchronize() );
164 //cudaSafeCall( cudaUnbindTexture( tex_meanshift ) );
// Host wrapper for the "proc" variant: produces both the filtered image
// (dstr) and the per-pixel converged positions (dstsp, short2 per pixel).
// NOTE(review): `dim3 grid(...)` declaration and the probable
// `if (stream == 0)` guard around the sync are elided from this listing.
167 void meanShiftProc_gpu(const PtrStepSzb& src, PtrStepSzb dstr, PtrStepSzb dstsp, int sp, int sr, int maxIter, float eps, cudaStream_t stream)
170 dim3 threads(32, 8, 1);
171 grid.x = divUp(src.cols, threads.x);
172 grid.y = divUp(src.rows, threads.y);
174 cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
175 cudaSafeCall( cudaBindTexture2D( 0, tex_meanshift, src.data, desc, src.cols, src.rows, src.step ) );
177 meanshiftproc_kernel<<< grid, threads, 0, stream >>>( dstr.data, dstr.step, dstsp.data, dstsp.step, dstr.cols, dstr.rows, sp, sr, maxIter, eps );
178 cudaSafeCall( cudaGetLastError() );
181 cudaSafeCall( cudaDeviceSynchronize() );
183 //cudaSafeCall( cudaUnbindTexture( tex_meanshift ) );
186 /////////////////////////////////// drawColorDisp ///////////////////////////////////////////////
// Map a disparity value d (0..ndisp) to a packed 0xAARRGGBB color via
// HSV->RGB conversion: hue spans 240..0 degrees (blue for small disparity,
// red for large), with saturation S and value V defaulting to 1.
// NOTE(review): the `float4 res;` declaration and the per-branch assignments
// of res inside each `if (hi == k)` are elided from this listing.
188 template <typename T>
189 __device__ unsigned int cvtPixel(T d, int ndisp, float S = 1, float V = 1)
191 unsigned int H = ((ndisp-d) * 240)/ndisp;
// Sextant index of the hue wheel.
193 unsigned int hi = (H/60) % 6;
// Fractional part of H/60 (integer division on the right is intentional).
194 float f = H/60.f - H/60;
195 float p = V * (1 - S);
196 float q = V * (1 - f * S);
197 float t = V * (1 - (1 - f) * S);
201 if (hi == 0) //R = V, G = t, B = p
208 if (hi == 1) // R = q, G = V, B = p
215 if (hi == 2) // R = p, G = V, B = t
222 if (hi == 3) // R = p, G = q, B = V
229 if (hi == 4) // R = t, G = p, B = V
236 if (hi == 5) // R = V, G = p, B = q
// Clamp channels to [0,1] and scale to bytes.
242 const unsigned int b = (unsigned int)(::max(0.f, ::min(res.x, 1.f)) * 255.f);
243 const unsigned int g = (unsigned int)(::max(0.f, ::min(res.y, 1.f)) * 255.f);
244 const unsigned int r = (unsigned int)(::max(0.f, ::min(res.z, 1.f)) * 255.f);
245 const unsigned int a = 255U;
// Pack as ARGB (BGRA byte order in memory on little-endian).
247 return (a << 24) + (r << 16) + (g << 8) + b;
// Colorize an 8-bit disparity map: each thread converts 4 consecutive
// disparities (hence x is the thread index shifted left by 2) into 4 packed
// BGRA pixels written as one uint4.
// NOTE(review): the `uint4 res;` declaration and the final store through
// `line` are elided from this listing.
250 __global__ void drawColorDisp(uchar* disp, size_t disp_step, uchar* out_image, size_t out_step, int width, int height, int ndisp)
252 const int x = (blockIdx.x * blockDim.x + threadIdx.x) << 2;
253 const int y = blockIdx.y * blockDim.y + threadIdx.y;
255 if(x < width && y < height)
// Load 4 disparities at once.
257 uchar4 d4 = *(uchar4*)(disp + y * disp_step + x);
260 res.x = cvtPixel(d4.x, ndisp);
261 res.y = cvtPixel(d4.y, ndisp);
262 res.z = cvtPixel(d4.z, ndisp);
263 res.w = cvtPixel(d4.w, ndisp);
265 uint4* line = (uint4*)(out_image + y * out_step);
// Colorize a 16-bit disparity map: each thread converts 2 consecutive
// disparities (x shifted left by 1) into 2 packed BGRA pixels (uint2).
// NOTE(review): `disp_step` here is in elements (the host wrapper divides the
// byte step by sizeof(short)); the `uint2 res;` declaration and final store
// are elided from this listing.
270 __global__ void drawColorDisp(short* disp, size_t disp_step, uchar* out_image, size_t out_step, int width, int height, int ndisp)
272 const int x = (blockIdx.x * blockDim.x + threadIdx.x) << 1;
273 const int y = blockIdx.y * blockDim.y + threadIdx.y;
275 if(x < width && y < height)
277 short2 d2 = *(short2*)(disp + y * disp_step + x);
280 res.x = cvtPixel(d2.x, ndisp);
281 res.y = cvtPixel(d2.y, ndisp);
283 uint2* line = (uint2*)(out_image + y * out_step);
// Host wrapper for the 8-bit disparity colorizer. The x grid dimension is
// divided by an extra factor of 4 because each thread handles 4 pixels.
// NOTE(review): the `dim3 grid(...)` declaration and the probable
// `if (stream == 0)` guard around the sync are elided from this listing.
289 void drawColorDisp_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int ndisp, const cudaStream_t& stream)
291 dim3 threads(16, 16, 1);
293 grid.x = divUp(src.cols, threads.x << 2);
294 grid.y = divUp(src.rows, threads.y);
296 drawColorDisp<<<grid, threads, 0, stream>>>(src.data, src.step, dst.data, dst.step, src.cols, src.rows, ndisp);
297 cudaSafeCall( cudaGetLastError() );
300 cudaSafeCall( cudaDeviceSynchronize() );
// Host wrapper for the 16-bit disparity colorizer. x grid is divided by an
// extra factor of 2 (2 pixels per thread); the byte step is converted to an
// element step for the short* kernel.
// NOTE(review): the `dim3 grid(...)` declaration and the probable
// `if (stream == 0)` guard around the sync are elided from this listing.
303 void drawColorDisp_gpu(const PtrStepSz<short>& src, const PtrStepSzb& dst, int ndisp, const cudaStream_t& stream)
305 dim3 threads(32, 8, 1);
307 grid.x = divUp(src.cols, threads.x << 1);
308 grid.y = divUp(src.rows, threads.y);
310 drawColorDisp<<<grid, threads, 0, stream>>>(src.data, src.step / sizeof(short), dst.data, dst.step, src.cols, src.rows, ndisp);
311 cudaSafeCall( cudaGetLastError() );
314 cudaSafeCall( cudaDeviceSynchronize() );
317 /////////////////////////////////// reprojectImageTo3D ///////////////////////////////////////////////
// 4x4 reprojection matrix Q, stored row-major in constant memory
// (uploaded by reprojectImageTo3D_gpu).
319 __constant__ float cq[16];

// Reproject each disparity d at (x, y) to a 3D point via homogeneous
// multiplication with Q: (X,Y,Z,W)^T = Q * (x, y, d, 1)^T, output = (X,Y,Z)/W.
// T is the disparity type (uchar/short), D the output vector type
// (float3/float4).
// NOTE(review): the `return;` after the bounds guard and the final
// `xyz(y, x) = v;` store are elided from this listing.
321 template <typename T, typename D>
322 __global__ void reprojectImageTo3D(const PtrStepSz<T> disp, PtrStep<D> xyz)
324 const int x = blockIdx.x * blockDim.x + threadIdx.x;
325 const int y = blockIdx.y * blockDim.y + threadIdx.y;
327 if (y >= disp.rows || x >= disp.cols)
// Partial products of Q with (x, y, ., 1); the d-dependent column (index 2,
// elements cq[2], cq[6], cq[10], cq[14]) is folded in below.
330 const float qx = x * cq[ 0] + y * cq[ 1] + cq[ 3];
331 const float qy = x * cq[ 4] + y * cq[ 5] + cq[ 7];
332 const float qz = x * cq[ 8] + y * cq[ 9] + cq[11];
333 const float qw = x * cq[12] + y * cq[13] + cq[15];
335 const T d = disp(y, x);
// Perspective divide by the homogeneous coordinate W.
337 const float iW = 1.f / (qw + cq[14] * d);
339 D v = VecTraits<D>::all(1.0f);
340 v.x = (qx + cq[2] * d) * iW;
341 v.y = (qy + cq[6] * d) * iW;
342 v.z = (qz + cq[10] * d) * iW;
// Host wrapper: upload Q to constant memory and launch the reprojection
// kernel, casting the untyped PtrStepSzb views to the requested T/D.
// NOTE(review): the `dim3 block(...)` declaration and the probable
// `if (stream == 0)` guard around the sync are elided from this listing.
347 template <typename T, typename D>
348 void reprojectImageTo3D_gpu(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, cudaStream_t stream)
351 dim3 grid(divUp(disp.cols, block.x), divUp(disp.rows, block.y));
353 cudaSafeCall( cudaMemcpyToSymbol(cq, q, 16 * sizeof(float)) );
355 reprojectImageTo3D<T, D><<<grid, block, 0, stream>>>((PtrStepSz<T>)disp, (PtrStepSz<D>)xyz);
356 cudaSafeCall( cudaGetLastError() );
359 cudaSafeCall( cudaDeviceSynchronize() );
// Explicit instantiations for the disparity/output type combinations the
// library exposes.
362 template void reprojectImageTo3D_gpu<uchar, float3>(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, cudaStream_t stream);
363 template void reprojectImageTo3D_gpu<uchar, float4>(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, cudaStream_t stream);
364 template void reprojectImageTo3D_gpu<short, float3>(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, cudaStream_t stream);
365 template void reprojectImageTo3D_gpu<short, float4>(const PtrStepSzb disp, PtrStepSzb xyz, const float* q, cudaStream_t stream);
367 /////////////////////////////////////////// Corner Harris /////////////////////////////////////////////////
// Textures over the horizontal/vertical derivative images (Dx, Dy); point
// sampling with clamp addressing, bound by cornerHarris_gpu. The clamp mode
// makes this kernel the BORDER_REPLICATE variant.
369 texture<float, cudaTextureType2D, cudaReadModeElementType> harrisDxTex(0, cudaFilterModePoint, cudaAddressModeClamp);
370 texture<float, cudaTextureType2D, cudaReadModeElementType> harrisDyTex(0, cudaFilterModePoint, cudaAddressModeClamp);

// Harris corner response over a block_size x block_size window centered at
// (x, y): accumulates the structure tensor (a=sum dx*dx, b=sum dx*dy,
// c=sum dy*dy) and writes det - k*trace^2.
// NOTE(review): the declarations of the accumulators a/b/c and the
// accumulation statements inside the inner loop are elided from this listing.
372 __global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst)
374 const int x = blockIdx.x * blockDim.x + threadIdx.x;
375 const int y = blockIdx.y * blockDim.y + threadIdx.y;
377 if (x < dst.cols && y < dst.rows)
// Window bounds, centered on (x, y).
383 const int ibegin = y - (block_size / 2);
384 const int jbegin = x - (block_size / 2);
385 const int iend = ibegin + block_size;
386 const int jend = jbegin + block_size;
388 for (int i = ibegin; i < iend; ++i)
390 for (int j = jbegin; j < jend; ++j)
// Out-of-range (j, i) are clamped by the texture addressing mode.
392 float dx = tex2D(harrisDxTex, j, i);
393 float dy = tex2D(harrisDyTex, j, i);
// Harris response: det(M) - k * trace(M)^2.
401 dst(y, x) = a * c - b * b - k * (a + c) * (a + c);
// Border-aware Harris variant: instead of relying on texture clamping,
// window coordinates are remapped through border objects (border_row remaps
// column indices via idx_col, border_col remaps row indices via idx_row),
// supporting reflect/reflect101 borders.
// NOTE(review): the inner `x`/`y` deliberately shadow the outer pixel
// coordinates within the loops; the accumulator declarations and the
// accumulation statements are elided from this listing.
405 template <typename BR, typename BC>
406 __global__ void cornerHarris_kernel(const int block_size, const float k, PtrStepSzf dst, const BR border_row, const BC border_col)
408 const int x = blockIdx.x * blockDim.x + threadIdx.x;
409 const int y = blockIdx.y * blockDim.y + threadIdx.y;
411 if (x < dst.cols && y < dst.rows)
417 const int ibegin = y - (block_size / 2);
418 const int jbegin = x - (block_size / 2);
419 const int iend = ibegin + block_size;
420 const int jend = jbegin + block_size;
422 for (int i = ibegin; i < iend; ++i)
// Remap the row index into the valid range per the border mode.
424 const int y = border_col.idx_row(i);
426 for (int j = jbegin; j < jend; ++j)
// Remap the column index into the valid range per the border mode.
428 const int x = border_row.idx_col(j);
430 float dx = tex2D(harrisDxTex, x, y);
431 float dy = tex2D(harrisDyTex, x, y);
// Harris response: det(M) - k * trace(M)^2 (outer x/y here).
439 dst(y, x) = a * c - b * b - k * (a + c) * (a + c);
// Host entry point for Harris corners: binds Dx/Dy textures and dispatches
// to the kernel variant matching border_type. BORDER_REPLICATE uses the
// texture's clamp addressing; reflect variants pass border objects.
// NOTE(review): the `switch (border_type)` line, the `break;` statements and
// the probable `if (stream == 0)` guard are elided from this listing.
443 void cornerHarris_gpu(int block_size, float k, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream)
446 dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y));
448 bindTexture(&harrisDxTex, Dx);
449 bindTexture(&harrisDyTex, Dy);
453 case BORDER_REFLECT101_GPU:
454 cornerHarris_kernel<<<grid, block, 0, stream>>>(block_size, k, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows));
457 case BORDER_REFLECT_GPU:
458 cornerHarris_kernel<<<grid, block, 0, stream>>>(block_size, k, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows));
461 case BORDER_REPLICATE_GPU:
462 cornerHarris_kernel<<<grid, block, 0, stream>>>(block_size, k, dst);
466 cudaSafeCall( cudaGetLastError() );
469 cudaSafeCall( cudaDeviceSynchronize() );
472 /////////////////////////////////////////// Corner Min Eigen Val /////////////////////////////////////////////////
// Textures over Dx/Dy for the min-eigenvalue response; clamp addressing
// serves as the BORDER_REPLICATE path (bound by cornerMinEigenVal_gpu).
474 texture<float, cudaTextureType2D, cudaReadModeElementType> minEigenValDxTex(0, cudaFilterModePoint, cudaAddressModeClamp);
475 texture<float, cudaTextureType2D, cudaReadModeElementType> minEigenValDyTex(0, cudaFilterModePoint, cudaAddressModeClamp);

// Minimal eigenvalue of the structure tensor over a block_size window
// (cornerMinEigenVal / good-features-to-track response).
// NOTE(review): the accumulator declarations, the accumulation statements and
// any scaling of a/b/c before the final formula are elided from this listing.
477 __global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst)
479 const int x = blockIdx.x * blockDim.x + threadIdx.x;
480 const int y = blockIdx.y * blockDim.y + threadIdx.y;
482 if (x < dst.cols && y < dst.rows)
488 const int ibegin = y - (block_size / 2);
489 const int jbegin = x - (block_size / 2);
490 const int iend = ibegin + block_size;
491 const int jend = jbegin + block_size;
493 for (int i = ibegin; i < iend; ++i)
495 for (int j = jbegin; j < jend; ++j)
// Out-of-range coordinates are clamped by the texture addressing mode.
497 float dx = tex2D(minEigenValDxTex, j, i);
498 float dy = tex2D(minEigenValDyTex, j, i);
// Smaller eigenvalue of the (possibly pre-scaled) tensor [[a, b], [b, c]].
509 dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b);
// Border-aware min-eigenvalue variant: window coordinates are remapped
// through border objects (border_row.idx_col for columns, border_col.idx_row
// for rows) instead of texture clamping.
// NOTE(review): inner `x`/`y` intentionally shadow the outer pixel
// coordinates inside the loops; accumulator declarations/accumulation are
// elided from this listing.
514 template <typename BR, typename BC>
515 __global__ void cornerMinEigenVal_kernel(const int block_size, PtrStepSzf dst, const BR border_row, const BC border_col)
517 const int x = blockIdx.x * blockDim.x + threadIdx.x;
518 const int y = blockIdx.y * blockDim.y + threadIdx.y;
520 if (x < dst.cols && y < dst.rows)
526 const int ibegin = y - (block_size / 2);
527 const int jbegin = x - (block_size / 2);
528 const int iend = ibegin + block_size;
529 const int jend = jbegin + block_size;
531 for (int i = ibegin; i < iend; ++i)
// Row index remapped per the border mode.
533 int y = border_col.idx_row(i);
535 for (int j = jbegin; j < jend; ++j)
// Column index remapped per the border mode.
537 int x = border_row.idx_col(j);
539 float dx = tex2D(minEigenValDxTex, x, y);
540 float dy = tex2D(minEigenValDyTex, x, y);
// Smaller eigenvalue of the tensor [[a, b], [b, c]] (outer x/y here).
551 dst(y, x) = (a + c) - sqrtf((a - c) * (a - c) + b * b);
// Host entry point mirroring cornerHarris_gpu: bind Dx/Dy textures, dispatch
// on border_type (replicate = texture clamping, reflect variants use border
// objects), then check errors and sync.
// NOTE(review): the `switch (border_type)` line, `break;` statements and the
// probable `if (stream == 0)` guard are elided from this listing.
555 void cornerMinEigenVal_gpu(int block_size, PtrStepSzf Dx, PtrStepSzf Dy, PtrStepSzf dst, int border_type, cudaStream_t stream)
558 dim3 grid(divUp(Dx.cols, block.x), divUp(Dx.rows, block.y));
560 bindTexture(&minEigenValDxTex, Dx);
561 bindTexture(&minEigenValDyTex, Dy);
565 case BORDER_REFLECT101_GPU:
566 cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst, BrdRowReflect101<void>(Dx.cols), BrdColReflect101<void>(Dx.rows));
569 case BORDER_REFLECT_GPU:
570 cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst, BrdRowReflect<void>(Dx.cols), BrdColReflect<void>(Dx.rows));
573 case BORDER_REPLICATE_GPU:
574 cornerMinEigenVal_kernel<<<grid, block, 0, stream>>>(block_size, dst);
578 cudaSafeCall( cudaGetLastError() );
581 cudaSafeCall(cudaDeviceSynchronize());
584 ////////////////////////////// Column Sum //////////////////////////////////////
// Running (prefix) sum down each column of a 32F image: one thread per
// column, walking rows top to bottom and writing the cumulative sum.
// NOTE(review): the `if (x < cols)` bounds guard and the `float sum = ...`
// declaration are elided from this listing; `dst` is written through a
// non-const cast of its data pointer despite the const-qualified view.
586 __global__ void column_sumKernel_32F(int cols, int rows, const PtrStepb src, const PtrStepb dst)
588 int x = blockIdx.x * blockDim.x + threadIdx.x;
// Byte pointers to the top of this thread's column.
592 const unsigned char* src_data = src.data + x * sizeof(float);
593 unsigned char* dst_data = dst.data + x * sizeof(float);
596 for (int y = 0; y < rows; ++y)
598 sum += *(const float*)src_data;
599 *(float*)dst_data = sum;
// Advance one row (steps are byte pitches).
600 src_data += src.step;
601 dst_data += dst.step;
// Host wrapper: launch one thread per column on the default stream, then
// check for launch errors and synchronize.
// NOTE(review): the `dim3 threads(...)` declaration is elided from this
// listing; the 1-D grid only covers columns.
607 void columnSum_32F(const PtrStepSzb src, const PtrStepSzb dst)
610 dim3 grid(divUp(src.cols, threads.x));
612 column_sumKernel_32F<<<grid, threads>>>(src.cols, src.rows, src, dst);
613 cudaSafeCall( cudaGetLastError() );
615 cudaSafeCall( cudaDeviceSynchronize() );
619 //////////////////////////////////////////////////////////////////////////
// Element-wise complex product of two spectra: c(y, x) = a(y, x) * b(y, x).
// One thread per output element; out-of-range threads do nothing.
__global__ void mulSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    if (row < c.rows && col < c.cols)
        c.ptr(row)[col] = cuCmulf(a.ptr(row)[col], b.ptr(row)[col]);
}
// Host wrapper for element-wise spectrum multiplication.
// NOTE(review): the `dim3 threads(...)` declaration and the probable
// `if (stream == 0)` guard around the sync are elided from this listing.
635 void mulSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream)
638 dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y));
640 mulSpectrumsKernel<<<grid, threads, 0, stream>>>(a, b, c);
641 cudaSafeCall( cudaGetLastError() );
644 cudaSafeCall( cudaDeviceSynchronize() );
649 //////////////////////////////////////////////////////////////////////////
// Element-wise product with the conjugate of the second spectrum:
// c(y, x) = a(y, x) * conj(b(y, x)) — the cross-correlation form.
__global__ void mulSpectrumsKernel_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    if (row < c.rows && col < c.cols)
        c.ptr(row)[col] = cuCmulf(a.ptr(row)[col], cuConjf(b.ptr(row)[col]));
}
// Host wrapper for the conjugate spectrum product.
// NOTE(review): the `dim3 threads(...)` declaration and the probable
// `if (stream == 0)` guard around the sync are elided from this listing.
665 void mulSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream)
668 dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y));
670 mulSpectrumsKernel_CONJ<<<grid, threads, 0, stream>>>(a, b, c);
671 cudaSafeCall( cudaGetLastError() );
674 cudaSafeCall( cudaDeviceSynchronize() );
679 //////////////////////////////////////////////////////////////////////////
680 // mulAndScaleSpectrums
// Element-wise complex product followed by a real scale factor:
// c(y, x) = scale * (a(y, x) * b(y, x)). The scale typically carries the
// 1/N normalization of an inverse FFT.
__global__ void mulAndScaleSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    if (row < c.rows && col < c.cols)
    {
        const cufftComplex prod = cuCmulf(a.ptr(row)[col], b.ptr(row)[col]);
        c.ptr(row)[col] = make_cuFloatComplex(cuCrealf(prod) * scale, cuCimagf(prod) * scale);
    }
}
// Host wrapper for the scaled spectrum product.
// NOTE(review): the `dim3 threads(...)` declaration and the probable
// `if (stream == 0)` guard around the sync are elided from this listing.
696 void mulAndScaleSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream)
699 dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y));
701 mulAndScaleSpectrumsKernel<<<grid, threads, 0, stream>>>(a, b, scale, c);
702 cudaSafeCall( cudaGetLastError() );
705 cudaSafeCall( cudaDeviceSynchronize() );
710 //////////////////////////////////////////////////////////////////////////
711 // mulAndScaleSpectrums_CONJ
// Scaled product with the conjugate of the second spectrum:
// c(y, x) = scale * (a(y, x) * conj(b(y, x))) — scaled cross-correlation.
__global__ void mulAndScaleSpectrumsKernel_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    if (row < c.rows && col < c.cols)
    {
        const cufftComplex prod = cuCmulf(a.ptr(row)[col], cuConjf(b.ptr(row)[col]));
        c.ptr(row)[col] = make_cuFloatComplex(cuCrealf(prod) * scale, cuCimagf(prod) * scale);
    }
}
// Host wrapper for the scaled conjugate spectrum product.
// NOTE(review): the `dim3 threads(...)` declaration and the probable
// `if (stream == 0)` guard around the sync are elided from this listing.
727 void mulAndScaleSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream)
730 dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y));
732 mulAndScaleSpectrumsKernel_CONJ<<<grid, threads, 0, stream>>>(a, b, scale, c);
733 cudaSafeCall( cudaGetLastError() );
736 cudaSafeCall( cudaDeviceSynchronize() );
740 //////////////////////////////////////////////////////////////////////////
743 // TODO use intrinsics like __sinf and so on
// Constant-memory warp parameters shared by the mapper classes below:
// ck_rinv = K * R^-1, cr_kinv = R * K^-1, ct = camera translation,
// cscale = projection scale. Uploaded by the buildWarp*Maps host wrappers.
745 namespace build_warp_maps
748 __constant__ float ck_rinv[9];
749 __constant__ float cr_kinv[9];
750 __constant__ float ct[3];
751 __constant__ float cscale;

// Plane warp backward mapping: from warped coordinates (u, v) back to source
// image coordinates (x, y) via the 3x3 ck_rinv matrix.
// NOTE(review): this listing elides the enclosing `class PlaneMapper`
// declaration, the `float z;` declaration, and the final perspective divide
// (x/z, y/z) — the visible body is incomplete. The `(1 - ct[2])` factor
// applies the z-translation to the homogeneous third component; confirm
// against the full original before modifying.
758 static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
760 using namespace build_warp_maps;
// Undo the projection scale and x/y translation.
762 float x_ = u / cscale - ct[0];
763 float y_ = v / cscale - ct[1];
// Rotate back into the source camera frame: (x, y, z) = ck_rinv * (x_, y_, 1 - ct[2]).
766 x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * (1 - ct[2]);
767 y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * (1 - ct[2]);
768 z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * (1 - ct[2]);
// Cylindrical warp backward mapping: (u, v) on the cylinder -> ray
// (sin u, v, cos u), rotated by ck_rinv and projected back to the source
// image plane.
// NOTE(review): the scale division for u (presumably `u /= cscale;`) and
// the `float z;` declaration are elided from this listing.
776 class CylindricalMapper
779 static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
781 using namespace build_warp_maps;
// Direction on the unit cylinder for angle u, height v.
784 float x_ = ::sinf(u);
785 float y_ = v / cscale;
786 float z_ = ::cosf(u);
// Rotate into the source camera frame.
789 x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
790 y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
791 z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;
// Perspective divide only for points in front of the camera.
793 if (z > 0) { x /= z; y /= z; }
// Spherical warp backward mapping: (u, v) as azimuth/inclination -> unit
// sphere direction, rotated by ck_rinv and projected to the source image.
// NOTE(review): the scale divisions for u and v (presumably `u /= cscale;`
// `v /= cscale;`) and the `float z;` declaration are elided from this
// listing.
799 class SphericalMapper
802 static __device__ __forceinline__ void mapBackward(float u, float v, float &x, float &y)
804 using namespace build_warp_maps;
// Direction on the unit sphere for angles (u, v).
809 float sinv = ::sinf(v);
810 float x_ = sinv * ::sinf(u);
811 float y_ = -::cosf(v);
812 float z_ = sinv * ::cosf(u);
// Rotate into the source camera frame.
815 x = ck_rinv[0] * x_ + ck_rinv[1] * y_ + ck_rinv[2] * z_;
816 y = ck_rinv[3] * x_ + ck_rinv[4] * y_ + ck_rinv[5] * z_;
817 z = ck_rinv[6] * x_ + ck_rinv[7] * y_ + ck_rinv[8] * z_;
// Perspective divide only for points in front of the camera.
819 if (z > 0) { x /= z; y /= z; }
// Fill the remap tables: for each destination pixel (du, dv) of the tile
// whose top-left corner is (tl_u, tl_v), compute the source coordinate via
// Mapper::mapBackward and store it in map_x/map_y (for use with remap()).
// NOTE(review): the declarations of u, v, x, y (u = tl_u + du, v = tl_v + dv
// by convention) are elided from this listing.
825 template <typename Mapper>
826 __global__ void buildWarpMapsKernel(int tl_u, int tl_v, int cols, int rows,
827 PtrStepf map_x, PtrStepf map_y)
829 int du = blockIdx.x * blockDim.x + threadIdx.x;
830 int dv = blockIdx.y * blockDim.y + threadIdx.y;
831 if (du < cols && dv < rows)
836 Mapper::mapBackward(u, v, x, y);
837 map_x.ptr(dv)[du] = x;
838 map_y.ptr(dv)[du] = y;
// Upload plane-warp parameters to constant memory and build the remap
// tables with the PlaneMapper.
// NOTE(review): the `dim3 threads(...)` declaration and the probable
// `if (stream == 0)` guard are elided from this listing; also the kernel is
// launched WITHOUT the `stream` argument, so it runs on the default stream
// regardless of the parameter — confirm whether this is intentional.
843 void buildWarpPlaneMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
844 const float k_rinv[9], const float r_kinv[9], const float t[3],
845 float scale, cudaStream_t stream)
847 cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
848 cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
849 cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ct, t, 3*sizeof(float)));
850 cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
852 int cols = map_x.cols;
853 int rows = map_x.rows;
856 dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
858 buildWarpMapsKernel<PlaneMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
859 cudaSafeCall(cudaGetLastError());
861 cudaSafeCall(cudaDeviceSynchronize());
// Upload cylindrical-warp parameters and build the remap tables with the
// CylindricalMapper (no translation term for this projection).
// NOTE(review): the `cudaStream_t stream` parameter line, the
// `dim3 threads(...)` declaration and the probable `if (stream == 0)` guard
// are elided from this listing; the kernel launch omits the stream argument
// (default stream) — confirm whether this is intentional.
865 void buildWarpCylindricalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
866 const float k_rinv[9], const float r_kinv[9], float scale,
869 cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
870 cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
871 cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
873 int cols = map_x.cols;
874 int rows = map_x.rows;
877 dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
879 buildWarpMapsKernel<CylindricalMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
880 cudaSafeCall(cudaGetLastError());
882 cudaSafeCall(cudaDeviceSynchronize());
// Upload spherical-warp parameters and build the remap tables with the
// SphericalMapper.
// NOTE(review): the `cudaStream_t stream` parameter line, the
// `dim3 threads(...)` declaration and the probable `if (stream == 0)` guard
// are elided from this listing; the kernel launch omits the stream argument
// (default stream) — confirm whether this is intentional.
886 void buildWarpSphericalMaps(int tl_u, int tl_v, PtrStepSzf map_x, PtrStepSzf map_y,
887 const float k_rinv[9], const float r_kinv[9], float scale,
890 cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::ck_rinv, k_rinv, 9*sizeof(float)));
891 cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cr_kinv, r_kinv, 9*sizeof(float)));
892 cudaSafeCall(cudaMemcpyToSymbol(build_warp_maps::cscale, &scale, sizeof(float)));
894 int cols = map_x.cols;
895 int rows = map_x.rows;
898 dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
900 buildWarpMapsKernel<SphericalMapper><<<grid,threads>>>(tl_u, tl_v, cols, rows, map_x, map_y);
901 cudaSafeCall(cudaGetLastError());
903 cudaSafeCall(cudaDeviceSynchronize());
906 //////////////////////////////////////////////////////////////////////////
// Maximum supported convolution kernel side; coefficients live in constant
// memory, uploaded row-major by filter2D_gpu.
909 #define FILTER2D_MAX_KERNEL_SIZE 16
911 __constant__ float c_filter2DKernel[FILTER2D_MAX_KERNEL_SIZE * FILTER2D_MAX_KERNEL_SIZE];

// Generic 2D correlation: for each destination pixel, accumulate the
// kWidth x kHeight window (anchored at (anchorX, anchorY)) weighted by
// c_filter2DKernel. SrcT is a border-aware reader (see the tex-reader
// structs below); accumulation happens in float and is saturate-cast to D.
// NOTE(review): the `return;` after the bounds guard and the `kInd` index
// declaration (presumably `int kInd = 0;`) are elided from this listing.
913 template <class SrcT, typename D>
914 __global__ void filter2D(const SrcT src, PtrStepSz<D> dst, const int kWidth, const int kHeight, const int anchorX, const int anchorY)
// Float accumulator with the same channel count as the output type.
916 typedef typename TypeVec<float, VecTraits<D>::cn>::vec_type sum_t;
918 const int x = blockIdx.x * blockDim.x + threadIdx.x;
919 const int y = blockIdx.y * blockDim.y + threadIdx.y;
921 if (x >= dst.cols || y >= dst.rows)
924 sum_t res = VecTraits<sum_t>::all(0);
927 for (int i = 0; i < kHeight; ++i)
929 for (int j = 0; j < kWidth; ++j)
// Kernel coefficients are consumed row-major via the running kInd index.
930 res = res + src(y - anchorY + i, x - anchorX + j) * c_filter2DKernel[kInd++];
933 dst(y, x) = saturate_cast<D>(res);
// Dispatcher template, specialized per pixel type by the macro below.
936 template <typename T, typename D, template <typename> class Brd> struct Filter2DCaller;

// For each supported pixel type this macro generates:
//   1) a 2D texture over the whole source image,
//   2) a reader functor that offsets texture fetches by (xoff, yoff) so the
//      ROI origin maps to (0, 0),
//   3) a Filter2DCaller specialization that binds the texture, wraps the
//      reader in a BorderReader for the requested border policy, and launches
//      the filter2D kernel.
// NOTE(review): several continuation lines of the macro (opening braces, the
// xoff/yoff member declarations, and the probable `if (stream == 0)` guard
// around the sync) are elided from this listing — do not edit the macro
// without the full original, as every line must end with a backslash.
938 #define IMPLEMENT_FILTER2D_TEX_READER(type) \
939 texture< type , cudaTextureType2D, cudaReadModeElementType> tex_filter2D_ ## type (0, cudaFilterModePoint, cudaAddressModeClamp); \
940 struct tex_filter2D_ ## type ## _reader \
942 typedef type elem_type; \
943 typedef int index_type; \
946 tex_filter2D_ ## type ## _reader (int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \
947 __device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
949 return tex2D(tex_filter2D_ ## type , x + xoff, y + yoff); \
952 template <typename D, template <typename> class Brd> struct Filter2DCaller< type , D, Brd> \
954 static void call(const PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSz<D> dst, \
955 int kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, cudaStream_t stream) \
957 typedef typename TypeVec<float, VecTraits< type >::cn>::vec_type work_type; \
958 dim3 block(16, 16); \
959 dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
960 bindTexture(&tex_filter2D_ ## type , srcWhole); \
961 tex_filter2D_ ## type ##_reader texSrc(xoff, yoff); \
962 Brd<work_type> brd(dst.rows, dst.cols, VecTraits<work_type>::make(borderValue)); \
963 BorderReader< tex_filter2D_ ## type ##_reader, Brd<work_type> > brdSrc(texSrc, brd); \
964 filter2D<<<grid, block, 0, stream>>>(brdSrc, dst, kWidth, kHeight, anchorX, anchorY); \
965 cudaSafeCall( cudaGetLastError() ); \
967 cudaSafeCall( cudaDeviceSynchronize() ); \
// Instantiate the reader/caller machinery for each supported pixel type.
971 IMPLEMENT_FILTER2D_TEX_READER(uchar);
972 IMPLEMENT_FILTER2D_TEX_READER(uchar4);
974 IMPLEMENT_FILTER2D_TEX_READER(ushort);
975 IMPLEMENT_FILTER2D_TEX_READER(ushort4);
977 IMPLEMENT_FILTER2D_TEX_READER(float);
978 IMPLEMENT_FILTER2D_TEX_READER(float4);
980 #undef IMPLEMENT_FILTER2D_TEX_READER
// Public entry point for 2D filtering: upload the kernel coefficients to
// constant memory (device-to-device — `kernel` is a device pointer), then
// dispatch to the Filter2DCaller for the requested border mode.
// `funcs` is indexed by borderMode; the order must match the gpu module's
// BORDER_* enum (reflect101, replicate, constant, reflect, wrap).
// NOTE(review): the `if (stream == 0)`/`else` lines selecting between the
// synchronous and stream-ordered symbol copies below are elided from this
// listing.
982 template <typename T, typename D>
983 void filter2D_gpu(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst,
984 int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel,
985 int borderMode, const float* borderValue, cudaStream_t stream)
987 typedef void (*func_t)(const PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSz<D> dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, cudaStream_t stream);
988 static const func_t funcs[] =
990 Filter2DCaller<T, D, BrdReflect101>::call,
991 Filter2DCaller<T, D, BrdReplicate>::call,
992 Filter2DCaller<T, D, BrdConstant>::call,
993 Filter2DCaller<T, D, BrdReflect>::call,
994 Filter2DCaller<T, D, BrdWrap>::call
// Synchronous copy on the null stream, async copy otherwise.
998 cudaSafeCall( cudaMemcpyToSymbol(c_filter2DKernel, kernel, kWidth * kHeight * sizeof(float), 0, cudaMemcpyDeviceToDevice) );
1000 cudaSafeCall( cudaMemcpyToSymbolAsync(c_filter2DKernel, kernel, kWidth * kHeight * sizeof(float), 0, cudaMemcpyDeviceToDevice, stream) );
1002 funcs[borderMode](static_cast< PtrStepSz<T> >(srcWhole), ofsX, ofsY, static_cast< PtrStepSz<D> >(dst), kWidth, kHeight, anchorX, anchorY, borderValue, stream);
// Explicit instantiations for the supported src/dst type pairs.
1005 template void filter2D_gpu<uchar, uchar>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
1006 template void filter2D_gpu<uchar4, uchar4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
1007 template void filter2D_gpu<ushort, ushort>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
1008 template void filter2D_gpu<ushort4, ushort4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
1009 template void filter2D_gpu<float, float>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
1010 template void filter2D_gpu<float4, float4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
1011 } // namespace imgproc
1012 }}} // namespace cv { namespace gpu { namespace device {
1015 #endif /* CUDA_DISABLER */