1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include <thrust/sort.h>
44 #include "opencv2/gpu/device/common.hpp"
45 #include "opencv2/gpu/device/emulation.hpp"
47 namespace cv { namespace gpu { namespace device
// Device-global append counter shared by the kernels below. Host wrappers
// zero it with cudaMemset before each launch and copy it back afterwards to
// learn how many items the kernel emitted.
51 __device__ int g_counter;
53 ////////////////////////////////////////////////////////////////////////
// buildPointList

// Number of pixels each thread scans along its row in buildPointList.
56 const int PIXELS_PER_THREAD = 16;

// Kernel: compacts selected pixels of `src` into the flat array `list`,
// each entry packed as (y << 16) | x. Each of the 4 thread rows of a block
// stages its hits in a shared-memory queue, then the block reserves one
// contiguous span of `list` with a single atomicAdd on g_counter.
// Launch layout (see buildPointList_gpu): block = (32, 4).
// NOTE(review): this excerpt elides several statements (braces, the
// pixel-selection predicate inside the scan loop, __syncthreads() barriers,
// and the declaration of `totalSize`); the comments below cover only what
// is visible here — confirm the elided flow against the full file.
58 __global__ void buildPointList(const DevMem2Db src, unsigned int* list)
    // Per-row staging queues: 4 thread rows, each queue sized for 32 threads
    // times PIXELS_PER_THREAD candidates (worst case: every pixel selected).
60 __shared__ int s_queues[4][32 * PIXELS_PER_THREAD];
61 __shared__ int s_qsize[4];      // current fill level of each row queue
62 __shared__ int s_globStart[4];  // start offset of each row queue in `list`

    // First pixel handled by this thread; the scan loop below strides by
    // blockDim.x, so each thread covers PIXELS_PER_THREAD columns.
64 const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x;
65 const int y = blockIdx.y * blockDim.y + threadIdx.y;

    // Reset this row's queue size (presumably done by one thread per row and
    // followed by a barrier in the full source — TODO confirm).
71 s_qsize[threadIdx.y] = 0;

76 const uchar* srcRow = src.ptr(y);
77 for (int i = 0, xx = x; i < PIXELS_PER_THREAD && xx < src.cols; ++i, xx += blockDim.x)
        // Pack (y, x) into one 32-bit word; caps usable image dims at 65535.
81 const unsigned int val = (y << 16) | xx;
        // Shared-memory atomic (emulated on hardware without native support)
        // claims the next free slot in this row's queue.
82 const int qidx = Emulation::smem::atomicAdd(&s_qsize[threadIdx.y], 1);
83 s_queues[threadIdx.y][qidx] = val;

89 // let one thread reserve the space required in the global list
90 if (threadIdx.x == 0 && threadIdx.y == 0)
92 // find how many items are stored in each list
        // Exclusive prefix sum over the per-row queue sizes.
94 for (int i = 0; i < blockDim.y; ++i)
96 s_globStart[i] = totalSize;
97 totalSize += s_qsize[i];

100 // calculate the offset in the global list
        // One global atomic per block reserves room for all of its queues.
101 const int globalOffset = atomicAdd(&g_counter, totalSize);
102 for (int i = 0; i < blockDim.y; ++i)
103 s_globStart[i] += globalOffset;

108 // copy local queues to global queue
    // All 32 threads of a row cooperatively flush that row's queue.
109 const int qsize = s_qsize[threadIdx.y];
110 int gidx = s_globStart[threadIdx.y] + threadIdx.x;
111 for(int i = threadIdx.x; i < qsize; i += blockDim.x, gidx += blockDim.x)
112 list[gidx] = s_queues[threadIdx.y][i];
// Host wrapper: zeroes g_counter, launches buildPointList over `src`, and
// returns the number of points written to `list` (the value of g_counter
// after the kernel, read back by the cudaMemcpy below).
// NOTE(review): the declarations of `counterPtr` / `totalCount` and the
// return statement are elided in this excerpt.
115 int buildPointList_gpu(DevMem2Db src, unsigned int* list)
118 cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
    // Reset the device-side output counter before the launch.
120 cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );
    // 32x4 threads; grid sized so each thread covers PIXELS_PER_THREAD columns.
122 const dim3 block(32, 4);
123 const dim3 grid(divUp(src.cols, block.x * PIXELS_PER_THREAD), divUp(src.rows, block.y));
    // The kernel is shared-memory heavy (s_queues), so prefer shared over L1.
125 cudaSafeCall( cudaFuncSetCacheConfig(buildPointList, cudaFuncCachePreferShared) );
127 buildPointList<<<grid, block>>>(src, list);
128 cudaSafeCall( cudaGetLastError() );
130 cudaSafeCall( cudaDeviceSynchronize() );
    // Read back how many points the kernel enqueued.
133 cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
138 ////////////////////////////////////////////////////////////////////////
// linesAccum

// Kernel: Hough vote accumulation directly in global memory.
// One block per theta bin (n = blockIdx.x); threads stride over the packed
// point list and atomically bump the matching rho bin. `accum` carries a
// one-cell border on every side — hence the `n + 1` row and `r + 1` column
// offsets.
// NOTE(review): the declarations of sinVal/cosVal, the folding of `irho`
// into them, and the addition of `shift` to `r` are elided in this excerpt —
// confirm against the full file.
141 __global__ void linesAccumGlobal(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
143 const int n = blockIdx.x;
144 const float ang = n * theta;
    // Single-precision sin/cos of this block's angle in one call.
148 sincosf(ang, &sinVal, &cosVal);
    // Center of the rho axis; negative radii map below it.
152 const int shift = (numrho - 1) / 2;
154 int* accumRow = accum.ptr(n + 1);
155 for (int i = threadIdx.x; i < count; i += blockDim.x)
157 const unsigned int val = list[i];
        // Unpack the (y << 16) | x encoding produced by buildPointList.
159 const int x = (val & 0xFFFF);
160 const int y = (val >> 16) & 0xFFFF;
    // rho bin index, rounded to nearest.
162 int r = __float2int_rn(x * cosVal + y * sinVal);
        // Global-memory atomic: contended, but needs no shared memory.
165 ::atomicAdd(accumRow + r + 1, 1);
// Kernel: same vote accumulation as linesAccumGlobal, but votes are gathered
// in a per-block shared-memory row first and flushed to global memory once
// at the end — valid because each block owns exactly one theta row of accum.
// Launched with (numrho + 1) ints of dynamic shared memory (see linesAccum_gpu).
// NOTE(review): the body of the zero-initialization loop, the barriers, the
// sinVal/cosVal declarations, the irho scaling, and the `shift` offset of
// `r` are elided in this excerpt — confirm against the full file.
169 __global__ void linesAccumShared(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
171 extern __shared__ int smem[];
    // Cooperatively clear the shared accumulator row (numrho + 1 cells);
    // shared memory is uninitialized on entry.
173 for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x)
178 const int n = blockIdx.x;
179 const float ang = n * theta;
183 sincosf(ang, &sinVal, &cosVal);
187 const int shift = (numrho - 1) / 2;
    // Stride over the point list, voting into shared memory.
189 for (int i = threadIdx.x; i < count; i += blockDim.x)
191 const unsigned int val = list[i];
193 const int x = (val & 0xFFFF);
194 const int y = (val >> 16) & 0xFFFF;
196 int r = __float2int_rn(x * cosVal + y * sinVal);
        // Shared-memory atomic — far cheaper under contention than global.
199 Emulation::smem::atomicAdd(&smem[r + 1], 1);
    // Flush the completed row to the global accumulator (row n + 1 because
    // of the one-cell border).
204 int* accumRow = accum.ptr(n + 1);
205 for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x)
206 accumRow[i] = smem[i];
// Host wrapper: launches one block per theta row of `accum` (rows minus the
// two border rows). Uses the shared-memory kernel when the per-block vote
// buffer fits with ~1000 bytes of headroom, otherwise falls back to the
// global-memory kernel.
// NOTE(review): the `else` pairing L218/L219 with L221 is elided in this
// excerpt — confirm against the full file.
209 void linesAccum_gpu(const unsigned int* list, int count, DevMem2Di accum, float rho, float theta, size_t sharedMemPerBlock, bool has20)
    // 1024 threads per block requires compute capability 2.0+; cap at 512
    // otherwise.
211 const dim3 block(has20 ? 1024 : 512);
212 const dim3 grid(accum.rows - 2);
214 cudaSafeCall( cudaFuncSetCacheConfig(linesAccumShared, cudaFuncCachePreferShared) );
    // One int per rho bin plus one border cell.
216 size_t smemSize = (accum.cols - 1) * sizeof(int);
218 if (smemSize < sharedMemPerBlock - 1000)
219 linesAccumShared<<<grid, block, smemSize>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2);
221 linesAccumGlobal<<<grid, block>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2);
223 cudaSafeCall( cudaGetLastError() );
225 cudaSafeCall( cudaDeviceSynchronize() );
228 ////////////////////////////////////////////////////////////////////////
// linesGetResult

// Kernel: non-maximum suppression over the bordered accumulator.
// Each 32x8 block caches a tile of `accum` in shared memory; the grid
// strides by (blockDim - 2) in each dimension so tiles overlap by a 1-cell
// halo and interior threads can test their 4-neighborhood entirely from the
// tile. A cell exceeding `threshold` that is a local maximum (strict vs.
// up/left, non-strict vs. down/right to break ties between equal neighbors)
// is appended to `out`/`votes` through g_counter.
// NOTE(review): the __syncthreads() between the tile load and the compares,
// the definitions of `r` and `n`, and the `ind < maxSize` guard before the
// writes are elided in this excerpt — confirm against the full file.
231 __global__ void linesGetResult(const DevMem2Di accum, float2* out, int* votes, const int maxSize, const float rho, const float theta, const float threshold, const int numrho)
233 __shared__ int smem[8][32];
    // Global accumulator coordinates; the -2 stride creates the tile overlap.
235 const int x = blockIdx.x * (blockDim.x - 2) + threadIdx.x;
236 const int y = blockIdx.y * (blockDim.y - 2) + threadIdx.y;
238 if (x >= accum.cols || y >= accum.rows)
    // Stage this thread's accumulator cell into the tile.
241 smem[threadIdx.y][threadIdx.x] = accum(y, x);
    // Halo threads only feed the tile; they emit no maxima themselves, and
    // cells inside the accumulator border are skipped too.
247 if (threadIdx.x == 0 || threadIdx.x == blockDim.x - 1 || threadIdx.y == 0 || threadIdx.y == blockDim.y - 1 || r >= accum.cols - 2 || n >= accum.rows - 2)
250 if (smem[threadIdx.y][threadIdx.x] > threshold &&
251 smem[threadIdx.y][threadIdx.x] > smem[threadIdx.y - 1][threadIdx.x] &&
252 smem[threadIdx.y][threadIdx.x] >= smem[threadIdx.y + 1][threadIdx.x] &&
253 smem[threadIdx.y][threadIdx.x] > smem[threadIdx.y][threadIdx.x - 1] &&
254 smem[threadIdx.y][threadIdx.x] >= smem[threadIdx.y][threadIdx.x + 1])
        // Convert bin indices back to (rho, theta) space; rho bins are
        // centered around (numrho - 1) / 2.
256 const float radius = (r - (numrho - 1) * 0.5f) * rho;
257 const float angle = n * theta;
    // Claim an output slot; g_counter was zeroed by the host wrapper.
259 const int ind = ::atomicAdd(&g_counter, 1);
262 out[ind] = make_float2(radius, angle);
263 votes[ind] = smem[threadIdx.y][threadIdx.x];
// Host wrapper: runs the non-maximum-suppression kernel, reads back the
// number of detected lines, clamps it to the output capacity, and optionally
// sorts the results by vote count (descending) with Thrust.
// NOTE(review): the declarations of `counterPtr`/`totalCount`, the return
// statement, and the function's closing braces lie outside this excerpt.
268 int linesGetResult_gpu(DevMem2Di accum, float2* out, int* votes, int maxSize, float rho, float theta, float threshold, bool doSort)
271 cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );
    // Reset the device-side result counter before the launch.
273 cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );
    // Grid stride of (blockDim - 2) matches the tile halo inside linesGetResult.
275 const dim3 block(32, 8);
276 const dim3 grid(divUp(accum.cols, block.x - 2), divUp(accum.rows, block.y - 2));
278 linesGetResult<<<grid, block>>>(accum, out, votes, maxSize, rho, theta, threshold, accum.cols - 2);
279 cudaSafeCall( cudaGetLastError() );
281 cudaSafeCall( cudaDeviceSynchronize() );
284 cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );
    // The kernel may have counted more maxima than `out` can hold.
286 totalCount = ::min(totalCount, maxSize);
288 if (doSort && totalCount > 0)
    // Strongest lines first: keys are the vote counts, values the lines.
290 thrust::device_ptr<float2> outPtr(out);
291 thrust::device_ptr<int> votesPtr(votes);
292 thrust::sort_by_key(votesPtr, votesPtr + totalCount, outPtr, thrust::greater<int>());