From 72f020a8f3e79d85808c65478aedfe2f6f7c4c80 Mon Sep 17 00:00:00 2001
From: Alexey Spizhevoy
Date: Mon, 29 Nov 2010 07:18:11 +0000
Subject: [PATCH] added gpu::count_non_zero version for CC1.0, refactored gpu module a little

---
 modules/gpu/include/opencv2/gpu/gpu.hpp |  3 ++
 modules/gpu/src/arithm.cpp              | 34 ++++++++++----
 modules/gpu/src/cuda/mathfunc.cu        | 81 +++++++++++++++++++++++++++------
 modules/gpu/src/initialization.cpp      | 16 +++++++
 tests/gpu/src/arithm.cpp                | 18 ++------
 5 files changed, 117 insertions(+), 35 deletions(-)

diff --git a/modules/gpu/include/opencv2/gpu/gpu.hpp b/modules/gpu/include/opencv2/gpu/gpu.hpp
index 41017a4..dadad00 100644
--- a/modules/gpu/include/opencv2/gpu/gpu.hpp
+++ b/modules/gpu/include/opencv2/gpu/gpu.hpp
@@ -68,6 +68,9 @@ namespace cv
         CV_EXPORTS void getGpuMemInfo(size_t& free, size_t& total);
 
+        CV_EXPORTS bool hasNativeDoubleSupport(int device);
+        CV_EXPORTS bool hasAtomicsSupport(int device);
+
         //////////////////////////////// Error handling ////////////////////////
 
         CV_EXPORTS void error(const char *error_string, const char *file, const int line, const char *func);
diff --git a/modules/gpu/src/arithm.cpp b/modules/gpu/src/arithm.cpp
index 0fe9e5c..c2f668b 100644
--- a/modules/gpu/src/arithm.cpp
+++ b/modules/gpu/src/arithm.cpp
@@ -665,15 +665,33 @@ int cv::gpu::countNonZero(const GpuMat& src, GpuMat& buf)
     get_buf_size_required(buf_size.width, buf_size.height);
     buf.create(buf_size, CV_8U);
 
-    switch (src.type())
+    int device = getDevice();
+    if (hasAtomicsSupport(device))
+    {
+        switch (src.type())
+        {
+        case CV_8U: return count_non_zero_caller<unsigned char>(src, buf);
+        case CV_8S: return count_non_zero_caller<char>(src, buf);
+        case CV_16U: return count_non_zero_caller<unsigned short>(src, buf);
+        case CV_16S: return count_non_zero_caller<short>(src, buf);
+        case CV_32S: return count_non_zero_caller<int>(src, buf);
+        case CV_32F: return count_non_zero_caller<float>(src, buf);
+        case CV_64F:
+            if (hasNativeDoubleSupport(device))
+                return count_non_zero_caller<double>(src, buf);
+        }
+    }
+    else
     {
-    case CV_8U: return count_non_zero_caller<unsigned char>(src, buf);
-    case CV_8S: return count_non_zero_caller<char>(src, buf);
-    case CV_16U: return count_non_zero_caller<unsigned short>(src, buf);
-    case CV_16S: return count_non_zero_caller<short>(src, buf);
-    case CV_32S: return count_non_zero_caller<int>(src, buf);
-    case CV_32F: return count_non_zero_caller<float>(src, buf);
-    case CV_64F: return count_non_zero_caller<double>(src, buf);
+        switch (src.type())
+        {
+        case CV_8U: return count_non_zero_caller_2steps<unsigned char>(src, buf);
+        case CV_8S: return count_non_zero_caller_2steps<char>(src, buf);
+        case CV_16U: return count_non_zero_caller_2steps<unsigned short>(src, buf);
+        case CV_16S: return count_non_zero_caller_2steps<short>(src, buf);
+        case CV_32S: return count_non_zero_caller_2steps<int>(src, buf);
+        case CV_32F: return count_non_zero_caller_2steps<float>(src, buf);
+        }
     }
 
     CV_Error(CV_StsBadArg, "countNonZero: unsupported type");
diff --git a/modules/gpu/src/cuda/mathfunc.cu b/modules/gpu/src/cuda/mathfunc.cu
index a70ae69..f8d65fb 100644
--- a/modules/gpu/src/cuda/mathfunc.cu
+++ b/modules/gpu/src/cuda/mathfunc.cu
@@ -908,6 +908,27 @@ namespace cv { namespace gpu { namespace mathfunc
     }
 
 
+    template <int size, typename T>
+    __device__ void sum_shared_mem(volatile T* data, const unsigned int tid)
+    {
+        T sum = data[tid];
+
+        if (size >= 512) if (tid < 256) { data[tid] = sum = sum + data[tid + 256]; } __syncthreads();
+        if (size >= 256) if (tid < 128) { data[tid] = sum = sum + data[tid + 128]; } __syncthreads();
+        if (size >= 128) if (tid < 64) { data[tid] = sum = sum + data[tid + 64]; } __syncthreads();
+
+        if (tid < 32)
+        {
+            if (size >= 64) data[tid] = sum = sum + data[tid + 32];
+            if (size >= 32) data[tid] = sum = sum + data[tid + 16];
+            if (size >= 16) data[tid] = sum = sum + data[tid + 8];
+            if (size >= 8) data[tid] = sum = sum + data[tid + 4];
+            if (size >= 4) data[tid] = sum = sum + data[tid + 2];
+            if (size >= 2) data[tid] = sum = sum + data[tid + 1];
+        }
+    }
+
+
     template <int nthreads, typename T>
     __global__ void count_non_zero_kernel(const DevMem2D src, volatile unsigned int* count)
     {
@@ -928,12 +949,9 @@
         scount[tid] = cnt;
         __syncthreads();
 
-        for (unsigned int step = nthreads / 2; step > 0; step >>= 1)
-        {
-            if (tid < step) scount[tid] += scount[tid + step];
-            __syncthreads();
-        }
+        sum_shared_mem<nthreads>(scount, tid);
 
+#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
         __shared__ bool is_last;
 
         if (tid == 0)
@@ -950,16 +968,12 @@
         if (is_last)
        {
             scount[tid] = tid < gridDim.x * gridDim.y ? count[tid] : 0;
-
-            for (unsigned int step = nthreads / 2; step > 0; step >>= 1)
-            {
-                if (tid < step) scount[tid] += scount[tid + step];
-                __syncthreads();
-            }
-
+            sum_shared_mem<nthreads>(scount, tid);
             if (tid == 0) count[0] = scount[0];
         }
-
+#else
+        if (tid == 0) count[blockIdx.y * gridDim.x + blockIdx.x] = scount[0];
+#endif
     }
 
 
@@ -990,6 +1004,47 @@
     template int count_non_zero_caller<float>(const DevMem2D, PtrStep);
     template int count_non_zero_caller<double>(const DevMem2D, PtrStep);
 
+
+    template <int nthreads, typename T>
+    __global__ void count_non_zero_kernel_2ndstep(unsigned int* count, int size)
+    {
+        __shared__ unsigned int scount[nthreads];
+        unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
+
+        scount[tid] = tid < size ? count[tid] : 0;
+        sum_shared_mem<nthreads>(scount, tid);
+
+        if (tid == 0) count[0] = scount[0];
+    }
+
+
+    template <typename T>
+    int count_non_zero_caller_2steps(const DevMem2D src, PtrStep buf)
+    {
+        dim3 threads, grid;
+        estimate_thread_cfg(threads, grid);
+        estimate_kernel_consts(src.cols, src.rows, threads, grid);
+
+        unsigned int* count_buf = (unsigned int*)buf.ptr(0);
+
+        cudaSafeCall(cudaMemcpyToSymbol(blocks_finished, &czero, sizeof(blocks_finished)));
+        count_non_zero_kernel<256, T><<<grid, threads>>>(src, count_buf);
+        count_non_zero_kernel_2ndstep<256, T><<<1, 256>>>(count_buf, grid.x * grid.y);
+        cudaSafeCall(cudaThreadSynchronize());
+
+        unsigned int count;
+        cudaSafeCall(cudaMemcpy(&count, count_buf, sizeof(int), cudaMemcpyDeviceToHost));
+
+        return count;
+    }
+
+    template int count_non_zero_caller_2steps<unsigned char>(const DevMem2D, PtrStep);
+    template int count_non_zero_caller_2steps<char>(const DevMem2D, PtrStep);
+    template int count_non_zero_caller_2steps<unsigned short>(const DevMem2D, PtrStep);
+    template int count_non_zero_caller_2steps<short>(const DevMem2D, PtrStep);
+    template int count_non_zero_caller_2steps<int>(const DevMem2D, PtrStep);
+    template int count_non_zero_caller_2steps<float>(const DevMem2D, PtrStep);
+
 } // namespace countnonzero
 
 }}}
diff --git a/modules/gpu/src/initialization.cpp b/modules/gpu/src/initialization.cpp
index 0e05d14..fae118e 100644
--- a/modules/gpu/src/initialization.cpp
+++ b/modules/gpu/src/initialization.cpp
@@ -55,6 +55,8 @@ CV_EXPORTS int cv::gpu::getDevice() { throw_nogpu(); return 0; }
 CV_EXPORTS void cv::gpu::getComputeCapability(int /*device*/, int& /*major*/, int& /*minor*/) { throw_nogpu(); }
 CV_EXPORTS int cv::gpu::getNumberOfSMs(int /*device*/) { throw_nogpu(); return 0; }
 CV_EXPORTS void cv::gpu::getGpuMemInfo(size_t& /*free*/, size_t& /*total*/) { throw_nogpu(); }
+CV_EXPORTS bool cv::gpu::hasNativeDoubleSupport(int /*device*/) { throw_nogpu(); return false; }
+CV_EXPORTS bool cv::gpu::hasAtomicsSupport(int /*device*/) { throw_nogpu(); return false; }
 
 #else /* !defined (HAVE_CUDA) */
 
@@ -106,5 +108,19 @@ CV_EXPORTS void cv::gpu::getGpuMemInfo(size_t& free, size_t& total)
     cudaSafeCall( cudaMemGetInfo( &free, &total ) );
 }
 
+CV_EXPORTS bool cv::gpu::hasNativeDoubleSupport(int device)
+{
+    int major, minor;
+    getComputeCapability(device, major, minor);
+    return major > 1 || (major == 1 && minor >= 3);
+}
+
+CV_EXPORTS bool cv::gpu::hasAtomicsSupport(int device)
+{
+    int major, minor;
+    getComputeCapability(device, major, minor);
+    return major > 1 || (major == 1 && minor >= 1);
+}
+
 #endif
diff --git a/tests/gpu/src/arithm.cpp b/tests/gpu/src/arithm.cpp
index a2b8df5..b9f0b16 100644
--- a/tests/gpu/src/arithm.cpp
+++ b/tests/gpu/src/arithm.cpp
@@ -681,11 +681,7 @@ struct CV_GpuMinMaxTest: public CvTest
     void run(int)
     {
         int depth_end;
-        int major, minor;
-        cv::gpu::getComputeCapability(getDevice(), major, minor);
-
-        if (minor >= 1) depth_end = CV_64F; else depth_end = CV_32F;
-
+        if (cv::gpu::hasNativeDoubleSupport(cv::gpu::getDevice())) depth_end = CV_64F; else depth_end = CV_32F;
         for (int cn = 1; cn <= 4; ++cn)
             for (int depth = CV_8U; depth <= depth_end; ++depth)
             {
@@ -760,10 +756,7 @@ struct CV_GpuMinMaxLocTest: public CvTest
     void run(int)
     {
         int depth_end;
-        int major, minor;
-        cv::gpu::getComputeCapability(getDevice(), major, minor);
-
-        if (minor >= 1) depth_end = CV_64F; else depth_end = CV_32F;
+        if (cv::gpu::hasNativeDoubleSupport(cv::gpu::getDevice())) depth_end = CV_64F; else depth_end = CV_32F;
         for (int depth = CV_8U; depth <= depth_end; ++depth)
         {
             int rows = 1, cols = 3;
@@ -829,11 +822,8 @@ struct CV_GpuCountNonZeroTest: CvTest
     {
         srand(0);
         int depth_end;
-        int major, minor;
-        cv::gpu::getComputeCapability(getDevice(), major, minor);
-
-        if (minor >= 1) depth_end = CV_64F; else depth_end = CV_32F;
-        for (int depth = CV_8U; depth <= depth_end; ++depth)
+        if (cv::gpu::hasNativeDoubleSupport(cv::gpu::getDevice())) depth_end = CV_64F; else depth_end = CV_32F;
+        for (int depth = CV_8U; depth <= CV_32F; ++depth)
         {
             for (int i = 0; i < 4; ++i)
             {
-- 
2.7.4
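
P.S. For anyone reusing this pattern outside the gpu module: below is a self-contained sketch of the two-pass reduction the CC 1.0 path uses, compilable on its own with nvcc. Everything in it is illustrative only and not part of the patch: the names block_reduce, partial_count_kernel, and final_reduce_kernel, the flat 1-D indexing, and the main() harness are all made up for the example. Pass one has each block fold its stripe of the input into a single partial count and write it to global memory, so no global atomics are needed; pass two launches one block to fold the partials.

// Sketch: counting non-zero elements without global atomics.
// Hypothetical names; only the technique matches the patch.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

const int NTHREADS = 256;

// In-block tree reduction over shared memory, same shape as sum_shared_mem.
__device__ void block_reduce(volatile unsigned int* data, unsigned int tid)
{
    for (unsigned int step = NTHREADS / 2; step > 32; step >>= 1)
    {
        if (tid < step) data[tid] += data[tid + step];
        __syncthreads();
    }
    // Final warp: threads 0..31 run in lockstep on CC 1.x, so no barrier.
    if (tid < 32)
    {
        data[tid] += data[tid + 32];
        data[tid] += data[tid + 16];
        data[tid] += data[tid + 8];
        data[tid] += data[tid + 4];
        data[tid] += data[tid + 2];
        data[tid] += data[tid + 1];
    }
}

// Pass 1: each block counts non-zeros in its grid-stride slice and writes
// exactly one partial sum -- no atomics involved.
__global__ void partial_count_kernel(const int* src, int n, unsigned int* partials)
{
    __shared__ unsigned int scount[NTHREADS];
    unsigned int tid = threadIdx.x;

    unsigned int cnt = 0;
    for (int i = blockIdx.x * NTHREADS + tid; i < n; i += gridDim.x * NTHREADS)
        cnt += src[i] != 0;

    scount[tid] = cnt;
    __syncthreads();
    block_reduce(scount, tid);

    if (tid == 0) partials[blockIdx.x] = scount[0];
}

// Pass 2: a single block folds all per-block partials into partials[0].
__global__ void final_reduce_kernel(unsigned int* partials, int size)
{
    __shared__ unsigned int scount[NTHREADS];
    unsigned int tid = threadIdx.x;

    scount[tid] = tid < (unsigned int)size ? partials[tid] : 0;
    __syncthreads();
    block_reduce(scount, tid);

    if (tid == 0) partials[0] = scount[0];
}

int main()
{
    const int n = 1 << 20, blocks = 64;   // blocks must be <= NTHREADS
    std::vector<int> host(n);
    for (int i = 0; i < n; ++i) host[i] = (i % 3 == 0) ? 0 : 1;

    int* d_src = 0; unsigned int* d_partials = 0;
    cudaMalloc(&d_src, n * sizeof(int));
    cudaMalloc(&d_partials, blocks * sizeof(unsigned int));
    cudaMemcpy(d_src, host.data(), n * sizeof(int), cudaMemcpyHostToDevice);

    partial_count_kernel<<<blocks, NTHREADS>>>(d_src, n, d_partials);
    final_reduce_kernel<<<1, NTHREADS>>>(d_partials, blocks);

    unsigned int count = 0;
    cudaMemcpy(&count, d_partials, sizeof(count), cudaMemcpyDeviceToHost);
    printf("non-zero elements: %u (expected %d)\n", count, n - (n + 2) / 3);

    cudaFree(d_src); cudaFree(d_partials);
    return 0;
}

The unsynchronized final-warp step mirrors sum_shared_mem above and relies on warp-lockstep execution, which holds on the CC 1.x parts this patch targets; newer architectures would need __syncwarp() between those steps. The cost of the two-pass scheme is a second kernel launch, which is why the patch keeps the single-pass variant, gated on blocks_finished and atomic support, for CC >= 1.1.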