\r
// Performs reduction in shared memory\r
template <int size, typename T>\r
- __device__ void sum_in_smem(volatile T* data, const uint tid)\r
+ __device__ void sumInSmem(volatile T* data, const uint tid)\r
{\r
T sum = data[tid];\r
\r
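The body of this hunk is elided; for orientation, a template-unrolled shared-memory reduction of this era typically looks like the hedged sketch below (illustrative only, not the exact patch content; it assumes a power-of-two block size and relies on warp-synchronous execution for the final 32 threads, as pre-Fermi code commonly did).

    template <int size, typename T>
    __device__ void sumInSmemSketch(volatile T* data, const uint tid)
    {
        T sum = data[tid];

        // Halve the active thread count each step; __syncthreads() keeps the block in lock-step.
        if (size >= 512) { if (tid < 256) { data[tid] = sum = sum + data[tid + 256]; } __syncthreads(); }
        if (size >= 256) { if (tid < 128) { data[tid] = sum = sum + data[tid + 128]; } __syncthreads(); }
        if (size >= 128) { if (tid <  64) { data[tid] = sum = sum + data[tid +  64]; } __syncthreads(); }

        if (tid < 32)
        {
            // Within one warp the threads execute in lock-step on this hardware, so no
            // further barriers are needed (data is declared volatile for exactly this reason).
            if (size >= 64) data[tid] = sum = sum + data[tid + 32];
            if (size >= 32) data[tid] = sum = sum + data[tid + 16];
            if (size >= 16) data[tid] = sum = sum + data[tid +  8];
            if (size >=  8) data[tid] = sum = sum + data[tid +  4];
            if (size >=  4) data[tid] = sum = sum + data[tid +  2];
            if (size >=  2) data[tid] = sum = sum + data[tid +  1];
        }
    }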
\r
// Estimates good thread configuration\r
// - threads variable satisfies threads.x * threads.y == 256\r
- void estimate_thread_cfg(int cols, int rows, dim3& threads, dim3& grid)\r
+ void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)\r
{\r
threads = dim3(32, 8);\r
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));\r
\r
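The factors 8 and 32 mean each 32x8 block walks 8 columns and 32 rows of elements per thread, so one block covers a 256x256 tile. A purely illustrative worked example, with divUp assumed to be the usual round-up division:

    // divUp as used throughout these callers (assumed definition):
    //   int divUp(int total, int grain) { return (total + grain - 1) / grain; }
    //
    // For a 1920x1080 image:
    //   threads = dim3(32, 8)                            // 256 threads per block
    //   grid.x  = divUp(1920, 32 * 8) = divUp(1920, 256) = 8
    //   grid.y  = divUp(1080, 8 * 32) = divUp(1080, 256) = 5
    // i.e. 40 blocks, each responsible for a 256x256 pixel tile (edges are clamped in the kernel).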
\r
// Returns required buffer sizes\r
- void get_buf_size_required(int cols, int rows, int elem_size, int& bufcols, int& bufrows)\r
+ void getBufSizeRequired(int cols, int rows, int elem_size, int& bufcols, int& bufrows)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(cols, rows, threads, grid);\r
+ estimateThreadCfg(cols, rows, threads, grid);\r
bufcols = grid.x * grid.y * elem_size; \r
bufrows = 2;\r
}\r
\r
\r
// Estimates device constants used in the kernels for the specified thread configuration\r
- void set_kernel_consts(int cols, int rows, const dim3& threads, const dim3& grid)\r
+ void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)\r
{ \r
int twidth = divUp(divUp(cols, grid.x), threads.x);\r
int theight = divUp(divUp(rows, grid.y), threads.y);\r
\r
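These two values are the per-thread tile dimensions, presumably uploaded to __constant__ memory (the kernels later in this diff read ctheight). Continuing the 1920x1080 example above, a hedged back-of-the-envelope check:

    // With grid = 8x5 and threads = 32x8:
    //   twidth  = divUp(divUp(1920, 8), 32) = divUp(240, 32) = 8    // columns handled per thread
    //   theight = divUp(divUp(1080, 5),  8) = divUp(216,  8) = 27   // rows handled per thread
    // This is why the host must call setKernelConsts before every launch with a new image size.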
\r
template <int size, typename T>\r
- __device__ void find_min_max_in_smem(volatile T* minval, volatile T* maxval, const uint tid)\r
+ __device__ void findMinMaxInSmem(volatile T* minval, volatile T* maxval, const uint tid)\r
{\r
if (size >= 512) { if (tid < 256) { merge(tid, 256, minval, maxval); } __syncthreads(); }\r
if (size >= 256) { if (tid < 128) { merge(tid, 128, minval, maxval); } __syncthreads(); }\r
\r
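merge() itself is not shown in this hunk; a plausible shape for such a helper, for orientation only (named differently here because the real definition is elided and may differ):

    // Illustrative only: pairwise min/max merge across a stride in shared memory.
    template <typename T>
    __device__ void mergeSketch(uint tid, uint offset, volatile T* minval, volatile T* maxval)
    {
        minval[tid] = min(minval[tid], minval[tid + offset]);
        maxval[tid] = max(maxval[tid], maxval[tid + offset]);
    }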
\r
template <int nthreads, typename T, typename Mask>\r
- __global__ void min_max_kernel(const DevMem2D src, Mask mask, T* minval, T* maxval)\r
+ __global__ void minMaxKernel(const DevMem2D src, Mask mask, T* minval, T* maxval)\r
{\r
typedef typename MinMaxTypeTraits<T>::best_type best_type;\r
__shared__ best_type sminval[nthreads];\r
smaxval[tid] = mymax;\r
__syncthreads();\r
\r
- find_min_max_in_smem<nthreads, best_type>(sminval, smaxval, tid);\r
+ findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);\r
\r
if (tid == 0) \r
{\r
smaxval[tid] = maxval[idx];\r
__syncthreads();\r
\r
- find_min_max_in_smem<nthreads, best_type>(sminval, smaxval, tid);\r
+ findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);\r
\r
if (tid == 0) \r
{\r
\r
\r
template <typename T>\r
- void min_max_mask_caller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf)\r
+ void minMaxMaskCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
T* minval_buf = (T*)buf.ptr(0);\r
T* maxval_buf = (T*)buf.ptr(1);\r
\r
- min_max_kernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf);\r
+ minMaxKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf);\r
cudaSafeCall(cudaThreadSynchronize());\r
\r
T minval_, maxval_;\r
*maxval = maxval_;\r
} \r
\r
- template void min_max_mask_caller<uchar>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
- template void min_max_mask_caller<char>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
- template void min_max_mask_caller<ushort>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
- template void min_max_mask_caller<short>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
- template void min_max_mask_caller<int>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
- template void min_max_mask_caller<float>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
- template void min_max_mask_caller<double>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskCaller<uchar>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskCaller<char>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskCaller<ushort>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskCaller<short>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskCaller<int>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskCaller<float>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskCaller<double>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
\r
\r
template <typename T>\r
- void min_max_caller(const DevMem2D src, double* minval, double* maxval, PtrStep buf)\r
+ void minMaxCaller(const DevMem2D src, double* minval, double* maxval, PtrStep buf)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
T* minval_buf = (T*)buf.ptr(0);\r
T* maxval_buf = (T*)buf.ptr(1);\r
\r
- min_max_kernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf);\r
+ minMaxKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf);\r
cudaSafeCall(cudaThreadSynchronize());\r
\r
T minval_, maxval_;\r
*maxval = maxval_;\r
} \r
\r
- template void min_max_caller<uchar>(const DevMem2D, double*, double*, PtrStep);\r
- template void min_max_caller<char>(const DevMem2D, double*, double*, PtrStep);\r
- template void min_max_caller<ushort>(const DevMem2D, double*, double*, PtrStep);\r
- template void min_max_caller<short>(const DevMem2D, double*, double*, PtrStep);\r
- template void min_max_caller<int>(const DevMem2D, double*, double*, PtrStep);\r
- template void min_max_caller<float>(const DevMem2D, double*,double*, PtrStep);\r
- template void min_max_caller<double>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxCaller<uchar>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxCaller<char>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxCaller<ushort>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxCaller<short>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxCaller<int>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxCaller<float>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxCaller<double>(const DevMem2D, double*, double*, PtrStep);\r
\r
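For orientation, a hypothetical host-side sequence tying these pieces together (buffer sizing, then the typed caller). GpuMat, ensureSizeIsEnough and the CV_32F dispatch are assumptions standing in for the real cv::gpu::minMax plumbing, which sits outside this diff:

    // Hypothetical sketch, not part of the patch.
    void minMaxSketch(const cv::gpu::GpuMat& src, cv::gpu::GpuMat& buf,
                      double* minVal, double* maxVal)
    {
        using namespace cv::gpu::mathfunc::minmax;

        int bufcols, bufrows;
        getBufSizeRequired(src.cols, src.rows, (int)src.elemSize(), bufcols, bufrows);
        cv::gpu::ensureSizeIsEnough(cv::Size(bufcols, bufrows), CV_8U, buf);  // 2-row scratch buffer

        minMaxCaller<float>(src, minVal, maxVal, buf);   // assuming a CV_32F source
    }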
\r
template <int nthreads, typename T>\r
- __global__ void min_max_pass2_kernel(T* minval, T* maxval, int size)\r
+ __global__ void minMaxPass2Kernel(T* minval, T* maxval, int size)\r
{\r
typedef typename MinMaxTypeTraits<T>::best_type best_type;\r
__shared__ best_type sminval[nthreads];\r
smaxval[tid] = maxval[idx];\r
__syncthreads();\r
\r
- find_min_max_in_smem<nthreads, best_type>(sminval, smaxval, tid);\r
+ findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);\r
\r
if (tid == 0) \r
{\r
\r
\r
template <typename T>\r
- void min_max_mask_multipass_caller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf)\r
+ void minMaxMaskMultipassCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
T* minval_buf = (T*)buf.ptr(0);\r
T* maxval_buf = (T*)buf.ptr(1);\r
\r
- min_max_kernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf);\r
- min_max_pass2_kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, grid.x * grid.y);\r
+ minMaxKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf);\r
+ minMaxPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, grid.x * grid.y);\r
cudaSafeCall(cudaThreadSynchronize());\r
\r
T minval_, maxval_;\r
*maxval = maxval_;\r
}\r
\r
- template void min_max_mask_multipass_caller<uchar>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
- template void min_max_mask_multipass_caller<char>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
- template void min_max_mask_multipass_caller<ushort>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
- template void min_max_mask_multipass_caller<short>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
- template void min_max_mask_multipass_caller<int>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
- template void min_max_mask_multipass_caller<float>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskMultipassCaller<uchar>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskMultipassCaller<char>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskMultipassCaller<ushort>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskMultipassCaller<short>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskMultipassCaller<int>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
+ template void minMaxMaskMultipassCaller<float>(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
\r
\r
template <typename T>\r
- void min_max_multipass_caller(const DevMem2D src, double* minval, double* maxval, PtrStep buf)\r
+ void minMaxMultipassCaller(const DevMem2D src, double* minval, double* maxval, PtrStep buf)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
T* minval_buf = (T*)buf.ptr(0);\r
T* maxval_buf = (T*)buf.ptr(1);\r
\r
- min_max_kernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf);\r
- min_max_pass2_kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, grid.x * grid.y);\r
+ minMaxKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf);\r
+ minMaxPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, grid.x * grid.y);\r
cudaSafeCall(cudaThreadSynchronize());\r
\r
T minval_, maxval_;\r
*maxval = maxval_;\r
}\r
\r
- template void min_max_multipass_caller<uchar>(const DevMem2D, double*, double*, PtrStep);\r
- template void min_max_multipass_caller<char>(const DevMem2D, double*, double*, PtrStep);\r
- template void min_max_multipass_caller<ushort>(const DevMem2D, double*, double*, PtrStep);\r
- template void min_max_multipass_caller<short>(const DevMem2D, double*, double*, PtrStep);\r
- template void min_max_multipass_caller<int>(const DevMem2D, double*, double*, PtrStep);\r
- template void min_max_multipass_caller<float>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxMultipassCaller<uchar>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxMultipassCaller<char>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxMultipassCaller<ushort>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxMultipassCaller<short>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxMultipassCaller<int>(const DevMem2D, double*, double*, PtrStep);\r
+ template void minMaxMultipassCaller<float>(const DevMem2D, double*, double*, PtrStep);\r
\r
} // namespace minmax\r
\r
\r
// Estimates good thread configuration\r
// - threads variable satisfies threads.x * threads.y == 256\r
- void estimate_thread_cfg(int cols, int rows, dim3& threads, dim3& grid)\r
+ void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)\r
{\r
threads = dim3(32, 8);\r
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));\r
\r
\r
// Returns required buffer sizes\r
- void get_buf_size_required(int cols, int rows, int elem_size, int& b1cols, \r
+ void getBufSizeRequired(int cols, int rows, int elem_size, int& b1cols, \r
int& b1rows, int& b2cols, int& b2rows)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(cols, rows, threads, grid);\r
+ estimateThreadCfg(cols, rows, threads, grid);\r
b1cols = grid.x * grid.y * elem_size; // For values\r
b1rows = 2;\r
b2cols = grid.x * grid.y * sizeof(int); // For locations\r
\r
\r
// Estimates device constants used in the kernels for the specified thread configuration\r
- void set_kernel_consts(int cols, int rows, const dim3& threads, const dim3& grid)\r
+ void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)\r
{ \r
int twidth = divUp(divUp(cols, grid.x), threads.x);\r
int theight = divUp(divUp(rows, grid.y), threads.y);\r
\r
\r
template <int size, typename T>\r
- __device__ void find_min_max_loc_in_smem(volatile T* minval, volatile T* maxval, volatile uint* minloc, \r
- volatile uint* maxloc, const uint tid)\r
+ __device__ void findMinMaxLocInSmem(volatile T* minval, volatile T* maxval, volatile uint* minloc, \r
+ volatile uint* maxloc, const uint tid)\r
{\r
if (size >= 512) { if (tid < 256) { merge(tid, 256, minval, maxval, minloc, maxloc); } __syncthreads(); }\r
if (size >= 256) { if (tid < 128) { merge(tid, 128, minval, maxval, minloc, maxloc); } __syncthreads(); }\r
\r
\r
template <int nthreads, typename T, typename Mask>\r
- __global__ void min_max_loc_kernel(const DevMem2D src, Mask mask, T* minval, T* maxval, \r
- uint* minloc, uint* maxloc)\r
+ __global__ void minMaxLocKernel(const DevMem2D src, Mask mask, T* minval, T* maxval, \r
+ uint* minloc, uint* maxloc)\r
{\r
typedef typename MinMaxTypeTraits<T>::best_type best_type;\r
__shared__ best_type sminval[nthreads];\r
uint tid = threadIdx.y * blockDim.x + threadIdx.x;\r
\r
T mymin = numeric_limits_gpu<T>::max();\r
- T mymax = numeric_limits_gpu<T>::is_signed ? -numeric_limits_gpu<T>::max() : numeric_limits_gpu<T>::min(); \r
+ T mymax = numeric_limits_gpu<T>::is_signed ? -numeric_limits_gpu<T>::max() : \r
+ numeric_limits_gpu<T>::min(); \r
uint myminloc = 0;\r
uint mymaxloc = 0;\r
uint y_end = min(y0 + (ctheight - 1) * blockDim.y + 1, src.rows);\r
smaxloc[tid] = mymaxloc;\r
__syncthreads();\r
\r
- find_min_max_loc_in_smem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);\r
+ findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);\r
\r
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110\r
__shared__ bool is_last;\r
smaxloc[tid] = maxloc[idx];\r
__syncthreads();\r
\r
- find_min_max_loc_in_smem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);\r
+ findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);\r
\r
if (tid == 0) \r
{\r
\r
\r
template <typename T>\r
- void min_max_loc_mask_caller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, \r
- int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)\r
+ void minMaxLocMaskCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, \r
+ int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
T* minval_buf = (T*)valbuf.ptr(0);\r
T* maxval_buf = (T*)valbuf.ptr(1);\r
uint* minloc_buf = (uint*)locbuf.ptr(0);\r
uint* maxloc_buf = (uint*)locbuf.ptr(1);\r
\r
- min_max_loc_kernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf, minloc_buf, maxloc_buf);\r
+ minMaxLocKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf, \r
+ minloc_buf, maxloc_buf);\r
cudaSafeCall(cudaThreadSynchronize());\r
\r
T minval_, maxval_;\r
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;\r
}\r
\r
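The location buffers hold linearized offsets (y * src.cols + x), which the caller above unpacks into row and column; a purely illustrative worked example:

    // With src.cols == 640 and a recovered linear offset maxloc_ == 1500:
    //   maxloc[1] = 1500 / 640     = 2     // y (row)
    //   maxloc[0] = 1500 - 2 * 640 = 220   // x (column)
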
- template void min_max_loc_mask_caller<uchar>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_mask_caller<char>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_mask_caller<ushort>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_mask_caller<short>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_mask_caller<int>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_mask_caller<float>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_mask_caller<double>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskCaller<uchar>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskCaller<char>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskCaller<ushort>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskCaller<short>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskCaller<int>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskCaller<float>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskCaller<double>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
\r
\r
template <typename T>\r
- void min_max_loc_caller(const DevMem2D src, double* minval, double* maxval, \r
+ void minMaxLocCaller(const DevMem2D src, double* minval, double* maxval, \r
int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
T* minval_buf = (T*)valbuf.ptr(0);\r
T* maxval_buf = (T*)valbuf.ptr(1);\r
uint* minloc_buf = (uint*)locbuf.ptr(0);\r
uint* maxloc_buf = (uint*)locbuf.ptr(1);\r
\r
- min_max_loc_kernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf, minloc_buf, maxloc_buf);\r
+ minMaxLocKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf, \r
+ minloc_buf, maxloc_buf);\r
cudaSafeCall(cudaThreadSynchronize());\r
\r
T minval_, maxval_;\r
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;\r
}\r
\r
- template void min_max_loc_caller<uchar>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_caller<char>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_caller<ushort>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_caller<short>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_caller<int>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_caller<float>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_caller<double>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocCaller<uchar>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocCaller<char>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocCaller<ushort>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocCaller<short>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocCaller<int>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocCaller<float>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocCaller<double>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
\r
\r
// This kernel will be used only when compute capability is 1.0\r
template <int nthreads, typename T>\r
- __global__ void min_max_loc_pass2_kernel(T* minval, T* maxval, uint* minloc, uint* maxloc, int size)\r
+ __global__ void minMaxLocPass2Kernel(T* minval, T* maxval, uint* minloc, uint* maxloc, int size)\r
{\r
typedef typename MinMaxTypeTraits<T>::best_type best_type;\r
__shared__ best_type sminval[nthreads];\r
smaxloc[tid] = maxloc[idx];\r
__syncthreads();\r
\r
- find_min_max_loc_in_smem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);\r
+ findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);\r
\r
if (tid == 0) \r
{\r
\r
\r
template <typename T>\r
- void min_max_loc_mask_multipass_caller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, \r
- int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)\r
+ void minMaxLocMaskMultipassCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, \r
+ int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
T* minval_buf = (T*)valbuf.ptr(0);\r
T* maxval_buf = (T*)valbuf.ptr(1);\r
uint* minloc_buf = (uint*)locbuf.ptr(0);\r
uint* maxloc_buf = (uint*)locbuf.ptr(1);\r
\r
- min_max_loc_kernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf, minloc_buf, maxloc_buf);\r
- min_max_loc_pass2_kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);\r
+ minMaxLocKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf, \r
+ minloc_buf, maxloc_buf);\r
+ minMaxLocPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);\r
cudaSafeCall(cudaThreadSynchronize());\r
\r
T minval_, maxval_;\r
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;\r
}\r
\r
- template void min_max_loc_mask_multipass_caller<uchar>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_mask_multipass_caller<char>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_mask_multipass_caller<ushort>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_mask_multipass_caller<short>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_mask_multipass_caller<int>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_mask_multipass_caller<float>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskMultipassCaller<uchar>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskMultipassCaller<char>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskMultipassCaller<ushort>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskMultipassCaller<short>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskMultipassCaller<int>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMaskMultipassCaller<float>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
\r
\r
template <typename T>\r
- void min_max_loc_multipass_caller(const DevMem2D src, double* minval, double* maxval, \r
- int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)\r
+ void minMaxLocMultipassCaller(const DevMem2D src, double* minval, double* maxval, \r
+ int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
T* minval_buf = (T*)valbuf.ptr(0);\r
T* maxval_buf = (T*)valbuf.ptr(1);\r
uint* minloc_buf = (uint*)locbuf.ptr(0);\r
uint* maxloc_buf = (uint*)locbuf.ptr(1);\r
\r
- min_max_loc_kernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf, minloc_buf, maxloc_buf);\r
- min_max_loc_pass2_kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);\r
+ minMaxLocKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf, \r
+ minloc_buf, maxloc_buf);\r
+ minMaxLocPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);\r
cudaSafeCall(cudaThreadSynchronize());\r
\r
T minval_, maxval_;\r
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;\r
}\r
\r
- template void min_max_loc_multipass_caller<uchar>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_multipass_caller<char>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_multipass_caller<ushort>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_multipass_caller<short>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_multipass_caller<int>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
- template void min_max_loc_multipass_caller<float>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMultipassCaller<uchar>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMultipassCaller<char>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMultipassCaller<ushort>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMultipassCaller<short>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMultipassCaller<int>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
+ template void minMaxLocMultipassCaller<float>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
\r
} // namespace minmaxloc\r
\r
\r
__device__ uint blocks_finished = 0;\r
\r
- void estimate_thread_cfg(int cols, int rows, dim3& threads, dim3& grid)\r
+ void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)\r
{\r
threads = dim3(32, 8);\r
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));\r
}\r
\r
\r
- void get_buf_size_required(int cols, int rows, int& bufcols, int& bufrows)\r
+ void getBufSizeRequired(int cols, int rows, int& bufcols, int& bufrows)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(cols, rows, threads, grid);\r
+ estimateThreadCfg(cols, rows, threads, grid);\r
bufcols = grid.x * grid.y * sizeof(int);\r
bufrows = 1;\r
}\r
\r
\r
- void set_kernel_consts(int cols, int rows, const dim3& threads, const dim3& grid)\r
+ void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)\r
{ \r
int twidth = divUp(divUp(cols, grid.x), threads.x);\r
int theight = divUp(divUp(rows, grid.y), threads.y);\r
\r
\r
template <int nthreads, typename T>\r
- __global__ void count_non_zero_kernel(const DevMem2D src, volatile uint* count)\r
+ __global__ void countNonZeroKernel(const DevMem2D src, volatile uint* count)\r
{\r
__shared__ uint scount[nthreads];\r
\r
scount[tid] = cnt;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, uint>(scount, tid);\r
+ sumInSmem<nthreads, uint>(scount, tid);\r
\r
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110\r
__shared__ bool is_last;\r
scount[tid] = tid < gridDim.x * gridDim.y ? count[tid] : 0;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, uint>(scount, tid);\r
+ sumInSmem<nthreads, uint>(scount, tid);\r
\r
if (tid == 0) \r
{\r
\r
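The blocks_finished counter declared above enables the usual single-pass "last block finalizes" reduction on devices with global atomics: every block publishes its partial count, atomically takes a ticket, and only the block holding the last ticket re-reduces the per-block partials. The middle of the kernel is elided from this hunk, so the sketch below is a hedged reconstruction of that pattern, not the patch content:

    template <int nthreads>
    __global__ void countNonZeroSinglePassSketch(volatile uint* count)
    {
        __shared__ uint scount[nthreads];
        __shared__ bool is_last;
        uint tid = threadIdx.y * blockDim.x + threadIdx.x;

        // ... per-thread counting and sumInSmem<nthreads, uint>(scount, tid) as in the hunk above ...

        if (tid == 0)
        {
            count[blockIdx.y * gridDim.x + blockIdx.x] = scount[0]; // publish this block's partial
            __threadfence();                                        // make it visible to other blocks

            uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
            is_last = (ticket == gridDim.x * gridDim.y - 1);        // only the final block proceeds
        }
        __syncthreads();

        if (is_last)
        {
            scount[tid] = tid < gridDim.x * gridDim.y ? count[tid] : 0;
            __syncthreads();

            sumInSmem<nthreads, uint>(scount, tid);

            if (tid == 0)
            {
                count[0] = scount[0];   // final result lands in count[0]
                blocks_finished = 0;    // reset so the kernel can be launched again
            }
        }
    }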
\r
template <typename T>\r
- int count_non_zero_caller(const DevMem2D src, PtrStep buf)\r
+ int countNonZeroCaller(const DevMem2D src, PtrStep buf)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
uint* count_buf = (uint*)buf.ptr(0);\r
\r
- count_non_zero_kernel<256, T><<<grid, threads>>>(src, count_buf);\r
+ countNonZeroKernel<256, T><<<grid, threads>>>(src, count_buf);\r
cudaSafeCall(cudaThreadSynchronize());\r
\r
uint count;\r
return count;\r
} \r
\r
- template int count_non_zero_caller<uchar>(const DevMem2D, PtrStep);\r
- template int count_non_zero_caller<char>(const DevMem2D, PtrStep);\r
- template int count_non_zero_caller<ushort>(const DevMem2D, PtrStep);\r
- template int count_non_zero_caller<short>(const DevMem2D, PtrStep);\r
- template int count_non_zero_caller<int>(const DevMem2D, PtrStep);\r
- template int count_non_zero_caller<float>(const DevMem2D, PtrStep);\r
- template int count_non_zero_caller<double>(const DevMem2D, PtrStep);\r
+ template int countNonZeroCaller<uchar>(const DevMem2D, PtrStep);\r
+ template int countNonZeroCaller<char>(const DevMem2D, PtrStep);\r
+ template int countNonZeroCaller<ushort>(const DevMem2D, PtrStep);\r
+ template int countNonZeroCaller<short>(const DevMem2D, PtrStep);\r
+ template int countNonZeroCaller<int>(const DevMem2D, PtrStep);\r
+ template int countNonZeroCaller<float>(const DevMem2D, PtrStep);\r
+ template int countNonZeroCaller<double>(const DevMem2D, PtrStep);\r
\r
\r
template <int nthreads, typename T>\r
- __global__ void count_non_zero_pass2_kernel(uint* count, int size)\r
+ __global__ void countNonZeroPass2Kernel(uint* count, int size)\r
{\r
__shared__ uint scount[nthreads];\r
uint tid = threadIdx.y * blockDim.x + threadIdx.x;\r
scount[tid] = tid < size ? count[tid] : 0;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, uint>(scount, tid);\r
+ sumInSmem<nthreads, uint>(scount, tid);\r
\r
if (tid == 0) \r
count[0] = scount[0];\r
\r
\r
template <typename T>\r
- int count_non_zero_multipass_caller(const DevMem2D src, PtrStep buf)\r
+ int countNonZeroMultipassCaller(const DevMem2D src, PtrStep buf)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
uint* count_buf = (uint*)buf.ptr(0);\r
\r
- count_non_zero_kernel<256, T><<<grid, threads>>>(src, count_buf);\r
- count_non_zero_pass2_kernel<256, T><<<1, 256>>>(count_buf, grid.x * grid.y);\r
+ countNonZeroKernel<256, T><<<grid, threads>>>(src, count_buf);\r
+ countNonZeroPass2Kernel<256, T><<<1, 256>>>(count_buf, grid.x * grid.y);\r
cudaSafeCall(cudaThreadSynchronize());\r
\r
uint count;\r
return count;\r
} \r
\r
- template int count_non_zero_multipass_caller<uchar>(const DevMem2D, PtrStep);\r
- template int count_non_zero_multipass_caller<char>(const DevMem2D, PtrStep);\r
- template int count_non_zero_multipass_caller<ushort>(const DevMem2D, PtrStep);\r
- template int count_non_zero_multipass_caller<short>(const DevMem2D, PtrStep);\r
- template int count_non_zero_multipass_caller<int>(const DevMem2D, PtrStep);\r
- template int count_non_zero_multipass_caller<float>(const DevMem2D, PtrStep);\r
+ template int countNonZeroMultipassCaller<uchar>(const DevMem2D, PtrStep);\r
+ template int countNonZeroMultipassCaller<char>(const DevMem2D, PtrStep);\r
+ template int countNonZeroMultipassCaller<ushort>(const DevMem2D, PtrStep);\r
+ template int countNonZeroMultipassCaller<short>(const DevMem2D, PtrStep);\r
+ template int countNonZeroMultipassCaller<int>(const DevMem2D, PtrStep);\r
+ template int countNonZeroMultipassCaller<float>(const DevMem2D, PtrStep);\r
\r
} // namespace countnonzero\r
\r
const int threads_x = 32;\r
const int threads_y = 8;\r
\r
- void estimate_thread_cfg(int cols, int rows, dim3& threads, dim3& grid)\r
+ void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)\r
{\r
threads = dim3(threads_x, threads_y);\r
grid = dim3(divUp(cols, threads.x * threads.y), \r
}\r
\r
\r
- void get_buf_size_required(int cols, int rows, int cn, int& bufcols, int& bufrows)\r
+ void getBufSizeRequired(int cols, int rows, int cn, int& bufcols, int& bufrows)\r
{\r
dim3 threads, grid;\r
- estimate_thread_cfg(cols, rows, threads, grid);\r
+ estimateThreadCfg(cols, rows, threads, grid);\r
bufcols = grid.x * grid.y * sizeof(double) * cn;\r
bufrows = 1;\r
}\r
\r
\r
- void set_kernel_consts(int cols, int rows, const dim3& threads, const dim3& grid)\r
+ void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)\r
{ \r
int twidth = divUp(divUp(cols, grid.x), threads.x);\r
int theight = divUp(divUp(rows, grid.y), threads.y);\r
}\r
\r
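The sum kernels below take an Op functor so one reduction serves both sum (IdentityOp) and sqrSum (SqrOp). Their definitions are elided from this diff; they are presumably trivial per-element transforms along these lines (hedged sketch, names suffixed to mark them as guesses):

    // Plausible shapes only; the real definitions are not part of this hunk.
    template <typename R> struct IdentityOpSketch
    {
        __device__ R operator()(R x) const { return x; }
    };

    template <typename R> struct SqrOpSketch
    {
        __device__ R operator()(R x) const { return x * x; }
    };
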
template <typename T, typename R, typename Op, int nthreads>\r
- __global__ void sum_kernel(const DevMem2D src, R* result)\r
+ __global__ void sumKernel(const DevMem2D src, R* result)\r
{\r
__shared__ R smem[nthreads];\r
\r
smem[tid] = sum;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, R>(smem, tid);\r
+ sumInSmem<nthreads, R>(smem, tid);\r
\r
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110\r
__shared__ bool is_last;\r
smem[tid] = tid < gridDim.x * gridDim.y ? result[tid] : 0;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, R>(smem, tid);\r
+ sumInSmem<nthreads, R>(smem, tid);\r
\r
if (tid == 0) \r
{\r
\r
\r
template <typename T, typename R, int nthreads>\r
- __global__ void sum_pass2_kernel(R* result, int size)\r
+ __global__ void sumPass2Kernel(R* result, int size)\r
{\r
__shared__ R smem[nthreads];\r
int tid = threadIdx.y * blockDim.x + threadIdx.x;\r
smem[tid] = tid < size ? result[tid] : 0;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, R>(smem, tid);\r
+ sumInSmem<nthreads, R>(smem, tid);\r
\r
if (tid == 0) \r
result[0] = smem[0];\r
\r
\r
template <typename T, typename R, typename Op, int nthreads>\r
- __global__ void sum_kernel_C2(const DevMem2D src, typename TypeVec<R, 2>::vec_t* result)\r
+ __global__ void sumKernel_C2(const DevMem2D src, typename TypeVec<R, 2>::vec_t* result)\r
{\r
typedef typename TypeVec<T, 2>::vec_t SrcType;\r
typedef typename TypeVec<R, 2>::vec_t DstType;\r
smem[tid + nthreads] = sum.y;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, R>(smem, tid);\r
- sum_in_smem<nthreads, R>(smem + nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem, tid);\r
+ sumInSmem<nthreads, R>(smem + nthreads, tid);\r
\r
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110\r
__shared__ bool is_last;\r
smem[tid + nthreads] = res.y;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, R>(smem, tid);\r
- sum_in_smem<nthreads, R>(smem + nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem, tid);\r
+ sumInSmem<nthreads, R>(smem + nthreads, tid);\r
\r
if (tid == 0) \r
{\r
\r
\r
template <typename T, typename R, int nthreads>\r
- __global__ void sum_pass2_kernel_C2(typename TypeVec<R, 2>::vec_t* result, int size)\r
+ __global__ void sumPass2Kernel_C2(typename TypeVec<R, 2>::vec_t* result, int size)\r
{\r
typedef typename TypeVec<R, 2>::vec_t DstType;\r
\r
smem[tid + nthreads] = res.y;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, R>(smem, tid);\r
- sum_in_smem<nthreads, R>(smem + nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem, tid);\r
+ sumInSmem<nthreads, R>(smem + nthreads, tid);\r
\r
if (tid == 0) \r
{\r
\r
\r
template <typename T, typename R, typename Op, int nthreads>\r
- __global__ void sum_kernel_C3(const DevMem2D src, typename TypeVec<R, 3>::vec_t* result)\r
+ __global__ void sumKernel_C3(const DevMem2D src, typename TypeVec<R, 3>::vec_t* result)\r
{\r
typedef typename TypeVec<T, 3>::vec_t SrcType;\r
typedef typename TypeVec<R, 3>::vec_t DstType;\r
smem[tid + 2 * nthreads] = sum.z;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, R>(smem, tid);\r
- sum_in_smem<nthreads, R>(smem + nthreads, tid);\r
- sum_in_smem<nthreads, R>(smem + 2 * nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem, tid);\r
+ sumInSmem<nthreads, R>(smem + nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);\r
\r
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110\r
__shared__ bool is_last;\r
smem[tid + 2 * nthreads] = res.z;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, R>(smem, tid);\r
- sum_in_smem<nthreads, R>(smem + nthreads, tid);\r
- sum_in_smem<nthreads, R>(smem + 2 * nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem, tid);\r
+ sumInSmem<nthreads, R>(smem + nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);\r
\r
if (tid == 0) \r
{\r
\r
\r
template <typename T, typename R, int nthreads>\r
- __global__ void sum_pass2_kernel_C3(typename TypeVec<R, 3>::vec_t* result, int size)\r
+ __global__ void sumPass2Kernel_C3(typename TypeVec<R, 3>::vec_t* result, int size)\r
{\r
typedef typename TypeVec<R, 3>::vec_t DstType;\r
\r
smem[tid + 2 * nthreads] = res.z;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, R>(smem, tid);\r
- sum_in_smem<nthreads, R>(smem + nthreads, tid);\r
- sum_in_smem<nthreads, R>(smem + 2 * nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem, tid);\r
+ sumInSmem<nthreads, R>(smem + nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);\r
\r
if (tid == 0) \r
{\r
}\r
\r
template <typename T, typename R, typename Op, int nthreads>\r
- __global__ void sum_kernel_C4(const DevMem2D src, typename TypeVec<R, 4>::vec_t* result)\r
+ __global__ void sumKernel_C4(const DevMem2D src, typename TypeVec<R, 4>::vec_t* result)\r
{\r
typedef typename TypeVec<T, 4>::vec_t SrcType;\r
typedef typename TypeVec<R, 4>::vec_t DstType;\r
smem[tid + 3 * nthreads] = sum.w;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, R>(smem, tid);\r
- sum_in_smem<nthreads, R>(smem + nthreads, tid);\r
- sum_in_smem<nthreads, R>(smem + 2 * nthreads, tid);\r
- sum_in_smem<nthreads, R>(smem + 3 * nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem, tid);\r
+ sumInSmem<nthreads, R>(smem + nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);\r
\r
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110\r
__shared__ bool is_last;\r
smem[tid + 3 * nthreads] = res.w;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, R>(smem, tid);\r
- sum_in_smem<nthreads, R>(smem + nthreads, tid);\r
- sum_in_smem<nthreads, R>(smem + 2 * nthreads, tid);\r
- sum_in_smem<nthreads, R>(smem + 3 * nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem, tid);\r
+ sumInSmem<nthreads, R>(smem + nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);\r
\r
if (tid == 0) \r
{\r
\r
\r
template <typename T, typename R, int nthreads>\r
- __global__ void sum_pass2_kernel_C4(typename TypeVec<R, 4>::vec_t* result, int size)\r
+ __global__ void sumPass2Kernel_C4(typename TypeVec<R, 4>::vec_t* result, int size)\r
{\r
typedef typename TypeVec<R, 4>::vec_t DstType;\r
\r
smem[tid + 3 * nthreads] = res.z;\r
__syncthreads();\r
\r
- sum_in_smem<nthreads, R>(smem, tid);\r
- sum_in_smem<nthreads, R>(smem + nthreads, tid);\r
- sum_in_smem<nthreads, R>(smem + 2 * nthreads, tid);\r
- sum_in_smem<nthreads, R>(smem + 3 * nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem, tid);\r
+ sumInSmem<nthreads, R>(smem + nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);\r
+ sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);\r
\r
if (tid == 0) \r
{\r
\r
\r
template <typename T>\r
- void sum_multipass_caller(const DevMem2D src, PtrStep buf, double* sum, int cn)\r
+ void sumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)\r
{\r
using namespace sum;\r
typedef typename SumType<T>::R R;\r
\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
switch (cn)\r
{\r
case 1:\r
- sum_kernel<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));\r
- sum_pass2_kernel<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
+ sumPass2Kernel<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
(typename TypeVec<R, 1>::vec_t*)buf.ptr(0), grid.x * grid.y);\r
case 2:\r
- sum_kernel_C2<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel_C2<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));\r
- sum_pass2_kernel_C2<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
+ sumPass2Kernel_C2<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
(typename TypeVec<R, 2>::vec_t*)buf.ptr(0), grid.x * grid.y);\r
case 3:\r
- sum_kernel_C3<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel_C3<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));\r
- sum_pass2_kernel_C3<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
+ sumPass2Kernel_C3<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
(typename TypeVec<R, 3>::vec_t*)buf.ptr(0), grid.x * grid.y);\r
case 4:\r
- sum_kernel_C4<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel_C4<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));\r
- sum_pass2_kernel_C4<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
+ sumPass2Kernel_C4<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
(typename TypeVec<R, 4>::vec_t*)buf.ptr(0), grid.x * grid.y);\r
}\r
cudaSafeCall(cudaThreadSynchronize());\r
sum[3] = result[3];\r
} \r
\r
- template void sum_multipass_caller<uchar>(const DevMem2D, PtrStep, double*, int);\r
- template void sum_multipass_caller<char>(const DevMem2D, PtrStep, double*, int);\r
- template void sum_multipass_caller<ushort>(const DevMem2D, PtrStep, double*, int);\r
- template void sum_multipass_caller<short>(const DevMem2D, PtrStep, double*, int);\r
- template void sum_multipass_caller<int>(const DevMem2D, PtrStep, double*, int);\r
- template void sum_multipass_caller<float>(const DevMem2D, PtrStep, double*, int);\r
+ template void sumMultipassCaller<uchar>(const DevMem2D, PtrStep, double*, int);\r
+ template void sumMultipassCaller<char>(const DevMem2D, PtrStep, double*, int);\r
+ template void sumMultipassCaller<ushort>(const DevMem2D, PtrStep, double*, int);\r
+ template void sumMultipassCaller<short>(const DevMem2D, PtrStep, double*, int);\r
+ template void sumMultipassCaller<int>(const DevMem2D, PtrStep, double*, int);\r
+ template void sumMultipassCaller<float>(const DevMem2D, PtrStep, double*, int);\r
\r
\r
template <typename T>\r
- void sum_caller(const DevMem2D src, PtrStep buf, double* sum, int cn)\r
+ void sumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)\r
{\r
using namespace sum;\r
typedef typename SumType<T>::R R;\r
\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
switch (cn)\r
{\r
case 1:\r
- sum_kernel<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));\r
break;\r
case 2:\r
- sum_kernel_C2<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel_C2<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));\r
break;\r
case 3:\r
- sum_kernel_C3<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel_C3<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));\r
break;\r
case 4:\r
- sum_kernel_C4<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel_C4<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));\r
break;\r
}\r
sum[3] = result[3];\r
} \r
\r
- template void sum_caller<uchar>(const DevMem2D, PtrStep, double*, int);\r
- template void sum_caller<char>(const DevMem2D, PtrStep, double*, int);\r
- template void sum_caller<ushort>(const DevMem2D, PtrStep, double*, int);\r
- template void sum_caller<short>(const DevMem2D, PtrStep, double*, int);\r
- template void sum_caller<int>(const DevMem2D, PtrStep, double*, int);\r
- template void sum_caller<float>(const DevMem2D, PtrStep, double*, int);\r
+ template void sumCaller<uchar>(const DevMem2D, PtrStep, double*, int);\r
+ template void sumCaller<char>(const DevMem2D, PtrStep, double*, int);\r
+ template void sumCaller<ushort>(const DevMem2D, PtrStep, double*, int);\r
+ template void sumCaller<short>(const DevMem2D, PtrStep, double*, int);\r
+ template void sumCaller<int>(const DevMem2D, PtrStep, double*, int);\r
+ template void sumCaller<float>(const DevMem2D, PtrStep, double*, int);\r
\r
\r
template <typename T>\r
- void sqsum_multipass_caller(const DevMem2D src, PtrStep buf, double* sum, int cn)\r
+ void sqrSumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)\r
{\r
using namespace sum;\r
typedef typename SumType<T>::R R;\r
\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
switch (cn)\r
{\r
case 1:\r
- sum_kernel<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));\r
- sum_pass2_kernel<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
+ sumPass2Kernel<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
(typename TypeVec<R, 1>::vec_t*)buf.ptr(0), grid.x * grid.y);\r
break;\r
case 2:\r
- sum_kernel_C2<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel_C2<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));\r
- sum_pass2_kernel_C2<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
+ sumPass2Kernel_C2<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
(typename TypeVec<R, 2>::vec_t*)buf.ptr(0), grid.x * grid.y);\r
break;\r
case 3:\r
- sum_kernel_C3<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel_C3<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));\r
- sum_pass2_kernel_C3<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
+ sumPass2Kernel_C3<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
(typename TypeVec<R, 3>::vec_t*)buf.ptr(0), grid.x * grid.y);\r
break;\r
case 4:\r
- sum_kernel_C4<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel_C4<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));\r
- sum_pass2_kernel_C4<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
+ sumPass2Kernel_C4<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(\r
(typename TypeVec<R, 4>::vec_t*)buf.ptr(0), grid.x * grid.y);\r
break;\r
}\r
sum[3] = result[3];\r
} \r
\r
- template void sqsum_multipass_caller<uchar>(const DevMem2D, PtrStep, double*, int);\r
- template void sqsum_multipass_caller<char>(const DevMem2D, PtrStep, double*, int);\r
- template void sqsum_multipass_caller<ushort>(const DevMem2D, PtrStep, double*, int);\r
- template void sqsum_multipass_caller<short>(const DevMem2D, PtrStep, double*, int);\r
- template void sqsum_multipass_caller<int>(const DevMem2D, PtrStep, double*, int);\r
- template void sqsum_multipass_caller<float>(const DevMem2D, PtrStep, double*, int);\r
+ template void sqrSumMultipassCaller<uchar>(const DevMem2D, PtrStep, double*, int);\r
+ template void sqrSumMultipassCaller<char>(const DevMem2D, PtrStep, double*, int);\r
+ template void sqrSumMultipassCaller<ushort>(const DevMem2D, PtrStep, double*, int);\r
+ template void sqrSumMultipassCaller<short>(const DevMem2D, PtrStep, double*, int);\r
+ template void sqrSumMultipassCaller<int>(const DevMem2D, PtrStep, double*, int);\r
+ template void sqrSumMultipassCaller<float>(const DevMem2D, PtrStep, double*, int);\r
\r
\r
template <typename T>\r
- void sqsum_caller(const DevMem2D src, PtrStep buf, double* sum, int cn)\r
+ void sqrSumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)\r
{\r
using namespace sum;\r
typedef typename SumType<T>::R R;\r
\r
dim3 threads, grid;\r
- estimate_thread_cfg(src.cols, src.rows, threads, grid);\r
- set_kernel_consts(src.cols, src.rows, threads, grid);\r
+ estimateThreadCfg(src.cols, src.rows, threads, grid);\r
+ setKernelConsts(src.cols, src.rows, threads, grid);\r
\r
switch (cn)\r
{\r
case 1:\r
- sum_kernel<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));\r
break;\r
case 2:\r
- sum_kernel_C2<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel_C2<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));\r
break;\r
case 3:\r
- sum_kernel_C3<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel_C3<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));\r
break;\r
case 4:\r
- sum_kernel_C4<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
+ sumKernel_C4<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(\r
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));\r
break;\r
}\r
sum[3] = result[3];\r
}\r
\r
- template void sqsum_caller<uchar>(const DevMem2D, PtrStep, double*, int);\r
- template void sqsum_caller<char>(const DevMem2D, PtrStep, double*, int);\r
- template void sqsum_caller<ushort>(const DevMem2D, PtrStep, double*, int);\r
- template void sqsum_caller<short>(const DevMem2D, PtrStep, double*, int);\r
- template void sqsum_caller<int>(const DevMem2D, PtrStep, double*, int);\r
- template void sqsum_caller<float>(const DevMem2D, PtrStep, double*, int);\r
+ template void sqrSumCaller<uchar>(const DevMem2D, PtrStep, double*, int);\r
+ template void sqrSumCaller<char>(const DevMem2D, PtrStep, double*, int);\r
+ template void sqrSumCaller<ushort>(const DevMem2D, PtrStep, double*, int);\r
+ template void sqrSumCaller<short>(const DevMem2D, PtrStep, double*, int);\r
+ template void sqrSumCaller<int>(const DevMem2D, PtrStep, double*, int);\r
+ template void sqrSumCaller<float>(const DevMem2D, PtrStep, double*, int);\r
}}}
\ No newline at end of file
namespace cv { namespace gpu { namespace mathfunc\r
{\r
template <typename T>\r
- void sum_caller(const DevMem2D src, PtrStep buf, double* sum, int cn);\r
+ void sumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn);\r
\r
template <typename T>\r
- void sum_multipass_caller(const DevMem2D src, PtrStep buf, double* sum, int cn);\r
+ void sumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn);\r
\r
template <typename T>\r
- void sqsum_caller(const DevMem2D src, PtrStep buf, double* sum, int cn);\r
+ void sqrSumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn);\r
\r
template <typename T>\r
- void sqsum_multipass_caller(const DevMem2D src, PtrStep buf, double* sum, int cn);\r
+ void sqrSumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn);\r
\r
namespace sum\r
{\r
- void get_buf_size_required(int cols, int rows, int cn, int& bufcols, int& bufrows);\r
+ void getBufSizeRequired(int cols, int rows, int cn, int& bufcols, int& bufrows);\r
}\r
}}}\r
\r
using namespace mathfunc;\r
\r
typedef void (*Caller)(const DevMem2D, PtrStep, double*, int);\r
- static const Caller callers[2][7] = \r
- { { sum_multipass_caller<unsigned char>, sum_multipass_caller<char>, \r
- sum_multipass_caller<unsigned short>, sum_multipass_caller<short>, \r
- sum_multipass_caller<int>, sum_multipass_caller<float>, 0 },\r
- { sum_caller<unsigned char>, sum_caller<char>, \r
- sum_caller<unsigned short>, sum_caller<short>, \r
- sum_caller<int>, sum_caller<float>, 0 } };\r
-\r
- Size bufSize;\r
- sum::get_buf_size_required(src.cols, src.rows, src.channels(), bufSize.width, bufSize.height); \r
- ensureSizeIsEnough(bufSize, CV_8U, buf);\r
-\r
- Caller caller = callers[hasAtomicsSupport(getDevice())][src.depth()];\r
+\r
+ static Caller multipass_callers[7] = { \r
+ sumMultipassCaller<unsigned char>, sumMultipassCaller<char>, \r
+ sumMultipassCaller<unsigned short>, sumMultipassCaller<short>, \r
+ sumMultipassCaller<int>, sumMultipassCaller<float>, 0 };\r
+\r
+ static Caller singlepass_callers[7] = { \r
+ sumCaller<unsigned char>, sumCaller<char>, \r
+ sumCaller<unsigned short>, sumCaller<short>, \r
+ sumCaller<int>, sumCaller<float>, 0 };\r
+\r
+ Size buf_size;\r
+ sum::getBufSizeRequired(src.cols, src.rows, src.channels(), \r
+ buf_size.width, buf_size.height); \r
+ ensureSizeIsEnough(buf_size, CV_8U, buf);\r
+\r
+ Caller* callers = multipass_callers;\r
+ if (ptxVersionIsGreaterOrEqual(1, 1) && hasAtomicsSupport(getDevice()))\r
+ callers = singlepass_callers;\r
+\r
+ Caller caller = callers[src.depth()];\r
if (!caller) CV_Error(CV_StsBadArg, "sum: unsupported type");\r
\r
double result[4];\r
using namespace mathfunc;\r
\r
typedef void (*Caller)(const DevMem2D, PtrStep, double*, int);\r
- static const Caller callers[2][7] = \r
- { { sqsum_multipass_caller<unsigned char>, sqsum_multipass_caller<char>, \r
- sqsum_multipass_caller<unsigned short>, sqsum_multipass_caller<short>, \r
- sqsum_multipass_caller<int>, sqsum_multipass_caller<float>, 0 },\r
- { sqsum_caller<unsigned char>, sqsum_caller<char>, \r
- sqsum_caller<unsigned short>, sqsum_caller<short>, \r
- sqsum_caller<int>, sqsum_caller<float>, 0 } };\r
-\r
- Size bufSize;\r
- sum::get_buf_size_required(src.cols, src.rows, src.channels(), bufSize.width, bufSize.height); \r
- ensureSizeIsEnough(bufSize, CV_8U, buf);\r
-\r
- Caller caller = callers[hasAtomicsSupport(getDevice())][src.depth()];\r
+\r
+ static Caller multipass_callers[7] = { \r
+ sqrSumMultipassCaller<unsigned char>, sqrSumMultipassCaller<char>, \r
+ sqrSumMultipassCaller<unsigned short>, sqrSumMultipassCaller<short>, \r
+ sqrSumMultipassCaller<int>, sqrSumMultipassCaller<float>, 0 };\r
+\r
+ static Caller singlepass_callers[7] = { \r
+ sqrSumCaller<unsigned char>, sqrSumCaller<char>, \r
+ sqrSumCaller<unsigned short>, sqrSumCaller<short>, \r
+ sqrSumCaller<int>, sqrSumCaller<float>, 0 };\r
+\r
+ Caller* callers = multipass_callers;\r
+ if (ptxVersionIsGreaterOrEqual(1, 1) && hasAtomicsSupport(getDevice()))\r
+ callers = singlepass_callers;\r
+\r
+ Size buf_size;\r
+ sum::getBufSizeRequired(src.cols, src.rows, src.channels(), \r
+ buf_size.width, buf_size.height); \r
+ ensureSizeIsEnough(buf_size, CV_8U, buf);\r
+\r
+ Caller caller = callers[src.depth()];\r
if (!caller) CV_Error(CV_StsBadArg, "sqrSum: unsupported type");\r
\r
double result[4];\r
\r
namespace cv { namespace gpu { namespace mathfunc { namespace minmax {\r
\r
- void get_buf_size_required(int cols, int rows, int elem_size, int& bufcols, int& bufrows);\r
+ void getBufSizeRequired(int cols, int rows, int elem_size, int& bufcols, int& bufrows);\r
\r
template <typename T> \r
- void min_max_caller(const DevMem2D src, double* minval, double* maxval, PtrStep buf);\r
+ void minMaxCaller(const DevMem2D src, double* minval, double* maxval, PtrStep buf);\r
\r
template <typename T> \r
- void min_max_mask_caller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf);\r
+ void minMaxMaskCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf);\r
\r
template <typename T> \r
- void min_max_multipass_caller(const DevMem2D src, double* minval, double* maxval, PtrStep buf);\r
+ void minMaxMultipassCaller(const DevMem2D src, double* minval, double* maxval, PtrStep buf);\r
\r
template <typename T> \r
- void min_max_mask_multipass_caller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf);\r
+ void minMaxMaskMultipassCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf);\r
\r
}}}}\r
\r
typedef void (*Caller)(const DevMem2D, double*, double*, PtrStep);\r
typedef void (*MaskedCaller)(const DevMem2D, const PtrStep, double*, double*, PtrStep);\r
\r
- static const Caller callers[2][7] = \r
- { { min_max_multipass_caller<unsigned char>, min_max_multipass_caller<char>, \r
- min_max_multipass_caller<unsigned short>, min_max_multipass_caller<short>, \r
- min_max_multipass_caller<int>, min_max_multipass_caller<float>, 0 },\r
- { min_max_caller<unsigned char>, min_max_caller<char>, \r
- min_max_caller<unsigned short>, min_max_caller<short>, \r
- min_max_caller<int>, min_max_caller<float>, min_max_caller<double> } };\r
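+ // Only the single-pass tables below provide a CV_64F entry; the multi-pass\r
+ // fallback leaves that slot 0, so double input on devices without atomics\r
+ // support falls through to the "unsupported type" error below.\r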
+ static Caller multipass_callers[7] = { \r
+ minMaxMultipassCaller<unsigned char>, minMaxMultipassCaller<char>, \r
+ minMaxMultipassCaller<unsigned short>, minMaxMultipassCaller<short>, \r
+ minMaxMultipassCaller<int>, minMaxMultipassCaller<float>, 0 };\r
\r
- static const MaskedCaller masked_callers[2][7] = \r
- { { min_max_mask_multipass_caller<unsigned char>, min_max_mask_multipass_caller<char>, \r
- min_max_mask_multipass_caller<unsigned short>, min_max_mask_multipass_caller<short>, \r
- min_max_mask_multipass_caller<int>, min_max_mask_multipass_caller<float>, 0 },\r
- { min_max_mask_caller<unsigned char>, min_max_mask_caller<char>, \r
- min_max_mask_caller<unsigned short>, min_max_mask_caller<short>, \r
- min_max_mask_caller<int>, min_max_mask_caller<float>, \r
- min_max_mask_caller<double> } };\r
+ static Caller singlepass_callers[7] = { \r
+ minMaxCaller<unsigned char>, minMaxCaller<char>, \r
+ minMaxCaller<unsigned short>, minMaxCaller<short>, \r
+ minMaxCaller<int>, minMaxCaller<float>, minMaxCaller<double> };\r
\r
+ static MaskedCaller masked_multipass_callers[7] = { \r
+ minMaxMaskMultipassCaller<unsigned char>, minMaxMaskMultipassCaller<char>, \r
+ minMaxMaskMultipassCaller<unsigned short>, minMaxMaskMultipassCaller<short>,\r
+ minMaxMaskMultipassCaller<int>, minMaxMaskMultipassCaller<float>, 0 };\r
+\r
+ static MaskedCaller masked_singlepass_callers[7] = { \r
+ minMaxMaskCaller<unsigned char>, minMaxMaskCaller<char>, \r
+ minMaxMaskCaller<unsigned short>, minMaxMaskCaller<short>, \r
+ minMaxMaskCaller<int>, minMaxMaskCaller<float>, \r
+ minMaxMaskCaller<double> };\r
\r
CV_Assert(src.channels() == 1);\r
CV_Assert(mask.empty() || (mask.type() == CV_8U && src.size() == mask.size()));\r
double minVal_; if (!minVal) minVal = &minVal_;\r
double maxVal_; if (!maxVal) maxVal = &maxVal_;\r
\r
- Size bufSize;\r
- get_buf_size_required(src.cols, src.rows, src.elemSize(), bufSize.width, bufSize.height);\r
- ensureSizeIsEnough(bufSize, CV_8U, buf);\r
+ Size buf_size;\r
+ getBufSizeRequired(src.cols, src.rows, src.elemSize(), buf_size.width, buf_size.height);\r
+ ensureSizeIsEnough(buf_size, CV_8U, buf);\r
\r
if (mask.empty())\r
{\r
- Caller caller = callers[hasAtomicsSupport(getDevice())][src.type()];\r
+ Caller* callers = multipass_callers;\r
+ if (ptxVersionIsGreaterOrEqual(1, 1) && hasAtomicsSupport(getDevice()))\r
+ callers = singlepass_callers;\r
+\r
+ Caller caller = callers[src.type()];\r
if (!caller) CV_Error(CV_StsBadArg, "minMax: unsupported type");\r
caller(src, minVal, maxVal, buf);\r
}\r
else\r
{\r
- MaskedCaller caller = masked_callers[hasAtomicsSupport(getDevice())][src.type()];\r
+ MaskedCaller* callers = masked_multipass_callers;\r
+ if (ptxVersionIsGreaterOrEqual(1, 1) && hasAtomicsSupport(getDevice()))\r
+ callers = masked_singlepass_callers;\r
+\r
+ MaskedCaller caller = callers[src.type()];\r
if (!caller) CV_Error(CV_StsBadArg, "minMax: unsupported type");\r
caller(src, mask, minVal, maxVal, buf);\r
}\r
\r
namespace cv { namespace gpu { namespace mathfunc { namespace minmaxloc {\r
\r
- void get_buf_size_required(int cols, int rows, int elem_size, int& b1cols, \r
+ void getBufSizeRequired(int cols, int rows, int elem_size, int& b1cols, \r
int& b1rows, int& b2cols, int& b2rows);\r
\r
template <typename T> \r
- void min_max_loc_caller(const DevMem2D src, double* minval, double* maxval, \r
+ void minMaxLocCaller(const DevMem2D src, double* minval, double* maxval, \r
int minloc[2], int maxloc[2], PtrStep valBuf, PtrStep locBuf);\r
\r
template <typename T> \r
- void min_max_loc_mask_caller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, \r
+ void minMaxLocMaskCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, \r
int minloc[2], int maxloc[2], PtrStep valBuf, PtrStep locBuf);\r
\r
template <typename T> \r
- void min_max_loc_multipass_caller(const DevMem2D src, double* minval, double* maxval, \r
+ void minMaxLocMultipassCaller(const DevMem2D src, double* minval, double* maxval, \r
int minloc[2], int maxloc[2], PtrStep valBuf, PtrStep locBuf);\r
\r
template <typename T> \r
- void min_max_loc_mask_multipass_caller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, \r
+ void minMaxLocMaskMultipassCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, \r
int minloc[2], int maxloc[2], PtrStep valBuf, PtrStep locBuf);\r
}}}}\r
\r
typedef void (*Caller)(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
typedef void (*MaskedCaller)(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);\r
\r
- static const Caller callers[2][7] = \r
- { { min_max_loc_multipass_caller<unsigned char>, min_max_loc_multipass_caller<char>, \r
- min_max_loc_multipass_caller<unsigned short>, min_max_loc_multipass_caller<short>, \r
- min_max_loc_multipass_caller<int>, min_max_loc_multipass_caller<float>, 0 },\r
- { min_max_loc_caller<unsigned char>, min_max_loc_caller<char>, \r
- min_max_loc_caller<unsigned short>, min_max_loc_caller<short>, \r
- min_max_loc_caller<int>, min_max_loc_caller<float>, min_max_loc_caller<double> } };\r
-\r
- static const MaskedCaller masked_callers[2][7] = \r
- { { min_max_loc_mask_multipass_caller<unsigned char>, min_max_loc_mask_multipass_caller<char>, \r
- min_max_loc_mask_multipass_caller<unsigned short>, min_max_loc_mask_multipass_caller<short>, \r
- min_max_loc_mask_multipass_caller<int>, min_max_loc_mask_multipass_caller<float>, 0 },\r
- { min_max_loc_mask_caller<unsigned char>, min_max_loc_mask_caller<char>, \r
- min_max_loc_mask_caller<unsigned short>, min_max_loc_mask_caller<short>, \r
- min_max_loc_mask_caller<int>, min_max_loc_mask_caller<float>, min_max_loc_mask_caller<double> } };\r
+ static Caller multipass_callers[7] = { \r
+ minMaxLocMultipassCaller<unsigned char>, minMaxLocMultipassCaller<char>, \r
+ minMaxLocMultipassCaller<unsigned short>, minMaxLocMultipassCaller<short>, \r
+ minMaxLocMultipassCaller<int>, minMaxLocMultipassCaller<float>, 0 };\r
+\r
+ static Caller singlepass_callers[7] = { \r
+ minMaxLocCaller<unsigned char>, minMaxLocCaller<char>, \r
+ minMaxLocCaller<unsigned short>, minMaxLocCaller<short>, \r
+ minMaxLocCaller<int>, minMaxLocCaller<float>, minMaxLocCaller<double> };\r
+\r
+ static MaskedCaller masked_multipass_callers[7] = { \r
+ minMaxLocMaskMultipassCaller<unsigned char>, minMaxLocMaskMultipassCaller<char>, \r
+ minMaxLocMaskMultipassCaller<unsigned short>, minMaxLocMaskMultipassCaller<short>, \r
+ minMaxLocMaskMultipassCaller<int>, minMaxLocMaskMultipassCaller<float>, 0 };\r
+\r
+ static MaskedCaller masked_singlepass_callers[7] = { \r
+ minMaxLocMaskCaller<unsigned char>, minMaxLocMaskCaller<char>, \r
+ minMaxLocMaskCaller<unsigned short>, minMaxLocMaskCaller<short>, \r
+ minMaxLocMaskCaller<int>, minMaxLocMaskCaller<float>, \r
+ minMaxLocMaskCaller<double> };\r
\r
CV_Assert(src.channels() == 1);\r
CV_Assert(mask.empty() || (mask.type() == CV_8U && src.size() == mask.size()));\r
int minLoc_[2];\r
int maxLoc_[2];\r
\r
- Size valBufSize, locBufSize;\r
- get_buf_size_required(src.cols, src.rows, src.elemSize(), valBufSize.width, \r
- valBufSize.height, locBufSize.width, locBufSize.height);\r
- ensureSizeIsEnough(valBufSize, CV_8U, valBuf);\r
- ensureSizeIsEnough(locBufSize, CV_8U, locBuf);\r
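+ // Two scratch buffers are sized here: valBuf apparently holds the per-block\r
+ // extreme values and locBuf their locations, judging by the caller signatures.\r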
+ Size valbuf_size, locbuf_size;\r
+ getBufSizeRequired(src.cols, src.rows, src.elemSize(), valbuf_size.width, \r
+ valbuf_size.height, locbuf_size.width, locbuf_size.height);\r
+ ensureSizeIsEnough(valbuf_size, CV_8U, valBuf);\r
+ ensureSizeIsEnough(locbuf_size, CV_8U, locBuf);\r
\r
if (mask.empty())\r
{\r
- Caller caller = callers[hasAtomicsSupport(getDevice())][src.type()];\r
+ Caller* callers = multipass_callers;\r
+ if (ptxVersionIsGreaterOrEqual(1, 1) && hasAtomicsSupport(getDevice()))\r
+ callers = singlepass_callers;\r
+\r
+ Caller caller = callers[src.type()];\r
if (!caller) CV_Error(CV_StsBadArg, "minMaxLoc: unsupported type");\r
caller(src, minVal, maxVal, minLoc_, maxLoc_, valBuf, locBuf);\r
}\r
else\r
{\r
- MaskedCaller caller = masked_callers[hasAtomicsSupport(getDevice())][src.type()];\r
+ MaskedCaller* callers = masked_multipass_callers;\r
+ if (ptxVersionIsGreaterOrEqual(1, 1) && hasAtomicsSupport(getDevice()))\r
+ callers = masked_singlepass_callers;\r
+\r
+ MaskedCaller caller = callers[src.type()];\r
if (!caller) CV_Error(CV_StsBadArg, "minMaxLoc: unsupported type");\r
caller(src, mask, minVal, maxVal, minLoc_, maxLoc_, valBuf, locBuf);\r
}\r
\r
namespace cv { namespace gpu { namespace mathfunc { namespace countnonzero {\r
\r
- void get_buf_size_required(int cols, int rows, int& bufcols, int& bufrows);\r
+ void getBufSizeRequired(int cols, int rows, int& bufcols, int& bufrows);\r
\r
template <typename T> \r
- int count_non_zero_caller(const DevMem2D src, PtrStep buf);\r
+ int countNonZeroCaller(const DevMem2D src, PtrStep buf);\r
\r
template <typename T> \r
- int count_non_zero_multipass_caller(const DevMem2D src, PtrStep buf);\r
+ int countNonZeroMultipassCaller(const DevMem2D src, PtrStep buf);\r
\r
}}}}\r
\r
\r
typedef int (*Caller)(const DevMem2D src, PtrStep buf);\r
\r
- static const Caller callers[2][7] = \r
- { { count_non_zero_multipass_caller<unsigned char>, count_non_zero_multipass_caller<char>,\r
- count_non_zero_multipass_caller<unsigned short>, count_non_zero_multipass_caller<short>,\r
- count_non_zero_multipass_caller<int>, count_non_zero_multipass_caller<float>, 0},\r
- { count_non_zero_caller<unsigned char>, count_non_zero_caller<char>,\r
- count_non_zero_caller<unsigned short>, count_non_zero_caller<short>,\r
- count_non_zero_caller<int>, count_non_zero_caller<float>, count_non_zero_caller<double> } };\r
+ static Caller multipass_callers[7] = { \r
+ countNonZeroMultipassCaller<unsigned char>, countNonZeroMultipassCaller<char>,\r
+ countNonZeroMultipassCaller<unsigned short>, countNonZeroMultipassCaller<short>,\r
+ countNonZeroMultipassCaller<int>, countNonZeroMultipassCaller<float>, 0 };\r
+\r
+ static Caller singlepass_callers[7] = { \r
+ countNonZeroCaller<unsigned char>, countNonZeroCaller<char>,\r
+ countNonZeroCaller<unsigned short>, countNonZeroCaller<short>,\r
+ countNonZeroCaller<int>, countNonZeroCaller<float>, \r
+ countNonZeroCaller<double> };\r
\r
CV_Assert(src.channels() == 1);\r
CV_Assert(src.type() != CV_64F || hasNativeDoubleSupport(getDevice()));\r
\r
- Size bufSize;\r
- get_buf_size_required(src.cols, src.rows, bufSize.width, bufSize.height);\r
- ensureSizeIsEnough(bufSize, CV_8U, buf);\r
+ Size buf_size;\r
+ getBufSizeRequired(src.cols, src.rows, buf_size.width, buf_size.height);\r
+ ensureSizeIsEnough(buf_size, CV_8U, buf);\r
+\r
+ Caller* callers = multipass_callers;\r
+ if (ptxVersionIsGreaterOrEqual(1, 1) && hasAtomicsSupport(getDevice()))\r
+ callers = singlepass_callers;\r
\r
- Caller caller = callers[hasAtomicsSupport(getDevice())][src.type()];\r
+ Caller caller = callers[src.type()];\r
if (!caller) CV_Error(CV_StsBadArg, "countNonZero: unsupported type");\r
return caller(src, buf);\r
}\r