// For floating-point elements, best_type (the type used for intermediate
// values in the shared-memory reductions below) is the element type itself —
// no widening is needed.
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
\r
- template <typename T, int op> struct Cmp {};\r
+ template <typename T, int op> struct Opt {};\r
\r
template <typename T>\r
- struct Cmp<T, MIN> \r
+ struct Opt<T, MIN> \r
{\r
static __device__ void call(unsigned int tid, unsigned int offset, volatile T* optval)\r
{\r
};\r
\r
template <typename T>\r
- struct Cmp<T, MAX> \r
+ struct Opt<T, MAX> \r
{\r
static __device__ void call(unsigned int tid, unsigned int offset, volatile T* optval)\r
{\r
\r
__syncthreads();\r
\r
- if (nthreads >= 512) if (tid < 256) { Cmp<best_type, op>::call(tid, 256, soptval); __syncthreads(); }\r
- if (nthreads >= 256) if (tid < 128) { Cmp<best_type, op>::call(tid, 128, soptval); __syncthreads(); }\r
- if (nthreads >= 128) if (tid < 64) { Cmp<best_type, op>::call(tid, 64, soptval); __syncthreads(); }\r
+ if (nthreads >= 512) if (tid < 256) { Opt<best_type, op>::call(tid, 256, soptval); __syncthreads(); }\r
+ if (nthreads >= 256) if (tid < 128) { Opt<best_type, op>::call(tid, 128, soptval); __syncthreads(); }\r
+ if (nthreads >= 128) if (tid < 64) { Opt<best_type, op>::call(tid, 64, soptval); __syncthreads(); }\r
\r
if (tid < 32)\r
{\r
- if (nthreads >= 64) Cmp<best_type, op>::call(tid, 32, soptval);\r
- if (nthreads >= 32) Cmp<best_type, op>::call(tid, 16, soptval);\r
- if (nthreads >= 16) Cmp<best_type, op>::call(tid, 8, soptval);\r
- if (nthreads >= 8) Cmp<best_type, op>::call(tid, 4, soptval);\r
- if (nthreads >= 4) Cmp<best_type, op>::call(tid, 2, soptval);\r
- if (nthreads >= 2) Cmp<best_type, op>::call(tid, 1, soptval);\r
+ if (nthreads >= 64) Opt<best_type, op>::call(tid, 32, soptval);\r
+ if (nthreads >= 32) Opt<best_type, op>::call(tid, 16, soptval);\r
+ if (nthreads >= 16) Opt<best_type, op>::call(tid, 8, soptval);\r
+ if (nthreads >= 8) Opt<best_type, op>::call(tid, 4, soptval);\r
+ if (nthreads >= 4) Opt<best_type, op>::call(tid, 2, soptval);\r
+ if (nthreads >= 2) Opt<best_type, op>::call(tid, 1, soptval);\r
}\r
\r
if (tid == 0) ((T*)optval.ptr(blockIdx.y))[blockIdx.x] = (T)soptval[0];\r
}\r
-\r
\r
template <typename T>\r
void min_max_caller(const DevMem2D src, double* minval, double* maxval)\r
dim3 threads(32, 8);\r
\r
// Allocate memory for aux. buffers\r
- DevMem2D minval_buf[2]; DevMem2D maxval_buf[2];\r
+ DevMem2D minval_buf[2]; \r
minval_buf[0].cols = divUp(src.cols, threads.x); \r
minval_buf[0].rows = divUp(src.rows, threads.y);\r
minval_buf[1].cols = divUp(minval_buf[0].cols, threads.x); \r
minval_buf[1].rows = divUp(minval_buf[0].rows, threads.y);\r
+ cudaSafeCall(cudaMallocPitch(&minval_buf[0].data, &minval_buf[0].step, minval_buf[0].cols * sizeof(T), minval_buf[0].rows));\r
+ cudaSafeCall(cudaMallocPitch(&minval_buf[1].data, &minval_buf[1].step, minval_buf[1].cols * sizeof(T), minval_buf[1].rows));\r
+\r
+ DevMem2D maxval_buf[2]; \r
maxval_buf[0].cols = divUp(src.cols, threads.x); \r
maxval_buf[0].rows = divUp(src.rows, threads.y);\r
maxval_buf[1].cols = divUp(maxval_buf[0].cols, threads.x); \r
maxval_buf[1].rows = divUp(maxval_buf[0].rows, threads.y);\r
- cudaSafeCall(cudaMallocPitch(&minval_buf[0].data, &minval_buf[0].step, minval_buf[0].cols * sizeof(T), minval_buf[0].rows));\r
- cudaSafeCall(cudaMallocPitch(&minval_buf[1].data, &minval_buf[1].step, minval_buf[1].cols * sizeof(T), minval_buf[1].rows));\r
cudaSafeCall(cudaMallocPitch(&maxval_buf[0].data, &maxval_buf[0].step, maxval_buf[0].cols * sizeof(T), maxval_buf[0].rows));\r
cudaSafeCall(cudaMallocPitch(&maxval_buf[1].data, &maxval_buf[1].step, maxval_buf[1].cols * sizeof(T), maxval_buf[1].rows));\r
\r
// Explicit instantiations of min_max_caller for floating-point element types
// (other element types are presumably instantiated in diff context not shown
// here — TODO confirm).
template void min_max_caller<float>(const DevMem2D, double*, double*);
template void min_max_caller<double>(const DevMem2D, double*, double*);
\r
+ // OptLoc<T, op>: one pairwise step of a shared-memory min/max reduction
+ // that also tracks the location (flat index) of the winning element.
+ // Specialized on op (MIN or MAX); the primary template is intentionally
+ // empty so an unsupported op fails to compile.
+ template <typename T, int op> struct OptLoc {};
+ 
+ // MIN: keep the smaller of optval[tid] and optval[tid + offset], carrying
+ // the corresponding entry of optloc along with it.  optval/optloc are
+ // volatile shared-memory arrays (volatile so the warp-synchronous tail of
+ // the callers re-reads memory each step).
+ template <typename T>
+ struct OptLoc<T, MIN> 
+ {
+ static __device__ void call(unsigned int tid, unsigned int offset, volatile T* optval, volatile unsigned int* optloc)
+ {
+ T val = optval[tid + offset];
+ if (val < optval[tid])
+ {
+ optval[tid] = val;
+ optloc[tid] = optloc[tid + offset];
+ }
+ }
+ };
+
+ // MAX: same as the MIN specialization with the comparison reversed.
+ template <typename T>
+ struct OptLoc<T, MAX> 
+ {
+ static __device__ void call(unsigned int tid, unsigned int offset, volatile T* optval, volatile unsigned int* optloc)
+ {
+ T val = optval[tid + offset];
+ if (val > optval[tid])
+ {
+ optval[tid] = val;
+ optloc[tid] = optloc[tid + offset];
+ }
+ }
+ };
+\r
+ // First pass of the min/max-with-location reduction.  Each block reduces
+ // one blockDim-sized tile of `src` (T elements) to a single value and its
+ // flat location (row * cols + col in the original image), written at
+ // element (blockIdx.x) of row (blockIdx.y) in optval/optloc.
+ // Assumes blockDim.x * blockDim.y == nthreads (the caller below launches
+ // with 32x8 threads and nthreads = 256).
+ template <int nthreads, int op, typename T>
+ __global__ void opt_loc_init_kernel(int cols, int rows, const PtrStep src, PtrStep optval, PtrStep optloc)
+ {
+ typedef typename MinMaxTypeTraits<T>::best_type best_type;
+ __shared__ best_type soptval[nthreads];
+ __shared__ unsigned int soptloc[nthreads];
+
+ unsigned int x0 = blockIdx.x * blockDim.x;
+ unsigned int y0 = blockIdx.y * blockDim.y;
+ unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
+
+ // Out-of-range threads duplicate the block-origin pixel instead of
+ // loading garbage — a duplicate is neutral for a min/max reduction.
+ if (x0 + threadIdx.x < cols && y0 + threadIdx.y < rows)
+ {
+ soptval[tid] = ((const T*)src.ptr(y0 + threadIdx.y))[x0 + threadIdx.x];
+ soptloc[tid] = (y0 + threadIdx.y) * cols + x0 + threadIdx.x;
+ }
+ else
+ {
+ soptval[tid] = ((const T*)src.ptr(y0))[x0];
+ soptloc[tid] = y0 * cols + x0;
+ }
+
+ __syncthreads();
+
+ // Tree reduction in shared memory.
+ // NOTE(review): these __syncthreads() calls sit inside divergent
+ // `if (tid < ...)` branches; this mirrors the classic SDK reduction but
+ // is not safe under independent thread scheduling — verify on target
+ // architectures or hoist the barriers out of the conditionals.
+ if (nthreads >= 512) if (tid < 256) { OptLoc<best_type, op>::call(tid, 256, soptval, soptloc); __syncthreads(); }
+ if (nthreads >= 256) if (tid < 128) { OptLoc<best_type, op>::call(tid, 128, soptval, soptloc); __syncthreads(); }
+ if (nthreads >= 128) if (tid < 64) { OptLoc<best_type, op>::call(tid, 64, soptval, soptloc); __syncthreads(); }
+
+ // Final warp: relies on implicit warp-synchronous execution plus the
+ // volatile shared pointers.  NOTE(review): Volta+ would need
+ // __syncwarp() between steps — confirm supported hardware.
+ if (tid < 32)
+ {
+ if (nthreads >= 64) OptLoc<best_type, op>::call(tid, 32, soptval, soptloc);
+ if (nthreads >= 32) OptLoc<best_type, op>::call(tid, 16, soptval, soptloc);
+ if (nthreads >= 16) OptLoc<best_type, op>::call(tid, 8, soptval, soptloc);
+ if (nthreads >= 8) OptLoc<best_type, op>::call(tid, 4, soptval, soptloc);
+ if (nthreads >= 4) OptLoc<best_type, op>::call(tid, 2, soptval, soptloc);
+ if (nthreads >= 2) OptLoc<best_type, op>::call(tid, 1, soptval, soptloc);
+ }
+
+ // Thread 0 publishes this block's partial result.
+ if (tid == 0) 
+ {
+ ((T*)optval.ptr(blockIdx.y))[blockIdx.x] = (T)soptval[0];
+ ((unsigned int*)optloc.ptr(blockIdx.y))[blockIdx.x] = soptloc[0];
+ }
+ }
+\r
+ // Subsequent passes of the min/max-with-location reduction.  Identical to
+ // opt_loc_init_kernel except that locations are read from the previous
+ // pass's `loc` buffer instead of being synthesized from pixel coordinates.
+ // Assumes blockDim.x * blockDim.y == nthreads.
+ template <int nthreads, int op, typename T>
+ __global__ void opt_loc_kernel(int cols, int rows, const PtrStep src, const PtrStep loc, PtrStep optval, PtrStep optloc)
+ {
+ typedef typename MinMaxTypeTraits<T>::best_type best_type;
+ __shared__ best_type soptval[nthreads];
+ __shared__ unsigned int soptloc[nthreads];
+
+ unsigned int x0 = blockIdx.x * blockDim.x;
+ unsigned int y0 = blockIdx.y * blockDim.y;
+ unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
+
+ // Out-of-range threads duplicate the block-origin entry (neutral for
+ // min/max) rather than reading past the buffer.
+ if (x0 + threadIdx.x < cols && y0 + threadIdx.y < rows)
+ {
+ soptval[tid] = ((const T*)src.ptr(y0 + threadIdx.y))[x0 + threadIdx.x];
+ soptloc[tid] = ((const unsigned int*)loc.ptr(y0 + threadIdx.y))[x0 + threadIdx.x];
+ }
+ else
+ {
+ soptval[tid] = ((const T*)src.ptr(y0))[x0];
+ soptloc[tid] = ((const unsigned int*)loc.ptr(y0))[x0];
+ }
+
+ __syncthreads();
+
+ // Tree reduction in shared memory.
+ // NOTE(review): divergent __syncthreads() — same caveat as in
+ // opt_loc_init_kernel; verify on architectures with independent thread
+ // scheduling.
+ if (nthreads >= 512) if (tid < 256) { OptLoc<best_type, op>::call(tid, 256, soptval, soptloc); __syncthreads(); }
+ if (nthreads >= 256) if (tid < 128) { OptLoc<best_type, op>::call(tid, 128, soptval, soptloc); __syncthreads(); }
+ if (nthreads >= 128) if (tid < 64) { OptLoc<best_type, op>::call(tid, 64, soptval, soptloc); __syncthreads(); }
+
+ // Warp-synchronous tail (volatile shared pointers; no explicit barrier).
+ if (tid < 32)
+ {
+ if (nthreads >= 64) OptLoc<best_type, op>::call(tid, 32, soptval, soptloc);
+ if (nthreads >= 32) OptLoc<best_type, op>::call(tid, 16, soptval, soptloc);
+ if (nthreads >= 16) OptLoc<best_type, op>::call(tid, 8, soptval, soptloc);
+ if (nthreads >= 8) OptLoc<best_type, op>::call(tid, 4, soptval, soptloc);
+ if (nthreads >= 4) OptLoc<best_type, op>::call(tid, 2, soptval, soptloc);
+ if (nthreads >= 2) OptLoc<best_type, op>::call(tid, 1, soptval, soptloc);
+ }
+
+ // Thread 0 publishes this block's partial result.
+ if (tid == 0) 
+ {
+ ((T*)optval.ptr(blockIdx.y))[blockIdx.x] = (T)soptval[0];
+ ((unsigned int*)optloc.ptr(blockIdx.y))[blockIdx.x] = soptloc[0];
+ }
+ }
+\r
+ // Host entry point: computes the global minimum and maximum of `src`
+ // (element type T) together with their (x, y) locations.  Runs one
+ // opt_loc_init_kernel pass over the image, then ping-pongs
+ // opt_loc_kernel over shrinking partial-result buffers until a single
+ // value/location pair remains, and copies the four results to the host
+ // output pointers.  All temporary buffers are allocated and freed per
+ // call.
+ template <typename T>
+ void min_max_loc_caller(const DevMem2D src, double* minval, double* maxval, int* minlocx, int* minlocy, 
+ int* maxlocx, int* maxlocy)
+ {
+ dim3 threads(32, 8);
+
+ // Allocate memory for aux. buffers
+ // Two pitched buffers per quantity (value and flat location, for min and
+ // max): buffer [0] holds the first-pass partials (one entry per block),
+ // buffer [1] the second level; later passes ping-pong between them.
+
+ DevMem2D minval_buf[2]; 
+ minval_buf[0].cols = divUp(src.cols, threads.x); 
+ minval_buf[0].rows = divUp(src.rows, threads.y);
+ minval_buf[1].cols = divUp(minval_buf[0].cols, threads.x); 
+ minval_buf[1].rows = divUp(minval_buf[0].rows, threads.y);
+ cudaSafeCall(cudaMallocPitch(&minval_buf[0].data, &minval_buf[0].step, minval_buf[0].cols * sizeof(T), minval_buf[0].rows));
+ cudaSafeCall(cudaMallocPitch(&minval_buf[1].data, &minval_buf[1].step, minval_buf[1].cols * sizeof(T), minval_buf[1].rows));
+
+ DevMem2D maxval_buf[2]; 
+ maxval_buf[0].cols = divUp(src.cols, threads.x); 
+ maxval_buf[0].rows = divUp(src.rows, threads.y);
+ maxval_buf[1].cols = divUp(maxval_buf[0].cols, threads.x); 
+ maxval_buf[1].rows = divUp(maxval_buf[0].rows, threads.y);
+ cudaSafeCall(cudaMallocPitch(&maxval_buf[0].data, &maxval_buf[0].step, maxval_buf[0].cols * sizeof(T), maxval_buf[0].rows));
+ cudaSafeCall(cudaMallocPitch(&maxval_buf[1].data, &maxval_buf[1].step, maxval_buf[1].cols * sizeof(T), maxval_buf[1].rows));
+
+ // Location buffers store flat indices as unsigned int (sizeof(int)
+ // rows allocated here; same size on supported platforms).
+ DevMem2D minloc_buf[2]; 
+ minloc_buf[0].cols = divUp(src.cols, threads.x); 
+ minloc_buf[0].rows = divUp(src.rows, threads.y);
+ minloc_buf[1].cols = divUp(minloc_buf[0].cols, threads.x); 
+ minloc_buf[1].rows = divUp(minloc_buf[0].rows, threads.y);
+ cudaSafeCall(cudaMallocPitch(&minloc_buf[0].data, &minloc_buf[0].step, minloc_buf[0].cols * sizeof(int), minloc_buf[0].rows));
+ cudaSafeCall(cudaMallocPitch(&minloc_buf[1].data, &minloc_buf[1].step, minloc_buf[1].cols * sizeof(int), minloc_buf[1].rows));
+
+ DevMem2D maxloc_buf[2]; 
+ maxloc_buf[0].cols = divUp(src.cols, threads.x); 
+ maxloc_buf[0].rows = divUp(src.rows, threads.y);
+ maxloc_buf[1].cols = divUp(maxloc_buf[0].cols, threads.x); 
+ maxloc_buf[1].rows = divUp(maxloc_buf[0].rows, threads.y);
+ cudaSafeCall(cudaMallocPitch(&maxloc_buf[0].data, &maxloc_buf[0].step, maxloc_buf[0].cols * sizeof(int), maxloc_buf[0].rows));
+ cudaSafeCall(cudaMallocPitch(&maxloc_buf[1].data, &maxloc_buf[1].step, maxloc_buf[1].cols * sizeof(int), maxloc_buf[1].rows));
+
+ // First pass reduces the image itself; each subsequent pass reduces the
+ // previous pass's partials, swapping curbuf each iteration, until the
+ // partial grid is 1x1.
+ int curbuf = 0;
+ dim3 cursize(src.cols, src.rows);
+ dim3 grid(divUp(cursize.x, threads.x), divUp(cursize.y, threads.y));
+
+ opt_loc_init_kernel<256, MIN, T><<<grid, threads>>>(cursize.x, cursize.y, src, minval_buf[curbuf], minloc_buf[curbuf]);
+ opt_loc_init_kernel<256, MAX, T><<<grid, threads>>>(cursize.x, cursize.y, src, maxval_buf[curbuf], maxloc_buf[curbuf]);
+ cursize = grid;
+ 
+ while (cursize.x > 1 || cursize.y > 1)
+ {
+ grid.x = divUp(cursize.x, threads.x); 
+ grid.y = divUp(cursize.y, threads.y); 
+ opt_loc_kernel<256, MIN, T><<<grid, threads>>>(cursize.x, cursize.y, minval_buf[curbuf], minloc_buf[curbuf], 
+ minval_buf[1 - curbuf], minloc_buf[1 - curbuf]);
+ opt_loc_kernel<256, MAX, T><<<grid, threads>>>(cursize.x, cursize.y, maxval_buf[curbuf], maxloc_buf[curbuf], 
+ maxval_buf[1 - curbuf], maxloc_buf[1 - curbuf]);
+ curbuf = 1 - curbuf;
+ cursize = grid;
+ }
+
+ // NOTE(review): cudaThreadSynchronize is deprecated in favor of
+ // cudaDeviceSynchronize, and no cudaGetLastError() check follows the
+ // launches above — kernel launch-config errors would go unnoticed here.
+ cudaSafeCall(cudaThreadSynchronize());
+
+ // Copy results from device to host
+
+ T minval_, maxval_;
+ cudaSafeCall(cudaMemcpy(&minval_, minval_buf[curbuf].ptr(0), sizeof(T), cudaMemcpyDeviceToHost));
+ cudaSafeCall(cudaMemcpy(&maxval_, maxval_buf[curbuf].ptr(0), sizeof(T), cudaMemcpyDeviceToHost));
+ *minval = minval_;
+ *maxval = maxval_;
+
+ // Locations were carried through the reduction as flat indices
+ // (row * src.cols + col); unpack them into (x, y).
+ unsigned int minloc, maxloc;
+ cudaSafeCall(cudaMemcpy(&minloc, minloc_buf[curbuf].ptr(0), sizeof(int), cudaMemcpyDeviceToHost));
+ cudaSafeCall(cudaMemcpy(&maxloc, maxloc_buf[curbuf].ptr(0), sizeof(int), cudaMemcpyDeviceToHost));
+ *minlocy = minloc / src.cols; *minlocx = minloc - *minlocy * src.cols;
+ *maxlocy = maxloc / src.cols; *maxlocx = maxloc - *maxlocy * src.cols;
+
+ // Release aux. buffers
+ cudaSafeCall(cudaFree(minval_buf[0].data));
+ cudaSafeCall(cudaFree(minval_buf[1].data));
+ cudaSafeCall(cudaFree(maxval_buf[0].data));
+ cudaSafeCall(cudaFree(maxval_buf[1].data));
+ cudaSafeCall(cudaFree(minloc_buf[0].data));
+ cudaSafeCall(cudaFree(minloc_buf[1].data));
+ cudaSafeCall(cudaFree(maxloc_buf[0].data));
+ cudaSafeCall(cudaFree(maxloc_buf[1].data));
+ }
+\r
+ // Explicit instantiations of min_max_loc_caller for every supported
+ // element type.
+ template void min_max_loc_caller<unsigned char>(const DevMem2D, double*, double*, int*, int*, int*, int*);
+ template void min_max_loc_caller<signed char>(const DevMem2D, double*, double*, int*, int*, int*, int*);
+ template void min_max_loc_caller<unsigned short>(const DevMem2D, double*, double*, int*, int*, int*, int*);
+ template void min_max_loc_caller<signed short>(const DevMem2D, double*, double*, int*, int*, int*, int*);
+ template void min_max_loc_caller<int>(const DevMem2D, double*, double*, int*, int*, int*, int*);
+ template void min_max_loc_caller<float>(const DevMem2D, double*, double*, int*, int*, int*, int*);
+ template void min_max_loc_caller<double>(const DevMem2D, double*, double*, int*, int*, int*, int*);
+\r
}}}\r