////////////////////////////////////////////////////////////////////////\r
// minMax\r
\r
-namespace cv { namespace gpu { namespace mathfunc {\r
+namespace cv { namespace gpu { namespace mathfunc { namespace minmax {\r
+\r
+ void get_buf_size_required(int elem_size, int& b1cols, int& b1rows, \r
+ int& b2cols, int& b2rows);\r
+ \r
template <typename T> \r
- void min_max_caller(const DevMem2D src, double* minval, double* maxval);\r
-}}}\r
+ void min_max_caller(const DevMem2D src, double* minval, double* maxval, \r
+ unsigned char* minval_buf, unsigned char* maxval_buf);\r
+\r
+ template <typename T> \r
+ void min_max_caller_2steps(const DevMem2D src, double* minval, double* maxval, \r
+ unsigned char* minval_buf, unsigned char* maxval_buf);\r
+\r
+}}}}\r
\r
void cv::gpu::minMax(const GpuMat& src, double* minVal, double* maxVal)\r
{\r
- GpuMat src_ = src.reshape(1);\r
+ using namespace mathfunc::minmax;\r
\r
double maxVal_;\r
- if (!maxVal) \r
- maxVal = &maxVal_;\r
+ if (!maxVal) maxVal = &maxVal_;
+
+ double minVal_;
+ if (!minVal) minVal = &minVal_;
+\r
+ GpuMat src_ = src.reshape(1);\r
+\r
+ // Allocate GPU buffers\r
+ Size b1size, b2size;\r
+ get_buf_size_required(src.elemSize(), b1size.width, b1size.height, b2size.width, b2size.height);\r
+ GpuMat b1(b1size, CV_8U), b2(b2size, CV_8U);\r
+\r
+ int major, minor;\r
+ getComputeCapability(getDevice(), major, minor);\r
\r
- switch (src_.type())\r
+ // The atomics-based single-pass reduction requires compute capability >= 1.1
+ if (major > 1 || (major == 1 && minor >= 1))
{\r
- case CV_8U:\r
- mathfunc::min_max_caller<unsigned char>(src_, minVal, maxVal);\r
- break;\r
- case CV_8S:\r
- mathfunc::min_max_caller<signed char>(src_, minVal, maxVal);\r
- break;\r
- case CV_16U:\r
- mathfunc::min_max_caller<unsigned short>(src_, minVal, maxVal);\r
- break;\r
- case CV_16S:\r
- mathfunc::min_max_caller<signed short>(src_, minVal, maxVal);\r
- break;\r
- case CV_32S:\r
- mathfunc::min_max_caller<int>(src_, minVal, maxVal);\r
- break;\r
- case CV_32F:\r
- mathfunc::min_max_caller<float>(src_, minVal, maxVal);\r
- break;\r
- case CV_64F:\r
- mathfunc::min_max_caller<double>(src_, minVal, maxVal);\r
- break;\r
- default:\r
- CV_Error(CV_StsBadArg, "Unsupported type");\r
+ switch (src_.type())\r
+ {\r
+ case CV_8U: min_max_caller<unsigned char>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ case CV_8S: min_max_caller<signed char>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ case CV_16U: min_max_caller<unsigned short>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ case CV_16S: min_max_caller<signed short>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ case CV_32S: min_max_caller<int>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ case CV_32F: min_max_caller<float>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ case CV_64F: min_max_caller<double>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ default: CV_Error(CV_StsBadArg, "Unsupported type");\r
+ }\r
+ }\r
+ else\r
+ {\r
+ switch (src_.type())\r
+ {\r
+ case CV_8U: min_max_caller_2steps<unsigned char>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ case CV_8S: min_max_caller_2steps<signed char>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ case CV_16U: min_max_caller_2steps<unsigned short>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ case CV_16S: min_max_caller_2steps<signed short>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ case CV_32S: min_max_caller_2steps<int>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ case CV_32F: min_max_caller_2steps<float>(src_, minVal, maxVal, b1.data, b2.data); break;\r
+ default: CV_Error(CV_StsBadArg, "Unsupported type");\r
+ }\r
}\r
}\r
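+
+// A minimal host-side usage sketch (the file name here is hypothetical; it
+// only illustrates the call contract of cv::gpu::minMax):
+//
+//     cv::Mat host = cv::imread("image.png", 0); // any single-channel image
+//     cv::gpu::GpuMat dev(host);                 // upload to the device
+//     double mn, mx;
+//     cv::gpu::minMax(dev, &mn, &mx);            // mn/mx receive the extrema
+//
+// Passing a null pointer for either output skips that value.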
\r
////////////////////////////////////////////////////////////////////////\r
// minMaxLoc\r
\r
-namespace cv { namespace gpu { namespace mathfunc {\r
+namespace cv { namespace gpu { namespace mathfunc { namespace minmaxloc {\r
+\r
template <typename T> \r
- void min_max_loc_caller(const DevMem2D src, double* minval, double* maxval, int* minlocx, int* minlocy,\r
- int* maxlocx, int* maxlocy);\r
-}}}\r
+ void min_max_loc_caller(const DevMem2D src, double* minval, double* maxval, \r
+ int* minlocx, int* minlocy, int* maxlocx, int* maxlocy);\r
+\r
+}}}}\r
\r
void cv::gpu::minMaxLoc(const GpuMat& src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc)\r
{\r
+ using namespace mathfunc::minmaxloc;\r
+\r
CV_Assert(src.channels() == 1);\r
\r
double maxVal_;\r
switch (src.type())\r
{\r
case CV_8U:\r
- mathfunc::min_max_loc_caller<unsigned char>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
+ min_max_loc_caller<unsigned char>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
break;\r
case CV_8S:\r
- mathfunc::min_max_loc_caller<signed char>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
+ min_max_loc_caller<signed char>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
break;\r
case CV_16U:\r
- mathfunc::min_max_loc_caller<unsigned short>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
+ min_max_loc_caller<unsigned short>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
break;\r
case CV_16S:\r
- mathfunc::min_max_loc_caller<signed short>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
+ min_max_loc_caller<signed short>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
break;\r
case CV_32S:\r
- mathfunc::min_max_loc_caller<int>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
+ min_max_loc_caller<int>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
break;\r
case CV_32F:\r
- mathfunc::min_max_loc_caller<float>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
+ min_max_loc_caller<float>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
break;\r
case CV_64F:\r
- mathfunc::min_max_loc_caller<double>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
+ min_max_loc_caller<double>(src, minVal, maxVal, &minLoc->x, &minLoc->y, &maxLoc->x, &maxLoc->y);\r
break;\r
default:\r
        CV_Error(CV_StsBadArg, "Unsupported type");
    }
}
\r
#include "cuda_shared.hpp"\r
#include "transform.hpp"\r
+#include "limits_gpu.hpp"\r
\r
using namespace cv::gpu;\r
+using namespace cv::gpu::device;\r
\r
#ifndef CV_PI\r
#define CV_PI 3.1415926535897932384626433832795f
#endif
//////////////////////////////////////////////////////////////////////////////\r
// Min max\r
\r
- enum { MIN, MAX }; \r
-\r
+ // To avoid shared memory bank conflicts we convert each value into a value of
+ // an appropriate type (32 bits minimum)
template <typename T> struct MinMaxTypeTraits {};\r
template <> struct MinMaxTypeTraits<unsigned char> { typedef int best_type; };\r
template <> struct MinMaxTypeTraits<signed char> { typedef int best_type; };
template <> struct MinMaxTypeTraits<unsigned short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<signed short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };\r
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };\r
\r
- template <typename T, int op> struct Opt {};\r
- \r
- template <typename T>\r
- struct Opt<T, MIN> \r
+ // Available optimization operations\r
+ enum { OP_MIN, OP_MAX };\r
+\r
+ namespace minmax \r
{\r
- static __device__ void call(unsigned int tid, unsigned int offset, volatile T* optval)\r
- {\r
- optval[tid] = min(optval[tid], optval[tid + offset]); \r
- }\r
- };\r
\r
+ __constant__ int ctwidth;\r
+ __constant__ int ctheight;\r
+\r
+ static const unsigned int czero = 0;\r
+\r
+ // Estimates a good thread configuration
+ // (the threads variable must satisfy threads.x * threads.y == 256)
+ void estimate_thread_cfg(dim3& threads, dim3& grid)\r
+ {\r
+ threads = dim3(64, 4);\r
+ grid = dim3(6, 5);\r
+ }\r
+\r
+ // Returns required buffer sizes\r
+ void get_buf_size_required(int elem_size, int& b1cols, int& b1rows, int& b2cols, int& b2rows)\r
+ {\r
+ dim3 threads, grid;\r
+ estimate_thread_cfg(threads, grid);\r
+ b1cols = grid.x * grid.y * elem_size; b1rows = 1;\r
+ b2cols = grid.x * grid.y * elem_size; b2rows = 1;\r
+ }\r
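+
+ // For example, with the fixed 6x5 grid above and a CV_32F source
+ // (elem_size == 4), each buffer is 6 * 5 * 4 = 120 bytes in a single row:
+ // one partial result per block, reinterpreted as T inside the kernels.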
+\r
+ // Computes the device constants used in the kernels for the specified thread configuration
+ void estimate_kernel_consts(int cols, int rows, const dim3& threads, const dim3& grid)\r
+ { \r
+ int twidth = divUp(divUp(cols, grid.x), threads.x);\r
+ int theight = divUp(divUp(rows, grid.y), threads.y);\r
+ cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth))); \r
+ cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(ctheight))); \r
+ } \r
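+
+ // Worked example: for a 1920x1080 single-channel image with the fixed 64x4
+ // threads and 6x5 grid, twidth = divUp(divUp(1920, 6), 64) = 5 and
+ // theight = divUp(divUp(1080, 5), 4) = 54, i.e. each thread strides over
+ // at most a 5x54 patch of pixels.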
+\r
+ // Merges the min and max values held in shared memory (one tree-reduction step)
template <typename T>\r
- struct Opt<T, MAX> \r
+ __device__ void merge(unsigned int tid, unsigned int offset, volatile T* minval, volatile T* maxval)\r
{\r
- static __device__ void call(unsigned int tid, unsigned int offset, volatile T* optval)\r
- {\r
- optval[tid] = max(optval[tid], optval[tid + offset]);\r
- }\r
- };\r
+ minval[tid] = min(minval[tid], minval[tid + offset]);\r
+ maxval[tid] = max(maxval[tid], maxval[tid + offset]);\r
+ }\r
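+
+ // The shared arrays are taken as volatile because the final reduction steps
+ // below run warp-synchronously, without __syncthreads(); volatile forces
+ // each access to go to shared memory rather than to a stale register copy.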
\r
+ // Global counter of blocks that have finished their work
+ __device__ unsigned int blocks_finished;\r
\r
- template <int nthreads, int op, typename T>\r
- __global__ void opt_kernel(int cols, int rows, const PtrStep src, PtrStep optval)\r
+ template <int nthreads, typename T>\r
+ __global__ void min_max_kernel(int cols, int rows, const PtrStep src, T* minval, T* maxval)\r
{\r
typedef typename MinMaxTypeTraits<T>::best_type best_type;\r
- __shared__ best_type soptval[nthreads];\r
+ __shared__ best_type sminval[nthreads];\r
+ __shared__ best_type smaxval[nthreads];\r
\r
- unsigned int x0 = blockIdx.x * blockDim.x;\r
- unsigned int y0 = blockIdx.y * blockDim.y;\r
+ unsigned int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;\r
+ unsigned int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;\r
unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;\r
\r
- if (x0 + threadIdx.x < cols && y0 + threadIdx.y < rows)\r
- soptval[tid] = ((const T*)src.ptr(y0 + threadIdx.y))[x0 + threadIdx.x];\r
- else\r
- soptval[tid] = ((const T*)src.ptr(y0))[x0];\r
+ T val;\r
+ T mymin = numeric_limits_gpu<T>::max();\r
+ T mymax = numeric_limits_gpu<T>::min();\r
+ for (unsigned int y = 0; y < ctheight && y0 + y * blockDim.y < rows; ++y)\r
+ {\r
+ const T* ptr = (const T*)src.ptr(y0 + y * blockDim.y);\r
+ for (unsigned int x = 0; x < ctwidth && x0 + x * blockDim.x < cols; ++x)\r
+ {\r
+ val = ptr[x0 + x * blockDim.x];\r
+ mymin = min(mymin, val);\r
+ mymax = max(mymax, val);\r
+ }\r
+ }\r
+\r
+ sminval[tid] = mymin;\r
+ smaxval[tid] = mymax;\r
\r
__syncthreads();\r
\r
- if (nthreads >= 512) if (tid < 256) { Opt<best_type, op>::call(tid, 256, soptval); __syncthreads(); }\r
- if (nthreads >= 256) if (tid < 128) { Opt<best_type, op>::call(tid, 128, soptval); __syncthreads(); }\r
- if (nthreads >= 128) if (tid < 64) { Opt<best_type, op>::call(tid, 64, soptval); __syncthreads(); }\r
+ if (nthreads >= 512) if (tid < 256) { merge(tid, 256, sminval, smaxval); __syncthreads(); }\r
+ if (nthreads >= 256) if (tid < 128) { merge(tid, 128, sminval, smaxval); __syncthreads(); }\r
+ if (nthreads >= 128) if (tid < 64) { merge(tid, 64, sminval, smaxval); __syncthreads(); }\r
\r
if (tid < 32)\r
{\r
- if (nthreads >= 64) Opt<best_type, op>::call(tid, 32, soptval);\r
- if (nthreads >= 32) Opt<best_type, op>::call(tid, 16, soptval);\r
- if (nthreads >= 16) Opt<best_type, op>::call(tid, 8, soptval);\r
- if (nthreads >= 8) Opt<best_type, op>::call(tid, 4, soptval);\r
- if (nthreads >= 4) Opt<best_type, op>::call(tid, 2, soptval);\r
- if (nthreads >= 2) Opt<best_type, op>::call(tid, 1, soptval);\r
+ if (nthreads >= 64) merge(tid, 32, sminval, smaxval);\r
+ if (nthreads >= 32) merge(tid, 16, sminval, smaxval);\r
+ if (nthreads >= 16) merge(tid, 8, sminval, smaxval);\r
+ if (nthreads >= 8) merge(tid, 4, sminval, smaxval);\r
+ if (nthreads >= 4) merge(tid, 2, sminval, smaxval);\r
+ if (nthreads >= 2) merge(tid, 1, sminval, smaxval);\r
}\r
\r
- if (tid == 0) ((T*)optval.ptr(blockIdx.y))[blockIdx.x] = (T)soptval[0];\r
+ __syncthreads();\r
+\r
+ if (tid == 0) \r
+ {\r
+ minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];\r
+ maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];\r
+ }\r
+\r
+#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110\r
+ \r
+ // Merge the per-block partial results in thread 0 of the last block to finish
+ if ((gridDim.x > 1 || gridDim.y > 1) && tid == 0)\r
+ {\r
+ __threadfence();\r
+ if (atomicInc(&blocks_finished, gridDim.x * gridDim.y) == gridDim.x * gridDim.y - 1)\r
+ {\r
+ mymin = numeric_limits_gpu<T>::max();\r
+ mymax = numeric_limits_gpu<T>::min();\r
+ for (unsigned int i = 0; i < gridDim.x * gridDim.y; ++i)\r
+ { \r
+ mymin = min(mymin, minval[i]);\r
+ mymax = max(mymax, maxval[i]);\r
+ }\r
+ minval[0] = mymin;\r
+ maxval[0] = mymax;\r
+ }\r
+ }\r
+\r
+#endif\r
+ }\r
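+
+ // A note on the termination trick above: atomicInc(&blocks_finished, N)
+ // returns the old counter value and wraps it to 0 once it reaches N, so
+ // with N = gridDim.x * gridDim.y the block that observes N - 1 is provably
+ // the last one still running. The preceding __threadfence() guarantees this
+ // block sees the other blocks' partial results in minval/maxval, and the
+ // host resets blocks_finished to zero (czero) before every launch.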
+\r
+ // This kernel is used only on compute capability 1.0 devices
+ template <typename T>\r
+ __global__ void min_max_kernel_2ndstep(T* minval, T* maxval, int size)\r
+ {\r
+ T val;\r
+ T mymin = numeric_limits_gpu<T>::max();\r
+ T mymax = numeric_limits_gpu<T>::min();\r
+ for (unsigned int i = 0; i < size; ++i)\r
+ { \r
+ val = minval[i]; if (val < mymin) mymin = val;\r
+ val = maxval[i]; if (val > mymax) mymax = val;\r
+ }\r
+ minval[0] = mymin;\r
+ maxval[0] = mymax;\r
}\r
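+
+ // Compute capability 1.0 devices lack global-memory atomics, so the
+ // cross-block fold cannot run inside min_max_kernel there; a <<<1, 1>>>
+ // launch of this kernel is the simplest correct way to fold the
+ // grid.x * grid.y partial pairs.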
\r
template <typename T>\r
- void min_max_caller(const DevMem2D src, double* minval, double* maxval)\r
+ void min_max_caller(const DevMem2D src, double* minval, double* maxval, \r
+ unsigned char* minval_buf, unsigned char* maxval_buf)\r
{\r
- dim3 threads(32, 8);\r
+ dim3 threads, grid;\r
+ estimate_thread_cfg(threads, grid);\r
+ estimate_kernel_consts(src.cols, src.rows, threads, grid);\r
\r
- // Allocate memory for aux. buffers\r
- DevMem2D minval_buf[2]; \r
- minval_buf[0].cols = divUp(src.cols, threads.x); \r
- minval_buf[0].rows = divUp(src.rows, threads.y);\r
- minval_buf[1].cols = divUp(minval_buf[0].cols, threads.x); \r
- minval_buf[1].rows = divUp(minval_buf[0].rows, threads.y);\r
- cudaSafeCall(cudaMallocPitch(&minval_buf[0].data, &minval_buf[0].step, minval_buf[0].cols * sizeof(T), minval_buf[0].rows));\r
- cudaSafeCall(cudaMallocPitch(&minval_buf[1].data, &minval_buf[1].step, minval_buf[1].cols * sizeof(T), minval_buf[1].rows));\r
+ cudaSafeCall(cudaMemcpyToSymbol(blocks_finished, &czero, sizeof(blocks_finished)));\r
+ min_max_kernel<256, T><<<grid, threads>>>(src.cols, src.rows, src, (T*)minval_buf, (T*)maxval_buf);\r
\r
- DevMem2D maxval_buf[2]; \r
- maxval_buf[0].cols = divUp(src.cols, threads.x); \r
- maxval_buf[0].rows = divUp(src.rows, threads.y);\r
- maxval_buf[1].cols = divUp(maxval_buf[0].cols, threads.x); \r
- maxval_buf[1].rows = divUp(maxval_buf[0].rows, threads.y);\r
- cudaSafeCall(cudaMallocPitch(&maxval_buf[0].data, &maxval_buf[0].step, maxval_buf[0].cols * sizeof(T), maxval_buf[0].rows));\r
- cudaSafeCall(cudaMallocPitch(&maxval_buf[1].data, &maxval_buf[1].step, maxval_buf[1].cols * sizeof(T), maxval_buf[1].rows));\r
-\r
- int curbuf = 0;\r
- dim3 cursize(src.cols, src.rows);\r
- dim3 grid(divUp(cursize.x, threads.x), divUp(cursize.y, threads.y));\r
+ cudaSafeCall(cudaThreadSynchronize());\r
\r
- opt_kernel<256, MIN, T><<<grid, threads>>>(cursize.x, cursize.y, src, minval_buf[curbuf]);\r
- opt_kernel<256, MAX, T><<<grid, threads>>>(cursize.x, cursize.y, src, maxval_buf[curbuf]);\r
- cursize = grid;\r
+ T minval_, maxval_;\r
+ cudaSafeCall(cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost));\r
+ cudaSafeCall(cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost));\r
+ *minval = minval_;\r
+ *maxval = maxval_;\r
+ }\r
\r
- while (cursize.x > 1 || cursize.y > 1)\r
- {\r
- grid.x = divUp(cursize.x, threads.x); \r
- grid.y = divUp(cursize.y, threads.y); \r
- opt_kernel<256, MIN, T><<<grid, threads>>>(cursize.x, cursize.y, minval_buf[curbuf], minval_buf[1 - curbuf]);\r
- opt_kernel<256, MAX, T><<<grid, threads>>>(cursize.x, cursize.y, maxval_buf[curbuf], maxval_buf[1 - curbuf]);\r
- curbuf = 1 - curbuf;\r
- cursize = grid;\r
- }\r
+ template <typename T>\r
+ void min_max_caller_2steps(const DevMem2D src, double* minval, double* maxval, \r
+ unsigned char* minval_buf, unsigned char* maxval_buf)\r
+ {\r
+ dim3 threads, grid;\r
+ estimate_thread_cfg(threads, grid);\r
+ estimate_kernel_consts(src.cols, src.rows, threads, grid);\r
\r
+ cudaSafeCall(cudaMemcpyToSymbol(blocks_finished, &czero, sizeof(blocks_finished)));\r
+ min_max_kernel<256, T><<<grid, threads>>>(src.cols, src.rows, src, (T*)minval_buf, (T*)maxval_buf);\r
+ min_max_kernel_2ndstep<T><<<1, 1>>>((T*)minval_buf, (T*)maxval_buf, grid.x * grid.y);\r
cudaSafeCall(cudaThreadSynchronize());\r
\r
- // Copy results from device to host\r
T minval_, maxval_;\r
- cudaSafeCall(cudaMemcpy(&minval_, minval_buf[curbuf].ptr(0), sizeof(T), cudaMemcpyDeviceToHost));\r
- cudaSafeCall(cudaMemcpy(&maxval_, maxval_buf[curbuf].ptr(0), sizeof(T), cudaMemcpyDeviceToHost));\r
+ cudaSafeCall(cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost));\r
+ cudaSafeCall(cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost));\r
*minval = minval_;\r
*maxval = maxval_;\r
-\r
- // Release aux. buffers\r
- cudaSafeCall(cudaFree(minval_buf[0].data));\r
- cudaSafeCall(cudaFree(minval_buf[1].data));\r
- cudaSafeCall(cudaFree(maxval_buf[0].data));\r
- cudaSafeCall(cudaFree(maxval_buf[1].data));\r
}\r
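+
+ // Both kernels of the two-step path are issued on the same (default)
+ // stream, so the second launch is ordered after the first without any
+ // explicit synchronization; the cudaThreadSynchronize() also surfaces
+ // asynchronous launch errors before the device-to-host copies.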
\r
- template void min_max_caller<unsigned char>(const DevMem2D, double*, double*);\r
- template void min_max_caller<signed char>(const DevMem2D, double*, double*);\r
- template void min_max_caller<unsigned short>(const DevMem2D, double*, double*);\r
- template void min_max_caller<signed short>(const DevMem2D, double*, double*);\r
- template void min_max_caller<int>(const DevMem2D, double*, double*);\r
- template void min_max_caller<float>(const DevMem2D, double*, double*);\r
- template void min_max_caller<double>(const DevMem2D, double*, double*);\r
+ template void min_max_caller<unsigned char>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+ template void min_max_caller<signed char>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+ template void min_max_caller<unsigned short>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+ template void min_max_caller<signed short>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+ template void min_max_caller<int>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+ template void min_max_caller<float>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+ template void min_max_caller<double>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+\r
+ template void min_max_caller_2steps<unsigned char>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+ template void min_max_caller_2steps<signed char>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+ template void min_max_caller_2steps<unsigned short>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+ template void min_max_caller_2steps<signed short>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+ template void min_max_caller_2steps<int>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+ template void min_max_caller_2steps<float>(const DevMem2D, double*, double*, unsigned char*, unsigned char*);\r
+\r
+ } // namespace minmax\r
+\r
+ namespace minmaxloc {\r
\r
template <typename T, int op> struct OptLoc {};\r
\r
template <typename T>\r
- struct OptLoc<T, MIN> \r
+ struct OptLoc<T, OP_MIN> \r
{\r
static __device__ void call(unsigned int tid, unsigned int offset, volatile T* optval, volatile unsigned int* optloc)\r
{\r
};\r
\r
template <typename T>\r
- struct OptLoc<T, MAX> \r
+ struct OptLoc<T, OP_MAX> \r
{\r
static __device__ void call(unsigned int tid, unsigned int offset, volatile T* optval, volatile unsigned int* optloc)\r
{\r
dim3 cursize(src.cols, src.rows);\r
dim3 grid(divUp(cursize.x, threads.x), divUp(cursize.y, threads.y));\r
\r
- opt_loc_init_kernel<256, MIN, T><<<grid, threads>>>(cursize.x, cursize.y, src, minval_buf[curbuf], minloc_buf[curbuf]);\r
- opt_loc_init_kernel<256, MAX, T><<<grid, threads>>>(cursize.x, cursize.y, src, maxval_buf[curbuf], maxloc_buf[curbuf]);\r
+ opt_loc_init_kernel<256, OP_MIN, T><<<grid, threads>>>(cursize.x, cursize.y, src, minval_buf[curbuf], minloc_buf[curbuf]);\r
+ opt_loc_init_kernel<256, OP_MAX, T><<<grid, threads>>>(cursize.x, cursize.y, src, maxval_buf[curbuf], maxloc_buf[curbuf]);\r
cursize = grid;\r
\r
while (cursize.x > 1 || cursize.y > 1)\r
{\r
grid.x = divUp(cursize.x, threads.x); \r
grid.y = divUp(cursize.y, threads.y); \r
- opt_loc_kernel<256, MIN, T><<<grid, threads>>>(cursize.x, cursize.y, minval_buf[curbuf], minloc_buf[curbuf], \r
- minval_buf[1 - curbuf], minloc_buf[1 - curbuf]);\r
- opt_loc_kernel<256, MAX, T><<<grid, threads>>>(cursize.x, cursize.y, maxval_buf[curbuf], maxloc_buf[curbuf], \r
- maxval_buf[1 - curbuf], maxloc_buf[1 - curbuf]);\r
+ opt_loc_kernel<256, OP_MIN, T><<<grid, threads>>>(cursize.x, cursize.y, minval_buf[curbuf], minloc_buf[curbuf], \r
+ minval_buf[1 - curbuf], minloc_buf[1 - curbuf]);\r
+ opt_loc_kernel<256, OP_MAX, T><<<grid, threads>>>(cursize.x, cursize.y, maxval_buf[curbuf], maxloc_buf[curbuf], \r
+ maxval_buf[1 - curbuf], maxloc_buf[1 - curbuf]);\r
curbuf = 1 - curbuf;\r
cursize = grid;\r
}\r
template void min_max_loc_caller<float>(const DevMem2D, double*, double*, int*, int*, int*, int*);\r
template void min_max_loc_caller<double>(const DevMem2D, double*, double*, int*, int*, int*, int*);\r
\r
+ } // namespace minmaxloc\r
+\r
}}}\r