#if !defined CUDA_DISABLER
-#include <opencv2/gpu/device/lbp.hpp>
-#include <opencv2/gpu/device/vec_traits.hpp>
-#include <opencv2/gpu/device/saturate_cast.hpp>
+#include "lbp.hpp"
+#include "opencv2/gpu/device/vec_traits.hpp"
+#include "opencv2/gpu/device/saturate_cast.hpp"
namespace cv { namespace gpu { namespace device
{
}
}}}
-#endif /* CUDA_DISABLER */
\ No newline at end of file
+#endif /* CUDA_DISABLER */
return functor_type(); \\r
} \\r
};\r
+\r
+ #undef CV_DESCALE\r
}}} // namespace cv { namespace gpu { namespace device\r
\r
#endif // __OPENCV_GPU_COLOR_DETAIL_HPP__\r
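
The #undef added above keeps CV_DESCALE from leaking out of color_detail.hpp into every translation unit that includes it. For reference, a sketch of the macro as OpenCV defines it elsewhere: a fixed-point downscale by n bits with round-to-nearest.

    // Round-to-nearest fixed-point descale: add half of 2^n, then shift.
    #define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n))

    // e.g. an RGB -> grey conversion in 14-bit fixed point (coefficient
    // names are illustrative, not quoted from this file):
    // int grey = CV_DESCALE(r * R2Y + g * G2Y + b * B2Y, 14);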
//\r
//M*/\r
\r
-#ifndef __OPENCV_GPU_UTILITY_DETAIL_HPP__\r
-#define __OPENCV_GPU_UTILITY_DETAIL_HPP__\r
+#ifndef __OPENCV_GPU_REDUCTION_DETAIL_HPP__\r
+#define __OPENCV_GPU_REDUCTION_DETAIL_HPP__\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
namespace utility_detail\r
{\r
///////////////////////////////////////////////////////////////////////////////\r
- // Reduction\r
+ // Reductor\r
\r
template <int n> struct WarpReductor\r
{\r
template <typename T, typename Op> static __device__ __forceinline__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op)\r
{\r
if (tid < n)\r
- data[tid] = partial_reduction; \r
+ data[tid] = partial_reduction;\r
if (n > 32) __syncthreads();\r
\r
if (n > 32)\r
{\r
- if (tid < n - 32) \r
+ if (tid < n - 32)\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 32]);\r
if (tid < 16)\r
{\r
}\r
else if (n > 16)\r
{\r
- if (tid < n - 16) \r
+ if (tid < n - 16)\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 16]);\r
if (tid < 8)\r
{\r
}\r
else if (n > 8)\r
{\r
- if (tid < n - 8) \r
+ if (tid < n - 8)\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 8]);\r
if (tid < 4)\r
{\r
}\r
else if (n > 4)\r
{\r
- if (tid < n - 4) \r
+ if (tid < n - 4)\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 4]);\r
if (tid < 2)\r
{\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 2]);\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 1]);\r
}\r
- } \r
+ }\r
else if (n > 2)\r
{\r
- if (tid < n - 2) \r
+ if (tid < n - 2)\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 2]);\r
if (tid < 2)\r
{\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 1]);\r
}\r
- } \r
+ }\r
}\r
};\r
template <> struct WarpReductor<64>\r
{\r
data[tid] = partial_reduction;\r
__syncthreads();\r
- \r
- if (tid < 32) \r
+\r
+ if (tid < 32)\r
{\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 32]);\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 16]);\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 8 ]);\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 4 ]);\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 2 ]);\r
- data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]); \r
+ data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]);\r
}\r
}\r
};\r
template <typename T, typename Op> static __device__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op)\r
{\r
data[tid] = partial_reduction;\r
- \r
- if (tid < 16) \r
+\r
+ if (tid < 16)\r
{\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 16]);\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 8 ]);\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 4 ]);\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 2 ]);\r
- data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]); \r
+ data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]);\r
}\r
}\r
};\r
template <typename T, typename Op> static __device__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op)\r
{\r
data[tid] = partial_reduction;\r
- \r
- if (tid < 8) \r
+\r
+ if (tid < 8)\r
{\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 8 ]);\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 4 ]);\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 2 ]);\r
- data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]); \r
+ data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]);\r
}\r
}\r
};\r
template <typename T, typename Op> static __device__ void reduce(volatile T* data, T& partial_reduction, int tid, const Op& op)\r
{\r
data[tid] = partial_reduction;\r
- \r
- if (tid < 4) \r
+\r
+ if (tid < 4)\r
{\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 4 ]);\r
data[tid] = partial_reduction = op(partial_reduction, data[tid + 2 ]);\r
- data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]); \r
+ data[tid] = partial_reduction = op(partial_reduction, data[tid + 1 ]);\r
}\r
}\r
};\r
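
WarpReductor<n> implements the classic warp-synchronous tree reduction: each step folds the upper half of the shared array onto the lower half, and __syncthreads() is only needed while more than one warp is still involved. A minimal usage sketch, assuming the plus<> functor from functional.hpp (the kernel blockSum64 is hypothetical, not part of this patch):

    // Sum 64 elements per block; smem[0] ends up holding the block total.
    template <typename T>
    __global__ void blockSum64(const T* src, T* dst)
    {
        __shared__ volatile T smem[64];

        const int tid = threadIdx.x;
        T partial = src[blockIdx.x * 64 + tid];

        // Tree reduction across the two warps of the block.
        utility_detail::WarpReductor<64>::reduce(smem, partial, tid, plus<T>());

        if (tid == 0)
            dst[blockIdx.x] = smem[0];
    }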
\r
///////////////////////////////////////////////////////////////////////////////\r
// PredValWarpReductor\r
- \r
+\r
template <int n> struct PredValWarpReductor;\r
template <> struct PredValWarpReductor<64>\r
{\r
- template <typename T, typename V, typename Pred> \r
+ template <typename T, typename V, typename Pred>\r
static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred)\r
{\r
if (tid < 32)\r
sdata[tid] = myData = reg;\r
sval[tid] = myVal = sval[tid + 4];\r
}\r
- \r
+\r
reg = sdata[tid + 2];\r
if (pred(reg, myData))\r
{\r
sdata[tid] = myData = reg;\r
sval[tid] = myVal = sval[tid + 2];\r
}\r
- \r
+\r
reg = sdata[tid + 1];\r
if (pred(reg, myData))\r
{\r
};\r
template <> struct PredValWarpReductor<32>\r
{\r
- template <typename T, typename V, typename Pred> \r
+ template <typename T, typename V, typename Pred>\r
static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred)\r
{\r
if (tid < 16)\r
sdata[tid] = myData = reg;\r
sval[tid] = myVal = sval[tid + 4];\r
}\r
- \r
+\r
reg = sdata[tid + 2];\r
if (pred(reg, myData))\r
{\r
sdata[tid] = myData = reg;\r
sval[tid] = myVal = sval[tid + 2];\r
}\r
- \r
+\r
reg = sdata[tid + 1];\r
if (pred(reg, myData))\r
{\r
\r
template <> struct PredValWarpReductor<16>\r
{\r
- template <typename T, typename V, typename Pred> \r
+ template <typename T, typename V, typename Pred>\r
static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred)\r
{\r
if (tid < 8)\r
sdata[tid] = myData = reg;\r
sval[tid] = myVal = sval[tid + 4];\r
}\r
- \r
+\r
reg = sdata[tid + 2];\r
if (pred(reg, myData))\r
{\r
sdata[tid] = myData = reg;\r
sval[tid] = myVal = sval[tid + 2];\r
}\r
- \r
+\r
reg = sdata[tid + 1];\r
if (pred(reg, myData))\r
{\r
};\r
template <> struct PredValWarpReductor<8>\r
{\r
- template <typename T, typename V, typename Pred> \r
+ template <typename T, typename V, typename Pred>\r
static __device__ void reduce(T& myData, V& myVal, volatile T* sdata, V* sval, int tid, const Pred& pred)\r
{\r
if (tid < 4)\r
sdata[tid] = myData = reg;\r
sval[tid] = myVal = sval[tid + 4];\r
}\r
- \r
+\r
reg = sdata[tid + 2];\r
if (pred(reg, myData))\r
{\r
sdata[tid] = myData = reg;\r
sval[tid] = myVal = sval[tid + 2];\r
}\r
- \r
+\r
reg = sdata[tid + 1];\r
if (pred(reg, myData))\r
{\r
myData = sdata[tid];\r
myVal = sval[tid];\r
\r
- if (n >= 512 && tid < 256) \r
+ if (n >= 512 && tid < 256)\r
{\r
T reg = sdata[tid + 256];\r
\r
sdata[tid] = myData = reg;\r
sval[tid] = myVal = sval[tid + 256];\r
}\r
- __syncthreads(); \r
+ __syncthreads();\r
}\r
- if (n >= 256 && tid < 128) \r
+ if (n >= 256 && tid < 128)\r
{\r
T reg = sdata[tid + 128];\r
\r
sdata[tid] = myData = reg;\r
sval[tid] = myVal = sval[tid + 128];\r
}\r
- __syncthreads(); \r
+ __syncthreads();\r
}\r
- if (n >= 128 && tid < 64) \r
+ if (n >= 128 && tid < 64)\r
{\r
T reg = sdata[tid + 64];\r
\r
sdata[tid] = myData = reg;\r
sval[tid] = myVal = sval[tid + 64];\r
}\r
- __syncthreads(); \r
- } \r
+ __syncthreads();\r
+ }\r
\r
if (tid < 32)\r
{\r
- if (n >= 64) \r
- { \r
+ if (n >= 64)\r
+ {\r
T reg = sdata[tid + 32];\r
\r
if (pred(reg, myData))\r
sval[tid] = myVal = sval[tid + 32];\r
}\r
}\r
- if (n >= 32) \r
- { \r
+ if (n >= 32)\r
+ {\r
T reg = sdata[tid + 16];\r
\r
if (pred(reg, myData))\r
sval[tid] = myVal = sval[tid + 16];\r
}\r
}\r
- if (n >= 16) \r
- { \r
+ if (n >= 16)\r
+ {\r
T reg = sdata[tid + 8];\r
\r
if (pred(reg, myData))\r
sval[tid] = myVal = sval[tid + 8];\r
}\r
}\r
- if (n >= 8) \r
- { \r
+ if (n >= 8)\r
+ {\r
T reg = sdata[tid + 4];\r
\r
if (pred(reg, myData))\r
sval[tid] = myVal = sval[tid + 4];\r
}\r
}\r
- if (n >= 4) \r
- { \r
+ if (n >= 4)\r
+ {\r
T reg = sdata[tid + 2];\r
\r
if (pred(reg, myData))\r
{\r
sdata[tid] = myData = reg;\r
sval[tid] = myVal = sval[tid + 2];\r
- } \r
+ }\r
}\r
- if (n >= 2) \r
- { \r
+ if (n >= 2)\r
+ {\r
T reg = sdata[tid + 1];\r
\r
if (pred(reg, myData))\r
template <int n> struct PredVal2WarpReductor;\r
template <> struct PredVal2WarpReductor<64>\r
{\r
- template <typename T, typename V1, typename V2, typename Pred> \r
+ template <typename T, typename V1, typename V2, typename Pred>\r
static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)\r
{\r
if (tid < 32)\r
sval1[tid] = myVal1 = sval1[tid + 4];\r
sval2[tid] = myVal2 = sval2[tid + 4];\r
}\r
- \r
+\r
reg = sdata[tid + 2];\r
if (pred(reg, myData))\r
{\r
sval1[tid] = myVal1 = sval1[tid + 2];\r
sval2[tid] = myVal2 = sval2[tid + 2];\r
}\r
- \r
+\r
reg = sdata[tid + 1];\r
if (pred(reg, myData))\r
{\r
};\r
template <> struct PredVal2WarpReductor<32>\r
{\r
- template <typename T, typename V1, typename V2, typename Pred> \r
+ template <typename T, typename V1, typename V2, typename Pred>\r
static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)\r
{\r
if (tid < 16)\r
sval1[tid] = myVal1 = sval1[tid + 4];\r
sval2[tid] = myVal2 = sval2[tid + 4];\r
}\r
- \r
+\r
reg = sdata[tid + 2];\r
if (pred(reg, myData))\r
{\r
sval1[tid] = myVal1 = sval1[tid + 2];\r
sval2[tid] = myVal2 = sval2[tid + 2];\r
}\r
- \r
+\r
reg = sdata[tid + 1];\r
if (pred(reg, myData))\r
{\r
\r
template <> struct PredVal2WarpReductor<16>\r
{\r
- template <typename T, typename V1, typename V2, typename Pred> \r
+ template <typename T, typename V1, typename V2, typename Pred>\r
static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)\r
{\r
if (tid < 8)\r
sval1[tid] = myVal1 = sval1[tid + 4];\r
sval2[tid] = myVal2 = sval2[tid + 4];\r
}\r
- \r
+\r
reg = sdata[tid + 2];\r
if (pred(reg, myData))\r
{\r
sval1[tid] = myVal1 = sval1[tid + 2];\r
sval2[tid] = myVal2 = sval2[tid + 2];\r
}\r
- \r
+\r
reg = sdata[tid + 1];\r
if (pred(reg, myData))\r
{\r
};\r
template <> struct PredVal2WarpReductor<8>\r
{\r
- template <typename T, typename V1, typename V2, typename Pred> \r
+ template <typename T, typename V1, typename V2, typename Pred>\r
static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)\r
{\r
if (tid < 4)\r
sval1[tid] = myVal1 = sval1[tid + 4];\r
sval2[tid] = myVal2 = sval2[tid + 4];\r
}\r
- \r
+\r
reg = sdata[tid + 2];\r
if (pred(reg, myData))\r
{\r
sval1[tid] = myVal1 = sval1[tid + 2];\r
sval2[tid] = myVal2 = sval2[tid + 2];\r
}\r
- \r
+\r
reg = sdata[tid + 1];\r
if (pred(reg, myData))\r
{\r
template <bool warp> struct PredVal2ReductionDispatcher;\r
template <> struct PredVal2ReductionDispatcher<true>\r
{\r
- template <int n, typename T, typename V1, typename V2, typename Pred> \r
+ template <int n, typename T, typename V1, typename V2, typename Pred>\r
static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)\r
{\r
PredVal2WarpReductor<n>::reduce(myData, myVal1, myVal2, sdata, sval1, sval2, tid, pred);\r
};\r
template <> struct PredVal2ReductionDispatcher<false>\r
{\r
- template <int n, typename T, typename V1, typename V2, typename Pred> \r
+ template <int n, typename T, typename V1, typename V2, typename Pred>\r
static __device__ void reduce(T& myData, V1& myVal1, V2& myVal2, volatile T* sdata, V1* sval1, V2* sval2, int tid, const Pred& pred)\r
{\r
myData = sdata[tid];\r
myVal1 = sval1[tid];\r
myVal2 = sval2[tid];\r
\r
- if (n >= 512 && tid < 256) \r
+ if (n >= 512 && tid < 256)\r
{\r
T reg = sdata[tid + 256];\r
\r
sval1[tid] = myVal1 = sval1[tid + 256];\r
sval2[tid] = myVal2 = sval2[tid + 256];\r
}\r
- __syncthreads(); \r
+ __syncthreads();\r
}\r
- if (n >= 256 && tid < 128) \r
+ if (n >= 256 && tid < 128)\r
{\r
T reg = sdata[tid + 128];\r
\r
sval1[tid] = myVal1 = sval1[tid + 128];\r
sval2[tid] = myVal2 = sval2[tid + 128];\r
}\r
- __syncthreads(); \r
+ __syncthreads();\r
}\r
- if (n >= 128 && tid < 64) \r
+ if (n >= 128 && tid < 64)\r
{\r
T reg = sdata[tid + 64];\r
\r
sval1[tid] = myVal1 = sval1[tid + 64];\r
sval2[tid] = myVal2 = sval2[tid + 64];\r
}\r
- __syncthreads(); \r
- } \r
+ __syncthreads();\r
+ }\r
\r
if (tid < 32)\r
{\r
- if (n >= 64) \r
- { \r
+ if (n >= 64)\r
+ {\r
T reg = sdata[tid + 32];\r
\r
if (pred(reg, myData))\r
sval2[tid] = myVal2 = sval2[tid + 32];\r
}\r
}\r
- if (n >= 32) \r
- { \r
+ if (n >= 32)\r
+ {\r
T reg = sdata[tid + 16];\r
\r
if (pred(reg, myData))\r
sval2[tid] = myVal2 = sval2[tid + 16];\r
}\r
}\r
- if (n >= 16) \r
- { \r
+ if (n >= 16)\r
+ {\r
T reg = sdata[tid + 8];\r
\r
if (pred(reg, myData))\r
sval2[tid] = myVal2 = sval2[tid + 8];\r
}\r
}\r
- if (n >= 8) \r
- { \r
+ if (n >= 8)\r
+ {\r
T reg = sdata[tid + 4];\r
\r
if (pred(reg, myData))\r
sval2[tid] = myVal2 = sval2[tid + 4];\r
}\r
}\r
- if (n >= 4) \r
- { \r
+ if (n >= 4)\r
+ {\r
T reg = sdata[tid + 2];\r
\r
if (pred(reg, myData))\r
sdata[tid] = myData = reg;\r
sval1[tid] = myVal1 = sval1[tid + 2];\r
sval2[tid] = myVal2 = sval2[tid + 2];\r
- } \r
+ }\r
}\r
- if (n >= 2) \r
- { \r
+ if (n >= 2)\r
+ {\r
T reg = sdata[tid + 1];\r
\r
if (pred(reg, myData))\r
} // namespace utility_detail\r
}}} // namespace cv { namespace gpu { namespace device\r
\r
-#endif // __OPENCV_GPU_UTILITY_DETAIL_HPP__\r
+#endif // __OPENCV_GPU_REDUCTION_DETAIL_HPP__\r
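
PredValWarpReductor and the PredVal*ReductionDispatcher family reduce a key under a predicate while dragging one (or, in the PredVal2 variants, two) associated values along in lock-step, which is the building block for argmin/argmax-style kernels. A hedged sketch of the calling pattern, assuming less<> from functional.hpp (blockArgMin64 is hypothetical):

    // Find the minimum distance and its index within a 64-thread block.
    template <typename T>
    __global__ void blockArgMin64(const T* dist, T* outVal, int* outIdx)
    {
        __shared__ T sdata[64];
        __shared__ int sval[64];

        const int tid = threadIdx.x;
        T myDist = dist[blockIdx.x * 64 + tid];
        int myIdx = tid;

        sdata[tid] = myDist;
        sval[tid]  = myIdx;
        __syncthreads();

        // pred(a, b) == true keeps 'a'; with less<T> this tracks the
        // minimum key and moves the matching index through sval with it.
        utility_detail::PredValWarpReductor<64>::reduce(myDist, myIdx, sdata, sval, tid, less<T>());

        if (tid == 0)
        {
            *outVal = sdata[0];
            *outIdx = sval[0];
        }
    }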
#include "../vec_traits.hpp"\r
#include "../functional.hpp"\r
\r
-namespace cv { namespace gpu { namespace device \r
+namespace cv { namespace gpu { namespace device\r
{\r
namespace transform_detail\r
{\r
};\r
\r
template <typename T, typename D, typename UnOp, typename Mask>\r
- __global__ static void transformSmart(const PtrStepSz<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)\r
+ static __global__ void transformSmart(const PtrStepSz<T> src_, PtrStep<D> dst_, const Mask mask, const UnOp op)\r
{\r
typedef TransformFunctorTraits<UnOp> ft;\r
typedef typename UnaryReadWriteTraits<T, D, ft::smart_shift>::read_type read_type;\r
}\r
\r
template <typename T, typename D, typename UnOp, typename Mask>\r
-        __global__ static void transformSimple(const PtrStepSz<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)\r
+        static __global__ void transformSimple(const PtrStepSz<T> src, PtrStep<D> dst, const Mask mask, const UnOp op)\r
{\r
- const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
- const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
+ const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
+ const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
\r
if (x < src.cols && y < src.rows && mask(y, x))\r
{\r
}\r
\r
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\r
- __global__ static void transformSmart(const PtrStepSz<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_, \r
+ static __global__ void transformSmart(const PtrStepSz<T1> src1_, const PtrStep<T2> src2_, PtrStep<D> dst_,\r
const Mask mask, const BinOp op)\r
{\r
typedef TransformFunctorTraits<BinOp> ft;\r
const read_type1 src1_n_el = ((const read_type1*)src1)[x];\r
const read_type2 src2_n_el = ((const read_type2*)src2)[x];\r
write_type dst_n_el = ((const write_type*)dst)[x];\r
- \r
+\r
OpUnroller<ft::smart_shift>::unroll(src1_n_el, src2_n_el, dst_n_el, mask, op, x_shifted, y);\r
\r
((write_type*)dst)[x] = dst_n_el;\r
}\r
\r
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\r
- static __global__ void transformSimple(const PtrStepSz<T1> src1, const PtrStep<T2> src2, PtrStep<D> dst, \r
+ static __global__ void transformSimple(const PtrStepSz<T1> src1, const PtrStep<T2> src2, PtrStep<D> dst,\r
const Mask mask, const BinOp op)\r
{\r
- const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
- const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
+ const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
+ const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
\r
if (x < src1.cols && y < src1.rows && mask(y, x))\r
{\r
typedef TransformFunctorTraits<UnOp> ft;\r
\r
const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);\r
- const dim3 grid(divUp(src.cols, threads.x), divUp(src.rows, threads.y), 1); \r
+ const dim3 grid(divUp(src.cols, threads.x), divUp(src.rows, threads.y), 1);\r
\r
transformSimple<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);\r
cudaSafeCall( cudaGetLastError() );\r
\r
if (stream == 0)\r
- cudaSafeCall( cudaDeviceSynchronize() ); \r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
\r
template <typename T1, typename T2, typename D, typename BinOp, typename Mask>\r
typedef TransformFunctorTraits<BinOp> ft;\r
\r
const dim3 threads(ft::simple_block_dim_x, ft::simple_block_dim_y, 1);\r
- const dim3 grid(divUp(src1.cols, threads.x), divUp(src1.rows, threads.y), 1); \r
+ const dim3 grid(divUp(src1.cols, threads.x), divUp(src1.rows, threads.y), 1);\r
\r
transformSimple<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);\r
cudaSafeCall( cudaGetLastError() );\r
\r
if (stream == 0)\r
- cudaSafeCall( cudaDeviceSynchronize() ); \r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
};\r
template<> struct TransformDispatcher<true>\r
\r
StaticAssert<ft::smart_shift != 1>::check();\r
\r
- if (!isAligned(src.data, ft::smart_shift * sizeof(T)) || !isAligned(src.step, ft::smart_shift * sizeof(T)) || \r
+ if (!isAligned(src.data, ft::smart_shift * sizeof(T)) || !isAligned(src.step, ft::smart_shift * sizeof(T)) ||\r
!isAligned(dst.data, ft::smart_shift * sizeof(D)) || !isAligned(dst.step, ft::smart_shift * sizeof(D)))\r
{\r
TransformDispatcher<false>::call(src, dst, op, mask, stream);\r
}\r
\r
const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);\r
- const dim3 grid(divUp(src.cols, threads.x * ft::smart_shift), divUp(src.rows, threads.y), 1); \r
+ const dim3 grid(divUp(src.cols, threads.x * ft::smart_shift), divUp(src.rows, threads.y), 1);\r
\r
transformSmart<T, D><<<grid, threads, 0, stream>>>(src, dst, mask, op);\r
cudaSafeCall( cudaGetLastError() );\r
}\r
\r
const dim3 threads(ft::smart_block_dim_x, ft::smart_block_dim_y, 1);\r
- const dim3 grid(divUp(src1.cols, threads.x * ft::smart_shift), divUp(src1.rows, threads.y), 1); \r
+ const dim3 grid(divUp(src1.cols, threads.x * ft::smart_shift), divUp(src1.rows, threads.y), 1);\r
\r
transformSmart<T1, T2, D><<<grid, threads, 0, stream>>>(src1, src2, dst, mask, op);\r
cudaSafeCall( cudaGetLastError() );\r
\r
if (stream == 0)\r
- cudaSafeCall( cudaDeviceSynchronize() ); \r
+ cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
- }; \r
+ };\r
} // namespace transform_detail\r
}}} // namespace cv { namespace gpu { namespace device\r
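
TransformDispatcher<true> (the "smart" path above) only launches the vectorized transformSmart kernel when the data pointers and row steps of every surface are aligned to smart_shift * sizeof(T); otherwise it falls back to the per-pixel transformSimple path. The grid math relies on divUp; for reference, a sketch of its usual definition (OpenCV keeps it in common.hpp):

    // Integer division rounded up, so the grid covers every pixel.
    static inline int divUp(int total, int grain)
    {
        return (total + grain - 1) / grain;
    }

    // Example: a 100-column uchar image with smart_shift == 4 and 32
    // threads per block row covers 32 * 4 = 128 pixels per block,
    // so divUp(100, 128) == 1 block along x.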
\r
#ifndef __OPENCV_GPU_SCAN_HPP__
#define __OPENCV_GPU_SCAN_HPP__
- enum ScanKind { EXCLUSIVE = 0, INCLUSIVE = 1 };
+#include "common.hpp"
- template <ScanKind Kind, typename T, typename F> struct WarpScan
- {
- __device__ __forceinline__ WarpScan() {}
- __device__ __forceinline__ WarpScan(const WarpScan& other) { (void)other; }
-
- __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
- {
- const unsigned int lane = idx & 31;
- F op;
+namespace cv { namespace gpu { namespace device
+{
+ enum ScanKind { EXCLUSIVE = 0, INCLUSIVE = 1 };
- if ( lane >= 1) ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
- if ( lane >= 2) ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
- if ( lane >= 4) ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
- if ( lane >= 8) ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
- if ( lane >= 16) ptr [idx ] = op(ptr [idx - 16], ptr [idx]);
+ template <ScanKind Kind, typename T, typename F> struct WarpScan
+ {
+ __device__ __forceinline__ WarpScan() {}
+ __device__ __forceinline__ WarpScan(const WarpScan& other) { (void)other; }
- if( Kind == INCLUSIVE )
- return ptr [idx];
- else
- return (lane > 0) ? ptr [idx - 1] : 0;
- }
+ __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
+ {
+ const unsigned int lane = idx & 31;
+ F op;
+
+ if ( lane >= 1) ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
+ if ( lane >= 2) ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
+ if ( lane >= 4) ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
+ if ( lane >= 8) ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
+ if ( lane >= 16) ptr [idx ] = op(ptr [idx - 16], ptr [idx]);
+
+ if( Kind == INCLUSIVE )
+ return ptr [idx];
+ else
+ return (lane > 0) ? ptr [idx - 1] : 0;
+ }
+
+ __device__ __forceinline__ unsigned int index(const unsigned int tid)
+ {
+ return tid;
+ }
- __device__ __forceinline__ unsigned int index(const unsigned int tid)
- {
- return tid;
- }
+ __device__ __forceinline__ void init(volatile T *ptr){}
- __device__ __forceinline__ void init(volatile T *ptr){}
+ static const int warp_offset = 0;
- static const int warp_offset = 0;
+ typedef WarpScan<INCLUSIVE, T, F> merge;
+ };
- typedef WarpScan<INCLUSIVE, T, F> merge;
- };
+ template <ScanKind Kind , typename T, typename F> struct WarpScanNoComp
+ {
+ __device__ __forceinline__ WarpScanNoComp() {}
+ __device__ __forceinline__ WarpScanNoComp(const WarpScanNoComp& other) { (void)other; }
- template <ScanKind Kind , typename T, typename F> struct WarpScanNoComp
+ __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
+ {
+ const unsigned int lane = threadIdx.x & 31;
+ F op;
+
+ ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
+ ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
+ ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
+ ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
+ ptr [idx ] = op(ptr [idx - 16], ptr [idx]);
+
+ if( Kind == INCLUSIVE )
+ return ptr [idx];
+ else
+ return (lane > 0) ? ptr [idx - 1] : 0;
+ }
+
+ __device__ __forceinline__ unsigned int index(const unsigned int tid)
{
- __device__ __forceinline__ WarpScanNoComp() {}
- __device__ __forceinline__ WarpScanNoComp(const WarpScanNoComp& other) { (void)other; }
-
- __device__ __forceinline__ T operator()( volatile T *ptr , const unsigned int idx)
- {
- const unsigned int lane = threadIdx.x & 31;
- F op;
-
- ptr [idx ] = op(ptr [idx - 1], ptr [idx]);
- ptr [idx ] = op(ptr [idx - 2], ptr [idx]);
- ptr [idx ] = op(ptr [idx - 4], ptr [idx]);
- ptr [idx ] = op(ptr [idx - 8], ptr [idx]);
- ptr [idx ] = op(ptr [idx - 16], ptr [idx]);
-
- if( Kind == INCLUSIVE )
- return ptr [idx];
- else
- return (lane > 0) ? ptr [idx - 1] : 0;
- }
-
- __device__ __forceinline__ unsigned int index(const unsigned int tid)
- {
- return (tid >> warp_log) * warp_smem_stride + 16 + (tid & warp_mask);
- }
-
- __device__ __forceinline__ void init(volatile T *ptr)
- {
- ptr[threadIdx.x] = 0;
- }
-
- static const int warp_smem_stride = 32 + 16 + 1;
- static const int warp_offset = 16;
- static const int warp_log = 5;
- static const int warp_mask = 31;
-
- typedef WarpScanNoComp<INCLUSIVE, T, F> merge;
- };
-
- template <ScanKind Kind , typename T, typename Sc, typename F> struct BlockScan
+ return (tid >> warp_log) * warp_smem_stride + 16 + (tid & warp_mask);
+ }
+
+ __device__ __forceinline__ void init(volatile T *ptr)
{
- __device__ __forceinline__ BlockScan() {}
- __device__ __forceinline__ BlockScan(const BlockScan& other) { (void)other; }
+ ptr[threadIdx.x] = 0;
+ }
+
+ static const int warp_smem_stride = 32 + 16 + 1;
+ static const int warp_offset = 16;
+ static const int warp_log = 5;
+ static const int warp_mask = 31;
+
+ typedef WarpScanNoComp<INCLUSIVE, T, F> merge;
+ };
- __device__ __forceinline__ T operator()(volatile T *ptr)
- {
- const unsigned int tid = threadIdx.x;
- const unsigned int lane = tid & warp_mask;
- const unsigned int warp = tid >> warp_log;
+ template <ScanKind Kind , typename T, typename Sc, typename F> struct BlockScan
+ {
+ __device__ __forceinline__ BlockScan() {}
+ __device__ __forceinline__ BlockScan(const BlockScan& other) { (void)other; }
+
+ __device__ __forceinline__ T operator()(volatile T *ptr)
+ {
+ const unsigned int tid = threadIdx.x;
+ const unsigned int lane = tid & warp_mask;
+ const unsigned int warp = tid >> warp_log;
- Sc scan;
- typename Sc::merge merge_scan;
- const unsigned int idx = scan.index(tid);
+ Sc scan;
+ typename Sc::merge merge_scan;
+ const unsigned int idx = scan.index(tid);
- T val = scan(ptr, idx);
- __syncthreads ();
+ T val = scan(ptr, idx);
+ __syncthreads ();
- if( warp == 0)
- scan.init(ptr);
- __syncthreads ();
+ if( warp == 0)
+ scan.init(ptr);
+ __syncthreads ();
- if( lane == 31 )
- ptr [scan.warp_offset + warp ] = (Kind == INCLUSIVE) ? val : ptr [idx];
- __syncthreads ();
+ if( lane == 31 )
+ ptr [scan.warp_offset + warp ] = (Kind == INCLUSIVE) ? val : ptr [idx];
+ __syncthreads ();
- if( warp == 0 )
- merge_scan(ptr, idx);
- __syncthreads();
+ if( warp == 0 )
+ merge_scan(ptr, idx);
+ __syncthreads();
- if ( warp > 0)
- val = ptr [scan.warp_offset + warp - 1] + val;
- __syncthreads ();
+ if ( warp > 0)
+ val = ptr [scan.warp_offset + warp - 1] + val;
+ __syncthreads ();
- ptr[idx] = val;
- __syncthreads ();
+ ptr[idx] = val;
+ __syncthreads ();
- return val ;
- }
+ return val ;
+ }
- static const int warp_log = 5;
- static const int warp_mask = 31;
- };
+ static const int warp_log = 5;
+ static const int warp_mask = 31;
+ };
+}}}
-#endif
\ No newline at end of file
+#endif // __OPENCV_GPU_SCAN_HPP__
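
WarpScan is a Hillis-Steele scan: lane i folds in its neighbours at distances 1, 2, 4, 8 and 16, so after five steps every lane holds the inclusive prefix over its warp. The lane >= d guards play the role that the 16-slot zeroed guard region plays for WarpScanNoComp. A single-warp usage sketch, assuming plus<int> from functional.hpp (warpPrefixSum is hypothetical; launch as warpPrefixSum<<<1, 32>>>(src, dst)):

    // Inclusive prefix sum of 32 ints with one warp.
    __global__ void warpPrefixSum(const int* src, int* dst)
    {
        __shared__ int smem[32];

        WarpScan<INCLUSIVE, int, plus<int> > scan;

        const unsigned int idx = scan.index(threadIdx.x); // identity here
        smem[idx] = src[idx];

        // Warp-synchronous on the pre-Volta hardware this code targets,
        // so no __syncthreads() is needed between the five steps.
        dst[idx] = scan(smem, idx);
    }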
__OPENCV_GPU_HOST_DEVICE__ static void check() {};\r
};\r
}\r
-\r
- using ::cv::gpu::device::Static;\r
}}\r
\r
#undef __OPENCV_GPU_HOST_DEVICE__\r
\r
-#endif /* __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__ */
\ No newline at end of file
+#endif /* __OPENCV_GPU_GPU_DEVICE_STATIC_CHECK_HPP__ */\r
\r
#include "saturate_cast.hpp"\r
#include "datamov_utils.hpp"\r
-#include "detail/utility_detail.hpp"\r
+#include "detail/reduction_detail.hpp"\r
\r
namespace cv { namespace gpu { namespace device\r
{\r