\r
namespace cv { namespace gpu { namespace mathfunc\r
{\r
- void bitwise_not_caller(int rows, int cols, const PtrStep src, int elemSize, PtrStep dst, cudaStream_t stream);\r
- void bitwise_not_caller(int rows, int cols, const PtrStep src, int elemSize, PtrStep dst, const PtrStep mask, cudaStream_t stream);\r
- void bitwise_or_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream);\r
- void bitwise_or_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, const PtrStep mask, cudaStream_t stream);\r
- void bitwise_and_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream);\r
- void bitwise_and_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, const PtrStep mask, cudaStream_t stream);\r
- void bitwise_xor_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream);\r
- void bitwise_xor_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, const PtrStep mask, cudaStream_t stream);\r
-\r
-\r
- template <int opid, typename Mask>\r
- void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst, int elem_size, Mask mask, cudaStream_t stream);\r
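+ // Unmasked callers treat each row as raw bytes (cols * elem_size1 * cn), so one
+ // byte-wise kernel serves every type; masked callers are templated on the word
+ // type so the mask can be applied per pixel.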
+ void bitwise_not_caller(int rows, int cols, int elem_size1, int cn, const PtrStep src, PtrStep dst, cudaStream_t stream);\r
+\r
+ template <typename T>\r
+ void bitwise_mask_not_caller(int rows, int cols, int cn, const PtrStep src, const PtrStep mask, PtrStep dst, cudaStream_t stream);\r
+\r
+ void bitwise_or_caller(int rows, int cols, int elem_size1, int cn, const PtrStep src1, const PtrStep src2, PtrStep dst, cudaStream_t stream);\r
+\r
+ template <typename T>\r
+ void bitwise_mask_or_caller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2, const PtrStep mask, PtrStep dst, cudaStream_t stream);\r
+\r
+ void bitwise_and_caller(int rows, int cols, int elem_size1, int cn, const PtrStep src1, const PtrStep src2, PtrStep dst, cudaStream_t stream);\r
+\r
+ template <typename T>\r
+ void bitwise_mask_and_caller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2, const PtrStep mask, PtrStep dst, cudaStream_t stream);\r
+\r
+ void bitwise_xor_caller(int rows, int cols, int elem_size1, int cn, const PtrStep src1, const PtrStep src2, PtrStep dst, cudaStream_t stream);\r
+\r
+ template <typename T>\r
+ void bitwise_mask_xor_caller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2, const PtrStep mask, PtrStep dst, cudaStream_t stream);\r
}}}\r
\r
namespace
{
void bitwise_not_caller(const GpuMat& src, GpuMat& dst, cudaStream_t stream)\r
{\r
dst.create(src.size(), src.type());\r
- mathfunc::bitwise_not_caller(src.rows, src.cols, src, src.elemSize(), dst, stream);\r
+\r
+ cv::gpu::mathfunc::bitwise_not_caller(src.rows, src.cols, src.elemSize1(), \r
+ dst.channels(), src, dst, stream);\r
}\r
\r
+\r
void bitwise_not_caller(const GpuMat& src, GpuMat& dst, const GpuMat& mask, cudaStream_t stream)\r
{\r
+ using namespace cv::gpu;\r
+\r
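+ // One entry per OpenCV depth: CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F.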
+ typedef void (*Caller)(int, int, int, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+ static Caller callers[] = {mathfunc::bitwise_mask_not_caller<unsigned char>, mathfunc::bitwise_mask_not_caller<unsigned char>, \r
+ mathfunc::bitwise_mask_not_caller<unsigned short>, mathfunc::bitwise_mask_not_caller<unsigned short>,\r
+ mathfunc::bitwise_mask_not_caller<unsigned int>, mathfunc::bitwise_mask_not_caller<unsigned int>,\r
+ mathfunc::bitwise_mask_not_caller<unsigned int>};\r
+\r
CV_Assert(mask.type() == CV_8U && mask.size() == src.size());\r
dst.create(src.size(), src.type());\r
- mathfunc::bitwise_not_caller(src.rows, src.cols, src, src.elemSize(), dst, mask, stream);\r
+\r
+ Caller caller = callers[src.depth()];\r
+ CV_Assert(caller);\r
+\r
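+ // No 64-bit kernel instantiation: process each double as two 32-bit words.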
+ int cn = src.depth() != CV_64F ? src.channels() : src.channels() * (sizeof(double) / sizeof(unsigned int));\r
+ caller(src.rows, src.cols, cn, src, mask, dst, stream);\r
}\r
\r
+\r
void bitwise_or_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, cudaStream_t stream)\r
{\r
CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());\r
dst.create(src1.size(), src1.type());\r
- mathfunc::bitwise_or_caller(dst.rows, dst.cols, src1, src2, dst.elemSize(), dst, stream);\r
+\r
+ cv::gpu::mathfunc::bitwise_or_caller(dst.rows, dst.cols, dst.elemSize1(), \r
+ dst.channels(), src1, src2, dst, stream);\r
}\r
\r
+\r
void bitwise_or_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, cudaStream_t stream)\r
{\r
+ using namespace cv::gpu;\r
+\r
+ typedef void (*Caller)(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+ static Caller callers[] = {mathfunc::bitwise_mask_or_caller<unsigned char>, mathfunc::bitwise_mask_or_caller<unsigned char>, \r
+ mathfunc::bitwise_mask_or_caller<unsigned short>, mathfunc::bitwise_mask_or_caller<unsigned short>,\r
+ mathfunc::bitwise_mask_or_caller<unsigned int>, mathfunc::bitwise_mask_or_caller<unsigned int>,\r
+ mathfunc::bitwise_mask_or_caller<unsigned int>};\r
+\r
CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());\r
CV_Assert(mask.type() == CV_8U && mask.size() == src1.size());
dst.create(src1.size(), src1.type());\r
- mathfunc::bitwise_or_caller(dst.rows, dst.cols, src1, src2, dst.elemSize(), dst, mask, stream);\r
+\r
+ Caller caller = callers[src1.depth()];\r
+ CV_Assert(caller);\r
+\r
+ int cn = dst.depth() != CV_64F ? dst.channels() : dst.channels() * (sizeof(double) / sizeof(unsigned int));\r
+ caller(dst.rows, dst.cols, cn, src1, src2, mask, dst, stream);\r
}\r
\r
+\r
void bitwise_and_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, cudaStream_t stream)\r
{\r
CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());\r
dst.create(src1.size(), src1.type());\r
- mathfunc::bitwise_and_caller(dst.rows, dst.cols, src1, src2, dst.elemSize(), dst, stream);\r
+\r
+ cv::gpu::mathfunc::bitwise_and_caller(dst.rows, dst.cols, dst.elemSize1(), \r
+ dst.channels(), src1, src2, dst, stream);\r
}\r
\r
+\r
void bitwise_and_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, cudaStream_t stream)\r
{\r
+ using namespace cv::gpu;\r
+\r
+ typedef void (*Caller)(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+ static Caller callers[] = {mathfunc::bitwise_mask_and_caller<unsigned char>, mathfunc::bitwise_mask_and_caller<unsigned char>, \r
+ mathfunc::bitwise_mask_and_caller<unsigned short>, mathfunc::bitwise_mask_and_caller<unsigned short>,\r
+ mathfunc::bitwise_mask_and_caller<unsigned int>, mathfunc::bitwise_mask_and_caller<unsigned int>,\r
+ mathfunc::bitwise_mask_and_caller<unsigned int>};\r
+\r
CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());\r
CV_Assert(mask.type() == CV_8U && mask.size() == src1.size());
dst.create(src1.size(), src1.type());\r
- mathfunc::bitwise_and_caller(dst.rows, dst.cols, src1, src2, dst.elemSize(), dst, mask, stream);\r
+\r
+ Caller caller = callers[src1.depth()];\r
+ CV_Assert(caller);\r
+\r
+ int cn = dst.depth() != CV_64F ? dst.channels() : dst.channels() * (sizeof(double) / sizeof(unsigned int));\r
+ caller(dst.rows, dst.cols, cn, src1, src2, mask, dst, stream);\r
}\r
\r
+\r
void bitwise_xor_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, cudaStream_t stream)\r
{\r
- CV_Assert(src1.size() == src2.size());\r
- CV_Assert(src1.type() == src2.type());\r
+ CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());\r
dst.create(src1.size(), src1.type());\r
- mathfunc::bitwise_xor_caller(dst.rows, dst.cols, src1, src2, dst.elemSize(), dst, stream);\r
+\r
+ cv::gpu::mathfunc::bitwise_xor_caller(dst.rows, dst.cols, dst.elemSize1(), \r
+ dst.channels(), src1, src2, dst, stream);\r
}\r
\r
+\r
void bitwise_xor_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, cudaStream_t stream)\r
{\r
+ using namespace cv::gpu;\r
+\r
+ typedef void (*Caller)(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+ static Caller callers[] = {mathfunc::bitwise_mask_xor_caller<unsigned char>, mathfunc::bitwise_mask_xor_caller<unsigned char>, \r
+ mathfunc::bitwise_mask_xor_caller<unsigned short>, mathfunc::bitwise_mask_xor_caller<unsigned short>,\r
+ mathfunc::bitwise_mask_xor_caller<unsigned int>, mathfunc::bitwise_mask_xor_caller<unsigned int>,\r
+ mathfunc::bitwise_mask_xor_caller<unsigned int>};\r
+\r
CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());\r
CV_Assert(mask.type() == CV_8U && mask.size() == src1.size());
dst.create(src1.size(), src1.type());\r
- mathfunc::bitwise_xor_caller(dst.rows, dst.cols, src1, src2, dst.elemSize(), dst, mask, stream);\r
+\r
+ Caller caller = callers[src1.depth()];\r
+ CV_Assert(caller);\r
+\r
+ int cn = dst.depth() != CV_64F ? dst.channels() : dst.channels() * (sizeof(double) / sizeof(unsigned int));\r
+ caller(dst.rows, dst.cols, cn, src1, src2, mask, dst, stream);\r
}\r
}\r
\r
template <typename T>\r
struct UnOp<T, UN_OP_NOT>\r
{ \r
- typedef typename TypeVec<T, 2>::vec_t Vec2;\r
- typedef typename TypeVec<T, 3>::vec_t Vec3;\r
- typedef typename TypeVec<T, 4>::vec_t Vec4;\r
static __device__ T call(T v) { return ~v; }\r
- static __device__ Vec2 call(Vec2 v) { return VecTraits<Vec2>::make(~v.x, ~v.y); }\r
- static __device__ Vec3 call(Vec3 v) { return VecTraits<Vec3>::make(~v.x, ~v.y, ~v.z); }\r
- static __device__ Vec4 call(Vec4 v) { return VecTraits<Vec4>::make(~v.x, ~v.y, ~v.z, ~v.w); }\r
};\r
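+ // Kernels operate on scalar words (uchar/ushort/uint) only. In the unmasked
+ // kernel below, each thread handles 4 consecutive bytes as a single uint;
+ // threads mapped past the last full uint process the row tail byte by byte.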
\r
\r
template <int opid>\r
- __global__ void bitwise_un_op(int rows, int cols, const PtrStep src, PtrStep dst)\r
+ __global__ void bitwise_un_op_kernel(int rows, int width, const PtrStep src, PtrStep dst)\r
{\r
const int x = (blockDim.x * blockIdx.x + threadIdx.x) * 4;\r
const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
if (y < rows)
{
uchar* dst_ptr = dst.ptr(y) + x;\r
const uchar* src_ptr = src.ptr(y) + x;\r
- if (x + sizeof(uint) - 1 < cols)\r
+ if (x + sizeof(uint) - 1 < width)\r
{\r
*(uint*)dst_ptr = UnOp<uint, opid>::call(*(uint*)src_ptr);\r
}\r
else\r
{\r
- const uchar* src_end = src.ptr(y) + cols;\r
+ const uchar* src_end = src.ptr(y) + width;\r
while (src_ptr < src_end)\r
{\r
*dst_ptr++ = UnOp<uchar, opid>::call(*src_ptr++);\r
}
}
}
}
\r
\r
- template <typename T, int cn, int opid>\r
- __global__ void bitwise_un_op(int rows, int cols, const PtrStep src, PtrStep dst, const PtrStep mask)\r
+ template <int opid>\r
+ void bitwise_un_op(int rows, int width, const PtrStep src, PtrStep dst, cudaStream_t stream)\r
{\r
- typedef typename TypeVec<T, cn>::vec_t Type;\r
- const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
- const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
+ dim3 threads(16, 16);\r
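+ // divUp by threads.x * sizeof(uint): each thread covers 4 bytes of the row.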
+ dim3 grid(divUp(width, threads.x * sizeof(uint)), \r
+ divUp(rows, threads.y));\r
\r
- if (x < cols && y < rows && mask.ptr(y)[x]) \r
- {\r
- Type* dst_row = (Type*)dst.ptr(y);\r
- const Type* src_row = (const Type*)src.ptr(y);\r
- dst_row[x] = UnOp<T, opid>::call(src_row[x]);\r
- }\r
+ bitwise_un_op_kernel<opid><<<grid, threads>>>(rows, width, src, dst);\r
+\r
+ if (stream == 0) \r
+ cudaSafeCall(cudaThreadSynchronize());\r
}\r
\r
\r
- template <typename T, int cn, int opid>\r
- __global__ void bitwise_un_op_two_loads(int rows, int cols, const PtrStep src, PtrStep dst, const PtrStep mask)\r
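+ // Masked kernel: x indexes individual channel words, while the mask holds one
+ // byte per pixel, hence the x / cn lookup.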
+ template <typename T, int opid>\r
+ __global__ void bitwise_un_op_kernel(int rows, int cols, int cn, const PtrStep src, const PtrStep mask, PtrStep dst)\r
{\r
- typedef typename TypeVec<T, cn>::vec_t Type;\r
const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
\r
- if (x < cols && y < rows && mask.ptr(y)[x]) \r
+ if (x < cols && y < rows && mask.ptr(y)[x / cn]) \r
{\r
- Type* dst_row = (Type*)dst.ptr(y);\r
- const Type* src_row = (const Type*)src.ptr(y);\r
- dst_row[2 * x] = UnOp<T, opid>::call(src_row[2 * x]);\r
- dst_row[2 * x + 1] = UnOp<T, opid>::call(src_row[2 * x + 1]);\r
+ T* dst_row = (T*)dst.ptr(y);\r
+ const T* src_row = (const T*)src.ptr(y);\r
+\r
+ dst_row[x] = UnOp<T, opid>::call(src_row[x]);\r
}\r
}\r
\r
\r
- template <int opid>\r
- void bitwise_un_op(int rows, int cols, const PtrStep src, PtrStep dst, int elem_size, cudaStream_t stream)\r
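+ // Masked launch: one thread per channel word; no vectorization, since the mask
+ // must be tested per pixel.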
+ template <typename T, int opid>\r
+ void bitwise_un_op(int rows, int cols, int cn, const PtrStep src, const PtrStep mask, PtrStep dst, cudaStream_t stream)\r
{\r
dim3 threads(16, 16);\r
- dim3 grid(divUp(cols * elem_size, threads.x * sizeof(uint)), \r
- divUp(rows, threads.y));\r
- bitwise_un_op<opid><<<grid, threads>>>(rows, cols * elem_size, src, dst);\r
- if (stream == 0) \r
- cudaSafeCall(cudaThreadSynchronize());\r
- }\r
+ dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));\r
\r
+ bitwise_un_op_kernel<T, opid><<<grid, threads>>>(rows, cols, cn, src, mask, dst); \r
\r
- template <int opid>\r
- void bitwise_un_op(int rows, int cols, const PtrStep src, PtrStep dst, int elem_size, const PtrStep mask, cudaStream_t stream)\r
- {\r
- dim3 threads(16, 16);\r
- dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));\r
- switch (elem_size)\r
- {\r
- case 1: \r
- bitwise_un_op<uchar, 1, opid><<<grid, threads>>>(rows, cols, src, dst, mask); \r
- break;\r
- case 2: \r
- bitwise_un_op<ushort, 1, opid><<<grid, threads>>>(rows, cols, src, dst, mask); \r
- break;\r
- case 3: \r
- bitwise_un_op<uchar, 3, opid><<<grid, threads>>>(rows, cols, src, dst, mask); \r
- break;\r
- case 4: \r
- bitwise_un_op<uint, 1, opid><<<grid, threads>>>(rows, cols, src, dst, mask); \r
- break;\r
- case 6: \r
- bitwise_un_op<ushort, 3, opid><<<grid, threads>>>(rows, cols, src, dst, mask); \r
- break;\r
- case 8: \r
- bitwise_un_op<uint, 2, opid><<<grid, threads>>>(rows, cols, src, dst, mask); \r
- break;\r
- case 12: \r
- bitwise_un_op<uint, 3, opid><<<grid, threads>>>(rows, cols, src, dst, mask); \r
- break;\r
- case 16: \r
- bitwise_un_op<uint, 4, opid><<<grid, threads>>>(rows, cols, src, dst, mask); \r
- break;\r
- case 24: \r
- bitwise_un_op_two_loads<uint, 3, opid><<<grid, threads>>>(rows, cols, src, dst, mask); \r
- break;\r
- case 32: \r
- bitwise_un_op_two_loads<uint, 4, opid><<<grid, threads>>>(rows, cols, src, dst, mask); \r
- break;\r
- }\r
if (stream == 0) \r
cudaSafeCall(cudaThreadSynchronize());\r
}\r
\r
\r
- void bitwise_not_caller(int rows, int cols, const PtrStep src, int elem_size, PtrStep dst, cudaStream_t stream)\r
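+ // Unmasked host wrappers flatten the row to bytes (cols * elem_size1 * cn) and
+ // launch the byte-wise kernel.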
+ void bitwise_not_caller(int rows, int cols, int elem_size1, int cn, const PtrStep src, PtrStep dst, cudaStream_t stream)\r
{\r
- bitwise_un_op<UN_OP_NOT>(rows, cols, src, dst, elem_size, stream);\r
+ bitwise_un_op<UN_OP_NOT>(rows, cols * elem_size1 * cn, src, dst, stream);\r
}\r
\r
\r
- void bitwise_not_caller(int rows, int cols,const PtrStep src, int elem_size, PtrStep dst, const PtrStep mask, cudaStream_t stream)\r
+ template <typename T>\r
+ void bitwise_mask_not_caller(int rows, int cols, int cn, const PtrStep src, const PtrStep mask, PtrStep dst, cudaStream_t stream)\r
{\r
- bitwise_un_op<UN_OP_NOT>(rows, cols, src, dst, elem_size, mask, stream);\r
+ bitwise_un_op<T, UN_OP_NOT>(rows, cols * cn, cn, src, mask, dst, stream);\r
}\r
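+ // Explicit instantiations for the word types used by the host-side dispatch table.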
\r
+ template void bitwise_mask_not_caller<uchar>(int, int, int, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+ template void bitwise_mask_not_caller<ushort>(int, int, int, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+ template void bitwise_mask_not_caller<uint>(int, int, int, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+\r
//------------------------------------------------------------------------\r
// Binary operations\r
\r
template <typename T>\r
struct BinOp<T, BIN_OP_OR>\r
{ \r
- typedef typename TypeVec<T, 2>::vec_t Vec2;\r
- typedef typename TypeVec<T, 3>::vec_t Vec3;\r
- typedef typename TypeVec<T, 4>::vec_t Vec4;\r
static __device__ T call(T a, T b) { return a | b; } \r
- static __device__ Vec2 call(Vec2 a, Vec2 b) { return VecTraits<Vec2>::make(a.x | b.x, a.y | b.y); } \r
- static __device__ Vec3 call(Vec3 a, Vec3 b) { return VecTraits<Vec3>::make(a.x | b.x, a.y | b.y, a.z | b.z); } \r
- static __device__ Vec4 call(Vec4 a, Vec4 b) { return VecTraits<Vec4>::make(a.x | b.x, a.y | b.y, a.z | b.z, a.w | b.w); } \r
};\r
\r
\r
template <typename T>\r
struct BinOp<T, BIN_OP_AND>\r
{ \r
- typedef typename TypeVec<T, 2>::vec_t Vec2;\r
- typedef typename TypeVec<T, 3>::vec_t Vec3;\r
- typedef typename TypeVec<T, 4>::vec_t Vec4;\r
static __device__ T call(T a, T b) { return a & b; } \r
- static __device__ Vec2 call(Vec2 a, Vec2 b) { return VecTraits<Vec2>::make(a.x & b.x, a.y & b.y); } \r
- static __device__ Vec3 call(Vec3 a, Vec3 b) { return VecTraits<Vec3>::make(a.x & b.x, a.y & b.y, a.z & b.z); } \r
- static __device__ Vec4 call(Vec4 a, Vec4 b) { return VecTraits<Vec4>::make(a.x & b.x, a.y & b.y, a.z & b.z, a.w & b.w); } \r
};\r
\r
template <typename T>\r
struct BinOp<T, BIN_OP_XOR>\r
{ \r
- typedef typename TypeVec<T, 2>::vec_t Vec2;\r
- typedef typename TypeVec<T, 3>::vec_t Vec3;\r
- typedef typename TypeVec<T, 4>::vec_t Vec4;\r
static __device__ T call(T a, T b) { return a ^ b; } \r
- static __device__ Vec2 call(Vec2 a, Vec2 b) { return VecTraits<Vec2>::make(a.x ^ b.x, a.y ^ b.y); } \r
- static __device__ Vec3 call(Vec3 a, Vec3 b) { return VecTraits<Vec3>::make(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z); } \r
- static __device__ Vec4 call(Vec4 a, Vec4 b) { return VecTraits<Vec4>::make(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w); } \r
};\r
\r
\r
template <int opid>\r
- __global__ void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst)\r
+ __global__ void bitwise_bin_op_kernel(int rows, int width, const PtrStep src1, const PtrStep src2, PtrStep dst)\r
{\r
const int x = (blockDim.x * blockIdx.x + threadIdx.x) * 4;\r
const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
if (y < rows)
{
uchar* dst_ptr = dst.ptr(y) + x;
const uchar* src1_ptr = src1.ptr(y) + x;
const uchar* src2_ptr = src2.ptr(y) + x;
- if (x + sizeof(uint) - 1 < cols)\r
+\r
+ if (x + sizeof(uint) - 1 < width)\r
{\r
*(uint*)dst_ptr = BinOp<uint, opid>::call(*(uint*)src1_ptr, *(uint*)src2_ptr);\r
}\r
else\r
{\r
- const uchar* src1_end = src1.ptr(y) + cols;\r
+ const uchar* src1_end = src1.ptr(y) + width;\r
while (src1_ptr < src1_end)\r
{\r
*dst_ptr++ = BinOp<uchar, opid>::call(*src1_ptr++, *src2_ptr++);\r
}
}
}
}
\r
\r
- template <typename T, int cn, int opid>\r
- __global__ void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, \r
- PtrStep dst, const PtrStep mask)\r
+ template <int opid>\r
+ void bitwise_bin_op(int rows, int width, const PtrStep src1, const PtrStep src2, PtrStep dst, \r
+ cudaStream_t stream)\r
{\r
- typedef typename TypeVec<T, cn>::vec_t Type;\r
- const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
- const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
+ dim3 threads(16, 16);\r
+ dim3 grid(divUp(width, threads.x * sizeof(uint)), divUp(rows, threads.y));\r
\r
- if (x < cols && y < rows && mask.ptr(y)[x]) \r
- {\r
- Type* dst_row = (Type*)dst.ptr(y);\r
- const Type* src1_row = (const Type*)src1.ptr(y);\r
- const Type* src2_row = (const Type*)src2.ptr(y);\r
- dst_row[x] = BinOp<T, opid>::call(src1_row[x], src2_row[x]);\r
- }\r
+ bitwise_bin_op_kernel<opid><<<grid, threads>>>(rows, width, src1, src2, dst);\r
+\r
+ if (stream == 0) \r
+ cudaSafeCall(cudaThreadSynchronize());\r
}\r
\r
\r
- template <typename T, int cn, int opid>\r
- __global__ void bitwise_bin_op_two_loads(int rows, int cols, const PtrStep src1, const PtrStep src2, \r
- PtrStep dst, const PtrStep mask)\r
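+ // Masked binary kernel: same per-pixel mask lookup (x / cn) as the unary variant.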
+ template <typename T, int opid>\r
+ __global__ void bitwise_bin_op_kernel(\r
+ int rows, int cols, int cn, const PtrStep src1, const PtrStep src2, \r
+ const PtrStep mask, PtrStep dst)\r
{\r
- typedef typename TypeVec<T, cn>::vec_t Type;\r
const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
\r
- if (x < cols && y < rows && mask.ptr(y)[x]) \r
+ if (x < cols && y < rows && mask.ptr(y)[x / cn]) \r
{\r
- Type* dst_row = (Type*)dst.ptr(y);\r
- const Type* src1_row = (const Type*)src1.ptr(y);\r
- const Type* src2_row = (const Type*)src2.ptr(y);\r
- dst_row[2 * x] = BinOp<T, opid>::call(src1_row[2 * x], src2_row[2 * x]);\r
- dst_row[2 * x + 1] = BinOp<T, opid>::call(src1_row[2 * x + 1], src2_row[2 * x + 1]);\r
+ T* dst_row = (T*)dst.ptr(y);\r
+ const T* src1_row = (const T*)src1.ptr(y);\r
+ const T* src2_row = (const T*)src2.ptr(y);\r
+\r
+ dst_row[x] = BinOp<T, opid>::call(src1_row[x], src2_row[x]);\r
}\r
}\r
\r
\r
- template <int opid>\r
- void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst, \r
- int elem_size, cudaStream_t stream)\r
+ template <typename T, int opid>\r
+ void bitwise_bin_op(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2, \r
+ const PtrStep mask, PtrStep dst, cudaStream_t stream)\r
{\r
dim3 threads(16, 16);\r
- dim3 grid(divUp(cols * elem_size, threads.x * sizeof(uint)), \r
- divUp(rows, threads.y));\r
- bitwise_bin_op<opid><<<grid, threads>>>(rows, cols * elem_size, src1, src2, dst);\r
- if (stream == 0) \r
- cudaSafeCall(cudaThreadSynchronize());\r
- }\r
+ dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));\r
\r
+ bitwise_bin_op_kernel<T, opid><<<grid, threads>>>(rows, cols, cn, src1, src2, mask, dst); \r
\r
- template <int opid>\r
- void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst, \r
- int elem_size, const PtrStep mask, cudaStream_t stream)\r
- {\r
- dim3 threads(16, 16);\r
- dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));\r
- switch (elem_size)\r
- {\r
- case 1: \r
- bitwise_bin_op<uchar, 1, opid><<<grid, threads>>>(rows, cols, src1, src2, dst, mask); \r
- break;\r
- case 2: \r
- bitwise_bin_op<ushort, 1, opid><<<grid, threads>>>(rows, cols, src1, src2, dst, mask); \r
- break;\r
- case 3: \r
- bitwise_bin_op<uchar, 3, opid><<<grid, threads>>>(rows, cols, src1, src2, dst, mask); \r
- break;\r
- case 4: \r
- bitwise_bin_op<uint, 1, opid><<<grid, threads>>>(rows, cols, src1, src2, dst, mask); \r
- break;\r
- case 6: \r
- bitwise_bin_op<ushort, 3, opid><<<grid, threads>>>(rows, cols, src1, src2, dst, mask); \r
- break;\r
- case 8: \r
- bitwise_bin_op<uint, 2, opid><<<grid, threads>>>(rows, cols, src1, src2, dst, mask); \r
- break;\r
- case 12: \r
- bitwise_bin_op<uint, 3, opid><<<grid, threads>>>(rows, cols, src1, src2, dst, mask); \r
- break;\r
- case 16: \r
- bitwise_bin_op<uint, 4, opid><<<grid, threads>>>(rows, cols, src1, src2, dst, mask); \r
- break;\r
- case 24: \r
- bitwise_bin_op_two_loads<uint, 3, opid><<<grid, threads>>>(rows, cols, src1, src2, dst, mask); \r
- break;\r
- case 32: \r
- bitwise_bin_op_two_loads<uint, 4, opid><<<grid, threads>>>(rows, cols, src1, src2, dst, mask); \r
- break;\r
- }\r
if (stream == 0) \r
cudaSafeCall(cudaThreadSynchronize());\r
}\r
\r
\r
- void bitwise_or_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, cudaStream_t stream)\r
+ void bitwise_or_caller(int rows, int cols, int elem_size1, int cn, const PtrStep src1, const PtrStep src2, PtrStep dst, cudaStream_t stream)\r
{\r
- bitwise_bin_op<BIN_OP_OR>(rows, cols, src1, src2, dst, elem_size, stream);\r
+ bitwise_bin_op<BIN_OP_OR>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);\r
}\r
\r
\r
- void bitwise_or_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, const PtrStep mask, cudaStream_t stream)\r
+ template <typename T>\r
+ void bitwise_mask_or_caller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2, const PtrStep mask, PtrStep dst, cudaStream_t stream)\r
{\r
- bitwise_bin_op<BIN_OP_OR>(rows, cols, src1, src2, dst, elem_size, mask, stream);\r
+ bitwise_bin_op<T, BIN_OP_OR>(rows, cols * cn, cn, src1, src2, mask, dst, stream);\r
}\r
\r
+ template void bitwise_mask_or_caller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+ template void bitwise_mask_or_caller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+ template void bitwise_mask_or_caller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
\r
- void bitwise_and_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, cudaStream_t stream)\r
+\r
+ void bitwise_and_caller(int rows, int cols, int elem_size1, int cn, const PtrStep src1, const PtrStep src2, PtrStep dst, cudaStream_t stream)\r
{\r
- bitwise_bin_op<BIN_OP_AND>(rows, cols, src1, src2, dst, elem_size, stream);\r
+ bitwise_bin_op<BIN_OP_AND>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);\r
}\r
\r
\r
- void bitwise_and_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, const PtrStep mask, cudaStream_t stream)\r
+ template <typename T>\r
+ void bitwise_mask_and_caller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2, const PtrStep mask, PtrStep dst, cudaStream_t stream)\r
{\r
- bitwise_bin_op<BIN_OP_AND>(rows, cols, src1, src2, dst, elem_size, mask, stream);\r
+ bitwise_bin_op<T, BIN_OP_AND>(rows, cols * cn, cn, src1, src2, mask, dst, stream);\r
}\r
\r
+ template void bitwise_mask_and_caller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+ template void bitwise_mask_and_caller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+ template void bitwise_mask_and_caller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
\r
- void bitwise_xor_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, cudaStream_t stream)\r
+\r
+ void bitwise_xor_caller(int rows, int cols, int elem_size1, int cn, const PtrStep src1, const PtrStep src2, PtrStep dst, cudaStream_t stream)\r
{\r
- bitwise_bin_op<BIN_OP_XOR>(rows, cols, src1, src2, dst, elem_size, stream);\r
+ bitwise_bin_op<BIN_OP_XOR>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);\r
}\r
\r
\r
- void bitwise_xor_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, const PtrStep mask, cudaStream_t stream)\r
+ template <typename T>\r
+ void bitwise_mask_xor_caller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2, const PtrStep mask, PtrStep dst, cudaStream_t stream)\r
{\r
- bitwise_bin_op<BIN_OP_XOR>(rows, cols, src1, src2, dst, elem_size, mask, stream);\r
- } \r
+ bitwise_bin_op<T, BIN_OP_XOR>(rows, cols * cn, cn, src1, src2, mask, dst, stream);\r
+ }\r
+\r
+ template void bitwise_mask_xor_caller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+ template void bitwise_mask_xor_caller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
+ template void bitwise_mask_xor_caller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);\r
\r
\r
\r