//! computes magnitude of each (x(i), y(i)) vector\r
//! supports only floating-point source\r
CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude);\r
- //! Async version\r
+ //! async version\r
CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, const Stream& stream);\r
\r
//! computes squared magnitude of each (x(i), y(i)) vector\r
//! supports only floating-point source\r
CV_EXPORTS void magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude);\r
- //! Async version\r
+ //! async version\r
CV_EXPORTS void magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, const Stream& stream);\r
\r
//! computes angle (angle(i)) of each (x(i), y(i)) vector\r
//! supports only floating-point source\r
CV_EXPORTS void phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees = false);\r
- //! Async version\r
+ //! async version\r
CV_EXPORTS void phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees, const Stream& stream);\r
\r
//! converts Cartesian coordinates to polar\r
//! supports only floating-point source\r
CV_EXPORTS void cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees = false);\r
- //! Async version\r
+ //! async version\r
CV_EXPORTS void cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees, const Stream& stream);\r
\r
//! converts polar coordinates to Cartesian\r
//! supports only floating-point source\r
CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees = false);\r
- //! Async version\r
+ //! async version\r
CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees, const Stream& stream);\r
\r
\r
- //! Perfroms per-elements bit-wise inversion \r
- CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst);\r
- //! Async version\r
- CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst, const Stream& stream);\r
+ //! performs per-element bit-wise inversion
+ CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask=GpuMat());\r
+ //! async version\r
+ CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask, const Stream& stream);\r
\r
- //! Calculates per-element bit-wise disjunction of two arrays\r
- CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst);\r
- //! Async version\r
- CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream);\r
+ //! calculates per-element bit-wise disjunction of two arrays\r
+ CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat());\r
+ //! async version\r
+ CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, const Stream& stream);\r
\r
- //! Calculates per-element bit-wise conjunction of two arrays\r
- CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst);\r
- //! Async version\r
- CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream);\r
+ //! calculates per-element bit-wise conjunction of two arrays\r
+ CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat());\r
+ //! async version\r
+ CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, const Stream& stream);\r
\r
- //! Calculates per-element bit-wise "exclusive or" operation\r
- CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst);\r
- //! Async version\r
- CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream);\r
+ //! calculates per-element bit-wise "exclusive or" operation\r
+ CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask=GpuMat());\r
+ //! async version\r
+ CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, const Stream& stream);\r
\r
//! Logical operators\r
CV_EXPORTS GpuMat operator ~ (const GpuMat& src);\r
//! Supported types of input disparity: CV_8U, CV_16S.\r
//! Output disparity has CV_8UC4 type in BGRA format (alpha = 255).\r
CV_EXPORTS void drawColorDisp(const GpuMat& src_disp, GpuMat& dst_disp, int ndisp);\r
- //! Async version\r
+ //! async version\r
CV_EXPORTS void drawColorDisp(const GpuMat& src_disp, GpuMat& dst_disp, int ndisp, const Stream& stream);\r
\r
//! Reprojects disparity image to 3D space. \r
//! Each element of this matrix will contain the 3D coordinates of the point (x,y,z,1), computed from the disparity map.\r
//! Q is the 4x4 perspective transformation matrix that can be obtained with cvStereoRectify.\r
CV_EXPORTS void reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q);\r
- //! Async version\r
+ //! async version\r
CV_EXPORTS void reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, const Stream& stream);\r
\r
//! converts image from one color space to another\r
CV_EXPORTS void cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn = 0);\r
- //! Async version\r
+ //! async version\r
CV_EXPORTS void cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn, const Stream& stream);\r
\r
//! applies fixed threshold to the image. \r
//! Output disparity has CV_8U type.\r
void operator() ( const GpuMat& left, const GpuMat& right, GpuMat& disparity);\r
\r
- //! Async version\r
+ //! async version\r
void operator() ( const GpuMat& left, const GpuMat& right, GpuMat& disparity, const Stream & stream);\r
\r
    //! Some heuristics that try to estimate
//! if disparity is empty output type will be CV_16S else output type will be disparity.type().\r
void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity);\r
\r
- //! Async version\r
+ //! async version\r
void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream);\r
\r
\r
//! if disparity is empty output type will be CV_16S else output type will be disparity.type().\r
void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity);\r
\r
- //! Async version\r
+ //! async version\r
void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream);\r
\r
int ndisp;\r
//! disparity must have CV_8U or CV_16S type, image must have CV_8UC1 or CV_8UC3 type.\r
void operator()(const GpuMat& disparity, const GpuMat& image, GpuMat& dst);\r
\r
- //! Async version\r
+ //! async version\r
void operator()(const GpuMat& disparity, const GpuMat& image, GpuMat& dst, Stream& stream);\r
\r
private:\r
// No-CUDA build stubs: these entry points exist so the library links without GPU
// support; every invocation reports that GPU functionality is unavailable.
void cv::gpu::cartToPolar(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, const Stream&)
{
    throw_nogpu();
}

void cv::gpu::polarToCart(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool)
{
    throw_nogpu();
}

void cv::gpu::polarToCart(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, const Stream&)
{
    throw_nogpu();
}
-void cv::gpu::bitwise_not(const GpuMat&, GpuMat&) { throw_nogpu(); }\r
-void cv::gpu::bitwise_not(const GpuMat&, GpuMat&, const Stream& stream) { throw_nogpu(); }\r
-void cv::gpu::bitwise_or(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); }\r
-void cv::gpu::bitwise_or(const GpuMat&, const GpuMat&, GpuMat&, const Stream& stream) { throw_nogpu(); }\r
-void cv::gpu::bitwise_and(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); }\r
-void cv::gpu::bitwise_and(const GpuMat&, const GpuMat&, GpuMat&, const Stream& stream) { throw_nogpu(); }\r
-void cv::gpu::bitwise_xor(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); }\r
-void cv::gpu::bitwise_xor(const GpuMat&, const GpuMat&, GpuMat&, const Stream& stream) { throw_nogpu(); }\r
+void cv::gpu::bitwise_not(const GpuMat&, GpuMat&, const GpuMat&) { throw_nogpu(); }\r
+void cv::gpu::bitwise_not(const GpuMat&, GpuMat&, const GpuMat&, const Stream&) { throw_nogpu(); }\r
+void cv::gpu::bitwise_or(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&) { throw_nogpu(); }\r
+void cv::gpu::bitwise_or(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&, const Stream&) { throw_nogpu(); }\r
+void cv::gpu::bitwise_and(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&) { throw_nogpu(); }\r
+void cv::gpu::bitwise_and(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&, const Stream&) { throw_nogpu(); }\r
+void cv::gpu::bitwise_xor(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&) { throw_nogpu(); }\r
+void cv::gpu::bitwise_xor(const GpuMat&, const GpuMat&, GpuMat&, const GpuMat&, const Stream&) { throw_nogpu(); }\r
// No-CUDA build stubs for the logical operators: raise the no-GPU error and
// return an empty matrix to satisfy the signature (the return is unreachable).
cv::gpu::GpuMat cv::gpu::operator ~ (const GpuMat&)
{
    throw_nogpu();
    return GpuMat();
}

cv::gpu::GpuMat cv::gpu::operator | (const GpuMat&, const GpuMat&)
{
    throw_nogpu();
    return GpuMat();
}

cv::gpu::GpuMat cv::gpu::operator & (const GpuMat&, const GpuMat&)
{
    throw_nogpu();
    return GpuMat();
}
\r
namespace cv { namespace gpu { namespace mathfunc \r
{\r
- void bitwise_not_caller(const DevMem2D src, int elemSize, PtrStep dst, cudaStream_t stream);\r
- void bitwise_or_caller(int cols, int rows, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream);\r
- void bitwise_and_caller(int cols, int rows, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream);\r
- void bitwise_xor_caller(int cols, int rows, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream);\r
+ void bitwise_not_caller(int rows, int cols, const PtrStep src, int elemSize, PtrStep dst, cudaStream_t stream);\r
+ void bitwise_not_caller(int rows, int cols, const PtrStep src, int elemSize, PtrStep dst, const PtrStep mask, cudaStream_t stream);\r
+ void bitwise_or_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream);\r
+ void bitwise_or_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, const PtrStep mask, cudaStream_t stream);\r
+ void bitwise_and_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream);\r
+ void bitwise_and_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, const PtrStep mask, cudaStream_t stream);\r
+ void bitwise_xor_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream);\r
+ void bitwise_xor_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, const PtrStep mask, cudaStream_t stream);\r
+\r
+\r
+ template <int opid, typename Mask>\r
+ void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst, int elem_size, Mask mask, cudaStream_t stream);\r
}}}\r
\r
namespace \r
void bitwise_not_caller(const GpuMat& src, GpuMat& dst, cudaStream_t stream)\r
{\r
dst.create(src.size(), src.type());\r
- mathfunc::bitwise_not_caller(src, src.elemSize(), dst, stream);\r
+ mathfunc::bitwise_not_caller(src.rows, src.cols, src, src.elemSize(), dst, stream);\r
+ }\r
+\r
+ void bitwise_not_caller(const GpuMat& src, GpuMat& dst, const GpuMat& mask, cudaStream_t stream)\r
+ {\r
+ CV_Assert(mask.type() == CV_8U && mask.size() == src.size());\r
+ dst.create(src.size(), src.type());\r
+ mathfunc::bitwise_not_caller(src.rows, src.cols, src, src.elemSize(), dst, mask, stream);\r
}\r
\r
void bitwise_or_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, cudaStream_t stream)\r
{\r
- CV_Assert(src1.size() == src2.size());\r
- CV_Assert(src1.type() == src2.type());\r
+ CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());\r
+ dst.create(src1.size(), src1.type());\r
+ mathfunc::bitwise_or_caller(dst.rows, dst.cols, src1, src2, dst.elemSize(), dst, stream);\r
+ }\r
\r
+ void bitwise_or_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, cudaStream_t stream)\r
+ {\r
+ CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());\r
+ CV_Assert(mask.type() == CV_8U && mask.size() == src1.size());\r
dst.create(src1.size(), src1.type());\r
- mathfunc::bitwise_or_caller(dst.cols, dst.rows, src1, src2, dst.elemSize(), dst, stream);\r
+ mathfunc::bitwise_or_caller(dst.rows, dst.cols, src1, src2, dst.elemSize(), dst, mask, stream);\r
}\r
\r
void bitwise_and_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, cudaStream_t stream)\r
{\r
- CV_Assert(src1.size() == src2.size());\r
- CV_Assert(src1.type() == src2.type());\r
+ CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());\r
+ dst.create(src1.size(), src1.type());\r
+ mathfunc::bitwise_and_caller(dst.rows, dst.cols, src1, src2, dst.elemSize(), dst, stream);\r
+ }\r
\r
+ void bitwise_and_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, cudaStream_t stream)\r
+ {\r
+ CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());\r
+ CV_Assert(mask.type() == CV_8U && mask.size() == src1.size());\r
dst.create(src1.size(), src1.type());\r
- mathfunc::bitwise_and_caller(dst.cols, dst.rows, src1, src2, dst.elemSize(), dst, stream);\r
+ mathfunc::bitwise_and_caller(dst.rows, dst.cols, src1, src2, dst.elemSize(), dst, mask, stream);\r
}\r
\r
void bitwise_xor_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, cudaStream_t stream)\r
{\r
CV_Assert(src1.size() == src2.size());\r
CV_Assert(src1.type() == src2.type());\r
+ dst.create(src1.size(), src1.type());\r
+ mathfunc::bitwise_xor_caller(dst.rows, dst.cols, src1, src2, dst.elemSize(), dst, stream);\r
+ }\r
\r
+ void bitwise_xor_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, cudaStream_t stream)\r
+ {\r
+ CV_Assert(src1.size() == src2.size() && src1.type() == src2.type());\r
+ CV_Assert(mask.type() == CV_8U && mask.size() == src1.size());\r
dst.create(src1.size(), src1.type());\r
- mathfunc::bitwise_xor_caller(dst.cols, dst.rows, src1, src2, dst.elemSize(), dst, stream);\r
+ mathfunc::bitwise_xor_caller(dst.rows, dst.cols, src1, src2, dst.elemSize(), dst, mask, stream);\r
}\r
}\r
\r
-void cv::gpu::bitwise_not(const GpuMat& src, GpuMat& dst)\r
+void cv::gpu::bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask)\r
{\r
- ::bitwise_not_caller(src, dst, 0);\r
+ if (mask.empty()) \r
+ ::bitwise_not_caller(src, dst, 0);\r
+ else\r
+ ::bitwise_not_caller(src, dst, mask, 0);\r
}\r
\r
-void cv::gpu::bitwise_not(const GpuMat& src, GpuMat& dst, const Stream& stream)\r
+void cv::gpu::bitwise_not(const GpuMat& src, GpuMat& dst, const GpuMat& mask, const Stream& stream)\r
{\r
- ::bitwise_not_caller(src, dst, StreamAccessor::getStream(stream));\r
+ if (mask.empty())\r
+ ::bitwise_not_caller(src, dst, StreamAccessor::getStream(stream));\r
+ else\r
+ ::bitwise_not_caller(src, dst, mask, StreamAccessor::getStream(stream));\r
}\r
\r
-void cv::gpu::bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)\r
+void cv::gpu::bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask)\r
{\r
- ::bitwise_or_caller(src1, src2, dst, 0);\r
+ if (mask.empty())\r
+ ::bitwise_or_caller(src1, src2, dst, 0);\r
+ else\r
+ ::bitwise_or_caller(src1, src2, dst, mask, 0);\r
}\r
\r
-void cv::gpu::bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream)\r
+void cv::gpu::bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, const Stream& stream)\r
{\r
- ::bitwise_or_caller(src1, src2, dst, StreamAccessor::getStream(stream));\r
+ if (mask.empty())\r
+ ::bitwise_or_caller(src1, src2, dst, StreamAccessor::getStream(stream));\r
+ else\r
+ ::bitwise_or_caller(src1, src2, dst, mask, StreamAccessor::getStream(stream));\r
}\r
\r
-void cv::gpu::bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)\r
+void cv::gpu::bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask)\r
{\r
- ::bitwise_and_caller(src1, src2, dst, 0);\r
+ if (mask.empty())\r
+ ::bitwise_and_caller(src1, src2, dst, 0);\r
+ else\r
+ ::bitwise_and_caller(src1, src2, dst, mask, 0);\r
}\r
\r
-void cv::gpu::bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream)\r
+void cv::gpu::bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, const Stream& stream)\r
{\r
- ::bitwise_and_caller(src1, src2, dst, StreamAccessor::getStream(stream));\r
+ if (mask.empty())\r
+ ::bitwise_and_caller(src1, src2, dst, StreamAccessor::getStream(stream));\r
+ else\r
+ ::bitwise_and_caller(src1, src2, dst, mask, StreamAccessor::getStream(stream));\r
}\r
\r
-void cv::gpu::bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)\r
+void cv::gpu::bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask)\r
{\r
- ::bitwise_xor_caller(src1, src2, dst, 0);\r
+ if (mask.empty())\r
+ ::bitwise_xor_caller(src1, src2, dst, 0);\r
+ else\r
+ ::bitwise_xor_caller(src1, src2, dst, mask, 0);\r
}\r
\r
-void cv::gpu::bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream)\r
+void cv::gpu::bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& mask, const Stream& stream)\r
{\r
- ::bitwise_xor_caller(src1, src2, dst, StreamAccessor::getStream(stream));\r
+ if (mask.empty())\r
+ ::bitwise_xor_caller(src1, src2, dst, StreamAccessor::getStream(stream));\r
+ else\r
+ ::bitwise_xor_caller(src1, src2, dst, mask, StreamAccessor::getStream(stream));\r
+\r
}\r
\r
cv::gpu::GpuMat cv::gpu::operator ~ (const GpuMat& src)\r
//////////////////////////////////////////////////////////////////////////////\r
// Per-element bit-wise logical matrix operations\r
\r
-\r
- __global__ void bitwise_not_kernel(int cols, int rows, const PtrStep src, PtrStep dst)\r
+ struct Mask8U\r
{\r
- const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
- const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
-\r
- if (x < cols && y < rows)\r
- {\r
- dst.ptr(y)[x] = ~src.ptr(y)[x];\r
- }\r
- }\r
-\r
+ explicit Mask8U(PtrStep mask): mask(mask) {}\r
+ __device__ bool operator()(int y, int x) { return mask.ptr(y)[x]; }\r
+ PtrStep mask;\r
+ };\r
+ struct MaskTrue { __device__ bool operator()(int y, int x) { return true; } };\r
\r
- void bitwise_not_caller(const DevMem2D src, int elemSize, PtrStep dst, cudaStream_t stream)\r
- {\r
- dim3 threads(16, 16, 1);\r
- dim3 grid(divUp(src.cols * elemSize, threads.x), divUp(src.rows, threads.y), 1);\r
+ // Unary operations\r
\r
- bitwise_not_kernel<<<grid, threads, 0, stream>>>(src.cols * elemSize, src.rows, src, dst);\r
+ enum { UN_OP_NOT };\r
\r
- if (stream == 0)\r
- cudaSafeCall(cudaThreadSynchronize());\r
- }\r
+ template <typename T, int opid>\r
+ struct UnOp { __device__ T operator()(T lhs, T rhs); };\r
\r
+ template <typename T>\r
+ struct UnOp<T, UN_OP_NOT>{ __device__ T operator()(T x) { return ~x; } };\r
\r
- __global__ void bitwise_or_kernel(int cols, int rows, const PtrStep src1, const PtrStep src2, PtrStep dst)\r
+ template <typename T, int cn, typename UnOp, typename Mask>\r
+ __global__ void bitwise_un_op(int rows, int cols, const PtrStep src, PtrStep dst, UnOp op, Mask mask)\r
{\r
const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
\r
- if (x < cols && y < rows)\r
+ if (x < cols && y < rows && mask(y, x)) \r
{\r
- dst.ptr(y)[x] = src1.ptr(y)[x] | src2.ptr(y)[x];\r
+ T* dsty = (T*)dst.ptr(y);\r
+ const T* srcy = (const T*)src.ptr(y);\r
+\r
+ #pragma unroll\r
+ for (int i = 0; i < cn; ++i)\r
+ dsty[cn * x + i] = op(srcy[cn * x + i]);\r
}\r
}\r
\r
-\r
- void bitwise_or_caller(int cols, int rows, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream)\r
+ template <int opid, typename Mask>\r
+ void bitwise_un_op(int rows, int cols, const PtrStep src, PtrStep dst, int elem_size, Mask mask, cudaStream_t stream)\r
{\r
- dim3 threads(16, 16, 1);\r
- dim3 grid(divUp(cols * elemSize, threads.x), divUp(rows, threads.y), 1);\r
-\r
- bitwise_or_kernel<<<grid, threads, 0, stream>>>(cols * elemSize, rows, src1, src2, dst);\r
-\r
- if (stream == 0)\r
- cudaSafeCall(cudaThreadSynchronize());\r
+ dim3 threads(16, 16);\r
+ dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));\r
+ switch (elem_size)\r
+ {\r
+ case 1: bitwise_un_op<unsigned char, 1><<<grid, threads>>>(rows, cols, src, dst, UnOp<unsigned char, opid>(), mask); break;\r
+ case 2: bitwise_un_op<unsigned short, 1><<<grid, threads>>>(rows, cols, src, dst, UnOp<unsigned short, opid>(), mask); break;\r
+ case 3: bitwise_un_op<unsigned char, 3><<<grid, threads>>>(rows, cols, src, dst, UnOp<unsigned char, opid>(), mask); break;\r
+ case 4: bitwise_un_op<unsigned int, 1><<<grid, threads>>>(rows, cols, src, dst, UnOp<unsigned int, opid>(), mask); break;\r
+ case 6: bitwise_un_op<unsigned short, 3><<<grid, threads>>>(rows, cols, src, dst, UnOp<unsigned short, opid>(), mask); break;\r
+ case 8: bitwise_un_op<unsigned int, 2><<<grid, threads>>>(rows, cols, src, dst, UnOp<unsigned int, opid>(), mask); break; \r
+ case 12: bitwise_un_op<unsigned int, 3><<<grid, threads>>>(rows, cols, src, dst, UnOp<unsigned int, opid>(), mask); break;\r
+ case 16: bitwise_un_op<unsigned int, 4><<<grid, threads>>>(rows, cols, src, dst, UnOp<unsigned int, opid>(), mask); break;\r
+ case 24: bitwise_un_op<unsigned int, 6><<<grid, threads>>>(rows, cols, src, dst, UnOp<unsigned int, opid>(), mask); break;\r
+ case 32: bitwise_un_op<unsigned int, 8><<<grid, threads>>>(rows, cols, src, dst, UnOp<unsigned int, opid>(), mask); break;\r
+ }\r
+ if (stream == 0) cudaSafeCall(cudaThreadSynchronize()); \r
}\r
\r
-\r
- __global__ void bitwise_and_kernel(int cols, int rows, const PtrStep src1, const PtrStep src2, PtrStep dst)\r
+ void bitwise_not_caller(int rows, int cols,const PtrStep src, int elem_size, PtrStep dst, cudaStream_t stream)\r
{\r
- const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
- const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
+ bitwise_un_op<UN_OP_NOT>(rows, cols, src, dst, elem_size, MaskTrue(), stream);\r
+ }\r
\r
- if (x < cols && y < rows)\r
- {\r
- dst.ptr(y)[x] = src1.ptr(y)[x] & src2.ptr(y)[x];\r
- }\r
+ void bitwise_not_caller(int rows, int cols,const PtrStep src, int elem_size, PtrStep dst, const PtrStep mask, cudaStream_t stream)\r
+ {\r
+ bitwise_un_op<UN_OP_NOT>(rows, cols, src, dst, elem_size, Mask8U(mask), stream);\r
}\r
\r
+ // Binary operations\r
\r
- void bitwise_and_caller(int cols, int rows, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream)\r
- {\r
- dim3 threads(16, 16, 1);\r
- dim3 grid(divUp(cols * elemSize, threads.x), divUp(rows, threads.y), 1);\r
+ enum { BIN_OP_OR, BIN_OP_AND, BIN_OP_XOR };\r
\r
- bitwise_and_kernel<<<grid, threads, 0, stream>>>(cols * elemSize, rows, src1, src2, dst);\r
+ template <typename T, int opid>\r
+ struct BinOp { __device__ T operator()(T lhs, T rhs); };\r
\r
- if (stream == 0)\r
- cudaSafeCall(cudaThreadSynchronize());\r
- }\r
+ template <typename T>\r
+ struct BinOp<T, BIN_OP_OR>{ __device__ T operator()(T lhs, T rhs) { return lhs | rhs; } };\r
\r
+ template <typename T>\r
+ struct BinOp<T, BIN_OP_AND>{ __device__ T operator()(T lhs, T rhs) { return lhs & rhs; } };\r
\r
+ template <typename T>\r
+ struct BinOp<T, BIN_OP_XOR>{ __device__ T operator()(T lhs, T rhs) { return lhs ^ rhs; } };\r
\r
- __global__ void bitwise_xor_kernel(int cols, int rows, const PtrStep src1, const PtrStep src2, PtrStep dst)\r
+ template <typename T, int cn, typename BinOp, typename Mask>\r
+ __global__ void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst, BinOp op, Mask mask)\r
{\r
const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
\r
- if (x < cols && y < rows)\r
+ if (x < cols && y < rows && mask(y, x)) \r
{\r
- dst.ptr(y)[x] = src1.ptr(y)[x] ^ src2.ptr(y)[x];\r
+ T* dsty = (T*)dst.ptr(y);\r
+ const T* src1y = (const T*)src1.ptr(y);\r
+ const T* src2y = (const T*)src2.ptr(y);\r
+\r
+ #pragma unroll\r
+ for (int i = 0; i < cn; ++i)\r
+ dsty[cn * x + i] = op(src1y[cn * x + i], src2y[cn * x + i]);\r
}\r
}\r
\r
+ template <int opid, typename Mask>\r
+ void bitwise_bin_op(int rows, int cols, const PtrStep src1, const PtrStep src2, PtrStep dst, int elem_size, Mask mask, cudaStream_t stream)\r
+ {\r
+ dim3 threads(16, 16);\r
+ dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));\r
+ switch (elem_size)\r
+ {\r
+ case 1: bitwise_bin_op<unsigned char, 1><<<grid, threads>>>(rows, cols, src1, src2, dst, BinOp<unsigned char, opid>(), mask); break;\r
+ case 2: bitwise_bin_op<unsigned short, 1><<<grid, threads>>>(rows, cols, src1, src2, dst, BinOp<unsigned short, opid>(), mask); break;\r
+ case 3: bitwise_bin_op<unsigned char, 3><<<grid, threads>>>(rows, cols, src1, src2, dst, BinOp<unsigned char, opid>(), mask); break;\r
+ case 4: bitwise_bin_op<unsigned int, 1><<<grid, threads>>>(rows, cols, src1, src2, dst, BinOp<unsigned int, opid>(), mask); break;\r
+ case 6: bitwise_bin_op<unsigned short, 3><<<grid, threads>>>(rows, cols, src1, src2, dst, BinOp<unsigned short, opid>(), mask); break;\r
+ case 8: bitwise_bin_op<unsigned int, 2><<<grid, threads>>>(rows, cols, src1, src2, dst, BinOp<unsigned int, opid>(), mask); break; \r
+ case 12: bitwise_bin_op<unsigned int, 3><<<grid, threads>>>(rows, cols, src1, src2, dst, BinOp<unsigned int, opid>(), mask); break;\r
+ case 16: bitwise_bin_op<unsigned int, 4><<<grid, threads>>>(rows, cols, src1, src2, dst, BinOp<unsigned int, opid>(), mask); break;\r
+ case 24: bitwise_bin_op<unsigned int, 6><<<grid, threads>>>(rows, cols, src1, src2, dst, BinOp<unsigned int, opid>(), mask); break;\r
+ case 32: bitwise_bin_op<unsigned int, 8><<<grid, threads>>>(rows, cols, src1, src2, dst, BinOp<unsigned int, opid>(), mask); break;\r
+ }\r
+ if (stream == 0) cudaSafeCall(cudaThreadSynchronize()); \r
+ }\r
\r
- void bitwise_xor_caller(int cols, int rows, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream)\r
+ void bitwise_or_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, cudaStream_t stream)\r
{\r
- dim3 threads(16, 16, 1);\r
- dim3 grid(divUp(cols * elemSize, threads.x), divUp(rows, threads.y), 1);\r
+ bitwise_bin_op<BIN_OP_OR>(rows, cols, src1, src2, dst, elem_size, MaskTrue(), stream);\r
+ }\r
\r
- bitwise_xor_kernel<<<grid, threads, 0, stream>>>(cols * elemSize, rows, src1, src2, dst);\r
+ void bitwise_or_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, const PtrStep mask, cudaStream_t stream)\r
+ {\r
+ bitwise_bin_op<BIN_OP_OR>(rows, cols, src1, src2, dst, elem_size, Mask8U(mask), stream);\r
+ }\r
\r
- if (stream == 0)\r
- cudaSafeCall(cudaThreadSynchronize());\r
+ void bitwise_and_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, cudaStream_t stream)\r
+ {\r
+ bitwise_bin_op<BIN_OP_AND>(rows, cols, src1, src2, dst, elem_size, MaskTrue(), stream);\r
+ }\r
+\r
+ void bitwise_and_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, const PtrStep mask, cudaStream_t stream)\r
+ {\r
+ bitwise_bin_op<BIN_OP_AND>(rows, cols, src1, src2, dst, elem_size, Mask8U(mask), stream);\r
+ }\r
+\r
+ void bitwise_xor_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, cudaStream_t stream)\r
+ {\r
+ bitwise_bin_op<BIN_OP_XOR>(rows, cols, src1, src2, dst, elem_size, MaskTrue(), stream);\r
}\r
+\r
+ void bitwise_xor_caller(int rows, int cols, const PtrStep src1, const PtrStep src2, int elem_size, PtrStep dst, const PtrStep mask, cudaStream_t stream)\r
+ {\r
+ bitwise_bin_op<BIN_OP_XOR>(rows, cols, src1, src2, dst, elem_size, Mask8U(mask), stream);\r
+ } \r
}}}\r
int rows, cols;\r
for (int depth = CV_8U; depth <= CV_64F; ++depth)\r
for (int cn = 1; cn <= 4; ++cn)\r
- for (int attempt = 0; attempt < 5; ++attempt)\r
+ for (int attempt = 0; attempt < 3; ++attempt)\r
{\r
rows = 1 + rand() % 100;\r
cols = 1 + rand() % 100;\r
}\r
\r
Mat dst_gold = ~src;\r
- gpu::GpuMat dst = ~gpu::GpuMat(src);\r
+\r
+ gpu::GpuMat mask(src.size(), CV_8U);\r
+ mask.setTo(Scalar(1));\r
+\r
+ gpu::GpuMat dst;\r
+ gpu::bitwise_not(gpu::GpuMat(src), dst, mask);\r
\r
CHECK(dst_gold.size() == dst.size(), CvTS::FAIL_INVALID_OUTPUT);\r
CHECK(dst_gold.type() == dst.type(), CvTS::FAIL_INVALID_OUTPUT); \r
\r
CHECK(dst_gold.size() == dst.size(), CvTS::FAIL_INVALID_OUTPUT);\r
CHECK(dst_gold.type() == dst.type(), CvTS::FAIL_INVALID_OUTPUT); \r
-\r
Mat dsth(dst);\r
for (int i = 0; i < dst_gold.rows; ++i) \r
CHECK(memcmp(dst_gold.ptr(i), dsth.ptr(i), dst_gold.cols * dst_gold.elemSize()) == 0, CvTS::FAIL_INVALID_OUTPUT)\r
+\r
+ Mat mask(src1.size(), CV_8U);\r
+ randu(mask, Scalar(0), Scalar(255));\r
+\r
+ Mat dst_gold2(dst_gold.size(), dst_gold.type()); dst_gold2.setTo(Scalar::all(0));\r
+ gpu::GpuMat dst2(dst.size(), dst.type()); dst2.setTo(Scalar::all(0));\r
+ bitwise_or(src1, src2, dst_gold2, mask);\r
+ gpu::bitwise_or(gpu::GpuMat(src1), gpu::GpuMat(src2), dst2, gpu::GpuMat(mask));\r
+\r
+ CHECK(dst_gold2.size() == dst2.size(), CvTS::FAIL_INVALID_OUTPUT);\r
+ CHECK(dst_gold2.type() == dst2.type(), CvTS::FAIL_INVALID_OUTPUT); \r
+ dsth = dst2;\r
+ for (int i = 0; i < dst_gold.rows; ++i) \r
+ CHECK(memcmp(dst_gold2.ptr(i), dsth.ptr(i), dst_gold2.cols * dst_gold2.elemSize()) == 0, CvTS::FAIL_INVALID_OUTPUT)\r
}\r
\r
void test_bitwise_and(int rows, int cols, int type)\r
\r
CHECK(dst_gold.size() == dst.size(), CvTS::FAIL_INVALID_OUTPUT);\r
CHECK(dst_gold.type() == dst.type(), CvTS::FAIL_INVALID_OUTPUT); \r
-\r
Mat dsth(dst);\r
for (int i = 0; i < dst_gold.rows; ++i) \r
CHECK(memcmp(dst_gold.ptr(i), dsth.ptr(i), dst_gold.cols * dst_gold.elemSize()) == 0, CvTS::FAIL_INVALID_OUTPUT)\r
+\r
+\r
+ Mat mask(src1.size(), CV_8U);\r
+ randu(mask, Scalar(0), Scalar(255));\r
+\r
+ Mat dst_gold2(dst_gold.size(), dst_gold.type()); dst_gold2.setTo(Scalar::all(0));\r
+ gpu::GpuMat dst2(dst.size(), dst.type()); dst2.setTo(Scalar::all(0));\r
+ bitwise_and(src1, src2, dst_gold2, mask);\r
+ gpu::bitwise_and(gpu::GpuMat(src1), gpu::GpuMat(src2), dst2, gpu::GpuMat(mask));\r
+\r
+ CHECK(dst_gold2.size() == dst2.size(), CvTS::FAIL_INVALID_OUTPUT);\r
+ CHECK(dst_gold2.type() == dst2.type(), CvTS::FAIL_INVALID_OUTPUT); \r
+ dsth = dst2;\r
+ for (int i = 0; i < dst_gold.rows; ++i) \r
+ CHECK(memcmp(dst_gold2.ptr(i), dsth.ptr(i), dst_gold2.cols * dst_gold2.elemSize()) == 0, CvTS::FAIL_INVALID_OUTPUT)\r
}\r
\r
void test_bitwise_xor(int rows, int cols, int type)\r
\r
CHECK(dst_gold.size() == dst.size(), CvTS::FAIL_INVALID_OUTPUT);\r
CHECK(dst_gold.type() == dst.type(), CvTS::FAIL_INVALID_OUTPUT); \r
-\r
Mat dsth(dst);\r
for (int i = 0; i < dst_gold.rows; ++i) \r
CHECK(memcmp(dst_gold.ptr(i), dsth.ptr(i), dst_gold.cols * dst_gold.elemSize()) == 0, CvTS::FAIL_INVALID_OUTPUT)\r
+\r
+\r
+ Mat mask(src1.size(), CV_8U);\r
+ randu(mask, Scalar(0), Scalar(255));\r
+\r
+ Mat dst_gold2(dst_gold.size(), dst_gold.type()); dst_gold2.setTo(Scalar::all(0));\r
+ gpu::GpuMat dst2(dst.size(), dst.type()); dst2.setTo(Scalar::all(0));\r
+ bitwise_xor(src1, src2, dst_gold2, mask);\r
+ gpu::bitwise_xor(gpu::GpuMat(src1), gpu::GpuMat(src2), dst2, gpu::GpuMat(mask));\r
+\r
+ CHECK(dst_gold2.size() == dst2.size(), CvTS::FAIL_INVALID_OUTPUT);\r
+ CHECK(dst_gold2.type() == dst2.type(), CvTS::FAIL_INVALID_OUTPUT); \r
+ dsth = dst2;\r
+ for (int i = 0; i < dst_gold.rows; ++i) \r
+ CHECK(memcmp(dst_gold2.ptr(i), dsth.ptr(i), dst_gold2.cols * dst_gold2.elemSize()) == 0, CvTS::FAIL_INVALID_OUTPUT)\r
}\r
} gpu_bitwise_test;\r
\r