//! computes magnitude of each (x(i), y(i)) vector\r
//! supports only floating-point source\r
CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude);\r
- //! Acync version\r
+ //! Async version\r
CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, const Stream& stream);\r
\r
//! computes squared magnitude of each (x(i), y(i)) vector\r
//! supports only floating-point source\r
CV_EXPORTS void magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude);\r
- //! Acync version\r
+ //! Async version\r
CV_EXPORTS void magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, const Stream& stream);\r
\r
//! computes angle (angle(i)) of each (x(i), y(i)) vector\r
//! supports only floating-point source\r
CV_EXPORTS void phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees = false);\r
- //! Acync version\r
+ //! Async version\r
CV_EXPORTS void phase(const GpuMat& x, const GpuMat& y, GpuMat& angle, bool angleInDegrees, const Stream& stream);\r
\r
//! converts Cartesian coordinates to polar\r
//! supports only floating-point source\r
CV_EXPORTS void cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees = false);\r
- //! Acync version\r
+ //! Async version\r
CV_EXPORTS void cartToPolar(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, GpuMat& angle, bool angleInDegrees, const Stream& stream);\r
\r
//! converts polar coordinates to Cartesian\r
//! supports only floating-point source\r
CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees = false);\r
- //! Acync version\r
+ //! Async version\r
CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees, const Stream& stream);\r
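//! Example (illustrative): magnitude and orientation of a gradient field,
//! assuming dx and dy are CV_32F GpuMats prepared elsewhere:
//!     GpuMat mag, ang;
//!     cartToPolar(dx, dy, mag, ang, true);  // angles in degrees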
\r
+\r
+ //! Performs per-element bit-wise inversion
+ CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst);\r
+ //! Async version\r
+ CV_EXPORTS void bitwise_not(const GpuMat& src, GpuMat& dst, const Stream& stream);\r
+\r
+ //! Calculates per-element bit-wise disjunction of two arrays\r
+ CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst);\r
+ //! Async version\r
+ CV_EXPORTS void bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream);\r
+\r
+ //! Calculates per-element bit-wise conjunction of two arrays\r
+ CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst);\r
+ //! Async version\r
+ CV_EXPORTS void bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream);\r
+\r
+ //! Calculates per-element bit-wise "exclusive or" operation\r
+ CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst);\r
+ //! Async version\r
+ CV_EXPORTS void bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream);\r
+\r
+ //! Per-element bit-wise logical operators
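+ //! Usage sketch (illustrative): each operator wraps the corresponding
+ //! bitwise_* function above and allocates a fresh result matrix, so a
+ //! chained expression creates one temporary per call:
+ //!     GpuMat a, b;              // same size and type, filled elsewhere
+ //!     GpuMat c = (a & b) | ~a;  // equivalent to three bitwise_* calls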
+ CV_EXPORTS GpuMat operator ~ (const GpuMat& src);\r
+ CV_EXPORTS GpuMat operator | (const GpuMat& src1, const GpuMat& src2);\r
+ CV_EXPORTS GpuMat operator & (const GpuMat& src1, const GpuMat& src2);\r
+ CV_EXPORTS GpuMat operator ^ (const GpuMat& src1, const GpuMat& src2);\r
+\r
+\r
////////////////////////////// Image processing //////////////////////////////\r
\r
//! DST[x,y] = SRC[xmap[x,y],ymap[x,y]] with bilinear interpolation.\r
//! Supported types of input disparity: CV_8U, CV_16S.\r
//! Output disparity has CV_8UC4 type in BGRA format (alpha = 255).\r
CV_EXPORTS void drawColorDisp(const GpuMat& src_disp, GpuMat& dst_disp, int ndisp);\r
- //! Acync version\r
+ //! Async version\r
CV_EXPORTS void drawColorDisp(const GpuMat& src_disp, GpuMat& dst_disp, int ndisp, const Stream& stream);\r
\r
//! Reprojects disparity image to 3D space. \r
//! Each element of this matrix will contain the 3D coordinates of the point (x,y,z,1), computed from the disparity map.\r
//! Q is the 4x4 perspective transformation matrix that can be obtained with cvStereoRectify.\r
CV_EXPORTS void reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q);\r
- //! Acync version\r
+ //! Async version\r
CV_EXPORTS void reprojectImageTo3D(const GpuMat& disp, GpuMat& xyzw, const Mat& Q, const Stream& stream);\r
\r
//! converts image from one color space to another\r
CV_EXPORTS void cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn = 0);\r
- //! Acync version\r
+ //! Async version\r
CV_EXPORTS void cvtColor(const GpuMat& src, GpuMat& dst, int code, int dcn, const Stream& stream);\r
\r
//! applies fixed threshold to the image. \r
//! Output disparity has CV_8U type.\r
void operator() ( const GpuMat& left, const GpuMat& right, GpuMat& disparity);\r
\r
- //! Acync version\r
+ //! Async version\r
void operator() ( const GpuMat& left, const GpuMat& right, GpuMat& disparity, const Stream & stream);\r
\r
//! Some heuristics that try to estimate
//! if disparity is empty, output type will be CV_16S, else output type will be disparity.type().
void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity);\r
\r
- //! Acync version\r
+ //! Async version\r
void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream);\r
\r
\r
//! if disparity is empty, output type will be CV_16S, else output type will be disparity.type().
void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity);\r
\r
- //! Acync version\r
+ //! Async version\r
void operator()(const GpuMat& left, const GpuMat& right, GpuMat& disparity, Stream& stream);\r
\r
int ndisp;\r
//! disparity must have CV_8U or CV_16S type, image must have CV_8UC1 or CV_8UC3 type.\r
void operator()(const GpuMat& disparity, const GpuMat& image, GpuMat& dst);\r
\r
- //! Acync version\r
+ //! Async version\r
void operator()(const GpuMat& disparity, const GpuMat& image, GpuMat& dst, Stream& stream);\r
\r
private:\r
void cv::gpu::cartToPolar(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, const Stream&) { throw_nogpu(); }\r
void cv::gpu::polarToCart(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool) { throw_nogpu(); }\r
void cv::gpu::polarToCart(const GpuMat&, const GpuMat&, GpuMat&, GpuMat&, bool, const Stream&) { throw_nogpu(); }\r
+void cv::gpu::bitwise_not(const GpuMat&, GpuMat&) { throw_nogpu(); }\r
+void cv::gpu::bitwise_not(const GpuMat&, GpuMat&, const Stream&) { throw_nogpu(); }
+void cv::gpu::bitwise_or(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); }\r
+void cv::gpu::bitwise_or(const GpuMat&, const GpuMat&, GpuMat&, const Stream&) { throw_nogpu(); }
+void cv::gpu::bitwise_and(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); }\r
+void cv::gpu::bitwise_and(const GpuMat&, const GpuMat&, GpuMat&, const Stream&) { throw_nogpu(); }
+void cv::gpu::bitwise_xor(const GpuMat&, const GpuMat&, GpuMat&) { throw_nogpu(); }\r
+void cv::gpu::bitwise_xor(const GpuMat&, const GpuMat&, GpuMat&, const Stream&) { throw_nogpu(); }
+cv::gpu::GpuMat cv::gpu::operator ~ (const GpuMat&) { throw_nogpu(); return GpuMat(); }\r
+cv::gpu::GpuMat cv::gpu::operator | (const GpuMat&, const GpuMat&) { throw_nogpu(); return GpuMat(); }\r
+cv::gpu::GpuMat cv::gpu::operator & (const GpuMat&, const GpuMat&) { throw_nogpu(); return GpuMat(); }\r
+cv::gpu::GpuMat cv::gpu::operator ^ (const GpuMat&, const GpuMat&) { throw_nogpu(); return GpuMat(); }\r
\r
#else /* !defined (HAVE_CUDA) */\r
\r
::polarToCart_caller(magnitude, angle, x, y, angleInDegrees, StreamAccessor::getStream(stream));\r
}\r
\r
+//////////////////////////////////////////////////////////////////////////////\r
+// Per-element bit-wise logical matrix operations\r
+\r
+namespace cv { namespace gpu { namespace mathfunc \r
+{\r
+ void bitwise_not_caller(const DevMem2D src, int elemSize, PtrStep dst, cudaStream_t stream);\r
+ void bitwise_or_caller(int cols, int rows, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream);\r
+ void bitwise_and_caller(int cols, int rows, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream);\r
+ void bitwise_xor_caller(int cols, int rows, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream);\r
+}}}\r
+\r
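+// The wrappers below validate the inputs, allocate the destination and
+// forward to the device-side callers declared above; each Stream overload
+// differs only in the cudaStream_t it passes through.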
+namespace \r
+{\r
+ void bitwise_not_caller(const GpuMat& src, GpuMat& dst, cudaStream_t stream)\r
+ {\r
+ dst.create(src.size(), src.type());\r
+ mathfunc::bitwise_not_caller(src, src.elemSize(), dst, stream);\r
+ }\r
+\r
+ void bitwise_or_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, cudaStream_t stream)\r
+ {\r
+ CV_Assert(src1.size() == src2.size());\r
+ CV_Assert(src1.type() == src2.type());\r
+\r
+ dst.create(src1.size(), src1.type());\r
+ mathfunc::bitwise_or_caller(dst.cols, dst.rows, src1, src2, dst.elemSize(), dst, stream);\r
+ }\r
+\r
+ void bitwise_and_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, cudaStream_t stream)\r
+ {\r
+ CV_Assert(src1.size() == src2.size());\r
+ CV_Assert(src1.type() == src2.type());\r
+\r
+ dst.create(src1.size(), src1.type());\r
+ mathfunc::bitwise_and_caller(dst.cols, dst.rows, src1, src2, dst.elemSize(), dst, stream);\r
+ }\r
+\r
+ void bitwise_xor_caller(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, cudaStream_t stream)\r
+ {\r
+ CV_Assert(src1.size() == src2.size());\r
+ CV_Assert(src1.type() == src2.type());\r
+\r
+ dst.create(src1.size(), src1.type());\r
+ mathfunc::bitwise_xor_caller(dst.cols, dst.rows, src1, src2, dst.elemSize(), dst, stream);\r
+ }\r
+}\r
+\r
+void cv::gpu::bitwise_not(const GpuMat& src, GpuMat& dst)\r
+{\r
+ ::bitwise_not_caller(src, dst, 0);\r
+}\r
+\r
+void cv::gpu::bitwise_not(const GpuMat& src, GpuMat& dst, const Stream& stream)\r
+{\r
+ ::bitwise_not_caller(src, dst, StreamAccessor::getStream(stream));\r
+}\r
+\r
+void cv::gpu::bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)\r
+{\r
+ ::bitwise_or_caller(src1, src2, dst, 0);\r
+}\r
+\r
+void cv::gpu::bitwise_or(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream)\r
+{\r
+ ::bitwise_or_caller(src1, src2, dst, StreamAccessor::getStream(stream));\r
+}\r
+\r
+void cv::gpu::bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)\r
+{\r
+ ::bitwise_and_caller(src1, src2, dst, 0);\r
+}\r
+\r
+void cv::gpu::bitwise_and(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream)\r
+{\r
+ ::bitwise_and_caller(src1, src2, dst, StreamAccessor::getStream(stream));\r
+}\r
+\r
+void cv::gpu::bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst)\r
+{\r
+ ::bitwise_xor_caller(src1, src2, dst, 0);\r
+}\r
+\r
+void cv::gpu::bitwise_xor(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const Stream& stream)\r
+{\r
+ ::bitwise_xor_caller(src1, src2, dst, StreamAccessor::getStream(stream));\r
+}\r
+\r
+cv::gpu::GpuMat cv::gpu::operator ~ (const GpuMat& src)\r
+{\r
+ GpuMat dst;\r
+ bitwise_not(src, dst);\r
+ return dst;\r
+}\r
+\r
+cv::gpu::GpuMat cv::gpu::operator | (const GpuMat& src1, const GpuMat& src2)\r
+{\r
+ GpuMat dst;\r
+ bitwise_or(src1, src2, dst);\r
+ return dst;\r
+}\r
+\r
+cv::gpu::GpuMat cv::gpu::operator & (const GpuMat& src1, const GpuMat& src2)\r
+{\r
+ GpuMat dst;\r
+ bitwise_and(src1, src2, dst);\r
+ return dst;\r
+}\r
+\r
+cv::gpu::GpuMat cv::gpu::operator ^ (const GpuMat& src1, const GpuMat& src2)\r
+{\r
+ GpuMat dst;\r
+ bitwise_xor(src1, src2, dst);\r
+ return dst;\r
+}\r
+\r
+\r
#endif /* !defined (HAVE_CUDA) */\r
{\r
compare_ne<float, float>(src1, src2, dst);\r
}\r
+\r
+\r
+//////////////////////////////////////////////////////////////////////////////\r
+// Per-element bit-wise logical matrix operations\r
+\r
+\r
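+ // These kernels are type-agnostic: the host-side callers pass
+ // cols * elemSize as the column count, so each thread reads and writes a
+ // single byte and any depth/channel combination is handled without
+ // per-type instantiation.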
+ __global__ void bitwise_not_kernel(int cols, int rows, const PtrStep src, PtrStep dst)\r
+ {\r
+ const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
+ const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
+\r
+ if (x < cols && y < rows)\r
+ {\r
+ dst.ptr(y)[x] = ~src.ptr(y)[x];\r
+ }\r
+ }\r
+\r
+\r
+ void bitwise_not_caller(const DevMem2D src, int elemSize, PtrStep dst, cudaStream_t stream)\r
+ {\r
+ dim3 threads(16, 16, 1);\r
+ dim3 grid(divUp(src.cols * elemSize, threads.x), divUp(src.rows, threads.y), 1);\r
+\r
+ bitwise_not_kernel<<<grid, threads, 0, stream>>>(src.cols * elemSize, src.rows, src, dst);\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall(cudaThreadSynchronize());\r
+ }\r
+\r
+\r
+ __global__ void bitwise_or_kernel(int cols, int rows, const PtrStep src1, const PtrStep src2, PtrStep dst)\r
+ {\r
+ const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
+ const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
+\r
+ if (x < cols && y < rows)\r
+ {\r
+ dst.ptr(y)[x] = src1.ptr(y)[x] | src2.ptr(y)[x];\r
+ }\r
+ }\r
+\r
+\r
+ void bitwise_or_caller(int cols, int rows, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream)\r
+ {\r
+ dim3 threads(16, 16, 1);\r
+ dim3 grid(divUp(cols * elemSize, threads.x), divUp(rows, threads.y), 1);\r
+\r
+ bitwise_or_kernel<<<grid, threads, 0, stream>>>(cols * elemSize, rows, src1, src2, dst);\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall(cudaThreadSynchronize());\r
+ }\r
+\r
+\r
+ __global__ void bitwise_and_kernel(int cols, int rows, const PtrStep src1, const PtrStep src2, PtrStep dst)\r
+ {\r
+ const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
+ const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
+\r
+ if (x < cols && y < rows)\r
+ {\r
+ dst.ptr(y)[x] = src1.ptr(y)[x] & src2.ptr(y)[x];\r
+ }\r
+ }\r
+\r
+\r
+ void bitwise_and_caller(int cols, int rows, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream)\r
+ {\r
+ dim3 threads(16, 16, 1);\r
+ dim3 grid(divUp(cols * elemSize, threads.x), divUp(rows, threads.y), 1);\r
+\r
+ bitwise_and_kernel<<<grid, threads, 0, stream>>>(cols * elemSize, rows, src1, src2, dst);\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall(cudaThreadSynchronize());\r
+ }\r
+\r
+\r
+ __global__ void bitwise_xor_kernel(int cols, int rows, const PtrStep src1, const PtrStep src2, PtrStep dst)\r
+ {\r
+ const int x = blockDim.x * blockIdx.x + threadIdx.x;\r
+ const int y = blockDim.y * blockIdx.y + threadIdx.y;\r
+\r
+ if (x < cols && y < rows)\r
+ {\r
+ dst.ptr(y)[x] = src1.ptr(y)[x] ^ src2.ptr(y)[x];\r
+ }\r
+ }\r
+\r
+\r
+ void bitwise_xor_caller(int cols, int rows, const PtrStep src1, const PtrStep src2, int elemSize, PtrStep dst, cudaStream_t stream)\r
+ {\r
+ dim3 threads(16, 16, 1);\r
+ dim3 grid(divUp(cols * elemSize, threads.x), divUp(rows, threads.y), 1);\r
+\r
+ bitwise_xor_kernel<<<grid, threads, 0, stream>>>(cols * elemSize, rows, src1, src2, dst);\r
+\r
+ if (stream == 0)\r
+ cudaSafeCall(cudaThreadSynchronize());\r
+ }\r
}}}\r
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////\r
+//\r
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.\r
+//\r
+// By downloading, copying, installing or using the software you agree to this license.\r
+// If you do not agree to this license, do not download, install,\r
+// copy or use the software.\r
+//\r
+//\r
+// Intel License Agreement\r
+// For Open Source Computer Vision Library\r
+//\r
+// Copyright (C) 2000, Intel Corporation, all rights reserved.\r
+// Third party copyrights are property of their respective owners.\r
+//\r
+// Redistribution and use in source and binary forms, with or without modification,\r
+// are permitted provided that the following conditions are met:\r
+//\r
+// * Redistribution's of source code must retain the above copyright notice,\r
+// this list of conditions and the following disclaimer.\r
+//\r
+// * Redistribution's in binary form must reproduce the above copyright notice,\r
+// this list of conditions and the following disclaimer in the documentation\r
+// and/or other materials provided with the distribution.\r
+//\r
+// * The name of Intel Corporation may not be used to endorse or promote products\r
+// derived from this software without specific prior written permission.\r
+//\r
+// This software is provided by the copyright holders and contributors "as is" and\r
+// any express or implied warranties, including, but not limited to, the implied\r
+// warranties of merchantability and fitness for a particular purpose are disclaimed.\r
+// In no event shall the Intel Corporation or contributors be liable for any direct,\r
+// indirect, incidental, special, exemplary, or consequential damages\r
+// (including, but not limited to, procurement of substitute goods or services;\r
+// loss of use, data, or profits; or business interruption) however caused\r
+// and on any theory of liability, whether in contract, strict liability,\r
+// or tort (including negligence or otherwise) arising in any way out of\r
+// the use of this software, even if advised of the possibility of such damage.\r
+//\r
+//M*/\r
+\r
+#include <iostream>\r
+#include <limits>\r
+#include "gputest.hpp"\r
+\r
+#define CHECK(pred, err) if (!(pred)) { \\r
+ ts->printf(CvTS::LOG, "Fail: \"%s\" at line: %d\n", #pred, __LINE__); \\r
+ ts->set_failed_test_info(err); \\r
+ return; }\r
+\r
+using namespace cv;\r
+using namespace std;\r
+\r
+struct CV_GpuBitwiseTest: public CvTest\r
+{\r
+ CV_GpuBitwiseTest(): CvTest("GPU-BitwiseOpers", "bitwiseMatOperators") {}\r
+\r
+ void run(int)\r
+ {\r
+ int rows, cols;\r
+ for (int depth = CV_8U; depth <= CV_64F; ++depth)\r
+ for (int cn = 1; cn <= 4; ++cn)\r
+ for (int attempt = 0; attempt < 5; ++attempt)\r
+ {\r
+ rows = 1 + rand() % 100;\r
+ cols = 1 + rand() % 100;\r
+ test_bitwise_not(rows, cols, CV_MAKETYPE(depth, cn));\r
+ test_bitwise_or(rows, cols, CV_MAKETYPE(depth, cn));\r
+ test_bitwise_and(rows, cols, CV_MAKETYPE(depth, cn));\r
+ test_bitwise_xor(rows, cols, CV_MAKETYPE(depth, cn));\r
+ }\r
+ }\r
+\r
+ void test_bitwise_not(int rows, int cols, int type)\r
+ {\r
+ Mat src(rows, cols, type);\r
+\r
+ RNG rng;\r
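+ // Fill each row through a CV_8U view so that every depth (including
+ // CV_32F and CV_64F) receives uniformly random bit patterns; the tests
+ // below fill their inputs the same way.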
+ for (int i = 0; i < src.rows; ++i)\r
+ {\r
+ Mat row(1, src.cols * src.elemSize(), CV_8U, src.ptr(i));\r
+ rng.fill(row, RNG::UNIFORM, Scalar(0), Scalar(255));\r
+ }\r
+\r
+ Mat dst_gold = ~src;\r
+ gpu::GpuMat dst = ~gpu::GpuMat(src);\r
+\r
+ CHECK(dst_gold.size() == dst.size(), CvTS::FAIL_INVALID_OUTPUT);\r
+ CHECK(dst_gold.type() == dst.type(), CvTS::FAIL_INVALID_OUTPUT); \r
+\r
+ Mat dsth(dst);\r
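+ // Compare row by row over the payload bytes only, so any padding
+ // between rows in either matrix is ignored.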
+ for (int i = 0; i < dst_gold.rows; ++i) \r
+ CHECK(memcmp(dst_gold.ptr(i), dsth.ptr(i), dst_gold.cols * dst_gold.elemSize()) == 0, CvTS::FAIL_INVALID_OUTPUT)\r
+ }\r
+\r
+ void test_bitwise_or(int rows, int cols, int type)\r
+ {\r
+ Mat src1(rows, cols, type);\r
+ Mat src2(rows, cols, type);\r
+\r
+ RNG rng;\r
+ for (int i = 0; i < src1.rows; ++i)\r
+ {\r
+ Mat row1(1, src1.cols * src1.elemSize(), CV_8U, src1.ptr(i));\r
+ rng.fill(row1, RNG::UNIFORM, Scalar(0), Scalar(255));\r
+ Mat row2(1, src2.cols * src2.elemSize(), CV_8U, src2.ptr(i));\r
+ rng.fill(row2, RNG::UNIFORM, Scalar(0), Scalar(255));\r
+ }\r
+\r
+ Mat dst_gold = src1 | src2;\r
+ gpu::GpuMat dst = gpu::GpuMat(src1) | gpu::GpuMat(src2);\r
+\r
+ CHECK(dst_gold.size() == dst.size(), CvTS::FAIL_INVALID_OUTPUT);\r
+ CHECK(dst_gold.type() == dst.type(), CvTS::FAIL_INVALID_OUTPUT); \r
+\r
+ Mat dsth(dst);\r
+ for (int i = 0; i < dst_gold.rows; ++i) \r
+ CHECK(memcmp(dst_gold.ptr(i), dsth.ptr(i), dst_gold.cols * dst_gold.elemSize()) == 0, CvTS::FAIL_INVALID_OUTPUT)\r
+ }\r
+\r
+ void test_bitwise_and(int rows, int cols, int type)\r
+ {\r
+ Mat src1(rows, cols, type);\r
+ Mat src2(rows, cols, type);\r
+\r
+ RNG rng;\r
+ for (int i = 0; i < src1.rows; ++i)\r
+ {\r
+ Mat row1(1, src1.cols * src1.elemSize(), CV_8U, src1.ptr(i));\r
+ rng.fill(row1, RNG::UNIFORM, Scalar(0), Scalar(255));\r
+ Mat row2(1, src2.cols * src2.elemSize(), CV_8U, src2.ptr(i));\r
+ rng.fill(row2, RNG::UNIFORM, Scalar(0), Scalar(255));\r
+ }\r
+\r
+ Mat dst_gold = src1 & src2;\r
+\r
+ gpu::GpuMat dst = gpu::GpuMat(src1) & gpu::GpuMat(src2);\r
+\r
+ CHECK(dst_gold.size() == dst.size(), CvTS::FAIL_INVALID_OUTPUT);\r
+ CHECK(dst_gold.type() == dst.type(), CvTS::FAIL_INVALID_OUTPUT); \r
+\r
+ Mat dsth(dst);\r
+ for (int i = 0; i < dst_gold.rows; ++i) \r
+ CHECK(memcmp(dst_gold.ptr(i), dsth.ptr(i), dst_gold.cols * dst_gold.elemSize()) == 0, CvTS::FAIL_INVALID_OUTPUT)\r
+ }\r
+\r
+ void test_bitwise_xor(int rows, int cols, int type)\r
+ {\r
+ Mat src1(rows, cols, type);\r
+ Mat src2(rows, cols, type);\r
+\r
+ RNG rng;\r
+ for (int i = 0; i < src1.rows; ++i)\r
+ {\r
+ Mat row1(1, src1.cols * src1.elemSize(), CV_8U, src1.ptr(i));\r
+ rng.fill(row1, RNG::UNIFORM, Scalar(0), Scalar(255));\r
+ Mat row2(1, src2.cols * src2.elemSize(), CV_8U, src2.ptr(i));\r
+ rng.fill(row2, RNG::UNIFORM, Scalar(0), Scalar(255));\r
+ }\r
+\r
+ Mat dst_gold = src1 ^ src2;\r
+\r
+ gpu::GpuMat dst = gpu::GpuMat(src1) ^ gpu::GpuMat(src2);\r
+\r
+ CHECK(dst_gold.size() == dst.size(), CvTS::FAIL_INVALID_OUTPUT);\r
+ CHECK(dst_gold.type() == dst.type(), CvTS::FAIL_INVALID_OUTPUT); \r
+\r
+ Mat dsth(dst);\r
+ for (int i = 0; i < dst_gold.rows; ++i) \r
+ CHECK(memcmp(dst_gold.ptr(i), dsth.ptr(i), dst_gold.cols * dst_gold.elemSize()) == 0, CvTS::FAIL_INVALID_OUTPUT)\r
+ }\r
+} gpu_bitwise_test;\r
+\r