From 224f18b06cfc2ff731ac0f970b10c5011fb97ec3 Mon Sep 17 00:00:00 2001
From: Vladislav Vinogradov
Date: Tue, 27 Aug 2013 12:59:23 +0400
Subject: [PATCH] bitwise operation optimization

---
 modules/cudaarithm/src/cuda/bitwise_mat.cu | 121 ++++++++++++++++++++++-------
 1 file changed, 92 insertions(+), 29 deletions(-)

diff --git a/modules/cudaarithm/src/cuda/bitwise_mat.cu b/modules/cudaarithm/src/cuda/bitwise_mat.cu
index e67d002..b2bf288 100644
--- a/modules/cudaarithm/src/cuda/bitwise_mat.cu
+++ b/modules/cudaarithm/src/cuda/bitwise_mat.cu
@@ -58,21 +58,6 @@ void bitMat(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& m
 //////////////////////////////////////////////////////////////////////////////
 /// bitwise_not
 
-namespace
-{
-    template <typename T>
-    void bitMatNot(const GpuMat& src, GpuMat& dst, const GpuMat& mask, Stream& stream)
-    {
-        GlobPtrSz<T> vsrc = globPtr((T*) src.data, src.step, src.rows, src.cols * src.channels());
-        GlobPtrSz<T> vdst = globPtr((T*) dst.data, dst.step, src.rows, src.cols * src.channels());
-
-        if (mask.data)
-            gridTransformUnary(vsrc, vdst, bit_not<T>(), singleMaskChannels(globPtr<uchar>(mask), src.channels()), stream);
-        else
-            gridTransformUnary(vsrc, vdst, bit_not<T>(), stream);
-    }
-}
-
 void cv::cuda::bitwise_not(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream)
 {
     GpuMat src = _src.getGpuMat();
@@ -86,17 +71,59 @@ void cv::cuda::bitwise_not(InputArray _src, OutputArray _dst, InputArray _mask,
     _dst.create(src.size(), src.type());
     GpuMat dst = _dst.getGpuMat();
 
-    if (depth == CV_32F || depth == CV_32S)
-    {
-        bitMatNot<uint>(src, dst, mask, stream);
-    }
-    else if (depth == CV_16S || depth == CV_16U)
+    if (mask.empty())
     {
-        bitMatNot<ushort>(src, dst, mask, stream);
+        const int bcols = (int) (src.cols * src.elemSize());
+
+        if ((bcols & 3) == 0)
+        {
+            const int vcols = bcols >> 2;
+
+            GlobPtrSz<uint> vsrc = globPtr((uint*) src.data, src.step, src.rows, vcols);
+            GlobPtrSz<uint> vdst = globPtr((uint*) dst.data, dst.step, src.rows, vcols);
+
+            gridTransformUnary(vsrc, vdst, bit_not<uint>(), stream);
+        }
+        else if ((bcols & 1) == 0)
+        {
+            const int vcols = bcols >> 1;
+
+            GlobPtrSz<ushort> vsrc = globPtr((ushort*) src.data, src.step, src.rows, vcols);
+            GlobPtrSz<ushort> vdst = globPtr((ushort*) dst.data, dst.step, src.rows, vcols);
+
+            gridTransformUnary(vsrc, vdst, bit_not<ushort>(), stream);
+        }
+        else
+        {
+            GlobPtrSz<uchar> vsrc = globPtr((uchar*) src.data, src.step, src.rows, bcols);
+            GlobPtrSz<uchar> vdst = globPtr((uchar*) dst.data, dst.step, src.rows, bcols);
+
+            gridTransformUnary(vsrc, vdst, bit_not<uchar>(), stream);
+        }
     }
     else
     {
-        bitMatNot<uchar>(src, dst, mask, stream);
+        if (depth == CV_32F || depth == CV_32S)
+        {
+            GlobPtrSz<uint> vsrc = globPtr((uint*) src.data, src.step, src.rows, src.cols * src.channels());
+            GlobPtrSz<uint> vdst = globPtr((uint*) dst.data, dst.step, src.rows, src.cols * src.channels());
+
+            gridTransformUnary(vsrc, vdst, bit_not<uint>(), singleMaskChannels(globPtr<uchar>(mask), src.channels()), stream);
+        }
+        else if (depth == CV_16S || depth == CV_16U)
+        {
+            GlobPtrSz<ushort> vsrc = globPtr((ushort*) src.data, src.step, src.rows, src.cols * src.channels());
+            GlobPtrSz<ushort> vdst = globPtr((ushort*) dst.data, dst.step, src.rows, src.cols * src.channels());
+
+            gridTransformUnary(vsrc, vdst, bit_not<ushort>(), singleMaskChannels(globPtr<uchar>(mask), src.channels()), stream);
+        }
+        else
+        {
+            GlobPtrSz<uchar> vsrc = globPtr((uchar*) src.data, src.step, src.rows, src.cols * src.channels());
+            GlobPtrSz<uchar> vdst = globPtr((uchar*) dst.data, dst.step, src.rows, src.cols * src.channels());
+
+            gridTransformUnary(vsrc, vdst, bit_not<uchar>(), singleMaskChannels(globPtr<uchar>(mask), src.channels()), stream);
+        }
     }
 }
 
@@ -146,17 +173,53 @@ void bitMat(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, const GpuMat& m
     CV_DbgAssert( depth <= CV_32F );
     CV_DbgAssert( op >= 0 && op < 3 );
 
-    if (depth == CV_32F || depth == CV_32S)
-    {
-        funcs32[op](src1, src2, dst, mask, stream);
-    }
-    else if (depth == CV_16S || depth == CV_16U)
+    if (mask.empty())
     {
-        funcs16[op](src1, src2, dst, mask, stream);
+        const int bcols = (int) (src1.cols * src1.elemSize());
+
+        if ((bcols & 3) == 0)
+        {
+            const int vcols = bcols >> 2;
+
+            GpuMat vsrc1(src1.rows, vcols, CV_32SC1, src1.data, src1.step);
+            GpuMat vsrc2(src1.rows, vcols, CV_32SC1, src2.data, src2.step);
+            GpuMat vdst(src1.rows, vcols, CV_32SC1, dst.data, dst.step);
+
+            funcs32[op](vsrc1, vsrc2, vdst, GpuMat(), stream);
+        }
+        else if ((bcols & 1) == 0)
+        {
+            const int vcols = bcols >> 1;
+
+            GpuMat vsrc1(src1.rows, vcols, CV_16UC1, src1.data, src1.step);
+            GpuMat vsrc2(src1.rows, vcols, CV_16UC1, src2.data, src2.step);
+            GpuMat vdst(src1.rows, vcols, CV_16UC1, dst.data, dst.step);
+
+            funcs16[op](vsrc1, vsrc2, vdst, GpuMat(), stream);
+        }
+        else
+        {
+            GpuMat vsrc1(src1.rows, bcols, CV_8UC1, src1.data, src1.step);
+            GpuMat vsrc2(src1.rows, bcols, CV_8UC1, src2.data, src2.step);
+            GpuMat vdst(src1.rows, bcols, CV_8UC1, dst.data, dst.step);
+
+            funcs8[op](vsrc1, vsrc2, vdst, GpuMat(), stream);
+        }
     }
     else
     {
-        funcs8[op](src1, src2, dst, mask, stream);
+        if (depth == CV_32F || depth == CV_32S)
+        {
+            funcs32[op](src1, src2, dst, mask, stream);
+        }
+        else if (depth == CV_16S || depth == CV_16U)
+        {
+            funcs16[op](src1, src2, dst, mask, stream);
+        }
+        else
+        {
+            funcs8[op](src1, src2, dst, mask, stream);
+        }
     }
 }
-- 
2.7.4
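
Note on the optimization (commentary after the signature; ignored by git am):

Both hunks apply the same vectorization rule. When no mask is given, each row
of cols * elemSize() bytes is reinterpreted as the widest unsigned word type
that evenly divides its byte width (uint, then ushort, then uchar), so a
thread reads and writes 4 or 2 bytes per memory transaction instead of 1. The
masked path keeps per-depth element typing, since masked-out elements must be
left untouched and cannot be merged into wider words. The sketch below is a
minimal host-side illustration of the selection rule only; pickWordSize and
the sample row widths are hypothetical names made up for this note, not
OpenCV API.

    #include <cstdio>

    // Hypothetical helper mirroring the (bcols & 3) / (bcols & 1) dispatch
    // in the patch: pick the widest word that evenly divides the row width.
    static int pickWordSize(int bcols)
    {
        if ((bcols & 3) == 0) return 4; // divisible by 4 -> treat row as uint
        if ((bcols & 1) == 0) return 2; // divisible by 2 -> treat row as ushort
        return 1;                       // odd width      -> fall back to uchar
    }

    int main()
    {
        // Sample row widths in bytes (cols * elemSize()), for illustration:
        // a 640-column CV_8UC3 row is 1920 bytes; a 31-column CV_8UC1 row is 31.
        const int sampleWidths[] = { 1920, 31, 1022 };

        for (int bcols : sampleWidths)
        {
            const int w = pickWordSize(bcols);
            std::printf("row of %4d bytes -> %d-byte words, %4d elements per row\n",
                        bcols, w, bcols / w);
        }
        return 0;
    }

The element count per row (bcols / w above) corresponds to vcols in the
patch; only the column count changes, while the byte pitch (step) is passed
through unchanged. The uint/ushort reinterpretation of the row start is safe
here because pitched CUDA allocations align both the base pointer and the
pitch well beyond 4 bytes.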