//! Output hist will have one row, 256 cols and CV_32SC1 type.
CV_EXPORTS void calcHist(const GpuMat& src, GpuMat& hist, Stream& stream = Stream::Null());\r
CV_EXPORTS void calcHist(const GpuMat& src, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null());\r
+ \r
+ //! normalizes the grayscale image brightness and contrast by normalizing its histogram\r
+ CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, Stream& stream = Stream::Null());\r
+ CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, Stream& stream = Stream::Null());\r
+ CV_EXPORTS void equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, GpuMat& buf, Stream& stream = Stream::Null());\r
\r
//////////////////////////////// StereoBM_GPU ////////////////////////////////\r
\r
if (stream == 0)\r
cudaSafeCall( cudaDeviceSynchronize() );\r
}\r
+\r
+ // Applies a 256-entry lookup table to an 8-bit single-channel image,
+ // writing the equalized result to dst.
+ // Launch contract: blockDim.x * blockDim.y must be exactly 256, since each
+ // thread stages one LUT entry into shared memory (see equalizeHist_gpu).
+ __global__ void equalizeHist(DevMem2D src, PtrStep dst, const int* lut)
+ {
+ // Block-local cache of the LUT so every pixel read hits shared memory.
+ __shared__ int s_lut[256];
+
+ const int tid = threadIdx.y * blockDim.x + threadIdx.x;
+
+ // One LUT element per thread; barrier before any thread consumes the cache.
+ s_lut[tid] = lut[tid];
+ __syncthreads();
+
+ const int x = blockIdx.x * blockDim.x + threadIdx.x;
+ const int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+ // Bounds guard: the grid is rounded up, so edge blocks overhang the image.
+ if (x < src.cols && y < src.rows)
+ {
+ // Scale the cumulative count to [0, 255]. NOTE(review): assumes lut is
+ // the cumulative histogram, so its maximum equals src.cols * src.rows
+ // (set up via nppsIntegral_32s in the host code) — confirm at call site.
+ dst.ptr(y)[x] = __float2int_rn(255.0f * s_lut[src.ptr(y)[x]] / (src.cols * src.rows));
+ }
+ }
+\r
+ // Host-side launcher for the equalizeHist kernel. Covers the whole image
+ // with 16x16 blocks (256 threads — required by the kernel's LUT staging)
+ // and synchronizes on the default stream so execution errors surface here.
+ void equalizeHist_gpu(DevMem2D src, DevMem2D dst, const int* lut, cudaStream_t stream)
+ {
+ dim3 block(16, 16);
+ dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
+
+ equalizeHist<<<grid, block, 0, stream>>>(src, dst, lut);
+ cudaSafeCall( cudaGetLastError() );
+
+ if (stream == 0)
+ cudaSafeCall( cudaDeviceSynchronize() );
+ }
}}}\r
void cv::gpu::histRange(const GpuMat&, GpuMat*, const GpuMat*, Stream&) { throw_nogpu(); }
void cv::gpu::calcHist(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::calcHist(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
+// No-CUDA build stubs: all equalizeHist overloads fail with throw_nogpu(),
+// matching the convention of the surrounding stub definitions.
+void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
+void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
+void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, int, int, double, int) { throw_nogpu(); }
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, int, int, int) { throw_nogpu(); }
void cv::gpu::mulSpectrums(const GpuMat&, const GpuMat&, GpuMat&, int, bool) { throw_nogpu(); }
histogram256_gpu(src, hist.ptr<int>(), buf.ptr<unsigned int>(), StreamAccessor::getStream(stream));\r
}\r
\r
+// Convenience overload: allocates the histogram and scratch buffer internally
+// and forwards to the full four-argument overload.
+void cv::gpu::equalizeHist(const GpuMat& src, GpuMat& dst, Stream& stream)
+{
+ GpuMat hist;
+ GpuMat buf;
+ equalizeHist(src, dst, hist, buf, stream);
+}
+\r
+// Overload that returns the computed histogram but allocates the scratch
+// buffer internally; forwards to the full four-argument overload.
+void cv::gpu::equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, Stream& stream)
+{
+ GpuMat buf;
+ equalizeHist(src, dst, hist, buf, stream);
+}
+\r
+// Forward declaration of the CUDA-side launcher (defined in the .cu file).
+namespace cv { namespace gpu { namespace histograms
+{
+ void equalizeHist_gpu(DevMem2D src, DevMem2D dst, const int* lut, cudaStream_t stream);
+}}}
+\r
+// Full-control overload: equalizes the CV_8UC1 image src into dst, returning
+// the 256-bin histogram in 'hist' and reusing 'buf' as scratch space so
+// repeated calls avoid reallocation.
+void cv::gpu::equalizeHist(const GpuMat& src, GpuMat& dst, GpuMat& hist, GpuMat& buf, Stream& s)
+{
+ using namespace cv::gpu::histograms;
+
+ CV_Assert(src.type() == CV_8UC1);
+
+ dst.create(src.size(), src.type());
+
+ // Scratch size required by NPP's 32-bit signal-integral (prefix sum).
+ int intBufSize;
+ nppSafeCall( nppsIntegralGetBufferSize_32s(256, &intBufSize) );
+
+ // One allocation sized for the larger of two uses that never overlap in
+ // time: calcHist's scratch first, then NPP's scratch plus the 256-int LUT.
+ int bufSize = std::max(256 * 240 * sizeof(int), intBufSize + 256 * sizeof(int));
+
+ ensureSizeIsEnough(1, bufSize, CV_8UC1, buf);
+
+ // histBuf and intBuf deliberately alias the start of 'buf' — histBuf is
+ // dead once calcHist returns, so intBuf may reuse the same bytes. The LUT
+ // lives after the NPP scratch region so it is not clobbered.
+ GpuMat histBuf(1, 256 * 240, CV_32SC1, buf.ptr());
+ GpuMat intBuf(1, intBufSize, CV_8UC1, buf.ptr());
+ GpuMat lut(1, 256, CV_32S, buf.ptr() + intBufSize);
+
+ calcHist(src, hist, histBuf, s);
+
+ cudaStream_t stream = StreamAccessor::getStream(s);
+
+ // Scope subsequent NPP calls to the caller's stream.
+ NppStreamHandler h(stream);
+
+ // Cumulative histogram: prefix sum over the 256 bins becomes the LUT
+ // that the kernel scales to [0, 255].
+ nppSafeCall( nppsIntegral_32s(hist.ptr<Npp32s>(), lut.ptr<Npp32s>(), 256, intBuf.ptr<Npp8u>()) );
+
+ if (stream == 0)
+ cudaSafeCall( cudaDeviceSynchronize() );
+
+ equalizeHist_gpu(src, dst, lut.ptr<int>(), stream);
+}
+\r
////////////////////////////////////////////////////////////////////////\r
// cornerHarris & minEgenVal\r
\r
\r
INSTANTIATE_TEST_CASE_P(ImgProc, CalcHist, testing::ValuesIn(devices()));\r
\r
+// GTest fixture: builds a random 8-bit single-channel image and the CPU
+// cv::equalizeHist result (dst_gold) as the reference to compare against.
+struct EqualizeHist : testing::TestWithParam<cv::gpu::DeviceInfo>
+{
+ cv::gpu::DeviceInfo devInfo;   // GPU device under test (from test parameter)
+
+ cv::Size size;                 // randomized image size
+ cv::Mat src;                   // random CV_8UC1 input image
+ cv::Mat dst_gold;              // CPU reference output
+
+ virtual void SetUp()
+ {
+ devInfo = GetParam();
+
+ cv::gpu::setDevice(devInfo.deviceID());
+
+ cv::RNG& rng = cvtest::TS::ptr()->get_rng();
+
+ // Sizes in [100, 200) keep the test fast but non-trivial.
+ size = cv::Size(rng.uniform(100, 200), rng.uniform(100, 200));
+
+ src = cvtest::randomMat(rng, size, CV_8UC1, 0, 255, false);
+
+ // CPU reference result.
+ cv::equalizeHist(src, dst_gold);
+ }
+};
+\r
+// Compares GPU equalizeHist output against the CPU reference within a small
+// per-pixel tolerance (the two paths round the LUT scaling differently).
+TEST_P(EqualizeHist, Accuracy)
+{
+ PRINT_PARAM(devInfo);
+ PRINT_PARAM(size);
+
+ cv::Mat dst;
+
+ ASSERT_NO_THROW(
+ cv::gpu::GpuMat gpuDst;
+
+ cv::gpu::equalizeHist(cv::gpu::GpuMat(src), gpuDst);
+
+ gpuDst.download(dst);
+ );
+
+ EXPECT_MAT_NEAR(dst_gold, dst, 3.0);
+}
+\r
+INSTANTIATE_TEST_CASE_P(ImgProc, EqualizeHist, testing::ValuesIn(devices()));\r
+\r
///////////////////////////////////////////////////////////////////////////////////////////////////////\r
// cornerHarris\r
\r