\r
//! computes Harris cornerness criteria at each image pixel\r
CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType=BORDER_REFLECT101);\r
+ CV_EXPORTS void cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, double k, int borderType=BORDER_REFLECT101);\r
\r
//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria\r
CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType=BORDER_REFLECT101);\r
+ CV_EXPORTS void cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType=BORDER_REFLECT101);\r
\r
//! performs per-element multiplication of two full (not packed) Fourier spectrums\r
//! supports 32FC2 matrixes only (interleaved format)\r
//! Supports CV_8UC1, CV_16UC1 and CV_16SC1 source types.\r
//! Output hist will have one row and histSize cols and CV_32SC1 type.\r
CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());\r
+ CV_EXPORTS void histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream = Stream::Null());\r
//! Calculates histogram with evenly distributed bins for four-channel source.\r
//! All channels of source are processed separately.\r
//! Supports CV_8UC4, CV_16UC4 and CV_16SC4 source types.\r
//! Output hist[i] will have one row and histSize[i] cols and CV_32SC1 type.\r
CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());\r
+ CV_EXPORTS void histEven(const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream = Stream::Null());\r
//! Calculates histogram with bins determined by levels array.\r
//! levels must have one row and CV_32SC1 type if source has integer type or CV_32FC1 otherwise.\r
//! Supports CV_8UC1, CV_16UC1, CV_16SC1 and CV_32FC1 source types.\r
//! Output hist will have one row and (levels.cols-1) cols and CV_32SC1 type.\r
CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, Stream& stream = Stream::Null());\r
+ CV_EXPORTS void histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream = Stream::Null());\r
//! Calculates histogram with bins determined by levels array.\r
//! All levels must have one row and CV_32SC1 type if source has integer type or CV_32FC1 otherwise.\r
//! All channels of source are processed separately.\r
//! Supports CV_8UC4, CV_16UC4, CV_16SC4 and CV_32FC4 source types.\r
//! Output hist[i] will have one row and (levels[i].cols-1) cols and CV_32SC1 type.\r
CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], Stream& stream = Stream::Null());\r
+ CV_EXPORTS void histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buf, Stream& stream = Stream::Null());\r
\r
//! Calculates histogram for 8u one channel image\r
//! Output hist will have one row, 256 cols and CV_32SC1 type.
void cv::gpu::rectStdDev(const GpuMat&, const GpuMat&, GpuMat&, const Rect&, Stream&) { throw_nogpu(); }\r
void cv::gpu::evenLevels(GpuMat&, int, int, int) { throw_nogpu(); }\r
void cv::gpu::histEven(const GpuMat&, GpuMat&, int, int, int, Stream&) { throw_nogpu(); }\r
+void cv::gpu::histEven(const GpuMat&, GpuMat&, GpuMat&, int, int, int, Stream&) { throw_nogpu(); }\r
void cv::gpu::histEven(const GpuMat&, GpuMat*, int*, int*, int*, Stream&) { throw_nogpu(); }\r
+void cv::gpu::histEven(const GpuMat&, GpuMat*, GpuMat&, int*, int*, int*, Stream&) { throw_nogpu(); }\r
void cv::gpu::histRange(const GpuMat&, GpuMat&, const GpuMat&, Stream&) { throw_nogpu(); }\r
+void cv::gpu::histRange(const GpuMat&, GpuMat&, const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
void cv::gpu::histRange(const GpuMat&, GpuMat*, const GpuMat*, Stream&) { throw_nogpu(); }\r
+void cv::gpu::histRange(const GpuMat&, GpuMat*, const GpuMat*, GpuMat&, Stream&) { throw_nogpu(); }\r
void cv::gpu::calcHist(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
void cv::gpu::calcHist(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
void cv::gpu::equalizeHist(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, Stream&) { throw_nogpu(); }\r
void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, int, int, double, int) { throw_nogpu(); }\r
+void cv::gpu::cornerHarris(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, double, int) { throw_nogpu(); }\r
void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, int, int, int) { throw_nogpu(); }\r
+void cv::gpu::cornerMinEigenVal(const GpuMat&, GpuMat&, GpuMat&, GpuMat&, int, int, int) { throw_nogpu(); }\r
void cv::gpu::mulSpectrums(const GpuMat&, const GpuMat&, GpuMat&, int, bool) { throw_nogpu(); }\r
void cv::gpu::mulAndScaleSpectrums(const GpuMat&, const GpuMat&, GpuMat&, int, float, bool) { throw_nogpu(); }\r
void cv::gpu::dft(const GpuMat&, GpuMat&, Size, int) { throw_nogpu(); }\r
\r
CV_Assert((src.type() == CV_8U || src.type() == CV_8UC3) && xmap.type() == CV_32F && ymap.type() == CV_32F);\r
\r
- GpuMat out;\r
- if (dst.data != src.data)\r
- out = dst;\r
+ dst.create(xmap.size(), src.type());\r
\r
- out.create(xmap.size(), src.type());\r
-\r
- callers[src.channels() - 1](src, xmap, ymap, out);\r
-\r
- dst = out;\r
+ callers[src.channels() - 1](src, xmap, ymap, dst);\r
}\r
\r
////////////////////////////////////////////////////////////////////////\r
template <typename T>\r
void drawColorDisp_caller(const GpuMat& src, GpuMat& dst, int ndisp, const cudaStream_t& stream)\r
{\r
- GpuMat out;\r
- if (dst.data != src.data)\r
- out = dst;\r
- out.create(src.size(), CV_8UC4);\r
-\r
- imgproc::drawColorDisp_gpu((DevMem2D_<T>)src, out, ndisp, stream);\r
+ dst.create(src.size(), CV_8UC4);\r
\r
- dst = out;\r
+ imgproc::drawColorDisp_gpu((DevMem2D_<T>)src, dst, ndisp, stream);\r
}\r
\r
typedef void (*drawColorDisp_caller_t)(const GpuMat& src, GpuMat& dst, int ndisp, const cudaStream_t& stream);\r
{\r
typedef typename NppHistogramEvenFuncC1<SDEPTH>::src_t src_t;\r
\r
- static void hist(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, cudaStream_t stream)\r
+ static void hist(const GpuMat& src, GpuMat& hist, GpuMat& buffer, int histSize, int lowerLevel, int upperLevel, cudaStream_t stream)\r
{\r
int levels = histSize + 1;\r
hist.create(1, histSize, CV_32S);\r
sz.width = src.cols;\r
sz.height = src.rows;\r
\r
- GpuMat buffer;\r
int buf_size;\r
-\r
get_buf_size(sz, levels, &buf_size);\r
- buffer.create(1, buf_size, CV_8U);\r
+\r
+ ensureSizeIsEnough(1, buf_size, CV_8U, buffer);\r
\r
NppStreamHandler h(stream);\r
\r
{\r
typedef typename NppHistogramEvenFuncC4<SDEPTH>::src_t src_t;\r
\r
- static void hist(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], cudaStream_t stream)\r
+ static void hist(const GpuMat& src, GpuMat hist[4], GpuMat& buffer, int histSize[4], int lowerLevel[4], int upperLevel[4], cudaStream_t stream)\r
{\r
int levels[] = {histSize[0] + 1, histSize[1] + 1, histSize[2] + 1, histSize[3] + 1};\r
hist[0].create(1, histSize[0], CV_32S);\r
\r
Npp32s* pHist[] = {hist[0].ptr<Npp32s>(), hist[1].ptr<Npp32s>(), hist[2].ptr<Npp32s>(), hist[3].ptr<Npp32s>()};\r
\r
- GpuMat buffer;\r
int buf_size;\r
-\r
get_buf_size(sz, levels, &buf_size);\r
- buffer.create(1, buf_size, CV_8U);\r
+\r
+ ensureSizeIsEnough(1, buf_size, CV_8U, buffer);\r
\r
NppStreamHandler h(stream);\r
\r
typedef typename NppHistogramRangeFuncC1<SDEPTH>::level_t level_t;\r
enum {LEVEL_TYPE_CODE=NppHistogramRangeFuncC1<SDEPTH>::LEVEL_TYPE_CODE};\r
\r
- static void hist(const GpuMat& src, GpuMat& hist, const GpuMat& levels, cudaStream_t stream)\r
+ static void hist(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buffer, cudaStream_t stream)\r
{\r
CV_Assert(levels.type() == LEVEL_TYPE_CODE && levels.rows == 1);\r
\r
sz.width = src.cols;\r
sz.height = src.rows;\r
\r
- GpuMat buffer;\r
int buf_size;\r
-\r
get_buf_size(sz, levels.cols, &buf_size);\r
- buffer.create(1, buf_size, CV_8U);\r
+ \r
+ ensureSizeIsEnough(1, buf_size, CV_8U, buffer);\r
\r
NppStreamHandler h(stream);\r
\r
typedef typename NppHistogramRangeFuncC1<SDEPTH>::level_t level_t;\r
enum {LEVEL_TYPE_CODE=NppHistogramRangeFuncC1<SDEPTH>::LEVEL_TYPE_CODE};\r
\r
- static void hist(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], cudaStream_t stream)\r
+ static void hist(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buffer, cudaStream_t stream)\r
{\r
CV_Assert(levels[0].type() == LEVEL_TYPE_CODE && levels[0].rows == 1);\r
CV_Assert(levels[1].type() == LEVEL_TYPE_CODE && levels[1].rows == 1);\r
sz.width = src.cols;\r
sz.height = src.rows;\r
\r
- GpuMat buffer;\r
int buf_size;\r
-\r
get_buf_size(sz, nLevels, &buf_size);\r
- buffer.create(1, buf_size, CV_8U);\r
+\r
+ ensureSizeIsEnough(1, buf_size, CV_8U, buffer);\r
\r
NppStreamHandler h(stream);\r
\r
\r
void cv::gpu::histEven(const GpuMat& src, GpuMat& hist, int histSize, int lowerLevel, int upperLevel, Stream& stream)\r
{\r
+ GpuMat buf;\r
+ histEven(src, hist, buf, histSize, lowerLevel, upperLevel, stream);\r
+}\r
+\r
+void cv::gpu::histEven(const GpuMat& src, GpuMat& hist, GpuMat& buf, int histSize, int lowerLevel, int upperLevel, Stream& stream)\r
+{\r
CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 );\r
\r
- typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, int levels, int lowerLevel, int upperLevel, cudaStream_t stream);\r
+ typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, GpuMat& buf, int levels, int lowerLevel, int upperLevel, cudaStream_t stream);\r
static const hist_t hist_callers[] =\r
{\r
NppHistogramEvenC1<CV_8U , nppiHistogramEven_8u_C1R , nppiHistogramEvenGetBufferSize_8u_C1R >::hist,\r
NppHistogramEvenC1<CV_16S, nppiHistogramEven_16s_C1R, nppiHistogramEvenGetBufferSize_16s_C1R>::hist\r
};\r
\r
- hist_callers[src.depth()](src, hist, histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));\r
+ hist_callers[src.depth()](src, hist, buf, histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));\r
}\r
\r
void cv::gpu::histEven(const GpuMat& src, GpuMat hist[4], int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream)\r
{\r
+ GpuMat buf;\r
+ histEven(src, hist, buf, histSize, lowerLevel, upperLevel, stream);\r
+}\r
+\r
+void cv::gpu::histEven(const GpuMat& src, GpuMat hist[4], GpuMat& buf, int histSize[4], int lowerLevel[4], int upperLevel[4], Stream& stream)\r
+{\r
CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4 || src.type() == CV_16SC4 );\r
\r
- typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], int levels[4], int lowerLevel[4], int upperLevel[4], cudaStream_t stream);\r
+ typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], GpuMat& buf, int levels[4], int lowerLevel[4], int upperLevel[4], cudaStream_t stream);\r
static const hist_t hist_callers[] =\r
{\r
NppHistogramEvenC4<CV_8U , nppiHistogramEven_8u_C4R , nppiHistogramEvenGetBufferSize_8u_C4R >::hist,\r
NppHistogramEvenC4<CV_16S, nppiHistogramEven_16s_C4R, nppiHistogramEvenGetBufferSize_16s_C4R>::hist\r
};\r
\r
- hist_callers[src.depth()](src, hist, histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));\r
+ hist_callers[src.depth()](src, hist, buf, histSize, lowerLevel, upperLevel, StreamAccessor::getStream(stream));\r
}\r
\r
void cv::gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, Stream& stream)\r
{\r
+ GpuMat buf;\r
+ histRange(src, hist, levels, buf, stream);\r
+}\r
+\r
+\r
+void cv::gpu::histRange(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, Stream& stream)\r
+{\r
CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1 || src.type() == CV_16SC1 || src.type() == CV_32FC1);\r
\r
- typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, const GpuMat& levels, cudaStream_t stream);\r
+ typedef void (*hist_t)(const GpuMat& src, GpuMat& hist, const GpuMat& levels, GpuMat& buf, cudaStream_t stream);\r
static const hist_t hist_callers[] =\r
{\r
NppHistogramRangeC1<CV_8U , nppiHistogramRange_8u_C1R , nppiHistogramRangeGetBufferSize_8u_C1R >::hist,\r
NppHistogramRangeC1<CV_32F, nppiHistogramRange_32f_C1R, nppiHistogramRangeGetBufferSize_32f_C1R>::hist\r
};\r
\r
- hist_callers[src.depth()](src, hist, levels, StreamAccessor::getStream(stream));\r
+ hist_callers[src.depth()](src, hist, levels, buf, StreamAccessor::getStream(stream));\r
}\r
\r
void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], Stream& stream)\r
{\r
+ GpuMat buf;\r
+ histRange(src, hist, levels, buf, stream);\r
+}\r
+\r
+void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buf, Stream& stream)\r
+{\r
CV_Assert(src.type() == CV_8UC4 || src.type() == CV_16UC4 || src.type() == CV_16SC4 || src.type() == CV_32FC4);\r
\r
- typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], cudaStream_t stream);\r
+ typedef void (*hist_t)(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4], GpuMat& buf, cudaStream_t stream);\r
static const hist_t hist_callers[] =\r
{\r
NppHistogramRangeC4<CV_8U , nppiHistogramRange_8u_C4R , nppiHistogramRangeGetBufferSize_8u_C4R >::hist,\r
NppHistogramRangeC4<CV_32F, nppiHistogramRange_32f_C4R, nppiHistogramRangeGetBufferSize_32f_C4R>::hist\r
};\r
\r
- hist_callers[src.depth()](src, hist, levels, StreamAccessor::getStream(stream));\r
+ hist_callers[src.depth()](src, hist, levels, buf, StreamAccessor::getStream(stream));\r
}\r
\r
namespace cv { namespace gpu { namespace histograms\r
scale *= 255.;\r
scale = 1./scale;\r
\r
- GpuMat tmp_buf(src.size(), CV_32F);\r
Dx.create(src.size(), CV_32F);\r
Dy.create(src.size(), CV_32F);\r
\r
\r
void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType)\r
{\r
+ GpuMat Dx, Dy;\r
+ cornerHarris(src, dst, Dx, Dy, blockSize, ksize, k, borderType);\r
+}\r
+\r
+void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, double k, int borderType)\r
+{\r
CV_Assert(borderType == cv::BORDER_REFLECT101 ||\r
borderType == cv::BORDER_REPLICATE);\r
\r
int gpuBorderType;\r
CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));\r
\r
- GpuMat Dx, Dy;\r
extractCovData(src, Dx, Dy, blockSize, ksize, borderType);\r
dst.create(src.size(), CV_32F);\r
imgproc::cornerHarris_caller(blockSize, (float)k, Dx, Dy, dst, gpuBorderType);\r
\r
void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, int borderType)\r
{ \r
+ GpuMat Dx, Dy;\r
+ cornerMinEigenVal(src, dst, Dx, Dy, blockSize, ksize, borderType);\r
+}\r
+\r
+void cv::gpu::cornerMinEigenVal(const GpuMat& src, GpuMat& dst, GpuMat& Dx, GpuMat& Dy, int blockSize, int ksize, int borderType)\r
+{ \r
CV_Assert(borderType == cv::BORDER_REFLECT101 ||\r
borderType == cv::BORDER_REPLICATE);\r
\r
int gpuBorderType;\r
CV_Assert(tryConvertToGpuBorderType(borderType, gpuBorderType));\r
\r
- GpuMat Dx, Dy;\r
extractCovData(src, Dx, Dy, blockSize, ksize, borderType); \r
dst.create(src.size(), CV_32F);\r
imgproc::cornerMinEigenVal_caller(blockSize, Dx, Dy, dst, gpuBorderType);\r