//! supports only CV_8UC1 source type\r
CV_EXPORTS void integral(GpuMat& src, GpuMat& sum, GpuMat& sqsum);\r
\r
+ //! computes a per-column running (cumulative) sum, supports only CV_32FC1 images
+ CV_EXPORTS void columnSum(const GpuMat& src, GpuMat& sum);\r
+\r
//! computes the standard deviation of integral images\r
//! supports only CV_32SC1 source type and CV_32FC1 sqr type\r
//! output will have CV_32FC1 type\r
\r
#include "internal_shared.hpp"\r
#include "opencv2/gpu/device/border_interpolate.hpp"\r
-#include "internal_shared.hpp"\r
\r
using namespace cv::gpu;\r
using namespace cv::gpu::device;\r
cudaSafeCall(cudaUnbindTexture(minEigenValDxTex));\r
cudaSafeCall(cudaUnbindTexture(minEigenValDyTex));\r
}\r
+\r
+////////////////////////////// Column Sum //////////////////////////////////////\r
+\r
+ __global__ void columnSumKernel_32F(int cols, int rows, const PtrStep src, const PtrStep dst)\r
+ {\r
+ int x = blockIdx.x * blockDim.x + threadIdx.x;\r
+\r
+ const float* src_data = (const float*)src.data + x;\r
+ float* dst_data = (float*)dst.data + x;\r
+\r
+ if (x < cols)\r
+ {\r
+ float sum = 0.f;\r
+ for (int y = 0; y < rows; ++y)\r
+ {\r
+ sum += src_data[y];\r
+ dst_data[y] = sum;\r
+ }\r
+ }\r
+ }\r
+\r
+\r
+ void columnSum_32F(const DevMem2D src, const DevMem2D dst)\r
+ {\r
+ dim3 threads(256);\r
+ dim3 grid(divUp(src.cols, threads.x));\r
+\r
+ columnSumKernel_32F<<<grid, threads>>>(src.cols, src.rows, src, dst);\r
+ cudaSafeCall(cudaThreadSynchronize());\r
+ }\r
+\r
}}}\r
\r
texture<unsigned char, 2> templTex_8U;\r
\r
\r
// Rename: "Naive" marks this as the direct (non-FFT) template-matching
// kernel. NOTE(review): the kernel body between the index computation and
// the closing brace is elided in this hunk; only the per-thread output
// coordinates are visible here.
-__global__ void matchTemplateKernel_8U_SQDIFF(int w, int h, DevMem2Df result)
+__global__ void matchTemplateNaiveKernel_8U_SQDIFF(int w, int h, DevMem2Df result)
{
    // One thread per result pixel (x, y) of a 2D launch grid.
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
}
\r
\r
// Host launcher for the naive 8U squared-difference template match.
// NOTE(review): lines between the grid setup and the texture filter-mode
// assignments are elided in this hunk (result-size grid dim, texture binds).
-void matchTemplate_8U_SQDIFF(const DevMem2D image, const DevMem2D templ, DevMem2Df result)
+void matchTemplateNaive_8U_SQDIFF(const DevMem2D image, const DevMem2D templ, DevMem2Df result)
{
    dim3 threads(32, 8);
    dim3 grid(divUp(image.cols - templ.cols + 1, threads.x), 
    // Point sampling: the kernel reads exact texels, no interpolation.
    imageTex_8U.filterMode = cudaFilterModePoint;
    templTex_8U.filterMode = cudaFilterModePoint;

-    matchTemplateKernel_8U_SQDIFF<<<grid, threads>>>(templ.cols, templ.rows, result);
+    matchTemplateNaiveKernel_8U_SQDIFF<<<grid, threads>>>(templ.cols, templ.rows, result);
    cudaSafeCall(cudaThreadSynchronize());
    cudaSafeCall(cudaUnbindTexture(imageTex_8U));
    cudaSafeCall(cudaUnbindTexture(templTex_8U));
texture<float, 2> templTex_32F;\r
\r
\r
// Rename: 32F counterpart of the naive squared-difference kernel.
// NOTE(review): the kernel body is elided in this hunk; only the
// per-thread output coordinates are visible here.
-__global__ void matchTemplateKernel_32F_SQDIFF(int w, int h, DevMem2Df result)
+__global__ void matchTemplateNaiveKernel_32F_SQDIFF(int w, int h, DevMem2Df result)
{
    // One thread per result pixel (x, y) of a 2D launch grid.
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
}
\r
\r
// Host launcher for the naive 32F squared-difference template match.
-void matchTemplate_32F_SQDIFF(const DevMem2D image, const DevMem2D templ, DevMem2Df result)
+void matchTemplateNaive_32F_SQDIFF(const DevMem2D image, const DevMem2D templ, DevMem2Df result)
{
    dim3 threads(32, 8);
    dim3 grid(divUp(image.cols - templ.cols + 1, threads.x), 
    // NOTE(review): this 32F path sets the filter mode on the *_8U textures
    // but unbinds the *_32F ones below — looks like a copy/paste slip;
    // should probably configure imageTex_32F / templTex_32F instead. Verify.
    imageTex_8U.filterMode = cudaFilterModePoint;
    templTex_8U.filterMode = cudaFilterModePoint;

-    matchTemplateKernel_32F_SQDIFF<<<grid, threads>>>(templ.cols, templ.rows, result);
+    matchTemplateNaiveKernel_32F_SQDIFF<<<grid, threads>>>(templ.cols, templ.rows, result);
    cudaSafeCall(cudaThreadSynchronize());
    cudaSafeCall(cudaUnbindTexture(imageTex_32F));
    cudaSafeCall(cudaUnbindTexture(templTex_32F));
    dim3 threads(256);
    dim3 grid(divUp(n, threads.x));
    multiplyAndNormalizeSpectsKernel<<<grid, threads>>>(n, scale, a, b, c);
    // Added sync: the launch is asynchronous, so without this any kernel
    // error would only surface at some later, unrelated CUDA call.
+    cudaSafeCall(cudaThreadSynchronize());
}
\r
\r
// CPU-only build stubs: when OpenCV is compiled without CUDA support,
// every gpu-module entry point simply reports the missing capability.
void cv::gpu::warpPerspective(const GpuMat&, GpuMat&, const Mat&, Size, int) { throw_nogpu(); }
void cv::gpu::rotate(const GpuMat&, GpuMat&, Size, double, double, double, int) { throw_nogpu(); }
void cv::gpu::integral(GpuMat&, GpuMat&, GpuMat&) { throw_nogpu(); }
+void cv::gpu::columnSum(const GpuMat&, GpuMat&) { throw_nogpu(); }
void cv::gpu::rectStdDev(const GpuMat&, const GpuMat&, GpuMat&, const Rect&) { throw_nogpu(); }
void cv::gpu::Canny(const GpuMat&, GpuMat&, double, double, int) { throw_nogpu(); }
void cv::gpu::evenLevels(GpuMat&, int, int, int) { throw_nogpu(); }
sum.step, sqsum.ptr<Npp32f>(), sqsum.step, sz, 0, 0.0f, h) );\r
}\r
\r
+//////////////////////////////////////////////////////////////////////////////\r
+// columnSum\r
+\r
+namespace cv { namespace gpu { namespace imgproc\r
+{\r
+ void columnSum_32F(const DevMem2D src, const DevMem2D dst);\r
+}}}\r
+\r
+void cv::gpu::columnSum(const GpuMat& src, GpuMat& dst)\r
+{\r
+ CV_Assert(src.type() == CV_32F);\r
+\r
+ dst.create(src.size(), CV_32F);\r
+ imgproc::columnSum_32F(src, dst);\r
+}\r
+\r
void cv::gpu::rectStdDev(const GpuMat& src, const GpuMat& sqr, GpuMat& dst, const Rect& rect)\r
{\r
CV_Assert(src.type() == CV_32SC1 && sqr.type() == CV_32FC1);\r
//M*/\r
\r
#include "precomp.hpp"\r
-#include <cufft.h>\r
#include <iostream>\r
#include <utility>\r
\r
\r
#else\r
\r
+#include <cufft.h>\r
+\r
namespace cv { namespace gpu { namespace imgproc \r
{ \r
void multiplyAndNormalizeSpects(int n, float scale, const cufftComplex* a, \r
const cufftComplex* b, cufftComplex* c);\r
- void matchTemplate_8U_SQDIFF(const DevMem2D image, const DevMem2D templ, DevMem2Df result);\r
- void matchTemplate_32F_SQDIFF(const DevMem2D image, const DevMem2D templ, DevMem2Df result);\r
+ void matchTemplateNaive_8U_SQDIFF(const DevMem2D image, const DevMem2D templ, DevMem2Df result);\r
+ void matchTemplateNaive_32F_SQDIFF(const DevMem2D image, const DevMem2D templ, DevMem2Df result);\r
}}}\r
\r
\r
// Specialization: 8-bit input, squared-difference metric. Result is a
// CV_32F map of size (cols - tcols + 1) x (rows - trows + 1); delegates
// to the renamed naive device implementation.
void matchTemplate<CV_8U, CV_TM_SQDIFF>(const GpuMat& image, const GpuMat& templ, GpuMat& result)
{
    result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
-    imgproc::matchTemplate_8U_SQDIFF(image, templ, result);
+    imgproc::matchTemplateNaive_8U_SQDIFF(image, templ, result);
}
\r
\r
// Specialization: 32-bit float input, squared-difference metric. Result is
// a CV_32F map of size (cols - tcols + 1) x (rows - trows + 1); delegates
// to the renamed naive device implementation.
void matchTemplate<CV_32F, CV_TM_SQDIFF>(const GpuMat& image, const GpuMat& templ, GpuMat& result)
{
    result.create(image.rows - templ.rows + 1, image.cols - templ.cols + 1, CV_32F);
-    imgproc::matchTemplate_32F_SQDIFF(image, templ, result);
+    imgproc::matchTemplateNaive_32F_SQDIFF(image, templ, result);
}
\r
\r
F(cout << "gpu_block: " << clock() - t << endl;)\r
if (!check(dst_gold, Mat(dst), 0.25f * h * w * 1e-5f)) return;\r
\r
- gen(image, n, m, CV_32F);\r
- gen(templ, h, w, CV_32F);\r
- F(t = clock();)\r
- matchTemplate(image, templ, dst_gold, CV_TM_CCORR);\r
- F(cout << "cpu:" << clock() - t << endl;)\r
- F(t = clock();)\r
- gpu::matchTemplate(gpu::GpuMat(image), gpu::GpuMat(templ), dst, CV_TM_CCORR);\r
- F(cout << "gpu_block: " << clock() - t << endl;)\r
- if (!check(dst_gold, Mat(dst), 0.25f * h * w * 1e-5f)) return;\r
+ //gen(image, n, m, CV_32F);\r
+ //gen(templ, h, w, CV_32F);\r
+ //F(t = clock();)\r
+ //matchTemplate(image, templ, dst_gold, CV_TM_CCORR);\r
+ //F(cout << "cpu:" << clock() - t << endl;)\r
+ //F(t = clock();)\r
+ //gpu::matchTemplate(gpu::GpuMat(image), gpu::GpuMat(templ), dst, CV_TM_CCORR);\r
+ //F(cout << "gpu_block: " << clock() - t << endl;)\r
+ //if (!check(dst_gold, Mat(dst), 0.25f * h * w * 1e-5f)) return;\r
}\r
}\r
catch (const Exception& e)\r