// No-GPU stub: the library was built without CUDA, so this always throws via
// throw_nogpu(); the return statement is unreachable and only satisfies the signature.
int cv::gpu::CascadeClassifier_GPU::detectMultiScale( const GpuMat& , GpuMat& , double , int , Size) { throw_nogpu(); return 0; }
\r
// ============ LBP cascade ==============================================//\r
-cv::gpu::CascadeClassifier_GPU_LBP::CascadeClassifier_GPU_LBP() { throw_nogpu(); }\r
-cv::gpu::CascadeClassifier_GPU_LBP::~CascadeClassifier_GPU_LBP() { throw_nogpu(); }\r
+cv::gpu::CascadeClassifier_GPU_LBP::CascadeClassifier_GPU_LBP(cv::Size /*frameSize*/){ throw_nogpu(); }\r
+cv::gpu::CascadeClassifier_GPU_LBP::~CascadeClassifier_GPU_LBP() { throw_nogpu(); }\r
\r
bool cv::gpu::CascadeClassifier_GPU_LBP::empty() const { throw_nogpu(); return true; }\r
bool cv::gpu::CascadeClassifier_GPU_LBP::load(const string&) { throw_nogpu(); return true; }\r
Size cv::gpu::CascadeClassifier_GPU_LBP::getClassifierSize() const { throw_nogpu(); return Size(); }\r
void cv::gpu::CascadeClassifier_GPU_LBP::preallocateIntegralBuffer(cv::Size /*desired*/) { throw_nogpu();}\r
+void cv::gpu::CascadeClassifier_GPU_LBP::initializeBuffers(cv::Size /*frame*/) { throw_nogpu();}\r
\r
int cv::gpu::CascadeClassifier_GPU_LBP::detectMultiScale(const cv::gpu::GpuMat& /*image*/, cv::gpu::GpuMat& /*scaledImageBuffer*/, cv::gpu::GpuMat& /*objectsBuf*/,\r
double /*scaleFactor*/, int /*minNeighbors*/, cv::Size /*maxObjectSize*/){ throw_nogpu(); return 0;}\r
\r
#else\r
\r
-cv::gpu::CascadeClassifier_GPU_LBP::CascadeClassifier_GPU_LBP(){}\r
+cv::gpu::CascadeClassifier_GPU_LBP::CascadeClassifier_GPU_LBP(cv::Size detectionFrameSize)\r
+{\r
+ if (detectionFrameSize != cv::Size())\r
+ initializeBuffers(detectionFrameSize);\r
+}\r
+\r
+void cv::gpu::CascadeClassifier_GPU_LBP::initializeBuffers(cv::Size frame)\r
+{\r
+ if (resuzeBuffer.empty() || frame.width > resuzeBuffer.cols || frame.height > resuzeBuffer.rows)\r
+ {\r
+ resuzeBuffer.create(frame, CV_8UC1);\r
+\r
+ integral.create(frame.height + 1, frame.width + 1, CV_32SC1);\r
+ NcvSize32u roiSize;\r
+ roiSize.width = frame.width;\r
+ roiSize.height = frame.height;\r
+\r
+ cudaDeviceProp prop;\r
+ cudaSafeCall( cudaGetDeviceProperties(&prop, cv::gpu::getDevice()) );\r
+\r
+ Ncv32u bufSize;\r
+ ncvSafeCall( nppiStIntegralGetSize_8u32u(roiSize, &bufSize, prop) );\r
+ // printf("HERE!!!!!!!%d\n", bufSize);\r
+ integralBuffer.create(1, bufSize, CV_8UC1);\r
+ }\r
+}\r
\r
// CUDA-build destructor: intentionally empty — member buffers are presumably
// released by their own destructors (GpuMat members); nothing to free here.
cv::gpu::CascadeClassifier_GPU_LBP::~CascadeClassifier_GPU_LBP(){}
\r
// --- Interior of the CUDA detectMultiScale: the function's signature begins before
// this view and its body continues past it, so only comments are added here. ---
// NOTE(review): this region still contains UNRESOLVED unified-diff markers ('-'/'+')
// from a partially applied patch; the '+' lines are the intended post-patch code.
// The markers must be resolved before this file can compile.
objects.create(1 , defaultObjSearchNum, CV_32SC4);

GpuMat candidates(1 , defaultObjSearchNum, CV_32SC4);
+    // GpuMat candidates(objects);
// An empty maxObjectSize means "no upper bound": fall back to the full image size.
if (maxObjectSize == cv::Size())
maxObjectSize = image.size();

// Post-patch code replaces the per-call scaledImageBuffer allocation with the
// lazily grown shared buffers set up by initializeBuffers().
-    scaledImageBuffer.create(image.rows + 1, image.cols + 1, CV_8U);
+    initializeBuffers(image.size());
+
// Host-side counter for classified windows and its device-side counterpart.
// NOTE(review): raw new[] here — confirm a matching delete[] exists later in this
// function (past this view), otherwise this leaks one unsigned int per call.
unsigned int* classified = new unsigned int[1];
*classified = 0;
unsigned int* dclassified;
// if( windowSize.width < minObjectSize.width || windowSize.height < minObjectSize.height )
// continue;

// Per-scale work (loop header is above this view): resize the input into a view of
// the shared scratch buffer, build its buffered integral image, then classify.
-    cv::gpu::resize(image, scaledImageBuffer, scaledImageSize, 0, 0, CV_INTER_LINEAR);
-    cv::gpu::integral(scaledImageBuffer, integral);
+    GpuMat scaledImg(resuzeBuffer, cv::Rect(0, 0, scaledImageSize.width, scaledImageSize.height));
+    GpuMat scaledIntegral(integral, cv::Rect(0, 0, scaledImageSize.width + 1, scaledImageSize.height + 1));
+    GpuMat currBuff = integralBuffer;//(integralBuffer, cv::Rect(0, 0, integralBuffer.width, integralBuffer.height));
+
+    cv::gpu::resize(image, scaledImg, scaledImageSize, 0, 0, CV_INTER_LINEAR);
+    cv::gpu::integralBuffered(scaledImg, scaledIntegral, currBuff);

// Window step: 2 px while factor <= 2, then 1 px — coarser stepping at small scales.
step = (factor <= 2.) + 1;

// Run the LBP stump classifier; the post-patch version passes the scaled integral
// ROI instead of the whole integral buffer.
cv::gpu::device::lbp::classifyStump(stage_mat, stage_mat.cols / sizeof(Stage), nodes_mat, leaves_mat, subsets_mat, features_mat,
-    integral, processingRectSize.width, processingRectSize.height, windowSize.width, windowSize.height, factor, step, subsetSize, candidates, dclassified);
+    scaledIntegral, processingRectSize.width, processingRectSize.height, windowSize.width, windowSize.height, factor, step, subsetSize, candidates, dclassified);
}
if (groupThreshold <= 0 || objects.empty())
return 0;