int subsetN = (maxCatCount + 31)/32;
queue<CvDTreeNode*> internalNodesQueue;
int size = (int)pow( 2.f, (float)ensemble->get_params().max_depth);
- Ptr<float> leafVals = new float[size];
+ std::vector<float> leafVals(size);
int leafValIdx = 0;
int internalNodeIdx = 1;
CvDTreeNode* tempNode;
cascadeParams = _cascadeParams;
featureParams = CvFeatureParams::create(cascadeParams.featureType);
featureParams->init(_featureParams);
- stageParams = new CvCascadeBoostParams;
+ stageParams = makePtr<CvCascadeBoostParams>();
*stageParams = _stageParams;
featureEvaluator = CvFeatureEvaluator::create(cascadeParams.featureType);
- featureEvaluator->init( (CvFeatureParams*)featureParams, numPos + numNeg, cascadeParams.winSize );
+ featureEvaluator->init( featureParams, numPos + numNeg, cascadeParams.winSize );
stageClassifiers.reserve( numStages );
}
cout << "PARAMETERS:" << endl;
break;
}
- CvCascadeBoost* tempStage = new CvCascadeBoost;
- bool isStageTrained = tempStage->train( (CvFeatureEvaluator*)featureEvaluator,
+ Ptr<CvCascadeBoost> tempStage = makePtr<CvCascadeBoost>();
+ bool isStageTrained = tempStage->train( featureEvaluator,
curNumSamples, _precalcValBufSize, _precalcIdxBufSize,
- *((CvCascadeBoostParams*)stageParams) );
+ *stageParams );
cout << "END>" << endl;
if(!isStageTrained)
void CvCascadeClassifier::writeFeatures( FileStorage &fs, const Mat& featureMap ) const
{
- ((CvFeatureEvaluator*)((Ptr<CvFeatureEvaluator>)featureEvaluator))->writeFeatures( fs, featureMap );
+ featureEvaluator->writeFeatures( fs, featureMap );
}
void CvCascadeClassifier::writeStages( FileStorage &fs, const Mat& featureMap ) const
sprintf( cmnt, "stage %d", i );
cvWriteComment( fs.fs, cmnt, 0 );
fs << "{";
- ((CvCascadeBoost*)((Ptr<CvCascadeBoost>)*it))->write( fs, featureMap );
+ (*it)->write( fs, featureMap );
fs << "}";
}
fs << "]";
if ( !node.isMap() || !cascadeParams.read( node ) )
return false;
- stageParams = new CvCascadeBoostParams;
+ stageParams = makePtr<CvCascadeBoostParams>();
FileNode rnode = node[CC_STAGE_PARAMS];
if ( !stageParams->read( rnode ) )
return false;
FileNodeIterator it = rnode.begin();
for( int i = 0; i < min( (int)rnode.size(), numStages ); i++, it++ )
{
- CvCascadeBoost* tempStage = new CvCascadeBoost;
- if ( !tempStage->read( *it, (CvFeatureEvaluator *)featureEvaluator, *((CvCascadeBoostParams*)stageParams) ) )
- {
- delete tempStage;
+ Ptr<CvCascadeBoost> tempStage = makePtr<CvCascadeBoost>();
+ if ( !tempStage->read( *it, featureEvaluator, *stageParams) )
return false;
- }
stageClassifiers.push_back(tempStage);
}
return true;
fs << "{";
fs << ICV_HAAR_FEATURE_NAME << "{";
- ((CvHaarEvaluator*)((CvFeatureEvaluator*)featureEvaluator))->writeFeature( fs, tempNode->split->var_idx );
+ ((CvHaarEvaluator*)featureEvaluator.get())->writeFeature( fs, tempNode->split->var_idx );
fs << "}";
fs << ICV_HAAR_THRESHOLD_NAME << tempNode->split->ord.c;
if ( !readParams( node ) )
return false;
featureEvaluator = CvFeatureEvaluator::create(cascadeParams.featureType);
- featureEvaluator->init( ((CvFeatureParams*)featureParams), numPos + numNeg, cascadeParams.winSize );
+ featureEvaluator->init( featureParams, numPos + numNeg, cascadeParams.winSize );
fs.release();
char buf[10];
node = fs.getFirstTopLevelNode();
if ( !fs.isOpened() )
break;
- CvCascadeBoost *tempStage = new CvCascadeBoost;
+ Ptr<CvCascadeBoost> tempStage = makePtr<CvCascadeBoost>();
- if ( !tempStage->read( node, (CvFeatureEvaluator*)featureEvaluator, *((CvCascadeBoostParams*)stageParams )) )
+ if ( !tempStage->read( node, featureEvaluator, *stageParams ))
{
- delete tempStage;
fs.release();
break;
}
for( vector< Ptr<CvCascadeBoost> >::const_iterator it = stageClassifiers.begin();
it != stageClassifiers.end(); it++ )
- ((CvCascadeBoost*)((Ptr<CvCascadeBoost>)(*it)))->markUsedFeaturesInMap( featureMap );
+ (*it)->markUsedFeaturesInMap( featureMap );
for( int fi = 0, idx = 0; fi < varCount; fi++ )
if ( featureMap.at<int>(0, fi) >= 0 )
CvCascadeParams cascadeParams;
CvCascadeBoostParams stageParams;
- Ptr<CvFeatureParams> featureParams[] = { Ptr<CvFeatureParams>(new CvHaarFeatureParams),
- Ptr<CvFeatureParams>(new CvLBPFeatureParams),
- Ptr<CvFeatureParams>(new CvHOGFeatureParams)
+ Ptr<CvFeatureParams> featureParams[] = { makePtr<CvHaarFeatureParams>(),
+ makePtr<CvLBPFeatureParams>(),
+ makePtr<CvHOGFeatureParams>()
};
int fc = sizeof(featureParams)/sizeof(featureParams[0]);
if( argc == 1 )
global float * optr = output +
mad24(gid + 1, elements_per_row, - 1 + out_offset / 4);
- float4 result = (float4)(0), out_v4;
+ float4 result_v4 = (float4)(0), out_v4;
+ float result = 0;
// we assume elements_per_row is a multiple of 4
- for(int i = 0; i < elements_per_row / 4; ++i, optr -= 4)
+ for(int i = 0; i < 4; ++ i, -- optr)
+ {
+ if(i < elements_per_row - cols)
+ {
+ *optr = result;
+ }
+ else
+ {
+ result = *optr + _a * result;
+ *optr = result;
+ }
+ }
+ result_v4.x = result;
+ optr -= 3;
+ for(int i = 1; i < elements_per_row / 4; ++i, optr -= 4)
{
// shift left, `offset` is type `size_t` so it cannot be negative
- out_v4 = vload4(0, optr - 3);
+ out_v4 = vload4(0, optr);
- result.w = out_v4.w + _a * result.x;
- result.z = out_v4.z + _a * result.w;
- result.y = out_v4.y + _a * result.z;
- result.x = out_v4.x + _a * result.y;
+ result_v4.w = out_v4.w + _a * result_v4.x;
+ result_v4.z = out_v4.z + _a * result_v4.w;
+ result_v4.y = out_v4.y + _a * result_v4.z;
+ result_v4.x = out_v4.x + _a * result_v4.y;
- vstore4(result, 0, optr - 3);
+ vstore4(result_v4, 0, optr);
}
}
buffer + mad24(rows - gid, elements_per_row, -1 + buffer_offset / 4);
float4 buf_v4, out_v4, res_v4 = (float4)(0);
-
- for(int i = 0; i < elements_per_row / 4; ++i, optr -= 4, bptr -= 4)
- {
- buf_v4 = vload4(0, bptr - 3);
- out_v4 = vload4(0, optr - 3);
+ float result = 0;
+ // we assume elements_per_row is a multiple of 4
+ for(int i = 0; i < 4; ++ i, -- optr, -- bptr)
+ {
+ if(i < elements_per_row - cols)
+ {
+ *optr = result;
+ }
+ else
+ {
+ result = *optr + *bptr * result;
+ *optr = result;
+ }
+ }
+ res_v4.x = result;
+ optr -= 3;
+ bptr -= 3;
+ for(int i = 0; i < elements_per_row / 4 - 1; ++i, optr -= 4, bptr -= 4)
+ {
+ buf_v4 = vload4(0, bptr);
+ out_v4 = vload4(0, optr);
res_v4.w = out_v4.w + buf_v4.w * res_v4.x;
res_v4.z = out_v4.z + buf_v4.z * res_v4.w;
res_v4.y = out_v4.y + buf_v4.y * res_v4.z;
res_v4.x = out_v4.x + buf_v4.x * res_v4.y;
- vstore4(res_v4, 0, optr - 3);
+ vstore4(res_v4, 0, optr);
}
}
};
// smart pointer allocation:
-Ptr<Retina> createRetina(Size inputSize){ return new RetinaImpl(inputSize); }
-Ptr<Retina> createRetina(Size inputSize, const bool colorMode, int colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght){return new RetinaImpl(inputSize, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght);}
+Ptr<Retina> createRetina(Size inputSize){ return makePtr<RetinaImpl>(inputSize); }
+Ptr<Retina> createRetina(Size inputSize, const bool colorMode, int colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght){
+ return makePtr<RetinaImpl>(inputSize, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght);
+}
// RetinaImpl code
// computing photoreceptors local density
MAKE_OCLMAT_SLICES(_RGBmosaic, 3);
MAKE_OCLMAT_SLICES(_colorLocalDensity, 3);
-
+ _colorLocalDensity.setTo(0);
_spatiotemporalLPfilter(_RGBmosaic_slices[0], _colorLocalDensity_slices[0]);
_spatiotemporalLPfilter(_RGBmosaic_slices[1], _colorLocalDensity_slices[1]);
_spatiotemporalLPfilter(_RGBmosaic_slices[2], _colorLocalDensity_slices[2]);
}
} /* namespace ocl */
-Ptr<Retina> createRetina_OCL(Size getInputSize){ return new ocl::RetinaOCLImpl(getInputSize); }
+Ptr<Retina> createRetina_OCL(Size getInputSize){ return makePtr<ocl::RetinaOCLImpl>(getInputSize); }
Ptr<Retina> createRetina_OCL(Size getInputSize, const bool colorMode, int colorSamplingMethod, const bool useRetinaLogSampling, const double reductionFactor, const double samplingStrenght)
{
- return new ocl::RetinaOCLImpl(getInputSize, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght);
+ return makePtr<ocl::RetinaOCLImpl>(getInputSize, colorMode, colorSamplingMethod, useRetinaLogSampling, reductionFactor, samplingStrenght);
}
} /* namespace bioinspired */
_imageOutput.resize(nbPixels*3);
_temp2.resize(nbPixels);
// allocate the main filter with 2 sets of setup properties (one for each low pass filter)
- _multiuseFilter = new BasicRetinaFilter(imageInput.height, imageInput.width, 2);
+ _multiuseFilter = makePtr<BasicRetinaFilter>(imageInput.height, imageInput.width, 2);
// allocate the color manager (multiplexer/demultiplexer)
- _colorEngine = new RetinaColor(imageInput.height, imageInput.width);
+ _colorEngine = makePtr<RetinaColor>(imageInput.height, imageInput.width);
// setup filter behaviors with default values
setup();
}
CV_EXPORTS Ptr<RetinaFastToneMapping> createRetinaFastToneMapping(Size inputSize)
{
- return new RetinaFastToneMappingImpl(inputSize);
+ return makePtr<RetinaFastToneMappingImpl>(inputSize);
}
}// end of namespace bioinspired
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
-#if defined(HAVE_OPENCV_OCL) && defined(HAVE_OPENCL)
+#if defined(HAVE_OPENCV_OCL)
#include "opencv2/ocl.hpp"
#define RETINA_ITERATIONS 5
-------------------
Finds centers in the grid of circles.
-.. ocv:function:: bool findCirclesGrid( InputArray image, Size patternSize, OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID, const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector() )
+.. ocv:function:: bool findCirclesGrid( InputArray image, Size patternSize, OutputArray centers, int flags=CALIB_CB_SYMMETRIC_GRID, const Ptr<FeatureDetector> &blobDetector = makePtr<SimpleBlobDetector>() )
.. ocv:pyfunction:: cv2.findCirclesGrid(image, patternSize[, centers[, flags[, blobDetector]]]) -> retval, centers
//! finds circles' grid pattern of the specified size in the image
CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID,
- const Ptr<FeatureDetector> &blobDetector = new SimpleBlobDetector());
+ const Ptr<FeatureDetector> &blobDetector = makePtr<SimpleBlobDetector>());
//! finds intrinsic and extrinsic camera parameters from several views of a known calibration pattern.
CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
#ifdef HAVE_TBB
// limit concurrency to get deterministic result
- cv::Ptr<tbb::task_scheduler_init> one_thread = new tbb::task_scheduler_init(1);
+ tbb::task_scheduler_init one_thread(1);
#endif
TEST_CYCLE()
if( !out_corners )
CV_Error( CV_StsNullPtr, "Null pointer to corners" );
- storage = cvCreateMemStorage(0);
- thresh_img = cvCreateMat( img->rows, img->cols, CV_8UC1 );
+ storage.reset(cvCreateMemStorage(0));
+ thresh_img.reset(cvCreateMat( img->rows, img->cols, CV_8UC1 ));
#ifdef DEBUG_CHESSBOARD
dbg_img = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3 );
{
// equalize the input image histogram -
// that should make the contrast between "black" and "white" areas big enough
- norm_img = cvCreateMat( img->rows, img->cols, CV_8UC1 );
+ norm_img.reset(cvCreateMat( img->rows, img->cols, CV_8UC1 ));
if( CV_MAT_CN(img->type) != 1 )
{
cv::Ptr<CvMat> gray;
if( CV_MAT_CN(img->type) != 1 )
{
- gray = cvCreateMat(img->rows, img->cols, CV_8UC1);
+ gray.reset(cvCreateMat(img->rows, img->cols, CV_8UC1));
cvCvtColor(img, gray, CV_BGR2GRAY);
}
else
{
- gray = cvCloneMat(img);
+ gray.reset(cvCloneMat(img));
}
int wsize = 2;
cvFindCornerSubPix( gray, out_corners, pattern_size.width*pattern_size.height,
int *all_count, CvCBQuad **all_quads, CvCBCorner **corners,
CvSize pattern_size, CvMemStorage* storage )
{
- cv::Ptr<CvMemStorage> temp_storage = cvCreateChildMemStorage( storage );
+ cv::Ptr<CvMemStorage> temp_storage(cvCreateChildMemStorage( storage ));
CvSeq* stack = cvCreateSeq( 0, sizeof(*stack), sizeof(void*), temp_storage );
// first find an interior quad
// create an array of quadrangle centers
cv::AutoBuffer<CvPoint2D32f> centers( quad_count );
- cv::Ptr<CvMemStorage> temp_storage = cvCreateMemStorage(0);
+ cv::Ptr<CvMemStorage> temp_storage(cvCreateMemStorage(0));
for( i = 0; i < quad_count; i++ )
{
icvFindConnectedQuads( CvCBQuad *quad, int quad_count, CvCBQuad **out_group,
int group_idx, CvMemStorage* storage )
{
- cv::Ptr<CvMemStorage> temp_storage = cvCreateChildMemStorage( storage );
+ cv::Ptr<CvMemStorage> temp_storage(cvCreateChildMemStorage( storage ));
CvSeq* stack = cvCreateSeq( 0, sizeof(*stack), sizeof(void*), temp_storage );
int i, count = 0;
min_size = 25; //cvRound( image->cols * image->rows * .03 * 0.01 * 0.92 );
// create temporary storage for contours and the sequence of pointers to found quadrangles
- temp_storage = cvCreateChildMemStorage( storage );
+ temp_storage.reset(cvCreateChildMemStorage( storage ));
root = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvSeq*), temp_storage );
// initialize contour retrieving routine
(objectPoints->rows == count && CV_MAT_CN(objectPoints->type)*objectPoints->cols == 3) ||
(objectPoints->rows == 3 && CV_MAT_CN(objectPoints->type) == 1 && objectPoints->cols == count)))
{
- matM = cvCreateMat( objectPoints->rows, objectPoints->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(objectPoints->type)) );
+ matM.reset(cvCreateMat( objectPoints->rows, objectPoints->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(objectPoints->type)) ));
cvConvert(objectPoints, matM);
}
else
(imagePoints->rows == count && CV_MAT_CN(imagePoints->type)*imagePoints->cols == 2) ||
(imagePoints->rows == 2 && CV_MAT_CN(imagePoints->type) == 1 && imagePoints->cols == count)))
{
- _m = cvCreateMat( imagePoints->rows, imagePoints->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(imagePoints->type)) );
+ _m.reset(cvCreateMat( imagePoints->rows, imagePoints->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(imagePoints->type)) ));
cvConvert(imagePoints, _m);
}
else
if( CV_MAT_TYPE(dpdr->type) == CV_64FC1 )
{
- _dpdr = cvCloneMat(dpdr);
+ _dpdr.reset(cvCloneMat(dpdr));
}
else
- _dpdr = cvCreateMat( 2*count, 3, CV_64FC1 );
+ _dpdr.reset(cvCreateMat( 2*count, 3, CV_64FC1 ));
dpdr_p = _dpdr->data.db;
dpdr_step = _dpdr->step/sizeof(dpdr_p[0]);
}
if( CV_MAT_TYPE(dpdt->type) == CV_64FC1 )
{
- _dpdt = cvCloneMat(dpdt);
+ _dpdt.reset(cvCloneMat(dpdt));
}
else
- _dpdt = cvCreateMat( 2*count, 3, CV_64FC1 );
+ _dpdt.reset(cvCreateMat( 2*count, 3, CV_64FC1 ));
dpdt_p = _dpdt->data.db;
dpdt_step = _dpdt->step/sizeof(dpdt_p[0]);
}
if( CV_MAT_TYPE(dpdf->type) == CV_64FC1 )
{
- _dpdf = cvCloneMat(dpdf);
+ _dpdf.reset(cvCloneMat(dpdf));
}
else
- _dpdf = cvCreateMat( 2*count, 2, CV_64FC1 );
+ _dpdf.reset(cvCreateMat( 2*count, 2, CV_64FC1 ));
dpdf_p = _dpdf->data.db;
dpdf_step = _dpdf->step/sizeof(dpdf_p[0]);
}
if( CV_MAT_TYPE(dpdc->type) == CV_64FC1 )
{
- _dpdc = cvCloneMat(dpdc);
+ _dpdc.reset(cvCloneMat(dpdc));
}
else
- _dpdc = cvCreateMat( 2*count, 2, CV_64FC1 );
+ _dpdc.reset(cvCreateMat( 2*count, 2, CV_64FC1 ));
dpdc_p = _dpdc->data.db;
dpdc_step = _dpdc->step/sizeof(dpdc_p[0]);
}
if( CV_MAT_TYPE(dpdk->type) == CV_64FC1 )
{
- _dpdk = cvCloneMat(dpdk);
+ _dpdk.reset(cvCloneMat(dpdk));
}
else
- _dpdk = cvCreateMat( dpdk->rows, dpdk->cols, CV_64FC1 );
+ _dpdk.reset(cvCreateMat( dpdk->rows, dpdk->cols, CV_64FC1 ));
dpdk_p = _dpdk->data.db;
dpdk_step = _dpdk->step/sizeof(dpdk_p[0]);
}
CV_IS_MAT(A) && CV_IS_MAT(rvec) && CV_IS_MAT(tvec) );
count = MAX(objectPoints->cols, objectPoints->rows);
- matM = cvCreateMat( 1, count, CV_64FC3 );
- _m = cvCreateMat( 1, count, CV_64FC2 );
+ matM.reset(cvCreateMat( 1, count, CV_64FC3 ));
+ _m.reset(cvCreateMat( 1, count, CV_64FC2 ));
cvConvertPointsHomogeneous( objectPoints, matM );
cvConvertPointsHomogeneous( imagePoints, _m );
CV_Assert( (CV_MAT_DEPTH(tvec->type) == CV_64F || CV_MAT_DEPTH(tvec->type) == CV_32F) &&
(tvec->rows == 1 || tvec->cols == 1) && tvec->rows*tvec->cols*CV_MAT_CN(tvec->type) == 3 );
- _mn = cvCreateMat( 1, count, CV_64FC2 );
- _Mxy = cvCreateMat( 1, count, CV_64FC2 );
+ _mn.reset(cvCreateMat( 1, count, CV_64FC2 ));
+ _Mxy.reset(cvCreateMat( 1, count, CV_64FC2 ));
// normalize image points
// (unapply the intrinsic matrix transformation and distortion)
CvPoint3D64f* M = (CvPoint3D64f*)matM->data.db;
CvPoint2D64f* mn = (CvPoint2D64f*)_mn->data.db;
- matL = cvCreateMat( 2*count, 12, CV_64F );
+ matL.reset(cvCreateMat( 2*count, 12, CV_64F ));
L = matL->data.db;
for( i = 0; i < count; i++, L += 24 )
if( objectPoints->rows != 1 || imagePoints->rows != 1 )
CV_Error( CV_StsBadSize, "object points and image points must be a single-row matrices" );
- matA = cvCreateMat( 2*nimages, 2, CV_64F );
- _b = cvCreateMat( 2*nimages, 1, CV_64F );
+ matA.reset(cvCreateMat( 2*nimages, 2, CV_64F ));
+ _b.reset(cvCreateMat( 2*nimages, 1, CV_64F ));
a[2] = (imageSize.width - 1)*0.5;
a[5] = (imageSize.height - 1)*0.5;
- _allH = cvCreateMat( nimages, 9, CV_64F );
+ _allH.reset(cvCreateMat( nimages, 9, CV_64F ));
// extract vanishing points in order to obtain initial value for the focal length
for( i = 0, pos = 0; i < nimages; i++, pos += ni )
total += ni;
}
- matM = cvCreateMat( 1, total, CV_64FC3 );
- _m = cvCreateMat( 1, total, CV_64FC2 );
+ matM.reset(cvCreateMat( 1, total, CV_64FC3 ));
+ _m.reset(cvCreateMat( 1, total, CV_64FC2 ));
cvConvertPointsHomogeneous( objectPoints, matM );
cvConvertPointsHomogeneous( imagePoints, _m );
nparams = NINTRINSIC + nimages*6;
- _Ji = cvCreateMat( maxPoints*2, NINTRINSIC, CV_64FC1 );
- _Je = cvCreateMat( maxPoints*2, 6, CV_64FC1 );
- _err = cvCreateMat( maxPoints*2, 1, CV_64FC1 );
+ _Ji.reset(cvCreateMat( maxPoints*2, NINTRINSIC, CV_64FC1 ));
+ _Je.reset(cvCreateMat( maxPoints*2, 6, CV_64FC1 ));
+ _err.reset(cvCreateMat( maxPoints*2, 1, CV_64FC1 ));
cvZero( _Ji );
_k = cvMat( distCoeffs->rows, distCoeffs->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k);
CV_MAT_TYPE(_npoints->type) == CV_32SC1 );
nimages = _npoints->cols + _npoints->rows - 1;
- npoints = cvCreateMat( _npoints->rows, _npoints->cols, _npoints->type );
+ npoints.reset(cvCreateMat( _npoints->rows, _npoints->cols, _npoints->type ));
cvCopy( _npoints, npoints );
for( i = 0, pointsTotal = 0; i < nimages; i++ )
pointsTotal += npoints->data.i[i];
}
- objectPoints = cvCreateMat( _objectPoints->rows, _objectPoints->cols,
- CV_64FC(CV_MAT_CN(_objectPoints->type)));
+ objectPoints.reset(cvCreateMat( _objectPoints->rows, _objectPoints->cols,
+ CV_64FC(CV_MAT_CN(_objectPoints->type))));
cvConvert( _objectPoints, objectPoints );
cvReshape( objectPoints, objectPoints, 3, 1 );
K[k] = cvMat(3,3,CV_64F,A[k]);
Dist[k] = cvMat(1,8,CV_64F,dk[k]);
- imagePoints[k] = cvCreateMat( points->rows, points->cols, CV_64FC(CV_MAT_CN(points->type)));
+ imagePoints[k].reset(cvCreateMat( points->rows, points->cols, CV_64FC(CV_MAT_CN(points->type))));
cvConvert( points, imagePoints[k] );
cvReshape( imagePoints[k], imagePoints[k], 2, 1 );
recomputeIntrinsics = (flags & CV_CALIB_FIX_INTRINSIC) == 0;
- err = cvCreateMat( maxPoints*2, 1, CV_64F );
- Je = cvCreateMat( maxPoints*2, 6, CV_64F );
- J_LR = cvCreateMat( maxPoints*2, 6, CV_64F );
- Ji = cvCreateMat( maxPoints*2, NINTRINSIC, CV_64F );
+ err.reset(cvCreateMat( maxPoints*2, 1, CV_64F ));
+ Je.reset(cvCreateMat( maxPoints*2, 6, CV_64F ));
+ J_LR.reset(cvCreateMat( maxPoints*2, 6, CV_64F ));
+ Ji.reset(cvCreateMat( maxPoints*2, NINTRINSIC, CV_64F ));
cvZero( Ji );
// we optimize for the inter-camera R(3),t(3), then, optionally,
nparams = 6*(nimages+1) + (recomputeIntrinsics ? NINTRINSIC*2 : 0);
// storage for initial [om(R){i}|t{i}] (in order to compute the median for each component)
- RT0 = cvCreateMat( 6, nimages, CV_64F );
+ RT0.reset(cvCreateMat( 6, nimages, CV_64F ));
solver.init( nparams, 0, termCrit );
if( recomputeIntrinsics )
{
const int N = 9;
int x, y, k;
- cv::Ptr<CvMat> _pts = cvCreateMat(1, N*N, CV_32FC2);
+ cv::Ptr<CvMat> _pts(cvCreateMat(1, N*N, CV_32FC2));
CvPoint2D32f* pts = (CvPoint2D32f*)(_pts->data.ptr);
for( y = k = 0; y < N; y++ )
npoints = _points1->rows * _points1->cols * CV_MAT_CN(_points1->type) / 2;
- _m1 = cvCreateMat( _points1->rows, _points1->cols, CV_64FC(CV_MAT_CN(_points1->type)) );
- _m2 = cvCreateMat( _points2->rows, _points2->cols, CV_64FC(CV_MAT_CN(_points2->type)) );
- _lines1 = cvCreateMat( 1, npoints, CV_64FC3 );
- _lines2 = cvCreateMat( 1, npoints, CV_64FC3 );
+ _m1.reset(cvCreateMat( _points1->rows, _points1->cols, CV_64FC(CV_MAT_CN(_points1->type)) ));
+ _m2.reset(cvCreateMat( _points2->rows, _points2->cols, CV_64FC(CV_MAT_CN(_points2->type)) ));
+ _lines1.reset(cvCreateMat( 1, npoints, CV_64FC3 ));
+ _lines2.reset(cvCreateMat( 1, npoints, CV_64FC3 ));
cvConvert( F0, &F );
CvLevMarq::CvLevMarq()
{
- mask = prevParam = param = J = err = JtJ = JtJN = JtErr = JtJV = JtJW = Ptr<CvMat>();
lambdaLg10 = 0; state = DONE;
criteria = cvTermCriteria(0,0,0);
iters = 0;
CvLevMarq::CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria0, bool _completeSymmFlag )
{
- mask = prevParam = param = J = err = JtJ = JtJN = JtErr = JtJV = JtJW = Ptr<CvMat>();
init(nparams, nerrs, criteria0, _completeSymmFlag);
}
{
if( !param || param->rows != nparams || nerrs != (err ? err->rows : 0) )
clear();
- mask = cvCreateMat( nparams, 1, CV_8U );
+ mask.reset(cvCreateMat( nparams, 1, CV_8U ));
cvSet(mask, cvScalarAll(1));
- prevParam = cvCreateMat( nparams, 1, CV_64F );
- param = cvCreateMat( nparams, 1, CV_64F );
- JtJ = cvCreateMat( nparams, nparams, CV_64F );
- JtJN = cvCreateMat( nparams, nparams, CV_64F );
- JtJV = cvCreateMat( nparams, nparams, CV_64F );
- JtJW = cvCreateMat( nparams, 1, CV_64F );
- JtErr = cvCreateMat( nparams, 1, CV_64F );
+ prevParam.reset(cvCreateMat( nparams, 1, CV_64F ));
+ param.reset(cvCreateMat( nparams, 1, CV_64F ));
+ JtJ.reset(cvCreateMat( nparams, nparams, CV_64F ));
+ JtJN.reset(cvCreateMat( nparams, nparams, CV_64F ));
+ JtJV.reset(cvCreateMat( nparams, nparams, CV_64F ));
+ JtJW.reset(cvCreateMat( nparams, 1, CV_64F ));
+ JtErr.reset(cvCreateMat( nparams, 1, CV_64F ));
if( nerrs > 0 )
{
- J = cvCreateMat( nerrs, nparams, CV_64F );
- err = cvCreateMat( nerrs, 1, CV_64F );
+ J.reset(cvCreateMat( nerrs, nparams, CV_64F ));
+ err.reset(cvCreateMat( nerrs, 1, CV_64F ));
}
prevErrNorm = DBL_MAX;
lambdaLg10 = -3;
{
double change;
- CV_Assert( err.empty() );
+ CV_Assert( !err );
if( state == DONE )
{
_param = param;
Mat E;
if( method == RANSAC )
- createRANSACPointSetRegistrator(new EMEstimatorCallback, 5, threshold, prob)->run(points1, points2, E, _mask);
+ createRANSACPointSetRegistrator(makePtr<EMEstimatorCallback>(), 5, threshold, prob)->run(points1, points2, E, _mask);
else
- createLMeDSPointSetRegistrator(new EMEstimatorCallback, 5, prob)->run(points1, points2, E, _mask);
+ createLMeDSPointSetRegistrator(makePtr<EMEstimatorCallback>(), 5, prob)->run(points1, points2, E, _mask);
return E;
}
if( ransacReprojThreshold <= 0 )
ransacReprojThreshold = defaultRANSACReprojThreshold;
- Ptr<PointSetRegistrator::Callback> cb = new HomographyEstimatorCallback;
+ Ptr<PointSetRegistrator::Callback> cb = makePtr<HomographyEstimatorCallback>();
if( method == 0 || npoints == 4 )
{
if( method == RANSAC || method == LMEDS )
cb->runKernel( src, dst, H );
Mat H8(8, 1, CV_64F, H.ptr<double>());
- createLMSolver(new HomographyRefineCallback(src, dst), 10)->run(H8);
+ createLMSolver(makePtr<HomographyRefineCallback>(src, dst), 10)->run(H8);
}
}
if( npoints < 7 )
return Mat();
- Ptr<PointSetRegistrator::Callback> cb = new FMEstimatorCallback;
+ Ptr<PointSetRegistrator::Callback> cb = makePtr<FMEstimatorCallback>();
int result;
if( npoints == 7 || method == FM_8POINT )
int ptype = param0.type();
CV_Assert( (param0.cols == 1 || param0.rows == 1) && (ptype == CV_32F || ptype == CV_64F));
- CV_Assert( !cb.empty() );
+ CV_Assert( cb );
int lx = param0.rows + param0.cols - 1;
param0.convertTo(x, CV_64F);
Ptr<LMSolver> createLMSolver(const Ptr<LMSolver::Callback>& cb, int maxIters)
{
CV_Assert( !LMSolverImpl_info_auto.name().empty() );
- return new LMSolverImpl(cb, maxIters);
+ return makePtr<LMSolverImpl>(cb, maxIters);
}
}
RNG rng((uint64)-1);
- CV_Assert( !cb.empty() );
+ CV_Assert( cb );
CV_Assert( confidence > 0 && confidence < 1 );
CV_Assert( count >= 0 && count2 == count );
RNG rng((uint64)-1);
- CV_Assert( !cb.empty() );
+ CV_Assert( cb );
CV_Assert( confidence > 0 && confidence < 1 );
CV_Assert( count >= 0 && count2 == count );
double _confidence, int _maxIters)
{
CV_Assert( !RANSACPointSetRegistrator_info_auto.name().empty() );
- return new RANSACPointSetRegistrator(_cb, _modelPoints, _threshold, _confidence, _maxIters);
+ return Ptr<PointSetRegistrator>(
+ new RANSACPointSetRegistrator(_cb, _modelPoints, _threshold, _confidence, _maxIters));
}
int _modelPoints, double _confidence, int _maxIters)
{
CV_Assert( !LMeDSPointSetRegistrator_info_auto.name().empty() );
- return new LMeDSPointSetRegistrator(_cb, _modelPoints, _confidence, _maxIters);
+ return Ptr<PointSetRegistrator>(
+ new LMeDSPointSetRegistrator(_cb, _modelPoints, _confidence, _maxIters));
}
class Affine3DEstimatorCallback : public PointSetRegistrator::Callback
param1 = param1 <= 0 ? 3 : param1;
param2 = (param2 < epsilon) ? 0.99 : (param2 > 1 - epsilon) ? 0.99 : param2;
- return createRANSACPointSetRegistrator(new Affine3DEstimatorCallback, 4, param1, param2)->run(dFrom, dTo, _out, _inliers);
+ return createRANSACPointSetRegistrator(makePtr<Affine3DEstimatorCallback>(), 4, param1, param2)->run(dFrom, dTo, _out, _inliers);
}
cv::Ptr<cv::StereoBM> cv::createStereoBM(int _numDisparities, int _SADWindowSize)
{
- return new StereoBMImpl(_numDisparities, _SADWindowSize);
+ return makePtr<StereoBMImpl>(_numDisparities, _SADWindowSize);
}
/* End of file. */
int speckleWindowSize, int speckleRange,
int mode)
{
- return new StereoSGBMImpl(minDisparity, numDisparities, SADWindowSize,
- P1, P2, disp12MaxDiff,
- preFilterCap, uniquenessRatio,
- speckleWindowSize, speckleRange,
- mode);
+ return Ptr<StereoSGBM>(
+ new StereoSGBMImpl(minDisparity, numDisparities, SADWindowSize,
+ P1, P2, disp12MaxDiff,
+ preFilterCap, uniquenessRatio,
+ speckleWindowSize, speckleRange,
+ mode));
}
Rect getValidDisparityROI( Rect roi1, Rect roi2,
}
// Make sure F uses double precision
- F = cvCreateMat(3,3,CV_64FC1);
+ F.reset(cvCreateMat(3,3,CV_64FC1));
cvConvert(F_, F);
// Make sure points1 uses double precision
- points1 = cvCreateMat(points1_->rows,points1_->cols,CV_64FC2);
+ points1.reset(cvCreateMat(points1_->rows,points1_->cols,CV_64FC2));
cvConvert(points1_, points1);
// Make sure points2 uses double precision
- points2 = cvCreateMat(points2_->rows,points2_->cols,CV_64FC2);
+ points2.reset(cvCreateMat(points2_->rows,points2_->cols,CV_64FC2));
cvConvert(points2_, points2);
- tmp33 = cvCreateMat(3,3,CV_64FC1);
- tmp31 = cvCreateMat(3,1,CV_64FC1), tmp31_2 = cvCreateMat(3,1,CV_64FC1);
- T1i = cvCreateMat(3,3,CV_64FC1), T2i = cvCreateMat(3,3,CV_64FC1);
- R1 = cvCreateMat(3,3,CV_64FC1), R2 = cvCreateMat(3,3,CV_64FC1);
- TFT = cvCreateMat(3,3,CV_64FC1), TFTt = cvCreateMat(3,3,CV_64FC1), RTFTR = cvCreateMat(3,3,CV_64FC1);
- U = cvCreateMat(3,3,CV_64FC1);
- S = cvCreateMat(3,3,CV_64FC1);
- V = cvCreateMat(3,3,CV_64FC1);
- e1 = cvCreateMat(3,1,CV_64FC1), e2 = cvCreateMat(3,1,CV_64FC1);
+ tmp33.reset(cvCreateMat(3,3,CV_64FC1));
+ tmp31.reset(cvCreateMat(3,1,CV_64FC1)), tmp31_2.reset(cvCreateMat(3,1,CV_64FC1));
+ T1i.reset(cvCreateMat(3,3,CV_64FC1)), T2i.reset(cvCreateMat(3,3,CV_64FC1));
+ R1.reset(cvCreateMat(3,3,CV_64FC1)), R2.reset(cvCreateMat(3,3,CV_64FC1));
+ TFT.reset(cvCreateMat(3,3,CV_64FC1)), TFTt.reset(cvCreateMat(3,3,CV_64FC1)), RTFTR.reset(cvCreateMat(3,3,CV_64FC1));
+ U.reset(cvCreateMat(3,3,CV_64FC1));
+ S.reset(cvCreateMat(3,3,CV_64FC1));
+ V.reset(cvCreateMat(3,3,CV_64FC1));
+ e1.reset(cvCreateMat(3,1,CV_64FC1)), e2.reset(cvCreateMat(3,1,CV_64FC1));
double x1, y1, x2, y2;
double scale;
double f1, f2, a, b, c, d;
- polynomial = cvCreateMat(1,7,CV_64FC1);
- result = cvCreateMat(1,6,CV_64FC2);
+ polynomial.reset(cvCreateMat(1,7,CV_64FC1));
+ result.reset(cvCreateMat(1,6,CV_64FC2));
double t_min, s_val, t, s;
for (int p = 0; p < points1->cols; ++p) {
// Replace F by T2-t * F * T1-t
{
// limit concurrency to get deterministic result
cv::theRNG().state = 20121010;
- cv::Ptr<tbb::task_scheduler_init> one_thread = new tbb::task_scheduler_init(1);
+ tbb::task_scheduler_init one_thread(1);
solvePnPRansac(object, image, camera_mat, dist_coef, rvec1, tvec1);
}
{
// single thread again
cv::theRNG().state = 20121010;
- cv::Ptr<tbb::task_scheduler_init> one_thread = new tbb::task_scheduler_init(1);
+ tbb::task_scheduler_init one_thread(1);
solvePnPRansac(object, image, camera_mat, dist_coef, rvec2, tvec2);
}
stateThread(STATE_THREAD_STOPPED),
timeWhenDetectingThreadStartedWork(-1)
{
- CV_Assert(!_detector.empty());
+ CV_Assert(_detector);
cascadeInThread = _detector;
cascadeForTracking(trackingDetector)
{
CV_Assert( (params.maxTrackLifetime >= 0)
-// && (!mainDetector.empty())
- && (!trackingDetector.empty()) );
+// && mainDetector
+ && trackingDetector );
- if (!mainDetector.empty()) {
- separateDetectionWork = new SeparateDetectionWork(*this, mainDetector);
+ if (mainDetector) {
+ separateDetectionWork.reset(new SeparateDetectionWork(*this, mainDetector));
}
weightsPositionsSmoothing.push_back(1);
{
CV_Assert(imageGray.type()==CV_8UC1);
- if ( (!separateDetectionWork.empty()) && (!separateDetectionWork->isWorking()) ) {
+ if ( separateDetectionWork && !separateDetectionWork->isWorking() ) {
separateDetectionWork->run();
}
std::vector<Rect> rectsWhereRegions;
bool shouldHandleResult=false;
- if (!separateDetectionWork.empty()) {
+ if (separateDetectionWork) {
shouldHandleResult = separateDetectionWork->communicateWithDetectingThread(imageGray, rectsWhereRegions);
}
bool cv::DetectionBasedTracker::run()
{
- if (!separateDetectionWork.empty()) {
+ if (separateDetectionWork) {
return separateDetectionWork->run();
}
return false;
void cv::DetectionBasedTracker::stop()
{
- if (!separateDetectionWork.empty()) {
+ if (separateDetectionWork) {
separateDetectionWork->stop();
}
}
void cv::DetectionBasedTracker::resetTracking()
{
- if (!separateDetectionWork.empty()) {
+ if (separateDetectionWork) {
separateDetectionWork->resetTracking();
}
trackedObjects.clear();
return false;
}
- if (!separateDetectionWork.empty()) {
+ if (separateDetectionWork) {
separateDetectionWork->lock();
}
parameters=params;
- if (!separateDetectionWork.empty()) {
+ if (separateDetectionWork) {
separateDetectionWork->unlock();
}
return true;
Ptr<FaceRecognizer> createEigenFaceRecognizer(int num_components, double threshold)
{
- return new Eigenfaces(num_components, threshold);
+ return makePtr<Eigenfaces>(num_components, threshold);
}
Ptr<FaceRecognizer> createFisherFaceRecognizer(int num_components, double threshold)
{
- return new Fisherfaces(num_components, threshold);
+ return makePtr<Fisherfaces>(num_components, threshold);
}
Ptr<FaceRecognizer> createLBPHFaceRecognizer(int radius, int neighbors,
int grid_x, int grid_y, double threshold)
{
- return new LBPH(radius, neighbors, grid_x, grid_y, threshold);
+ return makePtr<LBPH>(radius, neighbors, grid_x, grid_y, threshold);
}
CV_INIT_ALGORITHM(Eigenfaces, "FaceRecognizer.Eigenfaces",
bool initModule_contrib()
{
- Ptr<Algorithm> efaces = createEigenfaces_hidden(), ffaces = createFisherfaces_hidden(), lbph = createLBPH_hidden();
+ Ptr<Algorithm> efaces = createEigenfaces_ptr_hidden(), ffaces = createFisherfaces_ptr_hidden(), lbph = createLBPH_ptr_hidden();
return efaces->info() != 0 && ffaces->info() != 0 && lbph->info() != 0;
}
{
case CvFeatureTrackerParams::SIFT:
dd = Algorithm::create<Feature2D>("Feature2D.SIFT");
- if( dd.empty() )
+ if( !dd )
CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without SIFT support");
dd->set("nOctaveLayers", 5);
dd->set("contrastThreshold", 0.04);
break;
case CvFeatureTrackerParams::SURF:
dd = Algorithm::create<Feature2D>("Feature2D.SURF");
- if( dd.empty() )
+ if( !dd )
CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without SURF support");
dd->set("hessianThreshold", 400);
dd->set("nOctaves", 3);
break;
}
- matcher = new BFMatcher(NORM_L2);
+ matcher = makePtr<BFMatcher>(int(NORM_L2));
}
CvFeatureTracker::~CvFeatureTracker()
:param _class_id: object id
+KeyPoint::convert
+--------------------
+
+This method converts a vector of keypoints to a vector of points or the reverse, where each keypoint is assigned the same size and the same orientation.
+
+.. ocv:function:: void KeyPoint::convert(const std::vector<KeyPoint>& keypoints, std::vector<Point2f>& points2f, const std::vector<int>& keypointIndexes=std::vector<int>())
+
+.. ocv:function:: void KeyPoint::convert(const std::vector<Point2f>& points2f, std::vector<KeyPoint>& keypoints, float size=1, float response=1, int octave=0, int class_id=-1)
+
+.. ocv:pyfunction:: cv2.KeyPoint_convert(keypoints[, keypointIndexes]) -> points2f
+
+.. ocv:pyfunction:: cv2.KeyPoint_convert(points2f[, size[, response[, octave[, class_id]]]]) -> keypoints
+
+ :param keypoints: Keypoints obtained from any feature detection algorithm like SIFT/SURF/ORB
+
+ :param points2f: Array of (x,y) coordinates of each keypoint
+
+ :param keypointIndexes: Array of indexes of keypoints to be converted to points. (Acts like a mask to convert only specified keypoints)
+
+ :param _size: keypoint diameter
+
+ :param _response: keypoint detector response on the keypoint (that is, strength of the keypoint)
+
+ :param _octave: pyramid octave in which the keypoint has been detected
+
+ :param _class_id: object id
+
+
+KeyPoint::overlap
+--------------------
+
+This method computes overlap for a pair of keypoints. Overlap is the ratio between the area of the keypoint regions' intersection and the area of the keypoint regions' union (considering the keypoint region as a circle). If they don't overlap, we get zero. If they coincide at the same location with the same size, we get 1.
+
+.. ocv:function:: float KeyPoint::overlap(const KeyPoint& kp1, const KeyPoint& kp2)
+
+.. ocv:pyfunction:: cv2.KeyPoint_overlap(kp1, kp2) -> retval
+
+ :param kp1: First keypoint
+
+ :param kp2: Second keypoint
+
+
DMatch
------
.. ocv:class:: DMatch
};
-
-.. _Ptr:
-
Ptr
---
.. ocv:class:: Ptr
-Template class for smart reference-counting pointers ::
+Template class for smart pointers with shared ownership. ::
- template<typename _Tp> class Ptr
+ template<typename T>
+ struct Ptr
{
- public:
- // default constructor
+ typedef T element_type;
+
Ptr();
- // constructor that wraps the object pointer
- Ptr(_Tp* _obj);
- // destructor: calls release()
+
+ template<typename Y>
+ explicit Ptr(Y* p);
+ template<typename Y, typename D>
+ Ptr(Y* p, D d);
+
+ Ptr(const Ptr& o);
+ template<typename Y>
+ Ptr(const Ptr<Y>& o);
+ template<typename Y>
+ Ptr(const Ptr<Y>& o, T* p);
+
~Ptr();
- // copy constructor; increments ptr's reference counter
- Ptr(const Ptr& ptr);
- // assignment operator; decrements own reference counter
- // (with release()) and increments ptr's reference counter
- Ptr& operator = (const Ptr& ptr);
- // increments reference counter
- void addref();
- // decrements reference counter; when it becomes 0,
- // delete_obj() is called
+
+ Ptr& operator = (const Ptr& o);
+ template<typename Y>
+ Ptr& operator = (const Ptr<Y>& o);
+
void release();
- // user-specified custom object deletion operation.
- // by default, "delete obj;" is called
- void delete_obj();
- // returns true if obj == 0;
+
+ template<typename Y>
+ void reset(Y* p);
+ template<typename Y, typename D>
+ void reset(Y* p, D d);
+
+ void swap(Ptr& o);
+
+ T* get() const;
+
+ T& operator * () const;
+ T* operator -> () const;
+ operator T* () const;
+
bool empty() const;
- // provide access to the object fields and methods
- _Tp* operator -> ();
- const _Tp* operator -> () const;
-
- // return the underlying object pointer;
- // thanks to the methods, the Ptr<_Tp> can be
- // used instead of _Tp*
- operator _Tp* ();
- operator const _Tp*() const;
- protected:
- // the encapsulated object pointer
- _Tp* obj;
- // the associated reference counter
- int* refcount;
+ template<typename Y>
+ Ptr<Y> staticCast() const;
+ template<typename Y>
+ Ptr<Y> constCast() const;
+ template<typename Y>
+ Ptr<Y> dynamicCast() const;
};
-The ``Ptr<_Tp>`` class is a template class that wraps pointers of the corresponding type. It is
-similar to ``shared_ptr`` that is part of the Boost library
-(http://www.boost.org/doc/libs/1_40_0/libs/smart_ptr/shared_ptr.htm) and also part of the
-`C++0x <http://en.wikipedia.org/wiki/C++0x>`_ standard.
+A ``Ptr<T>`` pretends to be a pointer to an object of type T.
+Unlike an ordinary pointer, however, the object will be automatically
+cleaned up once all ``Ptr`` instances pointing to it are destroyed.
-This class provides the following options:
+``Ptr`` is similar to ``boost::shared_ptr`` that is part of the Boost library
+(http://www.boost.org/doc/libs/release/libs/smart_ptr/shared_ptr.htm)
+and ``std::shared_ptr`` from the `C++11 <http://en.wikipedia.org/wiki/C++11>`_ standard.
+
+This class provides the following advantages:
*
Default constructor, copy constructor, and assignment operator for an arbitrary C++ class
- or a C structure. For some objects, like files, windows, mutexes, sockets, and others, a copy
+ or C structure. For some objects, like files, windows, mutexes, sockets, and others, a copy
constructor or an assignment operator are difficult to define. For some other objects, like
complex classifiers in OpenCV, copy constructors are absent and not easy to implement. Finally,
some of complex OpenCV and your own data structures may be written in C.
- However, copy constructors and default constructors can simplify programming a lot.Besides,
- they are often required (for example, by STL containers). By wrapping a pointer to such a
- complex object ``TObj`` to ``Ptr<TObj>``, you automatically get all of the necessary
+ However, copy constructors and default constructors can simplify programming a lot. Besides,
+ they are often required (for example, by STL containers). By using a ``Ptr`` to such an
+ object instead of the object itself, you automatically get all of the necessary
constructors and the assignment operator.
*
*O(1)* complexity of the above-mentioned operations. While some structures, like ``std::vector``,
provide a copy constructor and an assignment operator, the operations may take a considerable
- amount of time if the data structures are large. But if the structures are put into ``Ptr<>``,
+ amount of time if the data structures are large. But if the structures are put into a ``Ptr``,
the overhead is small and independent of the data size.
*
- Automatic destruction, even for C structures. See the example below with ``FILE*``.
+ Automatic and customizable cleanup, even for C structures. See the example below with ``FILE*``.
*
Heterogeneous collections of objects. The standard STL and most other C++ and OpenCV containers
can store only objects of the same type and the same size. The classical solution to store objects
- of different types in the same container is to store pointers to the base class ``base_class_t*``
- instead but then you loose the automatic memory management. Again, by using ``Ptr<base_class_t>()``
- instead of the raw pointers, you can solve the problem.
-
-The ``Ptr`` class treats the wrapped object as a black box. The reference counter is allocated and
-managed separately. The only thing the pointer class needs to know about the object is how to
-deallocate it. This knowledge is encapsulated in the ``Ptr::delete_obj()`` method that is called when
-the reference counter becomes 0. If the object is a C++ class instance, no additional coding is
-needed, because the default implementation of this method calls ``delete obj;``. However, if the
-object is deallocated in a different way, the specialized method should be created. For example,
-if you want to wrap ``FILE``, the ``delete_obj`` may be implemented as follows: ::
-
- template<> inline void Ptr<FILE>::delete_obj()
- {
- fclose(obj); // no need to clear the pointer afterwards,
- // it is done externally.
- }
- ...
-
- // now use it:
- Ptr<FILE> f(fopen("myfile.txt", "r"));
- if(f.empty())
- throw ...;
+ of different types in the same container is to store pointers to the base class (``Base*``)
+ instead but then you lose the automatic memory management. Again, by using ``Ptr<Base>``
+ instead of raw pointers, you can solve the problem.
+
+A ``Ptr`` is said to *own* a pointer - that is, for each ``Ptr`` there is a pointer that will be deleted
+once all ``Ptr`` instances that own it are destroyed. The owned pointer may be null, in which case nothing is deleted.
+Each ``Ptr`` also *stores* a pointer. The stored pointer is the pointer the ``Ptr`` pretends to be;
+that is, the one you get when you use :ocv:func:`Ptr::get` or the conversion to ``T*``. It's usually
+the same as the owned pointer, but if you use casts or the general shared-ownership constructor, the two may diverge:
+the ``Ptr`` will still own the original pointer, but will itself point to something else.
+
+The owned pointer is treated as a black box. The only thing ``Ptr`` needs to know about it is how to
+delete it. This knowledge is encapsulated in the *deleter* - an auxiliary object that is associated
+with the owned pointer and shared between all ``Ptr`` instances that own it. The default deleter is
+an instance of ``DefaultDeleter``, which uses the standard C++ ``delete`` operator; as such it
+will work with any pointer allocated with the standard ``new`` operator.
+
+However, if the pointer must be deleted in a different way, you must specify a custom deleter upon
+``Ptr`` construction. A deleter is simply a callable object that accepts the pointer as its sole argument.
+For example, if you want to wrap ``FILE``, you may do so as follows::
+
+ Ptr<FILE> f(fopen("myfile.txt", "w"), fclose);
+ if(!f) throw ...;
fprintf(f, ....);
...
- // the file will be closed automatically by the Ptr<FILE> destructor.
+ // the file will be closed automatically by f's destructor.
+Alternatively, if you want all pointers of a particular type to be deleted the same way,
+you can specialize ``DefaultDeleter<T>::operator()`` for that type, like this::
-.. note:: The reference increment/decrement operations are implemented as atomic operations,
- and therefore it is normally safe to use the classes in multi-threaded applications.
- The same is true for :ocv:class:`Mat` and other C++ OpenCV classes that operate on
- the reference counters.
+ namespace cv {
+ template<> void DefaultDeleter<FILE>::operator ()(FILE * obj) const
+ {
+ fclose(obj);
+ }
+ }
-Ptr::Ptr
---------
-Various Ptr constructors.
+For convenience, the following types from the OpenCV C API already have such a specialization
+that calls the appropriate release function:
+
+* ``CvCapture``
+* :ocv:struct:`CvDTreeSplit`
+* :ocv:struct:`CvFileStorage`
+* ``CvHaarClassifierCascade``
+* :ocv:struct:`CvMat`
+* :ocv:struct:`CvMatND`
+* :ocv:struct:`CvMemStorage`
+* :ocv:struct:`CvSparseMat`
+* ``CvVideoWriter``
+* :ocv:struct:`IplImage`
+
+.. note:: The shared ownership mechanism is implemented with reference counting. As such,
+ cyclic ownership (e.g. when object ``a`` contains a ``Ptr`` to object ``b``, which
+ contains a ``Ptr`` to object ``a``) will lead to all involved objects never being
+ cleaned up. Avoid such situations.
+
+.. note:: It is safe to concurrently read (but not write) a ``Ptr`` instance from multiple threads
+ and therefore it is normally safe to use it in multi-threaded applications.
+ The same is true for :ocv:class:`Mat` and other C++ OpenCV classes that use internal
+ reference counts.
+
+Ptr::Ptr (null)
+------------------
.. ocv:function:: Ptr::Ptr()
-.. ocv:function:: Ptr::Ptr(_Tp* _obj)
-.. ocv:function:: Ptr::Ptr(const Ptr& ptr)
- :param _obj: Object for copy.
- :param ptr: Object for copy.
+ The default constructor creates a null ``Ptr`` - one that owns and stores a null pointer.
+
+Ptr::Ptr (assuming ownership)
+-----------------------------
+
+.. ocv:function:: template<typename Y> Ptr::Ptr(Y* p)
+.. ocv:function:: template<typename Y, typename D> Ptr::Ptr(Y* p, D d)
+
+ :param d: Deleter to use for the owned pointer.
+ :param p: Pointer to own.
+
+ If ``p`` is null, these are equivalent to the default constructor.
+
+ Otherwise, these constructors assume ownership of ``p`` - that is, the created ``Ptr`` owns
+ and stores ``p`` and assumes it is the sole owner of it. Don't use them if ``p`` is already
+ owned by another ``Ptr``, or else ``p`` will get deleted twice.
+
+ With the first constructor, ``DefaultDeleter<Y>()`` becomes the associated deleter (so ``p``
+ will eventually be deleted with the standard ``delete`` operator). ``Y`` must be a complete
+ type at the point of invocation.
+
+ With the second constructor, ``d`` becomes the associated deleter.
+
+ ``Y*`` must be convertible to ``T*``.
+
+ .. note:: It is often easier to use :ocv:func:`makePtr` instead.
+
+Ptr::Ptr (sharing ownership)
+----------------------------
+
+.. ocv:function:: Ptr::Ptr(const Ptr& o)
+.. ocv:function:: template<typename Y> Ptr::Ptr(const Ptr<Y>& o)
+.. ocv:function:: template<typename Y> Ptr::Ptr(const Ptr<Y>& o, T* p)
+
+ :param o: ``Ptr`` to share ownership with.
+ :param p: Pointer to store.
+
+ These constructors create a ``Ptr`` that shares ownership with another ``Ptr`` - that is,
+ own the same pointer as ``o``.
+
+ With the first two, the same pointer is stored, as well; for the second, ``Y*`` must be convertible to ``T*``.
+
+ With the third, ``p`` is stored, and ``Y`` may be any type. This constructor allows you to have completely
+ unrelated owned and stored pointers, and should be used with care to avoid confusion. A relatively
+ benign use is to create a non-owning ``Ptr``, like this::
+
+ ptr = Ptr<T>(Ptr<T>(), dont_delete_me); // owns nothing; will not delete the pointer.
Ptr::~Ptr
---------
-The Ptr destructor.
.. ocv:function:: Ptr::~Ptr()
+ The destructor is equivalent to calling :ocv:func:`Ptr::release`.
+
Ptr::operator =
----------------
-Assignment operator.
-.. ocv:function:: Ptr& Ptr::operator = (const Ptr& ptr)
+.. ocv:function:: Ptr& Ptr::operator = (const Ptr& o)
+.. ocv:function:: template<typename Y> Ptr& Ptr::operator = (const Ptr<Y>& o)
- :param ptr: Object for assignment.
+ :param o: ``Ptr`` to share ownership with.
-Decrements own reference counter (with ``release()``) and increments ptr's reference counter.
-
-Ptr::addref
------------
-Increments reference counter.
+ Assignment replaces the current ``Ptr`` instance with one that owns and stores the same
+ pointers as ``o`` and then destroys the old instance.
-.. ocv:function:: void Ptr::addref()
Ptr::release
------------
-Decrements reference counter; when it becomes 0, ``delete_obj()`` is called.
.. ocv:function:: void Ptr::release()
-Ptr::delete_obj
----------------
-User-specified custom object deletion operation. By default, ``delete obj;`` is called.
+ If no other ``Ptr`` instance owns the owned pointer, deletes it with the associated deleter.
+ Then sets both the owned and the stored pointers to ``NULL``.
+
+
+Ptr::reset
+----------
+
+.. ocv:function:: template<typename Y> void Ptr::reset(Y* p)
+.. ocv:function:: template<typename Y, typename D> void Ptr::reset(Y* p, D d)
+
+ :param d: Deleter to use for the owned pointer.
+ :param p: Pointer to own.
+
+ ``ptr.reset(...)`` is equivalent to ``ptr = Ptr<T>(...)``.
+
+Ptr::swap
+---------
-.. ocv:function:: void Ptr::delete_obj()
+.. ocv:function:: void Ptr::swap(Ptr& o)
+
+ :param o: ``Ptr`` to swap with.
+
+ Swaps the owned and stored pointers (and deleters, if any) of this and ``o``.
+
+Ptr::get
+--------
+
+.. ocv:function:: T* Ptr::get() const
+
+ Returns the stored pointer.
+
+Ptr pointer emulation
+---------------------
+
+.. ocv:function:: T& Ptr::operator * () const
+.. ocv:function:: T* Ptr::operator -> () const
+.. ocv:function:: Ptr::operator T* () const
+
+ These operators are what allows ``Ptr`` to pretend to be a pointer.
+
+ If ``ptr`` is a ``Ptr<T>``, then ``*ptr`` is equivalent to ``*ptr.get()``
+ and ``ptr->foo`` is equivalent to ``ptr.get()->foo``. In addition, ``ptr``
+ is implicitly convertible to ``T*``, and such conversion is equivalent to
+ ``ptr.get()``. As a corollary, ``if (ptr)`` is equivalent to ``if (ptr.get())``.
+ In other words, a ``Ptr`` behaves as if it was its own stored pointer.
Ptr::empty
----------
-Returns true if obj == 0;
-bool empty() const;
+.. ocv:function:: bool Ptr::empty() const
-Ptr::operator ->
-----------------
-Provide access to the object fields and methods.
+ ``ptr.empty()`` is equivalent to ``!ptr.get()``.
+
+Ptr casts
+---------
-.. ocv:function:: template<typename _Tp> _Tp* Ptr::operator -> ()
-.. ocv:function:: template<typename _Tp> const _Tp* Ptr::operator -> () const
+.. ocv:function:: template<typename Y> Ptr<Y> Ptr::staticCast() const
+.. ocv:function:: template<typename Y> Ptr<Y> Ptr::constCast() const
+.. ocv:function:: template<typename Y> Ptr<Y> Ptr::dynamicCast() const
+ If ``ptr`` is a ``Ptr``, then ``ptr.fooCast<Y>()`` is equivalent to
+ ``Ptr<Y>(ptr, foo_cast<Y>(ptr.get()))``. That is, these functions create
+ a new ``Ptr`` with the same owned pointer and a cast stored pointer.
-Ptr::operator _Tp*
-------------------
-Returns the underlying object pointer. Thanks to the methods, the ``Ptr<_Tp>`` can be used instead
-of ``_Tp*``.
+Ptr global swap
+---------------
+
+.. ocv:function:: template<typename T> void swap(Ptr<T>& ptr1, Ptr<T>& ptr2)
+
+ Equivalent to ``ptr1.swap(ptr2)``. Provided to help write generic algorithms.
+
+Ptr comparisons
+---------------
+
+.. ocv:function:: template<typename T> bool operator == (const Ptr<T>& ptr1, const Ptr<T>& ptr2)
+.. ocv:function:: template<typename T> bool operator != (const Ptr<T>& ptr1, const Ptr<T>& ptr2)
+
+ Return whether ``ptr1.get()`` and ``ptr2.get()`` are equal and not equal, respectively.
+
+makePtr
+-------
+
+.. ocv:function:: template<typename T> Ptr<T> makePtr()
+.. ocv:function:: template<typename T, typename A1> Ptr<T> makePtr(const A1& a1)
+.. ocv:function:: template<typename T, typename A1, typename A2> Ptr<T> makePtr(const A1& a1, const A2& a2)
+.. ocv:function:: template<typename T, typename A1, typename A2, typename A3> Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3)
+
+ (and so on...)
-.. ocv:function:: template<typename _Tp> Ptr::operator _Tp* ()
-.. ocv:function:: template<typename _Tp> Ptr::operator const _Tp*() const
+ ``makePtr<T>(...)`` is equivalent to ``Ptr<T>(new T(...))``. It is shorter than the latter, and
+ it's marginally safer than using a constructor or :ocv:func:`Ptr::reset`, since it ensures that
+ the owned pointer is new and thus not owned by any other ``Ptr`` instance.
+ Unfortunately, perfect forwarding is impossible to implement in C++03, and so ``makePtr`` is limited
+ to constructors of ``T`` that have up to 10 arguments, none of which are non-const references.
Mat
---
:param name: The algorithm name, one of the names returned by ``Algorithm::getList()``.
-This static method creates a new instance of the specified algorithm. If there is no such algorithm, the method will silently return null pointer (that can be checked by ``Ptr::empty()`` method). Also, you should specify the particular ``Algorithm`` subclass as ``_Tp`` (or simply ``Algorithm`` if you do not know it at that point). ::
+This static method creates a new instance of the specified algorithm. If there is no such algorithm, the method will silently return a null pointer. Also, you should specify the particular ``Algorithm`` subclass as ``_Tp`` (or simply ``Algorithm`` if you do not know it at that point). ::
Ptr<BackgroundSubtractor> bgfg = Algorithm::create<BackgroundSubtractor>("BackgroundSubtractor.MOG2");
// matrix will be deallocated, since it is not referenced by anyone
C = C.clone();
-You see that the use of ``Mat`` and other basic structures is simple. But what about high-level classes or even user data types created without taking automatic memory management into account? For them, OpenCV offers the ``Ptr<>`` template class that is similar to ``std::shared_ptr`` from C++ TR1. So, instead of using plain pointers::
+You see that the use of ``Mat`` and other basic structures is simple. But what about high-level classes or even user
+data types created without taking automatic memory management into account? For them, OpenCV offers the :ocv:class:`Ptr`
+template class that is similar to ``std::shared_ptr`` from C++11. So, instead of using plain pointers::
T* ptr = new T(...);
you can use::
- Ptr<T> ptr = new T(...);
+ Ptr<T> ptr(new T(...));
-That is, ``Ptr<T> ptr`` encapsulates a pointer to a ``T`` instance and a reference counter associated with the pointer. See the
-:ocv:class:`Ptr`
-description for details.
+or::
+
+ Ptr<T> ptr = makePtr<T>(...);
+
+``Ptr<T>`` encapsulates a pointer to a ``T`` instance and a reference counter associated with the pointer. See the
+:ocv:class:`Ptr` description for details.
.. _AutomaticAllocation:
-//////// specializied implementations of Ptr::delete_obj() for classic OpenCV types ////////
+////// specialized implementations of DefaultDeleter::operator() for classic OpenCV types //////
-template<> CV_EXPORTS void Ptr<CvMat>::delete_obj();
-template<> CV_EXPORTS void Ptr<IplImage>::delete_obj();
-template<> CV_EXPORTS void Ptr<CvMatND>::delete_obj();
-template<> CV_EXPORTS void Ptr<CvSparseMat>::delete_obj();
-template<> CV_EXPORTS void Ptr<CvMemStorage>::delete_obj();
+template<> CV_EXPORTS void DefaultDeleter<CvMat>::operator ()(CvMat* obj) const;
+template<> CV_EXPORTS void DefaultDeleter<IplImage>::operator ()(IplImage* obj) const;
+template<> CV_EXPORTS void DefaultDeleter<CvMatND>::operator ()(CvMatND* obj) const;
+template<> CV_EXPORTS void DefaultDeleter<CvSparseMat>::operator ()(CvSparseMat* obj) const;
+template<> CV_EXPORTS void DefaultDeleter<CvMemStorage>::operator ()(CvMemStorage* obj) const;
////////////// convenient wrappers for operating old-style dynamic structures //////////////
}} // namespace cv { namespace cuda {
-namespace cv {
-
-template <> CV_EXPORTS void Ptr<cv::cuda::Stream::Impl>::delete_obj();
-template <> CV_EXPORTS void Ptr<cv::cuda::Event::Impl>::delete_obj();
-
-}
#include "opencv2/core/cuda.inl.hpp"
size_type max_size() const { return cv::max(static_cast<_Tp>(-1)/sizeof(_Tp), 1); }
};
+namespace detail
+{
+// Metafunction to avoid taking a reference to void.
+template<typename T>
+struct RefOrVoid { typedef T& type; };
-//////////////////// generic_type ref-counting pointer class for C/C++ objects ////////////////////////
+template<>
+struct RefOrVoid<void>{ typedef void type; };
-/*!
- Smart pointer to dynamically allocated objects.
+template<>
+struct RefOrVoid<const void>{ typedef const void type; };
- This is template pointer-wrapping class that stores the associated reference counter along with the
- object pointer. The class is similar to std::smart_ptr<> from the recent addons to the C++ standard,
- but is shorter to write :) and self-contained (i.e. does add any dependency on the compiler or an external library).
+template<>
+struct RefOrVoid<volatile void>{ typedef volatile void type; };
- Basically, you can use "Ptr<MyObjectType> ptr" (or faster "const Ptr<MyObjectType>& ptr" for read-only access)
- everywhere instead of "MyObjectType* ptr", where MyObjectType is some C structure or a C++ class.
- To make it all work, you need to specialize Ptr<>::delete_obj(), like:
+template<>
+struct RefOrVoid<const volatile void>{ typedef const volatile void type; };
- \code
- template<> CV_EXPORTS void Ptr<MyObjectType>::delete_obj() { call_destructor_func(obj); }
- \endcode
+// This class would be private to Ptr, if it didn't have to be a non-template.
+struct PtrOwner;
+
+}
+
+template<typename Y>
+struct DefaultDeleter
+{
+ void operator () (Y* p) const;
+};
- \note{if MyObjectType is a C++ class with a destructor, you do not need to specialize delete_obj(),
- since the default implementation calls "delete obj;"}
+/*
+ A smart shared pointer class with reference counting.
- \note{Another good property of the class is that the operations on the reference counter are atomic,
- i.e. it is safe to use the class in multi-threaded applications}
+ A Ptr<T> stores a pointer and owns a (potentially different) pointer.
+ The stored pointer has type T and is the one returned by get() et al,
+ while the owned pointer can have any type and is the one deleted
+ when there are no more Ptrs that own it. You can't directly obtain the
+ owned pointer.
+
+ The interface of this class is mostly a subset of that of C++11's
+ std::shared_ptr.
*/
-template<typename _Tp> class Ptr
+template<typename T>
+struct Ptr
{
-public:
- //! empty constructor
+ /* Generic programming support. */
+ typedef T element_type;
+
+ /* Ptr that owns NULL and stores NULL. */
Ptr();
- //! take ownership of the pointer. The associated reference counter is allocated and set to 1
- Ptr(_Tp* _obj);
- //! calls release()
+
+ /* Ptr that owns p and stores p. The owned pointer will be deleted with
+ DefaultDeleter<Y>. Y must be a complete type and Y* must be
+ convertible to T*. */
+ template<typename Y>
+ explicit Ptr(Y* p);
+
+ /* Ptr that owns p and stores p. The owned pointer will be deleted by
+ calling d(p). Y* must be convertible to T*. */
+ template<typename Y, typename D>
+ Ptr(Y* p, D d);
+
+ /* Same as the constructor below; it exists to suppress the generation
+ of the implicit copy constructor. */
+ Ptr(const Ptr& o);
+
+ /* Ptr that owns the same pointer as o and stores the same pointer as o,
+ converted to T*. Naturally, Y* must be convertible to T*. */
+ template<typename Y>
+ Ptr(const Ptr<Y>& o);
+
+ /* Ptr that owns same pointer as o, and stores p. Useful for casts and
+ creating non-owning Ptrs. */
+ template<typename Y>
+ Ptr(const Ptr<Y>& o, T* p);
+
+ /* Equivalent to release(). */
~Ptr();
- //! copy constructor. Copies the members and calls addref()
- Ptr(const Ptr& ptr);
- template<typename _Tp2> Ptr(const Ptr<_Tp2>& ptr);
- //! copy operator. Calls ptr.addref() and release() before copying the members
- Ptr& operator = (const Ptr& ptr);
- //! increments the reference counter
- void addref();
- //! decrements the reference counter. If it reaches 0, delete_obj() is called
+
+ /* Same as assignment below; exists to suppress the generation of the
+ implicit assignment operator. */
+ Ptr& operator = (const Ptr& o);
+
+ template<typename Y>
+ Ptr& operator = (const Ptr<Y>& o);
+
+ /* Resets both the owned and stored pointers to NULL. Deletes the owned
+ pointer with the associated deleter if it's not owned by any other
+ Ptr and is non-zero. It's called reset() in std::shared_ptr; here
+ it is release() for compatibility with old OpenCV versions. */
void release();
- //! deletes the object. Override if needed
- void delete_obj();
- //! returns true iff obj==NULL
+
+ /* Equivalent to assigning from Ptr<T>(p). */
+ template<typename Y>
+ void reset(Y* p);
+
+ /* Equivalent to assigning from Ptr<T>(p, d). */
+ template<typename Y, typename D>
+ void reset(Y* p, D d);
+
+ /* Swaps the stored and owned pointers of this and o. */
+ void swap(Ptr& o);
+
+ /* Returns the stored pointer. */
+ T* get() const;
+
+ /* Ordinary pointer emulation. */
+ typename detail::RefOrVoid<T>::type operator * () const;
+ T* operator -> () const;
+
+ /* Equivalent to get(). */
+ operator T* () const;
+
+ /* Equivalent to !*this. */
bool empty() const;
- //! cast pointer to another type
- template<typename _Tp2> Ptr<_Tp2> ptr();
- template<typename _Tp2> const Ptr<_Tp2> ptr() const;
+ /* Returns a Ptr that owns the same pointer as this, and stores the same
+ pointer as this, except converted via static_cast to Y*. */
+ template<typename Y>
+ Ptr<Y> staticCast() const;
+
+ /* Ditto for const_cast. */
+ template<typename Y>
+ Ptr<Y> constCast() const;
- //! helper operators making "Ptr<T> ptr" use very similar to "T* ptr".
- _Tp* operator -> ();
- const _Tp* operator -> () const;
+ /* Ditto for dynamic_cast. */
+ template<typename Y>
+ Ptr<Y> dynamicCast() const;
- operator _Tp* ();
- operator const _Tp*() const;
+private:
+ detail::PtrOwner* owner;
+ T* stored;
- _Tp* obj; //< the object pointer.
- int* refcount; //< the associated reference counter
+ template<typename Y>
+ friend struct Ptr; // have to do this for the cross-type copy constructor
};
+/* Overload of the generic swap. */
+template<typename T>
+void swap(Ptr<T>& ptr1, Ptr<T>& ptr2);
+
+/* Obvious comparisons. */
+template<typename T>
+bool operator == (const Ptr<T>& ptr1, const Ptr<T>& ptr2);
+template<typename T>
+bool operator != (const Ptr<T>& ptr1, const Ptr<T>& ptr2);
+
+/* Convenience creation functions. In the far future, there may be variadic templates here. */
+template<typename T>
+Ptr<T> makePtr();
+template<typename T, typename A1>
+Ptr<T> makePtr(const A1& a1);
+template<typename T, typename A1, typename A2>
+Ptr<T> makePtr(const A1& a1, const A2& a2);
+template<typename T, typename A1, typename A2, typename A3>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3);
+template<typename T, typename A1, typename A2, typename A3, typename A4>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4);
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5);
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6);
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7);
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8);
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9);
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9, typename A10>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, const A10& a10);
//////////////////////////////// string class ////////////////////////////////
};
-
-/////////////////////////// cv::Ptr implementation ///////////////////////////
-
-template<typename _Tp> inline
-Ptr<_Tp>::Ptr()
- : obj(0), refcount(0) {}
-
-template<typename _Tp> inline
-Ptr<_Tp>::Ptr(_Tp* _obj)
- : obj(_obj)
-{
- if(obj)
- {
- refcount = (int*)fastMalloc(sizeof(*refcount));
- *refcount = 1;
- }
- else
- refcount = 0;
-}
-
-template<typename _Tp> template<typename _Tp2>
-Ptr<_Tp>::Ptr(const Ptr<_Tp2>& p)
- : obj(0), refcount(0)
-{
- if (p.empty())
- return;
-
- _Tp* p_casted = dynamic_cast<_Tp*>(p.obj);
- if (!p_casted)
- return;
-
- obj = p_casted;
- refcount = p.refcount;
- addref();
-}
-
-template<typename _Tp> inline
-Ptr<_Tp>::~Ptr()
-{
- release();
-}
-
-template<typename _Tp> inline
-void Ptr<_Tp>::addref()
-{
- if( refcount )
- CV_XADD(refcount, 1);
-}
-
-template<typename _Tp> inline
-void Ptr<_Tp>::release()
-{
- if( refcount && CV_XADD(refcount, -1) == 1 )
- {
- delete_obj();
- fastFree(refcount);
- }
- refcount = 0;
- obj = 0;
-}
-
-template<typename _Tp> inline
-void Ptr<_Tp>::delete_obj()
-{
- if( obj )
- delete obj;
-}
-
-template<typename _Tp> inline
-Ptr<_Tp>::Ptr(const Ptr<_Tp>& _ptr)
-{
- obj = _ptr.obj;
- refcount = _ptr.refcount;
- addref();
-}
-
-template<typename _Tp> inline
-Ptr<_Tp>& Ptr<_Tp>::operator = (const Ptr<_Tp>& _ptr)
-{
- int* _refcount = _ptr.refcount;
- if( _refcount )
- CV_XADD(_refcount, 1);
- release();
- obj = _ptr.obj;
- refcount = _refcount;
- return *this;
-}
-
-template<typename _Tp> inline
-_Tp* Ptr<_Tp>::operator -> ()
-{
- return obj;
-}
-
-template<typename _Tp> inline
-const _Tp* Ptr<_Tp>::operator -> () const
-{
- return obj;
-}
-
-template<typename _Tp> inline
-Ptr<_Tp>::operator _Tp* ()
-{
- return obj;
-}
-
-template<typename _Tp> inline
-Ptr<_Tp>::operator const _Tp*() const
-{
- return obj;
-}
-
-template<typename _Tp> inline
-bool Ptr<_Tp>::empty() const
-{
- return obj == 0;
-}
-
-template<typename _Tp> template<typename _Tp2> inline
-Ptr<_Tp2> Ptr<_Tp>::ptr()
-{
- Ptr<_Tp2> p;
- if( !obj )
- return p;
-
- _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj);
- if (!obj_casted)
- return p;
-
- if( refcount )
- CV_XADD(refcount, 1);
-
- p.obj = obj_casted;
- p.refcount = refcount;
- return p;
-}
-
-template<typename _Tp> template<typename _Tp2> inline
-const Ptr<_Tp2> Ptr<_Tp>::ptr() const
-{
- Ptr<_Tp2> p;
- if( !obj )
- return p;
-
- _Tp2* obj_casted = dynamic_cast<_Tp2*>(obj);
- if (!obj_casted)
- return p;
-
- if( refcount )
- CV_XADD(refcount, 1);
-
- p.obj = obj_casted;
- p.refcount = refcount;
- return p;
-}
-
-template<class _Tp, class _Tp2> static inline
-bool operator == (const Ptr<_Tp>& a, const Ptr<_Tp2>& b)
-{
- return a.refcount == b.refcount;
-}
-
-template<class _Tp, class _Tp2> static inline
-bool operator != (const Ptr<_Tp>& a, const Ptr<_Tp2>& b)
-{
- return a.refcount != b.refcount;
-}
-
-
-
////////////////////////// cv::String implementation /////////////////////////
inline
}
}
+#include "opencv2/core/ptr.inl.hpp"
+
#endif //__OPENCV_CORE_CVSTD_HPP__
}}
-namespace cv {
-
-template <> CV_EXPORTS void Ptr<cv::ogl::Buffer::Impl>::delete_obj();
-template <> CV_EXPORTS void Ptr<cv::ogl::Texture2D::Impl>::delete_obj();
-
-}
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
template<typename _Tp> inline
Ptr<_Tp> Algorithm::create(const String& name)
{
- return _create(name).ptr<_Tp>();
+ return _create(name).dynamicCast<_Tp>();
}
template<typename _Tp> inline
void Algorithm::set(const char* _name, const Ptr<_Tp>& value)
{
- Ptr<Algorithm> algo_ptr = value. template ptr<cv::Algorithm>();
- if (algo_ptr.empty()) {
+ Ptr<Algorithm> algo_ptr = value. template dynamicCast<cv::Algorithm>();
+ if (!algo_ptr) {
CV_Error( Error::StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
}
info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
void Algorithm::setAlgorithm(const char* _name, const Ptr<_Tp>& value)
{
Ptr<Algorithm> algo_ptr = value. template ptr<cv::Algorithm>();
- if (algo_ptr.empty()) {
+ if (!algo_ptr) {
CV_Error( Error::StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
}
info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
//! the full constructor that opens file storage for reading or writing
CV_WRAP FileStorage(const String& source, int flags, const String& encoding=String());
//! the constructor that takes pointer to the C FileStorage structure
- FileStorage(CvFileStorage* fs);
+ FileStorage(CvFileStorage* fs, bool owning=true);
//! the destructor. calls release()
virtual ~FileStorage();
CV_WRAP FileNode operator[](const char* nodename) const;
//! returns pointer to the underlying C FileStorage structure
- CvFileStorage* operator *() { return fs; }
+ CvFileStorage* operator *() { return fs.get(); }
//! returns pointer to the underlying C FileStorage structure
- const CvFileStorage* operator *() const { return fs; }
+ const CvFileStorage* operator *() const { return fs.get(); }
//! writes one or more numbers of the specified format to the currently written structure
void writeRaw( const String& fmt, const uchar* vec, size_t len );
//! writes the registered C structure (CvMat, CvMatND, CvSeq). See cvWrite()
int state; //!< the writer state
};
-template<> CV_EXPORTS void Ptr<CvFileStorage>::delete_obj();
+template<> CV_EXPORTS void DefaultDeleter<CvFileStorage>::operator ()(CvFileStorage* obj) const;
/*!
File Storage Node class
} //namespace cv
#define CV_INIT_ALGORITHM(classname, algname, memberinit) \
- static ::cv::Algorithm* create##classname##_hidden() \
+ static inline ::cv::Algorithm* create##classname##_hidden() \
{ \
return new classname; \
} \
\
- static ::cv::AlgorithmInfo& classname##_info() \
+ static inline ::cv::Ptr< ::cv::Algorithm> create##classname##_ptr_hidden() \
+ { \
+ return ::cv::makePtr<classname>(); \
+ } \
+ \
+ static inline ::cv::AlgorithmInfo& classname##_info() \
{ \
static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname##_hidden); \
return classname##_info_var; \
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, NVIDIA Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the copyright holders or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#ifndef __OPENCV_CORE_PTR_INL_HPP__
+#define __OPENCV_CORE_PTR_INL_HPP__
+
+#include <algorithm>
+
+namespace cv {
+
+template<typename Y>
+void DefaultDeleter<Y>::operator () (Y* p) const
+{
+ delete p;
+}
+
+namespace detail
+{
+
+struct PtrOwner
+{
+ PtrOwner() : refCount(1)
+ {}
+
+ void incRef()
+ {
+ CV_XADD(&refCount, 1);
+ }
+
+ void decRef()
+ {
+ if (CV_XADD(&refCount, -1) == 1) deleteSelf();
+ }
+
+protected:
+ /* This doesn't really need to be virtual, since PtrOwner is never deleted
+ directly, but it doesn't hurt and it helps avoid warnings. */
+ virtual ~PtrOwner()
+ {}
+
+ virtual void deleteSelf() = 0;
+
+private:
+ unsigned int refCount;
+
+ // noncopyable
+ PtrOwner(const PtrOwner&);
+ PtrOwner& operator = (const PtrOwner&);
+};
+
+template<typename Y, typename D>
+struct PtrOwnerImpl : PtrOwner
+{
+ PtrOwnerImpl(Y* p, D d) : owned(p), deleter(d)
+ {}
+
+ void deleteSelf()
+ {
+ deleter(owned);
+ delete this;
+ }
+
+private:
+ Y* owned;
+ D deleter;
+};
+
+
+}
+
+template<typename T>
+Ptr<T>::Ptr() : owner(NULL), stored(NULL)
+{}
+
+template<typename T>
+template<typename Y>
+Ptr<T>::Ptr(Y* p)
+ : owner(p
+ ? new detail::PtrOwnerImpl<Y, DefaultDeleter<Y> >(p, DefaultDeleter<Y>())
+ : NULL),
+ stored(p)
+{}
+
+template<typename T>
+template<typename Y, typename D>
+Ptr<T>::Ptr(Y* p, D d)
+ : owner(p
+ ? new detail::PtrOwnerImpl<Y, D>(p, d)
+ : NULL),
+ stored(p)
+{}
+
+template<typename T>
+Ptr<T>::Ptr(const Ptr& o) : owner(o.owner), stored(o.stored)
+{
+ if (owner) owner->incRef();
+}
+
+template<typename T>
+template<typename Y>
+Ptr<T>::Ptr(const Ptr<Y>& o) : owner(o.owner), stored(o.stored)
+{
+ if (owner) owner->incRef();
+}
+
+template<typename T>
+template<typename Y>
+Ptr<T>::Ptr(const Ptr<Y>& o, T* p) : owner(o.owner), stored(p)
+{
+ if (owner) owner->incRef();
+}
+
+template<typename T>
+Ptr<T>::~Ptr()
+{
+ release();
+}
+
+template<typename T>
+Ptr<T>& Ptr<T>::operator = (const Ptr<T>& o)
+{
+ Ptr(o).swap(*this);
+ return *this;
+}
+
+template<typename T>
+template<typename Y>
+Ptr<T>& Ptr<T>::operator = (const Ptr<Y>& o)
+{
+ Ptr(o).swap(*this);
+ return *this;
+}
+
+template<typename T>
+void Ptr<T>::release()
+{
+ if (owner) owner->decRef();
+ owner = NULL;
+ stored = NULL;
+}
+
+template<typename T>
+template<typename Y>
+void Ptr<T>::reset(Y* p)
+{
+ Ptr(p).swap(*this);
+}
+
+template<typename T>
+template<typename Y, typename D>
+void Ptr<T>::reset(Y* p, D d)
+{
+ Ptr(p, d).swap(*this);
+}
+
+template<typename T>
+void Ptr<T>::swap(Ptr<T>& o)
+{
+ std::swap(owner, o.owner);
+ std::swap(stored, o.stored);
+}
+
+template<typename T>
+T* Ptr<T>::get() const
+{
+ return stored;
+}
+
+template<typename T>
+typename detail::RefOrVoid<T>::type Ptr<T>::operator * () const
+{
+ return *stored;
+}
+
+template<typename T>
+T* Ptr<T>::operator -> () const
+{
+ return stored;
+}
+
+template<typename T>
+Ptr<T>::operator T* () const
+{
+ return stored;
+}
+
+
+template<typename T>
+bool Ptr<T>::empty() const
+{
+ return !stored;
+}
+
+template<typename T>
+template<typename Y>
+Ptr<Y> Ptr<T>::staticCast() const
+{
+ return Ptr<Y>(*this, static_cast<Y*>(stored));
+}
+
+template<typename T>
+template<typename Y>
+Ptr<Y> Ptr<T>::constCast() const
+{
+ return Ptr<Y>(*this, const_cast<Y*>(stored));
+}
+
+template<typename T>
+template<typename Y>
+Ptr<Y> Ptr<T>::dynamicCast() const
+{
+ return Ptr<Y>(*this, dynamic_cast<Y*>(stored));
+}
+
+template<typename T>
+void swap(Ptr<T>& ptr1, Ptr<T>& ptr2){
+ ptr1.swap(ptr2);
+}
+
+template<typename T>
+bool operator == (const Ptr<T>& ptr1, const Ptr<T>& ptr2)
+{
+ return ptr1.get() == ptr2.get();
+}
+
+template<typename T>
+bool operator != (const Ptr<T>& ptr1, const Ptr<T>& ptr2)
+{
+ return ptr1.get() != ptr2.get();
+}
+
+template<typename T>
+Ptr<T> makePtr()
+{
+ return Ptr<T>(new T());
+}
+
+template<typename T, typename A1>
+Ptr<T> makePtr(const A1& a1)
+{
+ return Ptr<T>(new T(a1));
+}
+
+template<typename T, typename A1, typename A2>
+Ptr<T> makePtr(const A1& a1, const A2& a2)
+{
+ return Ptr<T>(new T(a1, a2));
+}
+
+template<typename T, typename A1, typename A2, typename A3>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3)
+{
+ return Ptr<T>(new T(a1, a2, a3));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4, a5));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4, a5, a6));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4, a5, a6, a7));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4, a5, a6, a7, a8));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4, a5, a6, a7, a8, a9));
+}
+
+template<typename T, typename A1, typename A2, typename A3, typename A4, typename A5, typename A6, typename A7, typename A8, typename A9, typename A10>
+Ptr<T> makePtr(const A1& a1, const A2& a2, const A3& a3, const A4& a4, const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, const A10& a10)
+{
+ return Ptr<T>(new T(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10));
+}
+
+} // namespace cv
+
+#endif // __OPENCV_CORE_PTR_INL_HPP__
size_t hash() const;
//! converts vector of keypoints to vector of points
- static void convert(const std::vector<KeyPoint>& keypoints,
- CV_OUT std::vector<Point2f>& points2f,
- const std::vector<int>& keypointIndexes=std::vector<int>());
+ CV_WRAP static void convert(const std::vector<KeyPoint>& keypoints,
+ CV_OUT std::vector<Point2f>& points2f,
+ const std::vector<int>& keypointIndexes=std::vector<int>());
//! converts vector of points to the vector of keypoints, where each keypoint is assigned the same size and the same orientation
- static void convert(const std::vector<Point2f>& points2f,
- CV_OUT std::vector<KeyPoint>& keypoints,
- float size=1, float response=1, int octave=0, int class_id=-1);
+ CV_WRAP static void convert(const std::vector<Point2f>& points2f,
+ CV_OUT std::vector<KeyPoint>& keypoints,
+ float size=1, float response=1, int octave=0, int class_id=-1);
//! computes overlap for pair of keypoints;
//! overlap is a ratio between area of keypoint regions intersection and
//! area of keypoint regions union (now keypoint region is circle)
- static float overlap(const KeyPoint& kp1, const KeyPoint& kp2);
+ CV_WRAP static float overlap(const KeyPoint& kp1, const KeyPoint& kp2);
CV_PROP_RW Point2f pt; //!< coordinates of the keypoints
CV_PROP_RW float size; //!< diameter of the meaningful keypoint neighborhood
Algorithm::Constructor c = 0;
if( !alglist().find(name, c) )
return Ptr<Algorithm>();
- return c();
+ return Ptr<Algorithm>(c());
}
Algorithm::Algorithm()
else if( p.type == Param::ALGORITHM )
{
Ptr<Algorithm> nestedAlgo = Algorithm::_create((String)n["name"]);
- CV_Assert( !nestedAlgo.empty() );
+ CV_Assert( nestedAlgo );
nestedAlgo->read(n);
info->set(algo, pname.c_str(), p.type, &nestedAlgo, true);
}
namespace cv
{
-template<> void Ptr<CvMat>::delete_obj()
+template<> void DefaultDeleter<CvMat>::operator ()(CvMat* obj) const
{ cvReleaseMat(&obj); }
-template<> void Ptr<IplImage>::delete_obj()
+template<> void DefaultDeleter<IplImage>::operator ()(IplImage* obj) const
{ cvReleaseImage(&obj); }
-template<> void Ptr<CvMatND>::delete_obj()
+template<> void DefaultDeleter<CvMatND>::operator ()(CvMatND* obj) const
{ cvReleaseMatND(&obj); }
-template<> void Ptr<CvSparseMat>::delete_obj()
+template<> void DefaultDeleter<CvSparseMat>::operator ()(CvSparseMat* obj) const
{ cvReleaseSparseMat(&obj); }
-template<> void Ptr<CvMemStorage>::delete_obj()
+template<> void DefaultDeleter<CvMemStorage>::operator ()(CvMemStorage* obj) const
{ cvReleaseMemStorage(&obj); }
-template<> void Ptr<CvFileStorage>::delete_obj()
+template<> void DefaultDeleter<CvFileStorage>::operator ()(CvFileStorage* obj) const
{ cvReleaseFileStorage(&obj); }
}
#ifndef HAVE_CUDA
throw_no_cuda();
#else
- impl_ = new Impl;
+ impl_ = makePtr<Impl>();
#endif
}
Stream& cv::cuda::Stream::Null()
{
- static Stream s(new Impl(0));
+ static Stream s(Ptr<Impl>(new Impl(0)));
return s;
}
#endif
}
-template <> void cv::Ptr<Stream::Impl>::delete_obj()
-{
- if (obj) delete obj;
-}
////////////////////////////////////////////////////////////////
// Stream
(void) flags;
throw_no_cuda();
#else
- impl_ = new Impl(flags);
+ impl_ = makePtr<Impl>(flags);
#endif
}
return ms;
#endif
}
-
-template <> void cv::Ptr<Event::Impl>::delete_obj()
-{
- if (obj) delete obj;
-}
#endif
}
-template <> void cv::Ptr<cv::ogl::Buffer::Impl>::delete_obj()
-{
- if (obj) delete obj;
-}
//////////////////////////////////////////////////////////////////////////////////////////
// ogl::Texture
#endif
}
-template <> void cv::Ptr<cv::ogl::Texture2D::Impl>::delete_obj()
-{
- if (obj) delete obj;
-}
////////////////////////////////////////////////////////////////////////
// ogl::Arrays
cv::Ptr<cv::Formatted> format(const cv::Mat& mtx) const
{
char braces[5] = {'\0', '\0', ';', '\0', '\0'};
- return new FormattedImpl("[", "]", mtx, braces,
+ return cv::makePtr<FormattedImpl>("[", "]", mtx, &*braces,
mtx.cols == 1 || !multiline, mtx.depth() == CV_64F ? prec64f : prec32f );
}
};
char braces[5] = {'[', ']', '\0', '[', ']'};
if (mtx.cols == 1)
braces[0] = braces[1] = '\0';
- return new FormattedImpl("[", "]", mtx, braces,
+ return cv::makePtr<FormattedImpl>("[", "]", mtx, &*braces,
mtx.cols*mtx.channels() == 1 || !multiline, mtx.depth() == CV_64F ? prec64f : prec32f );
}
};
char braces[5] = {'[', ']', '\0', '[', ']'};
if (mtx.cols == 1)
braces[0] = braces[1] = '\0';
- return new FormattedImpl("array([", cv::format("], type='%s')", numpyTypes[mtx.depth()]), mtx, braces,
+ return cv::makePtr<FormattedImpl>("array([",
+ cv::format("], type='%s')", numpyTypes[mtx.depth()]), mtx, &*braces,
mtx.cols*mtx.channels() == 1 || !multiline, mtx.depth() == CV_64F ? prec64f : prec32f );
}
};
cv::Ptr<cv::Formatted> format(const cv::Mat& mtx) const
{
char braces[5] = {'\0', '\0', '\0', '\0', '\0'};
- return new FormattedImpl(cv::String(), mtx.rows > 1 ? cv::String("\n") : cv::String(), mtx, braces,
+ return cv::makePtr<FormattedImpl>(cv::String(),
+ mtx.rows > 1 ? cv::String("\n") : cv::String(), mtx, &*braces,
mtx.cols*mtx.channels() == 1 || !multiline, mtx.depth() == CV_64F ? prec64f : prec32f );
}
};
cv::Ptr<cv::Formatted> format(const cv::Mat& mtx) const
{
char braces[5] = {'\0', '\0', ',', '\0', '\0'};
- return new FormattedImpl("{", "}", mtx, braces,
+ return cv::makePtr<FormattedImpl>("{", "}", mtx, &*braces,
mtx.cols == 1 || !multiline, mtx.depth() == CV_64F ? prec64f : prec32f );
}
};
switch(fmt)
{
case FMT_MATLAB:
- return new MatlabFormatter();
+ return makePtr<MatlabFormatter>();
case FMT_CSV:
- return new CSVFormatter();
+ return makePtr<CSVFormatter>();
case FMT_PYTHON:
- return new PythonFormatter();
+ return makePtr<PythonFormatter>();
case FMT_NUMPY:
- return new NumpyFormatter();
+ return makePtr<NumpyFormatter>();
case FMT_C:
- return new CFormatter();
+ return makePtr<CFormatter>();
}
- return new MatlabFormatter();
+ return makePtr<MatlabFormatter>();
}
} // cv
open( filename, flags, encoding );
}
-FileStorage::FileStorage(CvFileStorage* _fs)
+FileStorage::FileStorage(CvFileStorage* _fs, bool owning)
{
- fs = Ptr<CvFileStorage>(_fs);
+ if (owning) fs.reset(_fs);
+ else fs = Ptr<CvFileStorage>(Ptr<CvFileStorage>(), _fs);
+
state = _fs ? NAME_EXPECTED + INSIDE_MAP : UNDEFINED;
}
bool FileStorage::open(const String& filename, int flags, const String& encoding)
{
release();
- fs = Ptr<CvFileStorage>(cvOpenFileStorage( filename.c_str(), 0, flags,
- !encoding.empty() ? encoding.c_str() : 0));
+ fs.reset(cvOpenFileStorage( filename.c_str(), 0, flags,
+ !encoding.empty() ? encoding.c_str() : 0));
bool ok = isOpened();
state = ok ? NAME_EXPECTED + INSIDE_MAP : UNDEFINED;
return ok;
bool FileStorage::isOpened() const
{
- return !fs.empty() && fs.obj->is_opened;
+ return fs && fs->is_opened;
}
void FileStorage::release()
String FileStorage::releaseAndGetString()
{
String buf;
- if( fs.obj && fs.obj->outbuf )
- icvClose(fs.obj, &buf);
+ if( fs && fs->outbuf )
+ icvClose(fs, &buf);
release();
return buf;
// TODO: the 4 functions below need to be implemented more efficiently
void write( FileStorage& fs, const String& name, const SparseMat& value )
{
- Ptr<CvSparseMat> mat = cvCreateSparseMat(value);
+ Ptr<CvSparseMat> mat(cvCreateSparseMat(value));
cvWrite( *fs, name.size() ? name.c_str() : 0, mat );
}
default_mat.copyTo(mat);
return;
}
- Ptr<CvSparseMat> m = (CvSparseMat*)cvRead((CvFileStorage*)node.fs, (CvFileNode*)*node);
- CV_Assert(CV_IS_SPARSE_MAT(m.obj));
+ Ptr<CvSparseMat> m((CvSparseMat*)cvRead((CvFileStorage*)node.fs, (CvFileNode*)*node));
+ CV_Assert(CV_IS_SPARSE_MAT(m));
m->copyToSparseMat(mat);
}
iterations = max_struct_size*2;
gen = struct_idx = iter = -1;
test_progress = -1;
-
- storage = 0;
}
{
t = cvtest::randReal(rng)*(max_log_storage_block_size - min_log_storage_block_size)
+ min_log_storage_block_size;
- storage = cvCreateMemStorage( cvRound( exp(t * CV_LOG2) ) );
+ storage.reset(cvCreateMemStorage( cvRound( exp(t * CV_LOG2) ) ));
}
iter = struct_idx = -1;
{
struct_idx = iter = -1;
- if( storage.empty() )
+ if( !storage )
{
t = cvtest::randReal(rng)*(max_log_storage_block_size - min_log_storage_block_size)
+ min_log_storage_block_size;
- storage = cvCreateMemStorage( cvRound( exp(t * CV_LOG2) ) );
+ storage.reset(cvCreateMemStorage( cvRound( exp(t * CV_LOG2) ) ));
}
for( iter = 0; iter < iterations/10; iter++ )
{
struct_idx = iter = -1;
t = cvtest::randReal(rng)*(max_log_storage_block_size - min_log_storage_block_size) + min_log_storage_block_size;
- storage = cvCreateMemStorage( cvRound( exp(t * CV_LOG2) ) );
+ storage.reset(cvCreateMemStorage( cvRound( exp(t * CV_LOG2) ) ));
for( int i = 0; i < struct_count; i++ )
{
cvTsReleaseSimpleSet( (CvTsSimpleSet**)&simple_struct[i] );
simple_struct[i] = cvTsCreateSimpleSet( max_struct_size, pure_elem_size );
- cxcore_struct[i] = cvCreateSet( 0, sizeof(CvSet), elem_size, storage );
+ cxcore_struct[i] = cvCreateSet( 0, sizeof(CvSet), elem_size, storage );
}
if( test_set_ops( iterations*100 ) < 0 )
int block_size = cvRound( exp(t * CV_LOG2) );
block_size = MAX(block_size, (int)(sizeof(CvGraph) + sizeof(CvMemBlock) + sizeof(CvSeqBlock)));
- storage = cvCreateMemStorage(block_size);
+ storage.reset(cvCreateMemStorage(block_size));
for( i = 0; i < struct_count; i++ )
{
storage_blocksize = MAX(storage_blocksize, (int)(sizeof(CvGraph) + sizeof(CvMemBlock) + sizeof(CvSeqBlock)));
storage_blocksize = MAX(storage_blocksize, (int)(sizeof(CvGraphEdge) + sizeof(CvMemBlock) + sizeof(CvSeqBlock)));
storage_blocksize = MAX(storage_blocksize, (int)(sizeof(CvGraphVtx) + sizeof(CvMemBlock) + sizeof(CvSeqBlock)));
- storage = cvCreateMemStorage(storage_blocksize);
+ storage.reset(cvCreateMemStorage(storage_blocksize));
if( gen == 0 )
{
cvRelease((void**)&m_nd);
- Ptr<CvSparseMat> m_s = (CvSparseMat*)fs["test_sparse_mat"].readObj();
- Ptr<CvSparseMat> _test_sparse_ = cvCreateSparseMat(test_sparse_mat);
- Ptr<CvSparseMat> _test_sparse = (CvSparseMat*)cvClone(_test_sparse_);
+ Ptr<CvSparseMat> m_s((CvSparseMat*)fs["test_sparse_mat"].readObj());
+ Ptr<CvSparseMat> _test_sparse_(cvCreateSparseMat(test_sparse_mat));
+ Ptr<CvSparseMat> _test_sparse((CvSparseMat*)cvClone(_test_sparse_));
SparseMat m_s2;
fs["test_sparse_mat"] >> m_s2;
- Ptr<CvSparseMat> _m_s2 = cvCreateSparseMat(m_s2);
+ Ptr<CvSparseMat> _m_s2(cvCreateSparseMat(m_s2));
if( !m_s || !CV_IS_SPARSE_MAT(m_s) ||
- !cvTsCheckSparse(m_s, _test_sparse,0) ||
- !cvTsCheckSparse(_m_s2, _test_sparse,0))
+ !cvTsCheckSparse(m_s, _test_sparse, 0) ||
+ !cvTsCheckSparse(_m_s2, _test_sparse, 0))
{
ts->printf( cvtest::TS::LOG, "the read sparse matrix is not correct\n" );
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
cvSetReal3D(&matA, idx1[0], idx1[1], idx1[2], -val0);
cvSetND(&matB, idx0, val1);
cvSet3D(&matB, idx1[0], idx1[1], idx1[2], -val1);
- Ptr<CvMatND> matC = cvCloneMatND(&matB);
+ Ptr<CvMatND> matC(cvCloneMatND(&matB));
if( A.at<float>(idx0[0], idx0[1], idx0[2]) != val0 ||
A.at<float>(idx1[0], idx1[1], idx1[2]) != -val0 ||
}
}
- Ptr<CvSparseMat> M2 = cvCreateSparseMat(M);
+ Ptr<CvSparseMat> M2(cvCreateSparseMat(M));
MatND Md;
M.copyTo(Md);
SparseMat M3; SparseMat(Md).convertTo(M3, Md.type(), 2);
--- /dev/null
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+// By downloading, copying, installing or using the software you agree to this license.
+// If you do not agree to this license, do not download, install,
+// copy or use the software.
+//
+//
+// License Agreement
+// For Open Source Computer Vision Library
+//
+// Copyright (C) 2013, NVIDIA Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+// * Redistribution's of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistribution's in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// * The name of the copyright holders may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the copyright holders or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "test_precomp.hpp"
+
+using namespace cv;
+
+namespace {
+
+struct Reporter {
+ Reporter(bool* deleted) : deleted_(deleted)
+ { *deleted_ = false; }
+
+ // the destructor is virtual, so that we can test dynamic_cast later
+ virtual ~Reporter()
+ { *deleted_ = true; }
+
+private:
+ bool* deleted_;
+
+ Reporter(const Reporter&);
+ Reporter& operator = (const Reporter&);
+};
+
+struct ReportingDeleter {
+ ReportingDeleter(bool* deleted) : deleted_(deleted)
+ { *deleted_ = false; }
+
+ void operator()(void*)
+ { *deleted_ = true; }
+
+private:
+ bool* deleted_;
+};
+
+int dummyObject;
+
+}
+
+TEST(Core_Ptr, default_ctor)
+{
+ Ptr<int> p;
+ EXPECT_EQ(NULL, p.get());
+}
+
+TEST(Core_Ptr, owning_ctor)
+{
+ bool deleted = false;
+
+ {
+ Reporter* r = new Reporter(&deleted);
+ Ptr<void> p(r);
+ EXPECT_EQ(r, p.get());
+ }
+
+ EXPECT_TRUE(deleted);
+
+ {
+ Ptr<int> p(&dummyObject, ReportingDeleter(&deleted));
+ EXPECT_EQ(&dummyObject, p.get());
+ }
+
+ EXPECT_TRUE(deleted);
+
+ {
+ Ptr<void> p((void*)0, ReportingDeleter(&deleted));
+ EXPECT_EQ(NULL, p.get());
+ }
+
+ EXPECT_FALSE(deleted);
+}
+
+TEST(Core_Ptr, sharing_ctor)
+{
+ bool deleted = false;
+
+ {
+ Ptr<Reporter> p1(new Reporter(&deleted));
+ Ptr<Reporter> p2(p1);
+ EXPECT_EQ(p1.get(), p2.get());
+ p1.release();
+ EXPECT_FALSE(deleted);
+ }
+
+ EXPECT_TRUE(deleted);
+
+ {
+ Ptr<Reporter> p1(new Reporter(&deleted));
+ Ptr<void> p2(p1);
+ EXPECT_EQ(p1.get(), p2.get());
+ p1.release();
+ EXPECT_FALSE(deleted);
+ }
+
+ EXPECT_TRUE(deleted);
+
+ {
+ Ptr<Reporter> p1(new Reporter(&deleted));
+ Ptr<int> p2(p1, &dummyObject);
+ EXPECT_EQ(&dummyObject, p2.get());
+ p1.release();
+ EXPECT_FALSE(deleted);
+ }
+
+ EXPECT_TRUE(deleted);
+}
+
+TEST(Core_Ptr, assignment)
+{
+ bool deleted1 = false, deleted2 = false;
+
+ {
+ Ptr<Reporter> p1(new Reporter(&deleted1));
+ p1 = p1;
+ EXPECT_FALSE(deleted1);
+ }
+
+ EXPECT_TRUE(deleted1);
+
+ {
+ Ptr<Reporter> p1(new Reporter(&deleted1));
+ Ptr<Reporter> p2(new Reporter(&deleted2));
+ p2 = p1;
+ EXPECT_TRUE(deleted2);
+ EXPECT_EQ(p1.get(), p2.get());
+ p1.release();
+ EXPECT_FALSE(deleted1);
+ }
+
+ EXPECT_TRUE(deleted1);
+
+ {
+ Ptr<Reporter> p1(new Reporter(&deleted1));
+ Ptr<void> p2(new Reporter(&deleted2));
+ p2 = p1;
+ EXPECT_TRUE(deleted2);
+ EXPECT_EQ(p1.get(), p2.get());
+ p1.release();
+ EXPECT_FALSE(deleted1);
+ }
+
+ EXPECT_TRUE(deleted1);
+}
+
+TEST(Core_Ptr, release)
+{
+ bool deleted = false;
+
+ Ptr<Reporter> p1(new Reporter(&deleted));
+ p1.release();
+ EXPECT_TRUE(deleted);
+ EXPECT_EQ(NULL, p1.get());
+}
+
+TEST(Core_Ptr, reset)
+{
+ bool deleted_old = false, deleted_new = false;
+
+ {
+ Ptr<void> p(new Reporter(&deleted_old));
+ Reporter* r = new Reporter(&deleted_new);
+ p.reset(r);
+ EXPECT_TRUE(deleted_old);
+ EXPECT_EQ(r, p.get());
+ }
+
+ EXPECT_TRUE(deleted_new);
+
+ {
+ Ptr<void> p(new Reporter(&deleted_old));
+ p.reset(&dummyObject, ReportingDeleter(&deleted_new));
+ EXPECT_TRUE(deleted_old);
+ EXPECT_EQ(&dummyObject, p.get());
+ }
+
+ EXPECT_TRUE(deleted_new);
+}
+
+TEST(Core_Ptr, swap)
+{
+ bool deleted1 = false, deleted2 = false;
+
+ {
+ Reporter* r1 = new Reporter(&deleted1);
+ Reporter* r2 = new Reporter(&deleted2);
+ Ptr<Reporter> p1(r1), p2(r2);
+ p1.swap(p2);
+ EXPECT_EQ(r1, p2.get());
+ EXPECT_EQ(r2, p1.get());
+ EXPECT_FALSE(deleted1);
+ EXPECT_FALSE(deleted2);
+ p1.release();
+ EXPECT_TRUE(deleted2);
+ }
+
+ EXPECT_TRUE(deleted1);
+
+ {
+ Reporter* r1 = new Reporter(&deleted1);
+ Reporter* r2 = new Reporter(&deleted2);
+ Ptr<Reporter> p1(r1), p2(r2);
+ swap(p1, p2);
+ EXPECT_EQ(r1, p2.get());
+ EXPECT_EQ(r2, p1.get());
+ EXPECT_FALSE(deleted1);
+ EXPECT_FALSE(deleted2);
+ p1.release();
+ EXPECT_TRUE(deleted2);
+ }
+
+ EXPECT_TRUE(deleted1);
+}
+
+TEST(Core_Ptr, accessors)
+{
+ {
+ Ptr<int> p;
+ EXPECT_EQ(NULL, static_cast<int*>(p));
+ EXPECT_TRUE(p.empty());
+ }
+
+ {
+ Size* s = new Size();
+ Ptr<Size> p(s);
+ EXPECT_EQ(s, static_cast<Size*>(p));
+ EXPECT_EQ(s, &*p);
+ EXPECT_EQ(&s->width, &p->width);
+ EXPECT_FALSE(p.empty());
+ }
+}
+
+namespace {
+
+struct SubReporterBase {
+ virtual ~SubReporterBase() {}
+ int padding;
+};
+
+/* multiple inheritance, so that casts do something interesting */
+struct SubReporter : SubReporterBase, Reporter
+{
+ SubReporter(bool* deleted) : Reporter(deleted)
+ {}
+};
+
+}
+
+TEST(Core_Ptr, casts)
+{
+ bool deleted = false;
+
+ {
+ Ptr<const Reporter> p1(new Reporter(&deleted));
+ Ptr<Reporter> p2 = p1.constCast<Reporter>();
+ EXPECT_EQ(p1.get(), p2.get());
+ p1.release();
+ EXPECT_FALSE(deleted);
+ }
+
+ EXPECT_TRUE(deleted);
+
+ {
+ SubReporter* sr = new SubReporter(&deleted);
+ Ptr<Reporter> p1(sr);
+ // This next check isn't really for Ptr itself; it checks that Reporter
+ // is at a non-zero offset within SubReporter, so that the next
+ // check will give us more confidence that the cast actually did something.
+ EXPECT_NE(static_cast<void*>(sr), static_cast<void*>(p1.get()));
+ Ptr<SubReporter> p2 = p1.staticCast<SubReporter>();
+ EXPECT_EQ(sr, p2.get());
+ p1.release();
+ EXPECT_FALSE(deleted);
+ }
+
+ EXPECT_TRUE(deleted);
+
+ {
+ SubReporter* sr = new SubReporter(&deleted);
+ Ptr<Reporter> p1(sr);
+ EXPECT_NE(static_cast<void*>(sr), static_cast<void*>(p1.get()));
+ Ptr<void> p2 = p1.dynamicCast<void>();
+ EXPECT_EQ(sr, p2.get());
+ p1.release();
+ EXPECT_FALSE(deleted);
+ }
+
+ EXPECT_TRUE(deleted);
+
+ {
+ Ptr<Reporter> p1(new Reporter(&deleted));
+ Ptr<SubReporter> p2 = p1.dynamicCast<SubReporter>();
+ EXPECT_EQ(NULL, p2.get());
+ p1.release();
+ EXPECT_FALSE(deleted);
+ }
+
+ EXPECT_TRUE(deleted);
+}
+
+TEST(Core_Ptr, comparisons)
+{
+ Ptr<int> p1, p2(new int), p3(new int);
+ Ptr<int> p4(p2, p3.get());
+
+ // Not using EXPECT_EQ here, since none of them are really "expected" or "actual".
+ EXPECT_TRUE(p1 == p1);
+ EXPECT_TRUE(p2 == p2);
+ EXPECT_TRUE(p2 != p3);
+ EXPECT_TRUE(p2 != p4);
+ EXPECT_TRUE(p3 == p4);
+}
+
+TEST(Core_Ptr, make)
+{
+ bool deleted = true;
+
+ {
+ Ptr<void> p = makePtr<Reporter>(&deleted);
+ EXPECT_FALSE(deleted);
+ }
+
+ EXPECT_TRUE(deleted);
+}
+
+namespace {
+
+struct SpeciallyDeletable
+{
+ SpeciallyDeletable() : deleted(false)
+ {}
+ bool deleted;
+};
+
+}
+
+namespace cv {
+
+template<>
+void DefaultDeleter<SpeciallyDeletable>::operator()(SpeciallyDeletable * obj) const
+{ obj->deleted = true; }
+
+}
+
+TEST(Core_Ptr, specialized_deleter)
+{
+ SpeciallyDeletable sd;
+
+ { Ptr<void> p(&sd); }
+
+ ASSERT_TRUE(sd.deleted);
+}
ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), NCV_CUDA_ERROR);
// Load the classifier from file (assuming its size is about 1 mb) using a simple allocator
- gpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeDevice, static_cast<int>(devProp.textureAlignment));
- cpuCascadeAllocator = new NCVMemNativeAllocator(NCVMemoryTypeHostPinned, static_cast<int>(devProp.textureAlignment));
+ gpuCascadeAllocator = makePtr<NCVMemNativeAllocator>(NCVMemoryTypeDevice, static_cast<int>(devProp.textureAlignment));
+ cpuCascadeAllocator = makePtr<NCVMemNativeAllocator>(NCVMemoryTypeHostPinned, static_cast<int>(devProp.textureAlignment));
ncvAssertPrintReturn(gpuCascadeAllocator->isInitialized(), "Error creating cascade GPU allocator", NCV_CUDA_ERROR);
ncvAssertPrintReturn(cpuCascadeAllocator->isInitialized(), "Error creating cascade CPU allocator", NCV_CUDA_ERROR);
ncvStat = ncvHaarGetClassifierSize(classifierFile, haarNumStages, haarNumNodes, haarNumFeatures);
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error reading classifier size (check the file)", NCV_FILE_ERROR);
- h_haarStages = new NCVVectorAlloc<HaarStage64>(*cpuCascadeAllocator, haarNumStages);
- h_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*cpuCascadeAllocator, haarNumNodes);
- h_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*cpuCascadeAllocator, haarNumFeatures);
+ h_haarStages.reset (new NCVVectorAlloc<HaarStage64>(*cpuCascadeAllocator, haarNumStages));
+ h_haarNodes.reset (new NCVVectorAlloc<HaarClassifierNode128>(*cpuCascadeAllocator, haarNumNodes));
+ h_haarFeatures.reset(new NCVVectorAlloc<HaarFeature64>(*cpuCascadeAllocator, haarNumFeatures));
ncvAssertPrintReturn(h_haarStages->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
ncvAssertPrintReturn(h_haarNodes->isMemAllocated(), "Error in cascade CPU allocator", NCV_CUDA_ERROR);
ncvStat = ncvHaarLoadFromFile_host(classifierFile, haar, *h_haarStages, *h_haarNodes, *h_haarFeatures);
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "Error loading classifier", NCV_FILE_ERROR);
- d_haarStages = new NCVVectorAlloc<HaarStage64>(*gpuCascadeAllocator, haarNumStages);
- d_haarNodes = new NCVVectorAlloc<HaarClassifierNode128>(*gpuCascadeAllocator, haarNumNodes);
- d_haarFeatures = new NCVVectorAlloc<HaarFeature64>(*gpuCascadeAllocator, haarNumFeatures);
+ d_haarStages.reset (new NCVVectorAlloc<HaarStage64>(*gpuCascadeAllocator, haarNumStages));
+ d_haarNodes.reset (new NCVVectorAlloc<HaarClassifierNode128>(*gpuCascadeAllocator, haarNumNodes));
+ d_haarFeatures.reset(new NCVVectorAlloc<HaarFeature64>(*gpuCascadeAllocator, haarNumFeatures));
ncvAssertPrintReturn(d_haarStages->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
ncvAssertPrintReturn(d_haarNodes->isMemAllocated(), "Error in cascade GPU allocator", NCV_CUDA_ERROR);
ncvAssertReturnNcvStat(ncvStat);
ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR);
- gpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeDevice, gpuCounter.maxSize(), static_cast<int>(devProp.textureAlignment));
- cpuAllocator = new NCVMemStackAllocator(NCVMemoryTypeHostPinned, cpuCounter.maxSize(), static_cast<int>(devProp.textureAlignment));
+ gpuAllocator = makePtr<NCVMemStackAllocator>(NCVMemoryTypeDevice, gpuCounter.maxSize(), static_cast<int>(devProp.textureAlignment));
+ cpuAllocator = makePtr<NCVMemStackAllocator>(NCVMemoryTypeHostPinned, cpuCounter.maxSize(), static_cast<int>(devProp.textureAlignment));
ncvAssertPrintReturn(gpuAllocator->isInitialized(), "Error creating GPU memory allocator", NCV_CUDA_ERROR);
ncvAssertPrintReturn(cpuAllocator->isInitialized(), "Error creating CPU memory allocator", NCV_CUDA_ERROR);
CV_Error(Error::StsNotImplemented, "The library was build without CUFFT");
return Ptr<Convolution>();
#else
- return new ConvolutionImpl(user_block_size);
+ return makePtr<ConvolutionImpl>(user_block_size);
#endif
}
Ptr<LookUpTable> cv::cuda::createLookUpTable(InputArray lut)
{
- return new LookUpTableImpl(lut);
+ return makePtr<LookUpTableImpl>(lut);
}
////////////////////////////////////////////////////////////////////////
namespace cv
{
- template<> void Ptr<CvBGStatModel>::delete_obj()
+ template<> void DefaultDeleter<CvBGStatModel>::operator ()(CvBGStatModel* obj) const
{
cvReleaseBGStatModel(&obj);
}
Ptr<cuda::BackgroundSubtractorFGD> cv::cuda::createBackgroundSubtractorFGD(const FGDParams& params)
{
- return new FGDImpl(params);
+ return makePtr<FGDImpl>(params);
}
#endif // HAVE_CUDA
Ptr<cuda::BackgroundSubtractorGMG> cv::cuda::createBackgroundSubtractorGMG(int initializationFrames, double decisionThreshold)
{
- return new GMGImpl(initializationFrames, decisionThreshold);
+ return makePtr<GMGImpl>(initializationFrames, decisionThreshold);
}
#endif
Ptr<cuda::BackgroundSubtractorMOG> cv::cuda::createBackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma)
{
- return new MOGImpl(history, nmixtures, backgroundRatio, noiseSigma);
+ return makePtr<MOGImpl>(history, nmixtures, backgroundRatio, noiseSigma);
}
#endif
Ptr<cuda::BackgroundSubtractorMOG2> cv::cuda::createBackgroundSubtractorMOG2(int history, double varThreshold, bool detectShadows)
{
- return new MOG2Impl(history, varThreshold, detectShadows);
+ return makePtr<MOG2Impl>(history, varThreshold, detectShadows);
}
#endif
namespace cv
{
- template<> void Ptr<CvBGStatModel>::delete_obj()
+ template<> void DefaultDeleter<CvBGStatModel>::operator ()(CvBGStatModel* obj) const
{
cvReleaseBGStatModel(&obj);
}
#endif
}
-template <> void cv::Ptr<cv::cudacodec::detail::Thread::Impl>::delete_obj()
-{
- if (obj) delete obj;
-}
-
#endif // HAVE_NVCUVID
}}}
-namespace cv {
- template <> void Ptr<cv::cudacodec::detail::Thread::Impl>::delete_obj();
-}
-
#endif // __THREAD_WRAPPERS_HPP__
dstType = CV_MAKE_TYPE(CV_MAT_DEPTH(dstType), CV_MAT_CN(srcType));
- return new NPPBoxFilter(srcType, dstType, ksize, anchor, borderMode, borderVal);
+ return makePtr<NPPBoxFilter>(srcType, dstType, ksize, anchor, borderMode, borderVal);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
dstType = CV_MAKE_TYPE(CV_MAT_DEPTH(dstType), CV_MAT_CN(srcType));
- return new LinearFilter(srcType, dstType, kernel, anchor, borderMode, borderVal);
+ return makePtr<LinearFilter>(srcType, dstType, kernel, anchor, borderMode, borderVal);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
if (columnBorderMode < 0)
columnBorderMode = rowBorderMode;
- return new SeparableLinearFilter(srcType, dstType, rowKernel, columnKernel, anchor, rowBorderMode, columnBorderMode);
+ return makePtr<SeparableLinearFilter>(srcType, dstType, rowKernel, columnKernel, anchor, rowBorderMode, columnBorderMode);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
{
case MORPH_ERODE:
case MORPH_DILATE:
- return new MorphologyFilter(op, srcType, kernel, anchor, iterations);
+ return makePtr<MorphologyFilter>(op, srcType, kernel, anchor, iterations);
break;
case MORPH_OPEN:
- return new MorphologyOpenFilter(srcType, kernel, anchor, iterations);
+ return makePtr<MorphologyOpenFilter>(srcType, kernel, anchor, iterations);
break;
case MORPH_CLOSE:
- return new MorphologyCloseFilter(srcType, kernel, anchor, iterations);
+ return makePtr<MorphologyCloseFilter>(srcType, kernel, anchor, iterations);
break;
case MORPH_GRADIENT:
- return new MorphologyGradientFilter(srcType, kernel, anchor, iterations);
+ return makePtr<MorphologyGradientFilter>(srcType, kernel, anchor, iterations);
break;
case MORPH_TOPHAT:
- return new MorphologyTophatFilter(srcType, kernel, anchor, iterations);
+ return makePtr<MorphologyTophatFilter>(srcType, kernel, anchor, iterations);
break;
case MORPH_BLACKHAT:
- return new MorphologyBlackhatFilter(srcType, kernel, anchor, iterations);
+ return makePtr<MorphologyBlackhatFilter>(srcType, kernel, anchor, iterations);
break;
default:
namespace
{
- enum
+ enum RankType
{
RANK_MAX,
RANK_MIN
Ptr<Filter> cv::cuda::createBoxMaxFilter(int srcType, Size ksize, Point anchor, int borderMode, Scalar borderVal)
{
- return new NPPRankFilter(RANK_MAX, srcType, ksize, anchor, borderMode, borderVal);
+ return makePtr<NPPRankFilter>(RANK_MAX, srcType, ksize, anchor, borderMode, borderVal);
}
Ptr<Filter> cv::cuda::createBoxMinFilter(int srcType, Size ksize, Point anchor, int borderMode, Scalar borderVal)
{
- return new NPPRankFilter(RANK_MIN, srcType, ksize, anchor, borderMode, borderVal);
+ return makePtr<NPPRankFilter>(RANK_MIN, srcType, ksize, anchor, borderMode, borderVal);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
Ptr<Filter> cv::cuda::createRowSumFilter(int srcType, int dstType, int ksize, int anchor, int borderMode, Scalar borderVal)
{
- return new NppRowSumFilter(srcType, dstType, ksize, anchor, borderMode, borderVal);
+ return makePtr<NppRowSumFilter>(srcType, dstType, ksize, anchor, borderMode, borderVal);
}
namespace
Ptr<Filter> cv::cuda::createColumnSumFilter(int srcType, int dstType, int ksize, int anchor, int borderMode, Scalar borderVal)
{
- return new NppColumnSumFilter(srcType, dstType, ksize, anchor, borderMode, borderVal);
+ return makePtr<NppColumnSumFilter>(srcType, dstType, ksize, anchor, borderMode, borderVal);
}
#endif
Ptr<CannyEdgeDetector> cv::cuda::createCannyEdgeDetector(double low_thresh, double high_thresh, int apperture_size, bool L2gradient)
{
- return new CannyImpl(low_thresh, high_thresh, apperture_size, L2gradient);
+ return makePtr<CannyImpl>(low_thresh, high_thresh, apperture_size, L2gradient);
}
#endif /* !defined (HAVE_CUDA) */
Ptr<cuda::CornernessCriteria> cv::cuda::createHarrisCorner(int srcType, int blockSize, int ksize, double k, int borderType)
{
- return new Harris(srcType, blockSize, ksize, k, borderType);
+ return makePtr<Harris>(srcType, blockSize, ksize, k, borderType);
}
Ptr<cuda::CornernessCriteria> cv::cuda::createMinEigenValCorner(int srcType, int blockSize, int ksize, int borderType)
{
- return new MinEigenVal(srcType, blockSize, ksize, borderType);
+ return makePtr<MinEigenVal>(srcType, blockSize, ksize, borderType);
}
#endif /* !defined (HAVE_CUDA) */
Ptr<GeneralizedHoughBallard> cv::cuda::createGeneralizedHoughBallard()
{
- return new GeneralizedHoughBallardImpl;
+ return makePtr<GeneralizedHoughBallardImpl>();
}
// GeneralizedHoughGuil
Ptr<GeneralizedHoughGuil> cv::cuda::createGeneralizedHoughGuil()
{
- return new GeneralizedHoughGuilImpl;
+ return makePtr<GeneralizedHoughGuilImpl>();
}
#endif /* !defined (HAVE_CUDA) */
Ptr<cuda::CornersDetector> cv::cuda::createGoodFeaturesToTrackDetector(int srcType, int maxCorners, double qualityLevel, double minDistance,
int blockSize, bool useHarrisDetector, double harrisK)
{
- return new GoodFeaturesToTrackDetector(srcType, maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector, harrisK);
+ return Ptr<cuda::CornersDetector>(
+ new GoodFeaturesToTrackDetector(srcType, maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector, harrisK));
}
#endif /* !defined (HAVE_CUDA) */
cv::Ptr<cv::cuda::CLAHE> cv::cuda::createCLAHE(double clipLimit, cv::Size tileGridSize)
{
- return new CLAHE_Impl(clipLimit, tileGridSize.width, tileGridSize.height);
+ return makePtr<CLAHE_Impl>(clipLimit, tileGridSize.width, tileGridSize.height);
}
////////////////////////////////////////////////////////////////////////
Ptr<HoughCirclesDetector> cv::cuda::createHoughCirclesDetector(float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles)
{
- return new HoughCirclesDetectorImpl(dp, minDist, cannyThreshold, votesThreshold, minRadius, maxRadius, maxCircles);
+ return makePtr<HoughCirclesDetectorImpl>(dp, minDist, cannyThreshold, votesThreshold, minRadius, maxRadius, maxCircles);
}
#endif /* !defined (HAVE_CUDA) */
Ptr<HoughLinesDetector> cv::cuda::createHoughLinesDetector(float rho, float theta, int threshold, bool doSort, int maxLines)
{
- return new HoughLinesDetectorImpl(rho, theta, threshold, doSort, maxLines);
+ return makePtr<HoughLinesDetectorImpl>(rho, theta, threshold, doSort, maxLines);
}
#endif /* !defined (HAVE_CUDA) */
Ptr<HoughSegmentDetector> cv::cuda::createHoughSegmentDetector(float rho, float theta, int minLineLength, int maxLineGap, int maxLines)
{
- return new HoughSegmentDetectorImpl(rho, theta, minLineLength, maxLineGap, maxLines);
+ return makePtr<HoughSegmentDetectorImpl>(rho, theta, minLineLength, maxLineGap, maxLines);
}
#endif /* !defined (HAVE_CUDA) */
switch (method)
{
case TM_SQDIFF:
- return new Match_SQDIFF_32F;
+ return makePtr<Match_SQDIFF_32F>();
case TM_CCORR:
- return new Match_CCORR_32F(user_block_size);
+ return makePtr<Match_CCORR_32F>(user_block_size);
default:
CV_Error( Error::StsBadFlag, "Unsopported method" );
switch (method)
{
case TM_SQDIFF:
- return new Match_SQDIFF_8U(user_block_size);
+ return makePtr<Match_SQDIFF_8U>(user_block_size);
case TM_SQDIFF_NORMED:
- return new Match_SQDIFF_NORMED_8U(user_block_size);
+ return makePtr<Match_SQDIFF_NORMED_8U>(user_block_size);
case TM_CCORR:
- return new Match_CCORR_8U(user_block_size);
+ return makePtr<Match_CCORR_8U>(user_block_size);
case TM_CCORR_NORMED:
- return new Match_CCORR_NORMED_8U(user_block_size);
+ return makePtr<Match_CCORR_NORMED_8U>(user_block_size);
case TM_CCOEFF:
- return new Match_CCOEFF_8U(user_block_size);
+ return makePtr<Match_CCOEFF_8U>(user_block_size);
case TM_CCOEFF_NORMED:
- return new Match_CCOEFF_NORMED_8U(user_block_size);
+ return makePtr<Match_CCOEFF_NORMED_8U>(user_block_size);
default:
CV_Error( Error::StsBadFlag, "Unsopported method" );
haarClassifierNodes.resize(0);
haarFeatures.resize(0);
- cv::Ptr<CvHaarClassifierCascade> oldCascade = (CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0);
- if (oldCascade.empty())
+ cv::Ptr<CvHaarClassifierCascade> oldCascade((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0));
+ if (!oldCascade)
{
return NCV_HAAR_XML_LOADING_EXCEPTION;
}
Ptr<cuda::DisparityBilateralFilter> cv::cuda::createDisparityBilateralFilter(int ndisp, int radius, int iters)
{
- return new DispBilateralFilterImpl(ndisp, radius, iters);
+ return makePtr<DispBilateralFilterImpl>(ndisp, radius, iters);
}
#endif /* !defined (HAVE_CUDA) */
Ptr<cuda::StereoBM> cv::cuda::createStereoBM(int numDisparities, int blockSize)
{
- return new StereoBMImpl(numDisparities, blockSize);
+ return makePtr<StereoBMImpl>(numDisparities, blockSize);
}
#endif /* !defined (HAVE_CUDA) */
Ptr<cuda::StereoBeliefPropagation> cv::cuda::createStereoBeliefPropagation(int ndisp, int iters, int levels, int msg_type)
{
- return new StereoBPImpl(ndisp, iters, levels, msg_type);
+ return makePtr<StereoBPImpl>(ndisp, iters, levels, msg_type);
}
void cv::cuda::StereoBeliefPropagation::estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels)
Ptr<cuda::StereoConstantSpaceBP> cv::cuda::createStereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane, int msg_type)
{
- return new StereoCSBPImpl(ndisp, iters, levels, nr_plane, msg_type);
+ return makePtr<StereoCSBPImpl>(ndisp, iters, levels, nr_plane, msg_type);
}
void cv::cuda::StereoConstantSpaceBP::estimateRecommendedParams(int width, int height, int& ndisp, int& iters, int& levels, int& nr_plane)
throw_no_cuda();
return Ptr<ImagePyramid>();
#else
- return new ImagePyramidImpl(img, nLayers, stream);
+ return Ptr<ImagePyramid>(new ImagePyramidImpl(img, nLayers, stream));
#endif
}
* gridRows Grid rows count.
* gridCols Grid column count.
*/
- CV_WRAP GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector=0,
+ CV_WRAP GridAdaptedFeatureDetector( const Ptr<FeatureDetector>& detector=Ptr<FeatureDetector>(),
int maxTotalKeypoints=1000,
int gridRows=4, int gridCols=4 );
class CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher
{
public:
- CV_WRAP FlannBasedMatcher( const Ptr<flann::IndexParams>& indexParams=new flann::KDTreeIndexParams(),
- const Ptr<flann::SearchParams>& searchParams=new flann::SearchParams() );
+ CV_WRAP FlannBasedMatcher( const Ptr<flann::IndexParams>& indexParams=makePtr<flann::KDTreeIndexParams>(),
+ const Ptr<flann::SearchParams>& searchParams=makePtr<flann::SearchParams>() );
virtual void add( const std::vector<Mat>& descriptors );
virtual void clear();
scale_ = scale_in;
offset_ = offset_in;
// create an agast detector
- fast_9_16_ = new FastFeatureDetector(1, true, FastFeatureDetector::TYPE_9_16);
+ fast_9_16_ = makePtr<FastFeatureDetector>(1, true, FastFeatureDetector::TYPE_9_16);
makeOffsets(pixel_5_8_, (int)img_.step, 8);
makeOffsets(pixel_9_16_, (int)img_.step, 16);
}
offset_ = 0.5f * scale_ - 0.5f;
}
scores_ = cv::Mat::zeros(img_.rows, img_.cols, CV_8U);
- fast_9_16_ = new FastFeatureDetector(1, false, FastFeatureDetector::TYPE_9_16);
+ fast_9_16_ = makePtr<FastFeatureDetector>(1, false, FastFeatureDetector::TYPE_9_16);
makeOffsets(pixel_5_8_, (int)img_.step, 8);
makeOffsets(pixel_9_16_, (int)img_.step, 16);
}
{
size_t pos = String("Opponent").size();
String type = descriptorExtractorType.substr(pos);
- return new OpponentColorDescriptorExtractor(DescriptorExtractor::create(type));
+ return makePtr<OpponentColorDescriptorExtractor>(DescriptorExtractor::create(type));
}
return Algorithm::create<DescriptorExtractor>("Feature2D." + descriptorExtractorType);
OpponentColorDescriptorExtractor::OpponentColorDescriptorExtractor( const Ptr<DescriptorExtractor>& _descriptorExtractor ) :
descriptorExtractor(_descriptorExtractor)
{
- CV_Assert( !descriptorExtractor.empty() );
+ CV_Assert( descriptorExtractor );
}
static void convertBGRImageToOpponentColorSpace( const Mat& bgrImage, std::vector<Mat>& opponentChannels )
bool OpponentColorDescriptorExtractor::empty() const
{
- return descriptorExtractor.empty() || (DescriptorExtractor*)(descriptorExtractor)->empty();
+ return !descriptorExtractor || descriptorExtractor->empty();
}
}
{
if( detectorType.find("Grid") == 0 )
{
- return new GridAdaptedFeatureDetector(FeatureDetector::create(
+ return makePtr<GridAdaptedFeatureDetector>(FeatureDetector::create(
detectorType.substr(strlen("Grid"))));
}
if( detectorType.find("Pyramid") == 0 )
{
- return new PyramidAdaptedFeatureDetector(FeatureDetector::create(
+ return makePtr<PyramidAdaptedFeatureDetector>(FeatureDetector::create(
detectorType.substr(strlen("Pyramid"))));
}
if( detectorType.find("Dynamic") == 0 )
{
- return new DynamicAdaptedFeatureDetector(AdjusterAdapter::create(
+ return makePtr<DynamicAdaptedFeatureDetector>(AdjusterAdapter::create(
detectorType.substr(strlen("Dynamic"))));
}
bool GridAdaptedFeatureDetector::empty() const
{
- return detector.empty() || (FeatureDetector*)detector->empty();
+ return !detector || detector->empty();
}
struct ResponseComparator
bool PyramidAdaptedFeatureDetector::empty() const
{
- return detector.empty() || (FeatureDetector*)detector->empty();
+ return !detector || detector->empty();
}
void PyramidAdaptedFeatureDetector::detectImpl( const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask ) const
bool DynamicAdaptedFeatureDetector::empty() const
{
- return adjuster_.empty() || adjuster_->empty();
+ return !adjuster_ || adjuster_->empty();
}
void DynamicAdaptedFeatureDetector::detectImpl(const Mat& image, std::vector<KeyPoint>& keypoints, const Mat& mask) const
Ptr<AdjusterAdapter> FastAdjuster::clone() const
{
- Ptr<AdjusterAdapter> cloned_obj = new FastAdjuster( init_thresh_, nonmax_, min_thresh_, max_thresh_ );
+ Ptr<AdjusterAdapter> cloned_obj(new FastAdjuster( init_thresh_, nonmax_, min_thresh_, max_thresh_ ));
return cloned_obj;
}
Ptr<AdjusterAdapter> StarAdjuster::clone() const
{
- Ptr<AdjusterAdapter> cloned_obj = new StarAdjuster( init_thresh_, min_thresh_, max_thresh_ );
+ Ptr<AdjusterAdapter> cloned_obj(new StarAdjuster( init_thresh_, min_thresh_, max_thresh_ ));
return cloned_obj;
}
Ptr<AdjusterAdapter> SurfAdjuster::clone() const
{
- Ptr<AdjusterAdapter> cloned_obj = new SurfAdjuster( init_thresh_, min_thresh_, max_thresh_ );
+ Ptr<AdjusterAdapter> cloned_obj(new SurfAdjuster( init_thresh_, min_thresh_, max_thresh_ ));
return cloned_obj;
}
if( !detectorType.compare( "FAST" ) )
{
- adapter = new FastAdjuster();
+ adapter = makePtr<FastAdjuster>();
}
else if( !detectorType.compare( "STAR" ) )
{
- adapter = new StarAdjuster();
+ adapter = makePtr<StarAdjuster>();
}
else if( !detectorType.compare( "SURF" ) )
{
- adapter = new SurfAdjuster();
+ adapter = makePtr<SurfAdjuster>();
}
return adapter;
keypoints1 = _keypoints1 != 0 ? _keypoints1 : &buf1;
keypoints2 = _keypoints2 != 0 ? _keypoints2 : &buf2;
- if( (keypoints1->empty() || keypoints2->empty()) && fdetector.empty() )
+ if( (keypoints1->empty() || keypoints2->empty()) && !fdetector )
CV_Error( Error::StsBadArg, "fdetector must not be empty when keypoints1 or keypoints2 is empty" );
if( keypoints1->empty() )
if( keypoints1.empty() )
CV_Error( Error::StsBadArg, "keypoints1 must not be empty" );
- if( matches1to2->empty() && dmatcher.empty() )
+ if( matches1to2->empty() && !dmatcher )
CV_Error( Error::StsBadArg, "dmatch must not be empty when matches1to2 is empty" );
bool computeKeypoints2ByPrj = keypoints2.empty();
Ptr<DescriptorMatcher> BFMatcher::clone( bool emptyTrainData ) const
{
- BFMatcher* matcher = new BFMatcher(normType, crossCheck);
+ Ptr<BFMatcher> matcher = makePtr<BFMatcher>(normType, crossCheck);
if( !emptyTrainData )
{
matcher->trainDescCollection.resize(trainDescCollection.size());
*/
Ptr<DescriptorMatcher> DescriptorMatcher::create( const String& descriptorMatcherType )
{
- DescriptorMatcher* dm = 0;
+ Ptr<DescriptorMatcher> dm;
if( !descriptorMatcherType.compare( "FlannBased" ) )
{
- dm = new FlannBasedMatcher();
+ dm = makePtr<FlannBasedMatcher>();
}
else if( !descriptorMatcherType.compare( "BruteForce" ) ) // L2
{
- dm = new BFMatcher(NORM_L2);
+ dm = makePtr<BFMatcher>(int(NORM_L2)); // anonymous enums can't be template parameters
}
else if( !descriptorMatcherType.compare( "BruteForce-SL2" ) ) // Squared L2
{
- dm = new BFMatcher(NORM_L2SQR);
+ dm = makePtr<BFMatcher>(int(NORM_L2SQR));
}
else if( !descriptorMatcherType.compare( "BruteForce-L1" ) )
{
- dm = new BFMatcher(NORM_L1);
+ dm = makePtr<BFMatcher>(int(NORM_L1));
}
else if( !descriptorMatcherType.compare("BruteForce-Hamming") ||
!descriptorMatcherType.compare("BruteForce-HammingLUT") )
{
- dm = new BFMatcher(NORM_HAMMING);
+ dm = makePtr<BFMatcher>(int(NORM_HAMMING));
}
else if( !descriptorMatcherType.compare("BruteForce-Hamming(2)") )
{
- dm = new BFMatcher(NORM_HAMMING2);
+ dm = makePtr<BFMatcher>(int(NORM_HAMMING2));
}
else
CV_Error( Error::StsBadArg, "Unknown matcher name" );
FlannBasedMatcher::FlannBasedMatcher( const Ptr<flann::IndexParams>& _indexParams, const Ptr<flann::SearchParams>& _searchParams )
: indexParams(_indexParams), searchParams(_searchParams), addedDescCount(0)
{
- CV_Assert( !_indexParams.empty() );
- CV_Assert( !_searchParams.empty() );
+ CV_Assert( _indexParams );
+ CV_Assert( _searchParams );
}
void FlannBasedMatcher::add( const std::vector<Mat>& descriptors )
void FlannBasedMatcher::train()
{
- if( flannIndex.empty() || mergedDescriptors.size() < addedDescCount )
+ if( !flannIndex || mergedDescriptors.size() < addedDescCount )
{
mergedDescriptors.set( trainDescCollection );
- flannIndex = new flann::Index( mergedDescriptors.getDescriptors(), *indexParams );
+ flannIndex = makePtr<flann::Index>( mergedDescriptors.getDescriptors(), *indexParams );
}
}
void FlannBasedMatcher::read( const FileNode& fn)
{
- if (indexParams.empty())
- indexParams = new flann::IndexParams();
+ if (!indexParams)
+ indexParams = makePtr<flann::IndexParams>();
FileNode ip = fn["indexParams"];
CV_Assert(ip.type() == FileNode::SEQ);
};
}
- if (searchParams.empty())
- searchParams = new flann::SearchParams();
+ if (!searchParams)
+ searchParams = makePtr<flann::SearchParams>();
FileNode sp = fn["searchParams"];
CV_Assert(sp.type() == FileNode::SEQ);
Ptr<DescriptorMatcher> FlannBasedMatcher::clone( bool emptyTrainData ) const
{
- FlannBasedMatcher* matcher = new FlannBasedMatcher(indexParams, searchParams);
+ Ptr<FlannBasedMatcher> matcher = makePtr<FlannBasedMatcher>(indexParams, searchParams);
if( !emptyTrainData )
{
CV_Error( Error::StsNotImplemented, "deep clone functionality is not implemented, because "
Ptr<GenericDescriptorMatcher> descriptorMatcher =
Algorithm::create<GenericDescriptorMatcher>("DescriptorMatcher." + genericDescritptorMatcherType);
- if( !paramsFilename.empty() && !descriptorMatcher.empty() )
+ if( !paramsFilename.empty() && descriptorMatcher )
{
FileStorage fs = FileStorage( paramsFilename, FileStorage::READ );
if( fs.isOpened() )
const Ptr<DescriptorMatcher>& _matcher )
: extractor( _extractor ), matcher( _matcher )
{
- CV_Assert( !extractor.empty() && !matcher.empty() );
+ CV_Assert( extractor && matcher );
}
VectorDescriptorMatcher::~VectorDescriptorMatcher()
bool VectorDescriptorMatcher::empty() const
{
- return extractor.empty() || extractor->empty() ||
- matcher.empty() || matcher->empty();
+ return !extractor || extractor->empty() ||
+ !matcher || matcher->empty();
}
Ptr<GenericDescriptorMatcher> VectorDescriptorMatcher::clone( bool emptyTrainData ) const
{
// TODO clone extractor
- return new VectorDescriptorMatcher( extractor, matcher->clone(emptyTrainData) );
+ return makePtr<VectorDescriptorMatcher>( extractor, matcher->clone(emptyTrainData) );
}
}
void emptyDataTest()
{
- assert( !dextractor.empty() );
+ assert( dextractor );
// One image.
Mat image;
void regressionTest()
{
- assert( !dextractor.empty() );
+ assert( dextractor );
// Read the test image.
string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
void run(int)
{
createDescriptorExtractor();
- if( dextractor.empty() )
+ if( !dextractor )
{
ts->printf(cvtest::TS::LOG, "Descriptor extractor is empty.\n");
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
void CV_FeatureDetectorTest::run( int /*start_from*/ )
{
- if( fdetector.empty() )
+ if( !fdetector )
{
ts->printf( cvtest::TS::LOG, "Feature detector is empty.\n" );
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
virtual void run(int)
{
cv::initModule_features2d();
- CV_Assert(!detector.empty());
+ CV_Assert(detector);
string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
// Read the test image.
minKeyPointMatchesRatio(_minKeyPointMatchesRatio),
minAngleInliersRatio(_minAngleInliersRatio)
{
- CV_Assert(!featureDetector.empty());
+ CV_Assert(featureDetector);
}
protected:
normType(_normType),
minDescInliersRatio(_minDescInliersRatio)
{
- CV_Assert(!featureDetector.empty());
- CV_Assert(!descriptorExtractor.empty());
+ CV_Assert(featureDetector);
+ CV_Assert(descriptorExtractor);
}
protected:
minKeyPointMatchesRatio(_minKeyPointMatchesRatio),
minScaleInliersRatio(_minScaleInliersRatio)
{
- CV_Assert(!featureDetector.empty());
+ CV_Assert(featureDetector);
}
protected:
normType(_normType),
minDescInliersRatio(_minDescInliersRatio)
{
- CV_Assert(!featureDetector.empty());
- CV_Assert(!descriptorExtractor.empty());
+ CV_Assert(featureDetector);
+ CV_Assert(descriptorExtractor);
}
protected:
If window was created with OpenGL support, ``imshow`` also support :ocv:class:`ogl::Buffer` , :ocv:class:`ogl::Texture2D` and :ocv:class:`cuda::GpuMat` as input.
-.. note:: This function should be followed by ``waitKey`` function which displays the image for specified milliseconds. Otherwise, it won't display the image.
+.. note:: This function should be followed by ``waitKey`` function which displays the image for specified milliseconds. Otherwise, it won't display the image. For example, ``waitKey(0)`` will display the window infinitely until any keypress (it is suitable for image display). ``waitKey(25)`` will display a frame for 25 ms, after which display will be automatically closed. (If you put it in a loop to read videos, it will display the video frame-by-frame)
namedWindow
Ptr<CvVideoWriter> writer;
};
-template<> CV_EXPORTS void Ptr<CvCapture>::delete_obj();
-template<> CV_EXPORTS void Ptr<CvVideoWriter>::delete_obj();
+template<> CV_EXPORTS void DefaultDeleter<CvCapture>::operator ()(CvCapture* obj) const;
+template<> CV_EXPORTS void DefaultDeleter<CvVideoWriter>::operator ()(CvVideoWriter* obj) const;
} // cv
namespace cv
{
-template<> void Ptr<CvCapture>::delete_obj()
+template<> void DefaultDeleter<CvCapture>::operator ()(CvCapture* obj) const
{ cvReleaseCapture(&obj); }
-template<> void Ptr<CvVideoWriter>::delete_obj()
+template<> void DefaultDeleter<CvVideoWriter>::operator ()(CvVideoWriter* obj) const
{ cvReleaseVideoWriter(&obj); }
}
bool VideoCapture::open(const String& filename)
{
if (isOpened()) release();
- cap = cvCreateFileCapture(filename.c_str());
+ cap.reset(cvCreateFileCapture(filename.c_str()));
return isOpened();
}
bool VideoCapture::open(int device)
{
if (isOpened()) release();
- cap = cvCreateCameraCapture(device);
+ cap.reset(cvCreateCameraCapture(device));
return isOpened();
}
bool VideoWriter::open(const String& filename, int _fourcc, double fps, Size frameSize, bool isColor)
{
- writer = cvCreateVideoWriter(filename.c_str(), _fourcc, fps, frameSize, isColor);
+ writer.reset(cvCreateVideoWriter(filename.c_str(), _fourcc, fps, frameSize, isColor));
return isOpened();
}
ImageDecoder BmpDecoder::newDecoder() const
{
- return new BmpDecoder;
+ return makePtr<BmpDecoder>();
}
bool BmpDecoder::readHeader()
ImageEncoder BmpEncoder::newEncoder() const
{
- return new BmpEncoder;
+ return makePtr<BmpEncoder>();
}
bool BmpEncoder::write( const Mat& img, const std::vector<int>& )
ImageDecoder ExrDecoder::newDecoder() const
{
- return new ExrDecoder;
+ return makePtr<ExrDecoder>();
}
/////////////////////// ExrEncoder ///////////////////
ImageEncoder ExrEncoder::newEncoder() const
{
- return new ExrEncoder;
+ return makePtr<ExrEncoder>();
}
}
ImageDecoder JpegDecoder::newDecoder() const
{
- return new JpegDecoder;
+ return makePtr<JpegDecoder>();
}
bool JpegDecoder::readHeader()
ImageEncoder JpegEncoder::newEncoder() const
{
- return new JpegEncoder;
+ return makePtr<JpegEncoder>();
}
bool JpegEncoder::write( const Mat& img, const std::vector<int>& params )
ImageDecoder Jpeg2KDecoder::newDecoder() const
{
- return new Jpeg2KDecoder;
+ return makePtr<Jpeg2KDecoder>();
}
void Jpeg2KDecoder::close()
ImageEncoder Jpeg2KEncoder::newEncoder() const
{
- return new Jpeg2KEncoder;
+ return makePtr<Jpeg2KEncoder>();
}
bool Jpeg2KEncoder::isFormatSupported( int depth ) const
ImageDecoder PngDecoder::newDecoder() const
{
- return new PngDecoder;
+ return makePtr<PngDecoder>();
}
void PngDecoder::close()
ImageEncoder PngEncoder::newEncoder() const
{
- return new PngEncoder;
+ return makePtr<PngEncoder>();
}
ImageDecoder PxMDecoder::newDecoder() const
{
- return new PxMDecoder;
+ return makePtr<PxMDecoder>();
}
void PxMDecoder::close()
ImageEncoder PxMEncoder::newEncoder() const
{
- return new PxMEncoder;
+ return makePtr<PxMEncoder>();
}
ImageDecoder SunRasterDecoder::newDecoder() const
{
- return new SunRasterDecoder;
+ return makePtr<SunRasterDecoder>();
}
void SunRasterDecoder::close()
ImageEncoder SunRasterEncoder::newEncoder() const
{
- return new SunRasterEncoder;
+ return makePtr<SunRasterEncoder>();
}
SunRasterEncoder::~SunRasterEncoder()
ImageDecoder TiffDecoder::newDecoder() const
{
- return new TiffDecoder;
+ return makePtr<TiffDecoder>();
}
bool TiffDecoder::readHeader()
ImageEncoder TiffEncoder::newEncoder() const
{
- return new TiffEncoder;
+ return makePtr<TiffEncoder>();
}
bool TiffEncoder::isFormatSupported( int depth ) const
ImageDecoder WebPDecoder::newDecoder() const
{
- return new WebPDecoder;
+ return makePtr<WebPDecoder>();
}
bool WebPDecoder::readHeader()
ImageEncoder WebPEncoder::newEncoder() const
{
- return new WebPEncoder();
+ return makePtr<WebPEncoder>();
}
bool WebPEncoder::write(const Mat& img, const std::vector<int>& params)
{
ImageCodecInitializer()
{
- decoders.push_back( new BmpDecoder );
- encoders.push_back( new BmpEncoder );
+ decoders.push_back( makePtr<BmpDecoder>() );
+ encoders.push_back( makePtr<BmpEncoder>() );
#ifdef HAVE_JPEG
- decoders.push_back( new JpegDecoder );
- encoders.push_back( new JpegEncoder );
+ decoders.push_back( makePtr<JpegDecoder>() );
+ encoders.push_back( makePtr<JpegEncoder>() );
#endif
#ifdef HAVE_WEBP
- decoders.push_back( new WebPDecoder );
- encoders.push_back( new WebPEncoder );
+ decoders.push_back( makePtr<WebPDecoder>() );
+ encoders.push_back( makePtr<WebPEncoder>() );
#endif
- decoders.push_back( new SunRasterDecoder );
- encoders.push_back( new SunRasterEncoder );
- decoders.push_back( new PxMDecoder );
- encoders.push_back( new PxMEncoder );
+ decoders.push_back( makePtr<SunRasterDecoder>() );
+ encoders.push_back( makePtr<SunRasterEncoder>() );
+ decoders.push_back( makePtr<PxMDecoder>() );
+ encoders.push_back( makePtr<PxMEncoder>() );
#ifdef HAVE_TIFF
- decoders.push_back( new TiffDecoder );
+ decoders.push_back( makePtr<TiffDecoder>() );
#endif
- encoders.push_back( new TiffEncoder );
+ encoders.push_back( makePtr<TiffEncoder>() );
#ifdef HAVE_PNG
- decoders.push_back( new PngDecoder );
- encoders.push_back( new PngEncoder );
+ decoders.push_back( makePtr<PngDecoder>() );
+ encoders.push_back( makePtr<PngEncoder>() );
#endif
#ifdef HAVE_JASPER
- decoders.push_back( new Jpeg2KDecoder );
- encoders.push_back( new Jpeg2KEncoder );
+ decoders.push_back( makePtr<Jpeg2KDecoder>() );
+ encoders.push_back( makePtr<Jpeg2KEncoder>() );
#endif
#ifdef HAVE_OPENEXR
- decoders.push_back( new ExrDecoder );
- encoders.push_back( new ExrEncoder );
+ decoders.push_back( makePtr<ExrDecoder>() );
+ encoders.push_back( makePtr<ExrEncoder>() );
#endif
}
Mat temp, *data = &temp;
ImageDecoder decoder = findDecoder(filename);
- if( decoder.empty() )
+ if( !decoder )
return 0;
decoder->setSource(filename);
if( !decoder->readHeader() )
CV_Assert( image.channels() == 1 || image.channels() == 3 || image.channels() == 4 );
ImageEncoder encoder = findEncoder( filename );
- if( encoder.empty() )
+ if( !encoder )
CV_Error( CV_StsError, "could not find a writer for the specified extension" );
if( !encoder->isFormatSupported(image.depth()) )
String filename;
ImageDecoder decoder = findDecoder(buf);
- if( decoder.empty() )
+ if( !decoder )
return 0;
if( !decoder->setSource(buf) )
CV_Assert( channels == 1 || channels == 3 || channels == 4 );
ImageEncoder encoder = findEncoder( ext );
- if( encoder.empty() )
+ if( !encoder )
CV_Error( CV_StsError, "could not find encoder for the specified extension" );
if( !encoder->isFormatSupported(image.depth()) )
{
string file_path = src_dir+"video/big_buck_bunny."+ext[i];
- cap = cvCreateFileCapture(file_path.c_str());
- if (cap.empty())
+ cap.reset(cvCreateFileCapture(file_path.c_str()));
+ if (!cap)
{
ts->printf(cvtest::TS::LOG, "\nFile information (video %d): \n\nName: big_buck_bunny.%s\nFAILED\n\n", i+1, ext[i].c_str());
ts->printf(cvtest::TS::LOG, "Error: cannot read source video file.\n");
Point dstOfs = Point(0,0),
bool isolated = false);
//! returns true if the filter is separable
- bool isSeparable() const { return (const BaseFilter*)filter2D == 0; }
+ bool isSeparable() const { return !filter2D; }
//! returns the number
int remainingInputRows() const;
int remainingOutputRows() const;
cv::Ptr<cv::CLAHE> cv::createCLAHE(double clipLimit, cv::Size tileGridSize)
{
- return new CLAHE_Impl(clipLimit, tileGridSize.width, tileGridSize.height);
+ return makePtr<CLAHE_Impl>(clipLimit, tileGridSize.width, tileGridSize.height);
}
if( contourHeaderSize < (int)sizeof(CvContour))
CV_Error( CV_StsBadSize, "Contour header size must be >= sizeof(CvContour)" );
- storage00 = cvCreateChildMemStorage(storage);
- storage01 = cvCreateChildMemStorage(storage);
+ storage00.reset(cvCreateChildMemStorage(storage));
+ storage01.reset(cvCreateChildMemStorage(storage));
CvMat stub, *mat;
if( isSeparable() )
{
- CV_Assert( !rowFilter.empty() && !columnFilter.empty() );
+ CV_Assert( rowFilter && columnFilter );
ksize = Size(rowFilter->ksize, columnFilter->ksize);
anchor = Point(rowFilter->anchor, columnFilter->anchor);
}
rowCount = dstY = 0;
startY = startY0 = std::max(roi.y - anchor.y, 0);
endY = std::min(roi.y + roi.height + ksize.height - anchor.y - 1, wholeSize.height);
- if( !columnFilter.empty() )
+ if( columnFilter )
columnFilter->reset();
- if( !filter2D.empty() )
+ if( filter2D )
filter2D->reset();
return startY;
if( (symmetryType & (KERNEL_SYMMETRICAL|KERNEL_ASYMMETRICAL)) != 0 && ksize <= 5 )
{
if( sdepth == CV_8U && ddepth == CV_32S )
- return Ptr<BaseRowFilter>(new SymmRowSmallFilter<uchar, int, SymmRowSmallVec_8u32s>
- (kernel, anchor, symmetryType, SymmRowSmallVec_8u32s(kernel, symmetryType)));
+ return makePtr<SymmRowSmallFilter<uchar, int, SymmRowSmallVec_8u32s> >
+ (kernel, anchor, symmetryType, SymmRowSmallVec_8u32s(kernel, symmetryType));
if( sdepth == CV_32F && ddepth == CV_32F )
- return Ptr<BaseRowFilter>(new SymmRowSmallFilter<float, float, SymmRowSmallVec_32f>
- (kernel, anchor, symmetryType, SymmRowSmallVec_32f(kernel, symmetryType)));
+ return makePtr<SymmRowSmallFilter<float, float, SymmRowSmallVec_32f> >
+ (kernel, anchor, symmetryType, SymmRowSmallVec_32f(kernel, symmetryType));
}
if( sdepth == CV_8U && ddepth == CV_32S )
- return Ptr<BaseRowFilter>(new RowFilter<uchar, int, RowVec_8u32s>
- (kernel, anchor, RowVec_8u32s(kernel)));
+ return makePtr<RowFilter<uchar, int, RowVec_8u32s> >
+ (kernel, anchor, RowVec_8u32s(kernel));
if( sdepth == CV_8U && ddepth == CV_32F )
- return Ptr<BaseRowFilter>(new RowFilter<uchar, float, RowNoVec>(kernel, anchor));
+ return makePtr<RowFilter<uchar, float, RowNoVec> >(kernel, anchor);
if( sdepth == CV_8U && ddepth == CV_64F )
- return Ptr<BaseRowFilter>(new RowFilter<uchar, double, RowNoVec>(kernel, anchor));
+ return makePtr<RowFilter<uchar, double, RowNoVec> >(kernel, anchor);
if( sdepth == CV_16U && ddepth == CV_32F )
- return Ptr<BaseRowFilter>(new RowFilter<ushort, float, RowNoVec>(kernel, anchor));
+ return makePtr<RowFilter<ushort, float, RowNoVec> >(kernel, anchor);
if( sdepth == CV_16U && ddepth == CV_64F )
- return Ptr<BaseRowFilter>(new RowFilter<ushort, double, RowNoVec>(kernel, anchor));
+ return makePtr<RowFilter<ushort, double, RowNoVec> >(kernel, anchor);
if( sdepth == CV_16S && ddepth == CV_32F )
- return Ptr<BaseRowFilter>(new RowFilter<short, float, RowVec_16s32f>
- (kernel, anchor, RowVec_16s32f(kernel)));
+ return makePtr<RowFilter<short, float, RowVec_16s32f> >
+ (kernel, anchor, RowVec_16s32f(kernel));
if( sdepth == CV_16S && ddepth == CV_64F )
- return Ptr<BaseRowFilter>(new RowFilter<short, double, RowNoVec>(kernel, anchor));
+ return makePtr<RowFilter<short, double, RowNoVec> >(kernel, anchor);
if( sdepth == CV_32F && ddepth == CV_32F )
- return Ptr<BaseRowFilter>(new RowFilter<float, float, RowVec_32f>
- (kernel, anchor, RowVec_32f(kernel)));
+ return makePtr<RowFilter<float, float, RowVec_32f> >
+ (kernel, anchor, RowVec_32f(kernel));
if( sdepth == CV_32F && ddepth == CV_64F )
- return Ptr<BaseRowFilter>(new RowFilter<float, double, RowNoVec>(kernel, anchor));
+ return makePtr<RowFilter<float, double, RowNoVec> >(kernel, anchor);
if( sdepth == CV_64F && ddepth == CV_64F )
- return Ptr<BaseRowFilter>(new RowFilter<double, double, RowNoVec>(kernel, anchor));
+ return makePtr<RowFilter<double, double, RowNoVec> >(kernel, anchor);
CV_Error_( CV_StsNotImplemented,
("Unsupported combination of source format (=%d), and buffer format (=%d)",
srcType, bufType));
- return Ptr<BaseRowFilter>(0);
+ return Ptr<BaseRowFilter>();
}
if( !(symmetryType & (KERNEL_SYMMETRICAL|KERNEL_ASYMMETRICAL)) )
{
if( ddepth == CV_8U && sdepth == CV_32S )
- return Ptr<BaseColumnFilter>(new ColumnFilter<FixedPtCastEx<int, uchar>, ColumnNoVec>
- (kernel, anchor, delta, FixedPtCastEx<int, uchar>(bits)));
+ return makePtr<ColumnFilter<FixedPtCastEx<int, uchar>, ColumnNoVec> >
+ (kernel, anchor, delta, FixedPtCastEx<int, uchar>(bits));
if( ddepth == CV_8U && sdepth == CV_32F )
- return Ptr<BaseColumnFilter>(new ColumnFilter<Cast<float, uchar>, ColumnNoVec>(kernel, anchor, delta));
+ return makePtr<ColumnFilter<Cast<float, uchar>, ColumnNoVec> >(kernel, anchor, delta);
if( ddepth == CV_8U && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new ColumnFilter<Cast<double, uchar>, ColumnNoVec>(kernel, anchor, delta));
+ return makePtr<ColumnFilter<Cast<double, uchar>, ColumnNoVec> >(kernel, anchor, delta);
if( ddepth == CV_16U && sdepth == CV_32F )
- return Ptr<BaseColumnFilter>(new ColumnFilter<Cast<float, ushort>, ColumnNoVec>(kernel, anchor, delta));
+ return makePtr<ColumnFilter<Cast<float, ushort>, ColumnNoVec> >(kernel, anchor, delta);
if( ddepth == CV_16U && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new ColumnFilter<Cast<double, ushort>, ColumnNoVec>(kernel, anchor, delta));
+ return makePtr<ColumnFilter<Cast<double, ushort>, ColumnNoVec> >(kernel, anchor, delta);
if( ddepth == CV_16S && sdepth == CV_32F )
- return Ptr<BaseColumnFilter>(new ColumnFilter<Cast<float, short>, ColumnNoVec>(kernel, anchor, delta));
+ return makePtr<ColumnFilter<Cast<float, short>, ColumnNoVec> >(kernel, anchor, delta);
if( ddepth == CV_16S && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new ColumnFilter<Cast<double, short>, ColumnNoVec>(kernel, anchor, delta));
+ return makePtr<ColumnFilter<Cast<double, short>, ColumnNoVec> >(kernel, anchor, delta);
if( ddepth == CV_32F && sdepth == CV_32F )
- return Ptr<BaseColumnFilter>(new ColumnFilter<Cast<float, float>, ColumnNoVec>(kernel, anchor, delta));
+ return makePtr<ColumnFilter<Cast<float, float>, ColumnNoVec> >(kernel, anchor, delta);
if( ddepth == CV_64F && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new ColumnFilter<Cast<double, double>, ColumnNoVec>(kernel, anchor, delta));
+ return makePtr<ColumnFilter<Cast<double, double>, ColumnNoVec> >(kernel, anchor, delta);
}
else
{
if( ksize == 3 )
{
if( ddepth == CV_8U && sdepth == CV_32S )
- return Ptr<BaseColumnFilter>(new SymmColumnSmallFilter<
- FixedPtCastEx<int, uchar>, SymmColumnVec_32s8u>
+ return makePtr<SymmColumnSmallFilter<
+ FixedPtCastEx<int, uchar>, SymmColumnVec_32s8u> >
(kernel, anchor, delta, symmetryType, FixedPtCastEx<int, uchar>(bits),
- SymmColumnVec_32s8u(kernel, symmetryType, bits, delta)));
+ SymmColumnVec_32s8u(kernel, symmetryType, bits, delta));
if( ddepth == CV_16S && sdepth == CV_32S && bits == 0 )
- return Ptr<BaseColumnFilter>(new SymmColumnSmallFilter<Cast<int, short>,
- SymmColumnSmallVec_32s16s>(kernel, anchor, delta, symmetryType,
- Cast<int, short>(), SymmColumnSmallVec_32s16s(kernel, symmetryType, bits, delta)));
+ return makePtr<SymmColumnSmallFilter<Cast<int, short>,
+ SymmColumnSmallVec_32s16s> >(kernel, anchor, delta, symmetryType,
+ Cast<int, short>(), SymmColumnSmallVec_32s16s(kernel, symmetryType, bits, delta));
if( ddepth == CV_32F && sdepth == CV_32F )
- return Ptr<BaseColumnFilter>(new SymmColumnSmallFilter<
- Cast<float, float>,SymmColumnSmallVec_32f>
+ return makePtr<SymmColumnSmallFilter<
+ Cast<float, float>,SymmColumnSmallVec_32f> >
(kernel, anchor, delta, symmetryType, Cast<float, float>(),
- SymmColumnSmallVec_32f(kernel, symmetryType, 0, delta)));
+ SymmColumnSmallVec_32f(kernel, symmetryType, 0, delta));
}
if( ddepth == CV_8U && sdepth == CV_32S )
- return Ptr<BaseColumnFilter>(new SymmColumnFilter<FixedPtCastEx<int, uchar>, SymmColumnVec_32s8u>
+ return makePtr<SymmColumnFilter<FixedPtCastEx<int, uchar>, SymmColumnVec_32s8u> >
(kernel, anchor, delta, symmetryType, FixedPtCastEx<int, uchar>(bits),
- SymmColumnVec_32s8u(kernel, symmetryType, bits, delta)));
+ SymmColumnVec_32s8u(kernel, symmetryType, bits, delta));
if( ddepth == CV_8U && sdepth == CV_32F )
- return Ptr<BaseColumnFilter>(new SymmColumnFilter<Cast<float, uchar>, ColumnNoVec>
- (kernel, anchor, delta, symmetryType));
+ return makePtr<SymmColumnFilter<Cast<float, uchar>, ColumnNoVec> >
+ (kernel, anchor, delta, symmetryType);
if( ddepth == CV_8U && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new SymmColumnFilter<Cast<double, uchar>, ColumnNoVec>
- (kernel, anchor, delta, symmetryType));
+ return makePtr<SymmColumnFilter<Cast<double, uchar>, ColumnNoVec> >
+ (kernel, anchor, delta, symmetryType);
if( ddepth == CV_16U && sdepth == CV_32F )
- return Ptr<BaseColumnFilter>(new SymmColumnFilter<Cast<float, ushort>, ColumnNoVec>
- (kernel, anchor, delta, symmetryType));
+ return makePtr<SymmColumnFilter<Cast<float, ushort>, ColumnNoVec> >
+ (kernel, anchor, delta, symmetryType);
if( ddepth == CV_16U && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new SymmColumnFilter<Cast<double, ushort>, ColumnNoVec>
- (kernel, anchor, delta, symmetryType));
+ return makePtr<SymmColumnFilter<Cast<double, ushort>, ColumnNoVec> >
+ (kernel, anchor, delta, symmetryType);
if( ddepth == CV_16S && sdepth == CV_32S )
- return Ptr<BaseColumnFilter>(new SymmColumnFilter<Cast<int, short>, ColumnNoVec>
- (kernel, anchor, delta, symmetryType));
+ return makePtr<SymmColumnFilter<Cast<int, short>, ColumnNoVec> >
+ (kernel, anchor, delta, symmetryType);
if( ddepth == CV_16S && sdepth == CV_32F )
- return Ptr<BaseColumnFilter>(new SymmColumnFilter<Cast<float, short>, SymmColumnVec_32f16s>
+ return makePtr<SymmColumnFilter<Cast<float, short>, SymmColumnVec_32f16s> >
(kernel, anchor, delta, symmetryType, Cast<float, short>(),
- SymmColumnVec_32f16s(kernel, symmetryType, 0, delta)));
+ SymmColumnVec_32f16s(kernel, symmetryType, 0, delta));
if( ddepth == CV_16S && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new SymmColumnFilter<Cast<double, short>, ColumnNoVec>
- (kernel, anchor, delta, symmetryType));
+ return makePtr<SymmColumnFilter<Cast<double, short>, ColumnNoVec> >
+ (kernel, anchor, delta, symmetryType);
if( ddepth == CV_32F && sdepth == CV_32F )
- return Ptr<BaseColumnFilter>(new SymmColumnFilter<Cast<float, float>, SymmColumnVec_32f>
+ return makePtr<SymmColumnFilter<Cast<float, float>, SymmColumnVec_32f> >
(kernel, anchor, delta, symmetryType, Cast<float, float>(),
- SymmColumnVec_32f(kernel, symmetryType, 0, delta)));
+ SymmColumnVec_32f(kernel, symmetryType, 0, delta));
if( ddepth == CV_64F && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new SymmColumnFilter<Cast<double, double>, ColumnNoVec>
- (kernel, anchor, delta, symmetryType));
+ return makePtr<SymmColumnFilter<Cast<double, double>, ColumnNoVec> >
+ (kernel, anchor, delta, symmetryType);
}
CV_Error_( CV_StsNotImplemented,
("Unsupported combination of buffer format (=%d), and destination format (=%d)",
bufType, dstType));
- return Ptr<BaseColumnFilter>(0);
+ return Ptr<BaseColumnFilter>();
}
Ptr<BaseColumnFilter> _columnFilter = getLinearColumnFilter(
_bufType, _dstType, columnKernel, _anchor.y, ctype, _delta, bits );
- return Ptr<FilterEngine>( new FilterEngine(Ptr<BaseFilter>(0), _rowFilter, _columnFilter,
+ return Ptr<FilterEngine>( new FilterEngine(Ptr<BaseFilter>(), _rowFilter, _columnFilter,
_srcType, _dstType, _bufType, _rowBorderType, _columnBorderType, _borderValue ));
}
anchor = normalizeAnchor(anchor, _kernel.size());
/*if( sdepth == CV_8U && ddepth == CV_8U && kdepth == CV_32S )
- return Ptr<BaseFilter>(new Filter2D<uchar, FixedPtCastEx<int, uchar>, FilterVec_8u>
+ return makePtr<Filter2D<uchar, FixedPtCastEx<int, uchar>, FilterVec_8u> >
(_kernel, anchor, delta, FixedPtCastEx<int, uchar>(bits),
- FilterVec_8u(_kernel, bits, delta)));
+ FilterVec_8u(_kernel, bits, delta));
if( sdepth == CV_8U && ddepth == CV_16S && kdepth == CV_32S )
- return Ptr<BaseFilter>(new Filter2D<uchar, FixedPtCastEx<int, short>, FilterVec_8u16s>
+ return makePtr<Filter2D<uchar, FixedPtCastEx<int, short>, FilterVec_8u16s> >
(_kernel, anchor, delta, FixedPtCastEx<int, short>(bits),
- FilterVec_8u16s(_kernel, bits, delta)));*/
+ FilterVec_8u16s(_kernel, bits, delta));*/
kdepth = sdepth == CV_64F || ddepth == CV_64F ? CV_64F : CV_32F;
Mat kernel;
_kernel.convertTo(kernel, kdepth, _kernel.type() == CV_32S ? 1./(1 << bits) : 1.);
if( sdepth == CV_8U && ddepth == CV_8U )
- return Ptr<BaseFilter>(new Filter2D<uchar, Cast<float, uchar>, FilterVec_8u>
- (kernel, anchor, delta, Cast<float, uchar>(), FilterVec_8u(kernel, 0, delta)));
+ return makePtr<Filter2D<uchar, Cast<float, uchar>, FilterVec_8u> >
+ (kernel, anchor, delta, Cast<float, uchar>(), FilterVec_8u(kernel, 0, delta));
if( sdepth == CV_8U && ddepth == CV_16U )
- return Ptr<BaseFilter>(new Filter2D<uchar,
- Cast<float, ushort>, FilterNoVec>(kernel, anchor, delta));
+ return makePtr<Filter2D<uchar,
+ Cast<float, ushort>, FilterNoVec> >(kernel, anchor, delta);
if( sdepth == CV_8U && ddepth == CV_16S )
- return Ptr<BaseFilter>(new Filter2D<uchar, Cast<float, short>, FilterVec_8u16s>
- (kernel, anchor, delta, Cast<float, short>(), FilterVec_8u16s(kernel, 0, delta)));
+ return makePtr<Filter2D<uchar, Cast<float, short>, FilterVec_8u16s> >
+ (kernel, anchor, delta, Cast<float, short>(), FilterVec_8u16s(kernel, 0, delta));
if( sdepth == CV_8U && ddepth == CV_32F )
- return Ptr<BaseFilter>(new Filter2D<uchar,
- Cast<float, float>, FilterNoVec>(kernel, anchor, delta));
+ return makePtr<Filter2D<uchar,
+ Cast<float, float>, FilterNoVec> >(kernel, anchor, delta);
if( sdepth == CV_8U && ddepth == CV_64F )
- return Ptr<BaseFilter>(new Filter2D<uchar,
- Cast<double, double>, FilterNoVec>(kernel, anchor, delta));
+ return makePtr<Filter2D<uchar,
+ Cast<double, double>, FilterNoVec> >(kernel, anchor, delta);
if( sdepth == CV_16U && ddepth == CV_16U )
- return Ptr<BaseFilter>(new Filter2D<ushort,
- Cast<float, ushort>, FilterNoVec>(kernel, anchor, delta));
+ return makePtr<Filter2D<ushort,
+ Cast<float, ushort>, FilterNoVec> >(kernel, anchor, delta);
if( sdepth == CV_16U && ddepth == CV_32F )
- return Ptr<BaseFilter>(new Filter2D<ushort,
- Cast<float, float>, FilterNoVec>(kernel, anchor, delta));
+ return makePtr<Filter2D<ushort,
+ Cast<float, float>, FilterNoVec> >(kernel, anchor, delta);
if( sdepth == CV_16U && ddepth == CV_64F )
- return Ptr<BaseFilter>(new Filter2D<ushort,
- Cast<double, double>, FilterNoVec>(kernel, anchor, delta));
+ return makePtr<Filter2D<ushort,
+ Cast<double, double>, FilterNoVec> >(kernel, anchor, delta);
if( sdepth == CV_16S && ddepth == CV_16S )
- return Ptr<BaseFilter>(new Filter2D<short,
- Cast<float, short>, FilterNoVec>(kernel, anchor, delta));
+ return makePtr<Filter2D<short,
+ Cast<float, short>, FilterNoVec> >(kernel, anchor, delta);
if( sdepth == CV_16S && ddepth == CV_32F )
- return Ptr<BaseFilter>(new Filter2D<short,
- Cast<float, float>, FilterNoVec>(kernel, anchor, delta));
+ return makePtr<Filter2D<short,
+ Cast<float, float>, FilterNoVec> >(kernel, anchor, delta);
if( sdepth == CV_16S && ddepth == CV_64F )
- return Ptr<BaseFilter>(new Filter2D<short,
- Cast<double, double>, FilterNoVec>(kernel, anchor, delta));
+ return makePtr<Filter2D<short,
+ Cast<double, double>, FilterNoVec> >(kernel, anchor, delta);
if( sdepth == CV_32F && ddepth == CV_32F )
- return Ptr<BaseFilter>(new Filter2D<float, Cast<float, float>, FilterVec_32f>
- (kernel, anchor, delta, Cast<float, float>(), FilterVec_32f(kernel, 0, delta)));
+ return makePtr<Filter2D<float, Cast<float, float>, FilterVec_32f> >
+ (kernel, anchor, delta, Cast<float, float>(), FilterVec_32f(kernel, 0, delta));
if( sdepth == CV_64F && ddepth == CV_64F )
- return Ptr<BaseFilter>(new Filter2D<double,
- Cast<double, double>, FilterNoVec>(kernel, anchor, delta));
+ return makePtr<Filter2D<double,
+ Cast<double, double>, FilterNoVec> >(kernel, anchor, delta);
CV_Error_( CV_StsNotImplemented,
("Unsupported combination of source format (=%d), and destination format (=%d)",
srcType, dstType));
- return Ptr<BaseFilter>(0);
+ return Ptr<BaseFilter>();
}
Ptr<BaseFilter> _filter2D = getLinearFilter(_srcType, _dstType,
kernel, _anchor, _delta, bits);
- return Ptr<FilterEngine>(new FilterEngine(_filter2D, Ptr<BaseRowFilter>(0),
- Ptr<BaseColumnFilter>(0), _srcType, _dstType, _srcType,
- _rowBorderType, _columnBorderType, _borderValue ));
+ return makePtr<FilterEngine>(_filter2D, Ptr<BaseRowFilter>(),
+ Ptr<BaseColumnFilter>(), _srcType, _dstType, _srcType,
+ _rowBorderType, _columnBorderType, _borderValue );
}
Ptr<GeneralizedHoughBallard> cv::createGeneralizedHoughBallard()
{
- return new GeneralizedHoughBallardImpl;
+ return makePtr<GeneralizedHoughBallardImpl>();
}
// GeneralizedHoughGuil
Ptr<GeneralizedHoughGuil> cv::createGeneralizedHoughGuil()
{
- return new GeneralizedHoughGuilImpl;
+ return makePtr<GeneralizedHoughGuilImpl>();
}
float idp, dr;
CvSeqReader reader;
- edges = cvCreateMat( img->rows, img->cols, CV_8UC1 );
+ edges.reset(cvCreateMat( img->rows, img->cols, CV_8UC1 ));
cvCanny( img, edges, MAX(canny_threshold/2,1), canny_threshold, 3 );
- dx = cvCreateMat( img->rows, img->cols, CV_16SC1 );
- dy = cvCreateMat( img->rows, img->cols, CV_16SC1 );
+ dx.reset(cvCreateMat( img->rows, img->cols, CV_16SC1 ));
+ dy.reset(cvCreateMat( img->rows, img->cols, CV_16SC1 ));
cvSobel( img, dx, 1, 0, 3 );
cvSobel( img, dy, 0, 1, 3 );
if( dp < 1.f )
dp = 1.f;
idp = 1.f/dp;
- accum = cvCreateMat( cvCeil(img->rows*idp)+2, cvCeil(img->cols*idp)+2, CV_32SC1 );
+ accum.reset(cvCreateMat( cvCeil(img->rows*idp)+2, cvCeil(img->cols*idp)+2, CV_32SC1 ));
cvZero(accum);
- storage = cvCreateMemStorage();
+ storage.reset(cvCreateMemStorage());
nz = cvCreateSeq( CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage );
centers = cvCreateSeq( CV_32SC1, sizeof(CvSeq), sizeof(int), storage );
cvClearSeq( centers );
cvSeqPushMulti( centers, &sort_buf[0], center_count );
- dist_buf = cvCreateMat( 1, nz_count, CV_32FC1 );
+ dist_buf.reset(cvCreateMat( 1, nz_count, CV_32FC1 ));
ddata = dist_buf->data.fl;
dr = dp;
double param1, double param2,
int minRadius, int maxRadius )
{
- Ptr<CvMemStorage> storage = cvCreateMemStorage(STORAGE_SIZE);
+ Ptr<CvMemStorage> storage(cvCreateMemStorage(STORAGE_SIZE));
Mat image = _image.getMat();
CvMat c_image = image;
CvSeq* seq = cvHoughCircles( &c_image, storage, method,
ssize = cvGetMatSize(src);
dsize = cvGetMatSize(dst);
- mapx = cvCreateMat( dsize.height, dsize.width, CV_32F );
- mapy = cvCreateMat( dsize.height, dsize.width, CV_32F );
+ mapx.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));
+ mapy.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));
if( !(flags & CV_WARP_INVERSE_MAP) )
{
dsize.width = dst->cols;
dsize.height = dst->rows;
- mapx = cvCreateMat( dsize.height, dsize.width, CV_32F );
- mapy = cvCreateMat( dsize.height, dsize.width, CV_32F );
+ mapx.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));
+ mapy.reset(cvCreateMat( dsize.height, dsize.width, CV_32F ));
if( !(flags & CV_WARP_INVERSE_MAP) )
{
int _refine, double _scale, double _sigma_scale, double _quant, double _ang_th,
double _log_eps, double _density_th, int _n_bins)
{
- return Ptr<LineSegmentDetector>(new LineSegmentDetectorImpl(
+ return makePtr<LineSegmentDetectorImpl>(
_refine, _scale, _sigma_scale, _quant, _ang_th,
- _log_eps, _density_th, _n_bins));
+ _log_eps, _density_th, _n_bins);
}
/////////////////////////////////////////////////////////////////////////////////////////
if( op == MORPH_ERODE )
{
if( depth == CV_8U )
- return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<uchar>,
- ErodeRowVec8u>(ksize, anchor));
+ return makePtr<MorphRowFilter<MinOp<uchar>,
+ ErodeRowVec8u> >(ksize, anchor);
if( depth == CV_16U )
- return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<ushort>,
- ErodeRowVec16u>(ksize, anchor));
+ return makePtr<MorphRowFilter<MinOp<ushort>,
+ ErodeRowVec16u> >(ksize, anchor);
if( depth == CV_16S )
- return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<short>,
- ErodeRowVec16s>(ksize, anchor));
+ return makePtr<MorphRowFilter<MinOp<short>,
+ ErodeRowVec16s> >(ksize, anchor);
if( depth == CV_32F )
- return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<float>,
- ErodeRowVec32f>(ksize, anchor));
+ return makePtr<MorphRowFilter<MinOp<float>,
+ ErodeRowVec32f> >(ksize, anchor);
if( depth == CV_64F )
- return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<double>,
- ErodeRowVec64f>(ksize, anchor));
+ return makePtr<MorphRowFilter<MinOp<double>,
+ ErodeRowVec64f> >(ksize, anchor);
}
else
{
if( depth == CV_8U )
- return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<uchar>,
- DilateRowVec8u>(ksize, anchor));
+ return makePtr<MorphRowFilter<MaxOp<uchar>,
+ DilateRowVec8u> >(ksize, anchor);
if( depth == CV_16U )
- return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<ushort>,
- DilateRowVec16u>(ksize, anchor));
+ return makePtr<MorphRowFilter<MaxOp<ushort>,
+ DilateRowVec16u> >(ksize, anchor);
if( depth == CV_16S )
- return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<short>,
- DilateRowVec16s>(ksize, anchor));
+ return makePtr<MorphRowFilter<MaxOp<short>,
+ DilateRowVec16s> >(ksize, anchor);
if( depth == CV_32F )
- return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<float>,
- DilateRowVec32f>(ksize, anchor));
+ return makePtr<MorphRowFilter<MaxOp<float>,
+ DilateRowVec32f> >(ksize, anchor);
if( depth == CV_64F )
- return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<double>,
- DilateRowVec64f>(ksize, anchor));
+ return makePtr<MorphRowFilter<MaxOp<double>,
+ DilateRowVec64f> >(ksize, anchor);
}
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
- return Ptr<BaseRowFilter>(0);
+ return Ptr<BaseRowFilter>();
}
cv::Ptr<cv::BaseColumnFilter> cv::getMorphologyColumnFilter(int op, int type, int ksize, int anchor)
if( op == MORPH_ERODE )
{
if( depth == CV_8U )
- return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<uchar>,
- ErodeColumnVec8u>(ksize, anchor));
+ return makePtr<MorphColumnFilter<MinOp<uchar>,
+ ErodeColumnVec8u> >(ksize, anchor);
if( depth == CV_16U )
- return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<ushort>,
- ErodeColumnVec16u>(ksize, anchor));
+ return makePtr<MorphColumnFilter<MinOp<ushort>,
+ ErodeColumnVec16u> >(ksize, anchor);
if( depth == CV_16S )
- return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<short>,
- ErodeColumnVec16s>(ksize, anchor));
+ return makePtr<MorphColumnFilter<MinOp<short>,
+ ErodeColumnVec16s> >(ksize, anchor);
if( depth == CV_32F )
- return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<float>,
- ErodeColumnVec32f>(ksize, anchor));
+ return makePtr<MorphColumnFilter<MinOp<float>,
+ ErodeColumnVec32f> >(ksize, anchor);
if( depth == CV_64F )
- return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<double>,
- ErodeColumnVec64f>(ksize, anchor));
+ return makePtr<MorphColumnFilter<MinOp<double>,
+ ErodeColumnVec64f> >(ksize, anchor);
}
else
{
if( depth == CV_8U )
- return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<uchar>,
- DilateColumnVec8u>(ksize, anchor));
+ return makePtr<MorphColumnFilter<MaxOp<uchar>,
+ DilateColumnVec8u> >(ksize, anchor);
if( depth == CV_16U )
- return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<ushort>,
- DilateColumnVec16u>(ksize, anchor));
+ return makePtr<MorphColumnFilter<MaxOp<ushort>,
+ DilateColumnVec16u> >(ksize, anchor);
if( depth == CV_16S )
- return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<short>,
- DilateColumnVec16s>(ksize, anchor));
+ return makePtr<MorphColumnFilter<MaxOp<short>,
+ DilateColumnVec16s> >(ksize, anchor);
if( depth == CV_32F )
- return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<float>,
- DilateColumnVec32f>(ksize, anchor));
+ return makePtr<MorphColumnFilter<MaxOp<float>,
+ DilateColumnVec32f> >(ksize, anchor);
if( depth == CV_64F )
- return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<double>,
- DilateColumnVec64f>(ksize, anchor));
+ return makePtr<MorphColumnFilter<MaxOp<double>,
+ DilateColumnVec64f> >(ksize, anchor);
}
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
- return Ptr<BaseColumnFilter>(0);
+ return Ptr<BaseColumnFilter>();
}
if( op == MORPH_ERODE )
{
if( depth == CV_8U )
- return Ptr<BaseFilter>(new MorphFilter<MinOp<uchar>, ErodeVec8u>(kernel, anchor));
+ return makePtr<MorphFilter<MinOp<uchar>, ErodeVec8u> >(kernel, anchor);
if( depth == CV_16U )
- return Ptr<BaseFilter>(new MorphFilter<MinOp<ushort>, ErodeVec16u>(kernel, anchor));
+ return makePtr<MorphFilter<MinOp<ushort>, ErodeVec16u> >(kernel, anchor);
if( depth == CV_16S )
- return Ptr<BaseFilter>(new MorphFilter<MinOp<short>, ErodeVec16s>(kernel, anchor));
+ return makePtr<MorphFilter<MinOp<short>, ErodeVec16s> >(kernel, anchor);
if( depth == CV_32F )
- return Ptr<BaseFilter>(new MorphFilter<MinOp<float>, ErodeVec32f>(kernel, anchor));
+ return makePtr<MorphFilter<MinOp<float>, ErodeVec32f> >(kernel, anchor);
if( depth == CV_64F )
- return Ptr<BaseFilter>(new MorphFilter<MinOp<double>, ErodeVec64f>(kernel, anchor));
+ return makePtr<MorphFilter<MinOp<double>, ErodeVec64f> >(kernel, anchor);
}
else
{
if( depth == CV_8U )
- return Ptr<BaseFilter>(new MorphFilter<MaxOp<uchar>, DilateVec8u>(kernel, anchor));
+ return makePtr<MorphFilter<MaxOp<uchar>, DilateVec8u> >(kernel, anchor);
if( depth == CV_16U )
- return Ptr<BaseFilter>(new MorphFilter<MaxOp<ushort>, DilateVec16u>(kernel, anchor));
+ return makePtr<MorphFilter<MaxOp<ushort>, DilateVec16u> >(kernel, anchor);
if( depth == CV_16S )
- return Ptr<BaseFilter>(new MorphFilter<MaxOp<short>, DilateVec16s>(kernel, anchor));
+ return makePtr<MorphFilter<MaxOp<short>, DilateVec16s> >(kernel, anchor);
if( depth == CV_32F )
- return Ptr<BaseFilter>(new MorphFilter<MaxOp<float>, DilateVec32f>(kernel, anchor));
+ return makePtr<MorphFilter<MaxOp<float>, DilateVec32f> >(kernel, anchor);
if( depth == CV_64F )
- return Ptr<BaseFilter>(new MorphFilter<MaxOp<double>, DilateVec64f>(kernel, anchor));
+ return makePtr<MorphFilter<MaxOp<double>, DilateVec64f> >(kernel, anchor);
}
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
- return Ptr<BaseFilter>(0);
+ return Ptr<BaseFilter>();
}
depth == CV_32F ? (double)-FLT_MAX : -DBL_MAX);
}
- return Ptr<FilterEngine>(new FilterEngine(filter2D, rowFilter, columnFilter,
- type, type, type, _rowBorderType, _columnBorderType, borderValue ));
+ return makePtr<FilterEngine>(filter2D, rowFilter, columnFilter,
+ type, type, type, _rowBorderType, _columnBorderType, borderValue );
}
anchor = ksize/2;
if( sdepth == CV_8U && ddepth == CV_32S )
- return Ptr<BaseRowFilter>(new RowSum<uchar, int>(ksize, anchor));
+ return makePtr<RowSum<uchar, int> >(ksize, anchor);
if( sdepth == CV_8U && ddepth == CV_64F )
- return Ptr<BaseRowFilter>(new RowSum<uchar, double>(ksize, anchor));
+ return makePtr<RowSum<uchar, double> >(ksize, anchor);
if( sdepth == CV_16U && ddepth == CV_32S )
- return Ptr<BaseRowFilter>(new RowSum<ushort, int>(ksize, anchor));
+ return makePtr<RowSum<ushort, int> >(ksize, anchor);
if( sdepth == CV_16U && ddepth == CV_64F )
- return Ptr<BaseRowFilter>(new RowSum<ushort, double>(ksize, anchor));
+ return makePtr<RowSum<ushort, double> >(ksize, anchor);
if( sdepth == CV_16S && ddepth == CV_32S )
- return Ptr<BaseRowFilter>(new RowSum<short, int>(ksize, anchor));
+ return makePtr<RowSum<short, int> >(ksize, anchor);
if( sdepth == CV_32S && ddepth == CV_32S )
- return Ptr<BaseRowFilter>(new RowSum<int, int>(ksize, anchor));
+ return makePtr<RowSum<int, int> >(ksize, anchor);
if( sdepth == CV_16S && ddepth == CV_64F )
- return Ptr<BaseRowFilter>(new RowSum<short, double>(ksize, anchor));
+ return makePtr<RowSum<short, double> >(ksize, anchor);
if( sdepth == CV_32F && ddepth == CV_64F )
- return Ptr<BaseRowFilter>(new RowSum<float, double>(ksize, anchor));
+ return makePtr<RowSum<float, double> >(ksize, anchor);
if( sdepth == CV_64F && ddepth == CV_64F )
- return Ptr<BaseRowFilter>(new RowSum<double, double>(ksize, anchor));
+ return makePtr<RowSum<double, double> >(ksize, anchor);
CV_Error_( CV_StsNotImplemented,
("Unsupported combination of source format (=%d), and buffer format (=%d)",
srcType, sumType));
- return Ptr<BaseRowFilter>(0);
+ return Ptr<BaseRowFilter>();
}
anchor = ksize/2;
if( ddepth == CV_8U && sdepth == CV_32S )
- return Ptr<BaseColumnFilter>(new ColumnSum<int, uchar>(ksize, anchor, scale));
+ return makePtr<ColumnSum<int, uchar> >(ksize, anchor, scale);
if( ddepth == CV_8U && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new ColumnSum<double, uchar>(ksize, anchor, scale));
+ return makePtr<ColumnSum<double, uchar> >(ksize, anchor, scale);
if( ddepth == CV_16U && sdepth == CV_32S )
- return Ptr<BaseColumnFilter>(new ColumnSum<int, ushort>(ksize, anchor, scale));
+ return makePtr<ColumnSum<int, ushort> >(ksize, anchor, scale);
if( ddepth == CV_16U && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new ColumnSum<double, ushort>(ksize, anchor, scale));
+ return makePtr<ColumnSum<double, ushort> >(ksize, anchor, scale);
if( ddepth == CV_16S && sdepth == CV_32S )
- return Ptr<BaseColumnFilter>(new ColumnSum<int, short>(ksize, anchor, scale));
+ return makePtr<ColumnSum<int, short> >(ksize, anchor, scale);
if( ddepth == CV_16S && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new ColumnSum<double, short>(ksize, anchor, scale));
+ return makePtr<ColumnSum<double, short> >(ksize, anchor, scale);
if( ddepth == CV_32S && sdepth == CV_32S )
- return Ptr<BaseColumnFilter>(new ColumnSum<int, int>(ksize, anchor, scale));
+ return makePtr<ColumnSum<int, int> >(ksize, anchor, scale);
if( ddepth == CV_32F && sdepth == CV_32S )
- return Ptr<BaseColumnFilter>(new ColumnSum<int, float>(ksize, anchor, scale));
+ return makePtr<ColumnSum<int, float> >(ksize, anchor, scale);
if( ddepth == CV_32F && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new ColumnSum<double, float>(ksize, anchor, scale));
+ return makePtr<ColumnSum<double, float> >(ksize, anchor, scale);
if( ddepth == CV_64F && sdepth == CV_32S )
- return Ptr<BaseColumnFilter>(new ColumnSum<int, double>(ksize, anchor, scale));
+ return makePtr<ColumnSum<int, double> >(ksize, anchor, scale);
if( ddepth == CV_64F && sdepth == CV_64F )
- return Ptr<BaseColumnFilter>(new ColumnSum<double, double>(ksize, anchor, scale));
+ return makePtr<ColumnSum<double, double> >(ksize, anchor, scale);
CV_Error_( CV_StsNotImplemented,
("Unsupported combination of sum format (=%d), and destination format (=%d)",
sumType, dstType));
- return Ptr<BaseColumnFilter>(0);
+ return Ptr<BaseColumnFilter>();
}
Ptr<BaseColumnFilter> columnFilter = getColumnSumFilter(sumType,
dstType, ksize.height, anchor.y, normalize ? 1./(ksize.width*ksize.height) : 1);
- return Ptr<FilterEngine>(new FilterEngine(Ptr<BaseFilter>(0), rowFilter, columnFilter,
- srcType, dstType, sumType, borderType ));
+ return makePtr<FilterEngine>(Ptr<BaseFilter>(), rowFilter, columnFilter,
+ srcType, dstType, sumType, borderType );
}
void CV_PerimeterAreaSliceTest::run( int )
{
- Ptr<CvMemStorage> storage = cvCreateMemStorage();
+ Ptr<CvMemStorage> storage(cvCreateMemStorage());
RNG& rng = theRNG();
const double min_r = 90, max_r = 120;
namespace cv
{
-class CV_EXPORTS_AS(FeatureDetector) javaFeatureDetector : public FeatureDetector
+class CV_EXPORTS_AS(FeatureDetector) javaFeatureDetector
{
public:
-#if 0
- //DO NOT REMOVE! The block is required for sources parser
- CV_WRAP void detect( const Mat& image, CV_OUT std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const;
- CV_WRAP void detect( const std::vector<Mat>& images, CV_OUT std::vector<std::vector<KeyPoint> >& keypoints, const std::vector<Mat>& masks=std::vector<Mat>() ) const;
- CV_WRAP virtual bool empty() const;
-#endif
+ CV_WRAP void detect( const Mat& image, CV_OUT std::vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const
+ { return wrapped->detect(image, keypoints, mask); }
+
+ CV_WRAP void detect( const std::vector<Mat>& images, CV_OUT std::vector<std::vector<KeyPoint> >& keypoints, const std::vector<Mat>& masks=std::vector<Mat>() ) const
+ { return wrapped->detect(images, keypoints, masks); }
+
+ CV_WRAP bool empty() const
+ { return wrapped->empty(); }
enum
{
break;
}
- Ptr<FeatureDetector> detector = FeatureDetector::create(name);
- detector.addref();
- return (javaFeatureDetector*)((FeatureDetector*) detector);
+ return new javaFeatureDetector(FeatureDetector::create(name));
}
CV_WRAP void write( const String& fileName ) const
{
FileStorage fs(fileName, FileStorage::WRITE);
- ((FeatureDetector*)this)->write(fs);
- fs.release();
+ wrapped->write(fs);
}
CV_WRAP void read( const String& fileName )
{
FileStorage fs(fileName, FileStorage::READ);
- ((FeatureDetector*)this)->read(fs.root());
- fs.release();
+ wrapped->read(fs.root());
}
+
+private:
+ javaFeatureDetector(Ptr<FeatureDetector> _wrapped) : wrapped(_wrapped)
+ {}
+
+ Ptr<FeatureDetector> wrapped;
};
-class CV_EXPORTS_AS(DescriptorMatcher) javaDescriptorMatcher : public DescriptorMatcher
+class CV_EXPORTS_AS(DescriptorMatcher) javaDescriptorMatcher
{
public:
-#if 0
- //DO NOT REMOVE! The block is required for sources parser
- CV_WRAP virtual bool isMaskSupported() const;
- CV_WRAP virtual void add( const std::vector<Mat>& descriptors );
- CV_WRAP const std::vector<Mat>& getTrainDescriptors() const;
- CV_WRAP virtual void clear();
- CV_WRAP virtual bool empty() const;
- CV_WRAP virtual void train();
+ CV_WRAP bool isMaskSupported() const
+ { return wrapped->isMaskSupported(); }
+
+ CV_WRAP void add( const std::vector<Mat>& descriptors )
+ { return wrapped->add(descriptors); }
+
+ CV_WRAP const std::vector<Mat>& getTrainDescriptors() const
+ { return wrapped->getTrainDescriptors(); }
+
+ CV_WRAP void clear()
+ { return wrapped->clear(); }
+
+ CV_WRAP bool empty() const
+ { return wrapped->empty(); }
+
+ CV_WRAP void train()
+ { return wrapped->train(); }
+
CV_WRAP void match( const Mat& queryDescriptors, const Mat& trainDescriptors,
- CV_OUT std::vector<DMatch>& matches, const Mat& mask=Mat() ) const;
+ CV_OUT std::vector<DMatch>& matches, const Mat& mask=Mat() ) const
+ { return wrapped->match(queryDescriptors, trainDescriptors, matches, mask); }
+
CV_WRAP void knnMatch( const Mat& queryDescriptors, const Mat& trainDescriptors,
CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
- const Mat& mask=Mat(), bool compactResult=false ) const;
+ const Mat& mask=Mat(), bool compactResult=false ) const
+ { return wrapped->knnMatch(queryDescriptors, trainDescriptors, matches, k, mask, compactResult); }
+
CV_WRAP void radiusMatch( const Mat& queryDescriptors, const Mat& trainDescriptors,
CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
- const Mat& mask=Mat(), bool compactResult=false ) const;
+ const Mat& mask=Mat(), bool compactResult=false ) const
+ { return wrapped->radiusMatch(queryDescriptors, trainDescriptors, matches, maxDistance, mask, compactResult); }
+
CV_WRAP void match( const Mat& queryDescriptors, CV_OUT std::vector<DMatch>& matches,
- const std::vector<Mat>& masks=std::vector<Mat>() );
+ const std::vector<Mat>& masks=std::vector<Mat>() )
+ { return wrapped->match(queryDescriptors, matches, masks); }
+
CV_WRAP void knnMatch( const Mat& queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
- const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false );
+ const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false )
+ { return wrapped->knnMatch(queryDescriptors, matches, k, masks, compactResult); }
+
CV_WRAP void radiusMatch( const Mat& queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
- const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false );
-#endif
+ const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false )
+ { return wrapped->radiusMatch(queryDescriptors, matches, maxDistance, masks, compactResult); }
enum
{
CV_WRAP_AS(clone) javaDescriptorMatcher* jclone( bool emptyTrainData=false ) const
{
- Ptr<DescriptorMatcher> matcher = this->clone(emptyTrainData);
- matcher.addref();
- return (javaDescriptorMatcher*)((DescriptorMatcher*) matcher);
+ return new javaDescriptorMatcher(wrapped->clone(emptyTrainData));
}
//supported: FlannBased, BruteForce, BruteForce-L1, BruteForce-Hamming, BruteForce-HammingLUT
break;
}
- Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(name);
- matcher.addref();
- return (javaDescriptorMatcher*)((DescriptorMatcher*) matcher);
+ return new javaDescriptorMatcher(DescriptorMatcher::create(name));
}
CV_WRAP void write( const String& fileName ) const
{
FileStorage fs(fileName, FileStorage::WRITE);
- ((DescriptorMatcher*)this)->write(fs);
- fs.release();
+ wrapped->write(fs);
}
CV_WRAP void read( const String& fileName )
{
FileStorage fs(fileName, FileStorage::READ);
- ((DescriptorMatcher*)this)->read(fs.root());
- fs.release();
+ wrapped->read(fs.root());
}
+
+private:
+ javaDescriptorMatcher(Ptr<DescriptorMatcher> _wrapped) : wrapped(_wrapped)
+ {}
+
+ Ptr<DescriptorMatcher> wrapped;
};
-class CV_EXPORTS_AS(DescriptorExtractor) javaDescriptorExtractor : public DescriptorExtractor
+class CV_EXPORTS_AS(DescriptorExtractor) javaDescriptorExtractor
{
public:
-#if 0
- //DO NOT REMOVE! The block is required for sources parser
- CV_WRAP void compute( const Mat& image, CV_IN_OUT std::vector<KeyPoint>& keypoints, Mat& descriptors ) const;
- CV_WRAP void compute( const std::vector<Mat>& images, CV_IN_OUT std::vector<std::vector<KeyPoint> >& keypoints, CV_OUT std::vector<Mat>& descriptors ) const;
- CV_WRAP virtual int descriptorSize() const;
- CV_WRAP virtual int descriptorType() const;
+ CV_WRAP void compute( const Mat& image, CV_IN_OUT std::vector<KeyPoint>& keypoints, Mat& descriptors ) const
+ { return wrapped->compute(image, keypoints, descriptors); }
- CV_WRAP virtual bool empty() const;
-#endif
+ CV_WRAP void compute( const std::vector<Mat>& images, CV_IN_OUT std::vector<std::vector<KeyPoint> >& keypoints, CV_OUT std::vector<Mat>& descriptors ) const
+ { return wrapped->compute(images, keypoints, descriptors); }
+
+ CV_WRAP int descriptorSize() const
+ { return wrapped->descriptorSize(); }
+
+ CV_WRAP int descriptorType() const
+ { return wrapped->descriptorType(); }
+
+ CV_WRAP bool empty() const
+ { return wrapped->empty(); }
enum
{
break;
}
- Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create(name);
- extractor.addref();
- return (javaDescriptorExtractor*)((DescriptorExtractor*) extractor);
+ return new javaDescriptorExtractor(DescriptorExtractor::create(name));
}
CV_WRAP void write( const String& fileName ) const
{
FileStorage fs(fileName, FileStorage::WRITE);
- ((DescriptorExtractor*)this)->write(fs);
- fs.release();
+ wrapped->write(fs);
}
CV_WRAP void read( const String& fileName )
{
FileStorage fs(fileName, FileStorage::READ);
- ((DescriptorExtractor*)this)->read(fs.root());
- fs.release();
+ wrapped->read(fs.root());
}
+
+private:
+ javaDescriptorExtractor(Ptr<DescriptorExtractor> _wrapped) : wrapped(_wrapped)
+ {}
+
+ Ptr<DescriptorExtractor> wrapped;
};
-class CV_EXPORTS_AS(GenericDescriptorMatcher) javaGenericDescriptorMatcher : public GenericDescriptorMatcher
+class CV_EXPORTS_AS(GenericDescriptorMatcher) javaGenericDescriptorMatcher
{
public:
-#if 0
- //DO NOT REMOVE! The block is required for sources parser
- CV_WRAP virtual void add( const std::vector<Mat>& images,
- std::vector<std::vector<KeyPoint> >& keypoints );
- CV_WRAP const std::vector<Mat>& getTrainImages() const;
- CV_WRAP const std::vector<std::vector<KeyPoint> >& getTrainKeypoints() const;
- CV_WRAP virtual void clear();
- CV_WRAP virtual bool isMaskSupported();
- CV_WRAP virtual void train();
+ CV_WRAP void add( const std::vector<Mat>& images,
+ std::vector<std::vector<KeyPoint> >& keypoints )
+ { return wrapped->add(images, keypoints); }
+
+ CV_WRAP const std::vector<Mat>& getTrainImages() const
+ { return wrapped->getTrainImages(); }
+
+ CV_WRAP const std::vector<std::vector<KeyPoint> >& getTrainKeypoints() const
+ { return wrapped->getTrainKeypoints(); }
+
+ CV_WRAP void clear()
+ { return wrapped->clear(); }
+
+ CV_WRAP bool isMaskSupported()
+ { return wrapped->isMaskSupported(); }
+
+ CV_WRAP void train()
+ { return wrapped->train(); }
+
CV_WRAP void classify( const Mat& queryImage, CV_IN_OUT std::vector<KeyPoint>& queryKeypoints,
- const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints ) const;
- CV_WRAP void classify( const Mat& queryImage, CV_IN_OUT std::vector<KeyPoint>& queryKeypoints );
+ const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints ) const
+ { return wrapped->classify(queryImage, queryKeypoints, trainImage, trainKeypoints); }
+
+ CV_WRAP void classify( const Mat& queryImage, CV_IN_OUT std::vector<KeyPoint>& queryKeypoints )
+ { return wrapped->classify(queryImage, queryKeypoints); }
+
CV_WRAP void match( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
- CV_OUT std::vector<DMatch>& matches, const Mat& mask=Mat() ) const;
+ CV_OUT std::vector<DMatch>& matches, const Mat& mask=Mat() ) const
+ { return wrapped->match(queryImage, queryKeypoints, trainImage, trainKeypoints, matches, mask); }
+
CV_WRAP void knnMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
- const Mat& mask=Mat(), bool compactResult=false ) const;
+ const Mat& mask=Mat(), bool compactResult=false ) const
+ { return wrapped->knnMatch(queryImage, queryKeypoints, trainImage, trainKeypoints,
+ matches, k, mask, compactResult); }
+
CV_WRAP void radiusMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
const Mat& trainImage, std::vector<KeyPoint>& trainKeypoints,
CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
- const Mat& mask=Mat(), bool compactResult=false ) const;
+ const Mat& mask=Mat(), bool compactResult=false ) const
+ { return wrapped->radiusMatch(queryImage, queryKeypoints, trainImage, trainKeypoints,
+ matches, maxDistance, mask, compactResult); }
+
CV_WRAP void match( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
- CV_OUT std::vector<DMatch>& matches, const std::vector<Mat>& masks=std::vector<Mat>() );
+ CV_OUT std::vector<DMatch>& matches, const std::vector<Mat>& masks=std::vector<Mat>() )
+ { return wrapped->match(queryImage, queryKeypoints, matches, masks); }
+
CV_WRAP void knnMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
- const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false );
+ const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false )
+ { return wrapped->knnMatch(queryImage, queryKeypoints, matches, k, masks, compactResult); }
+
CV_WRAP void radiusMatch( const Mat& queryImage, std::vector<KeyPoint>& queryKeypoints,
CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
- const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false );
- CV_WRAP virtual bool empty() const;
-#endif
+ const std::vector<Mat>& masks=std::vector<Mat>(), bool compactResult=false )
+ { return wrapped->radiusMatch(queryImage, queryKeypoints, matches, maxDistance, masks, compactResult); }
+
+ CV_WRAP bool empty() const
+ { return wrapped->empty(); }
+
enum
{
CV_WRAP_AS(clone) javaGenericDescriptorMatcher* jclone( bool emptyTrainData=false ) const
{
- Ptr<GenericDescriptorMatcher> matcher = this->clone(emptyTrainData);
- matcher.addref();
- return (javaGenericDescriptorMatcher*)((GenericDescriptorMatcher*) matcher);
+ return new javaGenericDescriptorMatcher(wrapped->clone(emptyTrainData));
}
//supported: OneWay, Fern
break;
}
- Ptr<GenericDescriptorMatcher> matcher = GenericDescriptorMatcher::create(name);
- matcher.addref();
- return (javaGenericDescriptorMatcher*)((GenericDescriptorMatcher*) matcher);
+ return new javaGenericDescriptorMatcher(GenericDescriptorMatcher::create(name));
}
CV_WRAP void write( const String& fileName ) const
{
FileStorage fs(fileName, FileStorage::WRITE);
- ((GenericDescriptorMatcher*)this)->write(fs);
- fs.release();
+ wrapped->write(fs);
}
CV_WRAP void read( const String& fileName )
{
FileStorage fs(fileName, FileStorage::READ);
- ((GenericDescriptorMatcher*)this)->read(fs.root());
- fs.release();
+ wrapped->read(fs.root());
}
+
+private:
+ javaGenericDescriptorMatcher(Ptr<GenericDescriptorMatcher> _wrapped) : wrapped(_wrapped)
+ {}
+
+ Ptr<GenericDescriptorMatcher> wrapped;
};
#if 0
void CvEM::write( CvFileStorage* _fs, const char* name ) const
{
- FileStorage fs = _fs;
+ FileStorage fs(_fs, false);
if(name)
fs << name << "{";
emObj.write(fs);
if(name)
fs << "}";
- fs.fs.obj = 0;
}
double CvEM::calcLikelihood( const Mat &input_sample ) const
Mat descr;
Ptr<Feature2D> surf = Algorithm::create<Feature2D>("Feature2D.SURF");
- if( surf.empty() )
+ if( !surf )
CV_Error(CV_StsNotImplemented, "OpenCV was built without SURF support");
surf->set("hessianThreshold", params.hessianThreshold);
cvGetStarKeypoints( const CvArr* _img, CvMemStorage* storage,
CvStarDetectorParams params )
{
- Ptr<StarDetector> star = new StarDetector(params.maxSize, params.responseThreshold,
- params.lineThresholdProjected,
- params.lineThresholdBinarized,
- params.suppressNonmaxSize);
+ Ptr<StarDetector> star(new StarDetector(params.maxSize, params.responseThreshold,
+ params.lineThresholdProjected,
+ params.lineThresholdBinarized,
+ params.suppressNonmaxSize));
std::vector<KeyPoint> kpts;
star->detect(cvarrToMat(_img), kpts, Mat());
CV_Error(CV_StsUnsupportedFormat, "dist must be CV_64FC1");
if (CV_MAT_TYPE(type()) != CV_MAT_TYPE(desc->type)) {
- tmp_desc = cvCreateMat(desc->rows, desc->cols, type());
+ tmp_desc.reset(cvCreateMat(desc->rows, desc->cols, type()));
cvConvert(desc, tmp_desc);
desc = tmp_desc;
}
{
std::vector<KeyPoint> features;
Ptr<FeatureDetector> surf_extractor = FeatureDetector::create("SURF");
- if( surf_extractor.empty() )
+ if( !surf_extractor )
CV_Error(CV_StsNotImplemented, "OpenCV was built without SURF support");
surf_extractor->set("hessianThreshold", 1.0);
//printf("Extracting SURF features...");
{
clear();
- if( _base.empty() )
+ if( !_base )
base = _base;
params = _params;
GenericDescriptorMatcher::clear();
prevTrainCount = 0;
- if( !base.empty() )
+ if( base )
base->clear();
}
void OneWayDescriptorMatcher::train()
{
- if( base.empty() || prevTrainCount < (int)trainPointCollection.keypointCount() )
+ if( !base || prevTrainCount < (int)trainPointCollection.keypointCount() )
{
- base = new OneWayDescriptorObject( params.patchSize, params.poseCount, params.pcaFilename,
- params.trainPath, params.trainImagesList, params.minScale, params.maxScale, params.stepScale );
+ base.reset(
+ new OneWayDescriptorObject( params.patchSize, params.poseCount, params.pcaFilename,
+ params.trainPath, params.trainImagesList, params.minScale, params.maxScale, params.stepScale ));
base->Allocate( (int)trainPointCollection.keypointCount() );
prevTrainCount = (int)trainPointCollection.keypointCount();
void OneWayDescriptorMatcher::read( const FileNode &fn )
{
- base = new OneWayDescriptorObject( params.patchSize, params.poseCount, String (), String (), String (),
- params.minScale, params.maxScale, params.stepScale );
+ base.reset(
+ new OneWayDescriptorObject( params.patchSize, params.poseCount, String (), String (), String (),
+ params.minScale, params.maxScale, params.stepScale ));
base->Read (fn);
}
bool OneWayDescriptorMatcher::empty() const
{
- return base.empty() || base->empty();
+ return !base || base->empty();
}
Ptr<GenericDescriptorMatcher> OneWayDescriptorMatcher::clone( bool emptyTrainData ) const
{
- OneWayDescriptorMatcher* matcher = new OneWayDescriptorMatcher( params );
+ Ptr<OneWayDescriptorMatcher> matcher = makePtr<OneWayDescriptorMatcher>( params );
if( !emptyTrainData )
{
params = _params;
if( !params.filename.empty() )
{
- classifier = new FernClassifier;
+ classifier = makePtr<FernClassifier>();
FileStorage fs(params.filename, FileStorage::READ);
if( fs.isOpened() )
classifier->read( fs.getFirstTopLevelNode() );
void FernDescriptorMatcher::train()
{
- if( classifier.empty() || prevTrainCount < (int)trainPointCollection.keypointCount() )
+ if( !classifier || prevTrainCount < (int)trainPointCollection.keypointCount() )
{
assert( params.filename.empty() );
for( size_t imgIdx = 0; imgIdx < trainPointCollection.imageCount(); imgIdx++ )
KeyPoint::convert( trainPointCollection.getKeypoints((int)imgIdx), points[imgIdx] );
- classifier = new FernClassifier( points, trainPointCollection.getImages(), std::vector<std::vector<int> >(), 0, // each points is a class
- params.patchSize, params.signatureSize, params.nstructs, params.structSize,
- params.nviews, params.compressionMethod, params.patchGenerator );
+ classifier.reset(
+ new FernClassifier( points, trainPointCollection.getImages(), std::vector<std::vector<int> >(), 0, // each points is a class
+ params.patchSize, params.signatureSize, params.nstructs, params.structSize,
+ params.nviews, params.compressionMethod, params.patchGenerator ));
}
}
bool FernDescriptorMatcher::empty() const
{
- return classifier.empty() || classifier->empty();
+ return !classifier || classifier->empty();
}
Ptr<GenericDescriptorMatcher> FernDescriptorMatcher::clone( bool emptyTrainData ) const
{
- FernDescriptorMatcher* matcher = new FernDescriptorMatcher( params );
+ Ptr<FernDescriptorMatcher> matcher = makePtr<FernDescriptorMatcher>( params );
if( !emptyTrainData )
{
CV_Error( CV_StsNotImplemented, "deep clone dunctionality is not implemented, because "
typedef CvGBTreesParams GradientBoostingTreeParams;
typedef CvGBTrees GradientBoostingTrees;
-template<> CV_EXPORTS void Ptr<CvDTreeSplit>::delete_obj();
+template<> CV_EXPORTS void DefaultDeleter<CvDTreeSplit>::operator ()(CvDTreeSplit* obj) const;
CV_EXPORTS bool initModule_ml(void);
}
bool initModule_ml(void)
{
- Ptr<Algorithm> em = createEM_hidden();
+ Ptr<Algorithm> em = createEM_ptr_hidden();
return em->info() != 0;
}
}
if( res && bestSplit->quality < split->quality )
- memcpy( (CvDTreeSplit*)bestSplit, (CvDTreeSplit*)split, splitSize );
+ memcpy( bestSplit.get(), split.get(), splitSize );
}
}
}
namespace cv
{
-template<> CV_EXPORTS void Ptr<CvDTreeSplit>::delete_obj()
+template<> CV_EXPORTS void DefaultDeleter<CvDTreeSplit>::operator ()(CvDTreeSplit* obj) const
{
fastFree(obj);
}
node = _node;
splitSize = tree->get_data()->split_heap->elem_size;
- bestSplit = (CvDTreeSplit*)fastMalloc(splitSize);
- memset((CvDTreeSplit*)bestSplit, 0, splitSize);
+ bestSplit.reset((CvDTreeSplit*)fastMalloc(splitSize));
+ memset(bestSplit.get(), 0, splitSize);
bestSplit->quality = -1;
bestSplit->condensed_idx = INT_MIN;
- split = (CvDTreeSplit*)fastMalloc(splitSize);
- memset((CvDTreeSplit*)split, 0, splitSize);
+ split.reset((CvDTreeSplit*)fastMalloc(splitSize));
+ memset(split.get(), 0, splitSize);
//haveSplit = false;
}
node = finder.node;
splitSize = tree->get_data()->split_heap->elem_size;
- bestSplit = (CvDTreeSplit*)fastMalloc(splitSize);
- memcpy((CvDTreeSplit*)(bestSplit), (const CvDTreeSplit*)finder.bestSplit, splitSize);
- split = (CvDTreeSplit*)fastMalloc(splitSize);
- memset((CvDTreeSplit*)split, 0, splitSize);
+ bestSplit.reset((CvDTreeSplit*)fastMalloc(splitSize));
+ memcpy(bestSplit.get(), finder.bestSplit.get(), splitSize);
+ split.reset((CvDTreeSplit*)fastMalloc(splitSize));
+ memset(split.get(), 0, splitSize);
}
void DTreeBestSplitFinder::operator()(const BlockedRange& range)
}
if( res && bestSplit->quality < split->quality )
- memcpy( (CvDTreeSplit*)bestSplit, (CvDTreeSplit*)split, splitSize );
+ memcpy( bestSplit.get(), split.get(), splitSize );
}
}
void DTreeBestSplitFinder::join( DTreeBestSplitFinder& rhs )
{
if( bestSplit->quality < rhs.bestSplit->quality )
- memcpy( (CvDTreeSplit*)bestSplit, (CvDTreeSplit*)rhs.bestSplit, splitSize );
+ memcpy( bestSplit.get(), rhs.bestSplit.get(), splitSize );
}
}
bool initModule_nonfree(void)
{
- Ptr<Algorithm> sift = createSIFT_hidden(), surf = createSURF_hidden();
+ Ptr<Algorithm> sift = createSIFT_ptr_hidden(), surf = createSURF_ptr_hidden();
return sift->info() != 0 && surf->info() != 0;
}
void CV_FeatureDetectorTest::run( int /*start_from*/ )
{
- if( fdetector.empty() )
+ if( !fdetector )
{
ts->printf( cvtest::TS::LOG, "Feature detector is empty.\n" );
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
void run(int)
{
createDescriptorExtractor();
- if( dextractor.empty() )
+ if( !dextractor )
{
ts->printf(cvtest::TS::LOG, "Descriptor extractor is empty.\n");
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA );
void run(int)
{
Ptr<Feature2D> f = Algorithm::create<Feature2D>("Feature2D." + fname);
- if(f.empty())
+ if(!f)
return;
string path = string(ts->get_data_path()) + "detectors_descriptors_evaluation/planar/";
string imgname1 = path + "box.png";
FeatureDetectorUsingMaskTest(const Ptr<FeatureDetector>& featureDetector) :
featureDetector_(featureDetector)
{
- CV_Assert(!featureDetector_.empty());
+ CV_Assert(featureDetector_);
}
protected:
virtual void run(int)
{
cv::initModule_features2d();
- CV_Assert(!detector.empty());
+ CV_Assert(detector);
string imgFilename = string(ts->get_data_path()) + FEATURES2D_DIR + "/" + IMAGE_FILENAME;
// Read the test image.
minKeyPointMatchesRatio(_minKeyPointMatchesRatio),
minAngleInliersRatio(_minAngleInliersRatio)
{
- CV_Assert(!featureDetector.empty());
+ CV_Assert(featureDetector);
}
protected:
normType(_normType),
minDescInliersRatio(_minDescInliersRatio)
{
- CV_Assert(!featureDetector.empty());
- CV_Assert(!descriptorExtractor.empty());
+ CV_Assert(featureDetector);
+ CV_Assert(descriptorExtractor);
}
protected:
minKeyPointMatchesRatio(_minKeyPointMatchesRatio),
minScaleInliersRatio(_minScaleInliersRatio)
{
- CV_Assert(!featureDetector.empty());
+ CV_Assert(featureDetector);
}
protected:
normType(_normType),
minDescInliersRatio(_minDescInliersRatio)
{
- CV_Assert(!featureDetector.empty());
- CV_Assert(!descriptorExtractor.empty());
+ CV_Assert(featureDetector);
+ CV_Assert(descriptorExtractor);
}
protected:
static Ptr<FeatureEvaluator> create(int type);
};
-template<> CV_EXPORTS void Ptr<CvHaarClassifierCascade>::delete_obj();
+template<> CV_EXPORTS void DefaultDeleter<CvHaarClassifierCascade>::operator ()(CvHaarClassifierCascade* obj) const;
enum { CASCADE_DO_CANNY_PRUNING = 1,
CASCADE_SCALE_IMAGE = 2,
\param nonMaxSuppression Whether non-maximum suppression is done over the branch probabilities
\param minProbability The minimum probability difference between local maxima and local minima ERs
*/
-CV_EXPORTS Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb = NULL,
+CV_EXPORTS Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb = Ptr<ERFilter::Callback>(),
int thresholdDelta = 1, float minArea = 0.000025,
float maxArea = 0.13, float minProbability = 0.2,
bool nonMaxSuppression = true,
if omitted tries to load a default classifier from file trained_classifierNM2.xml
\param minProbability The minimum probability P(er|character) allowed for retrieved ERs
*/
-CV_EXPORTS Ptr<ERFilter> createERFilterNM2(const Ptr<ERFilter::Callback>& cb = NULL,
+CV_EXPORTS Ptr<ERFilter> createERFilterNM2(const Ptr<ERFilter::Callback>& cb = Ptr<ERFilter::Callback>(),
float minProbability = 0.85);
}
HaarEvaluator::HaarEvaluator()
{
- features = new std::vector<Feature>();
+ features = makePtr<std::vector<Feature> >();
}
HaarEvaluator::~HaarEvaluator()
{
Ptr<FeatureEvaluator> HaarEvaluator::clone() const
{
- HaarEvaluator* ret = new HaarEvaluator;
+ Ptr<HaarEvaluator> ret = makePtr<HaarEvaluator>();
ret->origWinSize = origWinSize;
ret->features = features;
ret->featuresPtr = &(*ret->features)[0];
LBPEvaluator::LBPEvaluator()
{
- features = new std::vector<Feature>();
+ features = makePtr<std::vector<Feature> >();
}
LBPEvaluator::~LBPEvaluator()
{
Ptr<FeatureEvaluator> LBPEvaluator::clone() const
{
- LBPEvaluator* ret = new LBPEvaluator;
+ Ptr<LBPEvaluator> ret = makePtr<LBPEvaluator>();
ret->origWinSize = origWinSize;
ret->features = features;
ret->featuresPtr = &(*ret->features)[0];
HOGEvaluator::HOGEvaluator()
{
- features = new std::vector<Feature>();
+ features = makePtr<std::vector<Feature> >();
}
HOGEvaluator::~HOGEvaluator()
Ptr<FeatureEvaluator> HOGEvaluator::clone() const
{
- HOGEvaluator* ret = new HOGEvaluator;
+ Ptr<HOGEvaluator> ret = makePtr<HOGEvaluator>();
ret->origWinSize = origWinSize;
ret->features = features;
ret->featuresPtr = &(*ret->features)[0];
bool CascadeClassifier::empty() const
{
- return oldCascade.empty() && data.stages.empty();
+ return !oldCascade && data.stages.empty();
}
bool CascadeClassifier::load(const String& filename)
fs.release();
- oldCascade = Ptr<CvHaarClassifierCascade>((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0));
+ oldCascade.reset((CvHaarClassifierCascade*)cvLoad(filename.c_str(), 0, 0, 0));
return !oldCascade.empty();
}
int CascadeClassifier::runAt( Ptr<FeatureEvaluator>& evaluator, Point pt, double& weight )
{
- CV_Assert( oldCascade.empty() );
+ CV_Assert( !oldCascade );
assert( data.featureType == FeatureEvaluator::HAAR ||
data.featureType == FeatureEvaluator::LBP ||
#endif
Mat currentMask;
- if (!maskGenerator.empty()) {
+ if (maskGenerator) {
currentMask=maskGenerator->generateMask(image);
}
{
candidates.clear();
- if (!maskGenerator.empty())
+ if (maskGenerator)
maskGenerator->initializeMask(image);
if( maxObjectSize.height == 0 || maxObjectSize.width == 0 )
return featureEvaluator->read(fn);
}
-template<> void Ptr<CvHaarClassifierCascade>::delete_obj()
+template<> void DefaultDeleter<CvHaarClassifierCascade>::operator ()(CvHaarClassifierCascade* obj) const
{ cvReleaseHaarClassifierCascade(&obj); }
} // namespace cv
minProbabilityDiff = 1.;
num_accepted_regions = 0;
num_rejected_regions = 0;
- classifier = NULL;
}
// the key method. Takes image on input, vector of ERStat is output for the first stage,
CV_Assert( (thresholdDelta >= 0) && (thresholdDelta <= 128) );
CV_Assert( (minProbabilityDiff >= 0.) && (minProbabilityDiff <= 1.) );
- Ptr<ERFilterNM> filter = new ERFilterNM();
+ Ptr<ERFilterNM> filter = makePtr<ERFilterNM>();
if (cb == NULL)
- filter->setCallback(new ERClassifierNM1());
+ filter->setCallback(makePtr<ERClassifierNM1>());
else
filter->setCallback(cb);
CV_Assert( (minProbability >= 0.) && (minProbability <= 1.) );
- Ptr<ERFilterNM> filter = new ERFilterNM();
+ Ptr<ERFilterNM> filter = makePtr<ERFilterNM>();
if (cb == NULL)
- filter->setCallback(new ERClassifierNM2());
+ filter->setCallback(makePtr<ERClassifierNM2>());
else
filter->setCallback(cb);
maxSize.width = img->cols;
}
- temp = cvCreateMat( img->rows, img->cols, CV_8UC1 );
- sum = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );
- sqsum = cvCreateMat( img->rows + 1, img->cols + 1, CV_64FC1 );
+ temp.reset(cvCreateMat( img->rows, img->cols, CV_8UC1 ));
+ sum.reset(cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 ));
+ sqsum.reset(cvCreateMat( img->rows + 1, img->cols + 1, CV_64FC1 ));
if( !cascade->hid_cascade )
icvCreateHidHaarClassifierCascade(cascade);
if( cascade->hid_cascade->has_tilted_features )
- tilted = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );
+ tilted.reset(cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 ));
result_seq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvAvgComp), storage );
if( use_ipp )
normImg = cvCreateMat( img->rows, img->cols, CV_32FC1 );
#endif
- imgSmall = cvCreateMat( img->rows + 1, img->cols + 1, CV_8UC1 );
+ imgSmall.reset(cvCreateMat( img->rows + 1, img->cols + 1, CV_8UC1 ));
for( factor = 1; ; factor *= scaleFactor )
{
if( doCannyPruning )
{
- sumcanny = cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 );
+ sumcanny.reset(cvCreateMat( img->rows + 1, img->cols + 1, CV_32SC1 ));
cvCanny( img, temp, 0, 50, 3 );
cvIntegral( temp, sumcanny );
}
{
if(ptr && _fs)
{
- FileStorage fs(_fs);
- fs.fs.addref();
+ FileStorage fs(_fs, false);
((const _ClsName*)ptr)->write(fs, String(name));
}
}
Ptr<Modality> Modality::create(const String& modality_type)
{
if (modality_type == "ColorGradient")
- return new ColorGradient();
+ return makePtr<ColorGradient>();
else if (modality_type == "DepthNormal")
- return new DepthNormal();
+ return makePtr<DepthNormal>();
else
- return NULL;
+ return Ptr<Modality>();
}
Ptr<Modality> Modality::create(const FileNode& fn)
Ptr<QuantizedPyramid> ColorGradient::processImpl(const Mat& src,
const Mat& mask) const
{
- return new ColorGradientPyramid(src, mask, weak_threshold, num_features, strong_threshold);
+ return makePtr<ColorGradientPyramid>(src, mask, weak_threshold, num_features, strong_threshold);
}
void ColorGradient::read(const FileNode& fn)
Ptr<QuantizedPyramid> DepthNormal::processImpl(const Mat& src,
const Mat& mask) const
{
- return new DepthNormalPyramid(src, mask, distance_threshold, difference_threshold,
- num_features, extract_threshold);
+ return makePtr<DepthNormalPyramid>(src, mask, distance_threshold, difference_threshold,
+ num_features, extract_threshold);
}
void DepthNormal::read(const FileNode& fn)
Ptr<Detector> getDefaultLINE()
{
std::vector< Ptr<Modality> > modalities;
- modalities.push_back(new ColorGradient);
- return new Detector(modalities, std::vector<int>(T_DEFAULTS, T_DEFAULTS + 2));
+ modalities.push_back(makePtr<ColorGradient>());
+ return makePtr<Detector>(modalities, std::vector<int>(T_DEFAULTS, T_DEFAULTS + 2));
}
Ptr<Detector> getDefaultLINEMOD()
{
std::vector< Ptr<Modality> > modalities;
- modalities.push_back(new ColorGradient);
- modalities.push_back(new DepthNormal);
- return new Detector(modalities, std::vector<int>(T_DEFAULTS, T_DEFAULTS + 2));
+ modalities.push_back(makePtr<ColorGradient>());
+ modalities.push_back(makePtr<DepthNormal>());
+ return makePtr<Detector>(modalities, std::vector<int>(T_DEFAULTS, T_DEFAULTS + 2));
}
} // namespace linemod
int di, const Mat& img,
vector<Rect>& objects )
{
- Ptr<CvHaarClassifierCascade> c_cascade = cvLoadHaarClassifierCascade(filename.c_str(), cvSize(0,0));
- Ptr<CvMemStorage> storage = cvCreateMemStorage();
+ Ptr<CvHaarClassifierCascade> c_cascade(cvLoadHaarClassifierCascade(filename.c_str(), cvSize(0,0)));
+ Ptr<CvMemStorage> storage(cvCreateMemStorage());
- if( c_cascade.empty() )
+ if( !c_cascade )
{
ts->printf( cvtest::TS::LOG, "cascade %s can not be opened");
return cvtest::TS::FAIL_INVALID_TEST_DATA;
Ptr<FilterEngine_GPU> cv::ocl::createFilter2D_GPU(const Ptr<BaseFilter_GPU> filter2D)
{
- return Ptr<FilterEngine_GPU>(new Filter2DEngine_GPU(filter2D));
+ return makePtr<Filter2DEngine_GPU>(filter2D);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
Ptr<BaseFilter_GPU> filter2D = getMorphologyFilter_GPU(op, type, kernel, ksize, anchor);
- return Ptr<FilterEngine_GPU>(new MorphologyFilterEngine_GPU(filter2D, iterations));
+ return makePtr<MorphologyFilterEngine_GPU>(filter2D, iterations);
}
namespace
normalizeKernel(kernel, gpu_krnl, CV_32FC1);
normalizeAnchor(norm_archor, ksize);
- return Ptr<BaseFilter_GPU>(new LinearFilter_GPU(ksize, anchor, gpu_krnl, GPUFilter2D_callers[CV_MAT_CN(srcType)],
- borderType));
+ return makePtr<LinearFilter_GPU>(ksize, anchor, gpu_krnl, GPUFilter2D_callers[CV_MAT_CN(srcType)],
+ borderType);
}
Ptr<FilterEngine_GPU> cv::ocl::createLinearFilter_GPU(int srcType, int dstType, const Mat &kernel, const Point &anchor,
Ptr<FilterEngine_GPU> cv::ocl::createSeparableFilter_GPU(const Ptr<BaseRowFilter_GPU> &rowFilter,
const Ptr<BaseColumnFilter_GPU> &columnFilter)
{
- return Ptr<FilterEngine_GPU>(new SeparableFilterEngine_GPU(rowFilter, columnFilter));
+ return makePtr<SeparableFilterEngine_GPU>(rowFilter, columnFilter);
}
/*
normalizeAnchor(anchor, ksize);
- return Ptr<BaseFilter_GPU>(new GPUBoxFilter(ksize, anchor,
- borderType, FilterBox_callers[(CV_MAT_DEPTH(srcType) == CV_32F)][CV_MAT_CN(srcType)]));
+ return makePtr<GPUBoxFilter>(ksize, anchor,
+ borderType, FilterBox_callers[(CV_MAT_DEPTH(srcType) == CV_32F)][CV_MAT_CN(srcType)]);
}
Ptr<FilterEngine_GPU> cv::ocl::createBoxFilter_GPU(int srcType, int dstType,
normalizeAnchor(anchor, ksize);
- return Ptr<BaseRowFilter_GPU>(new GpuLinearRowFilter(ksize, anchor, mat_kernel,
- gpuFilter1D_callers[CV_MAT_DEPTH(srcType)], bordertype));
+ return makePtr<GpuLinearRowFilter>(ksize, anchor, mat_kernel,
+ gpuFilter1D_callers[CV_MAT_DEPTH(srcType)], bordertype);
}
namespace
normalizeAnchor(anchor, ksize);
- return Ptr<BaseColumnFilter_GPU>(new GpuLinearColumnFilter(ksize, anchor, mat_kernel,
- gpuFilter1D_callers[CV_MAT_DEPTH(dstType)], bordertype));
+ return makePtr<GpuLinearColumnFilter>(ksize, anchor, mat_kernel,
+ gpuFilter1D_callers[CV_MAT_DEPTH(dstType)], bordertype);
}
Ptr<FilterEngine_GPU> cv::ocl::createSeparableLinearFilter_GPU(int srcType, int dstType,
cv::Ptr<cv::CLAHE> createCLAHE(double clipLimit, cv::Size tileGridSize)
{
- return new CLAHE_Impl(clipLimit, tileGridSize.width, tileGridSize.height);
+ return makePtr<CLAHE_Impl>(clipLimit, tileGridSize.width, tileGridSize.height);
}
//////////////////////////////////bilateralFilter////////////////////////////////////////////////////
Ptr<TextureCL> bindTexturePtr(const oclMat &mat)
{
- return Ptr<TextureCL>(new TextureCL(bindTexture(mat), mat.rows, mat.cols, mat.type()));
+ return makePtr<TextureCL>(bindTexture(mat), mat.rows, mat.cols, mat.type());
}
void releaseTexture(cl_mem& texture)
{
}
namespace cv {
-template<> void cv::Ptr<IplConvKernel>::delete_obj()
+template<> void cv::DefaultDeleter<IplConvKernel>::operator ()(IplConvKernel* obj) const
{
cvReleaseStructuringElement(&obj);
}
ecols = input_img->cols + 2;
erows = input_img->rows + 2;
- f = cvCreateMat(erows, ecols, CV_8UC1);
- t = cvCreateMat(erows, ecols, CV_32FC1);
- band = cvCreateMat(erows, ecols, CV_8UC1);
- mask = cvCreateMat(erows, ecols, CV_8UC1);
- el_cross = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_CROSS,NULL);
+ f.reset(cvCreateMat(erows, ecols, CV_8UC1));
+ t.reset(cvCreateMat(erows, ecols, CV_32FC1));
+ band.reset(cvCreateMat(erows, ecols, CV_8UC1));
+ mask.reset(cvCreateMat(erows, ecols, CV_8UC1));
+ el_cross.reset(cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_CROSS,NULL));
cvCopy( input_img, output_img );
cvSet(mask,cvScalar(KNOWN,0,0,0));
cvSet(f,cvScalar(KNOWN,0,0,0));
cvSet(t,cvScalar(1.0e6f,0,0,0));
cvDilate(mask,band,el_cross,1); // image with narrow band
- Heap=new CvPriorityQueueFloat;
+ Heap=cv::makePtr<CvPriorityQueueFloat>();
if (!Heap->Init(band))
return;
cvSub(band,mask,band,NULL);
if( flags == cv::INPAINT_TELEA )
{
- out = cvCreateMat(erows, ecols, CV_8UC1);
- el_range = cvCreateStructuringElementEx(2*range+1,2*range+1,
- range,range,CV_SHAPE_RECT,NULL);
+ out.reset(cvCreateMat(erows, ecols, CV_8UC1));
+ el_range.reset(cvCreateStructuringElementEx(2*range+1,2*range+1,
+ range,range,CV_SHAPE_RECT,NULL));
cvDilate(mask,out,el_range,1);
cvSub(out,mask,out,NULL);
- Out=new CvPriorityQueueFloat;
+ Out=cv::makePtr<CvPriorityQueueFloat>();
if (!Out->Init(out))
return;
if (!Out->Add(band))
template <typename T>
bool pyopencv_to(PyObject *o, Ptr<T>& p, const char *name)
{
- p = new T();
+ p = makePtr<T>();
return pyopencv_to(o, *p, name);
}
gen_template_check_self = Template(""" if(!PyObject_TypeCheck(self, &pyopencv_${name}_Type))
return failmsgp("Incorrect type of self (must be '${name}' or its derivative)");
- $cname* _self_ = ${amp}((pyopencv_${name}_t*)self)->v;
+ $cname* _self_ = ${amp}((pyopencv_${name}_t*)self)->v${get};
""")
gen_template_check_self_algo = Template(""" if(!PyObject_TypeCheck(self, &pyopencv_${name}_Type))
return failmsgp("Incorrect type of self (must be '${name}' or its derivative)");
- $cname* _self_ = dynamic_cast<$cname*>(${amp}((pyopencv_${name}_t*)self)->v.obj);
+ $cname* _self_ = dynamic_cast<$cname*>(${amp}((pyopencv_${name}_t*)self)->v.get());
""")
-gen_template_call_constructor = Template("""self = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type);
+gen_template_call_constructor_prelude = Template("""self = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type);
new (&(self->v)) Ptr<$cname>(); // init Ptr with placement new
- if(self) ERRWRAP2(self->v = new $cname""")
+ if(self) """)
-gen_template_simple_call_constructor = Template("""self = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type);
- if(self) ERRWRAP2(self->v = $cname""")
+gen_template_call_constructor = Template("""self->v.reset(new ${cname}${args})""")
+
+gen_template_simple_call_constructor_prelude = Template("""self = PyObject_NEW(pyopencv_${name}_t, &pyopencv_${name}_Type);
+ if(self) """)
+
+gen_template_simple_call_constructor = Template("""self->v = ${cname}${args}""")
gen_template_parse_args = Template("""const char* keywords[] = { $kw_list, NULL };
if( PyArg_ParseTupleAndKeywords(args, kw, "$fmtspec", (char**)keywords, $parse_arglist)$code_cvt )""")
gen_template_func_body = Template("""$code_decl
$code_parse
{
- $code_fcall;
+ ${code_prelude}ERRWRAP2($code_fcall);
$code_ret;
}
""")
failmsg("Expected ${cname} for argument '%%s'", name);
return false;
}
- dst = ((pyopencv_${name}_t*)src)->v;
+ dst = ((pyopencv_${name}_t*)src)->v.dynamicCast<${cname}>();
return true;
}
gen_template_get_prop_algo = Template("""
static PyObject* pyopencv_${name}_get_${member}(pyopencv_${name}_t* p, void *closure)
{
- return pyopencv_from(dynamic_cast<$cname*>(p->v.obj)${access}${member});
+ return pyopencv_from(dynamic_cast<$cname*>(p->v.get())${access}${member});
}
""")
PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute");
return -1;
}
- return pyopencv_to(value, dynamic_cast<$cname*>(p->v.obj)${access}${member}) ? 0 : -1;
+ return pyopencv_to(value, dynamic_cast<$cname*>(p->v.get())${access}${member}) ? 0 : -1;
}
""")
if self.classname:
selfinfo = all_classes[self.classname]
if not self.isconstructor:
- amp = ""
- if selfinfo.issimple:
- amp = "&"
+ amp = "&" if selfinfo.issimple else ""
if selfinfo.isalgorithm:
code += gen_template_check_self_algo.substitute(name=selfinfo.name, cname=selfinfo.cname, amp=amp)
else:
- code += gen_template_check_self.substitute(name=selfinfo.name, cname=selfinfo.cname, amp=amp)
+ get = "" if selfinfo.issimple else ".get()"
+ code += gen_template_check_self.substitute(name=selfinfo.name, cname=selfinfo.cname, amp=amp, get=get)
fullname = selfinfo.wname + "." + fullname
all_code_variants = []
declno = -1
for v in self.variants:
code_decl = ""
- code_fcall = ""
code_ret = ""
code_cvt_list = []
- if self.isconstructor:
- code_decl += " pyopencv_%s_t* self = 0;\n" % selfinfo.name
- templ = gen_template_call_constructor
- if selfinfo.issimple:
- templ = gen_template_simple_call_constructor
- code_fcall = templ.substitute(name=selfinfo.name, cname=selfinfo.cname)
- else:
- code_fcall = "ERRWRAP2( "
- if v.rettype:
- code_decl += " " + v.rettype + " retval;\n"
- code_fcall += "retval = "
- if ismethod:
- code_fcall += "_self_->" + self.cname
- else:
- code_fcall += self.cname
- code_fcall += "("
+ code_args = "("
all_cargs = []
parse_arglist = []
if not defval and a.tp.endswith("*"):
defval = 0
assert defval
- if not code_fcall.endswith("("):
- code_fcall += ", "
- code_fcall += defval
+ if not code_args.endswith("("):
+ code_args += ", "
+ code_args += defval
all_cargs.append([[None, ""], ""])
continue
tp1 = tp = a.tp
else:
code_decl += " %s %s;\n" % (amapping[0], a.name)
- if not code_fcall.endswith("("):
- code_fcall += ", "
- code_fcall += amp + a.name
+ if not code_args.endswith("("):
+ code_args += ", "
+ code_args += amp + a.name
+
+ code_args += ")"
+
+ if self.isconstructor:
+ code_decl += " pyopencv_%s_t* self = 0;\n" % selfinfo.name
+ if selfinfo.issimple:
+ templ_prelude = gen_template_simple_call_constructor_prelude
+ templ = gen_template_simple_call_constructor
+ else:
+ templ_prelude = gen_template_call_constructor_prelude
+ templ = gen_template_call_constructor
- code_fcall += "))"
+ code_prelude = templ_prelude.substitute(name=selfinfo.name, cname=selfinfo.cname)
+ code_fcall = templ.substitute(name=selfinfo.name, cname=selfinfo.cname, args=code_args)
+ else:
+ code_prelude = ""
+ code_fcall = ""
+ if v.rettype:
+ code_decl += " " + v.rettype + " retval;\n"
+ code_fcall += "retval = "
+ if ismethod:
+ code_fcall += "_self_->" + self.cname
+ else:
+ code_fcall += self.cname
+ code_fcall += code_args
if code_cvt_list:
code_cvt_list = [""] + code_cvt_list
(fmtspec, ", ".join(["pyopencv_from(" + aname + ")" for aname, argno in v.py_outlist]))
all_code_variants.append(gen_template_func_body.substitute(code_decl=code_decl,
- code_parse=code_parse, code_fcall=code_fcall, code_ret=code_ret))
+ code_parse=code_parse, code_prelude=code_prelude, code_fcall=code_fcall, code_ret=code_ret))
if len(all_code_variants)==1:
# if the function/method has only 1 signature, then just put it
cv::softcascade::ChannelsProcessor::~ChannelsProcessor() { throw_no_cuda(); }
cv::Ptr<cv::softcascade::ChannelsProcessor> cv::softcascade::ChannelsProcessor::create(const int, const int, const int)
-{ throw_no_cuda(); return cv::Ptr<cv::softcascade::ChannelsProcessor>(0); }
+{ throw_no_cuda(); return cv::Ptr<cv::softcascade::ChannelsProcessor>(); }
#else
cv::Ptr<cv::softcascade::ChannelsProcessor> cv::softcascade::ChannelsProcessor::create(const int s, const int b, const int m)
{
CV_Assert((m && SEPARABLE));
- return cv::Ptr<cv::softcascade::ChannelsProcessor>(new SeparablePreprocessor(s, b));
+ return makePtr<SeparablePreprocessor>(s, b);
}
cv::softcascade::ChannelsProcessor::ChannelsProcessor() { }
bool initModule_softcascade(void)
{
- Ptr<Algorithm> sc = createSCascade_hidden();
- Ptr<Algorithm> sc1 = createDetector_hidden();
+ Ptr<Algorithm> sc = createSCascade_ptr_hidden();
+ Ptr<Algorithm> sc1 = createDetector_ptr_hidden();
return (sc1->info() != 0) && (sc->info() != 0);
}
class PlaneWarper : public WarperCreator
{
public:
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::PlaneWarper(scale); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::PlaneWarper>(scale); }
};
class CylindricalWarper: public WarperCreator
{
public:
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::CylindricalWarper(scale); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::CylindricalWarper>(scale); }
};
class SphericalWarper: public WarperCreator
{
public:
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::SphericalWarper(scale); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::SphericalWarper>(scale); }
};
class FisheyeWarper : public WarperCreator
{
public:
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::FisheyeWarper(scale); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::FisheyeWarper>(scale); }
};
class StereographicWarper: public WarperCreator
{
public:
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::StereographicWarper(scale); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::StereographicWarper>(scale); }
};
class CompressedRectilinearWarper: public WarperCreator
{
a = A; b = B;
}
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::CompressedRectilinearWarper(scale, a, b); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::CompressedRectilinearWarper>(scale, a, b); }
};
class CompressedRectilinearPortraitWarper: public WarperCreator
{
a = A; b = B;
}
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::CompressedRectilinearPortraitWarper(scale, a, b); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::CompressedRectilinearPortraitWarper>(scale, a, b); }
};
class PaniniWarper: public WarperCreator
{
a = A; b = B;
}
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::PaniniWarper(scale, a, b); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::PaniniWarper>(scale, a, b); }
};
class PaniniPortraitWarper: public WarperCreator
{
a = A; b = B;
}
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::PaniniPortraitWarper(scale, a, b); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::PaniniPortraitWarper>(scale, a, b); }
};
class MercatorWarper: public WarperCreator
{
public:
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::MercatorWarper(scale); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::MercatorWarper>(scale); }
};
class TransverseMercatorWarper: public WarperCreator
{
public:
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::TransverseMercatorWarper(scale); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::TransverseMercatorWarper>(scale); }
};
class PlaneWarperGpu: public WarperCreator
{
public:
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::PlaneWarperGpu(scale); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::PlaneWarperGpu>(scale); }
};
class CylindricalWarperGpu: public WarperCreator
{
public:
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::CylindricalWarperGpu(scale); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::CylindricalWarperGpu>(scale); }
};
class SphericalWarperGpu: public WarperCreator
{
public:
- Ptr<detail::RotationWarper> create(float scale) const { return new detail::SphericalWarperGpu(scale); }
+ Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::SphericalWarperGpu>(scale); }
};
#endif
imgs.push_back( imread( getDataPath("stitching/a3.png") ) );
Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb"
- ? (detail::FeaturesFinder*)new detail::OrbFeaturesFinder()
- : (detail::FeaturesFinder*)new detail::SurfFeaturesFinder();
+ ? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder())
+ : Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
- ? new detail::BestOf2NearestMatcher(false, ORB_MATCH_CONFIDENCE)
- : new detail::BestOf2NearestMatcher(false, SURF_MATCH_CONFIDENCE);
+ ? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
+ : makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
declare.time(30 * 20).iterations(20);
Stitcher stitcher = Stitcher::createDefault();
stitcher.setFeaturesFinder(featuresFinder);
stitcher.setFeaturesMatcher(featuresMatcher);
- stitcher.setWarper(new SphericalWarper());
+ stitcher.setWarper(makePtr<SphericalWarper>());
stitcher.setRegistrationResol(WORK_MEGAPIX);
startTimer();
imgs.push_back( imread( getDataPath("stitching/b2.png") ) );
Ptr<detail::FeaturesFinder> featuresFinder = GetParam() == "orb"
- ? (detail::FeaturesFinder*)new detail::OrbFeaturesFinder()
- : (detail::FeaturesFinder*)new detail::SurfFeaturesFinder();
+ ? Ptr<detail::FeaturesFinder>(new detail::OrbFeaturesFinder())
+ : Ptr<detail::FeaturesFinder>(new detail::SurfFeaturesFinder());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
- ? new detail::BestOf2NearestMatcher(false, ORB_MATCH_CONFIDENCE)
- : new detail::BestOf2NearestMatcher(false, SURF_MATCH_CONFIDENCE);
+ ? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
+ : makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
declare.time(30 * 20).iterations(20);
Stitcher stitcher = Stitcher::createDefault();
stitcher.setFeaturesFinder(featuresFinder);
stitcher.setFeaturesMatcher(featuresMatcher);
- stitcher.setWarper(new SphericalWarper());
+ stitcher.setWarper(makePtr<SphericalWarper>());
stitcher.setRegistrationResol(WORK_MEGAPIX);
startTimer();
Ptr<detail::FeaturesMatcher> matcher;
if (GetParam() == "surf")
{
- finder = new detail::SurfFeaturesFinder();
- matcher = new detail::BestOf2NearestMatcher(false, SURF_MATCH_CONFIDENCE);
+ finder = makePtr<detail::SurfFeaturesFinder>();
+ matcher = makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
}
else if (GetParam() == "orb")
{
- finder = new detail::OrbFeaturesFinder();
- matcher = new detail::BestOf2NearestMatcher(false, ORB_MATCH_CONFIDENCE);
+ finder = makePtr<detail::OrbFeaturesFinder>();
+ matcher = makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE);
}
else
{
int featuresVectorSize = get<1>(GetParam());
if (detectorName == "surf")
{
- finder = new detail::SurfFeaturesFinder();
- matcher = new detail::BestOf2NearestMatcher(false, SURF_MATCH_CONFIDENCE);
+ finder = makePtr<detail::SurfFeaturesFinder>();
+ matcher = makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
}
else if (detectorName == "orb")
{
- finder = new detail::OrbFeaturesFinder();
- matcher = new detail::BestOf2NearestMatcher(false, ORB_MATCH_CONFIDENCE);
+ finder = makePtr<detail::OrbFeaturesFinder>();
+ matcher = makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE);
}
else
{
Ptr<Blender> Blender::createDefault(int type, bool try_gpu)
{
if (type == NO)
- return new Blender();
+ return makePtr<Blender>();
if (type == FEATHER)
- return new FeatherBlender();
+ return makePtr<FeatherBlender>();
if (type == MULTI_BAND)
- return new MultiBandBlender(try_gpu);
+ return makePtr<MultiBandBlender>(try_gpu);
CV_Error(Error::StsBadArg, "unsupported blending method");
- return NULL;
+ return Ptr<Blender>();
}
Ptr<ExposureCompensator> ExposureCompensator::createDefault(int type)
{
if (type == NO)
- return new NoExposureCompensator();
+ return makePtr<NoExposureCompensator>();
if (type == GAIN)
- return new GainCompensator();
+ return makePtr<GainCompensator>();
if (type == GAIN_BLOCKS)
- return new BlocksGainCompensator();
+ return makePtr<BlocksGainCompensator>();
CV_Error(Error::StsBadArg, "unsupported exposure compensation method");
- return NULL;
+ return Ptr<ExposureCompensator>();
}
matches_info.matches.clear();
- Ptr<flann::IndexParams> indexParams = new flann::KDTreeIndexParams();
- Ptr<flann::SearchParams> searchParams = new flann::SearchParams();
+ Ptr<flann::IndexParams> indexParams = makePtr<flann::KDTreeIndexParams>();
+ Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>();
if (features2.descriptors.depth() == CV_8U)
{
if (num_octaves_descr == num_octaves && num_layers_descr == num_layers)
{
surf = Algorithm::create<Feature2D>("Feature2D.SURF");
- if( surf.empty() )
+ if( !surf )
CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
surf->set("hessianThreshold", hess_thresh);
surf->set("nOctaves", num_octaves);
detector_ = Algorithm::create<FeatureDetector>("Feature2D.SURF");
extractor_ = Algorithm::create<DescriptorExtractor>("Feature2D.SURF");
- if( detector_.empty() || extractor_.empty() )
+ if( !detector_ || !extractor_ )
CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
detector_->set("hessianThreshold", hess_thresh);
{
gray_image = image;
}
- if (surf.empty())
+ if (!surf)
{
detector_->detect(gray_image, features.keypoints);
extractor_->compute(gray_image, features.keypoints, features.descriptors);
OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, int n_features, float scaleFactor, int nlevels)
{
grid_size = _grid_size;
- orb = new ORB(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels);
+ orb = makePtr<ORB>(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels);
}
void OrbFeaturesFinder::find(const Mat &image, ImageFeatures &features)
#ifdef HAVE_OPENCV_CUDAFEATURES2D
if (try_use_gpu && getCudaEnabledDeviceCount() > 0)
{
- impl_ = new GpuMatcher(match_conf);
+ impl_ = makePtr<GpuMatcher>(match_conf);
}
else
#endif
{
- impl_ = new CpuMatcher(match_conf);
+ impl_ = makePtr<CpuMatcher>(match_conf);
}
is_thread_safe_ = impl_->isThreadSafe();
stitcher.setPanoConfidenceThresh(1);
stitcher.setWaveCorrection(true);
stitcher.setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);
- stitcher.setFeaturesMatcher(new detail::BestOf2NearestMatcher(try_use_gpu));
- stitcher.setBundleAdjuster(new detail::BundleAdjusterRay());
+ stitcher.setFeaturesMatcher(makePtr<detail::BestOf2NearestMatcher>(try_use_gpu));
+ stitcher.setBundleAdjuster(makePtr<detail::BundleAdjusterRay>());
#ifdef HAVE_OPENCV_CUDA
if (try_use_gpu && cuda::getCudaEnabledDeviceCount() > 0)
{
#ifdef HAVE_OPENCV_NONFREE
- stitcher.setFeaturesFinder(new detail::SurfFeaturesFinderGpu());
+ stitcher.setFeaturesFinder(makePtr<detail::SurfFeaturesFinderGpu>());
#else
- stitcher.setFeaturesFinder(new detail::OrbFeaturesFinder());
+ stitcher.setFeaturesFinder(makePtr<detail::OrbFeaturesFinder>());
#endif
- stitcher.setWarper(new SphericalWarperGpu());
- stitcher.setSeamFinder(new detail::GraphCutSeamFinderGpu());
+ stitcher.setWarper(makePtr<SphericalWarperGpu>());
+ stitcher.setSeamFinder(makePtr<detail::GraphCutSeamFinderGpu>());
}
else
#endif
{
#ifdef HAVE_OPENCV_NONFREE
- stitcher.setFeaturesFinder(new detail::SurfFeaturesFinder());
+ stitcher.setFeaturesFinder(makePtr<detail::SurfFeaturesFinder>());
#else
- stitcher.setFeaturesFinder(new detail::OrbFeaturesFinder());
+ stitcher.setFeaturesFinder(makePtr<detail::OrbFeaturesFinder>());
#endif
- stitcher.setWarper(new SphericalWarper());
- stitcher.setSeamFinder(new detail::GraphCutSeamFinder(detail::GraphCutSeamFinderBase::COST_COLOR));
+ stitcher.setWarper(makePtr<SphericalWarper>());
+ stitcher.setSeamFinder(makePtr<detail::GraphCutSeamFinder>(detail::GraphCutSeamFinderBase::COST_COLOR));
}
- stitcher.setExposureCompensator(new detail::BlocksGainCompensator());
- stitcher.setBlender(new detail::MultiBandBlender(try_use_gpu));
+ stitcher.setExposureCompensator(makePtr<detail::BlocksGainCompensator>());
+ stitcher.setBlender(makePtr<detail::MultiBandBlender>(try_use_gpu));
return stitcher;
}
TEST(SurfFeaturesFinder, CanFindInROIs)
{
- Ptr<detail::FeaturesFinder> finder = new detail::SurfFeaturesFinder();
+ Ptr<detail::FeaturesFinder> finder = makePtr<detail::SurfFeaturesFinder>();
Mat img = imread(string(cvtest::TS::ptr()->get_data_path()) + "cv/shared/lena.png");
vector<Rect> rois;
superRes->set("temporalAreaRadius", temporalAreaRadius);
superRes->set("opticalFlow", opticalFlow);
- superRes->setInput(new OneFrameSource_CUDA(GpuMat(frame)));
+ superRes->setInput(makePtr<OneFrameSource_CUDA>(GpuMat(frame)));
GpuMat dst;
superRes->nextFrame(dst);
superRes->set("temporalAreaRadius", temporalAreaRadius);
superRes->set("opticalFlow", opticalFlow);
- superRes->setInput(new OneFrameSource_CPU(frame));
+ superRes->setInput(makePtr<OneFrameSource_CPU>(frame));
Mat dst;
superRes->nextFrame(dst);
superRes_ocl->set("temporalAreaRadius", temporalAreaRadius);
superRes_ocl->set("opticalFlow", opticalFlowOcl);
- superRes_ocl->setInput(new OneFrameSource_OCL(frame_ocl));
+ superRes_ocl->setInput(makePtr<OneFrameSource_OCL>(frame_ocl));
ocl::oclMat dst_ocl;
superRes_ocl->nextFrame(dst_ocl);
// update blur filter and btv weights
- if (filter_.empty() || blurKernelSize_ != curBlurKernelSize_ || blurSigma_ != curBlurSigma_ || src[0].type() != curSrcType_)
+ if (!filter_ || blurKernelSize_ != curBlurKernelSize_ || blurSigma_ != curBlurSigma_ || src[0].type() != curSrcType_)
{
filter_ = createGaussianFilter(src[0].type(), Size(blurKernelSize_, blurKernelSize_), blurSigma_);
curBlurKernelSize_ = blurKernelSize_;
Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1()
{
- return new BTVL1;
+ return makePtr<BTVL1>();
}
Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1_CUDA()
{
- return new BTVL1_CUDA;
+ return makePtr<BTVL1_CUDA>();
}
#endif // HAVE_CUDA
Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1_OCL()
{
- return new BTVL1_OCL;
+ return makePtr<BTVL1_OCL>();
}
#endif
Ptr<FrameSource> cv::superres::createFrameSource_Empty()
{
- return new EmptyFrameSource;
+ return makePtr<EmptyFrameSource>();
}
//////////////////////////////////////////////////////
Ptr<FrameSource> cv::superres::createFrameSource_Video(const String& fileName)
{
- return new VideoFrameSource(fileName);
+ return makePtr<VideoFrameSource>(fileName);
}
Ptr<FrameSource> cv::superres::createFrameSource_Camera(int deviceId)
{
- return new CameraFrameSource(deviceId);
+ return makePtr<CameraFrameSource>(deviceId);
}
#endif // HAVE_OPENCV_HIGHGUI
Ptr<FrameSource> cv::superres::createFrameSource_Video_CUDA(const String& fileName)
{
- return new VideoFrameSource(fileName);
+ return makePtr<VideoFrameSource>(fileName);
}
#endif // HAVE_OPENCV_CUDACODEC
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback()
{
- return new Farneback;
+ return makePtr<Farneback>();
}
///////////////////////////////////////////////////////////////////
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Simple()
{
- return new Simple;
+ return makePtr<Simple>();
}
///////////////////////////////////////////////////////////////////
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1()
{
- return new DualTVL1;
+ return makePtr<DualTVL1>();
}
///////////////////////////////////////////////////////////////////
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_CUDA()
{
- return new Brox_CUDA;
+ return makePtr<Brox_CUDA>();
}
///////////////////////////////////////////////////////////////////
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_CUDA()
{
- return new PyrLK_CUDA;
+ return makePtr<PyrLK_CUDA>();
}
///////////////////////////////////////////////////////////////////
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_CUDA()
{
- return new Farneback_CUDA;
+ return makePtr<Farneback_CUDA>();
}
///////////////////////////////////////////////////////////////////
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_CUDA()
{
- return new DualTVL1_CUDA;
+ return makePtr<DualTVL1_CUDA>();
}
#endif // HAVE_OPENCV_CUDAOPTFLOW
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_OCL()
{
- return new PyrLK_OCL;
+ return makePtr<PyrLK_OCL>();
}
///////////////////////////////////////////////////////////////////
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_OCL()
{
- return new DualTVL1_OCL;
+ return makePtr<DualTVL1_OCL>();
}
///////////////////////////////////////////////////////////////////
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_OCL()
{
- return new FarneBack_OCL;
+ return makePtr<FarneBack_OCL>();
}
#endif
AllignedFrameSource::AllignedFrameSource(const cv::Ptr<cv::superres::FrameSource>& base, int scale) :
base_(base), scale_(scale)
{
- CV_Assert( !base_.empty() );
+ CV_Assert( base_ );
}
void AllignedFrameSource::nextFrame(cv::OutputArray frame)
DegradeFrameSource::DegradeFrameSource(const cv::Ptr<cv::superres::FrameSource>& base, int scale) :
base_(base), iscale_(1.0 / scale)
{
- CV_Assert( !base_.empty() );
+ CV_Assert( base_ );
}
void addGaussNoise(cv::Mat& image, double sigma)
superRes->set("temporalAreaRadius", temporalAreaRadius);
cv::Ptr<cv::superres::FrameSource> goldSource(new AllignedFrameSource(cv::superres::createFrameSource_Video(inputVideoName), scale));
- cv::Ptr<cv::superres::FrameSource> lowResSource(new DegradeFrameSource(new AllignedFrameSource(cv::superres::createFrameSource_Video(inputVideoName), scale), scale));
+ cv::Ptr<cv::superres::FrameSource> lowResSource(new DegradeFrameSource(
+ cv::makePtr<AllignedFrameSource>(cv::superres::createFrameSource_Video(inputVideoName), scale), scale));
// skip first frame
cv::Mat frame;
Ptr<BackgroundSubtractorMOG> createBackgroundSubtractorMOG(int history, int nmixtures,
double backgroundRatio, double noiseSigma)
{
- return new BackgroundSubtractorMOGImpl(history, nmixtures, backgroundRatio, noiseSigma);
+ return makePtr<BackgroundSubtractorMOGImpl>(history, nmixtures, backgroundRatio, noiseSigma);
}
}
Ptr<BackgroundSubtractorMOG2> createBackgroundSubtractorMOG2(int _history, double _varThreshold,
bool _bShadowDetection)
{
- return new BackgroundSubtractorMOG2Impl(_history, (float)_varThreshold, _bShadowDetection);
+ return makePtr<BackgroundSubtractorMOG2Impl>(_history, (float)_varThreshold, _bShadowDetection);
}
}
Ptr<BackgroundSubtractorGMG> createBackgroundSubtractorGMG(int initializationFrames, double decisionThreshold)
{
- Ptr<BackgroundSubtractorGMG> bgfg = new BackgroundSubtractorGMGImpl;
+ Ptr<BackgroundSubtractorGMG> bgfg = makePtr<BackgroundSubtractorGMGImpl>();
bgfg->setNumFrames(initializationFrames);
bgfg->setDecisionThreshold(decisionThreshold);
Ptr<DenseOpticalFlow> cv::createOptFlow_DualTVL1()
{
- return new OpticalFlowDual_TVL1;
+ return makePtr<OpticalFlowDual_TVL1>();
}
Ptr<BackgroundSubtractorGMG> fgbg = createBackgroundSubtractorGMG();
Mat fgmask;
- if (fgbg.empty())
+ if (!fgbg)
CV_Error(Error::StsError,"Failed to create Algorithm\n");
/**
void VideoFileSource::reset() { impl->reset(); }
Mat VideoFileSource::nextFrame() { return impl->nextFrame(); }
-int VideoFileSource::width() { return ((VideoFileSourceImpl*)impl.obj)->width(); }
-int VideoFileSource::height() { return ((VideoFileSourceImpl*)impl.obj)->height(); }
-int VideoFileSource::count() { return ((VideoFileSourceImpl*)impl.obj)->count(); }
-double VideoFileSource::fps() { return ((VideoFileSourceImpl*)impl.obj)->fps(); }
+int VideoFileSource::width() { return ((VideoFileSourceImpl*)impl.get())->width(); }
+int VideoFileSource::height() { return ((VideoFileSourceImpl*)impl.get())->height(); }
+int VideoFileSource::count() { return ((VideoFileSourceImpl*)impl.get())->count(); }
+double VideoFileSource::fps() { return ((VideoFileSourceImpl*)impl.get())->fps(); }
} // namespace videostab
} // namespace cv
KeypointBasedMotionEstimator::KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator)
: ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
- setDetector(new GoodFeaturesToTrackDetector());
- setOpticalFlowEstimator(new SparsePyrLkOptFlowEstimator());
- setOutlierRejector(new NullOutlierRejector());
+ setDetector(makePtr<GoodFeaturesToTrackDetector>());
+ setOpticalFlowEstimator(makePtr<SparsePyrLkOptFlowEstimator>());
+ setOutlierRejector(makePtr<NullOutlierRejector>());
}
// perform outlier rejection
- IOutlierRejector *outlRejector = static_cast<IOutlierRejector*>(outlierRejector_);
+ IOutlierRejector *outlRejector = outlierRejector_.get();
if (!dynamic_cast<NullOutlierRejector*>(outlRejector))
{
pointsPrev_.swap(pointsPrevGood_);
detector_ = cuda::createGoodFeaturesToTrackDetector(CV_8UC1);
CV_Assert(cuda::getCudaEnabledDeviceCount() > 0);
- setOutlierRejector(new NullOutlierRejector());
+ setOutlierRejector(makePtr<NullOutlierRejector>());
}
// perform outlier rejection
- IOutlierRejector *rejector = static_cast<IOutlierRejector*>(outlierRejector_);
+ IOutlierRejector *rejector = outlierRejector_.get();
if (!dynamic_cast<NullOutlierRejector*>(rejector))
{
outlierRejector_->process(frame0.size(), hostPointsPrev_, hostPoints_, rejectionStatus_);
MotionInpainter::MotionInpainter()
{
#ifdef HAVE_OPENCV_CUDAOPTFLOW
- setOptFlowEstimator(new DensePyrLkOptFlowEstimatorGpu());
+ setOptFlowEstimator(makePtr<DensePyrLkOptFlowEstimatorGpu>());
#else
CV_Error(Error::StsNotImplemented, "Current implementation of MotionInpainter requires CUDA");
#endif
model.scaling(1);
ClpPresolve presolveInfo;
- Ptr<ClpSimplex> presolvedModel = presolveInfo.presolvedModel(model);
+ Ptr<ClpSimplex> presolvedModel(presolveInfo.presolvedModel(model));
- if (!presolvedModel.empty())
+ if (presolvedModel)
{
presolvedModel->dual();
presolveInfo.postsolve(true);
StabilizerBase::StabilizerBase()
{
- setLog(new LogToStdout());
- setFrameSource(new NullFrameSource());
- setMotionEstimator(new KeypointBasedMotionEstimator(new MotionEstimatorRansacL2()));
- setDeblurer(new NullDeblurer());
- setInpainter(new NullInpainter());
+ setLog(makePtr<LogToStdout>());
+ setFrameSource(makePtr<NullFrameSource>());
+ setMotionEstimator(makePtr<KeypointBasedMotionEstimator>(makePtr<MotionEstimatorRansacL2>()));
+ setDeblurer(makePtr<NullDeblurer>());
+ setInpainter(makePtr<NullInpainter>());
setRadius(15);
setTrimRatio(0);
setCorrectionForInclusion(false);
void StabilizerBase::setUp(const Mat &firstFrame)
{
- InpainterBase *inpaint = static_cast<InpainterBase*>(inpainter_);
+ InpainterBase *inpaint = inpainter_.get();
doInpainting_ = dynamic_cast<NullInpainter*>(inpaint) == 0;
if (doInpainting_)
{
inpainter_->setStabilizationMotions(stabilizationMotions_);
}
- DeblurerBase *deblurer = static_cast<DeblurerBase*>(deblurer_);
+ DeblurerBase *deblurer = deblurer_.get();
doDeblurring_ = dynamic_cast<NullDeblurer*>(deblurer) == 0;
if (doDeblurring_)
{
OnePassStabilizer::OnePassStabilizer()
{
- setMotionFilter(new GaussianMotionFilter());
+ setMotionFilter(makePtr<GaussianMotionFilter>());
reset();
}
TwoPassStabilizer::TwoPassStabilizer()
{
- setMotionStabilizer(new GaussianMotionFilter());
- setWobbleSuppressor(new NullWobbleSuppressor());
+ setMotionStabilizer(makePtr<GaussianMotionFilter>());
+ setWobbleSuppressor(makePtr<NullWobbleSuppressor>());
setEstimateTrimRatio(false);
reset();
}
{
// check if we must do wobble suppression
- WobbleSuppressorBase *wobble = static_cast<WobbleSuppressorBase*>(wobbleSuppressor_);
+ WobbleSuppressorBase *wobble = wobbleSuppressor_.get();
doWobbleSuppression_ = dynamic_cast<NullWobbleSuppressor*>(wobble) == 0;
// estimate motions
for (int i = -radius_; i <= 0; ++i)
at(i, frames_) = firstFrame;
- WobbleSuppressorBase *wobble = static_cast<WobbleSuppressorBase*>(wobbleSuppressor_);
+ WobbleSuppressorBase *wobble = wobbleSuppressor_.get();
doWobbleSuppression_ = dynamic_cast<NullWobbleSuppressor*>(wobble) == 0;
if (doWobbleSuppression_)
{
WobbleSuppressorBase::WobbleSuppressorBase() : motions_(0), stabilizationMotions_(0)
{
- setMotionEstimator(new KeypointBasedMotionEstimator(new MotionEstimatorRansacL2(MM_HOMOGRAPHY)));
+ setMotionEstimator(makePtr<KeypointBasedMotionEstimator>(makePtr<MotionEstimatorRansacL2>(MM_HOMOGRAPHY)));
}
Detector(detector)
{
LOGD("CascadeDetectorAdapter::Detect::Detect");
- CV_Assert(!detector.empty());
+ CV_Assert(detector);
}
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
mainDetector(_mainDetector),
trackingDetector(_trackingDetector)
{
- CV_Assert(!_mainDetector.empty());
- CV_Assert(!_trackingDetector.empty());
+ CV_Assert(_mainDetector);
+ CV_Assert(_trackingDetector);
DetectionBasedTracker::Parameters DetectorParams;
- tracker = new DetectionBasedTracker(mainDetector.ptr<DetectionBasedTracker::IDetector>(), trackingDetector.ptr<DetectionBasedTracker::IDetector>(), DetectorParams);
+ tracker = makePtr<DetectionBasedTracker>(mainDetector, trackingDetector, DetectorParams);
}
};
try
{
- cv::Ptr<CascadeDetectorAdapter> mainDetector = new CascadeDetectorAdapter(new CascadeClassifier(stdFileName));
- cv::Ptr<CascadeDetectorAdapter> trackingDetector = new CascadeDetectorAdapter(new CascadeClassifier(stdFileName));
+ cv::Ptr<CascadeDetectorAdapter> mainDetector = makePtr<CascadeDetectorAdapter>(
+ makePtr<CascadeClassifier>(stdFileName));
+ cv::Ptr<CascadeDetectorAdapter> trackingDetector = makePtr<CascadeDetectorAdapter>(
+ makePtr<CascadeClassifier>(stdFileName));
result = (jlong)new DetectorAgregator(mainDetector, trackingDetector);
if (faceSize > 0)
{
Ptr<FeatureDetector> featureDetector = FeatureDetector::create( ddmParams.detectorType );
Ptr<DescriptorExtractor> descExtractor = DescriptorExtractor::create( ddmParams.descriptorType );
Ptr<BOWImgDescriptorExtractor> bowExtractor;
- if( featureDetector.empty() || descExtractor.empty() )
+ if( !featureDetector || !descExtractor )
{
cout << "featureDetector or descExtractor was not created" << endl;
return -1;
}
{
Ptr<DescriptorMatcher> descMatcher = DescriptorMatcher::create( ddmParams.matcherType );
- if( featureDetector.empty() || descExtractor.empty() || descMatcher.empty() )
+ if( !featureDetector || !descExtractor || !descMatcher )
{
cout << "descMatcher was not created" << endl;
return -1;
}
- bowExtractor = new BOWImgDescriptorExtractor( descExtractor, descMatcher );
+ bowExtractor = makePtr<BOWImgDescriptorExtractor>( descExtractor, descMatcher );
}
// Print configuration to screen
setNumThreads(8);
Ptr<BackgroundSubtractor> fgbg = createBackgroundSubtractorGMG(20, 0.7);
- if (fgbg.empty())
+ if (!fgbg)
{
std::cerr << "Failed to create BackgroundSubtractor.GMG Algorithm." << std::endl;
return -1;
IDetector(),
Detector(detector)
{
- CV_Assert(!detector.empty());
+ CV_Assert(detector);
}
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
}
std::string cascadeFrontalfilename = "../../data/lbpcascades/lbpcascade_frontalface.xml";
- cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
- cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
+ cv::Ptr<cv::CascadeClassifier> cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
+ cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = makePtr<CascadeDetectorAdapter>(cascade);
- cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
- cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
+ cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
+ cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = makePtr<CascadeDetectorAdapter>(cascade);
DetectionBasedTracker::Parameters params;
DetectionBasedTracker Detector(MainDetector, TrackingDetector, params);
{
cout << "< Evaluate descriptor matcher..." << endl;
vector<Point2f> curve;
- Ptr<GenericDescriptorMatcher> gdm = new VectorDescriptorMatcher( descriptorExtractor, descriptorMatcher );
+ Ptr<GenericDescriptorMatcher> gdm = makePtr<VectorDescriptorMatcher>( descriptorExtractor, descriptorMatcher );
evaluateGenericDescriptorMatcher( img1, img2, H12, keypoints1, keypoints2, 0, 0, curve, gdm );
Point2f firstPoint = *curve.begin();
int mactherFilterType = getMatcherFilterType( argv[4] );
bool eval = !isWarpPerspective ? false : (atoi(argv[6]) == 0 ? false : true);
cout << ">" << endl;
- if( detector.empty() || descriptorExtractor.empty() || descriptorMatcher.empty() )
+ if( !detector || !descriptorExtractor || !descriptorMatcher )
{
cout << "Can not create detector or descriptor exstractor or descriptor matcher of given types" << endl;
return -1;
CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
Detector(detector)
{
- CV_Assert(!detector.empty());
+ CV_Assert(detector);
}
void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
}
std::string cascadeFrontalfilename=cascadefile;
- cv::Ptr<cv::CascadeClassifier> cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
- cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = new CascadeDetectorAdapter(cascade);
+ cv::Ptr<cv::CascadeClassifier> cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
+ cv::Ptr<DetectionBasedTracker::IDetector> MainDetector = makePtr<CascadeDetectorAdapter>(cascade);
- cascade = new cv::CascadeClassifier(cascadeFrontalfilename);
- cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = new CascadeDetectorAdapter(cascade);
+ cascade = makePtr<cv::CascadeClassifier>(cascadeFrontalfilename);
+ cv::Ptr<DetectionBasedTracker::IDetector> TrackingDetector = makePtr<CascadeDetectorAdapter>(cascade);
DetectionBasedTracker::Parameters params;
DetectionBasedTracker fd(MainDetector, TrackingDetector, params);
{
defaultDetector = FeatureDetector::create( algName );
specificDetector = FeatureDetector::create( algName );
- if( defaultDetector.empty() )
+ if( !defaultDetector )
{
printf( "Algorithm can not be read\n" );
exit(-1);
defaultDescMatcher = GenericDescriptorMatcher::create( algName );
specificDescMatcher = GenericDescriptorMatcher::create( algName );
- if( defaultDescMatcher.empty() )
+ if( !defaultDescMatcher )
{
Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create( algName );
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create( matcherName );
- defaultDescMatcher = new VectorDescriptorMatch( extractor, matcher );
- specificDescMatcher = new VectorDescriptorMatch( extractor, matcher );
+ defaultDescMatcher = makePtr<VectorDescriptorMatch>( extractor, matcher );
+ specificDescMatcher = makePtr<VectorDescriptorMatch>( extractor, matcher );
- if( extractor.empty() || matcher.empty() )
+ if( !extractor || !matcher )
{
printf("Algorithm can not be read\n");
exit(-1);
virtual void readAlgorithm( )
{
string classifierFile = data_path + "/features2d/calonder_classifier.rtc";
- defaultDescMatcher = new VectorDescriptorMatch( new CalonderDescriptorExtractor<float>( classifierFile ),
- new BFMatcher(NORM_L2) );
+ defaultDescMatcher = makePtr<VectorDescriptorMatch>(
+ makePtr<CalonderDescriptorExtractor<float> >( classifierFile ),
+ makePtr<BFMatcher>(int(NORM_L2)));
specificDescMatcher = defaultDescMatcher;
}
};
readAllDatasetsRunParams();
- OneWayDescriptorBase *base = new OneWayDescriptorBase(patchSize, poseCount, pcaFilename,
- trainPath, trainImagesList);
+ Ptr<OneWayDescriptorBase> base(
+ new OneWayDescriptorBase(patchSize, poseCount, pcaFilename,
+ trainPath, trainImagesList));
- OneWayDescriptorMatch *match = new OneWayDescriptorMatch ();
+ Ptr<OneWayDescriptorMatch> match = makePtr<OneWayDescriptorMatch>();
match->initialize( OneWayDescriptorMatch::Params (), base );
defaultDescMatcher = match;
writeAllDatasetsRunParams();
Ptr<BaseQualityEvaluator> evals[] =
{
- new DetectorQualityEvaluator( "FAST", "quality-detector-fast" ),
- new DetectorQualityEvaluator( "GFTT", "quality-detector-gftt" ),
- new DetectorQualityEvaluator( "HARRIS", "quality-detector-harris" ),
- new DetectorQualityEvaluator( "MSER", "quality-detector-mser" ),
- new DetectorQualityEvaluator( "STAR", "quality-detector-star" ),
- new DetectorQualityEvaluator( "SIFT", "quality-detector-sift" ),
- new DetectorQualityEvaluator( "SURF", "quality-detector-surf" ),
-
- new DescriptorQualityEvaluator( "SIFT", "quality-descriptor-sift", "BruteForce" ),
- new DescriptorQualityEvaluator( "SURF", "quality-descriptor-surf", "BruteForce" ),
- new DescriptorQualityEvaluator( "FERN", "quality-descriptor-fern"),
- new CalonderDescriptorQualityEvaluator()
+ makePtr<DetectorQualityEvaluator>( "FAST", "quality-detector-fast" ),
+ makePtr<DetectorQualityEvaluator>( "GFTT", "quality-detector-gftt" ),
+ makePtr<DetectorQualityEvaluator>( "HARRIS", "quality-detector-harris" ),
+ makePtr<DetectorQualityEvaluator>( "MSER", "quality-detector-mser" ),
+ makePtr<DetectorQualityEvaluator>( "STAR", "quality-detector-star" ),
+ makePtr<DetectorQualityEvaluator>( "SIFT", "quality-detector-sift" ),
+ makePtr<DetectorQualityEvaluator>( "SURF", "quality-detector-surf" ),
+
+ makePtr<DescriptorQualityEvaluator>( "SIFT", "quality-descriptor-sift", "BruteForce" ),
+ makePtr<DescriptorQualityEvaluator>( "SURF", "quality-descriptor-surf", "BruteForce" ),
+ makePtr<DescriptorQualityEvaluator>( "FERN", "quality-descriptor-fern"),
+ makePtr<CalonderDescriptorQualityEvaluator>()
};
for( size_t i = 0; i < sizeof(evals)/sizeof(evals[0]); i++ )
//generate test data
cout << "Extracting Test Data from images" << endl <<
endl;
- Ptr<FeatureDetector> detector =
+ Ptr<FeatureDetector> detector(
new DynamicAdaptedFeatureDetector(
- AdjusterAdapter::create("STAR"), 130, 150, 5);
- Ptr<DescriptorExtractor> extractor =
- new SurfDescriptorExtractor(1000, 4, 2, false, true);
+ AdjusterAdapter::create("STAR"), 130, 150, 5));
+ Ptr<DescriptorExtractor> extractor(
+ new SurfDescriptorExtractor(1000, 4, 2, false, true));
Ptr<DescriptorMatcher> matcher =
DescriptorMatcher::create("FlannBased");
endl;
Ptr<of2::FabMap> fabmap;
- fabmap = new of2::FabMap2(tree, 0.39, 0, of2::FabMap::SAMPLED |
- of2::FabMap::CHOW_LIU);
+ fabmap.reset(new of2::FabMap2(tree, 0.39, 0, of2::FabMap::SAMPLED |
+ of2::FabMap::CHOW_LIU));
fabmap->addTraining(trainData);
vector<of2::IMatch> matches;
std::string params_filename = std::string(argv[4]);
Ptr<GenericDescriptorMatcher> descriptorMatcher = GenericDescriptorMatcher::create(alg_name, params_filename);
- if( descriptorMatcher.empty() )
+ if( !descriptorMatcher )
{
printf ("Cannot create descriptor\n");
return 0;
help();
const char* imagename = argc > 1 ? argv[1] : "lena.jpg";
#if DEMO_MIXED_API_USE
- Ptr<IplImage> iplimg = cvLoadImage(imagename); // Ptr<T> is safe ref-conting pointer class
- if(iplimg.empty())
+ Ptr<IplImage> iplimg(cvLoadImage(imagename)); // Ptr<T> is a safe ref-counting pointer class
+ if(!iplimg)
{
fprintf(stderr, "Can not load image %s\n", imagename);
return -1;
// Functions to store detector and templates in single XML/YAML file
static cv::Ptr<cv::linemod::Detector> readLinemod(const std::string& filename)
{
- cv::Ptr<cv::linemod::Detector> detector = new cv::linemod::Detector;
+ cv::Ptr<cv::linemod::Detector> detector = cv::makePtr<cv::linemod::Detector>();
cv::FileStorage fs(filename, cv::FileStorage::READ);
detector->read(fs.root());
descriptorMatcher = DescriptorMatcher::create( matcherType );
cout << ">" << endl;
- bool isCreated = !( featureDetector.empty() || descriptorExtractor.empty() || descriptorMatcher.empty() );
+ bool isCreated = featureDetector && descriptorExtractor && descriptorMatcher;
if( !isCreated )
cout << "Can not create feature detector or descriptor extractor or descriptor matcher of given types." << endl << ">" << endl;
{
#ifdef HAVE_OPENCV_NONFREE
if (try_gpu && cuda::getCudaEnabledDeviceCount() > 0)
- finder = new SurfFeaturesFinderGpu();
+ finder = makePtr<SurfFeaturesFinderGpu>();
else
#endif
- finder = new SurfFeaturesFinder();
+ finder = makePtr<SurfFeaturesFinder>();
}
else if (features_type == "orb")
{
- finder = new OrbFeaturesFinder();
+ finder = makePtr<OrbFeaturesFinder>();
}
else
{
}
Ptr<detail::BundleAdjusterBase> adjuster;
- if (ba_cost_func == "reproj") adjuster = new detail::BundleAdjusterReproj();
- else if (ba_cost_func == "ray") adjuster = new detail::BundleAdjusterRay();
+ if (ba_cost_func == "reproj") adjuster = makePtr<detail::BundleAdjusterReproj>();
+ else if (ba_cost_func == "ray") adjuster = makePtr<detail::BundleAdjusterRay>();
else
{
cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
#ifdef HAVE_OPENCV_CUDAWARPING
if (try_gpu && cuda::getCudaEnabledDeviceCount() > 0)
{
- if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();
- else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();
- else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();
+ if (warp_type == "plane")
+ warper_creator = makePtr<cv::PlaneWarperGpu>();
+ else if (warp_type == "cylindrical")
+ warper_creator = makePtr<cv::CylindricalWarperGpu>();
+ else if (warp_type == "spherical")
+ warper_creator = makePtr<cv::SphericalWarperGpu>();
}
else
#endif
{
- if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
- else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
- else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
- else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
- else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
- else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
- else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
- else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
- else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
- else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
- else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
- else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
- else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
- else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
- else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
+ if (warp_type == "plane")
+ warper_creator = makePtr<cv::PlaneWarper>();
+ else if (warp_type == "cylindrical")
+ warper_creator = makePtr<cv::CylindricalWarper>();
+ else if (warp_type == "spherical")
+ warper_creator = makePtr<cv::SphericalWarper>();
+ else if (warp_type == "fisheye")
+ warper_creator = makePtr<cv::FisheyeWarper>();
+ else if (warp_type == "stereographic")
+ warper_creator = makePtr<cv::StereographicWarper>();
+ else if (warp_type == "compressedPlaneA2B1")
+ warper_creator = makePtr<cv::CompressedRectilinearWarper>(2.0f, 1.0f);
+ else if (warp_type == "compressedPlaneA1.5B1")
+ warper_creator = makePtr<cv::CompressedRectilinearWarper>(1.5f, 1.0f);
+ else if (warp_type == "compressedPlanePortraitA2B1")
+ warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(2.0f, 1.0f);
+ else if (warp_type == "compressedPlanePortraitA1.5B1")
+ warper_creator = makePtr<cv::CompressedRectilinearPortraitWarper>(1.5f, 1.0f);
+ else if (warp_type == "paniniA2B1")
+ warper_creator = makePtr<cv::PaniniWarper>(2.0f, 1.0f);
+ else if (warp_type == "paniniA1.5B1")
+ warper_creator = makePtr<cv::PaniniWarper>(1.5f, 1.0f);
+ else if (warp_type == "paniniPortraitA2B1")
+ warper_creator = makePtr<cv::PaniniPortraitWarper>(2.0f, 1.0f);
+ else if (warp_type == "paniniPortraitA1.5B1")
+ warper_creator = makePtr<cv::PaniniPortraitWarper>(1.5f, 1.0f);
+ else if (warp_type == "mercator")
+ warper_creator = makePtr<cv::MercatorWarper>();
+ else if (warp_type == "transverseMercator")
+ warper_creator = makePtr<cv::TransverseMercatorWarper>();
}
- if (warper_creator.empty())
+ if (!warper_creator)
{
cout << "Can't create the following warper '" << warp_type << "'\n";
return 1;
Ptr<SeamFinder> seam_finder;
if (seam_find_type == "no")
- seam_finder = new detail::NoSeamFinder();
+ seam_finder = makePtr<detail::NoSeamFinder>();
else if (seam_find_type == "voronoi")
- seam_finder = new detail::VoronoiSeamFinder();
+ seam_finder = makePtr<detail::VoronoiSeamFinder>();
else if (seam_find_type == "gc_color")
{
#ifdef HAVE_OPENCV_CUDA
if (try_gpu && cuda::getCudaEnabledDeviceCount() > 0)
- seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR);
+ seam_finder = makePtr<detail::GraphCutSeamFinderGpu>(GraphCutSeamFinderBase::COST_COLOR);
else
#endif
- seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);
+ seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR);
}
else if (seam_find_type == "gc_colorgrad")
{
#ifdef HAVE_OPENCV_CUDA
if (try_gpu && cuda::getCudaEnabledDeviceCount() > 0)
- seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR_GRAD);
+ seam_finder = makePtr<detail::GraphCutSeamFinderGpu>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
else
#endif
- seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR_GRAD);
+ seam_finder = makePtr<detail::GraphCutSeamFinder>(GraphCutSeamFinderBase::COST_COLOR_GRAD);
}
else if (seam_find_type == "dp_color")
- seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR);
+ seam_finder = makePtr<detail::DpSeamFinder>(DpSeamFinder::COLOR);
else if (seam_find_type == "dp_colorgrad")
- seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR_GRAD);
- if (seam_finder.empty())
+ seam_finder = makePtr<detail::DpSeamFinder>(DpSeamFinder::COLOR_GRAD);
+ if (!seam_finder)
{
cout << "Can't create the following seam finder '" << seam_find_type << "'\n";
return 1;
resize(dilated_mask, seam_mask, mask_warped.size());
mask_warped = seam_mask & mask_warped;
- if (blender.empty())
+ if (!blender)
{
blender = Blender::createDefault(blend_type, try_gpu);
Size dst_sz = resultRoi(corners, sizes).size();
blender = Blender::createDefault(Blender::NO, try_gpu);
else if (blend_type == Blender::MULTI_BAND)
{
- MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
+ MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(blender.get());
mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
LOGLN("Multi-band blender, number of bands: " << mb->numBands());
}
else if (blend_type == Blender::FEATHER)
{
- FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));
+ FeatherBlender* fb = dynamic_cast<FeatherBlender*>(blender.get());
fb->setSharpness(1.f/blend_width);
LOGLN("Feather blender, sharpness: " << fb->sharpness());
}
const char* imagename = argc > 1 ? argv[1] : "lena.jpg";
#ifdef DEMO_MIXED_API_USE
- Ptr<IplImage> IplI = cvLoadImage(imagename); // Ptr<T> is safe ref-counting pointer class
- if(IplI.empty())
+ Ptr<IplImage> IplI(cvLoadImage(imagename)); // Ptr<T> is a safe ref-counting pointer class
+ if(!IplI)
{
cerr << "Can not load image " << imagename << endl;
return -1;
Mat train_desc, query_desc;
const int DESIRED_FTRS = 500;
- GridAdaptedFeatureDetector detector(new FastFeatureDetector(10, true), DESIRED_FTRS, 4, 4);
+ GridAdaptedFeatureDetector detector(makePtr<FastFeatureDetector>(10, true), DESIRED_FTRS, 4, 4);
Mat H_prev = Mat::eye(3, 3, CV_32FC1);
for (;;)
virtual Ptr<ImageMotionEstimatorBase> build()
{
- MotionEstimatorRansacL2 *est = new MotionEstimatorRansacL2(motionModel(arg(prefix + "model")));
+ Ptr<MotionEstimatorRansacL2> est = makePtr<MotionEstimatorRansacL2>(motionModel(arg(prefix + "model")));
RansacParams ransac = est->ransacParams();
if (arg(prefix + "subset") != "auto")
est->setMinInlierRatio(argf(prefix + "min-inlier-ratio"));
- Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
+ Ptr<IOutlierRejector> outlierRejector = makePtr<NullOutlierRejector>();
if (arg(prefix + "local-outlier-rejection") == "yes")
{
- TranslationBasedLocalOutlierRejector *tblor = new TranslationBasedLocalOutlierRejector();
+ Ptr<TranslationBasedLocalOutlierRejector> tblor = makePtr<TranslationBasedLocalOutlierRejector>();
RansacParams ransacParams = tblor->ransacParams();
if (arg(prefix + "thresh") != "auto")
ransacParams.thresh = argf(prefix + "thresh");
#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW)
if (gpu)
{
- KeypointBasedMotionEstimatorGpu *kbest = new KeypointBasedMotionEstimatorGpu(est);
+ Ptr<KeypointBasedMotionEstimatorGpu> kbest = makePtr<KeypointBasedMotionEstimatorGpu>(est);
kbest->setOutlierRejector(outlierRejector);
return kbest;
}
#endif
- KeypointBasedMotionEstimator *kbest = new KeypointBasedMotionEstimator(est);
- kbest->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
+ Ptr<KeypointBasedMotionEstimator> kbest = makePtr<KeypointBasedMotionEstimator>(est);
+ kbest->setDetector(makePtr<GoodFeaturesToTrackDetector>(argi(prefix + "nkps")));
kbest->setOutlierRejector(outlierRejector);
return kbest;
}
virtual Ptr<ImageMotionEstimatorBase> build()
{
- MotionEstimatorL1 *est = new MotionEstimatorL1(motionModel(arg(prefix + "model")));
+ Ptr<MotionEstimatorL1> est = makePtr<MotionEstimatorL1>(motionModel(arg(prefix + "model")));
- Ptr<IOutlierRejector> outlierRejector = new NullOutlierRejector();
+ Ptr<IOutlierRejector> outlierRejector = makePtr<NullOutlierRejector>();
if (arg(prefix + "local-outlier-rejection") == "yes")
{
- TranslationBasedLocalOutlierRejector *tblor = new TranslationBasedLocalOutlierRejector();
+ Ptr<TranslationBasedLocalOutlierRejector> tblor = makePtr<TranslationBasedLocalOutlierRejector>();
RansacParams ransacParams = tblor->ransacParams();
if (arg(prefix + "thresh") != "auto")
ransacParams.thresh = argf(prefix + "thresh");
#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDA) && defined(HAVE_OPENCV_CUDAOPTFLOW)
if (gpu)
{
- KeypointBasedMotionEstimatorGpu *kbest = new KeypointBasedMotionEstimatorGpu(est);
+ Ptr<KeypointBasedMotionEstimatorGpu> kbest = makePtr<KeypointBasedMotionEstimatorGpu>(est);
kbest->setOutlierRejector(outlierRejector);
return kbest;
}
#endif
- KeypointBasedMotionEstimator *kbest = new KeypointBasedMotionEstimator(est);
- kbest->setDetector(new GoodFeaturesToTrackDetector(argi(prefix + "nkps")));
+ Ptr<KeypointBasedMotionEstimator> kbest = makePtr<KeypointBasedMotionEstimator>(est);
+ kbest->setDetector(makePtr<GoodFeaturesToTrackDetector>(argi(prefix + "nkps")));
kbest->setOutlierRejector(outlierRejector);
return kbest;
}
// get source video parameters
- VideoFileSource *source = new VideoFileSource(inputPath);
+ Ptr<VideoFileSource> source = makePtr<VideoFileSource>(inputPath);
cout << "frame count (rough): " << source->count() << endl;
if (arg("fps") == "auto")
outputFps = source->fps();
Ptr<IMotionEstimatorBuilder> motionEstBuilder;
if (arg("lin-prog-motion-est") == "yes")
- motionEstBuilder = new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes");
+ motionEstBuilder.reset(new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes"));
else
- motionEstBuilder = new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes");
+ motionEstBuilder.reset(new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes"));
Ptr<IMotionEstimatorBuilder> wsMotionEstBuilder;
if (arg("ws-lp") == "yes")
- wsMotionEstBuilder = new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes", "ws-");
+ wsMotionEstBuilder.reset(new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes", "ws-"));
else
- wsMotionEstBuilder = new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes", "ws-");
+ wsMotionEstBuilder.reset(new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes", "ws-"));
// determine whether we must use one pass or two pass stabilizer
bool isTwoPass =
if (arg("lin-prog-stab") == "yes")
{
- LpMotionStabilizer *stab = new LpMotionStabilizer();
+ Ptr<LpMotionStabilizer> stab = makePtr<LpMotionStabilizer>();
stab->setFrameSize(Size(source->width(), source->height()));
stab->setTrimRatio(arg("lps-trim-ratio") == "auto" ? argf("trim-ratio") : argf("lps-trim-ratio"));
stab->setWeight1(argf("lps-w1"));
twoPassStabilizer->setMotionStabilizer(stab);
}
else if (arg("stdev") == "auto")
- twoPassStabilizer->setMotionStabilizer(new GaussianMotionFilter(argi("radius")));
+ twoPassStabilizer->setMotionStabilizer(makePtr<GaussianMotionFilter>(argi("radius")));
else
- twoPassStabilizer->setMotionStabilizer(new GaussianMotionFilter(argi("radius"), argf("stdev")));
+ twoPassStabilizer->setMotionStabilizer(makePtr<GaussianMotionFilter>(argi("radius"), argf("stdev")));
// init wobble suppressor if necessary
if (arg("wobble-suppress") == "yes")
{
- MoreAccurateMotionWobbleSuppressorBase *ws = new MoreAccurateMotionWobbleSuppressor();
+ Ptr<MoreAccurateMotionWobbleSuppressorBase> ws = makePtr<MoreAccurateMotionWobbleSuppressor>();
if (arg("gpu") == "yes")
#ifdef HAVE_OPENCV_CUDA
- ws = new MoreAccurateMotionWobbleSuppressorGpu();
+ ws = makePtr<MoreAccurateMotionWobbleSuppressorGpu>();
#else
throw runtime_error("OpenCV is built without CUDA support");
#endif
MotionModel model = ws->motionEstimator()->motionModel();
if (arg("load-motions2") != "no")
{
- ws->setMotionEstimator(new FromFileMotionReader(arg("load-motions2")));
+ ws->setMotionEstimator(makePtr<FromFileMotionReader>(arg("load-motions2")));
ws->motionEstimator()->setMotionModel(model);
}
if (arg("save-motions2") != "no")
{
- ws->setMotionEstimator(new ToFileMotionWriter(arg("save-motions2"), ws->motionEstimator()));
+ ws->setMotionEstimator(makePtr<ToFileMotionWriter>(arg("save-motions2"), ws->motionEstimator()));
ws->motionEstimator()->setMotionModel(model);
}
}
OnePassStabilizer *onePassStabilizer = new OnePassStabilizer();
stabilizer = onePassStabilizer;
if (arg("stdev") == "auto")
- onePassStabilizer->setMotionFilter(new GaussianMotionFilter(argi("radius")));
+ onePassStabilizer->setMotionFilter(makePtr<GaussianMotionFilter>(argi("radius")));
else
- onePassStabilizer->setMotionFilter(new GaussianMotionFilter(argi("radius"), argf("stdev")));
+ onePassStabilizer->setMotionFilter(makePtr<GaussianMotionFilter>(argi("radius"), argf("stdev")));
}
stabilizer->setFrameSource(source);
stabilizer->setMotionEstimator(motionEstBuilder->build());
// cast stabilizer to simple frame source interface to read stabilized frames
- stabilizedFrames = dynamic_cast<IFrameSource*>(stabilizer);
+ stabilizedFrames.reset(dynamic_cast<IFrameSource*>(stabilizer));
MotionModel model = stabilizer->motionEstimator()->motionModel();
if (arg("load-motions") != "no")
{
- stabilizer->setMotionEstimator(new FromFileMotionReader(arg("load-motions")));
+ stabilizer->setMotionEstimator(makePtr<FromFileMotionReader>(arg("load-motions")));
stabilizer->motionEstimator()->setMotionModel(model);
}
if (arg("save-motions") != "no")
{
- stabilizer->setMotionEstimator(new ToFileMotionWriter(arg("save-motions"), stabilizer->motionEstimator()));
+ stabilizer->setMotionEstimator(makePtr<ToFileMotionWriter>(arg("save-motions"), stabilizer->motionEstimator()));
stabilizer->motionEstimator()->setMotionModel(model);
}
// init deblurer
if (arg("deblur") == "yes")
{
- WeightingDeblurer *deblurer = new WeightingDeblurer();
+ Ptr<WeightingDeblurer> deblurer = makePtr<WeightingDeblurer>();
deblurer->setRadius(argi("radius"));
deblurer->setSensitivity(argf("deblur-sens"));
stabilizer->setDeblurer(deblurer);
Ptr<InpainterBase> inpainters_(inpainters);
if (arg("mosaic") == "yes")
{
- ConsistentMosaicInpainter *inp = new ConsistentMosaicInpainter();
+ Ptr<ConsistentMosaicInpainter> inp = makePtr<ConsistentMosaicInpainter>();
inp->setStdevThresh(argf("mosaic-stdev"));
inpainters->pushBack(inp);
}
if (arg("motion-inpaint") == "yes")
{
- MotionInpainter *inp = new MotionInpainter();
+ Ptr<MotionInpainter> inp = makePtr<MotionInpainter>();
inp->setDistThreshold(argf("mi-dist-thresh"));
inpainters->pushBack(inp);
}
if (arg("color-inpaint") == "average")
- inpainters->pushBack(new ColorAverageInpainter());
+ inpainters->pushBack(makePtr<ColorAverageInpainter>());
else if (arg("color-inpaint") == "ns")
- inpainters->pushBack(new ColorInpainter(INPAINT_NS, argd("ci-radius")));
+ inpainters->pushBack(makePtr<ColorInpainter>(int(INPAINT_NS), argd("ci-radius")));
else if (arg("color-inpaint") == "telea")
- inpainters->pushBack(new ColorInpainter(INPAINT_TELEA, argd("ci-radius")));
+ inpainters->pushBack(makePtr<ColorInpainter>(int(INPAINT_TELEA), argd("ci-radius")));
else if (arg("color-inpaint") != "no")
throw runtime_error("unknown color inpainting method: " + arg("color-inpaint"));
if (!inpainters->empty())
namespace cv
{
- template<> void Ptr<CvBGStatModel>::delete_obj()
+ template<> void DefaultDeleter<CvBGStatModel>::operator ()(CvBGStatModel* obj) const
{
cvReleaseBGStatModel(&obj);
}
{
cuda::setDevice(0);
d_algs[0] = cuda::createStereoBM(256);
- streams[0] = new Stream;
+ streams[0] = makePtr<Stream>();
cuda::setDevice(1);
d_algs[1] = cuda::createStereoBM(256);
- streams[1] = new Stream;
+ streams[1] = makePtr<Stream>();
}
StereoMultiGpuStream::~StereoMultiGpuStream()
{
cerr << "Incorrect Optical Flow algorithm - " << name << endl;
}
- return 0;
+ return Ptr<DenseOpticalFlowExt>();
}
#if defined(HAVE_OPENCV_OCL)
static Ptr<DenseOpticalFlowExt> createOptFlow(const string& name)
else if (name == "brox")
{
std::cout<<"brox has not been implemented!\n";
- return NULL;
+ return Ptr<DenseOpticalFlowExt>();
}
else if (name == "pyrlk")
return createOptFlow_PyrLK_OCL();
{
cerr << "Incorrect Optical Flow algorithm - " << name << endl;
}
- return 0;
+ return Ptr<DenseOpticalFlowExt>();
}
#endif
int main(int argc, const char* argv[])
frameSource.release();
}
}
- if (frameSource.empty())
+ if (!frameSource)
frameSource = createFrameSource_Video(inputVideoName);
// skip first frame, it is usually corrupted