From: Maria Dimashova
Date: Thu, 5 Aug 2010 12:19:26 +0000 (+0000)
Subject: moved detector/descriptor evaluation functions from the tests to features2d; modified...
X-Git-Tag: accepted/2.0/20130307.220821~4615
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=121e51d35bb5e261dceae1667c07935734f02931;p=profile%2Fivi%2Fopencv.git

moved detector/descriptor evaluation functions from the tests to features2d; modified
VectorDescriptorMatch constructor; removed commented Calonder descriptor implementation
---

diff --git a/modules/features2d/include/opencv2/features2d/features2d.hpp b/modules/features2d/include/opencv2/features2d/features2d.hpp
index 1716d58..88aa105 100644
--- a/modules/features2d/include/opencv2/features2d/features2d.hpp
+++ b/modules/features2d/include/opencv2/features2d/features2d.hpp
@@ -811,127 +811,6 @@ private:
     bool keep_floats_;
 };
 
-#if 0
-class CV_EXPORTS CalonderClassifier
-{
-public:
-    CalonderClassifier();
-    CalonderClassifier( const vector<vector<Point2f> >& points, const vector<Mat>& refimgs,
-                        const vector<vector<int> >& labels=vector<vector<int> >(), int _numClasses=0,
-                        int _pathSize=DEFAULT_PATCH_SIZE,
-                        int _numTrees=DEFAULT_NUM_TREES,
-                        int _treeDepth=DEFAULT_TREE_DEPTH,
-                        int _numViews=DEFAULT_NUM_VIEWS,
-                        int _compressedDim=DEFAULT_COMPRESSED_DIM,
-                        int _compressType=DEFAULT_COMPRESS_TYPE,
-                        int _numQuantBits=DEFAULT_NUM_QUANT_BITS,
-                        const PatchGenerator& patchGenerator=PatchGenerator() );
-
-    virtual ~CalonderClassifier();
-    virtual void clear();
-
-    void train( const vector<vector<Point2f> >& points, const vector<Mat>& refimgs,
-                const vector<vector<int> >& labels=vector<vector<int> >(), int _nclasses=0,
-                int _pathSize=DEFAULT_PATCH_SIZE,
-                int _numTrees=DEFAULT_NUM_TREES,
-                int _treeDepth=DEFAULT_TREE_DEPTH,
-                int _numViews=DEFAULT_NUM_VIEWS,
-                int _compressedDim=DEFAULT_COMPRESSED_DIM,
-                int _compressType=DEFAULT_COMPRESS_TYPE,
-                int _numQuantBits=DEFAULT_NUM_QUANT_BITS,
-                const PatchGenerator& patchGenerator=PatchGenerator() );
-
-    virtual void operator()(const Mat& img, Point2f pt, vector<float>& signature, float thresh=0.f) const;
-    virtual void operator()(const Mat& patch, vector<float>& signature, float thresh=-1.f) const;
-#define QUANTIZATION_AVAILABLE 1
-#if QUANTIZATION_AVAILABLE
-    void quantizePosteriors( int _numQuantBits, bool isClearFloatPosteriors=false );
-    void clearFloatPosteriors();
-    virtual void operator()(const Mat& img, Point2f pt, vector<uchar>& signature, uchar thresh=-1.f) const;
-    virtual void operator()(const Mat& patch, vector<uchar>& signature, uchar thresh=-1.f) const;
-#endif
-
-    void read( const FileNode& fn );
-    void read( std::istream& is );
-    void write( FileStorage& fs ) const;
-
-    bool empty() const;
-
-    void setVerbose( bool _verbose );
-
-    int getPatchSize() const;
-    int getNumTrees() const;
-    int getTreeDepth() const;
-    int getNumViews() const;
-    int getSignatureSize() const;
-    int getCompressType() const;
-    int getNumQuantBits() const;
-    int getOrigNumClasses() const;
-
-
-    enum
-    {
-        COMPRESS_NONE = -1,
-        COMPRESS_DISTR_GAUSS = 0,
-        COMPRESS_DISTR_BERNOULLI = 1,
-        COMPRESS_DISTR_DBFRIENDLY = 2,
-    };
-
-    static float GET_LOWER_QUANT_PERC() { return .03f; }
-    static float GET_UPPER_QUANT_PERC() { return .92f; }
-
-    enum
-    {
-        MAX_NUM_QUANT_BITS = 8,
-        DEFAULT_PATCH_SIZE = 32,
-        DEFAULT_NUM_TREES = 48,
-        DEFAULT_TREE_DEPTH = 9,
-        DEFAULT_NUM_VIEWS = 500,
-        DEFAULT_COMPRESSED_DIM = 176,
-        DEFAULT_COMPRESS_TYPE = COMPRESS_DISTR_BERNOULLI,
-        DEFAULT_NUM_QUANT_BITS = -1,
-    };
-private:
-    void prepare( int _patchSize, int _signatureSize, int _numTrees, int _treeDepth, int _numViews );
-
-    int getLeafIdx( int treeIdx, const Mat&
patch ) const; - void finalize( int _compressedDim, int _compressType, int _numQuantBits, - const vector& leafSampleCounters); - - void compressLeaves( int _compressedDim, int _compressType ); - - bool verbose; - - int patchSize; - int signatureSize; - int numTrees; - int treeDepth; - int numViews; - - int origNumClasses; - int compressType; - int numQuantBits; - - int numLeavesPerTree; - int numNodesPerTree; - - struct Node - { - uchar x1, y1, x2, y2; - Node() : x1(0), y1(0), x2(0), y2(0) {} - Node( uchar _x1, uchar _y1, uchar _x2, uchar _y2 ) : x1(_x1), y1(_y1), x2(_x2), y2(_y2) - {} - int operator() (const Mat_& patch) const - { return patch(y1,x1) > patch(y2, x2) ? 1 : 0; } - }; - vector nodes; - vector posteriors; -#if QUANTIZATION_AVAILABLE - vector quantizedPosteriors; -#endif -}; -#endif - /****************************************************************************************\ * One-Way Descriptor * \****************************************************************************************/ @@ -1515,6 +1394,10 @@ protected: virtual void detectImpl( const Mat& image, const Mat& mask, vector& keypoints ) const; }; + +CV_EXPORTS Mat windowedMatchingMask( const vector& keypoints1, const vector& keypoints2, + float maxDeltaX, float maxDeltaY ); + /****************************************************************************************\ * DescriptorExtractor * \****************************************************************************************/ @@ -2274,8 +2157,8 @@ class CV_EXPORTS VectorDescriptorMatch : public GenericDescriptorMatch public: using GenericDescriptorMatch::add; - VectorDescriptorMatch( DescriptorExtractor *_extractor = 0, DescriptorMatcher * _matcher = 0 ) : - extractor( _extractor ), matcher( _matcher ) {} + VectorDescriptorMatch( const Ptr& _extractor, const Ptr& _matcher ) + : extractor( _extractor ), matcher( _matcher ) {} ~VectorDescriptorMatch() {} @@ -2303,10 +2186,9 @@ protected: //vector classIds; }; -CV_EXPORTS Mat windowedMatchingMask( const vector& keypoints1, const vector& keypoints2, - float maxDeltaX, float maxDeltaY ); - - +/****************************************************************************************\ +* Drawing functions * +\****************************************************************************************/ struct CV_EXPORTS DrawMatchesFlags { enum{ DEFAULT = 0, // Output image matrix will be created (Mat::create), @@ -2318,7 +2200,7 @@ struct CV_EXPORTS DrawMatchesFlags // Matches will be drawn on existing content of output image. NOT_DRAW_SINGLE_POINTS = 2, // Single keypoints will not be drawn. DRAW_RICH_KEYPOINTS = 4 // For each keypoint the circle around keypoint with keypoint size and - // orientation will be drawn. + // orientation will be drawn. 
}; }; @@ -2345,7 +2227,28 @@ CV_EXPORTS void drawMatches( const Mat& img1, const vector& keypoints1 const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector >& matchesMask=vector >(), int flags=DrawMatchesFlags::DEFAULT ); -} +/****************************************************************************************\ +* Evaluation functions * +\****************************************************************************************/ + +CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2, + vector* keypoints1, vector* keypoints2, + float& repeatability, int& correspCount, + const Ptr& fdetector=Ptr() ); + +CV_EXPORTS void computeRecallPrecisionCurve( const vector >& matches1to2, + const vector >& correctMatches1to2Mask, + vector& recallPrecisionCurve ); +CV_EXPORTS float getRecall( const vector& recallPrecisionCurve, float l_precision ); + +CV_EXPORTS void evaluateDescriptorMatch( const Mat& img1, const Mat& img2, const Mat& H1to2, + vector& keypoints1, vector& keypoints2, + vector >* matches1to2, vector >* correctMatches1to2Mask, + vector& recallPrecisionCurve, + const Ptr& dmatch=Ptr() ); + + +} /* namespace cv */ #endif /* __cplusplus */ diff --git a/modules/features2d/src/calonder.cpp b/modules/features2d/src/calonder.cpp index 6958016..fa409a9 100644 --- a/modules/features2d/src/calonder.cpp +++ b/modules/features2d/src/calonder.cpp @@ -991,596 +991,4 @@ void RTreeClassifier::discardFloatPosteriors() printf("[OK] RTC: discarded float posteriors of all trees\n"); } -#if 0 -const int progressBarSize = 50; - -CalonderClassifier::CalonderClassifier() -{ - verbose = false; - clear(); -} - -CalonderClassifier::~CalonderClassifier() -{} - -CalonderClassifier::CalonderClassifier( const vector >& points, const vector& refimgs, - const vector >& labels, int _numClasses, - int _pathSize, int _numTrees, int _treeDepth, - int _numViews, int _compressedDim, int _compressType, int _numQuantBits, - const PatchGenerator &patchGenerator ) -{ - verbose = false; - train( points, refimgs, labels, _numClasses, _pathSize, _numTrees, _treeDepth, _numViews, - _compressedDim, _compressType, _numQuantBits, patchGenerator ); -} - -int CalonderClassifier::getPatchSize() const -{ return patchSize; } - -int CalonderClassifier::getNumTrees() const -{ return numTrees; } - -int CalonderClassifier::getTreeDepth() const -{ return treeDepth; } - -int CalonderClassifier::getNumViews() const -{ return numViews; } - -int CalonderClassifier::getSignatureSize() const -{ return signatureSize; } - -int CalonderClassifier::getCompressType() const -{ return compressType; } - -int CalonderClassifier::getNumQuantBits() const -{ return numQuantBits; } - -int CalonderClassifier::getOrigNumClasses() const -{ return origNumClasses; } - -void CalonderClassifier::setVerbose( bool _verbose ) -{ - verbose = _verbose; -} - -void CalonderClassifier::clear() -{ - patchSize = numTrees = origNumClasses = signatureSize = treeDepth = numViews = numQuantBits = 0; - compressType = COMPRESS_NONE; - - nodes.clear(); - posteriors.clear(); -#if QUANTIZATION_AVAILABLE - quantizedPosteriors.clear(); -#endif -} - -bool CalonderClassifier::empty() const -{ - return posteriors.empty() && quantizedPosteriors.empty(); -} - -void CalonderClassifier::prepare( int _patchSize, int _signatureSize, int _numTrees, int _treeDepth, int _numViews ) -{ - clear(); - - patchSize = _patchSize; - signatureSize = _signatureSize; - numTrees = _numTrees; - treeDepth = _treeDepth; - numViews = 
_numViews; - - numLeavesPerTree = 1 << treeDepth; // 2^d - numNodesPerTree = numLeavesPerTree - 1; // 2^d - 1 - - nodes = vector( numTrees*numNodesPerTree ); - posteriors = vector( numTrees*numLeavesPerTree*signatureSize, 0.f ); -} - -static int calcNumPoints( const vector >& points ) -{ - int count = 0; - for( size_t i = 0; i < points.size(); i++ ) - count += points[i].size(); - return count; -} - -void CalonderClassifier::train( const vector >& points, const vector& refimgs, - const vector >& labels, int _numClasses, - int _patchSize, int _numTrees, int _treeDepth, int _numViews, - int _compressedDim, int _compressType, int _numQuantBits, - const PatchGenerator &patchGenerator ) -{ - if( points.empty() || refimgs.size() != points.size() ) - CV_Error( CV_StsBadSize, "points vector must be no empty and refimgs must have the same size as points" ); - if( _patchSize < 5 || _patchSize >= 256 ) - CV_Error( CV_StsBadArg, "patchSize must be in [5, 255]"); - if( _numTrees <= 0 || _treeDepth <= 0 ) - CV_Error( CV_StsBadArg, "numTrees, treeDepth, numViews must be positive"); - int numPoints = calcNumPoints( points ); - if( !labels.empty() && ( labels.size() != points.size() || _numClasses <=0 || _numClasses > numPoints ) ) - CV_Error( CV_StsBadArg, "labels has incorrect size or _numClasses is not in [1, numPoints]"); - _numViews = std::max( 1, _numViews ); - - int _origNumClasses = labels.empty() ? numPoints : _numClasses; - - if( verbose ) - { - cout << "Using train parameters:" << endl; - cout << " patchSize=" << _patchSize << endl; - cout << " numTrees=" << _numTrees << endl; - cout << " treeDepth=" << _treeDepth << endl; - cout << " numViews=" << _numViews << endl; - cout << " compressedDim=" << _compressedDim << endl; - cout << " compressType=" << _compressType << endl; - cout << " numQuantBits=" << _numQuantBits << endl; - cout << endl - << " numPoints=" << numPoints << endl; - cout << " origNumClasses=" << _origNumClasses << endl; - } - - prepare( _patchSize, _origNumClasses, _numTrees, _treeDepth, _numViews ); - - origNumClasses = _origNumClasses; - vector leafSampleCounters = vector( numTrees*numLeavesPerTree, 0 ); - // generate nodes - RNG rng = theRNG(); - for( int i = 0; i < numTrees*numNodesPerTree; i++ ) - { - uchar x1 = rng(_patchSize); - uchar y1 = rng(_patchSize); - uchar x2 = rng(_patchSize); - uchar y2 = rng(_patchSize); - nodes[i] = Node(x1, y1, x2, y2); - } - - Size size( patchSize, patchSize ); - Mat patch; - if( verbose ) cout << "START training..." << endl; - for( size_t treeIdx = 0; treeIdx < (size_t)numTrees; treeIdx++ ) - { - if( verbose ) cout << "< tree " << treeIdx << endl; - int globalPointIdx = 0; - int* treeLeafSampleCounters = &leafSampleCounters[treeIdx*numLeavesPerTree]; - float* treePosteriors = &posteriors[treeIdx*numLeavesPerTree*signatureSize]; - for( size_t imgIdx = 0; imgIdx < points.size(); imgIdx++ ) - { - const Point2f* imgPoints = &points[imgIdx][0]; - const int* imgLabels = labels.empty() ? 0 : &labels[imgIdx][0]; - int last = -1, cur; - for( size_t pointIdx = 0; pointIdx < points[imgIdx].size(); pointIdx++, globalPointIdx++ ) - { - int classID = imgLabels==0 ? 
globalPointIdx : imgLabels[pointIdx]; - Point2f pt = imgPoints[pointIdx]; - const Mat& src = refimgs[imgIdx]; - - if( verbose && (cur = (int)((float)globalPointIdx/numPoints*progressBarSize)) != last ) - { - last = cur; - cout << "."; - cout.flush(); - } - - CV_Assert( classID >= 0 && classID < signatureSize ); - for( int v = 0; v < numViews; v++ ) - { - patchGenerator( src, pt, patch, size, rng ); - // add sample - int leafIdx = getLeafIdx( treeIdx, patch ); - treeLeafSampleCounters[leafIdx]++; - treePosteriors[leafIdx*signatureSize + classID]++; - } - } - } - - if( verbose ) cout << endl << ">" << endl; - } - - _compressedDim = std::max( 0, std::min(signatureSize, _compressedDim) ); - _numQuantBits = std::max( 0, std::min((int)MAX_NUM_QUANT_BITS, _numQuantBits) ); - finalize( _compressedDim, _compressType, _numQuantBits, leafSampleCounters ); - - if( verbose ) cout << "END training." << endl; -} - -int CalonderClassifier::getLeafIdx( int treeIdx, const Mat& patch ) const -{ - const Node* treeNodes = &nodes[treeIdx*numNodesPerTree]; - int idx = 0; - for( int d = 0; d < treeDepth-1; d++ ) - { - int offset = treeNodes[idx](patch); - idx = 2*idx + 1 + offset; - } - return idx; -} - -void CalonderClassifier::finalize( int _compressedDim, int _compressType, int _numQuantBits, - const vector& leafSampleCounters ) -{ - for( int ti = 0; ti < numTrees; ti++ ) - { - const int* treeLeafSampleCounters = &leafSampleCounters[ti*numLeavesPerTree]; - float* treePosteriors = &posteriors[ti*numLeavesPerTree*signatureSize]; - // Normalize by number of patches to reach each leaf - for( int li = 0; li < numLeavesPerTree; li++ ) - { - int sampleCount = treeLeafSampleCounters[li]; - if( sampleCount != 0 ) - { - float normalizer = 1.0f / sampleCount; - int leafPosteriorIdx = li*signatureSize; - for( int ci = 0; ci < signatureSize; ci++ ) - treePosteriors[leafPosteriorIdx + ci] *= normalizer; - } - } - } - - // apply compressive sensing - if( _compressedDim > 0 && _compressedDim < signatureSize ) - compressLeaves( _compressedDim, _compressType ); - else - { - if( verbose ) - cout << endl << "[WARNING] NO compression to leaves applied, because _compressedDim=" << _compressedDim << endl; - } - - // convert float-posteriors to uchar-posteriors (quantization step) -#if QUANTIZATION_AVAILABLE - if( _numQuantBits > 0 ) - quantizePosteriors( _numQuantBits ); - else - { - if( verbose ) - cout << endl << "[WARNING] NO quantization to posteriors, because _numQuantBits=" << _numQuantBits << endl; - } -#endif -} - -Mat createCompressionMatrix( int rows, int cols, int distrType ) -{ - Mat mtr( rows, cols, CV_32FC1 ); - assert( rows <= cols ); - - RNG rng(23); - - if( distrType == CalonderClassifier::COMPRESS_DISTR_GAUSS ) - { - float sigma = 1./rows; - for( int y = 0; y < rows; y++ ) - for( int x = 0; x < cols; x++ ) - mtr.at(y,x) = rng.gaussian( sigma ); - } - else if( distrType == CalonderClassifier::COMPRESS_DISTR_BERNOULLI ) - { - float par = (float)(1./sqrt((float)rows)); - for( int y = 0; y < rows; y++ ) - for( int x = 0; x < cols; x++ ) - mtr.at(y,x) = rng(2)==0 ? par : -par; - } - else if( distrType == CalonderClassifier::COMPRESS_DISTR_DBFRIENDLY ) - { - float par = (float)sqrt(3./rows); - for( int y = 0; y < rows; y++ ) - for( int x = 0; x < cols; x++ ) - { - int rng6 = rng(6); - mtr.at(y,x) = rng6==0 ? par : (rng6==1 ? 
-par : 0.f); - } - } - else - CV_Assert( 0 ); - - return mtr; -} - -void CalonderClassifier::compressLeaves( int _compressedDim, int _compressType ) -{ - if( verbose ) - cout << endl << "[OK] compressing leaves with matrix " << _compressedDim << " x " << signatureSize << endl; - - Mat compressionMtrT = (createCompressionMatrix( _compressedDim, signatureSize, _compressType )).t(); - - vector comprPosteriors( numTrees*numLeavesPerTree*_compressedDim, 0); - Mat( numTrees*numLeavesPerTree, _compressedDim, CV_32FC1, &comprPosteriors[0] ) = - Mat( numTrees*numLeavesPerTree, signatureSize, CV_32FC1, &posteriors[0]) * compressionMtrT; - - posteriors.resize( comprPosteriors.size() ); - copy( comprPosteriors.begin(), comprPosteriors.end(), posteriors.begin() ); - - signatureSize = _compressedDim; - compressType = _compressType; -} - -#if QUANTIZATION_AVAILABLE -static float percentile( const float* data, int n, float p ) -{ - assert( n>0 ); - assert( p>=0 && p<=1 ); - - vector vec( data, data+n ); - std::sort(vec.begin(), vec.end()); - int ix = (int)(p*(n-1)); - return vec[ix]; -} - -void quantizeVector( const float* src, int dim, float fbounds[2], uchar ubounds[2], uchar* dst ) -{ - assert( fbounds[0] < fbounds[1] ); - assert( ubounds[0] < ubounds[1] ); - - float normFactor = 1.f/(fbounds[1] - fbounds[0]); - for( int i = 0; i < dim; i++ ) - { - float part = (src[i] - fbounds[0]) * normFactor; - assert( 0 <= part && part <= 1 ) ; - uchar val = ubounds[0] + (uchar)( part*ubounds[1] ); - dst[i] = std::max( 0, (int)std::min(ubounds[1], val) ); - } -} - -void CalonderClassifier::quantizePosteriors( int _numQuantBits, bool isClearFloatPosteriors ) -{ - uchar ubounds[] = { 0, (uchar)((1<<_numQuantBits)-1) }; - float fbounds[] = { 0.f, 0.f }; - - int totalLeavesCount = numTrees*numLeavesPerTree; - for( int li = 0; li < totalLeavesCount; li++ ) // TODO for some random choosen leaves ! 
- { - fbounds[0] += percentile( &posteriors[li*signatureSize], signatureSize, GET_LOWER_QUANT_PERC() ); - fbounds[1] += percentile( &posteriors[li*signatureSize], signatureSize, GET_UPPER_QUANT_PERC() ); - } - fbounds[0] /= totalLeavesCount; - fbounds[1] /= totalLeavesCount; - - quantizedPosteriors.resize( posteriors.size() ); - quantizeVector( &posteriors[0], posteriors.size(), fbounds, ubounds, &quantizedPosteriors[0] ); - - if( isClearFloatPosteriors ) - clearFloatPosteriors(); -} - -void CalonderClassifier::clearFloatPosteriors() -{ - quantizedPosteriors.clear(); -} - -#endif - -void CalonderClassifier::operator()( const Mat& img, Point2f pt, vector& signature, float thresh ) const -{ - if( img.empty() || img.type() != CV_8UC1 ) - return; - - Mat patch; - getRectSubPix(img, Size(patchSize,patchSize), pt, patch, img.type()); - (*this)( patch, signature, thresh ); -} - -void CalonderClassifier::operator()( const Mat& patch, vector& signature, float thresh ) const -{ - if( posteriors.empty() || patch.empty() || patch.type() != CV_8UC1 || patch.cols < patchSize || patch.rows < patchSize ) - return; - - int treePostSize = numLeavesPerTree*signatureSize; - - signature.resize( signatureSize, 0.f ); - float* sig = &signature[0]; - for( int ti = 0; ti < numTrees; ti++ ) - { - int leafIdx = getLeafIdx( ti, patch ); - const float* post = &posteriors[ti*treePostSize + leafIdx*signatureSize]; - for( int ci = 0; ci < signatureSize; ci++ ) - sig[ci] += post[ci]; - } - float coef = 1.f/numTrees; - for( int ci = 0; ci < signatureSize; ci++ ) - { - sig[ci] *= coef; - if( sig[ci] < thresh ) - sig[ci] = 0; - } -} - -#if QUANTIZATION_AVAILABLE -void CalonderClassifier::operator()( const Mat& img, Point2f pt, vector& signature, uchar thresh ) const -{ - if( img.empty() || img.type() != CV_8UC1 ) - return; - - Mat patch; - getRectSubPix(img, Size(patchSize,patchSize), pt, patch, img.type()); - (*this)(patch, signature, thresh ); -} - -void CalonderClassifier::operator()( const Mat& patch, vector& signature, uchar thresh ) const -{ - if( quantizedPosteriors.empty() || patch.empty() || patch.type() != CV_8UC1 || patch.cols > patchSize || patch.rows > patchSize ) - return; - - int treePostSize = numLeavesPerTree*signatureSize; - - vector sum( signatureSize, 0.f ); - for( int ti = 0; ti < numTrees; ti++ ) - { - int leafIdx = getLeafIdx( ti, patch ); - const uchar* post = &quantizedPosteriors[ti*treePostSize + leafIdx*signatureSize]; - for( int ci = 0; ci < signatureSize; ci++ ) - sum[ci] += post[ci]; - } - float coef = 1.f/numTrees; - signature.resize( signatureSize ); - uchar* sig = &signature[0]; - for( int ci = 0; ci < signatureSize; ci++ ) - { - sig[ci] = (uchar)(sum[ci]*coef); - if( sig[ci] < thresh ) - sig[ci] = 0; - } -} -#endif - -void CalonderClassifier::read( const FileNode& fn ) -{ - prepare( fn["patchSize"], fn["signatureSize"], fn["numTrees"], fn["treeDepth"], fn["numViews"] ); - origNumClasses = fn["origNumClasses"]; - compressType = fn["compressType"]; - int _numQuantBits = fn["numQuantBits"]; - - for( int ti = 0; ti < numTrees; ti++ ) - { - stringstream treeName; - treeName << "tree" << ti; - FileNode treeFN = fn["trees"][treeName.str()]; - - Node* treeNodes = &nodes[ti*numNodesPerTree]; - FileNodeIterator nodesFNIter = treeFN["nodes"].begin(); - for( int ni = 0; ni < numNodesPerTree; ni++ ) - { - Node* node = treeNodes + ni; - nodesFNIter >> node->x1 >> node->y1 >> node->x2 >> node->y2; - } - - FileNode posteriorsFN = treeFN["posteriors"]; - for( int li = 0; li < numLeavesPerTree; li++ ) - { - 
stringstream leafName; - leafName << "leaf" << li; - float* post = &posteriors[ti*numLeavesPerTree*signatureSize + li*signatureSize]; - FileNodeIterator leafFNIter = posteriorsFN[leafName.str()].begin(); - for( int ci = 0; ci < signatureSize; ci++ ) - leafFNIter >> post[ci]; - } - } -#if QUANTIZATION_AVAILABLE - if( _numQuantBits ) - quantizePosteriors(_numQuantBits); -#endif -} - -void CalonderClassifier::write( FileStorage& fs ) const -{ - if( !fs.isOpened() ) - return; - fs << "patchSize" << patchSize; - fs << "numTrees" << numTrees; - fs << "treeDepth" << treeDepth; - fs << "numViews" << numViews; - fs << "origNumClasses" << origNumClasses; - fs << "signatureSize" << signatureSize; - fs << "compressType" << compressType; - fs << "numQuantBits" << numQuantBits; - - fs << "trees" << "{"; - for( int ti = 0; ti < numTrees; ti++ ) - { - stringstream treeName; - treeName << "tree" << ti; - fs << treeName.str() << "{"; - - fs << "nodes" << "[:"; - const Node* treeNodes = &nodes[ti*numNodesPerTree]; - for( int ni = 0; ni < numNodesPerTree; ni++ ) - { - const Node* node = treeNodes + ni; - fs << node->x1 << node->y1 << node->x2 << node->y2; - } - fs << "]"; // nodes - - fs << "posteriors" << "{"; - for( int li = 0; li < numLeavesPerTree; li++ ) - { - stringstream leafName; - leafName << "leaf" << li; - fs << leafName.str() << "[:"; - const float* post = &posteriors[ti*numLeavesPerTree*signatureSize + li*signatureSize]; - for( int ci = 0; ci < signatureSize; ci++ ) - { - fs << post[ci]; - } - fs << "]"; // leaf - } - fs << "}"; // posteriors - fs << "}"; // tree - } - fs << "}"; // trees -} - -struct RTreeNode -{ - short offset1, offset2; -}; - -void CalonderClassifier::read( istream &is ) -{ - int _patchSize, _numTrees, _treeDepth, _numViews, _signatureSize, _origNumClasses, _numQuantBits, _compressType; - - _patchSize = 32; - _numViews = 0; - _compressType = COMPRESS_DISTR_BERNOULLI; - is.read((char*)(&_numTrees), sizeof(_numTrees)); - is.read((char*)(&_signatureSize), sizeof(_signatureSize)); - is.read((char*)(&_origNumClasses), sizeof(_origNumClasses)); - is.read((char*)(&_numQuantBits), sizeof(_numQuantBits)); - - // 1st tree - int _classes; - is.read((char*)(&_classes), sizeof(_classes)); - CV_Assert( _signatureSize == _classes ); - is.read((char*)(&_treeDepth), sizeof(_treeDepth)); - - prepare( _patchSize, _signatureSize, _numTrees, _treeDepth, _numViews ); - - origNumClasses = _origNumClasses; - compressType = _compressType; - - if( _numQuantBits>8 ) - { - if( verbose ) - cout << "[WARNING] suspicious value numQuantBits=" << numQuantBits << " found; setting to " << DEFAULT_NUM_QUANT_BITS; - _numQuantBits = DEFAULT_NUM_QUANT_BITS; - } - - // 1st tree - vector rtreeNodes(numNodesPerTree); - is.read((char*)(&rtreeNodes[0]), numNodesPerTree * sizeof(rtreeNodes[0])); - for( int ni = 0; ni < numNodesPerTree; ni ++ ) - { - short offset1 = rtreeNodes[ni].offset1, - offset2 = rtreeNodes[ni].offset2; - nodes[ni] = Node(offset1 % _patchSize, offset1 / _patchSize, offset2 % _patchSize, offset2 / _patchSize ); - } - for( int li = 0; li < numLeavesPerTree; li++ ) - is.read((char*)&posteriors[li*signatureSize], signatureSize * sizeof(float)); - - // other trees - for( int treeIdx = 1; treeIdx < numTrees; treeIdx++ ) - { - is.read((char*)(&_classes), sizeof(_classes)); - CV_Assert( _classes == signatureSize ); - is.read((char*)(&_treeDepth), sizeof(_treeDepth)); - CV_Assert( _treeDepth == treeDepth ); - - is.read((char*)(&rtreeNodes[0]), numNodesPerTree * sizeof(rtreeNodes[0])); - - Node* treeNodes = 
&nodes[treeIdx*numNodesPerTree]; - for( int ni = 0; ni < numNodesPerTree; ni ++ ) - { - short offset1 = rtreeNodes[ni].offset1, - offset2 = rtreeNodes[ni].offset2; - treeNodes[ni] = Node(offset1 % _patchSize, offset1 / _patchSize, offset2 % _patchSize, offset2 / _patchSize ); - } - float* treePosteriors = &posteriors[treeIdx*numLeavesPerTree*signatureSize]; - for( int li = 0; li < numLeavesPerTree; li++ ) - is.read((char*)&treePosteriors[li*signatureSize], signatureSize * sizeof(float)); - - } - -#if QUANTIZATION_AVAILABLE - if( _numQuantBits ) - quantizePosteriors(_numQuantBits); -#endif -} -#endif - } diff --git a/modules/features2d/src/evaluation.cpp b/modules/features2d/src/evaluation.cpp new file mode 100644 index 0000000..d30ab61 --- /dev/null +++ b/modules/features2d/src/evaluation.cpp @@ -0,0 +1,502 @@ +//*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" +#include + +using namespace cv; +using namespace std; + +inline Point2f applyHomography( const Mat_& H, const Point2f& pt ) +{ + double z = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2); + if( z ) + { + double w = 1./z; + return Point2f( (H(0,0)*pt.x + H(0,1)*pt.y + H(0,2))*w, (H(1,0)*pt.x + H(1,1)*pt.y + H(1,2))*w ); + } + return Point2f( numeric_limits::max(), numeric_limits::max() ); +} + +inline void linearizeHomographyAt( const Mat_& H, const Point2f& pt, Mat_& A ) +{ + A.create(2,2); + double p1 = H(0,0)*pt.x + H(0,1)*pt.y + H(0,2), + p2 = H(1,0)*pt.x + H(1,1)*pt.y + H(1,2), + p3 = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2), + p3_2 = p3*p3; + if( p3 ) + { + A(0,0) = H(0,0)/p3 - p1*H(2,0)/p3_2; // fxdx + A(0,1) = H(0,1)/p3 - p1*H(2,1)/p3_2; // fxdy + + A(1,0) = H(1,0)/p3 - p2*H(2,0)/p3_2; // fydx + A(1,1) = H(1,1)/p3 - p2*H(2,1)/p3_2; // fydx + } + else + A.setTo(Scalar::all(numeric_limits::max())); +} + +class EllipticKeyPoint +{ +public: + EllipticKeyPoint(); + EllipticKeyPoint( const Point2f& _center, const Scalar& _ellipse ); + + static void convert( const vector& src, vector& dst ); + static void convert( const vector& src, vector& dst ); + + static Mat_ getSecondMomentsMatrix( const Scalar& _ellipse ); + Mat_ getSecondMomentsMatrix() const; + + void calcProjection( const Mat_& H, EllipticKeyPoint& projection ) const; + static void calcProjection( const vector& src, const Mat_& H, vector& dst ); + + Point2f center; + Scalar ellipse; // 3 elements a, b, c: ax^2+2bxy+cy^2=1 + Size_ axes; // half lenght of elipse axes + Size_ boundingBox; // half sizes of bounding box which sides are parallel to the coordinate axes +}; + +EllipticKeyPoint::EllipticKeyPoint() +{ + *this = EllipticKeyPoint(Point2f(0,0), Scalar(1, 0, 1) ); +} + +EllipticKeyPoint::EllipticKeyPoint( const Point2f& _center, const Scalar& _ellipse ) +{ + center = _center; + ellipse = _ellipse; + + Mat_ M = getSecondMomentsMatrix(_ellipse), eval; + eigen( M, eval ); + assert( eval.rows == 2 && eval.cols == 1 ); + axes.width = 1.f / sqrt(eval(0,0)); + axes.height = 1.f / sqrt(eval(1,0)); + + float ac_b2 = ellipse[0]*ellipse[2] - ellipse[1]*ellipse[1]; + boundingBox.width = sqrt(ellipse[2]/ac_b2); + boundingBox.height = sqrt(ellipse[0]/ac_b2); +} + +Mat_ EllipticKeyPoint::getSecondMomentsMatrix( const Scalar& _ellipse ) +{ + Mat_ M(2, 2); + M(0,0) = _ellipse[0]; + M(1,0) = M(0,1) = _ellipse[1]; + M(1,1) = _ellipse[2]; + return M; +} + +Mat_ EllipticKeyPoint::getSecondMomentsMatrix() const +{ + return getSecondMomentsMatrix(ellipse); +} + +void EllipticKeyPoint::calcProjection( const Mat_& H, EllipticKeyPoint& projection ) const +{ + Point2f dstCenter = applyHomography(H, center); + + Mat_ invM; invert(getSecondMomentsMatrix(), invM); + Mat_ Aff; linearizeHomographyAt(H, center, Aff); + Mat_ dstM; invert(Aff*invM*Aff.t(), dstM); + + projection = EllipticKeyPoint( dstCenter, Scalar(dstM(0,0), dstM(0,1), dstM(1,1)) ); +} + +void EllipticKeyPoint::convert( const vector& src, vector& dst ) +{ + if( !src.empty() ) + { + dst.resize(src.size()); + for( size_t i = 0; i < src.size(); i++ ) + { + float rad = src[i].size/2; + assert( rad ); + float fac = 1.f/(rad*rad); + dst[i] = EllipticKeyPoint( src[i].pt, Scalar(fac, 0, fac) ); + } + } +} + +void EllipticKeyPoint::convert( const vector& src, vector& dst ) +{ + if( !src.empty() ) + { + dst.resize(src.size()); + for( size_t i = 0; i < src.size(); i++ ) + { + Size_ axes = src[i].axes; + float rad = sqrt(axes.height*axes.width); + dst[i] = KeyPoint(src[i].center, 2*rad 
); + } + } +} + +void EllipticKeyPoint::calcProjection( const vector& src, const Mat_& H, vector& dst ) +{ + if( !src.empty() ) + { + assert( !H.empty() && H.cols == 3 && H.rows == 3); + dst.resize(src.size()); + vector::const_iterator srcIt = src.begin(); + vector::iterator dstIt = dst.begin(); + for( ; srcIt != src.end(); ++srcIt, ++dstIt ) + srcIt->calcProjection(H, *dstIt); + } +} + +static void filterEllipticKeyPointsByImageSize( vector& keypoints, const Size& imgSize ) +{ + if( !keypoints.empty() ) + { + vector filtered; + filtered.reserve(keypoints.size()); + vector::const_iterator it = keypoints.begin(); + for( int i = 0; it != keypoints.end(); ++it, i++ ) + { + if( it->center.x + it->boundingBox.width < imgSize.width && + it->center.x - it->boundingBox.width > 0 && + it->center.y + it->boundingBox.height < imgSize.height && + it->center.y - it->boundingBox.height > 0 ) + filtered.push_back(*it); + } + keypoints.assign(filtered.begin(), filtered.end()); + } +} + +static void overlap( const vector& keypoints1, const vector& keypoints2t, bool commonPart, + SparseMat_& overlaps ) +{ + overlaps.clear(); + if( keypoints1.empty() || keypoints2t.empty() ) + return; + + int size[] = { keypoints1.size(), keypoints2t.size() }; + overlaps.create( 2, size ); + + for( size_t i1 = 0; i1 < keypoints1.size(); i1++ ) + { + EllipticKeyPoint kp1 = keypoints1[i1]; + float maxDist = sqrt(kp1.axes.width*kp1.axes.height), + fac = 30.f/maxDist; + if( !commonPart ) + fac=3; + + maxDist = maxDist*4; + fac = 1.0/(fac*fac); + + EllipticKeyPoint keypoint1a = EllipticKeyPoint( kp1.center, Scalar(fac*kp1.ellipse[0], fac*kp1.ellipse[1], fac*kp1.ellipse[2]) ); + + for( size_t i2 = 0; i2 < keypoints2t.size(); i2++ ) + { + EllipticKeyPoint kp2 = keypoints2t[i2]; + Point2f diff = kp2.center - kp1.center; + + if( norm(diff) < maxDist ) + { + EllipticKeyPoint keypoint2a = EllipticKeyPoint( kp2.center, Scalar(fac*kp2.ellipse[0], fac*kp2.ellipse[1], fac*kp2.ellipse[2]) ); + //find the largest eigenvalue + float maxx = ceil(( keypoint1a.boundingBox.width > (diff.x+keypoint2a.boundingBox.width)) ? + keypoint1a.boundingBox.width : (diff.x+keypoint2a.boundingBox.width)); + float minx = floor((-keypoint1a.boundingBox.width < (diff.x-keypoint2a.boundingBox.width)) ? + -keypoint1a.boundingBox.width : (diff.x-keypoint2a.boundingBox.width)); + + float maxy = ceil(( keypoint1a.boundingBox.height > (diff.y+keypoint2a.boundingBox.height)) ? + keypoint1a.boundingBox.height : (diff.y+keypoint2a.boundingBox.height)); + float miny = floor((-keypoint1a.boundingBox.height < (diff.y-keypoint2a.boundingBox.height)) ? + -keypoint1a.boundingBox.height : (diff.y-keypoint2a.boundingBox.height)); + float mina = (maxx-minx) < (maxy-miny) ? 
(maxx-minx) : (maxy-miny) ; + float dr = mina/50.0; + float bua = 0, bna = 0; + //compute the area + for( float rx1 = minx; rx1 <= maxx; rx1+=dr ) + { + float rx2 = rx1-diff.x; + for( float ry1=miny; ry1<=maxy; ry1+=dr ) + { + float ry2=ry1-diff.y; + //compute the distance from the ellipse center + float e1 = keypoint1a.ellipse[0]*rx1*rx1+2*keypoint1a.ellipse[1]*rx1*ry1+keypoint1a.ellipse[2]*ry1*ry1; + float e2 = keypoint2a.ellipse[0]*rx2*rx2+2*keypoint2a.ellipse[1]*rx2*ry2+keypoint2a.ellipse[2]*ry2*ry2; + //compute the area + if( e1<1 && e2<1 ) bna++; + if( e1<1 || e2<1 ) bua++; + } + } + if( bna > 0) + overlaps.ref(i1,i2) = bna/bua; + } + } + } +} + +static void calculateRepeatability( const Mat& img1, const Mat& img2, const Mat& H1to2, + const vector& _keypoints1, const vector& _keypoints2, + float& repeatability, int& correspondencesCount, + SparseMat_* thresholdedOverlapMask=0 ) +{ + vector keypoints1, keypoints2, keypoints1t, keypoints2t; + EllipticKeyPoint::convert( _keypoints1, keypoints1 ); + EllipticKeyPoint::convert( _keypoints2, keypoints2 ); + + // calculate projections of key points + EllipticKeyPoint::calcProjection( keypoints1, H1to2, keypoints1t ); + Mat H2to1; invert(H1to2, H2to1); + EllipticKeyPoint::calcProjection( keypoints2, H2to1, keypoints2t ); + + bool ifEvaluateDetectors = !thresholdedOverlapMask; // == commonPart + float overlapThreshold; + if( ifEvaluateDetectors ) + { + overlapThreshold = 1.f - 0.4f; + + // remove key points from outside of the common image part + Size sz1 = img1.size(), sz2 = img2.size(); + filterEllipticKeyPointsByImageSize( keypoints1, sz1 ); + filterEllipticKeyPointsByImageSize( keypoints1t, sz2 ); + filterEllipticKeyPointsByImageSize( keypoints2, sz2 ); + filterEllipticKeyPointsByImageSize( keypoints2t, sz1 ); + } + else + { + overlapThreshold = 1.f - 0.5f; + } + int minCount = min( keypoints1.size(), keypoints2t.size() ); + + // calculate overlap errors + SparseMat_ overlaps; + overlap( keypoints1, keypoints2t, ifEvaluateDetectors, overlaps ); + + correspondencesCount = -1; + repeatability = -1.f; + const int* size = overlaps.size(); + if( !size || overlaps.nzcount() == 0 ) + return; + + if( ifEvaluateDetectors ) + { + // threshold the overlaps + for( int y = 0; y < size[0]; y++ ) + { + for( int x = 0; x < size[1]; x++ ) + { + if ( overlaps(y,x) < overlapThreshold ) + overlaps.erase(y,x); + } + } + + // regions one-to-one matching + correspondencesCount = 0; + while( overlaps.nzcount() > 0 ) + { + double maxOverlap = 0; + int maxIdx[2]; + minMaxLoc( overlaps, 0, &maxOverlap, 0, maxIdx ); + for( size_t i1 = 0; i1 < keypoints1.size(); i1++ ) + overlaps.erase(i1, maxIdx[1]); + for( size_t i2 = 0; i2 < keypoints2t.size(); i2++ ) + overlaps.erase(maxIdx[0], i2); + correspondencesCount++; + } + repeatability = minCount ? (float)correspondencesCount/minCount : -1; + } + else + { + thresholdedOverlapMask->create( 2, size ); + for( int y = 0; y < size[0]; y++ ) + { + for( int x = 0; x < size[1]; x++ ) + { + float val = overlaps(y,x); + if ( val >= overlapThreshold ) + thresholdedOverlapMask->ref(y,x) = 1; + } + } + } +} + +void cv::evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2, + vector* _keypoints1, vector* _keypoints2, + float& repeatability, int& correspCount, + const Ptr& _fdetector ) +{ + Ptr fdetector(_fdetector); + vector *keypoints1, *keypoints2, buf1, buf2; + keypoints1 = _keypoints1 != 0 ? _keypoints1 : &buf1; + keypoints2 = _keypoints2 != 0 ? 
_keypoints2 : &buf2; + + if( (keypoints1->empty() || keypoints2->empty()) && fdetector.empty() ) + CV_Error( CV_StsBadArg, "fdetector must be no empty when keypoints1 or keypoints2 is empty" ); + + if( keypoints1->empty() ) + fdetector->detect( img1, *keypoints1 ); + if( keypoints2->empty() ) + fdetector->detect( img1, *keypoints2 ); + + calculateRepeatability( img1, img2, H1to2, *keypoints1, *keypoints2, repeatability, correspCount ); +} + +struct DMatchForEvaluation : public DMatch +{ + uchar isCorrect; + DMatchForEvaluation( const DMatch &dm ) : DMatch( dm ) {} +}; + +static inline float recall( int correctMatchCount, int correspondenceCount ) +{ + return correspondenceCount ? (float)correctMatchCount / (float)correspondenceCount : -1; +} + +static inline float precision( int correctMatchCount, int falseMatchCount ) +{ + return correctMatchCount + falseMatchCount ? (float)correctMatchCount / (float)(correctMatchCount + falseMatchCount) : -1; +} + +void cv::computeRecallPrecisionCurve( const vector >& matches1to2, + const vector >& correctMatches1to2Mask, + vector& recallPrecisionCurve ) +{ + CV_Assert( matches1to2.size() == correctMatches1to2Mask.size() ); + + vector allMatches; + int correspondenceCount = 0; + for( size_t i = 0; i < matches1to2.size(); i++ ) + { + for( size_t j = 0; j < matches1to2[i].size(); j++ ) + { + DMatchForEvaluation match = matches1to2[i][j]; + match.isCorrect = correctMatches1to2Mask[i][j] ; + allMatches.push_back( match ); + correspondenceCount += match.isCorrect != 0 ? 1 : 0; + } + } + + std::sort( allMatches.begin(), allMatches.end() ); + + int correctMatchCount = 0, falseMatchCount = 0; + recallPrecisionCurve.resize( allMatches.size() ); + for( size_t i = 0; i < allMatches.size(); i++ ) + { + if( allMatches[i].isCorrect ) + correctMatchCount++; + else + falseMatchCount++; + + float r = recall( correctMatchCount, correspondenceCount ); + float p = precision( correctMatchCount, falseMatchCount ); + recallPrecisionCurve[i] = Point2f(1-p, r); + } +} + +float cv::getRecall( const vector& recallPrecisionCurve, float l_precision ) +{ + float recall = -1; + + if( l_precision >= 0 && l_precision <= 1 ) + { + int bestIdx = -1; + float minDiff = FLT_MAX; + for( size_t i = 0; i < recallPrecisionCurve.size(); i++ ) + { + float curDiff = std::fabs(l_precision - recallPrecisionCurve[i].x); + if( curDiff <= minDiff ) + { + bestIdx = i; + minDiff = curDiff; + } + } + + recall = recallPrecisionCurve[bestIdx].y; + } + + return recall; +} + +void cv::evaluateDescriptorMatch( const Mat& img1, const Mat& img2, const Mat& H1to2, + vector& keypoints1, vector& keypoints2, + vector >* _matches1to2, vector >* _correctMatches1to2Mask, + vector& recallPrecisionCurve, + const Ptr& _dmatch ) +{ + Ptr dmatch = _dmatch; + dmatch->clear(); + + vector > *matches1to2, buf1; + vector > *correctMatches1to2Mask, buf2; + matches1to2 = _matches1to2 != 0 ? _matches1to2 : &buf1; + correctMatches1to2Mask = _correctMatches1to2Mask != 0 ? 
_correctMatches1to2Mask : &buf2; + + if( keypoints1.empty() || keypoints2.empty() ) + CV_Error( CV_StsBadArg, "keypoints1 and keypoints2 must be no empty" ); + if( matches1to2->empty() && dmatch.empty() ) + CV_Error( CV_StsBadArg, "dmatch must be no empty when matches1to2 is empty" ); + if( matches1to2->empty() ) + { + dmatch->add( img2, keypoints2 ); + //TODO: use more sophisticated strategy to choose threshold + dmatch->match( img1, keypoints1, *matches1to2, std::numeric_limits::max() ); + } + float repeatability; + int correspCount; + SparseMat_ thresholdedOverlapMask; // thresholded allOverlapErrors + calculateRepeatability( img1, img2, H1to2, + keypoints1, keypoints2, + repeatability, correspCount, + &thresholdedOverlapMask ); + + correctMatches1to2Mask->resize(matches1to2->size()); + int ddd = 0; + for( size_t i = 0; i < matches1to2->size(); i++ ) + { + (*correctMatches1to2Mask)[i].resize((*matches1to2)[i].size()); + for( size_t j = 0;j < (*matches1to2)[i].size(); j++ ) + { + int indexQuery = (*matches1to2)[i][j].indexQuery; + int indexTrain = (*matches1to2)[i][j].indexTrain; + (*correctMatches1to2Mask)[i][j] = thresholdedOverlapMask( indexQuery, indexTrain ); + ddd += thresholdedOverlapMask( indexQuery, indexTrain ) != 0 ? 1 : 0; + } + } + + computeRecallPrecisionCurve( *matches1to2, *correctMatches1to2Mask, recallPrecisionCurve ); +} diff --git a/tests/cv/src/adetectordescriptor_evaluation.cpp b/tests/cv/src/adetectordescriptor_evaluation.cpp index 42dfbe6..f00388d 100644 --- a/tests/cv/src/adetectordescriptor_evaluation.cpp +++ b/tests/cv/src/adetectordescriptor_evaluation.cpp @@ -51,143 +51,9 @@ using namespace cv; /****************************************************************************************\ * Functions to evaluate affine covariant detectors and descriptors. 
* \****************************************************************************************/ -inline Point2f applyHomography( const Mat_& H, const Point2f& pt ) -{ - double z = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2); - if( z ) - { - double w = 1./z; - return Point2f( (H(0,0)*pt.x + H(0,1)*pt.y + H(0,2))*w, (H(1,0)*pt.x + H(1,1)*pt.y + H(1,2))*w ); - } - return Point2f( numeric_limits::max(), numeric_limits::max() ); -} - -inline void linearizeHomographyAt( const Mat_& H, const Point2f& pt, Mat_& A ) -{ - A.create(2,2); - double p1 = H(0,0)*pt.x + H(0,1)*pt.y + H(0,2), - p2 = H(1,0)*pt.x + H(1,1)*pt.y + H(1,2), - p3 = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2), - p3_2 = p3*p3; - if( p3 ) - { - A(0,0) = H(0,0)/p3 - p1*H(2,0)/p3_2; // fxdx - A(0,1) = H(0,1)/p3 - p1*H(2,1)/p3_2; // fxdy - - A(1,0) = H(1,0)/p3 - p2*H(2,0)/p3_2; // fydx - A(1,1) = H(1,1)/p3 - p2*H(2,1)/p3_2; // fydx - } - else - A.setTo(Scalar::all(numeric_limits::max())); -} - -class EllipticKeyPoint -{ -public: - EllipticKeyPoint(); - EllipticKeyPoint( const Point2f& _center, const Scalar& _ellipse ); - - static void convert( const vector& src, vector& dst ); - static void convert( const vector& src, vector& dst ); - static Mat_ getSecondMomentsMatrix( const Scalar& _ellipse ); - Mat_ getSecondMomentsMatrix() const; - - void calcProjection( const Mat_& H, EllipticKeyPoint& projection ) const; - - Point2f center; - Scalar ellipse; // 3 elements a, b, c: ax^2+2bxy+cy^2=1 - Size_ axes; // half lenght of elipse axes - Size_ boundingBox; // half sizes of bounding box which sides are parallel to the coordinate axes -}; - -EllipticKeyPoint::EllipticKeyPoint() -{ - *this = EllipticKeyPoint(Point2f(0,0), Scalar(1, 0, 1) ); -} - -EllipticKeyPoint::EllipticKeyPoint( const Point2f& _center, const Scalar& _ellipse ) -{ - center = _center; - ellipse = _ellipse; - - Mat_ M = getSecondMomentsMatrix(_ellipse), eval; - eigen( M, eval ); - assert( eval.rows == 2 && eval.cols == 1 ); - axes.width = 1.f / sqrt(eval(0,0)); - axes.height = 1.f / sqrt(eval(1,0)); - - float ac_b2 = ellipse[0]*ellipse[2] - ellipse[1]*ellipse[1]; - boundingBox.width = sqrt(ellipse[2]/ac_b2); - boundingBox.height = sqrt(ellipse[0]/ac_b2); -} - -Mat_ EllipticKeyPoint::getSecondMomentsMatrix( const Scalar& _ellipse ) -{ - Mat_ M(2, 2); - M(0,0) = _ellipse[0]; - M(1,0) = M(0,1) = _ellipse[1]; - M(1,1) = _ellipse[2]; - return M; -} - -Mat_ EllipticKeyPoint::getSecondMomentsMatrix() const -{ - return getSecondMomentsMatrix(ellipse); -} - -void EllipticKeyPoint::calcProjection( const Mat_& H, EllipticKeyPoint& projection ) const -{ - Point2f dstCenter = applyHomography(H, center); - - Mat_ invM; invert(getSecondMomentsMatrix(), invM); - Mat_ Aff; linearizeHomographyAt(H, center, Aff); - Mat_ dstM; invert(Aff*invM*Aff.t(), dstM); - - projection = EllipticKeyPoint( dstCenter, Scalar(dstM(0,0), dstM(0,1), dstM(1,1)) ); -} - -void EllipticKeyPoint::convert( const vector& src, vector& dst ) -{ - if( !src.empty() ) - { - dst.resize(src.size()); - for( size_t i = 0; i < src.size(); i++ ) - { - float rad = src[i].size/2; - assert( rad ); - float fac = 1.f/(rad*rad); - dst[i] = EllipticKeyPoint( src[i].pt, Scalar(fac, 0, fac) ); - } - } -} - -void EllipticKeyPoint::convert( const vector& src, vector& dst ) -{ - if( !src.empty() ) - { - dst.resize(src.size()); - for( size_t i = 0; i < src.size(); i++ ) - { - Size_ axes = src[i].axes; - float rad = sqrt(axes.height*axes.width); - dst[i] = KeyPoint(src[i].center, 2*rad ); - } - } -} - -void calcEllipticKeyPointProjections( const vector& src, const 
Mat_& H, vector& dst ) -{ - if( !src.empty() ) - { - assert( !H.empty() && H.cols == 3 && H.rows == 3); - dst.resize(src.size()); - vector::const_iterator srcIt = src.begin(); - vector::iterator dstIt = dst.begin(); - for( ; srcIt != src.end(); ++srcIt, ++dstIt ) - srcIt->calcProjection(H, *dstIt); - } -} +Point2f applyHomography( const Mat_& H, const Point2f& pt ); +void linearizeHomographyAt( const Mat_& H, const Point2f& pt, Mat_& A ); void calcKeyPointProjections( const vector& src, const Mat_& H, vector& dst ) { @@ -202,7 +68,10 @@ void calcKeyPointProjections( const vector& src, const Mat_& H Point2f dstPt = applyHomography(H, srcIt->pt); float srcSize2 = srcIt->size * srcIt->size; - Mat_ invM; invert(EllipticKeyPoint::getSecondMomentsMatrix( Scalar(1./srcSize2, 0., 1./srcSize2)), invM); + Mat_ M(2, 2); + M(0,0) = M(1,1) = 1./srcSize2; + M(1,0) = M(0,1) = 0; + Mat_ invM; invert(M, invM); Mat_ Aff; linearizeHomographyAt(H, srcIt->pt, Aff); Mat_ dstM; invert(Aff*invM*Aff.t(), dstM); Mat_ eval; eigen( dstM, eval ); @@ -236,261 +105,6 @@ void filterKeyPointsByImageSize( vector& keypoints, const Size& imgSiz } } -/* - * calulate ovelap errors - */ -void overlap( const vector& keypoints1, const vector& keypoints2t, bool commonPart, - SparseMat_& overlaps ) -{ - overlaps.clear(); - if( keypoints1.empty() || keypoints2t.empty() ) - return; - - int size[] = { keypoints1.size(), keypoints2t.size() }; - overlaps.create( 2, size ); - - for( size_t i1 = 0; i1 < keypoints1.size(); i1++ ) - { - EllipticKeyPoint kp1 = keypoints1[i1]; - float maxDist = sqrt(kp1.axes.width*kp1.axes.height), - fac = 30.f/maxDist; - if( !commonPart ) - fac=3; - - maxDist = maxDist*4; - fac = 1.0/(fac*fac); - - EllipticKeyPoint keypoint1a = EllipticKeyPoint( kp1.center, Scalar(fac*kp1.ellipse[0], fac*kp1.ellipse[1], fac*kp1.ellipse[2]) ); - - for( size_t i2 = 0; i2 < keypoints2t.size(); i2++ ) - { - EllipticKeyPoint kp2 = keypoints2t[i2]; - Point2f diff = kp2.center - kp1.center; - - if( norm(diff) < maxDist ) - { - EllipticKeyPoint keypoint2a = EllipticKeyPoint( kp2.center, Scalar(fac*kp2.ellipse[0], fac*kp2.ellipse[1], fac*kp2.ellipse[2]) ); - //find the largest eigenvalue - float maxx = ceil(( keypoint1a.boundingBox.width > (diff.x+keypoint2a.boundingBox.width)) ? - keypoint1a.boundingBox.width : (diff.x+keypoint2a.boundingBox.width)); - float minx = floor((-keypoint1a.boundingBox.width < (diff.x-keypoint2a.boundingBox.width)) ? - -keypoint1a.boundingBox.width : (diff.x-keypoint2a.boundingBox.width)); - - float maxy = ceil(( keypoint1a.boundingBox.height > (diff.y+keypoint2a.boundingBox.height)) ? - keypoint1a.boundingBox.height : (diff.y+keypoint2a.boundingBox.height)); - float miny = floor((-keypoint1a.boundingBox.height < (diff.y-keypoint2a.boundingBox.height)) ? - -keypoint1a.boundingBox.height : (diff.y-keypoint2a.boundingBox.height)); - float mina = (maxx-minx) < (maxy-miny) ? 
(maxx-minx) : (maxy-miny) ; - float dr = mina/50.0; - float bua = 0, bna = 0; - //compute the area - for( float rx1 = minx; rx1 <= maxx; rx1+=dr ) - { - float rx2 = rx1-diff.x; - for( float ry1=miny; ry1<=maxy; ry1+=dr ) - { - float ry2=ry1-diff.y; - //compute the distance from the ellipse center - float e1 = keypoint1a.ellipse[0]*rx1*rx1+2*keypoint1a.ellipse[1]*rx1*ry1+keypoint1a.ellipse[2]*ry1*ry1; - float e2 = keypoint2a.ellipse[0]*rx2*rx2+2*keypoint2a.ellipse[1]*rx2*ry2+keypoint2a.ellipse[2]*ry2*ry2; - //compute the area - if( e1<1 && e2<1 ) bna++; - if( e1<1 || e2<1 ) bua++; - } - } - if( bna > 0) - overlaps.ref(i1,i2) = 100.0*bna/bua; - } - } - } -} - -void filterEllipticKeyPointsByImageSize( vector& keypoints, const Size& imgSize ) -{ - if( !keypoints.empty() ) - { - vector filtered; - filtered.reserve(keypoints.size()); - vector::const_iterator it = keypoints.begin(); - for( int i = 0; it != keypoints.end(); ++it, i++ ) - { - if( it->center.x + it->boundingBox.width < imgSize.width && - it->center.x - it->boundingBox.width > 0 && - it->center.y + it->boundingBox.height < imgSize.height && - it->center.y - it->boundingBox.height > 0 ) - filtered.push_back(*it); - } - keypoints.assign(filtered.begin(), filtered.end()); - } -} - -void getEllipticKeyPointsInCommonPart( vector& keypoints1, vector& keypoints2, - vector& keypoints1t, vector& keypoints2t, - Size& imgSize1, const Size& imgSize2 ) -{ - filterEllipticKeyPointsByImageSize( keypoints1, imgSize1 ); - filterEllipticKeyPointsByImageSize( keypoints1t, imgSize2 ); - filterEllipticKeyPointsByImageSize( keypoints2, imgSize2 ); - filterEllipticKeyPointsByImageSize( keypoints2t, imgSize1 ); -} - -void calculateRepeatability( const vector& _keypoints1, const vector& _keypoints2, - const Mat& img1, const Mat& img2, const Mat& H1to2, - float& repeatability, int& correspondencesCount, - SparseMat_* thresholdedOverlapMask=0 ) -{ - vector keypoints1( _keypoints1.begin(), _keypoints1.end() ), - keypoints2( _keypoints2.begin(), _keypoints2.end() ), - keypoints1t( keypoints1.size() ), - keypoints2t( keypoints2.size() ); - - // calculate projections of key points - calcEllipticKeyPointProjections( keypoints1, H1to2, keypoints1t ); - Mat H2to1; invert(H1to2, H2to1); - calcEllipticKeyPointProjections( keypoints2, H2to1, keypoints2t ); - - bool ifEvaluateDetectors = !thresholdedOverlapMask; // == commonPart - float overlapThreshold; - if( ifEvaluateDetectors ) - { - overlapThreshold = 100.f - 40.f; - - // remove key points from outside of the common image part - Size sz1 = img1.size(), sz2 = img2.size(); - getEllipticKeyPointsInCommonPart( keypoints1, keypoints2, keypoints1t, keypoints2t, sz1, sz2 ); - } - else - { - overlapThreshold = 100.f - 50.f; - } - int minCount = min( keypoints1.size(), keypoints2t.size() ); - - // calculate overlap errors - SparseMat_ overlaps; - overlap( keypoints1, keypoints2t, ifEvaluateDetectors, overlaps ); - - correspondencesCount = -1; - repeatability = -1.f; - const int* size = overlaps.size(); - if( !size || overlaps.nzcount() == 0 ) - return; - - if( ifEvaluateDetectors ) - { - // threshold the overlaps - for( int y = 0; y < size[0]; y++ ) - { - for( int x = 0; x < size[1]; x++ ) - { - if ( overlaps(y,x) < overlapThreshold ) - overlaps.erase(y,x); - } - } - - // regions one-to-one matching - correspondencesCount = 0; - while( overlaps.nzcount() > 0 ) - { - double maxOverlap = 0; - int maxIdx[2]; - minMaxLoc( overlaps, 0, &maxOverlap, 0, maxIdx ); - for( size_t i1 = 0; i1 < keypoints1.size(); i1++ ) - 
overlaps.erase(i1, maxIdx[1]); - for( size_t i2 = 0; i2 < keypoints2t.size(); i2++ ) - overlaps.erase(maxIdx[0], i2); - correspondencesCount++; - } - repeatability = minCount ? (float)(correspondencesCount*100)/minCount : -1; - } - else - { - thresholdedOverlapMask->create( 2, size ); - for( int y = 0; y < size[0]; y++ ) - { - for( int x = 0; x < size[1]; x++ ) - { - float val = overlaps(y,x); - if ( val >= overlapThreshold ) - thresholdedOverlapMask->ref(y,x) = val; - } - } - } -} - - -void evaluateDetectors( const vector& keypoints1, const vector& keypoints2, - const Mat& img1, const Mat& img2, const Mat& H1to2, - float& repeatability, int& correspCount ) -{ - calculateRepeatability( keypoints1, keypoints2, - img1, img2, H1to2, - repeatability, correspCount ); -} - -inline float recall( int correctMatchCount, int correspondenceCount ) -{ - return correspondenceCount ? (float)correctMatchCount / (float)correspondenceCount : -1; -} - -inline float precision( int correctMatchCount, int falseMatchCount ) -{ - return correctMatchCount + falseMatchCount ? (float)correctMatchCount / (float)(correctMatchCount + falseMatchCount) : -1; -} - - -struct DMatchForEvaluation : public DMatch -{ - int isCorrect; - - DMatchForEvaluation( const DMatch &dm ) - : DMatch( dm ) - { - } -}; - - -void evaluateDescriptors( const vector& keypoints1, const vector& keypoints2, - const vector >& matches1to2, vector &allMatches, - const Mat& img1, const Mat& img2, const Mat& H1to2, - int &correctMatchCount, int &falseMatchCount, int& correspondenceCount ) -{ - assert( !keypoints1.empty() && !keypoints2.empty() && !matches1to2.empty() ); - assert( keypoints1.size() == matches1to2.size() ); - - float repeatability; - int correspCount; - SparseMat_ thresholdedOverlapMask; // thresholded allOverlapErrors - calculateRepeatability( keypoints1, keypoints2, - img1, img2, H1to2, - repeatability, correspCount, - &thresholdedOverlapMask ); - correspondenceCount = thresholdedOverlapMask.nzcount(); - - correctMatchCount = 0; - falseMatchCount = 0; - - for( size_t i = 0; i < matches1to2.size(); i++ ) - { - for( size_t j = 0;j < matches1to2[i].size(); j++ ) - { - //if( matches1to2[i].match.indexTrain > 0 ) - //{ - DMatchForEvaluation match = matches1to2[i][j]; - match.isCorrect = thresholdedOverlapMask( match.indexQuery, match.indexTrain); - if( match.isCorrect ) - correctMatchCount++; - else - falseMatchCount++; - allMatches.push_back( match ); - //} - //else - //{ - // matches1to2[i].isCorrect = -1; - //} - } - } -} - /****************************************************************************************\ * Detectors evaluation * \****************************************************************************************/ @@ -1063,22 +677,18 @@ void DetectorQualityTest::runDatasetTest (const vector &imgs, const vector< calcQuality[di].resize(TEST_CASE_COUNT); - vector keypoints1; vector ekeypoints1; - + vector keypoints1; detector->detect( imgs[0], keypoints1 ); writeKeypoints( keypontsFS, keypoints1, 0); - EllipticKeyPoint::convert( keypoints1, ekeypoints1 ); int progressCount = DATASETS_COUNT*TEST_CASE_COUNT; for( int ci = 0; ci < TEST_CASE_COUNT; ci++ ) { progress = update_progress( progress, di*TEST_CASE_COUNT + ci, progressCount, 0 ); vector keypoints2; - detector->detect( imgs[ci+1], keypoints2 ); + evaluateFeatureDetector( imgs[0], imgs[ci+1], Hs[ci], &keypoints1, &keypoints2, + calcQuality[di][ci].repeatability, calcQuality[di][ci].correspondenceCount, + detector ); writeKeypoints( keypontsFS, keypoints2, ci+1); - vector 
ekeypoints2; - EllipticKeyPoint::convert( keypoints2, ekeypoints2 ); - evaluateDetectors( ekeypoints1, ekeypoints2, imgs[0], imgs[ci], Hs[ci], - calcQuality[di][ci].repeatability, calcQuality[di][ci].correspondenceCount ); } } @@ -1185,7 +795,7 @@ protected: virtual int processResults( int datasetIdx, int caseIdx ); virtual void writePlotData( int di ) const; - void calculatePlotData( vector &allMatches, int allCorrespCount, int di ); + void calculatePlotData( vector > &allMatches, vector > &allCorrectMatchesMask, int di ); struct Quality { @@ -1333,8 +943,8 @@ void DescriptorQualityTest::readAlgorithm( ) if( defaultDescMatch == 0 ) { - DescriptorExtractor *extractor = createDescriptorExtractor( algName ); - DescriptorMatcher *matcher = createDescriptorMatcher( matcherName ); + Ptr extractor = createDescriptorExtractor( algName ); + Ptr matcher = createDescriptorMatcher( matcherName ); defaultDescMatch = new VectorDescriptorMatch( extractor, matcher ); specificDescMatch = new VectorDescriptorMatch( extractor, matcher ); @@ -1346,10 +956,15 @@ void DescriptorQualityTest::readAlgorithm( ) } } -void DescriptorQualityTest::calculatePlotData( vector &allMatches, int allCorrespCount, int di ) +void DescriptorQualityTest::calculatePlotData( vector > &allMatches, vector > &allCorrectMatchesMask, int di ) { - std::sort( allMatches.begin(), allMatches.end() ); + vector recallPrecisionCurve; + computeRecallPrecisionCurve( allMatches, allCorrectMatchesMask, recallPrecisionCurve ); + // you have recallPrecisionCurve for all images from dataset + // size of recallPrecisionCurve == total matches count +#if 0 + std::sort( allMatches.begin(), allMatches.end() ); //calcDatasetQuality[di].resize( allMatches.size() ); calcDatasetQuality[di].clear(); int correctMatchCount = 0, falseMatchCount = 0; @@ -1358,6 +973,7 @@ void DescriptorQualityTest::calculatePlotData( vector &allM int step = 1 + allMatches.size() / npoints; const float resultPrecision = 0.5; bool isResultCalculated = false; + for( size_t i=0;i &allM calcDatasetQuality[di].push_back( quality ); - if( !isResultCalculated && quality.precision < resultPrecision) + if( !isResultCalculated && quality.precision < resultPrecision ) { for(int ci=0;ci &allM quality.precision = precision( correctMatchCount, falseMatchCount ); calcDatasetQuality[di].push_back( quality ); +#endif } @@ -1407,20 +1024,18 @@ void DescriptorQualityTest::runDatasetTest (const vector &imgs, const vecto Ptr descMatch = commRunParams[di].isActiveParams ? 
specificDescMatch : defaultDescMatch; calcQuality[di].resize(TEST_CASE_COUNT); - vector keypoints1; vector ekeypoints1; + vector keypoints1; readKeypoints( keypontsFS, keypoints1, 0); - EllipticKeyPoint::convert( keypoints1, ekeypoints1 ); int progressCount = DATASETS_COUNT*TEST_CASE_COUNT; - vector allMatches; - int allCorrespCount = 0; + vector > allMatches1to2; + vector > allCorrectMatchesMask; for( int ci = 0; ci < TEST_CASE_COUNT; ci++ ) { progress = update_progress( progress, di*TEST_CASE_COUNT + ci, progressCount, 0 ); vector keypoints2; - vector ekeypoints2; if( commRunParams[di].projectKeypointsFrom1Image ) { // TODO need to test function calcKeyPointProjections @@ -1429,24 +1044,20 @@ void DescriptorQualityTest::runDatasetTest (const vector &imgs, const vecto } else readKeypoints( keypontsFS, keypoints2, ci+1 ); - EllipticKeyPoint::convert( keypoints2, ekeypoints2 ); - descMatch->add( imgs[ci+1], keypoints2 ); - vector > matches1to2; - //TODO: use more sophisticated strategy to choose threshold - descMatch->match( imgs[0], keypoints1, matches1to2, std::numeric_limits::max() ); - // TODO if( commRunParams[di].matchFilter ) - int correspCount; - int correctMatchCount = 0, falseMatchCount = 0; - evaluateDescriptors( ekeypoints1, ekeypoints2, matches1to2, allMatches, imgs[0], imgs[ci+1], Hs[ci], - correctMatchCount, falseMatchCount, correspCount ); - allCorrespCount += correspCount; - - descMatch->clear (); + vector > matches1to2; + vector > correctMatchesMask; + vector recallPrecisionCurve; // not used because we need recallPrecisionCurve for + // all images in dataset + evaluateDescriptorMatch( imgs[0], imgs[ci+1], Hs[ci], keypoints1, keypoints2, + &matches1to2, &correctMatchesMask, recallPrecisionCurve, + descMatch ); + allMatches1to2.insert( allMatches1to2.end(), matches1to2.begin(), matches1to2.end() ); + allCorrectMatchesMask.insert( allCorrectMatchesMask.end(), correctMatchesMask.begin(), correctMatchesMask.end() ); } - calculatePlotData( allMatches, allCorrespCount, di ); + calculatePlotData( allMatches1to2, allCorrectMatchesMask, di ); } int DescriptorQualityTest::processResults( int datasetIdx, int caseIdx )
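
A usage sketch of the new evaluation API (not part of the commit itself): the C++ below shows how the functions declared in features2d.hpp by this change are intended to be called, mirroring what the updated DetectorQualityTest/DescriptorQualityTest code does. The template arguments written out here (vector<KeyPoint>, Ptr<FeatureDetector>, vector<vector<DMatch> >, vector<vector<uchar> >, vector<Point2f>) are reconstructed from context because the web view stripped them, and the concrete detector/extractor/matcher choices (SurfFeatureDetector, "SURF", "BruteForce") are illustrative assumptions only; img1, img2 and the ground-truth homography H1to2 are assumed to be loaded by the caller.

#include <opencv2/features2d/features2d.hpp>
#include <vector>
#include <cstdio>
using namespace cv;
using namespace std;

// Detector evaluation: repeatability and the number of one-to-one region
// correspondences between img1 and img2 under the homography H1to2.
void detectorEvaluationExample( const Mat& img1, const Mat& img2, const Mat& H1to2 )
{
    vector<KeyPoint> keypoints1, keypoints2;   // left empty so the detector fills them
    float repeatability = -1.f;
    int correspCount = -1;

    // Any FeatureDetector from this module can be passed; SurfFeatureDetector is just an example.
    Ptr<FeatureDetector> detector = new SurfFeatureDetector();
    evaluateFeatureDetector( img1, img2, H1to2, &keypoints1, &keypoints2,
                             repeatability, correspCount, detector );
    printf( "repeatability: %f, correspondences: %d\n", repeatability, correspCount );
}

// Descriptor evaluation: builds a recall vs. (1-precision) curve using the
// Ptr-based VectorDescriptorMatch constructor introduced by this commit.
void descriptorEvaluationExample( const Mat& img1, const Mat& img2, const Mat& H1to2,
                                  vector<KeyPoint>& keypoints1, vector<KeyPoint>& keypoints2 )
{
    // keypoints1 and keypoints2 must already be detected in both images;
    // evaluateDescriptorMatch() does not run a detector.
    Ptr<DescriptorExtractor> extractor = createDescriptorExtractor( "SURF" );
    Ptr<DescriptorMatcher> matcher = createDescriptorMatcher( "BruteForce" );
    Ptr<GenericDescriptorMatch> dmatch = new VectorDescriptorMatch( extractor, matcher );

    vector<vector<DMatch> > matches1to2;
    vector<vector<uchar> > correctMatchesMask;  // element type assumed from computeRecallPrecisionCurve
    vector<Point2f> recallPrecisionCurve;
    evaluateDescriptorMatch( img1, img2, H1to2, keypoints1, keypoints2,
                             &matches1to2, &correctMatchesMask, recallPrecisionCurve, dmatch );

    // Each curve point is (1-precision, recall), so query recall at 1-precision = 0.5.
    printf( "recall at 1-precision=0.5: %f\n", getRecall( recallPrecisionCurve, 0.5f ) );
}

Note that the relocated evaluateFeatureDetector() reports repeatability as a fraction in [0,1], whereas the old test code reported a percentage; the thresholds in calculateRepeatability change accordingly (1.f - 0.4f instead of 100.f - 40.f).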