Added the defaultNorm() method to the DescriptorExtractor class. This method returns the default norm type for each descriptor type. The tests and C/C++ samples were updated to get the norm type directly from the DescriptorExtractor inherited classes.
This was reported in feature request #2182 (http://code.opencv.org/issues/2182). It will make it possible to get the norm type usually applied in the matching method for each descriptor type, instead of passing it manually.
virtual int descriptorSize() const = 0;
virtual int descriptorType() const = 0;
+ virtual int defaultNorm() const = 0;
static Ptr<DescriptorExtractor> create( const String& descriptorExtractorType );
virtual void write( FileStorage& ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
+ virtual int defaultNorm() const;
protected:
...
};
virtual void write( FileStorage& ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
+ virtual int defaultNorm() const;
protected:
...
};
CV_WRAP virtual int descriptorSize() const = 0;
CV_WRAP virtual int descriptorType() const = 0;
+ CV_WRAP virtual int defaultNorm() const = 0;
CV_WRAP virtual bool empty() const;
int descriptorSize() const;
// returns the descriptor type
int descriptorType() const;
+ // returns the default norm type
+ int defaultNorm() const;
// Compute the BRISK features on an image
void operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
int descriptorSize() const;
// returns the descriptor type
int descriptorType() const;
+ // returns the default norm type
+ int defaultNorm() const;
// Compute the ORB features and descriptors on an image
void operator()(InputArray image, InputArray mask, std::vector<KeyPoint>& keypoints) const;
/** returns the descriptor type */
virtual int descriptorType() const;
+ /** returns the default norm type */
+ virtual int defaultNorm() const;
+
/** select the 512 "best description pairs"
* @param images grayscale images set
* @param keypoints set of detected keypoints
virtual int descriptorSize() const;
virtual int descriptorType() const;
+ virtual int defaultNorm() const;
virtual bool empty() const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
+ virtual int defaultNorm() const;
/// @todo read and write for brief
return CV_8UC1;
}
+int BriefDescriptorExtractor::defaultNorm() const
+{
+ return NORM_HAMMING;
+}
+
void BriefDescriptorExtractor::read( const FileNode& fn)
{
int dSize = fn["descriptorSize"];
return CV_8U;
}
+int
+BRISK::defaultNorm() const
+{
+ return NORM_HAMMING;
+}
+
BRISK::~BRISK()
{
delete[] patternPoints_;
return descriptorExtractor->descriptorType();
}
+int OpponentColorDescriptorExtractor::defaultNorm() const
+{
+ return descriptorExtractor->defaultNorm();
+}
+
bool OpponentColorDescriptorExtractor::empty() const
{
return !descriptorExtractor || descriptorExtractor->empty();
return CV_8U;
}
+int FREAK::defaultNorm() const
+{
+ return NORM_HAMMING;
+}
+
} // END NAMESPACE CV
return CV_8U;
}
+int ORB::defaultNorm() const
+{
+ return NORM_HAMMING;
+}
+
/** Compute the ORB features and descriptors on an image
* @param img the image to compute the features and descriptors on
* @param mask the mask to apply
TEST(Features2d_RotationInvariance_Descriptor_BRISK, regression)
{
DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.BRISK"),
- Algorithm::create<DescriptorExtractor>("Feature2D.BRISK"),
- NORM_HAMMING,
+ Algorithm::create<DescriptorExtractor>("Feature2D.BRISK"),
+ Algorithm::create<DescriptorExtractor>("Feature2D.BRISK")->defaultNorm(),
0.99f);
test.safe_run();
}
{
DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.ORB"),
Algorithm::create<DescriptorExtractor>("Feature2D.ORB"),
- NORM_HAMMING,
+ Algorithm::create<DescriptorExtractor>("Feature2D.ORB")->defaultNorm(),
0.99f);
test.safe_run();
}
//{
// DescriptorRotationInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.ORB"),
// Algorithm::create<DescriptorExtractor>("Feature2D.FREAK"),
-// NORM_HAMMING,
+// Algorithm::create<DescriptorExtractor>("Feature2D.FREAK")->defaultNorm(),
// 0.f);
// test.safe_run();
//}
//TEST(Features2d_ScaleInvariance_Descriptor_BRISK, regression)
//{
// DescriptorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.BRISK"),
-// Algorithm::create<DescriptorExtractor>("Feature2D.BRISK"),
-// NORM_HAMMING,
-// 0.99f);
+// Algorithm::create<DescriptorExtractor>("Feature2D.BRISK"),
+// Algorithm::create<DescriptorExtractor>("Feature2D.BRISK")->defaultNorm(),
+// 0.99f);
// test.safe_run();
//}
//TEST(Features2d_ScaleInvariance_Descriptor_ORB, regression)
//{
// DescriptorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.ORB"),
-// Algorithm::create<DescriptorExtractor>("Feature2D.ORB"),
-// NORM_HAMMING,
-// 0.01f);
+// Algorithm::create<DescriptorExtractor>("Feature2D.ORB"),
+// Algorithm::create<DescriptorExtractor>("Feature2D.ORB")->defaultNorm(),
+// 0.01f);
// test.safe_run();
//}
//TEST(Features2d_ScaleInvariance_Descriptor_FREAK, regression)
//{
// DescriptorScaleInvarianceTest test(Algorithm::create<FeatureDetector>("Feature2D.ORB"),
-// Algorithm::create<DescriptorExtractor>("Feature2D.FREAK"),
-// NORM_HAMMING,
-// 0.01f);
+// Algorithm::create<DescriptorExtractor>("Feature2D.FREAK"),
+// Algorithm::create<DescriptorExtractor>("Feature2D.FREAK")->defaultNorm(),
+// 0.01f);
// test.safe_run();
//}
virtual void write( FileStorage &fs ) const;
virtual int descriptorSize() const;
virtual int descriptorType() const;
+ virtual int defaultNorm() const;
protected:
...
}
virtual int descriptorSize() const { return classifier_.classes(); }
virtual int descriptorType() const { return DataType<T>::type; }
+ virtual int defaultNorm() const { return NORM_L1; }
virtual bool empty() const;
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
+ //! returns the default norm type
+ int defaultNorm() const;
//! upload host keypoints to device memory
void uploadKeypoints(const std::vector<KeyPoint>& keypoints, GpuMat& keypointsGPU);
//! returns the descriptor type
CV_WRAP int descriptorType() const;
+ //! returns the default norm type
+ CV_WRAP int defaultNorm() const;
+
//! finds the keypoints using SIFT algorithm
void operator()(InputArray img, InputArray mask,
std::vector<KeyPoint>& keypoints) const;
//! returns the descriptor type
CV_WRAP int descriptorType() const;
+ //! returns the default norm type
+ CV_WRAP int defaultNorm() const;
+
//! finds the keypoints using fast hessian detector used in SURF
void operator()(InputArray img, InputArray mask,
CV_OUT std::vector<KeyPoint>& keypoints) const;
//! returns the descriptor size in float's (64 or 128)
int descriptorSize() const;
+ //! returns the default norm type
+ int defaultNorm() const;
//! upload host keypoints to device memory
void uploadKeypoints(const std::vector<cv::KeyPoint> &keypoints, oclMat &keypointsocl);
//! download keypoints from device to host memory
return CV_32F;
}
+int SIFT::defaultNorm() const
+{
+ return NORM_L2;
+}
+
void SIFT::operator()(InputArray _image, InputArray _mask,
std::vector<KeyPoint>& keypoints) const
int SURF::descriptorSize() const { return extended ? 128 : 64; }
int SURF::descriptorType() const { return CV_32F; }
+int SURF::defaultNorm() const { return NORM_L2; }
void SURF::operator()(InputArray imgarg, InputArray maskarg,
CV_OUT std::vector<KeyPoint>& keypoints) const
return extended ? 128 : 64;
}
+int cv::cuda::SURF_CUDA::defaultNorm() const
+{
+ return NORM_L2;
+}
+
void cv::cuda::SURF_CUDA::uploadKeypoints(const std::vector<KeyPoint>& keypoints, GpuMat& keypointsGPU)
{
if (keypoints.empty())
return extended ? 128 : 64;
}
+int cv::ocl::SURF_OCL::defaultNorm() const
+{
+ return NORM_L2;
+}
+
void cv::ocl::SURF_OCL::uploadKeypoints(const std::vector<KeyPoint> &keypoints, oclMat &keypointsGPU)
{
if (keypoints.empty())
CV_Assert(kpt2[i].response > 0 );
vector<DMatch> matches;
- BFMatcher(NORM_L2, true).match(d1, d2, matches);
+ BFMatcher(f->defaultNorm(), true).match(d1, d2, matches);
vector<Point2f> pt1, pt2;
for( size_t i = 0; i < matches.size(); i++ ) {
cv::Mat descriptors_gold;
surf_gold(image, cv::noArray(), keypoints, descriptors_gold, true);
- cv::BFMatcher matcher(cv::NORM_L2);
+ cv::BFMatcher matcher(surf.defaultNorm());
std::vector<cv::DMatch> matches;
matcher.match(descriptors_gold, cv::Mat(descriptors), matches);
cv::Mat descriptors_gold;
surf_gold(image, cv::noArray(), keypoints, descriptors_gold, true);
- cv::BFMatcher matcher(cv::NORM_L2);
+ cv::BFMatcher matcher(surf.defaultNorm());
std::vector<cv::DMatch> matches;
matcher.match(descriptors_gold, cv::Mat(descriptors), matches);
Mat descriptors2; de.compute( img2, keypoints2, descriptors2 );
// Match descriptors
- BFMatcher matcher(NORM_L1);
+ BFMatcher matcher(de.defaultNorm());
vector<DMatch> matches;
matcher.match( descriptors1, descriptors2, matches );
//Do matching using features2d
cout << "matching with BruteForceMatcher<Hamming>" << endl;
- BFMatcher matcher_popcount(NORM_HAMMING);
+ BFMatcher matcher_popcount(extractor.defaultNorm());
vector<DMatch> matches_popcount;
double pop_time = match(kpts_1, kpts_2, matcher_popcount, desc_1, desc_2, matches_popcount);
cout << "done BruteForceMatcher<Hamming> matching. took " << pop_time << " seconds" << endl;
virtual void readAlgorithm( )
{
string classifierFile = data_path + "/features2d/calonder_classifier.rtc";
+ Ptr<DescriptorExtractor> extractor = makePtr<CalonderDescriptorExtractor<float> >( classifierFile );
defaultDescMatcher = makePtr<VectorDescriptorMatch>(
- makePtr<CalonderDescriptorExtractor<float> >( classifierFile ),
- makePtr<BFMatcher>(int(NORM_L2)));
+ extractor,
+ makePtr<BFMatcher>(extractor->defaultNorm()));
specificDescMatcher = defaultDescMatcher;
}
};
// The standard Hamming distance can be used such as
// BFMatcher matcher(NORM_HAMMING);
// or the proposed cascade of hamming distance using SSSE3
- BFMatcher matcher(NORM_HAMMING);
+ BFMatcher matcher(extractor.defaultNorm());
// detect
double t = (double)getTickCount();
extractor.compute(img2, keypoints2, descriptors2);
// matching descriptors
- BFMatcher matcher(NORM_L2);
+ BFMatcher matcher(extractor.defaultNorm());
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
extractor.compute(img2, keypoints2, descriptors2);
// matching descriptors
- BFMatcher matcher(NORM_L2);
+ BFMatcher matcher(extractor.defaultNorm());
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
- BFMatcher matcher(NORM_L2);
+ BFMatcher matcher(extractor.defaultNorm());
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
vector<DMatch> matches;
- BFMatcher desc_matcher(NORM_HAMMING);
+ BFMatcher desc_matcher(brief.defaultNorm());
vector<Point2f> train_pts, query_pts;
vector<KeyPoint> train_kpts, query_kpts;
cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;
// matching descriptors
- BFMatcher_CUDA matcher(NORM_L2);
+ BFMatcher_CUDA matcher(surf.defaultNorm());
GpuMat trainIdx, distance;
matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance);