From: marina.kolpakova
Date: Wed, 9 Jan 2013 10:29:14 +0000 (+0400)
Subject: created abstract FeaturePool class
X-Git-Tag: accepted/tizen/6.0/unified/20201030.111113~4025^2~56
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a3600b94db083908283ee429a530d37abffd2a3c;p=platform%2Fupstream%2Fopencv.git

created abstract FeaturePool class
---

diff --git a/apps/sft/include/sft/octave.hpp b/apps/sft/include/sft/octave.hpp
index 0a5fc17..1ab5e40 100644
--- a/apps/sft/include/sft/octave.hpp
+++ b/apps/sft/include/sft/octave.hpp
@@ -102,28 +102,34 @@ private:
 void write(cv::FileStorage& fs, const string&, const ICF& f);
 std::ostream& operator<<(std::ostream& out, const ICF& m);
 
-class FeaturePool
+class ICFFeaturePool : public cv::FeaturePool
 {
 public:
-    FeaturePool(cv::Size model, int nfeatures);
+    ICFFeaturePool(cv::Size model, int nfeatures);
 
-    int size() const { return (int)pool.size(); }
-    float apply(int fi, int si, const Mat& integrals) const;
-    void write( cv::FileStorage& fs, int index) const;
+    virtual int size() const { return (int)pool.size(); }
+    virtual float apply(int fi, int si, const Mat& integrals) const;
+    virtual void write( cv::FileStorage& fs, int index) const;
+
+    virtual ~ICFFeaturePool();
 
 private:
+    void fill(int desired);
 
     cv::Size model;
     int nfeatures;
 
-    Icfvector pool;
+    std::vector<ICF> pool;
 
     static const unsigned int seed = 0;
 
     enum { N_CHANNELS = 10 };
 };
 
+using cv::FeaturePool;
+
 // used for traning single octave scale
 class Octave : cv::Boost
 {
@@ -142,12 +148,13 @@ public:
     Octave(cv::Rect boundingBox, int npositives, int nnegatives, int logScale, int shrinkage);
     virtual ~Octave();
 
-    virtual bool train(const Dataset& dataset, const FeaturePool& pool, int weaks, int treeDepth);
+    virtual bool train(const Dataset& dataset, const FeaturePool* pool, int weaks, int treeDepth);
+
     virtual float predict( const Mat& _sample, Mat& _votes, bool raw_mode, bool return_sum ) const;
     virtual void setRejectThresholds(cv::Mat& thresholds);
     virtual void write( CvFileStorage* fs, string name) const;
-    virtual void write( cv::FileStorage &fs, const FeaturePool& pool, const Mat& thresholds) const;
+    virtual void write( cv::FileStorage &fs, const FeaturePool* pool, const Mat& thresholds) const;
 
     int logScale;
 
@@ -155,7 +162,7 @@ protected:
     virtual bool train( const cv::Mat& trainData, const cv::Mat& responses, const cv::Mat& varIdx=cv::Mat(),
        const cv::Mat& sampleIdx=cv::Mat(), const cv::Mat& varType=cv::Mat(), const cv::Mat& missingDataMask=cv::Mat());
 
-    void processPositives(const Dataset& dataset, const FeaturePool& pool);
+    void processPositives(const Dataset& dataset, const FeaturePool* pool);
     void generateNegatives(const Dataset& dataset);
 
     float predict( const Mat& _sample, const cv::Range range) const;
diff --git a/apps/sft/octave.cpp b/apps/sft/octave.cpp
index 14f3016..693b7b9 100644
--- a/apps/sft/octave.cpp
+++ b/apps/sft/octave.cpp
@@ -197,14 +197,14 @@ public:
 };
 }
 
-void sft::Octave::processPositives(const Dataset& dataset, const FeaturePool& pool)
+void sft::Octave::processPositives(const Dataset& dataset, const FeaturePool* pool)
 {
     Preprocessor prepocessor(shrinkage);
 
     int w = boundingBox.width;
     int h = boundingBox.height;
 
-    integrals.create(pool.size(), (w / shrinkage + 1) * (h / shrinkage * 10 + 1), CV_32SC1);
+    integrals.create(pool->size(), (w / shrinkage + 1) * (h / shrinkage * 10 + 1), CV_32SC1);
 
     int total = 0;
     for (svector::const_iterator it = dataset.pos.begin(); it != dataset.pos.end(); ++it)
@@ -338,7 +338,7 @@ void sft::Octave::traverse(const CvBoostTree* tree, cv::FileStorage& fs, int& nf
     fs << "}";
 }
 
-void sft::Octave::write( cv::FileStorage &fso, const FeaturePool& pool, const Mat& thresholds) const
+void sft::Octave::write( cv::FileStorage &fso, const FeaturePool* pool, const Mat& thresholds) const
 {
     CV_Assert(!thresholds.empty());
     cv::Mat used( 1, weak->total * (pow(2, params.max_depth) - 1), CV_32SC1);
@@ -364,7 +364,7 @@ void sft::Octave::write( cv::FileStorage &fso, const FeaturePool& pool, const Ma
     fso << "features" << "[";
 
     for (int i = 0; i < nfeatures; ++i)
-        pool.write(fso, usedPtr[i]);
+        pool->write(fso, usedPtr[i]);
 
     fso << "]" << "}";
 }
@@ -376,7 +376,7 @@ void sft::Octave::initial_weights(double (&p)[2])
     p[1] = n / (2. * (double)(npositives));
 }
 
-bool sft::Octave::train(const Dataset& dataset, const FeaturePool& pool, int weaks, int treeDepth)
+bool sft::Octave::train(const Dataset& dataset, const FeaturePool* pool, int weaks, int treeDepth)
 {
     CV_Assert(treeDepth == 2);
     CV_Assert(weaks > 0);
@@ -389,7 +389,7 @@ bool sft::Octave::train(const Dataset& dataset, const FeaturePool& pool, int wea
     generateNegatives(dataset);
 
     // 2. only sumple case (all features used)
-    int nfeatures = pool.size();
+    int nfeatures = pool->size();
     cv::Mat varIdx(1, nfeatures, CV_32SC1);
     int* ptr = varIdx.ptr<int>(0);
 
@@ -417,7 +417,7 @@ bool sft::Octave::train(const Dataset& dataset, const FeaturePool& pool, int wea
         float* dptr = trainData.ptr<float>(fi);
         for (int si = 0; si < nsamples; ++si)
         {
-            dptr[si] = pool.apply(fi, si, integrals);
+            dptr[si] = pool->apply(fi, si, integrals);
         }
     }
 
@@ -448,18 +448,19 @@ void sft::Octave::write( CvFileStorage* fs, string name) const
 }
 
 // ========= FeaturePool ========= //
-sft::FeaturePool::FeaturePool(cv::Size m, int n) : model(m), nfeatures(n)
+
+sft::ICFFeaturePool::ICFFeaturePool(cv::Size m, int n) : FeaturePool(), model(m), nfeatures(n)
 {
     CV_Assert(m != cv::Size() && n > 0);
     fill(nfeatures);
 }
 
-float sft::FeaturePool::apply(int fi, int si, const Mat& integrals) const
+float sft::ICFFeaturePool::apply(int fi, int si, const Mat& integrals) const
 {
     return pool[fi](integrals.row(si), model);
 }
 
-void sft::FeaturePool::write( cv::FileStorage& fs, int index) const
+void sft::ICFFeaturePool::write( cv::FileStorage& fs, int index) const
 {
     CV_Assert((index > 0) && (index < (int)pool.size()));
     fs << pool[index];
@@ -470,8 +471,9 @@ void sft::write(cv::FileStorage& fs, const string&, const ICF& f)
     fs << "{" << "channel" << f.channel << "rect" << f.bb << "}";
 }
 
+sft::ICFFeaturePool::~ICFFeaturePool(){}
 
-void sft::FeaturePool::fill(int desired)
+void sft::ICFFeaturePool::fill(int desired)
 {
     int mw = model.width;
     int mh = model.height;
diff --git a/apps/sft/sft.cpp b/apps/sft/sft.cpp
index 9da4c04..8c40039 100644
--- a/apps/sft/sft.cpp
+++ b/apps/sft/sft.cpp
@@ -117,7 +117,7 @@ int main(int argc, char** argv)
         int nfeatures = cfg.poolSize;
         cv::Size model = cfg.model(it);
         std::cout << "Model " << model << std::endl;
-        sft::FeaturePool pool(model, nfeatures);
+        sft::ICFFeaturePool pool(model, nfeatures);
         nfeatures = pool.size();
 
 
@@ -132,7 +132,7 @@ int main(int argc, char** argv)
         std::string path = cfg.trainPath;
         sft::Dataset dataset(path, boost.logScale);
 
-        if (boost.train(dataset, pool, cfg.weaks, cfg.treeDepth))
+        if (boost.train(dataset, &pool, cfg.weaks, cfg.treeDepth))
         {
             CvFileStorage* fout = cvOpenFileStorage(cfg.resPath(it).c_str(), 0, CV_STORAGE_WRITE);
             boost.write(fout, cfg.cascadeName);
@@ -142,7 +142,7 @@ int main(int argc, char** argv)
             cv::Mat thresholds;
             boost.setRejectThresholds(thresholds);
 
-            boost.write(fso, pool, thresholds);
+            boost.write(fso, &pool, thresholds);
 
             cv::FileStorage tfs(("thresholds." + cfg.resPath(it)).c_str(), cv::FileStorage::WRITE);
             tfs << "thresholds" << thresholds;
diff --git a/modules/ml/include/opencv2/ml/ml.hpp b/modules/ml/include/opencv2/ml/ml.hpp
index e8e069b..775fab5 100644
--- a/modules/ml/include/opencv2/ml/ml.hpp
+++ b/modules/ml/include/opencv2/ml/ml.hpp
@@ -2132,6 +2132,17 @@ template<> CV_EXPORTS void Ptr::delete_obj();
 
 CV_EXPORTS bool initModule_ml(void);
 
+CV_EXPORTS class FeaturePool
+{
+public:
+
+    virtual int size() const = 0;
+    virtual float apply(int fi, int si, const Mat& integrals) const = 0;
+    virtual void write( cv::FileStorage& fs, int index) const = 0;
+
+    virtual ~FeaturePool() = 0;
+};
+
 }
 
 #endif // __cplusplus
diff --git a/modules/ml/src/octave.cpp b/modules/ml/src/octave.cpp
new file mode 100644
index 0000000..a7b7179
--- /dev/null
+++ b/modules/ml/src/octave.cpp
@@ -0,0 +1,45 @@
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//
+//                           License Agreement
+//                For Open Source Computer Vision Library
+//
+// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
+// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#include "precomp.hpp"
+
+cv::FeaturePool::~FeaturePool(){}
\ No newline at end of file
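
Not part of the patch: for reference, a minimal sketch of what a user-defined pool derived from the new abstract cv::FeaturePool would have to provide, mirroring the three pure virtual methods (plus the pure virtual destructor) declared in the ml.hpp hunk above. The class name ConstantFeaturePool and its placeholder responses are hypothetical, used only for illustration; the code assumes a build with this patch applied.

// Hypothetical example, not from the commit: a stub cv::FeaturePool implementation.
#include <opencv2/core/core.hpp>
#include <opencv2/ml/ml.hpp>   // declares the abstract cv::FeaturePool added by this patch

class ConstantFeaturePool : public cv::FeaturePool
{
public:
    explicit ConstantFeaturePool(int n) : nfeatures(n) {}

    // Number of candidate features offered to the boosting loop.
    virtual int size() const { return nfeatures; }

    // Response of feature fi on sample si. sft::Octave::train() calls this for
    // every (feature, sample) pair while filling its training-data matrix.
    virtual float apply(int fi, int si, const cv::Mat& integrals) const
    {
        (void)integrals;                        // a real pool would read row si of the integrals
        return static_cast<float>(fi + si);     // placeholder response
    }

    // Serialize the feature selected by a trained weak classifier,
    // following the same FileStorage mapping style used in octave.cpp.
    virtual void write(cv::FileStorage& fs, int index) const
    {
        fs << "{" << "index" << index << "}";
    }

    virtual ~ConstantFeaturePool() {}

private:
    int nfeatures;
};

As the sft.cpp hunks show, such a pool is now handed to sft::Octave::train() and sft::Octave::write() by pointer (&pool) rather than by reference, so any FeaturePool subclass can be substituted without changing the training code.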