float predict( const Mat& _sample, const cv::Range range) const;
private:
void traverse(const CvBoostTree* tree, cv::FileStorage& fs, const float* th = 0) const;
-
+ virtual void initial_weights(double (&p)[2]);
cv::Rect boundingBox;
int npositives;
dprintf("Processing negatives finished:\n\trequested %d negatives, viewed %d samples.\n", nnegatives, total);
}
+
// Sign function: returns +1 for positive values, -1 for negative
// values, and 0 for zero (works for any type comparable with T(0)).
template <typename T> int sgn(T val) {
    if (val > T(0)) return 1;
    if (val < T(0)) return -1;
    return 0;
}
<< "}";
}
+// Class-balancing initial boosting weights: each class weight is the
+// total sample count divided by that class's count, so the scarcer
+// class gets proportionally larger per-sample weight.
+// p[0] receives the negative-class weight, p[1] the positive-class one.
+// NOTE(review): divides by nnegatives/npositives with no zero check —
+// presumably the trainer guarantees both classes are non-empty; confirm.
+void sft::Octave::initial_weights(double (&p)[2])
+{
+    double n = data->sample_count;
+    p[0] = n / (double)(nnegatives) ;
+    p[1] = n / (double)(npositives);
+}
+
bool sft::Octave::train(const Dataset& dataset, const FeaturePool& pool, int weaks, int treeDepth)
{
CV_Assert(treeDepth == 2);
virtual void write_params( CvFileStorage* fs ) const;
virtual void read_params( CvFileStorage* fs, CvFileNode* node );
+ virtual void initial_weights(double (&p)[2]);
+
CvDTreeTrainData* data;
CvBoostParams params;
CvSeq* weak;
return result;
}
+// Base-class initial boosting weights: uniform (1.0 for both classes),
+// preserving CvBoost's original unweighted behavior. Declared virtual
+// so derived trainers can substitute class-balanced weights.
+// p[0]/p[1] are the per-class weight multipliers filled in by callee.
+void CvBoost::initial_weights(double (&p)[2])
+{
+    p[0] = 1.;
+    p[1] = 1.;
+}
+
void
CvBoost::update_weights( CvBoostTree* tree )
{
// in case of logitboost and gentle adaboost each weak tree is a regression tree,
// so we need to convert class labels to floating-point values
- double w0 = 1./n;
- double p[2] = { 1, 1 };
+ double w0 = 1./ n;
+ double p[2] = { 1., 1. };
+ initial_weights(p);
cvReleaseMat( &orig_response );
cvReleaseMat( &sum_response );