float* intrinsic, float* homography );
/****************************************************************************************\
-* Additional operations on Subdivisions *
-\****************************************************************************************/
-
-// paints voronoi diagram: just demo function
-CVAPI(void) icvDrawMosaic( CvSubdiv2D* subdiv, IplImage* src, IplImage* dst );
-
-// checks planar subdivision for correctness. It is not an absolute check,
-// but it verifies some relations between quad-edges
-CVAPI(int) icvSubdiv2DCheck( CvSubdiv2D* subdiv );
-
-// returns squared distance between two 2D points with floating-point coordinates.
-CV_INLINE double icvSqDist2D32f( CvPoint2D32f pt1, CvPoint2D32f pt2 )
-{
- double dx = pt1.x - pt2.x;
- double dy = pt1.y - pt2.y;
-
- return dx*dx + dy*dy;
-}
-
-
-/****************************************************************************************\
* More operations on sequences *
\****************************************************************************************/
/*******************************Stereo correspondence*************************************/
typedef struct CvCliqueFinder
-{
+{
CvGraph* graph;
int** adj_matr;
int N; //graph size
int k; //stack size
int* current_comp;
int** All;
-
+
int* ne;
int* ce;
int* fixp; //node with minimal disconnections
int status;
int best_score;
int weighted;
- int weighted_edges;
+ int weighted_edges;
float best_weight;
float* edge_weights;
float* vertex_weights;
#define CLIQUE_FOUND 1
#define CLIQUE_END 0
-/*CVAPI(void) cvStartFindCliques( CvGraph* graph, CvCliqueFinder* finder, int reverse,
+/*CVAPI(void) cvStartFindCliques( CvGraph* graph, CvCliqueFinder* finder, int reverse,
int weighted CV_DEFAULT(0), int weighted_edges CV_DEFAULT(0));
-CVAPI(int) cvFindNextMaximalClique( CvCliqueFinder* finder, int* clock_rest CV_DEFAULT(0) );
+CVAPI(int) cvFindNextMaximalClique( CvCliqueFinder* finder, int* clock_rest CV_DEFAULT(0) );
CVAPI(void) cvEndFindCliques( CvCliqueFinder* finder );
CVAPI(void) cvBronKerbosch( CvGraph* graph );*/
#define CV_UNDEF_SC_PARAM 12345 //default value of parameters
-#define CV_IDP_BIRCHFIELD_PARAM1 25
+#define CV_IDP_BIRCHFIELD_PARAM1 25
#define CV_IDP_BIRCHFIELD_PARAM2 5
#define CV_IDP_BIRCHFIELD_PARAM3 12
#define CV_IDP_BIRCHFIELD_PARAM4 15
#define CV_IDP_BIRCHFIELD_PARAM5 25
-#define CV_DISPARITY_BIRCHFIELD 0
+#define CV_DISPARITY_BIRCHFIELD 0
/*F///////////////////////////////////////////////////////////////////////////
// rightImage - right image of stereo-pair (format 8uC1).
// mode - mode of correspondence retrieval (now CV_DISPARITY_BIRCHFIELD only)
// dispImage - destination disparity image
-// maxDisparity - maximal disparity
+// maxDisparity - maximal disparity
// param1, param2, param3, param4, param5 - parameters of algorithm
// Returns:
// Notes:
// Images must be rectified.
// All images must have format 8uC1.
//F*/
-CVAPI(void)
-cvFindStereoCorrespondence(
+CVAPI(void)
+cvFindStereoCorrespondence(
const CvArr* leftImage, const CvArr* rightImage,
int mode,
CvArr* dispImage,
- int maxDisparity,
- double param1 CV_DEFAULT(CV_UNDEF_SC_PARAM),
- double param2 CV_DEFAULT(CV_UNDEF_SC_PARAM),
- double param3 CV_DEFAULT(CV_UNDEF_SC_PARAM),
- double param4 CV_DEFAULT(CV_UNDEF_SC_PARAM),
+ int maxDisparity,
+ double param1 CV_DEFAULT(CV_UNDEF_SC_PARAM),
+ double param2 CV_DEFAULT(CV_UNDEF_SC_PARAM),
+ double param3 CV_DEFAULT(CV_UNDEF_SC_PARAM),
+ double param4 CV_DEFAULT(CV_UNDEF_SC_PARAM),
double param5 CV_DEFAULT(CV_UNDEF_SC_PARAM) );
/*****************************************************************************************/
double gamma,
CvStereoLineCoeff* coeffs);
-/*CVAPI(int) icvComputeFundMatrEpipoles ( double* camMatr1,
- double* rotMatr1,
+/*CVAPI(int) icvComputeFundMatrEpipoles ( double* camMatr1,
+ double* rotMatr1,
double* transVect1,
double* camMatr2,
double* rotMatr2,
CvPoint2D64f p2_start,CvPoint2D64f p2_end,
CvPoint2D64f* cross,
int* result);
-
+
CVAPI(void) icvGetPieceLength(CvPoint2D64f point1,CvPoint2D64f point2,double* dist);
CVAPI(void) icvGetCrossRectDirect( CvSize imageSize,
/* finds correspondence between two contours */
CvSeq* cvCalcContoursCorrespondence( const CvSeq* contour1,
- const CvSeq* contour2,
+ const CvSeq* contour2,
CvMemStorage* storage);
/* morphs contours using the pre-calculated correspondence:
typedef struct CvFaceTracker CvFaceTracker;
-#define CV_NUM_FACE_ELEMENTS 3
+#define CV_NUM_FACE_ELEMENTS 3
enum CV_FACE_ELEMENTS
{
CV_FACE_MOUTH = 0,
typedef unsigned char CvBool;
-typedef struct
+typedef struct Cv3dTracker2dTrackedObject
{
int id;
CvPoint2D32f p; // pgruebele: So we do not loose precision, this needs to be float
return r;
}
-typedef struct
+typedef struct Cv3dTrackerTrackedObject
{
int id;
CvPoint3D32f p; // location of the tracked object
return r;
}
/* Per-camera record for the 3d tracker: a validity flag plus the 4x4
   camera-to-world transform.
   NOTE(review): the two lines below carry stray diff markers (-/+) from a
   merge; left byte-for-byte as found. */
-typedef struct
+typedef struct Cv3dTrackerCameraInfo
{
    CvBool valid;    /* presumably nonzero once this camera is calibrated -- confirm */
    float mat[4][4]; /* maps camera coordinates to world coordinates */
    /* has all the info we need */
} Cv3dTrackerCameraInfo;
-typedef struct
+typedef struct Cv3dTrackerCameraIntrinsics
{
CvPoint2D32f principal_point;
float focal_length[2];
#define CV_PREV_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[2 + ((EDGE)->site[0] != (SITE))])
#define CV_VORONOIEDGE2D_BEGINNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] != (SITE))])
#define CV_VORONOIEDGE2D_ENDNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] == (SITE))])
-#define CV_TWIN_VORONOISITE2D( SITE, EDGE ) ( (EDGE)->site[((EDGE)->site[0] == (SITE))])
+#define CV_TWIN_VORONOISITE2D( SITE, EDGE ) ( (EDGE)->site[((EDGE)->site[0] == (SITE))])
#define CV_VORONOISITE2D_FIELDS() \
struct CvVoronoiNode2D *node[2]; \
typedef struct CvLCMEdge
{
- CV_GRAPH_EDGE_FIELDS()
+ CV_GRAPH_EDGE_FIELDS()
CvSeq* chain;
float width;
int index1;
typedef struct CvLCMNode
{
CV_GRAPH_VERTEX_FIELDS()
- CvContour* contour;
+ CvContour* contour;
} CvLCMNode;
/* Plain 3x3 float matrix passed to the view-morphing/scanline functions
   below (e.g. cvMakeScanlines).
   NOTE(review): the duplicated closing lines below are -/+ diff residue;
   left byte-for-byte as found. */
typedef struct CvMatrix3
{
    float m[3][3];
-} CvMatrix3;
-
+} CvMatrix3;
+
/* The order of the function corresponds to the order they should appear in
- the view morphing pipeline */
+ the view morphing pipeline */
/* Finds ending points of scanlines on left and right images of stereo-pair */
CVAPI(void) cvMakeScanlines( const CvMatrix3* matrix, CvSize img_size,
float* RandomSample; /* RandomVector to update sample set */
struct CvRandState* RandS; /* Array of structures to generate random vectors */
} CvConDensation;
-
+
/* Creates ConDensation filter state */
CVAPI(CvConDensation*) cvCreateConDensation( int dynam_params,
int measure_params,
CVAPI(void) cvConDensUpdateByTime( CvConDensation* condens);
/* Initializes ConDensation filter samples */
-CVAPI(void) cvConDensInitSampleSet( CvConDensation* condens, CvMat* lower_bound, CvMat* upper_bound );
+CVAPI(void) cvConDensInitSampleSet( CvConDensation* condens, CvMat* lower_bound, CvMat* upper_bound );
CV_INLINE int iplWidth( const IplImage* img )
{
/* Starts cameras calibration */
virtual bool SetFrames( int totalFrames );
-
+
/* Stops cameras calibration */
virtual void Stop( bool calibrate = false );
/* Saves all camera parameters to file */
virtual bool SaveCameraParams( const char* filename );
-
+
/* Loads all camera parameters from file */
virtual bool LoadCameraParams( const char* filename );
image = cvCreateImage( size, depth, channels );
refcount = image ? new int(1) : 0;
}
-
+
CvImage( IplImage* img ) : image(img)
{
refcount = image ? new int(1) : 0;
}
-
+
CvImage( const CvImage& img ) : image(img.image), refcount(img.refcount)
{
if( refcount ) ++(*refcount);
}
-
+
CvImage( const char* filename, const char* imgname=0, int color=-1 ) : image(0), refcount(0)
{ load( filename, imgname, color ); }
-
+
CvImage( CvFileStorage* fs, const char* mapname, const char* imgname ) : image(0), refcount(0)
{ read( fs, mapname, imgname ); }
-
+
CvImage( CvFileStorage* fs, const char* seqname, int idx ) : image(0), refcount(0)
{ read( fs, seqname, idx ); }
-
+
~CvImage()
{
if( refcount && !(--*refcount) )
delete refcount;
}
}
-
+
CvImage clone() { return CvImage(image ? cvCloneImage(image) : 0); }
-
+
void create( CvSize size, int depth, int channels )
{
if( !image || !refcount ||
image->depth != depth || image->nChannels != channels )
attach( cvCreateImage( size, depth, channels ));
}
-
+
void release() { detach(); }
void clear() { detach(); }
-
+
void attach( IplImage* img, bool use_refcount=true )
{
if( refcount && --*refcount == 0 )
image = img;
refcount = use_refcount && image ? new int(1) : 0;
}
-
+
void detach()
{
if( refcount && --*refcount == 0 )
image = 0;
refcount = 0;
}
-
+
bool load( const char* filename, const char* imgname=0, int color=-1 );
bool read( CvFileStorage* fs, const char* mapname, const char* imgname );
bool read( CvFileStorage* fs, const char* seqname, int idx );
void save( const char* filename, const char* imgname, const int* params=0 );
void write( CvFileStorage* fs, const char* imgname );
-
+
void show( const char* window_name );
bool is_valid() { return image != 0; }
-
+
int width() const { return image ? image->width : 0; }
int height() const { return image ? image->height : 0; }
-
+
CvSize size() const { return image ? cvSize(image->width, image->height) : cvSize(0,0); }
-
+
CvSize roi_size() const
{
return !image ? cvSize(0,0) :
!image->roi ? cvSize(image->width,image->height) :
cvSize(image->roi->width, image->roi->height);
}
-
+
CvRect roi() const
{
return !image ? cvRect(0,0,0,0) :
cvRect(image->roi->xOffset,image->roi->yOffset,
image->roi->width,image->roi->height);
}
-
+
int coi() const { return !image || !image->roi ? 0 : image->roi->coi; }
-
+
void set_roi(CvRect roi) { cvSetImageROI(image,roi); }
void reset_roi() { cvResetImageROI(image); }
void set_coi(int coi) { cvSetImageCOI(image,coi); }
int depth() const { return image ? image->depth : 0; }
int channels() const { return image ? image->nChannels : 0; }
int pix_size() const { return image ? ((image->depth & 255)>>3)*image->nChannels : 0; }
-
+
uchar* data() { return image ? (uchar*)image->imageData : 0; }
const uchar* data() const { return image ? (const uchar*)image->imageData : 0; }
int step() const { return image ? image->widthStep : 0; }
int origin() const { return image ? image->origin : 0; }
-
+
uchar* roi_row(int y)
{
assert(0<=y);
assert(!image ?
1 : image->roi ?
y<image->roi->height : y<image->height);
-
+
return !image ? 0 :
!image->roi ?
(uchar*)(image->imageData + y*image->widthStep) :
(uchar*)(image->imageData + (y+image->roi->yOffset)*image->widthStep +
image->roi->xOffset*((image->depth & 255)>>3)*image->nChannels);
}
-
+
const uchar* roi_row(int y) const
{
assert(0<=y);
assert(!image ?
1 : image->roi ?
y<image->roi->height : y<image->height);
-
+
return !image ? 0 :
!image->roi ?
(const uchar*)(image->imageData + y*image->widthStep) :
(const uchar*)(image->imageData + (y+image->roi->yOffset)*image->widthStep +
image->roi->xOffset*((image->depth & 255)>>3)*image->nChannels);
}
-
+
operator const IplImage* () const { return image; }
operator IplImage* () { return image; }
-
+
CvImage& operator = (const CvImage& img)
{
if( img.refcount )
refcount=img.refcount;
return *this;
}
-
+
protected:
IplImage* image;
int* refcount;
CvMatrix() : matrix(0) {}
CvMatrix( int rows, int cols, int type )
{ matrix = cvCreateMat( rows, cols, type ); }
-
+
CvMatrix( int rows, int cols, int type, CvMat* hdr,
void* data=0, int step=CV_AUTOSTEP )
{ matrix = cvInitMatHeader( hdr, rows, cols, type, data, step ); }
-
+
CvMatrix( int rows, int cols, int type, CvMemStorage* storage, bool alloc_data=true );
-
+
CvMatrix( int rows, int cols, int type, void* data, int step=CV_AUTOSTEP )
{ matrix = cvCreateMatHeader( rows, cols, type );
cvSetData( matrix, data, step ); }
-
+
CvMatrix( CvMat* m )
{ matrix = m; }
-
+
CvMatrix( const CvMatrix& m )
{
matrix = m.matrix;
addref();
}
-
+
CvMatrix( const char* filename, const char* matname=0, int color=-1 ) : matrix(0)
{ load( filename, matname, color ); }
-
+
CvMatrix( CvFileStorage* fs, const char* mapname, const char* matname ) : matrix(0)
{ read( fs, mapname, matname ); }
-
+
CvMatrix( CvFileStorage* fs, const char* seqname, int idx ) : matrix(0)
{ read( fs, seqname, idx ); }
-
+
~CvMatrix()
{
release();
}
-
+
CvMatrix clone() { return CvMatrix(matrix ? cvCloneMat(matrix) : 0); }
-
+
void set( CvMat* m, bool add_ref )
{
release();
if( add_ref )
addref();
}
-
+
void create( int rows, int cols, int type )
{
if( !matrix || !matrix->refcount ||
CV_MAT_TYPE(matrix->type) != type )
set( cvCreateMat( rows, cols, type ), false );
}
-
+
void addref() const
{
if( matrix )
++*matrix->refcount;
}
}
-
+
void release()
{
if( matrix )
matrix = 0;
}
}
-
+
void clear()
{
release();
}
-
+
bool load( const char* filename, const char* matname=0, int color=-1 );
bool read( CvFileStorage* fs, const char* mapname, const char* matname );
bool read( CvFileStorage* fs, const char* seqname, int idx );
void save( const char* filename, const char* matname, const int* params=0 );
void write( CvFileStorage* fs, const char* matname );
-
+
void show( const char* window_name );
-
+
bool is_valid() { return matrix != 0; }
-
+
int rows() const { return matrix ? matrix->rows : 0; }
int cols() const { return matrix ? matrix->cols : 0; }
-
+
CvSize size() const
{
return !matrix ? cvSize(0,0) : cvSize(matrix->rows,matrix->cols);
}
-
+
int type() const { return matrix ? CV_MAT_TYPE(matrix->type) : 0; }
int depth() const { return matrix ? CV_MAT_DEPTH(matrix->type) : 0; }
int channels() const { return matrix ? CV_MAT_CN(matrix->type) : 0; }
int pix_size() const { return matrix ? CV_ELEM_SIZE(matrix->type) : 0; }
-
+
uchar* data() { return matrix ? matrix->data.ptr : 0; }
const uchar* data() const { return matrix ? matrix->data.ptr : 0; }
int step() const { return matrix ? matrix->step : 0; }
-
+
void set_data( void* data, int step=CV_AUTOSTEP )
{ cvSetData( matrix, data, step ); }
-
+
uchar* row(int i) { return !matrix ? 0 : matrix->data.ptr + i*matrix->step; }
const uchar* row(int i) const
{ return !matrix ? 0 : matrix->data.ptr + i*matrix->step; }
-
+
operator const CvMat* () const { return matrix; }
operator CvMat* () { return matrix; }
-
+
CvMatrix& operator = (const CvMatrix& _m)
{
_m.addref();
matrix = _m.matrix;
return *this;
}
-
+
protected:
CvMat* matrix;
};
class CV_EXPORTS CvCamShiftTracker
{
public:
-
+
CvCamShiftTracker();
virtual ~CvCamShiftTracker();
-
+
/**** Characteristics of the object that are calculated by track_object method *****/
float get_orientation() const // orientation of the object in degrees
{ return m_box.angle; }
{ return m_box.center; }
CvRect get_window() const // bounding rectangle for the object
{ return m_comp.rect; }
-
+
/*********************** Tracking parameters ************************/
int get_threshold() const // thresholding value that applied to back project
{ return m_threshold; }
-
+
int get_hist_dims( int* dims = 0 ) const // returns number of histogram dimensions and sets
{ return m_hist ? cvGetDims( m_hist->bins, dims ) : 0; }
-
+
int get_min_ch_val( int channel ) const // get the minimum allowed value of the specified channel
{ return m_min_ch_val[channel]; }
-
+
int get_max_ch_val( int channel ) const // get the maximum allowed value of the specified channel
{ return m_max_ch_val[channel]; }
-
+
// set initial object rectangle (must be called before initial calculation of the histogram)
bool set_window( CvRect window)
{ m_comp.rect = window; return true; }
-
+
bool set_threshold( int threshold ) // threshold applied to the histogram bins
{ m_threshold = threshold; return true; }
-
+
bool set_hist_bin_range( int dim, int min_val, int max_val );
-
+
bool set_hist_dims( int c_dims, int* dims );// set the histogram parameters
-
+
bool set_min_ch_val( int channel, int val ) // set the minimum allowed value of the specified channel
{ m_min_ch_val[channel] = val; return true; }
bool set_max_ch_val( int channel, int val ) // set the maximum allowed value of the specified channel
{ m_max_ch_val[channel] = val; return true; }
-
+
/************************ The processing methods *********************************/
// update object position
virtual bool track_object( const IplImage* cur_frame );
-
+
// update object histogram
virtual bool update_histogram( const IplImage* cur_frame );
-
+
// reset histogram
virtual void reset_histogram();
-
+
/************************ Retrieving internal data *******************************/
// get back project image
virtual IplImage* get_back_project()
{ return m_back_project; }
-
+
float query( int* bin ) const
{ return m_hist ? (float)cvGetRealND(m_hist->bins, bin) : 0.f; }
-
+
protected:
-
+
// internal method for color conversion: fills m_color_planes group
virtual void color_transform( const IplImage* img );
-
+
CvHistogram* m_hist;
-
+
CvBox2D m_box;
CvConnectedComp m_comp;
-
+
float m_hist_ranges_data[CV_MAX_DIM][2];
float* m_hist_ranges[CV_MAX_DIM];
-
+
int m_min_ch_val[CV_MAX_DIM];
int m_max_ch_val[CV_MAX_DIM];
int m_threshold;
-
+
IplImage* m_color_planes[CV_MAX_DIM];
IplImage* m_back_project;
IplImage* m_temp;
typedef CvEM ExpectationMaximization;
/*!
- The Patch Generator class
+ The Patch Generator class
*/
class CV_EXPORTS PatchGenerator
{
CV_OUT Mat& transform, RNG& rng,
bool inverse=false) const;
void setAffineParam(double lambda, double theta, double phi);
-
+
double backgroundMin, backgroundMax;
double noiseRange;
bool randomBlur;
void getMostStable2D(const Mat& image, CV_OUT vector<KeyPoint>& keypoints,
int maxCount, const PatchGenerator& patchGenerator) const;
void setVerbose(bool verbose);
-
+
void read(const FileNode& node);
void write(FileStorage& fs, const String& name=String()) const;
-
+
int radius;
int threshold;
int nOctaves;
int nViews;
bool verbose;
-
+
double baseFeatureSize;
double clusteringDistance;
};
virtual void clear();
virtual bool empty() const;
void setVerbose(bool verbose);
-
+
int getClassCount() const;
int getStructCount() const;
int getStructSize() const;
int getSignatureSize() const;
int getCompressionMethod() const;
Size getPatchSize() const;
-
+
struct Feature
{
uchar x1, y1, x2, y2;
template<typename _Tp> bool operator ()(const Mat_<_Tp>& patch) const
{ return patch(y1,x1) > patch(y2, x2); }
};
-
+
enum
{
PATCH_SIZE = 31,
COMPRESSION_PCA = 2,
DEFAULT_COMPRESSION_METHOD = COMPRESSION_NONE
};
-
+
protected:
virtual void prepare(int _nclasses, int _patchSize, int _signatureSize,
int _nstructs, int _structSize,
int _nviews, int _compressionMethod);
virtual void finalize(RNG& rng);
virtual int getLeaf(int fidx, const Mat& patch) const;
-
+
bool verbose;
int nstructs;
int structSize;
int x;
int y;
IplImage* image;
-
+
BaseKeypoint()
: x(0), y(0), image(NULL)
{}
-
+
BaseKeypoint(int x, int y, IplImage* image)
: x(x), y(y), image(image)
{}
{
public:
friend class RTreeClassifier;
-
+
static const uchar PATCH_SIZE = 32;
static const int DEFAULT_DEPTH = 9;
static const int DEFAULT_VIEWS = 5000;
static const size_t DEFAULT_REDUCED_NUM_DIM = 176;
static float GET_LOWER_QUANT_PERC() { return .03f; }
static float GET_UPPER_QUANT_PERC() { return .92f; }
-
+
RandomizedTree();
~RandomizedTree();
-
+
void train(vector<BaseKeypoint> const& base_set, RNG &rng,
int depth, int views, size_t reduced_num_dim, int num_quant_bits);
void train(vector<BaseKeypoint> const& base_set, RNG &rng,
PatchGenerator &make_patch, int depth, int views, size_t reduced_num_dim,
int num_quant_bits);
-
+
// following two funcs are EXPERIMENTAL (do not use unless you know exactly what you do)
static void quantizeVector(float *vec, int dim, int N, float bnds[2], int clamp_mode=0);
static void quantizeVector(float *src, int dim, int N, float bnds[2], uchar *dst);
-
+
// patch_data must be a 32x32 array (no row padding)
float* getPosterior(uchar* patch_data);
const float* getPosterior(uchar* patch_data) const;
uchar* getPosterior2(uchar* patch_data);
const uchar* getPosterior2(uchar* patch_data) const;
-
+
void read(const char* file_name, int num_quant_bits);
void read(std::istream &is, int num_quant_bits);
void write(const char* file_name) const;
void write(std::ostream &os) const;
-
+
int classes() { return classes_; }
int depth() { return depth_; }
-
+
//void setKeepFloatPosteriors(bool b) { keep_float_posteriors_ = b; }
void discardFloatPosteriors() { freePosteriors(1); }
-
+
inline void applyQuantization(int num_quant_bits) { makePosteriors2(num_quant_bits); }
-
+
// debug
void savePosteriors(std::string url, bool append=false);
void savePosteriors2(std::string url, bool append=false);
-
+
private:
int classes_;
int depth_;
float **posteriors_; // 16-bytes aligned posteriors
uchar **posteriors2_; // 16-bytes aligned posteriors
vector<int> leaf_counts_;
-
+
void createNodes(int num_nodes, RNG &rng);
void allocPosteriorsAligned(int num_leaves, int num_classes);
void freePosteriors(int which); // which: 1=posteriors_, 2=posteriors2_, 3=both
struct CV_EXPORTS RTreeNode
{
short offset1, offset2;
-
+
RTreeNode() {}
RTreeNode(uchar x1, uchar y1, uchar x2, uchar y2)
: offset1(y1*RandomizedTree::PATCH_SIZE + x1),
offset2(y2*RandomizedTree::PATCH_SIZE + x2)
{}
-
+
//! Left child on 0, right child on 1
inline bool operator() (uchar* patch_data) const
{
public:
static const int DEFAULT_TREES = 48;
static const size_t DEFAULT_NUM_QUANT_BITS = 4;
-
+
RTreeClassifier();
void train(vector<BaseKeypoint> const& base_set,
RNG &rng,
int views = RandomizedTree::DEFAULT_VIEWS,
size_t reduced_num_dim = RandomizedTree::DEFAULT_REDUCED_NUM_DIM,
int num_quant_bits = DEFAULT_NUM_QUANT_BITS);
-
+
// sig must point to a memory block of at least classes()*sizeof(float|uchar) bytes
void getSignature(IplImage *patch, uchar *sig) const;
void getSignature(IplImage *patch, float *sig) const;
void getSparseSignature(IplImage *patch, float *sig, float thresh) const;
// TODO: deprecated in favor of getSignature overload, remove
void getFloatSignature(IplImage *patch, float *sig) const { getSignature(patch, sig); }
-
+
static int countNonZeroElements(float *vec, int n, double tol=1e-10);
static inline void safeSignatureAlloc(uchar **sig, int num_sig=1, int sig_len=176);
static inline uchar* safeSignatureAlloc(int num_sig=1, int sig_len=176);
-
+
inline int classes() const { return classes_; }
inline int original_num_classes() const { return original_num_classes_; }
-
+
void setQuantization(int num_quant_bits);
void discardFloatPosteriors();
-
+
void read(const char* file_name);
void read(std::istream &is);
void write(const char* file_name) const;
void write(std::ostream &os) const;
-
+
// experimental and debug
void saveAllFloatPosteriors(std::string file_url);
void saveAllBytePosteriors(std::string file_url);
void setFloatPosteriorsFromTextfile_176(std::string url);
float countZeroElements();
-
+
vector<RandomizedTree> trees_;
-
+
private:
int classes_;
int num_quant_bits_;
/****************************************************************************************\
* One-Way Descriptor *
\****************************************************************************************/
-
+
// CvAffinePose: defines a parameterized affine transformation of an image patch.
// An image patch is rotated on angle phi (in degrees), then scaled lambda1 times
// along horizontal and lambda2 times along vertical direction, and then rotated again
float lambda1;
float lambda2;
};
-
+
class CV_EXPORTS OneWayDescriptor
{
public:
OneWayDescriptor();
~OneWayDescriptor();
-
+
// allocates memory for given descriptor parameters
void Allocate(int pose_count, CvSize size, int nChannels);
-
+
// GenerateSamples: generates affine transformed patches with averaging them over small transformation variations.
// If external poses and transforms were specified, uses them instead of generating random ones
// - pose_count: the number of poses to be generated
// - frontal: the input patch (can be a roi in a larger image)
// - norm: if nonzero, normalizes the output patch so that the sum of pixel intensities is 1
void GenerateSamples(int pose_count, IplImage* frontal, int norm = 0);
-
+
// GenerateSamplesFast: generates affine transformed patches with averaging them over small transformation variations.
// Uses precalculated transformed pca components.
// - frontal: the input patch (can be a roi in a larger image)
// pca_descriptors[0] corresponds to the average, pca_descriptors[1]-pca_descriptors[pca_dim] correspond to eigenvectors
void GenerateSamplesFast(IplImage* frontal, CvMat* pca_hr_avg,
CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors);
-
+
// sets the poses and corresponding transforms
void SetTransforms(CvAffinePose* poses, CvMat** transforms);
-
+
// Initialize: builds a descriptor.
// - pose_count: the number of poses to build. If poses were set externally, uses them rather than generating random ones
// - frontal: input patch. Can be a roi in a larger image
// - feature_name: the feature name to be associated with the descriptor
// - norm: if 1, the affine transformed patches are normalized so that their sum is 1
void Initialize(int pose_count, IplImage* frontal, const char* feature_name = 0, int norm = 0);
-
+
// InitializeFast: builds a descriptor using precomputed descriptors of pca components
// - pose_count: the number of poses to build
// - frontal: input patch. Can be a roi in a larger image
// followed by the descriptors for eigenvectors
void InitializeFast(int pose_count, IplImage* frontal, const char* feature_name,
CvMat* pca_hr_avg, CvMat* pca_hr_eigenvectors, OneWayDescriptor* pca_descriptors);
-
+
// ProjectPCASample: unwarps an image patch into a vector and projects it into PCA space
// - patch: input image patch
// - avg: PCA average vector
// - eigenvectors: PCA eigenvectors, one per row
// - pca_coeffs: output PCA coefficients
void ProjectPCASample(IplImage* patch, CvMat* avg, CvMat* eigenvectors, CvMat* pca_coeffs) const;
-
+
// InitializePCACoeffs: projects all warped patches into PCA space
// - avg: PCA average vector
// - eigenvectors: PCA eigenvectors, one per row
void InitializePCACoeffs(CvMat* avg, CvMat* eigenvectors);
-
+
// EstimatePose: finds the closest match between an input patch and a set of patches with different poses
// - patch: input image patch
// - pose_idx: the output index of the closest pose
// - distance: the distance to the closest pose (L2 distance)
void EstimatePose(IplImage* patch, int& pose_idx, float& distance) const;
-
+
// EstimatePosePCA: finds the closest match between an input patch and a set of patches with different poses.
// The distance between patches is computed in PCA space
// - patch: input image patch
// - avg: PCA average vector. If 0, matching without PCA is used
// - eigenvectors: PCA eigenvectors, one per row
void EstimatePosePCA(CvArr* patch, int& pose_idx, float& distance, CvMat* avg, CvMat* eigenvalues) const;
-
+
// GetPatchSize: returns the size of each image patch after warping (2 times smaller than the input patch)
CvSize GetPatchSize() const
{
return m_patch_size;
}
-
+
// GetInputPatchSize: returns the required size of the patch that the descriptor is built from
// (2 time larger than the patch after warping)
CvSize GetInputPatchSize() const
{
return cvSize(m_patch_size.width*2, m_patch_size.height*2);
}
-
+
// GetPatch: returns a patch corresponding to specified pose index
// - index: pose index
// - return value: the patch corresponding to specified pose index
IplImage* GetPatch(int index);
-
+
// GetPose: returns a pose corresponding to specified pose index
// - index: pose index
// - return value: the pose corresponding to specified pose index
CvAffinePose GetPose(int index) const;
-
+
// Save: saves all patches with different poses to a specified path
void Save(const char* path);
-
+
// ReadByName: reads a descriptor from a file storage
// - fs: file storage
// - parent: parent node
// - name: node name
// - return value: 1 if succeeded, 0 otherwise
int ReadByName(CvFileStorage* fs, CvFileNode* parent, const char* name);
-
+
// ReadByName: reads a descriptor from a file node
// - parent: parent node
// - name: node name
// - return value: 1 if succeeded, 0 otherwise
int ReadByName(const FileNode &parent, const char* name);
-
+
// Write: writes a descriptor into a file storage
// - fs: file storage
// - name: node name
void Write(CvFileStorage* fs, const char* name);
-
+
// GetFeatureName: returns a name corresponding to a feature
const char* GetFeatureName() const;
-
+
// GetCenter: returns the center of the feature
CvPoint GetCenter() const;
-
+
void SetPCADimHigh(int pca_dim_high) {m_pca_dim_high = pca_dim_high;};
void SetPCADimLow(int pca_dim_low) {m_pca_dim_low = pca_dim_low;};
-
+
int GetPCADimLow() const;
int GetPCADimHigh() const;
-
+
CvMat** GetPCACoeffs() const {return m_pca_coeffs;}
-
+
protected:
int m_pose_count; // the number of poses
CvSize m_patch_size; // size of each image
CvMat** m_pca_coeffs; // an array of length m_pose_count containing pca decomposition of the patch in different poses
CvAffinePose* m_affine_poses; // an array of poses
CvMat** m_transforms; // an array of affine transforms corresponding to poses
-
+
string m_feature_name; // the name of the feature associated with the descriptor
CvPoint m_center; // the coordinates of the feature (the center of the input image ROI)
-
+
int m_pca_dim_high; // the number of descriptor pca components to use for generating affine poses
int m_pca_dim_low; // the number of pca components to use for comparison
};
class CV_EXPORTS OneWayDescriptorBase
{
public:
-
+
// creates an instance of OneWayDescriptor from a set of training files
// - patch_size: size of the input (large) patch
// - pose_count: the number of poses to generate for each descriptor
OneWayDescriptorBase(CvSize patch_size, int pose_count, const char* train_path = 0, const char* pca_config = 0,
const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1,
int pca_dim_high = 100, int pca_dim_low = 100);
-
+
OneWayDescriptorBase(CvSize patch_size, int pose_count, const string &pca_filename, const string &train_path = string(), const string &images_list = string(),
float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1,
int pca_dim_high = 100, int pca_dim_low = 100);
-
-
+
+
virtual ~OneWayDescriptorBase();
void clear ();
-
-
+
+
// Allocate: allocates memory for a given number of descriptors
void Allocate(int train_feature_count);
-
+
// AllocatePCADescriptors: allocates memory for pca descriptors
void AllocatePCADescriptors();
-
+
// returns patch size
CvSize GetPatchSize() const {return m_patch_size;};
// returns the number of poses for each descriptor
int GetPoseCount() const {return m_pose_count;};
-
+
// returns the number of pyramid levels
int GetPyrLevels() const {return m_pyr_levels;};
-
+
// returns the number of descriptors
int GetDescriptorCount() const {return m_train_feature_count;};
-
+
// CreateDescriptorsFromImage: creates descriptors for each of the input features
// - src: input image
// - features: input features
// - pyr_levels: the number of pyramid levels
void CreateDescriptorsFromImage(IplImage* src, const vector<KeyPoint>& features);
-
+
// CreatePCADescriptors: generates descriptors for PCA components, needed for fast generation of feature descriptors
void CreatePCADescriptors();
-
+
// returns a feature descriptor by feature index
const OneWayDescriptor* GetDescriptor(int desc_idx) const {return &m_descriptors[desc_idx];};
-
+
// FindDescriptor: finds the closest descriptor
// - patch: input image patch
// - desc_idx: output index of the closest descriptor to the input patch
// - _scales: scales of the input patch for each descriptor
// - scale_ranges: input scales variation (float[2])
void FindDescriptor(IplImage* patch, int& desc_idx, int& pose_idx, float& distance, float* _scale = 0, float* scale_ranges = 0) const;
-
+
// - patch: input image patch
// - n: number of the closest indexes
// - desc_idxs: output indexes of the closest descriptor to the input patch (n)
// - scale_ranges: input scales variation (float[2])
void FindDescriptor(IplImage* patch, int n, vector<int>& desc_idxs, vector<int>& pose_idxs,
vector<float>& distances, vector<float>& _scales, float* scale_ranges = 0) const;
-
+
// FindDescriptor: finds the closest descriptor
// - src: input image
// - pt: center of the feature
// - pose_idx: output index of the closest pose of the closest descriptor to the input patch
// - distance: distance from the input patch to the closest feature pose
void FindDescriptor(IplImage* src, cv::Point2f pt, int& desc_idx, int& pose_idx, float& distance) const;
-
+
// InitializePoses: generates random poses
void InitializePoses();
-
+
// InitializeTransformsFromPoses: generates 2x3 affine matrices from poses (initializes m_transforms)
void InitializeTransformsFromPoses();
-
+
// InitializePoseTransforms: subsequently calls InitializePoses and InitializeTransformsFromPoses
void InitializePoseTransforms();
-
+
// InitializeDescriptor: initializes a descriptor
// - desc_idx: descriptor index
// - train_image: image patch (ROI is supported)
// - feature_label: feature textual label
void InitializeDescriptor(int desc_idx, IplImage* train_image, const char* feature_label);
-
+
void InitializeDescriptor(int desc_idx, IplImage* train_image, const KeyPoint& keypoint, const char* feature_label);
-
+
// InitializeDescriptors: load features from an image and create descriptors for each of them
void InitializeDescriptors(IplImage* train_image, const vector<KeyPoint>& features,
const char* feature_label = "", int desc_start_idx = 0);
-
+
// Write: writes this object to a file storage
// - fs: output filestorage
void Write (FileStorage &fs) const;
-
+
// Read: reads OneWayDescriptorBase object from a file node
- // - fn: input file node
+ // - fn: input file node
void Read (const FileNode &fn);
-
+
// LoadPCADescriptors: loads PCA descriptors from a file
// - filename: input filename
int LoadPCADescriptors(const char* filename);
-
+
// LoadPCADescriptors: loads PCA descriptors from a file node
// - fn: input file node
int LoadPCADescriptors(const FileNode &fn);
-
+
// SavePCADescriptors: saves PCA descriptors to a file
// - filename: output filename
void SavePCADescriptors(const char* filename);
-
+
// SavePCADescriptors: saves PCA descriptors to a file storage
// - fs: output file storage
void SavePCADescriptors(CvFileStorage* fs) const;
-
+
// GeneratePCA: calculate and save PCA components and descriptors
// - img_path: path to training PCA images directory
// - images_list: filename with filenames of training PCA images
void GeneratePCA(const char* img_path, const char* images_list, int pose_count=500);
-
+
// SetPCAHigh: sets the high resolution pca matrices (copied to internal structures)
void SetPCAHigh(CvMat* avg, CvMat* eigenvectors);
-
+
// SetPCALow: sets the low resolution pca matrices (copied to internal structures)
void SetPCALow(CvMat* avg, CvMat* eigenvectors);
-
+
int GetLowPCA(CvMat** avg, CvMat** eigenvectors)
{
*avg = m_pca_avg;
*eigenvectors = m_pca_eigenvectors;
return m_pca_dim_low;
};
-
+
int GetPCADimLow() const {return m_pca_dim_low;};
int GetPCADimHigh() const {return m_pca_dim_high;};
-
+
void ConvertDescriptorsArrayToTree(); // Converting pca_descriptors array to KD tree
-
+
// GetPCAFilename: get default PCA filename
static string GetPCAFilename () { return "pca.yml"; }
-
+
virtual bool empty() const { return m_train_feature_count <= 0 ? true : false; }
-
+
protected:
CvSize m_patch_size; // patch size
int m_pose_count; // the number of poses for each descriptor
CvMat* m_pca_hr_avg; // PCA average Vector for large patches
CvMat* m_pca_hr_eigenvectors; // PCA eigenvectors for large patches
OneWayDescriptor* m_pca_descriptors; // an array of PCA descriptors
-
+
cv::flann::Index* m_pca_descriptors_tree;
CvMat* m_pca_descriptors_matrix;
-
+
CvAffinePose* m_poses; // array of poses
CvMat** m_transforms; // array of affine transformations corresponding to poses
-
+
int m_pca_dim_high;
int m_pca_dim_low;
-
+
int m_pyr_levels;
float scale_min;
float scale_max;
float scale_step;
-
+
// SavePCAall: saves PCA components and descriptors to a file storage
// - fs: output file storage
void SavePCAall (FileStorage &fs) const;
-
+
// LoadPCAall: loads PCA components and descriptors from a file node
// - fn: input file node
void LoadPCAall (const FileNode &fn);
// - pca_desc_config: the name of the file that contains descriptors of PCA components
OneWayDescriptorObject(CvSize patch_size, int pose_count, const char* train_path, const char* pca_config,
const char* pca_hr_config = 0, const char* pca_desc_config = 0, int pyr_levels = 1);
-
+
OneWayDescriptorObject(CvSize patch_size, int pose_count, const string &pca_filename,
const string &train_path = string (), const string &images_list = string (),
float _scale_min = 0.7f, float _scale_max=1.5f, float _scale_step=1.2f, int pyr_levels = 1);
-
-
+
+
virtual ~OneWayDescriptorObject();
-
+
// Allocate: allocates memory for a given number of features
// - train_feature_count: the total number of features
// - object_feature_count: the number of features extracted from the object
void Allocate(int train_feature_count, int object_feature_count);
-
-
+
+
void SetLabeledFeatures(const vector<KeyPoint>& features) {m_train_features = features;};
vector<KeyPoint>& GetLabeledFeatures() {return m_train_features;};
const vector<KeyPoint>& GetLabeledFeatures() const {return m_train_features;};
vector<KeyPoint> _GetLabeledFeatures() const;
-
+
// IsDescriptorObject: returns 1 if descriptor with specified index is positive, otherwise 0
int IsDescriptorObject(int desc_idx) const;
-
+
// MatchPointToPart: returns the part number of a feature if it matches one of the object parts, otherwise -1
int MatchPointToPart(CvPoint pt) const;
-
+
// GetDescriptorPart: returns the part number of the feature corresponding to a specified descriptor
// - desc_idx: descriptor index
int GetDescriptorPart(int desc_idx) const;
-
-
+
+
void InitializeObjectDescriptors(IplImage* train_image, const vector<KeyPoint>& features,
const char* feature_label, int desc_start_idx = 0, float scale = 1.0f,
int is_background = 0);
-
+
// GetObjectFeatureCount: returns the number of object features
int GetObjectFeatureCount() const {return m_object_feature_count;};
-
+
protected:
int* m_part_id; // contains part id for each of object descriptors
vector<KeyPoint> m_train_features; // train features
int m_object_feature_count; // the number of the positive features
-
+
};
-
+
/*
* OneWayDescriptorMatcher
*/
static float GET_MIN_SCALE() { return 0.7f; }
static float GET_MAX_SCALE() { return 1.5f; }
static float GET_STEP_SCALE() { return 1.2f; }
-
+
Params( int poseCount = POSE_COUNT,
Size patchSize = Size(PATCH_WIDTH, PATCH_HEIGHT),
string pcaFilename = string(),
string trainPath = string(), string trainImagesList = string(),
float minScale = GET_MIN_SCALE(), float maxScale = GET_MAX_SCALE(),
float stepScale = GET_STEP_SCALE() );
-
+
int poseCount;
Size patchSize;
string pcaFilename;
string trainPath;
string trainImagesList;
-
+
float minScale, maxScale, stepScale;
};
-
+
OneWayDescriptorMatcher( const Params& params=Params() );
virtual ~OneWayDescriptorMatcher();
-
+
void initialize( const Params& params, const Ptr<OneWayDescriptorBase>& base=Ptr<OneWayDescriptorBase>() );
-
+
// Clears keypoints storing in collection and OneWayDescriptorBase
virtual void clear();
-
+
virtual void train();
-
+
virtual bool isMaskSupported();
-
+
virtual void read( const FileNode &fn );
virtual void write( FileStorage& fs ) const;
-
+
virtual bool empty() const;
-
+
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
-
+
protected:
// Matches a set of keypoints from a single image of the training set. A rectangle with a center in a keypoint
// and size (patch_width/2*scale, patch_height/2*scale) is cropped from the source image for each
virtual void radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
vector<vector<DMatch> >& matches, float maxDistance,
const vector<Mat>& masks, bool compactResult );
-
+
Ptr<OneWayDescriptorBase> base;
Params params;
int prevTrainCount;
int nviews=FernClassifier::DEFAULT_VIEWS,
int compressionMethod=FernClassifier::COMPRESSION_NONE,
const PatchGenerator& patchGenerator=PatchGenerator() );
-
+
Params( const string& filename );
-
+
int nclasses;
int patchSize;
int signatureSize;
int nviews;
int compressionMethod;
PatchGenerator patchGenerator;
-
+
string filename;
};
-
+
FernDescriptorMatcher( const Params& params=Params() );
virtual ~FernDescriptorMatcher();
-
+
virtual void clear();
-
+
virtual void train();
-
+
virtual bool isMaskSupported();
-
+
virtual void read( const FileNode &fn );
virtual void write( FileStorage& fs ) const;
virtual bool empty() const;
-
+
virtual Ptr<GenericDescriptorMatcher> clone( bool emptyTrainData=false ) const;
-
+
protected:
virtual void knnMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
vector<vector<DMatch> >& matches, int k,
virtual void radiusMatchImpl( const Mat& queryImage, vector<KeyPoint>& queryKeypoints,
vector<vector<DMatch> >& matches, float maxDistance,
const vector<Mat>& masks, bool compactResult );
-
+
void trainFernClassifier();
void calcBestProbAndMatchIdx( const Mat& image, const Point2f& pt,
float& bestProb, int& bestMatchIdx, vector<float>& signature );
Params params;
int prevTrainCount;
};
-
+
/*
* CalonderDescriptorExtractor
{
public:
CalonderDescriptorExtractor( const string& classifierFile );
-
+
virtual void read( const FileNode &fn );
virtual void write( FileStorage &fs ) const;
-
+
virtual int descriptorSize() const { return classifier_.classes(); }
virtual int descriptorType() const { return DataType<T>::type; }
-
+
virtual bool empty() const;
-
+
protected:
virtual void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
-
+
RTreeClassifier classifier_;
static const int BORDER_SIZE = 16;
};
{
// Cannot compute descriptors for keypoints on the image border.
KeyPointsFilter::runByImageBorder(keypoints, image.size(), BORDER_SIZE);
-
+
/// @todo Check 16-byte aligned
descriptors.create((int)keypoints.size(), classifier_.classes(), cv::DataType<T>::type);
-
+
int patchSize = RandomizedTree::PATCH_SIZE;
int offset = patchSize / 2;
for (size_t i = 0; i < keypoints.size(); ++i)
{
return classifier_.trees_.empty();
}
-
-
+
+
////////////////////// Brute Force Matcher //////////////////////////
-
+
template<class Distance>
class CV_EXPORTS BruteForceMatcher : public BFMatcher
{
BruteForceMatcher( Distance d = Distance() ) : BFMatcher(Distance::normType, false) {}
virtual ~BruteForceMatcher() {}
};
-
-
+
+
/****************************************************************************************\
* Planar Object Detection *
\****************************************************************************************/
const LDetector& getDetector() const;
const FernClassifier& getClassifier() const;
void setVerbose(bool verbose);
-
+
void read(const FileNode& node);
void write(FileStorage& fs, const String& name=String()) const;
bool operator()(const Mat& image, CV_OUT Mat& H, CV_OUT vector<Point2f>& corners) const;
bool operator()(const vector<Mat>& pyr, const vector<KeyPoint>& keypoints,
CV_OUT Mat& H, CV_OUT vector<Point2f>& corners,
CV_OUT vector<int>* pairs=0) const;
-
+
protected:
bool verbose;
Rect modelROI;
LDetector ldetector;
FernClassifier fernClassifier;
};
-
+
}
// 2009-01-12, Xavier Delacour <xavier.delacour@gmail.com>
struct CvLSHOperations
{
virtual ~CvLSHOperations() {}
-
+
virtual int vector_add(const void* data) = 0;
virtual void vector_remove(int i) = 0;
virtual const void* vector_lookup(int i) = 0;
virtual void vector_reserve(int n) = 0;
virtual unsigned int vector_count() = 0;
-
+
virtual void hash_insert(lsh_hash h, int l, int i) = 0;
virtual void hash_remove(lsh_hash h, int l, int i) = 0;
virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0;
CvMemStorage* storage, CvSeq** comp,
int level, double threshold1,
double threshold2 );
-
+
/****************************************************************************************\
* Planar subdivisions *
\****************************************************************************************/
+typedef size_t CvSubdiv2DEdge;
+
+#define CV_QUADEDGE2D_FIELDS() \
+ int flags; \
+ struct CvSubdiv2DPoint* pt[4]; \
+ CvSubdiv2DEdge next[4];
+
+#define CV_SUBDIV2D_POINT_FIELDS()\
+ int flags; \
+ CvSubdiv2DEdge first; \
+ CvPoint2D32f pt; \
+ int id;
+
+#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30)
+
+typedef struct CvQuadEdge2D
+{
+ CV_QUADEDGE2D_FIELDS()
+}
+CvQuadEdge2D;
+
+typedef struct CvSubdiv2DPoint
+{
+ CV_SUBDIV2D_POINT_FIELDS()
+}
+CvSubdiv2DPoint;
+
+#define CV_SUBDIV2D_FIELDS() \
+ CV_GRAPH_FIELDS() \
+ int quad_edges; \
+ int is_geometry_valid; \
+ CvSubdiv2DEdge recent_edge; \
+ CvPoint2D32f topleft; \
+ CvPoint2D32f bottomright;
+
+typedef struct CvSubdiv2D
+{
+ CV_SUBDIV2D_FIELDS()
+}
+CvSubdiv2D;
+
+typedef enum CvSubdiv2DPointLocation
+{
+ CV_PTLOC_ERROR = -2,
+ CV_PTLOC_OUTSIDE_RECT = -1,
+ CV_PTLOC_INSIDE = 0,
+ CV_PTLOC_VERTEX = 1,
+ CV_PTLOC_ON_EDGE = 2
+}
+CvSubdiv2DPointLocation;
+
+typedef enum CvNextEdgeType
+{
+ CV_NEXT_AROUND_ORG = 0x00,
+ CV_NEXT_AROUND_DST = 0x22,
+ CV_PREV_AROUND_ORG = 0x11,
+ CV_PREV_AROUND_DST = 0x33,
+ CV_NEXT_AROUND_LEFT = 0x13,
+ CV_NEXT_AROUND_RIGHT = 0x31,
+ CV_PREV_AROUND_LEFT = 0x20,
+ CV_PREV_AROUND_RIGHT = 0x02
+}
+CvNextEdgeType;
+
+/* get the next edge with the same origin point (counterclockwise) */
+#define CV_SUBDIV2D_NEXT_EDGE( edge ) (((CvQuadEdge2D*)((edge) & ~3))->next[(edge)&3])
+
+
/* Initializes Delaunay triangulation */
CVAPI(void) cvInitSubdivDelaunay2D( CvSubdiv2D* subdiv, CvRect rect );
{
CvSubdiv2D* subdiv = cvCreateSubdiv2D( CV_SEQ_KIND_SUBDIV2D, sizeof(*subdiv),
sizeof(CvSubdiv2DPoint), sizeof(CvQuadEdge2D), storage );
-
+
cvInitSubdivDelaunay2D( subdiv, rect );
return subdiv;
}
return (CvSubdiv2DPoint*)e->pt[(edge + 2) & 3];
}
+/****************************************************************************************\
+* Additional operations on Subdivisions *
+\****************************************************************************************/
+
+// paints voronoi diagram: just demo function
+CVAPI(void) icvDrawMosaic( CvSubdiv2D* subdiv, IplImage* src, IplImage* dst );
+
+// checks planar subdivision for correctness. It is not an absolute check,
+// but it verifies some relations between quad-edges
+CVAPI(int) icvSubdiv2DCheck( CvSubdiv2D* subdiv );
+
+// returns squared distance between two 2D points with floating-point coordinates.
+CV_INLINE double icvSqDist2D32f( CvPoint2D32f pt1, CvPoint2D32f pt2 )
+{
+ double dx = pt1.x - pt2.x;
+ double dy = pt1.y - pt2.y;
+
+ return dx*dx + dy*dy;
+}
+
+
+
CV_INLINE double cvTriangleArea( CvPoint2D32f a, CvPoint2D32f b, CvPoint2D32f c )
{
return ((double)b.x - a.x) * ((double)c.y - a.y) - ((double)b.y - a.y) * ((double)c.x - a.x);
-}
+}
+
-
/* Constructs kd-tree from set of feature descriptors */
CVAPI(struct CvFeatureTree*) cvCreateKDTree(CvMat* desc);
/* Query the LSH n times for at most k nearest points; data is n x d,
indices and dist are n x k. At most emax stored points will be accessed. */
CVAPI(void) cvLSHQuery(struct CvLSH* lsh, const CvMat* query_points,
- CvMat* indices, CvMat* dist, int k, int emax);
-
+ CvMat* indices, CvMat* dist, int k, int emax);
+
/* Kolmogorov-Zabin stereo-correspondence algorithm (a.k.a. KZ1) */
#define CV_STEREO_GC_OCCLUDED SHRT_MAX
int minDisparity;
int numberOfDisparities;
int maxIters;
-
+
CvMat* left;
CvMat* right;
CvMat* dispLeft;
int use_previous, CvArr* velx, CvArr* vely,
double lambda, CvTermCriteria criteria );
-
+
/****************************************************************************************\
* Background/foreground segmentation *
\****************************************************************************************/
-
+
/* We discriminate between foreground and background pixels
* by building and maintaining a model of the background.
* Any pixel which does not fit this model is then deemed
* one of which has two variations:
*
* o CV_BG_MODEL_FGD: latest and greatest algorithm, described in
- *
+ *
* Foreground Object Detection from Videos Containing Complex Background.
- * Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
+ * Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
* ACM MM2003 9p
*
* o CV_BG_MODEL_FGD_SIMPLE:
* C Stauffer and W Grimson August 2000
* IEEE Transactions on Pattern Analysis and Machine Intelligence 22(8):747-757
*/
-
-
+
+
#define CV_BG_MODEL_FGD 0
#define CV_BG_MODEL_MOG 1 /* "Mixture of Gaussians". */
#define CV_BG_MODEL_FGD_SIMPLE 2
-
+
struct CvBGStatModel;
typedef void (CV_CDECL * CvReleaseBGStatModel)( struct CvBGStatModel** bg_model );
typedef int (CV_CDECL * CvUpdateBGStatModel)( IplImage* curr_frame, struct CvBGStatModel* bg_model,
double learningRate );
-
+
#define CV_BG_STAT_MODEL_FIELDS() \
int type; /*type of BG model*/ \
CvReleaseBGStatModel release; \
int layer_count; /* can be zero */ \
CvMemStorage* storage; /*storage for foreground_regions*/ \
CvSeq* foreground_regions /*foreground object contours*/
-
+
typedef struct CvBGStatModel
{
CV_BG_STAT_MODEL_FIELDS();
} CvBGStatModel;
-//
+//
// Releases memory used by BGStatModel
CVAPI(void) cvReleaseBGStatModel( CvBGStatModel** bg_model );
CVAPI(int) cvChangeDetection( IplImage* prev_frame,
IplImage* curr_frame,
IplImage* change_mask );
-
+
/*
Interface of ACM MM2003 algorithm
*/
-
+
/* Default parameters of foreground detection algorithm: */
#define CV_BGFG_FGD_LC 128
#define CV_BGFG_FGD_N1C 15
#define CV_BGFG_FGD_N2C 25
-
+
#define CV_BGFG_FGD_LCC 64
#define CV_BGFG_FGD_N1CC 25
#define CV_BGFG_FGD_N2CC 40
-
+
/* Background reference image update parameter: */
#define CV_BGFG_FGD_ALPHA_1 0.1f
-
+
/* stat model update parameter
* 0.002f ~ 1K frame(~45sec), 0.005 ~ 18sec (if 25fps and absolutely static BG)
*/
#define CV_BGFG_FGD_ALPHA_2 0.005f
-
+
/* start value for alpha parameter (to fast initiate statistic model) */
#define CV_BGFG_FGD_ALPHA_3 0.1f
-
+
#define CV_BGFG_FGD_DELTA 2
-
+
#define CV_BGFG_FGD_T 0.9f
-
+
#define CV_BGFG_FGD_MINAREA 15.f
-
+
#define CV_BGFG_FGD_BG_UPDATE_TRESH 0.5f
-
+
/* See the above-referenced Li/Huang/Gu/Tian paper
* for a full description of these background-model
* tuning parameters.
int N1c; /* Number of color vectors used to model normal background color variation at a given pixel. */
int N2c; /* Number of color vectors retained at given pixel. Must be > N1c, typically ~ 5/3 of N1c. */
/* Used to allow the first N1c vectors to adapt over time to changing background. */
-
+
int Lcc; /* Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64. */
int N1cc; /* Number of color co-occurrence vectors used to model normal background color variation at a given pixel. */
int N2cc; /* Number of color co-occurrence vectors retained at given pixel. Must be > N1cc, typically ~ 5/3 of N1cc. */
/* Used to allow the first N1cc vectors to adapt over time to changing background. */
-
+
int is_obj_without_holes;/* If TRUE we ignore holes within foreground blobs. Defaults to TRUE. */
int perform_morphing; /* Number of erode-dilate-erode foreground-blob cleanup iterations. */
/* These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. */
-
+
float alpha1; /* How quickly we forget old background pixel values seen. Typically set to 0.1 */
float alpha2; /* "Controls speed of feature learning". Depends on T. Typical value circa 0.005. */
float alpha3; /* Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1. */
-
+
float delta; /* Affects color and color co-occurrence quantization, typically set to 2. */
float T; /* "A percentage value which determines when new features can be recognized as new background." (Typically 0.9).*/
float minArea; /* Discard foreground blobs whose bounding box is smaller than this threshold. */
/* Creates FGD model */
CVAPI(CvBGStatModel*) cvCreateFGDStatModel( IplImage* first_frame,
CvFGDStatModelParams* parameters CV_DEFAULT(NULL));
-
-/*
+
+/*
Interface of Gaussian mixture algorithm
-
+
"An improved adaptive background mixture model for real-time tracking with shadow detection"
P. KadewTraKuPong and R. Bowden,
Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001."
http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
*/
-
+
/* Note: "MOG" == "Mixture Of Gaussians": */
-
+
#define CV_BGFG_MOG_MAX_NGAUSSIANS 500
-
+
/* default parameters of gaussian background detection algorithm */
#define CV_BGFG_MOG_BACKGROUND_THRESHOLD 0.7 /* threshold sum of weights for background test */
#define CV_BGFG_MOG_STD_THRESHOLD 2.5 /* lambda=2.5 is 99% */
#define CV_BGFG_MOG_WEIGHT_INIT 0.05
#define CV_BGFG_MOG_SIGMA_INIT 30
#define CV_BGFG_MOG_MINAREA 15.f
-
-
+
+
#define CV_BGFG_MOG_NCOLORS 3
-
+
typedef struct CvGaussBGStatModelParams
-{
+{
int win_size; /* = 1/alpha */
int n_gauss;
double bg_threshold, std_threshold, minArea;
typedef struct CvGaussBGModel
{
CV_BG_STAT_MODEL_FIELDS();
- CvGaussBGStatModelParams params;
- CvGaussBGPoint* g_point;
+ CvGaussBGStatModelParams params;
+ CvGaussBGPoint* g_point;
int countFrames;
void* mog;
} CvGaussBGModel;
float perimScale CV_DEFAULT(4.f),
CvMemStorage* storage CV_DEFAULT(0),
CvPoint offset CV_DEFAULT(cvPoint(0,0)));
-
+
#ifdef __cplusplus
}
#endif