# --- Eigen ---
if(WITH_EIGEN)
find_path(EIGEN_INCLUDE_PATH "Eigen/Core"
- PATHS /usr/local /opt /usr $ENV{EIGEN_ROOT}/include ENV ProgramFiles ENV ProgramW6432
+ PATHS /usr/local /opt /usr $ENV{EIGEN_ROOT}/include ENV ProgramFiles ENV ProgramW6432
PATH_SUFFIXES include/eigen3 include/eigen2 Eigen/include/eigen3 Eigen/include/eigen2
DOC "The path to Eigen3/Eigen2 headers"
CMAKE_FIND_ROOT_PATH_BOTH)
Here is the code explanation:
-Retina definition is present in the bioinspired package and a simple include allows to use it. You can rather use the specific header : *opencv2/bioinspired.hpp* if you prefer but then include the other required openv modules : *opencv2/core.hpp* and *opencv2/highgui.hpp*
+The Retina class is defined in the bioinspired module, and a single include makes it available. You can instead use the specific header *opencv2/bioinspired.hpp* if you prefer, but then also include the other required OpenCV modules: *opencv2/core.hpp* and *opencv2/highgui.hpp*
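A minimal sketch of those includes follows; it is illustrative rather than the tutorial's exact listing, and the ``createRetina`` factory used inside is the 3.0-era bioinspired API (an assumption; adjust to your OpenCV version).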
.. code-block:: cpp
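
    // Illustrative sketch, not the tutorial's exact listing.
    #include "opencv2/bioinspired.hpp"   // the specific header
    #include "opencv2/core.hpp"          // other required modules
    #include "opencv2/highgui.hpp"

    int main()
    {
        // assumption: 3.0-era factory function; earlier releases
        // construct a cv::Retina object directly
        cv::Ptr<cv::bioinspired::Retina> retina =
            cv::bioinspired::createRetina(cv::Size(640, 480));
        return retina.empty() ? 1 : 0;
    }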
release package is recommended to communicate with OpenCV Manager via the async initialization
described above.
-#. Add the OpenCV library project to your workspace the same way as for the async initialization
+#. Add the OpenCV library project to your workspace the same way as for the async initialization
above. Use menu :guilabel:`File -> Import -> Existing project in your workspace`,
- press :guilabel:`Browse` button and select OpenCV SDK path
+ press the :guilabel:`Browse` button and select the OpenCV SDK path
(:file:`OpenCV-2.4.6-android-sdk/sdk`).
.. image:: images/eclipse_opencv_dependency0.png
//////////////////////////////////////////////////////////////////////////////////////////////////////////
-//////////////////////////////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////////////////////////
mask4 = (Q.row(2) > 0) & mask4;
mask4 = (Q.row(2) < dist) & mask4;
- mask1 = mask1.t();
- mask2 = mask2.t();
- mask3 = mask3.t();
- mask4 = mask4.t();
+ mask1 = mask1.t();
+ mask2 = mask2.t();
+ mask3 = mask3.t();
+ mask4 = mask4.t();
// If _mask is given, then use it to filter outliers.
if (!_mask.empty())
{
Mat mask = _mask.getMat();
- CV_Assert(mask.size() == mask1.size());
+ CV_Assert(mask.size() == mask1.size());
bitwise_and(mask, mask1, mask1);
bitwise_and(mask, mask2, mask2);
bitwise_and(mask, mask3, mask3);
}
if (_mask.empty() && _mask.needed())
{
- _mask.create(mask1.size(), CV_8U);
+ _mask.create(mask1.size(), CV_8U);
}
CV_Assert(_R.needed() && _t.needed());
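The mask filtering above appears to belong to a pose-recovery routine; for context, a hypothetical caller sketch follows, assuming the standard cv::findEssentialMat / cv::recoverPose pair (every input below is invented for illustration, not taken from this diff):

    #include "opencv2/calib3d.hpp"

    void poseSketch(const std::vector<cv::Point2f>& pts1,
                    const std::vector<cv::Point2f>& pts2)
    {
        double focal = 700.0;                 // assumed camera intrinsics
        cv::Point2d pp(320.0, 240.0);
        cv::Mat mask;                         // RANSAC inlier mask
        cv::Mat E = cv::findEssentialMat(pts1, pts2, focal, pp, cv::RANSAC,
                                         0.999, 1.0, mask);
        cv::Mat R, t;
        cv::recoverPose(E, pts1, pts2, R, t, focal, pp, mask); // mask filtered in-place
    }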
This is a translation to C++ of Miroslav Balda's Matlab LMSolve package.
Here is the original copyright:
============================================================================
-
+
Copyright (c) 2007, Miroslav Balda
All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
met:
- * Redistributions of source code must retain the above copyright
+ * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
gemm(J, r, 1, noArray(), 0, v, GEMM_1_T);
Mat D = A.diag().clone();
-
+
const double Rlo = 0.25, Rhi = 0.75;
double lambda = 1, lc = 0.75;
int i, iter = 0;
CV_Assert( !LMSolverImpl_info_auto.name().empty() );
return new LMSolverImpl(cb, maxIters);
}
-
+
}
cv::Mat expected;
cv::Rodrigues(rvec, expected);
-
-
+
+
ASSERT_EQ(0, norm(cv::Mat(affine.matrix, false).colRange(0, 3).rowRange(0, 3) != expected));
ASSERT_EQ(0, norm(cv::Mat(affine.linear()) != expected));
-
-
+
+
cv::Matx33d R = cv::Matx33d::eye();
-
+
double angle = 50;
R.val[0] = R.val[4] = std::cos(CV_PI*angle/180.0);
R.val[3] = std::sin(CV_PI*angle/180.0);
R.val[1] = -R.val[3];
-
-
+
+
cv::Affine3d affine1(cv::Mat(cv::Vec3d(0.2, 0.5, 0.3)).reshape(1, 1), cv::Vec3d(4, 5, 6));
cv::Affine3d affine2(R, cv::Vec3d(1, 1, 0.4));
-
+
cv::Affine3d result = affine1.inv() * affine2;
-
+
expected = cv::Mat(affine1.matrix.inv(cv::DECOMP_SVD)) * cv::Mat(affine2.matrix, false);
-
+
cv::Mat diff;
cv::absdiff(expected, result.matrix, diff);
-
+
ASSERT_LT(cv::norm(diff, cv::NORM_INF), 1e-15);
}
:param flags: Flags setting drawing features. Possible ``flags`` bit values are defined by ``DrawMatchesFlags``. See details above in :ocv:func:`drawMatches` .
-.. note:: For Python API, flags are modified as `cv2.DRAW_MATCHES_FLAGS_DEFAULT`, `cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS`, `cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG`, `cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS`
+.. note:: For the Python API, the flags are exposed as `cv2.DRAW_MATCHES_FLAGS_DEFAULT`, `cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS`, `cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG` and `cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS`
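A minimal C++ call sketch (illustrative; the images, keypoints and matches are assumed to exist)::

    std::vector<cv::DMatch> matches;   // e.g. from a DescriptorMatcher
    cv::Mat outImg;
    cv::drawMatches(img1, keypoints1, img2, keypoints2, matches, outImg,
                    cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector<char>(),
                    cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);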
:param descriptors: The output descriptors. Pass ``cv::noArray()`` if you do not need it.
:param useProvidedKeypoints: If it is true, then the method will use the provided vector of keypoints instead of detecting them.
-
+
BRISK
-----
#if LIBAVUTIL_BUILD > CALC_FFMPEG_VERSION(51,11,0)
/* Some settings for libx264 encoding, restore dummy values for gop_size
and qmin since they will be set to reasonable defaults by the libx264
- preset system. Also, use a crf encode with the default quality rating,
+ preset system. Also, use a crf encode with the default quality rating,
this seems easier than finding an appropriate default bitrate. */
if (c->codec_id == CODEC_ID_H264) {
c->gop_size = -1;
* An example using the morphological dilate operation can be found at opencv_source_code/samples/cpp/morphology2.cpp
-
+
erode
.. ocv:pyfunction:: cv2.getGaborKernel(ksize, sigma, theta, lambd, gamma[, psi[, ktype]]) -> retval
:param ksize: Size of the filter returned.
-
+
:param sigma: Standard deviation of the Gaussian envelope.
-
+
:param theta: Orientation of the normal to the parallel stripes of a Gabor function.
-
+
:param lambd: Wavelength of the sinusoidal factor.
-
+
:param gamma: Spatial aspect ratio.
-
+
:param psi: Phase offset.
-
+
:param ktype: Type of filter coefficients. It can be ``CV_32F`` or ``CV_64F`` .
For more details about Gabor filter equations and parameters, see: `Gabor Filter <http://en.wikipedia.org/wiki/Gabor_filter>`_.
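A minimal usage sketch (the parameter values are illustrative, and ``src`` is an assumed input image)::

    cv::Mat kernel = cv::getGaborKernel(cv::Size(31, 31), 4.0 /*sigma*/,
                                        CV_PI/4 /*theta*/, 10.0 /*lambd*/,
                                        0.5 /*gamma*/, 0 /*psi*/, CV_32F);
    cv::Mat dst;
    cv::filter2D(src, dst, CV_32F, kernel);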
:param dst: Destination image of the same size and type as ``src`` .
:param kernel: Structuring element. It can be created using :ocv:func:`getStructuringElement`.
-
+
:param anchor: Anchor position within the kernel. Negative values mean that the anchor is at the kernel center.
:param op: Type of a morphological operation that can be one of the following:
.. ocv:cfunction:: void cvBoxPoints( CvBox2D box, CvPoint2D32f pt[4] )
:param box: The input rotated rectangle. It may be the output of :ocv:func:`minAreaRect`.
-
+
:param points: The output array of four vertices of rectangles.
-
+
The function finds the four vertices of a rotated rectangle. This function is useful for drawing the rectangle. In C++, instead of using this function, you can directly use the ``points()`` method of the ``RotatedRect`` class. Please visit the `tutorial on bounding rectangle <http://docs.opencv.org/doc/tutorials/imgproc/shapedescriptors/bounding_rects_circles/bounding_rects_circles.html#bounding-rects-circles>`_ for more information.
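For illustration, a minimal C++ sketch (``contour`` and ``img`` are assumed inputs)::

    cv::RotatedRect box = cv::minAreaRect(contour);
    cv::Point2f pts[4];
    box.points(pts);   // the C++ counterpart of cvBoxPoints
    for (int i = 0; i < 4; i++)
        cv::line(img, pts[i], pts[(i + 1) % 4], cv::Scalar(0, 255, 0), 2);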
const cv::Mat img = _img.getMat();
_labels.create(img.size(), CV_MAT_DEPTH(ltype));
cv::Mat labels = _labels.getMat();
- connectedcomponents::CCStatsOp sop(statsv, centroids);
+ connectedcomponents::CCStatsOp sop(statsv, centroids);
if(ltype == CV_16U){
return connectedComponents_sub1(img, labels, connectivity, sop);
}else if(ltype == CV_32S){
for( j = 0; j < count; j++ )
w[j] = 1.f;
}
-
+
/* save the line parameters */
memcpy( _lineprev, _line, 6 * sizeof( float ));
-
+
/* Run again... */
fitLine3D_wods( points, count, w, _line );
}
-
+
if( err < min_err )
{
min_err = err;
double param, double reps, double aeps )
{
Mat points = _points.getMat();
-
+
float linebuf[6]={0.f};
int npoints2 = points.checkVector(2, -1, false);
int npoints3 = points.checkVector(3, -1, false);
-
+
CV_Assert( npoints2 >= 0 || npoints3 >= 0 );
-
+
if( points.depth() != CV_32F || !points.isContinuous() )
{
Mat temp;
points.convertTo(temp, CV_32F);
points = temp;
}
-
+
if( npoints2 >= 0 )
fitLine2D( points.ptr<Point2f>(), npoints2, distType,
(float)param, (float)reps, (float)aeps, linebuf);
else
fitLine3D( points.ptr<Point3f>(), npoints3, distType,
(float)param, (float)reps, (float)aeps, linebuf);
-
+
Mat(npoints2 >= 0 ? 4 : 6, 1, CV_32F, linebuf).copyTo(_line);
}
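For reference, a minimal caller sketch of the public wrapper above (the point data is assumed; illustrative only):

    std::vector<cv::Point2f> pts;    // filled elsewhere
    cv::Vec4f line;                  // output: (vx, vy, x0, y0)
    cv::fitLine(pts, line, CV_DIST_L2, 0 /*param*/, 0.01 /*reps*/, 0.01 /*aeps*/);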
default:
CV_Error( CV_StsBadArg, "Unknown comparison method" );
}
-
+
return result;
}
if( fabs(a00) > FLT_EPSILON )
{
double db1_2, db1_6, db1_12, db1_24, db1_20, db1_60;
-
+
if( a00 > 0 )
{
db1_2 = 0.5;
m.m03 += mom[9] + y * (3. * mom[5] + y * (3. * mom[2] + ym));
}
}
-
+
completeMomentState( &m );
return m;
}
CastOp castOp;
VecOp vecOp;
- CV_Assert( ssize.width > 0 && ssize.height > 0 &&
+ CV_Assert( ssize.width > 0 && ssize.height > 0 &&
std::abs(dsize.width*2 - ssize.width) <= 2 &&
std::abs(dsize.height*2 - ssize.height) <= 2 );
int k, x, sy0 = -PD_SZ/2, sy = sy0, width0 = std::min((ssize.width-PD_SZ/2-1)/2 + 1, dsize.width);
double sr2 = sr * sr;
int isr2 = cvRound(sr2), isr22 = MAX(isr2,16);
int tab[768];
-
+
if( src0.type() != CV_8UC3 )
CV_Error( CV_StsUnsupportedFormat, "Only 8-bit, 3-channel images are supported" );
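The format guard above sits in the mean-shift filtering implementation; a hypothetical caller sketch, assuming the public cv::pyrMeanShiftFiltering entry point:

    cv::Mat src = cv::imread("input.png");   // assumed to load as 8-bit, 3-channel
    cv::Mat dst;
    cv::pyrMeanShiftFiltering(src, dst, 20 /*sp: spatial radius*/, 40 /*sr: color radius*/);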
Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", IMREAD_GRAYSCALE);
Mat gold = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_gold.png", IMREAD_UNCHANGED);
Mat result;
-
+
CV_Assert(given.data != NULL && gold.data != NULL);
cvtColor(given, result, CV_BayerBG2GRAY);
{
/*!
- Extremal Region Stat structure
+ Extremal Region Stat structure
The ERStat structure represents a class-specific Extremal Region (ER).
- An ER is a 4-connected set of pixels with all its grey-level values smaller than the values
- in its outer boundary. A class-specific ER is selected (using a classifier) from all the ER's
+ An ER is a 4-connected set of pixels with all its grey-level values smaller than the values
+ in its outer boundary. A class-specific ER is selected (using a classifier) from all the ERs
in the component tree of the image.
*/
struct CV_EXPORTS ERStat
~ERStat(){};
//! seed point and the threshold (max grey-level value)
- int pixel;
- int level;
+ int pixel;
+ int level;
//! incrementally computable features
- int area;
+ int area;
int perimeter;
int euler; //!< Euler number
Rect rect;
double raw_moments[2]; //!< order 1 raw moments to derive the centroid
double central_moments[3]; //!< order 2 central moments to construct the covariance matrix
- std::deque<int> *crossings;//!< horizontal crossings
+ std::deque<int> *crossings;//!< horizontal crossings
float med_crossings; //!< median of the crossings at three different height levels
//! 2nd stage features
float num_inflexion_points;
// TODO Other features can be added (average color, standard deviation, and such)
-
+
// TODO shall we include the pixel list whenever available (i.e. after 2nd stage) ?
- std::vector<int> *pixels;
-
+ std::vector<int> *pixels;
+
//! probability that the ER belongs to the class we are looking for
double probability;
//! pointers preserving the tree structure of the component tree
- ERStat* parent;
- ERStat* child;
+ ERStat* parent;
+ ERStat* child;
ERStat* next;
ERStat* prev;
- //! wenever the regions is a local maxima of the probability
+ //! whether the region is a local maximum of the probability
bool local_maxima;
ERStat* max_probability_ancestor;
ERStat* min_probability_ancestor;
public:
virtual ~Callback(){};
//! The classifier must return a probability measure for the region.
- virtual double eval(const ERStat& stat) = 0; //const = 0; //TODO why cannot use const = 0 here?
+ virtual double eval(const ERStat& stat) = 0; //const = 0; //TODO why cannot use const = 0 here?
};
- /*!
- the key method. Takes image on input and returns the selected regions in a vector of ERStat
+ /*!
+ the key method. Takes an image as input and returns the selected regions in a vector of ERStat;
only distinctive ERs which correspond to characters are selected by a sequential classifier
\param image is the input image
\param regions is output for the first stage, input/output for the second one.
/*!
Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
-
+
The component tree of the image is extracted by a threshold increased step by step
- from 0 to 255, incrementally computable descriptors (aspect_ratio, compactness,
- number of holes, and number of horizontal crossings) are computed for each ER
- and used as features for a classifier which estimates the class-conditional
- probability P(er|character). The value of P(er|character) is tracked using the inclusion
- relation of ER across all thresholds and only the ERs which correspond to local maximum
+ from 0 to 255, incrementally computable descriptors (aspect_ratio, compactness,
+ number of holes, and number of horizontal crossings) are computed for each ER
+ and used as features for a classifier which estimates the class-conditional
+ probability P(er|character). The value of P(er|character) is tracked using the inclusion
+ relation of ER across all thresholds and only the ERs which correspond to local maximum
of the probability P(er|character) are selected (if the local maximum of the
- probability is above a global limit pmin and the difference between local maximum and
+ probability is above a global limit pmin and the difference between local maximum and
local minimum is greater than minProbabilityDiff).
\param cb Callback with the classifier.
\param minArea The minimum area (% of image size) allowed for retrieved ERs
\param maxArea The maximum area (% of image size) allowed for retrieved ERs
\param minProbability The minimum probability P(er|character) allowed for retrieved ERs
- \param nonMaxSuppression Whenever non-maximum suppression is done over the branch probabilities
+ \param nonMaxSuppression Whether non-maximum suppression is done over the branch probabilities
\param minProbabilityDiff The minimum probability difference between local maxima and local minima ERs
*/
-CV_EXPORTS Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb = NULL,
- int thresholdDelta = 1, float minArea = 0.000025,
- float maxArea = 0.13, float minProbability = 0.2,
- bool nonMaxSuppression = true,
+CV_EXPORTS Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb = NULL,
+ int thresholdDelta = 1, float minArea = 0.000025,
+ float maxArea = 0.13, float minProbability = 0.2,
+ bool nonMaxSuppression = true,
float minProbabilityDiff = 0.1);
/*!
Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
- In the second stage, the ERs that passed the first stage are classified into character
+ In the second stage, the ERs that passed the first stage are classified into character
and non-character classes using more informative but also more computationally expensive
- features. The classifier uses all the features calculated in the first stage and the following
+ features. The classifier uses all the features calculated in the first stage and the following
additional features: hole area ratio, convex hull ratio, and number of outer inflexion points.
\param cb Callback with the classifier
if omitted, tries to load a default classifier from the file trained_classifierNM2.xml
\param minProbability The minimum probability P(er|character) allowed for retrieved ERs
*/
-CV_EXPORTS Ptr<ERFilter> createERFilterNM2(const Ptr<ERFilter::Callback>& cb = NULL,
+CV_EXPORTS Ptr<ERFilter> createERFilterNM2(const Ptr<ERFilter::Callback>& cb = NULL,
float minProbability = 0.85);
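The two factories are designed to be chained; a minimal pipeline sketch (it mirrors the demo program further down in this diff; ``grey`` is an assumed CV_8UC1 input):

    std::vector<cv::ERStat> regions;
    cv::Ptr<cv::ERFilter> er_filter1 = cv::createERFilterNM1(); // 1st stage, default classifier
    cv::Ptr<cv::ERFilter> er_filter2 = cv::createERFilterNM2(); // 2nd stage, default classifier
    er_filter1->run(grey, regions);   // fills regions from the component tree
    er_filter2->run(grey, regions);   // regions is input/output here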
}
int n1 = rweights[i];
double w1 = rejectWeights[i];
int l1 = rejectLevels[i];
-
+
// filter out rectangles which don't have enough similar rectangles
if( n1 <= groupThreshold )
continue;
namespace cv
{
-ERStat::ERStat(int init_level, int init_pixel, int init_x, int init_y) : pixel(init_pixel),
- level(init_level), area(0), perimeter(0), euler(0), probability(1.0),
- parent(0), child(0), next(0), prev(0), local_maxima(0),
+ERStat::ERStat(int init_level, int init_pixel, int init_x, int init_y) : pixel(init_pixel),
+ level(init_level), area(0), perimeter(0), euler(0), probability(1.0),
+ parent(0), child(0), next(0), prev(0), local_maxima(0),
max_probability_ancestor(0), min_probability_ancestor(0)
{
rect = Rect(init_x,init_y,1,1);
//Destructor
~ERFilterNM() {};
- float minProbability;
+ float minProbability;
bool nonMaxSuppression;
float minProbabilityDiff;
- // the key method. Takes image on input, vector of ERStat is output for the first stage,
+ // the key method. Takes image on input, vector of ERStat is output for the first stage,
// input/output - for the second one.
void run( InputArray image, std::vector<ERStat>& regions );
protected:
int thresholdDelta;
- float maxArea;
+ float maxArea;
float minArea;
Ptr<ERFilter::Callback> classifier;
// extract the component tree and store all the ER regions
void er_tree_extract( InputArray image );
// accumulate a pixel into an ER
- void er_add_pixel( ERStat *parent, int x, int y, int non_boundary_neighbours,
- int non_boundary_neighbours_horiz,
+ void er_add_pixel( ERStat *parent, int x, int y, int non_boundary_neighbours,
+ int non_boundary_neighbours_horiz,
int d_C1, int d_C2, int d_C3 );
// merge an ER with its nested parent
void er_merge( ERStat *parent, ERStat *child );
// default 1st stage classifier
-class CV_EXPORTS ERClassifierNM1 : public ERFilter::Callback
+class CV_EXPORTS ERClassifierNM1 : public ERFilter::Callback
{
public:
//Constructor
~ERClassifierNM1() {};
// The classifier must return a probability measure for the region.
- double eval(const ERStat& stat);
+ double eval(const ERStat& stat);
private:
CvBoost boost;
};
// default 2nd stage classifier
-class CV_EXPORTS ERClassifierNM2 : public ERFilter::Callback
+class CV_EXPORTS ERClassifierNM2 : public ERFilter::Callback
{
public:
//constructor
~ERClassifierNM2() {};
// The classifier must return a probability measure for the region.
- double eval(const ERStat& stat);
+ double eval(const ERStat& stat);
private:
CvBoost boost;
classifier = NULL;
}
-// the key method. Takes image on input, vector of ERStat is output for the first stage,
+// the key method. Takes image on input, vector of ERStat is output for the first stage,
// input/output for the second one.
void ERFilterNM::run( InputArray image, std::vector<ERStat>& _regions )
{
regions = &_regions;
region_mask = Mat::zeros(image.getMat().rows+2, image.getMat().cols+2, CV_8UC1);
-
+
// if regions vector is empty we must extract the entire component tree
if ( regions->size() == 0 )
{
src = (image.getMat() / thresholdDelta) -1;
}
- const unsigned char * image_data = src.data;
- int width = src.cols, height = src.rows;
+ const unsigned char * image_data = src.data;
+ int width = src.cols, height = src.rows;
// the component stack
vector<ERStat*> er_stack;
- //the quads for euler number calculation
+ // the quads for Euler number calculation
unsigned char quads[3][4];
quads[0][0] = 1 << 3;
quads[0][1] = 1 << 2;
// initially set the threshold one grey-level higher than any value allowed in the image, so every pixel qualifies
int threshold_level = (255/thresholdDelta)+1;
-
+
// starting from the first pixel (0,0)
int current_pixel = 0;
int current_edge = 0;
int current_level = image_data[0];
accessible_pixel_mask[0] = true;
-
+
bool push_new_component = true;
-
+
for (;;) {
int x = current_pixel % width;
int y = current_pixel / width;
// push a component with current level in the component stack
- if (push_new_component)
+ if (push_new_component)
er_stack.push_back(new ERStat(current_level, current_pixel, x, y));
push_new_component = false;
-
+
// explore the (remaining) edges to the neighbors to the current pixel
- for (current_edge = current_edge; current_edge < 4; current_edge++)
+ for (current_edge = current_edge; current_edge < 4; current_edge++)
{
int neighbour_pixel = current_pixel;
-
- switch (current_edge)
+
+ switch (current_edge)
{
case 0: if (x < width - 1) neighbour_pixel = current_pixel + 1; break;
case 1: if (y < height - 1) neighbour_pixel = current_pixel + width; break;
}
// if neighbour is not accessible, mark it accessible and retrieve its grey-level value
- if ( !accessible_pixel_mask[neighbour_pixel] && (neighbour_pixel != current_pixel) )
+ if ( !accessible_pixel_mask[neighbour_pixel] && (neighbour_pixel != current_pixel) )
{
int neighbour_level = image_data[neighbour_pixel];
accessible_pixel_mask[neighbour_pixel] = true;
- // if neighbour level is not lower than current level add neighbour to the boundary heap
- if (neighbour_level >= current_level)
+ // if neighbour level is not lower than current level add neighbour to the boundary heap
+ if (neighbour_level >= current_level)
{
boundary_pixes[neighbour_level].push_back(neighbour_pixel);
boundary_edges[neighbour_level].push_back(0);
-
+
// if neighbour level is lower than our threshold_level set threshold_level to neighbour level
if (neighbour_level < threshold_level)
threshold_level = neighbour_level;
- }
- else // if neighbour level is lower than current add current_pixel (and next edge)
+ }
+ else // if neighbour level is lower than current add current_pixel (and next edge)
// to the boundary heap for later processing
{
-
+
boundary_pixes[current_level].push_back(current_pixel);
boundary_edges[current_level].push_back(current_edge + 1);
-
+
// if current level is lower than threshold_level set threshold_level to current level
if (current_level < threshold_level)
threshold_level = current_level;
-
+
// consider the new pixel and its grey-level as current pixel
current_pixel = neighbour_pixel;
current_edge = 0;
current_level = neighbour_level;
-
+
// and push a new component
push_new_component = true;
- break;
+ break;
}
}
-
+
} // else the neighbour was already accessible
if (push_new_component) continue;
quad_after[2] = 1<<2;
quad_after[3] = 1;
- for (int edge = 0; edge < 8; edge++)
+ for (int edge = 0; edge < 8; edge++)
{
int neighbour4 = -1;
int neighbour8 = -1;
int cell = 0;
- switch (edge)
+ switch (edge)
{
case 0: if (x < width - 1) { neighbour4 = neighbour8 = current_pixel + 1;} cell = 5; break;
case 1: if ((x < width - 1)&&(y < height - 1)) { neighbour8 = current_pixel + 1 + width;} cell = 8; break;
{
if (accumulated_pixel_mask[neighbour8])
pix_value = image_data[neighbour8];
- }
+ }
if (pix_value<=image_data[current_pixel])
{
C_before[p]++;
if ( (quad_before[1] == quads[p][q]) && ((p<2)||(q<2)) )
C_before[p]++;
- if ( (quad_before[2] == quads[p][q]) && ((p<2)||(q<2)) )
+ if ( (quad_before[2] == quads[p][q]) && ((p<2)||(q<2)) )
C_before[p]++;
if ( (quad_before[3] == quads[p][q]) && ((p<2)||(q<2)) )
C_before[p]++;
- if ( (quad_after[0] == quads[p][q]) && ((p<2)||(q<2)) )
+ if ( (quad_after[0] == quads[p][q]) && ((p<2)||(q<2)) )
C_after[p]++;
- if ( (quad_after[1] == quads[p][q]) && ((p<2)||(q<2)) )
+ if ( (quad_after[1] == quads[p][q]) && ((p<2)||(q<2)) )
C_after[p]++;
- if ( (quad_after[2] == quads[p][q]) && ((p<2)||(q<2)) )
+ if ( (quad_after[2] == quads[p][q]) && ((p<2)||(q<2)) )
C_after[p]++;
- if ( (quad_after[3] == quads[p][q]) && ((p<2)||(q<2)) )
+ if ( (quad_after[3] == quads[p][q]) && ((p<2)||(q<2)) )
C_after[p]++;
}
}
er_add_pixel(er_stack.back(), x, y, non_boundary_neighbours, non_boundary_neighbours_horiz, d_C1, d_C2, d_C3);
accumulated_pixel_mask[current_pixel] = true;
-
+
// if we have processed all the possible threshold levels (the heap is empty) we are done!
- if (threshold_level == (255/thresholdDelta)+1)
+ if (threshold_level == (255/thresholdDelta)+1)
{
// save the extracted regions into the output vector
return;
}
-
-
+
+
// pop the heap of boundary pixels
current_pixel = boundary_pixes[threshold_level].back();
boundary_pixes[threshold_level].erase(boundary_pixes[threshold_level].end()-1);
current_edge = boundary_edges[threshold_level].back();
boundary_edges[threshold_level].erase(boundary_edges[threshold_level].end()-1);
-
+
while (boundary_pixes[threshold_level].empty() && (threshold_level < (255/thresholdDelta)+1))
threshold_level++;
-
+
int new_level = image_data[current_pixel];
// if the new pixel has higher grey value than the current one
{
ERStat* er = er_stack.back();
er_stack.erase(er_stack.end()-1);
-
- if (new_level < er_stack.back()->level)
+
+ if (new_level < er_stack.back()->level)
{
er_stack.push_back(new ERStat(new_level, current_pixel, current_pixel%width, current_pixel/width));
- er_merge(er_stack.back(), er);
+ er_merge(er_stack.back(), er);
break;
}
}
// accumulate a pixel into an ER
-void ERFilterNM::er_add_pixel(ERStat *parent, int x, int y, int non_border_neighbours,
- int non_border_neighbours_horiz,
+void ERFilterNM::er_add_pixel(ERStat *parent, int x, int y, int non_border_neighbours,
+ int non_border_neighbours_horiz,
int d_C1, int d_C2, int d_C3)
{
parent->area++;
parent->area += child->area;
parent->perimeter += child->perimeter;
-
+
for (int i=parent->rect.y; i<=min(parent->rect.br().y-1,child->rect.br().y-1); i++)
if (i-child->rect.y >= 0)
for (int i=parent->rect.y-1; i>=child->rect.y; i--)
if (i-child->rect.y < (int)child->crossings->size())
parent->crossings->push_front(child->crossings->at(i-child->rect.y));
- else
+ else
parent->crossings->push_front(0);
for (int i=parent->rect.br().y; i<child->rect.y; i++)
parent->crossings->push_back(0);
-
+
for (int i=max(parent->rect.br().y,child->rect.y); i<=child->rect.br().y-1; i++)
parent->crossings->push_back(child->crossings->at(i-child->rect.y));
std::sort(m_crossings.begin(), m_crossings.end());
child->med_crossings = (float)m_crossings.at(1);
- // free unnecessary mem
- child->crossings->clear();
+ // free unnecessary mem
+ child->crossings->clear();
delete(child->crossings);
child->crossings = NULL;
child->probability = classifier->eval(*child);
}
- if ( ((classifier!=NULL)?(child->probability >= minProbability):true) &&
- ((child->area >= (minArea*region_mask.rows*region_mask.cols)) &&
+ if ( ((classifier!=NULL)?(child->probability >= minProbability):true) &&
+ ((child->area >= (minArea*region_mask.rows*region_mask.cols)) &&
(child->area <= (maxArea*region_mask.rows*region_mask.cols))) )
{
num_accepted_regions++;
child->next = parent->child;
- if (parent->child)
+ if (parent->child)
parent->child->prev = child;
parent->child = child;
child->parent = parent;
while (new_child->next != NULL)
new_child = new_child->next;
new_child->next = parent->child;
- if (parent->child)
+ if (parent->child)
parent->child->prev = new_child;
parent->child = child->child;
child->child->parent = parent;
child->crossings = NULL;
}
delete(child);
- }
-
+ }
+
}
// recursively walk the tree and clean memory
}
delete stat;
}
-
+
// copy extracted regions into the output vector
ERStat* ERFilterNM::er_save( ERStat *er, ERStat *parent, ERStat *prev )
{
-
+
regions->push_back(*er);
regions->back().parent = parent;
this_er->probability = 0; //TODO this makes sense in order to select at least one region in short trees, but is it really necessary?
this_er->max_probability_ancestor = this_er;
this_er->min_probability_ancestor = this_er;
- }
+ }
else
{
this_er->max_probability_ancestor = (this_er->probability > parent->max_probability_ancestor->probability)? this_er : parent->max_probability_ancestor;
// this_er->min_probability_ancestor->local_maxima = false;
this_er->max_probability_ancestor = this_er;
- this_er->min_probability_ancestor = this_er;
+ this_er->min_probability_ancestor = this_er;
}
}
}
-
+
for (ERStat * child = er->child; child; child = child->next)
{
old_prev = er_save(child, this_er, old_prev);
Mat src = image.getMat();
// assert correct image type
CV_Assert( src.type() == CV_8UC1 );
-
+
//Fill the region and calculate 2nd stage features
Mat region = region_mask(Rect(Point(stat->rect.x,stat->rect.y),Point(stat->rect.br().x+2,stat->rect.br().y+2)));
region = Scalar(0);
int newMaskVal = 255;
int flags = 4 + (newMaskVal << 8) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
Rect rect;
-
- floodFill( src(Rect(Point(stat->rect.x,stat->rect.y),Point(stat->rect.br().x,stat->rect.br().y))),
- region, Point(stat->pixel%src.cols - stat->rect.x, stat->pixel/src.cols - stat->rect.y),
+
+ floodFill( src(Rect(Point(stat->rect.x,stat->rect.y),Point(stat->rect.br().x,stat->rect.br().y))),
+ region, Point(stat->pixel%src.cols - stat->rect.x, stat->pixel/src.cols - stat->rect.y),
Scalar(255), &rect, Scalar(stat->level), Scalar(0), flags );
rect.width += 2;
rect.height += 2;
vector<Point> contour_poly;
vector<Vec4i> hierarchy;
findContours( region, contours, hierarchy, RETR_TREE, CHAIN_APPROX_NONE, Point(0, 0) );
- //TODO check epsilon parameter of approxPolyDP (set empirically) : we want more precission
+ //TODO check the epsilon parameter of approxPolyDP (set empirically): we want more precision
// if the region is very small, because otherwise we'll lose all the convexities
- approxPolyDP( Mat(contours[0]), contour_poly, max(rect.width,rect.height)/25, true );
+ approxPolyDP( Mat(contours[0]), contour_poly, max(rect.width,rect.height)/25, true );
bool was_convex = false;
if ( (classifier != NULL) && (stat->parent != NULL) )
{
stat->probability = classifier->eval(*stat);
- }
+ }
- if ( ( ((classifier != NULL)?(stat->probability >= minProbability):true) &&
- ((stat->area >= minArea*region_mask.rows*region_mask.cols) &&
- (stat->area <= maxArea*region_mask.rows*region_mask.cols)) ) ||
+ if ( ( ((classifier != NULL)?(stat->probability >= minProbability):true) &&
+ ((stat->area >= minArea*region_mask.rows*region_mask.cols) &&
+ (stat->area <= maxArea*region_mask.rows*region_mask.cols)) ) ||
(stat->parent == NULL) )
{
ERClassifierNM1::ERClassifierNM1()
{
- if (ifstream("./trained_classifierNM1.xml"))
+ if (ifstream("./trained_classifierNM1.xml"))
{
// The file with default classifier exists
boost.load("./trained_classifierNM1.xml", "boost");
- }
- else if (ifstream("./training/trained_classifierNM1.xml"))
+ }
+ else if (ifstream("./training/trained_classifierNM1.xml"))
{
// The file with default classifier exists
boost.load("./training/trained_classifierNM1.xml", "boost");
- }
- else
+ }
+ else
{
- // File not found
+ // File not found
CV_Error(CV_StsBadArg, "Default classifier ./trained_classifierNM1.xml not found!");
}
};
ERClassifierNM2::ERClassifierNM2()
{
- if (ifstream("./trained_classifierNM2.xml"))
+ if (ifstream("./trained_classifierNM2.xml"))
{
// The file with default classifier exists
boost.load("./trained_classifierNM2.xml", "boost");
- }
- else if (ifstream("./training/trained_classifierNM2.xml"))
+ }
+ else if (ifstream("./training/trained_classifierNM2.xml"))
{
// The file with default classifier exists
boost.load("./training/trained_classifierNM2.xml", "boost");
- }
- else
+ }
+ else
{
- // File not found
+ // File not found
CV_Error(CV_StsBadArg, "Default classifier ./trained_classifierNM2.xml not found!");
}
};
float arr[] = {0,(float)(stat.rect.width)/(stat.rect.height), // aspect ratio
sqrt((float)(stat.area))/stat.perimeter, // compactness
(float)(1-stat.euler), //number of holes
- stat.med_crossings, stat.hole_area_ratio,
+ stat.med_crossings, stat.hole_area_ratio,
stat.convex_hull_ratio, stat.num_inflexion_points};
vector<float> sample (arr, arr + sizeof(arr) / sizeof(arr[0]) );
/*!
Create an Extremal Region Filter for the 1st stage classifier of N&M algorithm
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
-
+
The component tree of the image is extracted by a threshold increased step by step
- from 0 to 255, incrementally computable descriptors (aspect_ratio, compactness,
- number of holes, and number of horizontal crossings) are computed for each ER
- and used as features for a classifier which estimates the class-conditional
- probability P(er|character). The value of P(er|character) is tracked using the inclusion
- relation of ER across all thresholds and only the ERs which correspond to local maximum
+ from 0 to 255, incrementally computable descriptors (aspect_ratio, compactness,
+ number of holes, and number of horizontal crossings) are computed for each ER
+ and used as features for a classifier which estimates the class-conditional
+ probability P(er|character). The value of P(er|character) is tracked using the inclusion
+ relation of ER across all thresholds and only the ERs which correspond to local maximum
of the probability P(er|character) are selected (if the local maximum of the
- probability is above a global limit pmin and the difference between local maximum and
+ probability is above a global limit pmin and the difference between local maximum and
local minimum is greater than minProbabilityDiff).
\param cb Callback with the classifier.
\param minArea The minimum area (% of image size) allowed for retrieved ERs
\param maxArea The maximum area (% of image size) allowed for retrieved ERs
\param minProbability The minimum probability P(er|character) allowed for retrieved ERs
- \param nonMaxSuppression Whenever non-maximum suppression is done over the branch probabilities
+ \param nonMaxSuppression Whether non-maximum suppression is done over the branch probabilities
\param minProbabilityDiff The minimum probability difference between local maxima and local minima ERs
*/
-Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb, int thresholdDelta,
- float minArea, float maxArea, float minProbability,
+Ptr<ERFilter> createERFilterNM1(const Ptr<ERFilter::Callback>& cb, int thresholdDelta,
+ float minArea, float maxArea, float minProbability,
bool nonMaxSuppression, float minProbabilityDiff)
{
CV_Assert( (minProbabilityDiff >= 0.) && (minProbabilityDiff <= 1.) );
Ptr<ERFilterNM> filter = new ERFilterNM();
-
+
if (cb == NULL)
filter->setCallback(new ERClassifierNM1());
else
Create an Extremal Region Filter for the 2nd stage classifier of N&M algorithm
Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
- In the second stage, the ERs that passed the first stage are classified into character
+ In the second stage, the ERs that passed the first stage are classified into character
and non-character classes using more informative but also more computationally expensive
- features. The classifier uses all the features calculated in the first stage and the following
+ features. The classifier uses all the features calculated in the first stage and the following
additional features: hole area ratio, convex hull ratio, and number of outer inflexion points.
\param cb Callback with the classifier
Ptr<ERFilterNM> filter = new ERFilterNM();
-
+
if (cb == NULL)
filter->setCallback(new ERClassifierNM2());
else
:param temp1: Convolution kernel, a single-channel floating point matrix. The size is not greater than the ``image`` size. The type is the same as ``image``.
:param result: The destination image
-
+
:param ccorr: Flags to evaluate cross-correlation instead of convolution.
-
+
:param buf: Optional buffer to avoid extra memory allocations and to adjust some specific parameters. See :ocv:struct:`ocl::ConvolveBuf`.
Convolves an image with the kernel. Supports only the CV_32FC1 data type and does not support ROI.
};
//! Returns the sorted result of all the elements in input based on equivalent keys.
//
- // The element unit in the values to be sorted is determined from the data type,
+ // The element unit in the values to be sorted is determined from the data type,
// i.e., a CV_32FC2 input {a1a2, b1b2} will be considered as two elements, regardless of its
// matrix dimension.
// both keys and values will be sorted inplace
temp = clCreateBuffer((cl_context)clCxt->oclContext(), CL_MEM_READ_WRITE,
(pitch * wholeSize.height + tail_padding - 1) / tail_padding * tail_padding, 0, &err);
openCLVerifyCall(err);
- openCLMemcpy2D(clCxt, temp, pitch, m.datastart, m.step,
+ openCLMemcpy2D(clCxt, temp, pitch, m.datastart, m.step,
wholeSize.width * m.elemSize(), wholeSize.height, clMemcpyHostToDevice, 3);
}
else{
openCLVerifyCall(err);
}
-
+
convert_C3C4(temp, *this);
openCLSafeCall(clReleaseMemObject(temp));
}
}
static void print_simplex_state(const Mat& c,const Mat& b,double v,const std::vector<int> N,const std::vector<int> B){
printf("\tprint simplex state\n");
-
+
printf("v=%g\n",v);
-
+
printf("here c goes\n");
print_matrix(c);
-
+
printf("non-basic: ");
print(Mat(N));
printf("\n");
-
+
printf("here b goes\n");
print_matrix(b);
printf("basic: ");
-
+
print(Mat(B));
printf("\n");
}
if(indexToRow[I]<N.size()){
dprintf(("I=%d from nonbasic\n",I));
int iterator_offset=indexToRow[I];
- c(0,iterator_offset)+=old_c(0,I);
+ c(0,iterator_offset)+=old_c(0,I);
print_matrix(c);
}else{
dprintf(("I=%d from basic\n",I));
}
}
-static inline void pivot(Mat_<double>& c,Mat_<double>& b,double& v,vector<int>& N,vector<int>& B,
+static inline void pivot(Mat_<double>& c,Mat_<double>& b,double& v,vector<int>& N,vector<int>& B,
int leaving_index,int entering_index,vector<unsigned int>& indexToRow){
double Coef=b(leaving_index,entering_index);
for(int i=0;i<b.cols;i++){
}
dprintf(("v was %g\n",v));
v+=Coef*b(leaving_index,b.cols-1);
-
+
SWAP(int,N[entering_index],B[leaving_index]);
SWAP(int,indexToRow[N[entering_index]],indexToRow[B[leaving_index]]);
}
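For reference, the update performed here matches the textbook simplex pivot (my reading of the code in CLRS-style notation, not a quote from it): with leaving row $l$ and entering column $e$,

    \hat{b}_{lj} = b_{lj} / b_{le}, \qquad
    \hat{b}_{ij} = b_{ij} - b_{ie}\,\hat{b}_{lj} \quad (i \neq l), \qquad
    \hat{c}_j = c_j - c_e\,\hat{b}_{lj}, \qquad
    \hat{v} = v + c_e\,\hat{b}_{l,\mathrm{last}}

where the last column of $b$ stores the constant terms.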
fs << "}";
-
+
delete [] leafs;
}
//--------------------------------------------------------------------------------------------------
-// A demo program of the Extremal Region Filter algorithm described in
+// A demo program of the Extremal Region Filter algorithm described in
// Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012
//--------------------------------------------------------------------------------------------------
void er_draw(Mat &src, Mat &dst, ERStat& er)
{
- if (er.parent != NULL) // deprecate the root region
+ if (er.parent != NULL) // skip the root region
{
int newMaskVal = 255;
int flags = 4 + (newMaskVal << 8) + FLOODFILL_FIXED_RANGE + FLOODFILL_MASK_ONLY;
}
}
-
+
int main(int argc, const char * argv[])
{
}
Mat grey(original.size(),CV_8UC1);
cvtColor(original,grey,COLOR_RGB2GRAY);
-
+
double t = (double)getTickCount();
-
+
// Build ER tree and filter with the 1st stage default classifier
Ptr<ERFilter> er_filter1 = createERFilterNM1();
-
+
er_filter1->run(grey, regions);
-
+
t = (double)getTickCount() - t;
cout << " --------------------------------------------------------------------------------------------------" << endl;
cout << "\t FIRST STAGE CLASSIFIER done in " << t * 1000. / getTickFrequency() << " ms." << endl;
}
t = (double)getTickCount();
-
+
// Default second stage classifier
Ptr<ERFilter> er_filter2 = createERFilterNM2();
er_filter2->run(grey, regions);
-
+
t = (double)getTickCount() - t;
cout << " --------------------------------------------------------------------------------------------------" << endl;
cout << "\t SECOND STAGE CLASSIFIER done in " << t * 1000. / getTickFrequency() << " ms." << endl;
int main(int argc, char** argv)
{
- if(argc != 2)
+ if(argc != 2)
{
help(argv);
return 1;
string first_file = argv[1];
VideoCapture sequence(first_file);
-
+
if (!sequence.isOpened())
{
cerr << "Failed to open the image sequence!\n" << endl;
return 1;
}
-
+
Mat image;
namedWindow("Image sequence | press ESC to close", 1);
-
+
for(;;)
{
// Read in image from sequence
sequence >> image;
-
+
// If no image was retrieved -> end of sequence
if(image.empty())
{
cout << "End of Sequence" << endl;
break;
}
-
+
imshow("Image sequence | press ESC to close", image);
if(waitKey(500) == 27)
cout << "press space to save a picture. q or esc to quit" << endl;
namedWindow(window_name, WINDOW_KEEPRATIO); //resizable window;
Mat frame;
-
+
for (;;) {
capture >> frame;
if (frame.empty())
break;
-
+
imshow(window_name, frame);
char key = (char)waitKey(30); //delay N millis, usually long enough to display and capture input
-
+
switch (key) {
case 'q':
case 'Q':
img1 = cv2.imread(fn1, 0)
img2 = cv2.imread(fn2, 0)
detector, matcher = init_feature(feature_name)
-
+
if img1 is None:
print 'Failed to load fn1:', fn1
sys.exit(1)
-
+
if img2 is None:
print 'Failed to load fn2:', fn2
sys.exit(1)
-
+
if detector is None:
print 'unknown feature:', feature_name
sys.exit(1)
-
+
print 'using', feature_name
pool=ThreadPool(processes = cv2.getNumberOfCPUs())
vis[:] = prob[...,np.newaxis]
try:
cv2.ellipse(vis, track_box, (0, 0, 255), 2)
- except:
+ except:
print track_box
cv2.imshow('camshift', vis)
import sys
try:
video_src = sys.argv[1]
- except:
+ except:
video_src = 0
print __doc__
App(video_src).run()
if __name__ == '__main__':
import sys
- try:
+ try:
fn = sys.argv[1]
- except:
+ except:
fn = '../cpp/baboon.jpg'
src = cv2.imread(fn)
count = tk.IntVar()
while True:
match_index = text.search(pattern, 'matchPos', count=count, regexp=regexp, stopindex='end')
- if not match_index:
+ if not match_index:
break
end_index = text.index( "%s+%sc" % (match_index, count.get()) )
text.mark_set('matchPos', end_index)
if img1 is None:
print 'Failed to load fn1:', fn1
sys.exit(1)
-
+
if img2 is None:
print 'Failed to load fn2:', fn2
sys.exit(1)
-
+
if detector is None:
print 'unknown feature:', feature_name
sys.exit(1)
-
+
print 'using', feature_name
kp1, desc1 = detector.detectAndCompute(img1, None)
print __doc__
try:
img_fn = sys.argv[1]
- except:
+ except:
img_fn = '../cpp/baboon.jpg'
img = cv2.imread(img_fn)
if img is None:
print 'Failed to load image file:', img_fn
sys.exit(1)
-
+
filters = build_filters()
with Timer('running single-threaded'):
print "usage : python hist.py <image_file>"
im = cv2.imread(fname)
-
+
if im is None:
print 'Failed to load image file:', fname
sys.exit(1)
'''
This example illustrates how to use cv2.HoughCircles() function.
-Usage: ./houghcircles.py [<image_name>]
+Usage: ./houghcircles.py [<image_name>]
image argument defaults to ../cpp/board.jpg
'''
import sys
try:
fn = sys.argv[1]
- except:
+ except:
fn = '../cpp/fruits.jpg'
-
+
print __doc__
img = cv2.imread(fn)
if img is None:
print 'Failed to load image file:', fn
sys.exit(1)
-
+
img_mark = img.copy()
mark = np.zeros(img.shape[:2], np.uint8)
sketch = Sketcher('img', [img_mark, mark], lambda : ((255, 255, 255), 255))
fn = sys.argv[1]
except:
fn = '../cpp/baboon.jpg'
-
+
img = cv2.imread(fn)
-
+
if img is None:
print 'Failed to load image file:', fn
sys.exit(1)
-
+
cv2.imshow('original', img)
modes = cycle(['erode/dilate', 'open/close', 'blackhat/tophat', 'gradient'])