//! smooths the image using the median filter.
CV_EXPORTS_W void medianBlur( const Mat& src, CV_OUT Mat& dst, int ksize );
//! smooths the image using the Gaussian filter.
-CV_EXPORTS_W void GaussianBlur( const Mat& src, CV_OUT Mat& dst, Size ksize,
+CV_EXPORTS_AS(gaussianBlur) void GaussianBlur( const Mat& src, CV_OUT Mat& dst, Size ksize,
double sigma1, double sigma2=0,
int borderType=BORDER_DEFAULT );
//! smooths the image using the bilateral filter
double delta=0, int borderType=BORDER_DEFAULT );
//! applies the generalized Sobel operator to the image
-CV_EXPORTS_W void Sobel( const Mat& src, CV_OUT Mat& dst, int ddepth,
+CV_EXPORTS_AS(sobel) void Sobel( const Mat& src, CV_OUT Mat& dst, int ddepth,
int dx, int dy, int ksize=3,
double scale=1, double delta=0,
int borderType=BORDER_DEFAULT );
//! applies the vertical or horizontal Scharr operator to the image
-CV_EXPORTS_W void Scharr( const Mat& src, CV_OUT Mat& dst, int ddepth,
+CV_EXPORTS_AS(scharr) void Scharr( const Mat& src, CV_OUT Mat& dst, int ddepth,
int dx, int dy, double scale=1, double delta=0,
int borderType=BORDER_DEFAULT );
//! applies the Laplacian operator to the image
-CV_EXPORTS_W void Laplacian( const Mat& src, CV_OUT Mat& dst, int ddepth,
+CV_EXPORTS_AS(laplacian) void Laplacian( const Mat& src, CV_OUT Mat& dst, int ddepth,
int ksize=1, double scale=1, double delta=0,
int borderType=BORDER_DEFAULT );
//! applies the Canny edge detector and produces the edge map.
-CV_EXPORTS_W void Canny( const Mat& image, CV_OUT Mat& edges,
+CV_EXPORTS_AS(canny) void Canny( const Mat& image, CV_OUT Mat& edges,
double threshold1, double threshold2,
int apertureSize=3, bool L2gradient=false );
bool useHarrisDetector=false, double k=0.04 );
//! finds lines in the black-n-white image using the standard or pyramid Hough transform
-CV_EXPORTS_W void HoughLines( const Mat& image, CV_OUT vector<Vec2f>& lines,
+CV_EXPORTS_AS(houghLines) void HoughLines( const Mat& image, CV_OUT vector<Vec2f>& lines,
double rho, double theta, int threshold,
double srn=0, double stn=0 );
//! finds line segments in the black-n-white image using the probabilistic Hough transform
-CV_EXPORTS_W void HoughLinesP( Mat& image, CV_OUT vector<Vec4i>& lines,
+CV_EXPORTS_AS(houghLinesP) void HoughLinesP( Mat& image, CV_OUT vector<Vec4i>& lines,
double rho, double theta, int threshold,
double minLineLength=0, double maxLineGap=0 );
//! finds circles in the grayscale image using 2+1 gradient Hough transform
-CV_EXPORTS_W void HoughCircles( const Mat& image, CV_OUT vector<Vec3f>& circles,
+CV_EXPORTS_AS(houghCircles) void HoughCircles( const Mat& image, CV_OUT vector<Vec3f>& circles,
int method, double dp, double minDist,
double param1=100, double param2=100,
int minRadius=0, int maxRadius=0 );
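CV_EXPORTS_AS(name) asks the Python wrapper generator to publish the function under the given alias rather than its C++ name. A minimal sketch of the resulting call sites, assuming the bindings are rebuilt into the cv2 module (the input array is a made-up placeholder):

import numpy as np
import cv2  # assumes a build that includes this patch

img = np.zeros((64, 64), np.uint8)            # dummy 8-bit single-channel image
# CV_EXPORTS_W kept the C++ names: cv2.GaussianBlur, cv2.Canny, ...
# CV_EXPORTS_AS(...) exports the lowercase aliases declared above instead:
blurred = cv2.gaussianBlur(img, (5, 5), 1.5)  # src, ksize, sigma1; sigma2/borderType default
edges = cv2.canny(img, 50.0, 150.0)           # image, threshold1, threshold2; edges is returned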
namespace cv
{
+template<> CV_EXPORTS void Ptr<CvDTreeSplit>::delete_obj()
+{
+ fastFree(obj);
+}
+
DTreeBestSplitFinder::DTreeBestSplitFinder( CvDTree* _tree, CvDTreeNode* _node)
{
tree = _tree;
node = _node;
splitSize = tree->get_data()->split_heap->elem_size;
- bestSplit = (CvDTreeSplit*)(new char[splitSize]);
+ bestSplit = (CvDTreeSplit*)fastMalloc(splitSize);
memset((CvDTreeSplit*)bestSplit, 0, splitSize);
bestSplit->quality = -1;
bestSplit->condensed_idx = INT_MIN;
- split = (CvDTreeSplit*)(new char[splitSize]);
+ split = (CvDTreeSplit*)fastMalloc(splitSize);
memset((CvDTreeSplit*)split, 0, splitSize);
//haveSplit = false;
}
node = finder.node;
splitSize = tree->get_data()->split_heap->elem_size;
- bestSplit = (CvDTreeSplit*)(new char[splitSize]);
+ bestSplit = (CvDTreeSplit*)fastMalloc(splitSize);
memcpy((CvDTreeSplit*)(bestSplit), (const CvDTreeSplit*)finder.bestSplit, splitSize);
- split = (CvDTreeSplit*)(new char[splitSize]);
+ split = (CvDTreeSplit*)fastMalloc(splitSize);
memset((CvDTreeSplit*)split, 0, splitSize);
}
sys.exit(-1)
amapping = simple_argtype_mapping.get(tp, (tp, "O", defval0))
- all_cargs.append(amapping)
+ parse_name = a.name
if a.py_inputarg:
if amapping[1] == "O":
code_decl += " PyObject* pyobj_%s = NULL;\n" % (a.name,)
- parse_arglist.append("pyobj_" + a.name)
+ parse_name = "pyobj_" + a.name
code_cvt_list.append("pyopencv_to(pyobj_%s, %s)" % (a.name, a.name))
- else:
- parse_arglist.append(a.name)
-
+
+ all_cargs.append([amapping, parse_name])
+
defval = a.defval
if not defval:
defval = amapping[2]
if v.args:
# form the format spec for PyArg_ParseTupleAndKeywords
- fmtspec = "".join([all_cargs[argno][1] for aname, argno in v.py_arglist])
+ fmtspec = "".join([all_cargs[argno][0][1] for aname, argno in v.py_arglist])
if v.py_noptargs > 0:
fmtspec = fmtspec[:-v.py_noptargs] + "|" + fmtspec[-v.py_noptargs:]
fmtspec += ":" + fullname
code_parse = gen_template_parse_args.substitute(
kw_list = ", ".join(['"' + aname + '"' for aname, argno in v.py_arglist]),
fmtspec = fmtspec,
- parse_arglist = ", ".join(["&" + aname for aname in parse_arglist]),
+ parse_arglist = ", ".join(["&" + all_cargs[argno][1] for aname, argno in v.py_arglist]),
code_cvt = " &&\n ".join(code_cvt_list))
else:
code_parse = "if(PyObject_Size(args) == 0 && PyObject_Size(kw) == 0)"
fmtspec = "N"*len(v.py_outlist)
backcvt_arg_list = []
for aname, argno in v.py_outlist:
- amapping = all_cargs[argno]
+ amapping = all_cargs[argno][0]
backcvt_arg_list.append("%s(%s)" % (amapping[2], aname))
code_ret = "return Py_BuildValue(\"(%s)\", %s)" % \
(fmtspec, ", ".join(["pyopencv_from(" + aname + ")" for aname, argno in v.py_outlist]))