CV_EXPORTS void getGpuMemInfo(size_t& free, size_t& total);\r
\r
//////////////////////////////// Error handling ////////////////////////\r
- \r
+\r
CV_EXPORTS void error(const char *error_string, const char *file, const int line, const char *func);\r
CV_EXPORTS void nppError( int err, const char *file, const int line, const char *func); \r
\r
public:\r
//! default constructor\r
GpuMat();\r
- //! constructs GpuMatrix of the specified size and type\r
- // (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.)\r
- GpuMat(int _rows, int _cols, int _type);\r
- GpuMat(Size _size, int _type);\r
+ //! constructs GpuMatrix of the specified size and type (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.)\r
+ GpuMat(int rows, int cols, int type);\r
+ GpuMat(Size size, int type);\r
    //! constructs GpuMatrix and fills it with the specified value s.
- GpuMat(int _rows, int _cols, int _type, const Scalar& _s);\r
- GpuMat(Size _size, int _type, const Scalar& _s);\r
+ GpuMat(int rows, int cols, int type, const Scalar& s);\r
+ GpuMat(Size size, int type, const Scalar& s);\r
//! copy constructor\r
GpuMat(const GpuMat& m);\r
\r
//! constructor for GpuMatrix headers pointing to user-allocated data\r
- GpuMat(int _rows, int _cols, int _type, void* _data, size_t _step = Mat::AUTO_STEP);\r
- GpuMat(Size _size, int _type, void* _data, size_t _step = Mat::AUTO_STEP);\r
+ GpuMat(int rows, int cols, int type, void* data, size_t step = Mat::AUTO_STEP);\r
+ GpuMat(Size size, int type, void* data, size_t step = Mat::AUTO_STEP);\r
\r
//! creates a matrix header for a part of the bigger matrix\r
GpuMat(const GpuMat& m, const Range& rowRange, const Range& colRange);\r
template <class T> operator DevMem2D_<T>() const;\r
template <class T> operator PtrStep_<T>() const;\r
\r
- //! pefroms blocking upload data to GpuMat. .\r
+    //! performs blocking upload of data to GpuMat.
void upload(const cv::Mat& m);\r
\r
//! upload async\r
//! sets every GpuMatrix element to s\r
GpuMat& operator = (const Scalar& s);\r
//! sets some of the GpuMatrix elements to s, according to the mask\r
- GpuMat& setTo(const Scalar& s, const GpuMat& mask=GpuMat());\r
+ GpuMat& setTo(const Scalar& s, const GpuMat& mask = GpuMat());\r
//! creates alternative GpuMatrix header for the same data, with different\r
// number of channels and/or different number of rows. see cvReshape.\r
- GpuMat reshape(int _cn, int _rows=0) const;\r
+ GpuMat reshape(int cn, int rows = 0) const;\r
\r
//! allocates new GpuMatrix data unless the GpuMatrix already has specified size and type.\r
// previous data is unreferenced if needed.\r
- void create(int _rows, int _cols, int _type);\r
- void create(Size _size, int _type);\r
+ void create(int rows, int cols, int type);\r
+ void create(Size size, int type);\r
//! decreases reference counter;\r
// deallocate the data when reference counter reaches 0.\r
void release();\r
bool empty() const;\r
\r
//! returns pointer to y-th row\r
- uchar* ptr(int y=0);\r
- const uchar* ptr(int y=0) const;\r
+ uchar* ptr(int y = 0);\r
+ const uchar* ptr(int y = 0) const;\r
\r
//! template version of the above method\r
- template<typename _Tp> _Tp* ptr(int y=0);\r
- template<typename _Tp> const _Tp* ptr(int y=0) const;\r
+ template<typename _Tp> _Tp* ptr(int y = 0);\r
+ template<typename _Tp> const _Tp* ptr(int y = 0) const;\r
\r
//! matrix transposition\r
GpuMat t() const;\r
uchar* dataend;\r
};\r
\r
+//#define TemplatedGpuMat // experimental now, deprecated to use\r
+#ifdef TemplatedGpuMat\r
+ #include "GpuMat_BetaDeprecated.hpp"\r
+#endif\r
+\r
//////////////////////////////// CudaMem ////////////////////////////////\r
// CudaMem is limited cv::Mat with page locked memory allocation.\r
// Page locked memory is only needed for async and faster copying to GPU.
CudaMem();\r
CudaMem(const CudaMem& m);\r
\r
- CudaMem(int _rows, int _cols, int _type, int _alloc_type = ALLOC_PAGE_LOCKED);\r
- CudaMem(Size _size, int _type, int _alloc_type = ALLOC_PAGE_LOCKED);\r
+    CudaMem(int rows, int cols, int type, int alloc_type = ALLOC_PAGE_LOCKED);
+ CudaMem(Size size, int type, int alloc_type = ALLOC_PAGE_LOCKED);\r
\r
\r
    //! creates from cv::Mat with copying data
- explicit CudaMem(const Mat& m, int _alloc_type = ALLOC_PAGE_LOCKED);\r
+ explicit CudaMem(const Mat& m, int alloc_type = ALLOC_PAGE_LOCKED);\r
\r
~CudaMem();\r
\r
CudaMem clone() const;\r
\r
//! allocates new matrix data unless the matrix already has specified size and type.\r
- void create(int _rows, int _cols, int _type, int _alloc_type = ALLOC_PAGE_LOCKED);\r
- void create(Size _size, int _type, int _alloc_type = ALLOC_PAGE_LOCKED);\r
+ void create(int rows, int cols, int type, int alloc_type = ALLOC_PAGE_LOCKED);\r
+ void create(Size size, int type, int alloc_type = ALLOC_PAGE_LOCKED);\r
\r
//! decrements reference counter and released memory if needed.\r
void release();\r
CV_EXPORTS void add(const GpuMat& a, const Scalar& sc, GpuMat& c);\r
//! subtracts one matrix from another (c = a - b)\r
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types\r
- CV_EXPORTS void subtract(const GpuMat& a, const GpuMat& b, GpuMat& c);\r
+ CV_EXPORTS void subtract(const GpuMat& a, const GpuMat& b, GpuMat& c);\r
//! subtracts scalar from a matrix (c = a - s)\r
//! supports CV_32FC1 and CV_32FC2 type\r
CV_EXPORTS void subtract(const GpuMat& a, const Scalar& sc, GpuMat& c);\r
//! computes element-wise product of the two arrays (c = a * b)\r
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types\r
- CV_EXPORTS void multiply(const GpuMat& a, const GpuMat& b, GpuMat& c);\r
+ CV_EXPORTS void multiply(const GpuMat& a, const GpuMat& b, GpuMat& c);\r
//! multiplies matrix to a scalar (c = a * s)\r
//! supports CV_32FC1 and CV_32FC2 type\r
CV_EXPORTS void multiply(const GpuMat& a, const Scalar& sc, GpuMat& c);\r
//! computes element-wise quotient of the two arrays (c = a / b)\r
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types\r
- CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c);\r
+ CV_EXPORTS void divide(const GpuMat& a, const GpuMat& b, GpuMat& c);\r
//! computes element-wise quotient of matrix and scalar (c = a / s)\r
//! supports CV_32FC1 and CV_32FC2 type\r
CV_EXPORTS void divide(const GpuMat& a, const Scalar& sc, GpuMat& c);\r
\r
//! transposes the matrix\r
//! supports only CV_8UC1 type\r
- CV_EXPORTS void transpose(const GpuMat& src1, GpuMat& dst);\r
+ CV_EXPORTS void transpose(const GpuMat& src1, GpuMat& dst);\r
\r
//! computes element-wise absolute difference of two arrays (c = abs(a - b))\r
//! supports CV_8UC1, CV_8UC4, CV_32SC1, CV_32FC1 types\r
- CV_EXPORTS void absdiff(const GpuMat& a, const GpuMat& b, GpuMat& c);\r
+ CV_EXPORTS void absdiff(const GpuMat& a, const GpuMat& b, GpuMat& c);\r
//! computes element-wise absolute difference of array and scalar (c = abs(a - s))\r
//! supports only CV_32FC1 type\r
CV_EXPORTS void absdiff(const GpuMat& a, const Scalar& s, GpuMat& c);\r
//! supports NORM_INF, NORM_L1, NORM_L2\r
//! supports only CV_8UC1 type\r
CV_EXPORTS double norm(const GpuMat& src1, int normType=NORM_L2);\r
- \r
+\r
//! computes norm of the difference between two arrays\r
//! supports NORM_INF, NORM_L1, NORM_L2\r
//! supports only CV_8UC1 type\r
//! computes exponent of each matrix element (b = e**a)\r
//! supports only CV_32FC1 type\r
CV_EXPORTS void exp(const GpuMat& a, GpuMat& b);\r
- \r
+\r
//! computes natural logarithm of absolute value of each matrix element: b = log(abs(a))\r
//! supports only CV_32FC1 type\r
CV_EXPORTS void log(const GpuMat& a, GpuMat& b);\r
CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude);\r
//! Async version
CV_EXPORTS void magnitude(const GpuMat& x, const GpuMat& y, GpuMat& magnitude, const Stream& stream);\r
- \r
+\r
//! computes squared magnitude of each (x(i), y(i)) vector\r
//! supports only floating-point source\r
CV_EXPORTS void magnitudeSqr(const GpuMat& x, const GpuMat& y, GpuMat& magnitude);\r
CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees = false);\r
//! Async version
CV_EXPORTS void polarToCart(const GpuMat& magnitude, const GpuMat& angle, GpuMat& x, GpuMat& y, bool angleInDegrees, const Stream& stream);\r
- \r
+\r
////////////////////////////// Image processing //////////////////////////////\r
\r
//! DST[x,y] = SRC[xmap[x,y],ymap[x,y]] with bilinear interpolation.\r
//! Supports INTER_NEAREST, INTER_LINEAR\r
//! supports CV_8UC1, CV_8UC4 types\r
CV_EXPORTS void resize(const GpuMat& src, GpuMat& dst, Size dsize, double fx=0, double fy=0, int interpolation = INTER_LINEAR);\r
- \r
+\r
//! warps the image using affine transformation\r
//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC\r
CV_EXPORTS void warpAffine(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR);\r
//! warps the image using perspective transformation\r
//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC\r
CV_EXPORTS void warpPerspective(const GpuMat& src, GpuMat& dst, const Mat& M, Size dsize, int flags = INTER_LINEAR);\r
- \r
+\r
//! rotate 8bit single or four channel image\r
//! Supports INTER_NEAREST, INTER_LINEAR, INTER_CUBIC\r
//! supports CV_8UC1, CV_8UC4 types\r
CV_EXPORTS void rotate(const GpuMat& src, GpuMat& dst, Size dsize, double angle, double xShift = 0, double yShift = 0, int interpolation = INTER_LINEAR);\r
- \r
+\r
//! copies 2D array to a larger destination array and pads borders with user-specifiable constant\r
//! supports CV_8UC1, CV_8UC4, CV_32SC1 types\r
CV_EXPORTS void copyMakeBorder(const GpuMat& src, GpuMat& dst, int top, int bottom, int left, int right, const Scalar& value = Scalar());\r
- \r
+\r
//! computes the integral image and integral for the squared image\r
//! sum will have CV_32S type, sqsum - CV32F type\r
//! supports only CV_8UC1 source type\r
//////////////////////////////// Filter Engine ////////////////////////////////\r
\r
/*!\r
- The Base Class for 1D or Row-wise Filters\r
- \r
- This is the base class for linear or non-linear filters that process 1D data.\r
- In particular, such filters are used for the "horizontal" filtering parts in separable filters.\r
- */\r
+ The Base Class for 1D or Row-wise Filters\r
+\r
+ This is the base class for linear or non-linear filters that process 1D data.\r
+ In particular, such filters are used for the "horizontal" filtering parts in separable filters.\r
+ */\r
class CV_EXPORTS BaseRowFilter_GPU\r
{\r
public:\r
};\r
\r
/*!\r
- The Base Class for Column-wise Filters\r
- \r
- This is the base class for linear or non-linear filters that process columns of 2D arrays.\r
- Such filters are used for the "vertical" filtering parts in separable filters.\r
- */ \r
+ The Base Class for Column-wise Filters\r
+\r
+ This is the base class for linear or non-linear filters that process columns of 2D arrays.\r
+ Such filters are used for the "vertical" filtering parts in separable filters.\r
+ */ \r
class CV_EXPORTS BaseColumnFilter_GPU\r
{\r
public:\r
};\r
\r
/*!\r
- The Base Class for Non-Separable 2D Filters.\r
- \r
- This is the base class for linear or non-linear 2D filters.\r
- */ \r
+ The Base Class for Non-Separable 2D Filters.\r
+\r
+ This is the base class for linear or non-linear 2D filters.\r
+ */ \r
class CV_EXPORTS BaseFilter_GPU\r
{\r
public:\r
};\r
\r
/*!\r
- The Base Class for Filter Engine.\r
- \r
- The class can be used to apply an arbitrary filtering operation to an image.\r
- It contains all the necessary intermediate buffers.\r
+ The Base Class for Filter Engine.\r
+\r
+ The class can be used to apply an arbitrary filtering operation to an image.\r
+ It contains all the necessary intermediate buffers.\r
*/\r
class CV_EXPORTS FilterEngine_GPU\r
{\r
\r
//! a synonym for normalized box filter\r
static inline void blur(const GpuMat& src, GpuMat& dst, Size ksize, Point anchor = Point(-1,-1)) { boxFilter(src, dst, -1, ksize, anchor); }\r
- \r
+\r
//! erodes the image (applies the local minimum operator)\r
CV_EXPORTS void erode( const GpuMat& src, GpuMat& dst, const Mat& kernel, Point anchor = Point(-1, -1), int iterations = 1);\r
\r
\r
//! applies the vertical or horizontal Scharr operator to the image\r
CV_EXPORTS void Scharr(const GpuMat& src, GpuMat& dst, int ddepth, int dx, int dy, double scale = 1);\r
- \r
+\r
//! smooths the image using Gaussian filter.\r
CV_EXPORTS void GaussianBlur(const GpuMat& src, GpuMat& dst, Size ksize, double sigma1, double sigma2 = 0);\r
\r
\r
//! the default constructor\r
explicit StereoBeliefPropagation(int ndisp = DEFAULT_NDISP,\r
- int iters = DEFAULT_ITERS,\r
- int levels = DEFAULT_LEVELS,\r
- int msg_type = CV_32F);\r
+ int iters = DEFAULT_ITERS,\r
+ int levels = DEFAULT_LEVELS,\r
+ int msg_type = CV_32F);\r
\r
//! the full constructor taking the number of disparities, number of BP iterations on each level,\r
//! number of levels, truncation of data cost, data weight,\r
//! DiscTerm = min(disc_single_jump * fabs(f1-f2), max_disc_term)\r
//! please see paper for more details\r
StereoBeliefPropagation(int ndisp, int iters, int levels,\r
- float max_data_term, float data_weight,\r
- float max_disc_term, float disc_single_jump,\r
- int msg_type = CV_32F);\r
+ float max_data_term, float data_weight,\r
+ float max_disc_term, float disc_single_jump,\r
+ int msg_type = CV_32F);\r
\r
//! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair,\r
//! if disparity is empty output type will be CV_16S else output type will be disparity.type().\r
\r
//! the default constructor\r
explicit StereoConstantSpaceBP(int ndisp = DEFAULT_NDISP,\r
- int iters = DEFAULT_ITERS,\r
- int levels = DEFAULT_LEVELS,\r
- int nr_plane = DEFAULT_NR_PLANE,\r
- int msg_type = CV_32F);\r
+ int iters = DEFAULT_ITERS,\r
+ int levels = DEFAULT_LEVELS,\r
+ int nr_plane = DEFAULT_NR_PLANE,\r
+ int msg_type = CV_32F);\r
\r
//! the full constructor taking the number of disparities, number of BP iterations on each level,\r
//! number of levels, number of active disparity on the first level, truncation of data cost, data weight,\r
//! truncation of discontinuity cost, discontinuity single jump and minimum disparity threshold\r
StereoConstantSpaceBP(int ndisp, int iters, int levels, int nr_plane,\r
- float max_data_term, float data_weight, float max_disc_term, float disc_single_jump,\r
- int min_disp_th = 0,\r
- int msg_type = CV_32F);\r
+ float max_data_term, float data_weight, float max_disc_term, float disc_single_jump,\r
+ int min_disp_th = 0,\r
+ int msg_type = CV_32F);\r
\r
//! the stereo correspondence operator. Finds the disparity for the specified rectified stereo pair,\r
//! if disparity is empty output type will be CV_16S else output type will be disparity.type().\r
\r
namespace \r
{\r
+ #define error_entry(entry) { entry, #entry }\r
+\r
struct NppError\r
{\r
int error;\r
string str;\r
} \r
+ \r
npp_errors [] = \r
{\r
- { NPP_NOT_SUPPORTED_MODE_ERROR, "NPP_NOT_SUPPORTED_MODE_ERROR" },\r
- { NPP_ROUND_MODE_NOT_SUPPORTED_ERROR, "NPP_ROUND_MODE_NOT_SUPPORTED_ERROR" },\r
- { NPP_RESIZE_NO_OPERATION_ERROR, "NPP_RESIZE_NO_OPERATION_ERROR" },\r
- { NPP_BAD_ARG_ERROR, "NPP_BAD_ARG_ERROR" },\r
- { NPP_LUT_NUMBER_OF_LEVELS_ERROR, "NPP_LUT_NUMBER_OF_LEVELS_ERROR" },\r
- { NPP_TEXTURE_BIND_ERROR, "NPP_TEXTURE_BIND_ERROR" },\r
- { NPP_COEFF_ERROR, "NPP_COEFF_ERROR" },\r
- { NPP_RECT_ERROR, "NPP_RECT_ERROR" },\r
- { NPP_QUAD_ERROR, "NPP_QUAD_ERROR" },\r
- { NPP_WRONG_INTERSECTION_ROI_ERROR, "NPP_WRONG_INTERSECTION_ROI_ERROR" },\r
- { NPP_NOT_EVEN_STEP_ERROR, "NPP_NOT_EVEN_STEP_ERROR" },\r
- { NPP_INTERPOLATION_ERROR, "NPP_INTERPOLATION_ERROR" },\r
- { NPP_RESIZE_FACTOR_ERROR, "NPP_RESIZE_FACTOR_ERROR" },\r
- { NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR, "NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR" },\r
- { NPP_MEMFREE_ERR, "NPP_MEMFREE_ERR" },\r
- { NPP_MEMSET_ERR, "NPP_MEMSET_ERR" },\r
- { NPP_MEMCPY_ERROR, "NPP_MEMCPY_ERROR" },\r
- { NPP_MEM_ALLOC_ERR, "NPP_MEM_ALLOC_ERR" },\r
- { NPP_HISTO_NUMBER_OF_LEVELS_ERROR, "NPP_HISTO_NUMBER_OF_LEVELS_ERROR" },\r
- { NPP_MIRROR_FLIP_ERR, "NPP_MIRROR_FLIP_ERR" },\r
- { NPP_INVALID_INPUT, "NPP_INVALID_INPUT" },\r
- { NPP_ALIGNMENT_ERROR, "NPP_ALIGNMENT_ERROR" },\r
- { NPP_STEP_ERROR, "NPP_STEP_ERROR" },\r
- { NPP_SIZE_ERROR, "NPP_SIZE_ERROR" },\r
- { NPP_POINTER_ERROR, "NPP_POINTER_ERROR" },\r
- { NPP_NULL_POINTER_ERROR, "NPP_NULL_POINTER_ERROR" },\r
- { NPP_CUDA_KERNEL_EXECUTION_ERROR, "NPP_CUDA_KERNEL_EXECUTION_ERROR" },\r
- { NPP_NOT_IMPLEMENTED_ERROR, "NPP_NOT_IMPLEMENTED_ERROR" },\r
- { NPP_ERROR, "NPP_ERROR" }, \r
- { NPP_NO_ERROR, "NPP_NO_ERROR" },\r
- { NPP_SUCCESS, "NPP_SUCCESS" },\r
- { NPP_WARNING, "NPP_WARNING" },\r
- { NPP_WRONG_INTERSECTION_QUAD_WARNING, "NPP_WRONG_INTERSECTION_QUAD_WARNING" },\r
- { NPP_MISALIGNED_DST_ROI_WARNING, "NPP_MISALIGNED_DST_ROI_WARNING" },\r
- { NPP_AFFINE_QUAD_INCORRECT_WARNING, "NPP_AFFINE_QUAD_INCORRECT_WARNING" },\r
- //disabled in NPP for cuda 3.2-rc\r
- //{ NPP_AFFINE_QUAD_CHANGED_WARNING, "NPP_AFFINE_QUAD_CHANGED_WARNING" },\r
- //{ NPP_ADJUSTED_ROI_SIZE_WARNING, "NPP_ADJUSTED_ROI_SIZE_WARNING" },\r
- { NPP_DOUBLE_SIZE_WARNING, "NPP_DOUBLE_SIZE_WARNING" },\r
- { NPP_ODD_ROI_WARNING, "NPP_ODD_ROI_WARNING" }\r
+ error_entry( NPP_NOT_SUPPORTED_MODE_ERROR ),\r
+ error_entry( NPP_ROUND_MODE_NOT_SUPPORTED_ERROR ),\r
+ error_entry( NPP_RESIZE_NO_OPERATION_ERROR ),\r
+ error_entry( NPP_NOT_SUFFICIENT_COMPUTE_CAPABILITY ),\r
+ error_entry( NPP_BAD_ARG_ERROR ),\r
+ error_entry( NPP_LUT_NUMBER_OF_LEVELS_ERROR ),\r
+ error_entry( NPP_TEXTURE_BIND_ERROR ),\r
+ error_entry( NPP_COEFF_ERROR ),\r
+ error_entry( NPP_RECT_ERROR ),\r
+ error_entry( NPP_QUAD_ERROR ),\r
+ error_entry( NPP_WRONG_INTERSECTION_ROI_ERROR ),\r
+ error_entry( NPP_NOT_EVEN_STEP_ERROR ),\r
+ error_entry( NPP_INTERPOLATION_ERROR ),\r
+ error_entry( NPP_RESIZE_FACTOR_ERROR ),\r
+ error_entry( NPP_HAAR_CLASSIFIER_PIXEL_MATCH_ERROR ),\r
+ error_entry( NPP_MEMFREE_ERR ),\r
+ error_entry( NPP_MEMSET_ERR ),\r
+ error_entry( NPP_MEMCPY_ERROR ),\r
+ error_entry( NPP_MEM_ALLOC_ERR ),\r
+ error_entry( NPP_HISTO_NUMBER_OF_LEVELS_ERROR ),\r
+ error_entry( NPP_MIRROR_FLIP_ERR ),\r
+ error_entry( NPP_INVALID_INPUT ),\r
+ error_entry( NPP_ALIGNMENT_ERROR ),\r
+ error_entry( NPP_STEP_ERROR ),\r
+ error_entry( NPP_SIZE_ERROR ),\r
+ error_entry( NPP_POINTER_ERROR ),\r
+ error_entry( NPP_NULL_POINTER_ERROR ),\r
+ error_entry( NPP_CUDA_KERNEL_EXECUTION_ERROR ),\r
+ error_entry( NPP_NOT_IMPLEMENTED_ERROR ),\r
+ error_entry( NPP_ERROR ),\r
+ error_entry( NPP_NO_ERROR ),\r
+ error_entry( NPP_SUCCESS ),\r
+ error_entry( NPP_WARNING ),\r
+ error_entry( NPP_WRONG_INTERSECTION_QUAD_WARNING ),\r
+ error_entry( NPP_MISALIGNED_DST_ROI_WARNING ),\r
+ error_entry( NPP_AFFINE_QUAD_INCORRECT_WARNING ),\r
+ error_entry( NPP_DOUBLE_SIZE_WARNING ),\r
+ error_entry( NPP_ODD_ROI_WARNING )\r
};\r
\r
int error_num = sizeof(npp_errors)/sizeof(npp_errors[0]);\r
\r
void error(const char *error_string, const char *file, const int line, const char *func)\r
{ \r
- //if (uncaught_exception())\r
- cv::error( cv::Exception(CV_GpuApiCallError, error_string, func, file, line) );\r
+ int code = CV_GpuApiCallError;\r
+\r
+ if (std::uncaught_exception())\r
+ {\r
+ const char* errorStr = cvErrorStr(code); \r
+ const char* function = func ? func : "unknown function"; \r
+\r
+ std::cerr << "OpenCV Error: " << errorStr << "(" << error_string << ") in " << function << ", file " << file << ", line " << line;\r
+ std::cerr.flush(); \r
+ }\r
+ else \r
+ cv::error( cv::Exception(code, error_string, func, file, line) );\r
}\r
}\r
}\r