void run_func();
void prepare_to_validation( int );
- double sampson_error(const double* f, double x1, double y1, double x2, double y2);
+ double sampson_error(const double* f, double x1, double y1, double x2, double y2);
int method;
int img_size;
int pt_count = MAX(5, cvRound(exp(pt_count_exp)));
dims = cvtest::randInt(rng) % 2 + 2;
- dims = 2;
+ dims = 2;
method = CV_LMEDS << (cvtest::randInt(rng) % 2);
-
types[INPUT][0] = CV_MAKETYPE(pt_depth, 1);
sizes[OUTPUT][0] = sizes[REF_OUTPUT][0] = cvSize(3,1);
types[OUTPUT][0] = types[REF_OUTPUT][0] = CV_64FC1;
sizes[OUTPUT][1] = sizes[REF_OUTPUT][1] = cvSize(pt_count,1);
- types[OUTPUT][1] = types[REF_OUTPUT][1] = CV_8UC1;
+ types[OUTPUT][1] = types[REF_OUTPUT][1] = CV_8UC1;
sizes[OUTPUT][2] = sizes[REF_OUTPUT][2] = cvSize(1,1);
types[OUTPUT][2] = types[REF_OUTPUT][2] = CV_64FC1;
sizes[OUTPUT][3] = sizes[REF_OUTPUT][3] = cvSize(1,1);
- types[OUTPUT][3] = types[REF_OUTPUT][3] = CV_8UC1;
+ types[OUTPUT][3] = types[REF_OUTPUT][3] = CV_8UC1;
}
void CV_EssentialMatTest::run_func()
{
Mat _input0(test_mat[INPUT][0]), _input1(test_mat[INPUT][1]);
- Mat K(test_mat[INPUT][4]);
- double focal(K.at<double>(0, 0));
- cv::Point2d pp(K.at<double>(0, 2), K.at<double>(1, 2));
+ Mat K(test_mat[INPUT][4]);
+ double focal(K.at<double>(0, 0));
+ cv::Point2d pp(K.at<double>(0, 2), K.at<double>(1, 2));
RNG& rng = ts->get_rng();
Mat E, mask1(test_mat[TEMP][1]);
- E = cv::findEssentialMat( _input0, _input1, focal, pp, method, 0.99, MAX(sigma*3, 0.0001), mask1 );
- if (E.rows > 3)
+ E = cv::findEssentialMat( _input0, _input1, focal, pp, method, 0.99, MAX(sigma*3, 0.0001), mask1 );
+ if (E.rows > 3)
{
- int count = E.rows / 3;
- int row = (cvtest::randInt(rng) % count) * 3;
- E = E.rowRange(row, row + 3) * 1.0;
+ int count = E.rows / 3;
+ int row = (cvtest::randInt(rng) % count) * 3;
+ E = E.rowRange(row, row + 3) * 1.0;
}
- E.copyTo(test_mat[TEMP][0]);
+ E.copyTo(test_mat[TEMP][0]);
- Mat R, t, mask2;
- recoverPose( E, _input0, _input1, R, t, focal, pp, mask2 );
- R.copyTo(test_mat[TEMP][2]);
- t.copyTo(test_mat[TEMP][3]);
- mask2.copyTo(test_mat[TEMP][4]);
+ Mat R, t, mask2;
+ recoverPose( E, _input0, _input1, R, t, focal, pp, mask2 );
+ R.copyTo(test_mat[TEMP][2]);
+ t.copyTo(test_mat[TEMP][3]);
+ mask2.copyTo(test_mat[TEMP][4]);
}
// Returns the square root of the Sampson error of the correspondence
// (x1,y1) <-> (x2,y2) under the 3x3 matrix f (row-major, 9 doubles):
//
//     sqrt( (x2' F x1)^2 / ((F x1)_0^2 + (F x1)_1^2 + (F' x2)_0^2 + (F' x2)_1^2) )
//
// i.e. the first-order approximation of the geometric distance to the
// epipolar constraint x2' F x1 = 0.
double CV_EssentialMatTest::sampson_error(const double* f, double x1, double y1, double x2, double y2)
{
    // F * x1 with x1 = (x1, y1, 1)'
    double Fx1[3] = {
        f[0] * x1 + f[1] * y1 + f[2],
        f[3] * x1 + f[4] * y1 + f[5],
        f[6] * x1 + f[7] * y1 + f[8]
    };
    // F' * x2 with x2 = (x2, y2, 1)'
    double Ftx2[3] = {
        f[0] * x2 + f[3] * y2 + f[6],
        f[1] * x2 + f[4] * y2 + f[7],
        f[2] * x2 + f[5] * y2 + f[8]
    };
    // Algebraic epipolar residual x2' F x1.
    double x2tFx1 = Fx1[0] * x2 + Fx1[1] * y2 + Fx1[2];

    // Normalize by the gradient magnitude (Sampson denominator) and
    // return the distance, not the squared distance.
    double error = x2tFx1 * x2tFx1 / (Fx1[0] * Fx1[0] + Fx1[1] * Fx1[1] + Ftx2[0] * Ftx2[0] + Ftx2[1] * Ftx2[1]);
    error = sqrt(error);
    return error;
}
const Mat& A = test_mat[INPUT][4];
double f0[9], f[9], e[9];
Mat F0(3, 3, CV_64FC1, f0), F(3, 3, CV_64F, f);
- Mat E(3, 3, CV_64F, e);
+ Mat E(3, 3, CV_64F, e);
Mat invA, R=Rt0.colRange(0, 3), T1, T2;
uchar* mtfm2 = test_mat[OUTPUT][1].data;
double* e_prop1 = (double*)test_mat[REF_OUTPUT][0].data;
double* e_prop2 = (double*)test_mat[OUTPUT][0].data;
- Mat E_prop2 = Mat(3, 1, CV_64F, e_prop2);
+ Mat E_prop2 = Mat(3, 1, CV_64F, e_prop2);
int i, pt_count = test_mat[INPUT][2].cols;
Mat p1( 1, pt_count, CV_64FC2 );
double y1 = p1.at<Point2d>(i).y;
double x2 = p2.at<Point2d>(i).x;
double y2 = p2.at<Point2d>(i).y;
-// double t0 = sampson_error(f0, x1, y1, x2, y2);
-// double t = sampson_error(f, x1, y1, x2, y2);
+// double t0 = sampson_error(f0, x1, y1, x2, y2);
+// double t = sampson_error(f, x1, y1, x2, y2);
double n1 = 1./sqrt(x1*x1 + y1*y1 + 1);
double n2 = 1./sqrt(x2*x2 + y2*y2 + 1);
double t0 = fabs(f0[0]*x2*x1 + f0[1]*x2*y1 + f0[2]*x2 +
mtfm1[i] = 1;
mtfm2[i] = !status[i] || t0 > err_level || t < err_level;
}
-
+
e_prop1[0] = sqrt(0.5);
e_prop1[1] = sqrt(0.5);
e_prop1[2] = 0;
e_prop2[0] = 0;
e_prop2[1] = 0;
e_prop2[2] = 0;
- SVD::compute(E, E_prop2);
+ SVD::compute(E, E_prop2);
- double* pose_prop1 = (double*)test_mat[REF_OUTPUT][2].data;
- double* pose_prop2 = (double*)test_mat[OUTPUT][2].data;
- double terr1 = norm(Rt0.col(3) / norm(Rt0.col(3)) + test_mat[TEMP][3]);
- double terr2 = norm(Rt0.col(3) / norm(Rt0.col(3)) - test_mat[TEMP][3]);
- Mat rvec;
- Rodrigues(Rt0.colRange(0, 3), rvec);
- pose_prop1[0] = 0;
- // No check for CV_LMeDS on translation. Since it
- // involves with some degraded problem, when data is exact inliers.
- pose_prop2[0] = method == CV_LMEDS || pt_count == 5 ? 0 : MIN(terr1, terr2);
+ double* pose_prop1 = (double*)test_mat[REF_OUTPUT][2].data;
+ double* pose_prop2 = (double*)test_mat[OUTPUT][2].data;
+ double terr1 = norm(Rt0.col(3) / norm(Rt0.col(3)) + test_mat[TEMP][3]);
+ double terr2 = norm(Rt0.col(3) / norm(Rt0.col(3)) - test_mat[TEMP][3]);
+ Mat rvec;
+ Rodrigues(Rt0.colRange(0, 3), rvec);
+ pose_prop1[0] = 0;
+ // No check for CV_LMeDS on translation. Since it
+ // involves with some degraded problem, when data is exact inliers.
+ pose_prop2[0] = method == CV_LMEDS || pt_count == 5 ? 0 : MIN(terr1, terr2);
-// int inliers_count = countNonZero(test_mat[TEMP][1]);
-// int good_count = countNonZero(test_mat[TEMP][4]);
- test_mat[OUTPUT][3] = true; //good_count >= inliers_count / 2;
- test_mat[REF_OUTPUT][3] = true;
+// int inliers_count = countNonZero(test_mat[TEMP][1]);
+// int good_count = countNonZero(test_mat[TEMP][4]);
+ test_mat[OUTPUT][3] = true; //good_count >= inliers_count / 2;
+ test_mat[REF_OUTPUT][3] = true;
}
//Rodrigues vector
Affine3(const Vec3& rvec, const Vec3& t = Vec3::all(0));
-
+
//Combines all contructors above. Supports 4x4, 3x3, 1x3, 3x1 sizes of data matrix
explicit Affine3(const cv::Mat& data, const Vec3& t = Vec3::all(0));
//Rotation matrix
void rotation(const Mat3& R);
-
+
//Rodrigues vector
void rotation(const Vec3& rvec);
-
+
//Combines rotation methods above. Suports 3x3, 1x3, 3x1 sizes of data matrix;
void rotation(const Mat& data);
-
+
//Euler angles
void rotation(float_type alpha, float_type beta, float_type gamma);
template<typename T> inline void cv::Affine3<T>::rotation(const cv::Mat& data)
{
CV_Assert(data.type() == cv::DataType<T>::type);
-
+
if (data.cols == 3 && data.rows == 3)
{
Mat3 R;
template<typename _Tp, int m, int n, int nm> inline void
SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt )
{
- assert( nm == MIN(m, n));
+ CV_StaticAssert( nm == MIN(m, n), "Invalid size of output vector.");
Mat _a(a, false), _u(u, false), _w(w, false), _vt(vt, false);
SVD::compute(_a, _w, _u, _vt);
CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]);
template<typename _Tp, int m, int n, int nm> inline void
SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w )
{
- assert( nm == MIN(m, n));
+ CV_StaticAssert( nm == MIN(m, n), "Invalid size of output vector.");
Mat _a(a, false), _w(w, false);
SVD::compute(_a, _w);
CV_Assert(_w.data == (uchar*)&w.val[0]);
const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs,
Matx<_Tp, n, nb>& dst )
{
- assert( nm == MIN(m, n));
+ CV_StaticAssert( nm == MIN(m, n), "Invalid size of output vector.");
Mat _u(u, false), _w(w, false), _vt(vt, false), _rhs(rhs, false), _dst(dst, false);
SVD::backSubst(_w, _u, _vt, _rhs, _dst);
CV_Assert(_dst.data == (uchar*)&dst.val[0]);
# endif
# ifndef CV_StaticAssert
# if defined(__GNUC__) && (__GNUC__ > 3) && (__GNUC_MINOR__ > 2)
-# define CV_StaticAssert(condition, reason) ({ extern int __attribute__((error("CV_StaticAssert: " reason " " #condition))) CV_StaticAssert(); ((condition) ? 0 : CV_StaticAssert()), 0; })
+# define CV_StaticAssert(condition, reason) ({ extern int __attribute__((error("CV_StaticAssert: " reason " " #condition))) CV_StaticAssert(); ((condition) ? 0 : CV_StaticAssert()); })
# else
namespace cv {
template <bool x> struct CV_StaticAssert_failed;
// Two-element constructor: compile-time-checked against the Matx size,
// remaining elements are zero-initialized.
template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1)
{
    CV_StaticAssert(channels >= 2, "Matx should have at least 2 elements.");
    val[0] = v0; val[1] = v1;
    for(int i = 2; i < channels; i++) val[i] = _Tp(0);
}
// Three-element constructor: compile-time-checked against the Matx size,
// remaining elements are zero-initialized.
template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2)
{
    CV_StaticAssert(channels >= 3, "Matx should have at least 3 elements.");
    val[0] = v0; val[1] = v1; val[2] = v2;
    for(int i = 3; i < channels; i++) val[i] = _Tp(0);
}
// Four-element constructor: compile-time-checked against the Matx size,
// remaining elements are zero-initialized.
template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3)
{
    CV_StaticAssert(channels >= 4, "Matx should have at least 4 elements.");
    val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
    for(int i = 4; i < channels; i++) val[i] = _Tp(0);
}
// Five-element constructor: compile-time-checked against the Matx size,
// remaining elements are zero-initialized.
template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4)
{
    CV_StaticAssert(channels >= 5, "Matx should have at least 5 elements.");
    val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; val[4] = v4;
    for(int i = 5; i < channels; i++) val[i] = _Tp(0);
}
template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
_Tp v4, _Tp v5)
{
- assert(channels >= 6);
+ CV_StaticAssert(channels >= 6, "Matx should have at least 6 elaments.");
val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
val[4] = v4; val[5] = v5;
for(int i = 6; i < channels; i++) val[i] = _Tp(0);
template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
_Tp v4, _Tp v5, _Tp v6)
{
- assert(channels >= 7);
+ CV_StaticAssert(channels >= 7, "Matx should have at least 7 elaments.");
val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
val[4] = v4; val[5] = v5; val[6] = v6;
for(int i = 7; i < channels; i++) val[i] = _Tp(0);
template<typename _Tp, int m, int n> inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3,
_Tp v4, _Tp v5, _Tp v6, _Tp v7)
{
- assert(channels >= 8);
+ CV_StaticAssert(channels >= 8, "Matx should have at least 8 elaments.");
val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
for(int i = 8; i < channels; i++) val[i] = _Tp(0);
_Tp v4, _Tp v5, _Tp v6, _Tp v7,
_Tp v8)
{
- assert(channels >= 9);
+ CV_StaticAssert(channels >= 9, "Matx should have at least 9 elaments.");
val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
val[8] = v8;
_Tp v4, _Tp v5, _Tp v6, _Tp v7,
_Tp v8, _Tp v9)
{
- assert(channels >= 10);
+ CV_StaticAssert(channels >= 10, "Matx should have at least 10 elaments.");
val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
val[8] = v8; val[9] = v9;
_Tp v4, _Tp v5, _Tp v6, _Tp v7,
_Tp v8, _Tp v9, _Tp v10, _Tp v11)
{
- assert(channels == 12);
+ CV_StaticAssert(channels == 12, "Matx should have at least 12 elaments.");
val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11;
_Tp v8, _Tp v9, _Tp v10, _Tp v11,
_Tp v12, _Tp v13, _Tp v14, _Tp v15)
{
- assert(channels == 16);
+ CV_StaticAssert(channels == 16, "Matx should have at least 16 elaments.");
val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3;
val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7;
val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11;
#if CV_SSE2
if( patternSize == 16 )
{
- for(; j < img.cols - 16 - 3; j += 16, ptr += 16)
- {
- __m128i m0, m1;
- __m128i v0 = _mm_loadu_si128((const __m128i*)ptr);
- __m128i v1 = _mm_xor_si128(_mm_subs_epu8(v0, t), delta);
- v0 = _mm_xor_si128(_mm_adds_epu8(v0, t), delta);
-
- __m128i x0 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[0])), delta);
- __m128i x1 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[quarterPatternSize])), delta);
- __m128i x2 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[2*quarterPatternSize])), delta);
- __m128i x3 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[3*quarterPatternSize])), delta);
- m0 = _mm_and_si128(_mm_cmpgt_epi8(x0, v0), _mm_cmpgt_epi8(x1, v0));
- m1 = _mm_and_si128(_mm_cmpgt_epi8(v1, x0), _mm_cmpgt_epi8(v1, x1));
- m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x1, v0), _mm_cmpgt_epi8(x2, v0)));
- m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x1), _mm_cmpgt_epi8(v1, x2)));
- m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x2, v0), _mm_cmpgt_epi8(x3, v0)));
- m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x2), _mm_cmpgt_epi8(v1, x3)));
- m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x3, v0), _mm_cmpgt_epi8(x0, v0)));
- m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x3), _mm_cmpgt_epi8(v1, x0)));
- m0 = _mm_or_si128(m0, m1);
- int mask = _mm_movemask_epi8(m0);
- if( mask == 0 )
- continue;
- if( (mask & 255) == 0 )
+ for(; j < img.cols - 16 - 3; j += 16, ptr += 16)
{
- j -= 8;
- ptr -= 8;
- continue;
- }
+ __m128i m0, m1;
+ __m128i v0 = _mm_loadu_si128((const __m128i*)ptr);
+ __m128i v1 = _mm_xor_si128(_mm_subs_epu8(v0, t), delta);
+ v0 = _mm_xor_si128(_mm_adds_epu8(v0, t), delta);
+
+ __m128i x0 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[0])), delta);
+ __m128i x1 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[quarterPatternSize])), delta);
+ __m128i x2 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[2*quarterPatternSize])), delta);
+ __m128i x3 = _mm_sub_epi8(_mm_loadu_si128((const __m128i*)(ptr + pixel[3*quarterPatternSize])), delta);
+ m0 = _mm_and_si128(_mm_cmpgt_epi8(x0, v0), _mm_cmpgt_epi8(x1, v0));
+ m1 = _mm_and_si128(_mm_cmpgt_epi8(v1, x0), _mm_cmpgt_epi8(v1, x1));
+ m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x1, v0), _mm_cmpgt_epi8(x2, v0)));
+ m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x1), _mm_cmpgt_epi8(v1, x2)));
+ m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x2, v0), _mm_cmpgt_epi8(x3, v0)));
+ m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x2), _mm_cmpgt_epi8(v1, x3)));
+ m0 = _mm_or_si128(m0, _mm_and_si128(_mm_cmpgt_epi8(x3, v0), _mm_cmpgt_epi8(x0, v0)));
+ m1 = _mm_or_si128(m1, _mm_and_si128(_mm_cmpgt_epi8(v1, x3), _mm_cmpgt_epi8(v1, x0)));
+ m0 = _mm_or_si128(m0, m1);
+ int mask = _mm_movemask_epi8(m0);
+ if( mask == 0 )
+ continue;
+ if( (mask & 255) == 0 )
+ {
+ j -= 8;
+ ptr -= 8;
+ continue;
+ }
- __m128i c0 = _mm_setzero_si128(), c1 = c0, max0 = c0, max1 = c0;
- for( k = 0; k < N; k++ )
- {
- __m128i x = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(ptr + pixel[k])), delta);
- m0 = _mm_cmpgt_epi8(x, v0);
- m1 = _mm_cmpgt_epi8(v1, x);
+ __m128i c0 = _mm_setzero_si128(), c1 = c0, max0 = c0, max1 = c0;
+ for( k = 0; k < N; k++ )
+ {
+ __m128i x = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(ptr + pixel[k])), delta);
+ m0 = _mm_cmpgt_epi8(x, v0);
+ m1 = _mm_cmpgt_epi8(v1, x);
- c0 = _mm_and_si128(_mm_sub_epi8(c0, m0), m0);
- c1 = _mm_and_si128(_mm_sub_epi8(c1, m1), m1);
+ c0 = _mm_and_si128(_mm_sub_epi8(c0, m0), m0);
+ c1 = _mm_and_si128(_mm_sub_epi8(c1, m1), m1);
- max0 = _mm_max_epu8(max0, c0);
- max1 = _mm_max_epu8(max1, c1);
- }
+ max0 = _mm_max_epu8(max0, c0);
+ max1 = _mm_max_epu8(max1, c1);
+ }
- max0 = _mm_max_epu8(max0, max1);
- int m = _mm_movemask_epi8(_mm_cmpgt_epi8(max0, K16));
+ max0 = _mm_max_epu8(max0, max1);
+ int m = _mm_movemask_epi8(_mm_cmpgt_epi8(max0, K16));
- for( k = 0; m > 0 && k < 16; k++, m >>= 1 )
- if(m & 1)
- {
- cornerpos[ncorners++] = j+k;
- if(nonmax_suppression)
- curr[j+k] = (uchar)cornerScore<patternSize>(ptr+k, pixel, threshold);
- }
- }
+ for( k = 0; m > 0 && k < 16; k++, m >>= 1 )
+ if(m & 1)
+ {
+ cornerpos[ncorners++] = j+k;
+ if(nonmax_suppression)
+ curr[j+k] = (uchar)cornerScore<patternSize>(ptr+k, pixel, threshold);
+ }
+ }
}
#endif
for( ; j < img.cols - 3; j++, ptr++ )
#define SANITY_CHECK_KEYPOINTS(array, ...) ::perf::Regression::addKeypoints(this, #array, array , ## __VA_ARGS__)
#define SANITY_CHECK_MATCHES(array, ...) ::perf::Regression::addMatches(this, #array, array , ## __VA_ARGS__)
-#ifdef HAVE_CUDA
class CV_EXPORTS GpuPerf
{
public:
};
# define PERF_RUN_GPU() ::perf::GpuPerf::targetDevice()
-#else
-# define PERF_RUN_GPU() false
-#endif
/*****************************************************************************************\
/*****************************************************************************************\
* ::perf::GpuPerf
\*****************************************************************************************/
-#ifdef HAVE_CUDA
bool perf::GpuPerf::targetDevice()
{
+#ifdef HAVE_CUDA
return !param_run_cpu;
-}
+#else
+ return false;
#endif
+}
/*****************************************************************************************\
* ::perf::PrintTo