From eb6994f58ac2835e79ec97f9a70302d16c1869df Mon Sep 17 00:00:00 2001
From: Vadim Pisarevsky
Date: Mon, 30 Aug 2010 18:05:05 +0000
Subject: [PATCH] fixed Mat(const Matx&) constructor; added SVD(Matx)

---
 modules/core/include/opencv2/core/core.hpp |  15 ++++
 modules/core/include/opencv2/core/mat.hpp  |  31 +++++++-
 modules/core/src/lapack.cpp                | 124 +++++++++++++++++------
 3 files changed, 119 insertions(+), 51 deletions(-)

diff --git a/modules/core/include/opencv2/core/core.hpp b/modules/core/include/opencv2/core/core.hpp
index a64ee72..51dc082 100644
--- a/modules/core/include/opencv2/core/core.hpp
+++ b/modules/core/include/opencv2/core/core.hpp
@@ -2046,6 +2046,21 @@ public:
     //! the operator that performs SVD. The previously allocated SVD::u, SVD::w are SVD::vt are released.
     SVD& operator ()( const Mat& m, int flags=0 );
 
+    //! decomposes matrix and stores the results to user-provided matrices
+    static void compute( const Mat& m, Mat& w, Mat& u, Mat& vt, int flags=0 );
+    //! computes singular values of a matrix
+    static void compute( const Mat& m, Mat& w, int flags=0 );
+    //! performs back substitution
+    static void backSubst( const Mat& w, const Mat& u, const Mat& vt,
+                           const Mat& rhs, Mat& dst );
+
+    template<typename _Tp, int m, int n, int nm> static void compute( const Matx<_Tp, m, n>& a,
+        Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt );
+    template<typename _Tp, int m, int n, int nm> static void compute( const Matx<_Tp, m, n>& a,
+        Matx<_Tp, nm, 1>& w );
+    template<typename _Tp, int m, int n, int nm, int nb> static void backSubst( const Matx<_Tp, nm, 1>& w,
+        const Matx<_Tp, m, nm>& u, const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, Matx<_Tp, n, nb>& dst );
+
     //! finds dst = arg min_{|dst|=1} |m*dst|
     static void solveZ( const Mat& m, Mat& dst );
     //! performs back substitution, so that dst is the solution or pseudo-solution of m*dst = rhs, where m is the decomposed matrix
diff --git a/modules/core/include/opencv2/core/mat.hpp b/modules/core/include/opencv2/core/mat.hpp
index de76e97..1d715e0 100644
--- a/modules/core/include/opencv2/core/mat.hpp
+++ b/modules/core/include/opencv2/core/mat.hpp
@@ -258,7 +258,7 @@ template<typename _Tp, int m, int n> inline Mat::Mat(const Matx<_Tp,m,n>& M, boo
 {
     rows = m;
     cols = n;
-    step = sizeof(_Tp);
+    step = n*sizeof(_Tp);
     data = datastart = (uchar*)M.val;
     dataend = datastart + rows*step;
 }
@@ -649,6 +649,35 @@ inline void SVD::solveZ( const Mat& m, Mat& dst )
     svd.vt.row(svd.vt.rows-1).reshape(1,svd.vt.cols).copyTo(dst);
 }
 
+template<typename _Tp, int m, int n, int nm> inline void
+    SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt )
+{
+    assert( nm == MIN(m, n));
+    Mat _a(a, false), _u(u, false), _w(w, false), _vt(vt, false);
+    SVD::compute(_a, _w, _u, _vt);
+    CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]);
+}
+
+template<typename _Tp, int m, int n, int nm> inline void
+SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w )
+{
+    assert( nm == MIN(m, n));
+    Mat _a(a, false), _w(w, false);
+    SVD::compute(_a, _w);
+    CV_Assert(_w.data == (uchar*)&w.val[0]);
+}
+
+template<typename _Tp, int m, int n, int nm, int nb> inline void
+SVD::backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u,
+                const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs,
+                Matx<_Tp, n, nb>& dst )
+{
+    assert( nm == MIN(m, n));
+    Mat _u(u, false), _w(w, false), _vt(vt, false), _rhs(rhs, false), _dst(dst, false);
+    SVD::backSubst(_w, _u, _vt, _rhs, _dst);
+    CV_Assert(_dst.data == (uchar*)&dst.val[0]);
+}
+
 ///////////////////////////////// Mat_<_Tp> ////////////////////////////////////
 
 template<typename _Tp> inline Mat_<_Tp>::Mat_() :
diff --git a/modules/core/src/lapack.cpp b/modules/core/src/lapack.cpp
index 413b0e6..6d9a876 100644
--- a/modules/core/src/lapack.cpp
+++ b/modules/core/src/lapack.cpp
@@ -1316,60 +1316,62 @@ SVBkSb( int m, int n, const T* w, int incw,
 }
 
 
-SVD& SVD::operator ()(const Mat& a, int flags)
+static void _SVDcompute( const Mat& a, Mat& w, Mat* u, Mat* vt, int flags )
 {
     integer m = a.rows, n = a.cols, mn = std::max(m, n), nm = std::min(m, n);
     int type = a.type(), elem_size = (int)a.elemSize();
+    bool compute_uv = u && vt;
 
-    if( flags & NO_UV )
+    if( flags & SVD::NO_UV )
     {
-        u.release();
-        vt.release();
+        if(u) u->release();
+        if(vt) vt->release();
+        u = vt = 0;
     }
-    else
+
+    if( compute_uv )
     {
-        u.create( (int)m, (int)((flags & FULL_UV) ? m : nm), type );
-        vt.create( (int)((flags & FULL_UV) ? n : nm), n, type );
+        u->create( (int)m, (int)((flags & SVD::FULL_UV) ? m : nm), type );
+        vt->create( (int)((flags & SVD::FULL_UV) ? n : nm), n, type );
     }
-    
+
     w.create(nm, 1, type);
-    
+
     Mat _a = a;
     int a_ofs = 0, work_ofs=0, iwork_ofs=0, buf_size = 0;
     bool temp_a = false;
     double u1=0, v1=0, work1=0;
     float uf1=0, vf1=0, workf1=0;
     integer lda, ldu, ldv, lwork=-1, iwork1=0, info=0;
-    char mode[] = {u.data || vt.data ? 'S' : 'N', '\0'};
-    
-    if( m != n && !(flags & NO_UV) && (flags & FULL_UV) )
+    char mode[] = {compute_uv ? 'S' : 'N', '\0'};
+
+    if( m != n && compute_uv && (flags & SVD::FULL_UV) )
         mode[0] = 'A';
-    
-    if( !(flags & MODIFY_A) )
+
+    if( !(flags & SVD::MODIFY_A) )
     {
         if( mode[0] == 'N' || mode[0] == 'A' )
             temp_a = true;
-        else if( ((vt.data && a.size() == vt.size()) || (u.data && a.size() == u.size())) &&
-                mode[0] == 'S' )
+        else if( compute_uv && (a.size() == vt->size() || a.size() == u->size()) && mode[0] == 'S' )
             mode[0] = 'O';
     }
-    
+
     lda = a.cols;
     ldv = ldu = mn;
-    
+
     if( type == CV_32F )
     {
         sgesdd_(mode, &n, &m, (float*)a.data, &lda, (float*)w.data,
-                &vf1, &ldv, &uf1, &ldu, &workf1, &lwork, &iwork1, &info );
+            &vf1, &ldv, &uf1, &ldu, &workf1, &lwork, &iwork1, &info );
         lwork = cvRound(workf1);
     }
     else
     {
         dgesdd_(mode, &n, &m, (double*)a.data, &lda, (double*)w.data,
-                &v1, &ldv, &u1, &ldu, &work1, &lwork, &iwork1, &info );
+            &v1, &ldv, &u1, &ldu, &work1, &lwork, &iwork1, &info );
         lwork = cvRound(work1);
     }
-    
+
     assert(info == 0);
     if( temp_a )
     {
@@ -1384,80 +1386,102 @@ SVD& SVD::operator ()(const Mat& a, int flags)
 
     AutoBuffer<uchar> buf(buf_size);
     uchar* buffer = (uchar*)buf;
-    
+
     if( temp_a )
     {
         _a = Mat(a.rows, a.cols, type, buffer );
         a.copyTo(_a);
     }
-    
-    if( !(flags & MODIFY_A) && !temp_a )
+
+    if( !(flags & SVD::MODIFY_A) && !temp_a )
    {
-        if( vt.data && a.size() == vt.size() )
+        if( compute_uv && a.size() == vt->size() )
         {
-            a.copyTo(vt);
-            _a = vt;
+            a.copyTo(*vt);
+            _a = *vt;
         }
-        else if( u.data && a.size() == u.size() )
+        else if( compute_uv && a.size() == u->size() )
         {
-            a.copyTo(u);
-            _a = u;
+            a.copyTo(*u);
+            _a = *u;
         }
     }
-    
-    if( mode[0] != 'N' )
+
+    if( compute_uv )
     {
-        ldv = (int)(vt.step ? vt.step/elem_size : vt.cols);
-        ldu = (int)(u.step ? u.step/elem_size : u.cols);
+        ldv = (int)(vt->step ? vt->step/elem_size : vt->cols);
+        ldu = (int)(u->step ? u->step/elem_size : u->cols);
     }
-    
+
     lda = (int)(_a.step ? _a.step/elem_size : _a.cols);
     if( type == CV_32F )
     {
         sgesdd_(mode, &n, &m, (float*)_a.data, &lda, (float*)w.data,
-                (float*)vt.data, &ldv, (float*)u.data, &ldu,
-                (float*)(buffer + work_ofs), &lwork, (integer*)(buffer + iwork_ofs), &info );
+            (float*)vt->data, &ldv, (float*)u->data, &ldu,
+            (float*)(buffer + work_ofs), &lwork, (integer*)(buffer + iwork_ofs), &info );
     }
     else
     {
         dgesdd_(mode, &n, &m, (double*)_a.data, &lda, (double*)w.data,
-                (double*)vt.data, &ldv, (double*)u.data, &ldu,
-                (double*)(buffer + work_ofs), &lwork, (integer*)(buffer + iwork_ofs), &info );
+            (double*)vt->data, &ldv, (double*)u->data, &ldu,
+            (double*)(buffer + work_ofs), &lwork, (integer*)(buffer + iwork_ofs), &info );
     }
 
     CV_Assert(info >= 0);
     if(info != 0)
     {
-        u = Scalar(0.);
-        vt = Scalar(0.);
+        *u = Scalar(0.);
+        *vt = Scalar(0.);
         w = Scalar(0.);
     }
-    return *this;
+}
+
+
+void SVD::compute( const Mat& a, Mat& w, Mat& u, Mat& vt, int flags )
+{
+    _SVDcompute(a, w, &u, &vt, flags);
 }
 
-
-void SVD::backSubst( const Mat& rhs, Mat& dst ) const
+void SVD::compute( const Mat& a, Mat& w, int flags )
+{
+    _SVDcompute(a, w, 0, 0, flags);
+}
+
+void SVD::backSubst( const Mat& w, const Mat& u, const Mat& vt, const Mat& rhs, Mat& dst )
 {
     int type = w.type(), esz = (int)w.elemSize();
     int m = u.rows, n = vt.cols, nb = rhs.data ? rhs.cols : m;
     AutoBuffer<double> buffer(nb);
     CV_Assert( u.data && vt.data && w.data );
-    
+
     if( rhs.data )
         CV_Assert( rhs.type() == type && rhs.rows == m );
-    
+
     dst.create( n, nb, type );
 
     if( type == CV_32F )
         SVBkSb(m, n, (float*)w.data, 1, (float*)u.data, (int)(u.step/esz), false,
-            (float*)vt.data, (int)(vt.step/esz), true, (float*)rhs.data, (int)(rhs.step/esz),
-            nb, (float*)dst.data, (int)(dst.step/esz), buffer, 10*FLT_EPSILON );
+               (float*)vt.data, (int)(vt.step/esz), true, (float*)rhs.data, (int)(rhs.step/esz),
+               nb, (float*)dst.data, (int)(dst.step/esz), buffer, 10*FLT_EPSILON );
     else if( type == CV_64F )
         SVBkSb(m, n, (double*)w.data, 1, (double*)u.data, (int)(u.step/esz), false,
-            (double*)vt.data, (int)(vt.step/esz), true, (double*)rhs.data, (int)(rhs.step/esz),
-            nb, (double*)dst.data, (int)(dst.step/esz), buffer, 2*DBL_EPSILON );
+               (double*)vt.data, (int)(vt.step/esz), true, (double*)rhs.data, (int)(rhs.step/esz),
+               nb, (double*)dst.data, (int)(dst.step/esz), buffer, 2*DBL_EPSILON );
     else
         CV_Error( CV_StsUnsupportedFormat, "" );
 }
 
+
+SVD& SVD::operator ()(const Mat& a, int flags)
+{
+    _SVDcompute(a, w, &u, &vt, flags);
+    return *this;
+}
+
+
+void SVD::backSubst( const Mat& rhs, Mat& dst ) const
+{
+    backSubst( w, u, vt, rhs, dst );
+}
+
 }
-- 
2.7.4
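
For anyone who wants to try the interface this patch introduces, here is a small illustrative sketch; it is not part of the commit. It assumes a build of OpenCV that already contains this change; the matrices, values, and variable names (A, b, M, wf, ...) are made up for demonstration, while the cv::SVD::compute/backSubst overloads and the cv::Matx types are the ones declared above.

// usage sketch (not part of the patch): new static SVD API and Matx variants
#include <opencv2/core/core.hpp>
#include <cstdio>

int main()
{
    // Dynamic-size interface: the new static overloads, no SVD object needed.
    cv::Mat A = (cv::Mat_<double>(3, 3) << 2, 0, 0,
                                           0, 3, 4,
                                           0, 4, 9);
    cv::Mat w, u, vt;
    cv::SVD::compute(A, w, u, vt);

    cv::Mat b = (cv::Mat_<double>(3, 1) << 1, 2, 3), x;
    cv::SVD::backSubst(w, u, vt, b, x);   // solution (or pseudo-solution) of A*x = b

    // Fixed-size interface: the Matx overloads added in mat.hpp above.
    cv::Matx33f M(2.f, 0.f, 0.f,
                  0.f, 3.f, 4.f,
                  0.f, 4.f, 9.f);
    cv::Matx31f wf;
    cv::Matx33f uf, vtf;
    cv::SVD::compute(M, wf, uf, vtf);

    std::printf("largest singular value: %f\n", wf(0, 0));
    return 0;
}

The Matx overloads simply alias each fixed-size matrix as a non-owning Mat header (Mat _a(a, false)) and forward to the Mat-based code, which is why the corrected step = n*sizeof(_Tp) in the Mat(const Matx&) constructor matters: with the old step of sizeof(_Tp), any aliased Matx with more than one column was addressed incorrectly.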