2 * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>.
3 * Released to public domain under terms of the BSD Simplified license.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of the organization nor the names of its contributors
13 * may be used to endorse or promote products derived from this software
14 * without specific prior written permission.
16 * See <http://www.opensource.org/licenses/bsd-license>
19 #include "precomp.hpp"
// Removes duplicate elements in a given vector.
//
// The result is returned in ascending order (std::set iteration order);
// the relative order of the input elements is NOT preserved.
//
// @param src  input vector, possibly containing duplicates; not modified.
// @return     a new vector holding each distinct element of src exactly once.
template<typename _Tp>
inline std::vector<_Tp> remove_dups(const std::vector<_Tp>& src) {
    typedef typename std::set<_Tp>::const_iterator constSetIterator;
    typedef typename std::vector<_Tp>::const_iterator constVecIterator;
    // collect the unique elements
    std::set<_Tp> set_elems;
    for (constVecIterator it = src.begin(); it != src.end(); ++it)
        set_elems.insert(*it);
    // copy them back into a vector
    std::vector<_Tp> elems;
    for (constSetIterator it = set_elems.begin(); it != set_elems.end(); ++it)
        elems.push_back(*it);
    return elems;
}
46 static Mat argsort(InputArray _src, bool ascending=true)
48 Mat src = _src.getMat();
49 if (src.rows != 1 && src.cols != 1) {
50 string error_message = "Wrong shape of input matrix! Expected a matrix with one row or column.";
51 CV_Error(CV_StsBadArg, error_message);
53 int flags = CV_SORT_EVERY_ROW+(ascending ? CV_SORT_ASCENDING : CV_SORT_DESCENDING);
55 sortIdx(src.reshape(1,1),sorted_indices,flags);
56 return sorted_indices;
59 static Mat asRowMatrix(InputArrayOfArrays src, int rtype, double alpha=1, double beta=0) {
60 // make sure the input data is a vector of matrices or vector of vector
61 if(src.kind() != _InputArray::STD_VECTOR_MAT && src.kind() != _InputArray::STD_VECTOR_VECTOR) {
62 string error_message = "The data is expected as InputArray::STD_VECTOR_MAT (a std::vector<Mat>) or _InputArray::STD_VECTOR_VECTOR (a std::vector< vector<...> >).";
63 CV_Error(CV_StsBadArg, error_message);
66 size_t n = src.total();
67 // return empty matrix if no matrices given
70 // dimensionality of (reshaped) samples
71 size_t d = src.getMat(0).total();
73 Mat data((int)n, (int)d, rtype);
75 for(int i = 0; i < (int)n; i++) {
76 // make sure data can be reshaped, throw exception if not!
77 if(src.getMat(i).total() != d) {
78 string error_message = format("Wrong number of elements in matrix #%d! Expected %d was %d.", i, (int)d, (int)src.getMat(i).total());
79 CV_Error(CV_StsBadArg, error_message);
81 // get a hold of the current row
83 // make reshape happy by cloning for non-continuous matrices
84 if(src.getMat(i).isContinuous()) {
85 src.getMat(i).reshape(1, 1).convertTo(xi, rtype, alpha, beta);
87 src.getMat(i).clone().reshape(1, 1).convertTo(xi, rtype, alpha, beta);
93 static void sortMatrixColumnsByIndices(InputArray _src, InputArray _indices, OutputArray _dst) {
94 if(_indices.getMat().type() != CV_32SC1) {
95 CV_Error(CV_StsUnsupportedFormat, "cv::sortColumnsByIndices only works on integer indices!");
97 Mat src = _src.getMat();
98 vector<int> indices = _indices.getMat();
99 _dst.create(src.rows, src.cols, src.type());
100 Mat dst = _dst.getMat();
101 for(size_t idx = 0; idx < indices.size(); idx++) {
102 Mat originalCol = src.col(indices[idx]);
103 Mat sortedCol = dst.col((int)idx);
104 originalCol.copyTo(sortedCol);
108 static Mat sortMatrixColumnsByIndices(InputArray src, InputArray indices) {
110 sortMatrixColumnsByIndices(src, indices, dst);
115 template<typename _Tp> static bool
116 isSymmetric_(InputArray src) {
117 Mat _src = src.getMat();
118 if(_src.cols != _src.rows)
120 for (int i = 0; i < _src.rows; i++) {
121 for (int j = 0; j < _src.cols; j++) {
122 _Tp a = _src.at<_Tp> (i, j);
123 _Tp b = _src.at<_Tp> (j, i);
132 template<typename _Tp> static bool
133 isSymmetric_(InputArray src, double eps) {
134 Mat _src = src.getMat();
135 if(_src.cols != _src.rows)
137 for (int i = 0; i < _src.rows; i++) {
138 for (int j = 0; j < _src.cols; j++) {
139 _Tp a = _src.at<_Tp> (i, j);
140 _Tp b = _src.at<_Tp> (j, i);
141 if (std::abs(a - b) > eps) {
149 static bool isSymmetric(InputArray src, double eps=1e-16)
151 Mat m = src.getMat();
153 case CV_8SC1: return isSymmetric_<char>(m); break;
155 return isSymmetric_<unsigned char>(m); break;
157 return isSymmetric_<short>(m); break;
159 return isSymmetric_<unsigned short>(m); break;
161 return isSymmetric_<int>(m); break;
163 return isSymmetric_<float>(m, eps); break;
165 return isSymmetric_<double>(m, eps); break;
173 //------------------------------------------------------------------------------
174 // cv::subspaceProject
175 //------------------------------------------------------------------------------
176 Mat subspaceProject(InputArray _W, InputArray _mean, InputArray _src) {
179 Mat mean = _mean.getMat();
180 Mat src = _src.getMat();
181 // get number of samples and dimension
184 // make sure the data has the correct shape
186 string error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
187 CV_Error(CV_StsBadArg, error_message);
189 // make sure mean is correct if not empty
190 if(!mean.empty() && (mean.total() != (size_t) d)) {
191 string error_message = format("Wrong mean shape for the given data matrix. Expected %d, but was %d.", d, mean.total());
192 CV_Error(CV_StsBadArg, error_message);
194 // create temporary matrices
196 // make sure you operate on correct type
197 src.convertTo(X, W.type());
198 // safe to do, because of above assertion
200 for(int i=0; i<n; i++) {
202 subtract(r_i, mean.reshape(1,1), r_i);
205 // finally calculate projection as Y = (X-mean)*W
206 gemm(X, W, 1.0, Mat(), 0.0, Y);
210 //------------------------------------------------------------------------------
211 // cv::subspaceReconstruct
212 //------------------------------------------------------------------------------
213 Mat subspaceReconstruct(InputArray _W, InputArray _mean, InputArray _src)
217 Mat mean = _mean.getMat();
218 Mat src = _src.getMat();
219 // get number of samples and dimension
222 // make sure the data has the correct shape
224 string error_message = format("Wrong shapes for given matrices. Was size(src) = (%d,%d), size(W) = (%d,%d).", src.rows, src.cols, W.rows, W.cols);
225 CV_Error(CV_StsBadArg, error_message);
227 // make sure mean is correct if not empty
228 if(!mean.empty() && (mean.total() != (size_t) W.rows)) {
229 string error_message = format("Wrong mean shape for the given eigenvector matrix. Expected %d, but was %d.", W.cols, mean.total());
230 CV_Error(CV_StsBadArg, error_message);
232 // initialize temporary matrices
234 // copy data & make sure we are using the correct type
235 src.convertTo(Y, W.type());
236 // calculate the reconstruction
237 gemm(Y, W, 1.0, Mat(), 0.0, X, GEMM_2_T);
238 // safe to do because of above assertion
240 for(int i=0; i<n; i++) {
242 add(r_i, mean.reshape(1,1), r_i);
// NOTE(review): C++ port of the JAMA EigenvalueSolver (public domain,
// The MathWorks / NIST) for general non-symmetric real matrices; symmetric
// inputs are delegated to cv::eigen() inside compute() below.
249 class EigenvalueDecomposition {
252 // Holds the data dimension.
255 // Stores real/imag part of a complex division.
258 // Pointer to internal memory.
262 // Holds the computed eigenvalues.
265 // Holds the computed eigenvectors.
// Allocates a raw 1d array with m elements.
269 template<typename _Tp>
270 _Tp *alloc_1d(int m) {
// Allocates a raw 1d array with m elements, each initialized to val.
275 template<typename _Tp>
276 _Tp *alloc_1d(int m, _Tp val) {
277 _Tp *arr = alloc_1d<_Tp> (m);
278 for (int i = 0; i < m; i++)
// Allocates a raw 2d array (m rows of _n elements) as an array of row pointers.
284 template<typename _Tp>
285 _Tp **alloc_2d(int m, int _n) {
286 _Tp **arr = new _Tp*[m];
287 for (int i = 0; i < m; i++)
288 arr[i] = new _Tp[_n];
// Allocates a raw 2d array (m x _n), each element initialized to val.
293 template<typename _Tp>
294 _Tp **alloc_2d(int m, int _n, _Tp val) {
295 _Tp **arr = alloc_2d<_Tp> (m, _n);
296 for (int i = 0; i < m; i++) {
297 for (int j = 0; j < _n; j++) {
// Complex scalar division (xr + i*xi) / (yr + i*yi); the quotient is
// stored in the members cdivr (real part) and cdivi (imaginary part).
// NOTE(review): the |yr| vs |yi| branch appears to implement scaled
// (Smith-style) complex division to limit overflow — confirm against JAMA.
304 void cdiv(double xr, double xi, double yr, double yi) {
306 if (std::abs(yr) > std::abs(yi)) {
309 cdivr = (xr + r * xi) / dv;
310 cdivi = (xi - r * xr) / dv;
314 cdivr = (r * xr + xi) / dv;
315 cdivi = (r * xi - xr) / dv;
319 // Nonsymmetric reduction from Hessenberg to real Schur form.
323 // This is derived from the Algol procedure hqr2,
324 // by Martin and Wilkinson, Handbook for Auto. Comp.,
325 // Vol.ii-Linear Algebra, and the corresponding
326 // Fortran subroutine in EISPACK.
// (hqr2 and the routines below operate on the working arrays H, V and the
// eigenvalue buffers d, e / the Householder scratch buffer ort.)
// Machine epsilon for double precision (2^-52), used in convergence tests.
333 double eps = pow(2.0, -52.0);
334 double exshift = 0.0;
335 double p = 0, q = 0, r = 0, s = 0, z = 0, t, w, x, y;
337 // Store roots isolated by balanc and compute matrix norm
340 for (int i = 0; i < nn; i++) {
341 if (i < low || i > high) {
345 for (int j = max(i - 1, 0); j < nn; j++) {
346 norm = norm + std::abs(H[i][j]);
350 // Outer loop over eigenvalue index
354 // Look for single small sub-diagonal element
357 s = std::abs(H[l - 1][l - 1]) + std::abs(H[l][l]);
361 if (std::abs(H[l][l - 1]) < eps * s) {
367 // Check for convergence
371 H[n1][n1] = H[n1][n1] + exshift;
// Two roots found: handle the real-pair / complex-pair 2x2 block.
379 } else if (l == n1 - 1) {
380 w = H[n1][n1 - 1] * H[n1 - 1][n1];
381 p = (H[n1 - 1][n1 - 1] - H[n1][n1]) / 2.0;
383 z = sqrt(std::abs(q));
384 H[n1][n1] = H[n1][n1] + exshift;
385 H[n1 - 1][n1 - 1] = H[n1 - 1][n1 - 1] + exshift;
404 s = std::abs(x) + std::abs(z);
407 r = sqrt(p * p + q * q);
// Row modification
413 for (int j = n1 - 1; j < nn; j++) {
415 H[n1 - 1][j] = q * z + p * H[n1][j];
416 H[n1][j] = q * H[n1][j] - p * z;
419 // Column modification
421 for (int i = 0; i <= n1; i++) {
423 H[i][n1 - 1] = q * z + p * H[i][n1];
424 H[i][n1] = q * H[i][n1] - p * z;
427 // Accumulate transformations
429 for (int i = low; i <= high; i++) {
431 V[i][n1 - 1] = q * z + p * V[i][n1];
432 V[i][n1] = q * V[i][n1] - p * z;
446 // No convergence yet
456 y = H[n1 - 1][n1 - 1];
457 w = H[n1][n1 - 1] * H[n1 - 1][n1];
460 // Wilkinson's original ad hoc shift
464 for (int i = low; i <= n1; i++) {
467 s = std::abs(H[n1][n1 - 1]) + std::abs(H[n1 - 1][n1 - 2]);
472 // MATLAB's new ad hoc shift
482 s = x - w / ((y - x) / 2.0 + s);
483 for (int i = low; i <= n1; i++) {
491 iter = iter + 1; // (Could check iteration count here.)
493 // Look for two consecutive small sub-diagonal elements
499 p = (r * s - w) / H[m + 1][m] + H[m][m + 1];
500 q = H[m + 1][m + 1] - z - r - s;
502 s = std::abs(p) + std::abs(q) + std::abs(r);
509 if (std::abs(H[m][m - 1]) * (std::abs(q) + std::abs(r)) < eps * (std::abs(p)
510 * (std::abs(H[m - 1][m - 1]) + std::abs(z) + std::abs(
511 H[m + 1][m + 1])))) {
517 for (int i = m + 2; i <= n1; i++) {
524 // Double QR step involving rows l:n and columns m:n
526 for (int k = m; k <= n1 - 1; k++) {
527 bool notlast = (k != n1 - 1);
531 r = (notlast ? H[k + 2][k - 1] : 0.0);
532 x = std::abs(p) + std::abs(q) + std::abs(r);
542 s = sqrt(p * p + q * q + r * r);
548 H[k][k - 1] = -s * x;
550 H[k][k - 1] = -H[k][k - 1];
// Row modification
561 for (int j = k; j < nn; j++) {
562 p = H[k][j] + q * H[k + 1][j];
564 p = p + r * H[k + 2][j];
565 H[k + 2][j] = H[k + 2][j] - p * z;
567 H[k][j] = H[k][j] - p * x;
568 H[k + 1][j] = H[k + 1][j] - p * y;
571 // Column modification
573 for (int i = 0; i <= min(n1, k + 3); i++) {
574 p = x * H[i][k] + y * H[i][k + 1];
576 p = p + z * H[i][k + 2];
577 H[i][k + 2] = H[i][k + 2] - p * r;
579 H[i][k] = H[i][k] - p;
580 H[i][k + 1] = H[i][k + 1] - p * q;
583 // Accumulate transformations
585 for (int i = low; i <= high; i++) {
586 p = x * V[i][k] + y * V[i][k + 1];
588 p = p + z * V[i][k + 2];
589 V[i][k + 2] = V[i][k + 2] - p * r;
591 V[i][k] = V[i][k] - p;
592 V[i][k + 1] = V[i][k + 1] - p * q;
596 } // check convergence
597 } // while (n1 >= low)
599 // Backsubstitute to find vectors of upper triangular form
605 for (n1 = nn - 1; n1 >= 0; n1--) {
// Real eigenvector back-substitution.
614 for (int i = n1 - 1; i >= 0; i--) {
617 for (int j = l; j <= n1; j++) {
618 r = r + H[i][j] * H[j][n1];
629 H[i][n1] = -r / (eps * norm);
632 // Solve real equations
637 q = (d[i] - p) * (d[i] - p) + e[i] * e[i];
638 t = (x * s - z * r) / q;
640 if (std::abs(x) > std::abs(z)) {
641 H[i + 1][n1] = (-r - w * t) / x;
643 H[i + 1][n1] = (-s - y * t) / z;
// Overflow control: rescale the column when entries grow too large.
649 t = std::abs(H[i][n1]);
650 if ((eps * t) * t > 1) {
651 for (int j = i; j <= n1; j++) {
652 H[j][n1] = H[j][n1] / t;
661 // Last vector component imaginary so matrix is triangular
663 if (std::abs(H[n1][n1 - 1]) > std::abs(H[n1 - 1][n1])) {
664 H[n1 - 1][n1 - 1] = q / H[n1][n1 - 1];
665 H[n1 - 1][n1] = -(H[n1][n1] - p) / H[n1][n1 - 1];
667 cdiv(0.0, -H[n1 - 1][n1], H[n1 - 1][n1 - 1] - p, q);
668 H[n1 - 1][n1 - 1] = cdivr;
669 H[n1 - 1][n1] = cdivi;
// Complex eigenvector back-substitution over the remaining rows.
673 for (int i = n1 - 2; i >= 0; i--) {
674 double ra, sa, vr, vi;
677 for (int j = l; j <= n1; j++) {
678 ra = ra + H[i][j] * H[j][n1 - 1];
679 sa = sa + H[i][j] * H[j][n1];
690 cdiv(-ra, -sa, w, q);
691 H[i][n1 - 1] = cdivr;
695 // Solve complex equations
699 vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
700 vi = (d[i] - p) * 2.0 * q;
701 if (vr == 0.0 && vi == 0.0) {
702 vr = eps * norm * (std::abs(w) + std::abs(q) + std::abs(x)
703 + std::abs(y) + std::abs(z));
705 cdiv(x * r - z * ra + q * sa,
706 x * s - z * sa - q * ra, vr, vi);
707 H[i][n1 - 1] = cdivr;
709 if (std::abs(x) > (std::abs(z) + std::abs(q))) {
710 H[i + 1][n1 - 1] = (-ra - w * H[i][n1 - 1] + q
712 H[i + 1][n1] = (-sa - w * H[i][n1] - q * H[i][n1
715 cdiv(-r - y * H[i][n1 - 1], -s - y * H[i][n1], z,
717 H[i + 1][n1 - 1] = cdivr;
718 H[i + 1][n1] = cdivi;
// Overflow control for the complex column pair.
724 t = max(std::abs(H[i][n1 - 1]), std::abs(H[i][n1]));
725 if ((eps * t) * t > 1) {
726 for (int j = i; j <= n1; j++) {
727 H[j][n1 - 1] = H[j][n1 - 1] / t;
728 H[j][n1] = H[j][n1] / t;
736 // Vectors of isolated roots
738 for (int i = 0; i < nn; i++) {
739 if (i < low || i > high) {
740 for (int j = i; j < nn; j++) {
746 // Back transformation to get eigenvectors of original matrix
748 for (int j = nn - 1; j >= low; j--) {
749 for (int i = low; i <= high; i++) {
751 for (int k = low; k <= min(j, high); k++) {
752 z = z + V[i][k] * H[k][j];
759 // Nonsymmetric reduction to Hessenberg form.
761 // This is derived from the Algol procedures orthes and ortran,
762 // by Martin and Wilkinson, Handbook for Auto. Comp.,
763 // Vol.ii-Linear Algebra, and the corresponding
764 // Fortran subroutines in EISPACK.
768 for (int m = low + 1; m <= high - 1; m++) {
// Scale column to avoid overflow in the Householder reflector.
773 for (int i = m; i <= high; i++) {
774 scale = scale + std::abs(H[i][m - 1]);
778 // Compute Householder transformation.
781 for (int i = high; i >= m; i--) {
782 ort[i] = H[i][m - 1] / scale;
783 h += ort[i] * ort[i];
792 // Apply Householder similarity transformation
793 // H = (I-u*u'/h)*H*(I-u*u')/h)
795 for (int j = m; j < n; j++) {
797 for (int i = high; i >= m; i--) {
798 f += ort[i] * H[i][j];
801 for (int i = m; i <= high; i++) {
802 H[i][j] -= f * ort[i];
806 for (int i = 0; i <= high; i++) {
808 for (int j = high; j >= m; j--) {
809 f += ort[j] * H[i][j];
812 for (int j = m; j <= high; j++) {
813 H[i][j] -= f * ort[j];
816 ort[m] = scale * ort[m];
817 H[m][m - 1] = scale * g;
821 // Accumulate transformations (Algol's ortran).
// Start V as the identity, then fold in the stored reflectors.
823 for (int i = 0; i < n; i++) {
824 for (int j = 0; j < n; j++) {
825 V[i][j] = (i == j ? 1.0 : 0.0);
829 for (int m = high - 1; m >= low + 1; m--) {
830 if (H[m][m - 1] != 0.0) {
831 for (int i = m + 1; i <= high; i++) {
832 ort[i] = H[i][m - 1];
834 for (int j = m; j <= high; j++) {
836 for (int i = m; i <= high; i++) {
837 g += ort[i] * V[i][j];
839 // Double division avoids possible underflow
840 g = (g / ort[m]) / H[m][m - 1];
841 for (int i = m; i <= high; i++) {
842 V[i][j] += g * ort[i];
849 // Releases all internal working memory.
851 // releases the working data
855 for (int i = 0; i < n; i++) {
863 // Computes the Eigenvalue Decomposition for a matrix given in H.
865 // Allocate memory for the working data.
866 V = alloc_2d<double> (n, n, 0.0);
867 d = alloc_1d<double> (n);
868 e = alloc_1d<double> (n);
869 ort = alloc_1d<double> (n);
870 // Reduce to Hessenberg form.
872 // Reduce Hessenberg to real Schur form.
874 // Copy eigenvalues to OpenCV Matrix.
875 _eigenvalues.create(1, n, CV_64FC1);
876 for (int i = 0; i < n; i++) {
877 _eigenvalues.at<double> (0, i) = d[i];
879 // Copy eigenvectors to OpenCV Matrix.
880 _eigenvectors.create(n, n, CV_64FC1);
881 for (int i = 0; i < n; i++)
882 for (int j = 0; j < n; j++)
883 _eigenvectors.at<double> (i, j) = V[i][j];
884 // Deallocate the memory by releasing all internal working data.
// Default constructor.
889 EigenvalueDecomposition()
892 // Initializes & computes the Eigenvalue Decomposition for a general matrix
893 // given in src. This function is a port of the EigenvalueSolver in JAMA,
894 // which has been released to public domain by The MathWorks and the
895 // National Institute of Standards and Technology (NIST).
896 EigenvalueDecomposition(InputArray src) {
900 // This function computes the Eigenvalue Decomposition for a general matrix
901 // given in src. This function is a port of the EigenvalueSolver in JAMA,
902 // which has been released to public domain by The MathWorks and the
903 // National Institute of Standards and Technology (NIST).
904 void compute(InputArray src)
906 if(isSymmetric(src)) {
907 // Fall back to OpenCV for a symmetric matrix!
908 cv::eigen(src, _eigenvalues, _eigenvectors);
911 // Convert the given input matrix to double. Is there any way to
912 // prevent allocating the temporary memory? Only used for copying
913 // into working memory and deallocated after.
914 src.getMat().convertTo(tmp, CV_64FC1);
915 // Get dimension of the matrix.
917 // Allocate the matrix data to work on.
918 this->H = alloc_2d<double> (n, n);
919 // Now safely copy the data.
920 for (int i = 0; i < tmp.rows; i++) {
921 for (int j = 0; j < tmp.cols; j++) {
922 this->H[i][j] = tmp.at<double>(i, j);
925 // Deallocates the temporary matrix before computing.
927 // Performs the eigenvalue decomposition of H.
// Empty destructor: internal buffers are released at the end of compute().
932 ~EigenvalueDecomposition() {}
934 // Returns the eigenvalues of the Eigenvalue Decomposition.
935 Mat eigenvalues() { return _eigenvalues; }
936 // Returns the eigenvectors of the Eigenvalue Decomposition.
937 Mat eigenvectors() { return _eigenvectors; }
941 //------------------------------------------------------------------------------
942 // Linear Discriminant Analysis implementation
943 //------------------------------------------------------------------------------
944 void LDA::save(const string& filename) const {
945 FileStorage fs(filename, FileStorage::WRITE);
946 if (!fs.isOpened()) {
947 CV_Error(CV_StsError, "File can't be opened for writing!");
953 // Deserializes this object from a given filename.
954 void LDA::load(const string& filename) {
955 FileStorage fs(filename, FileStorage::READ);
957 CV_Error(CV_StsError, "File can't be opened for writing!");
962 // Serializes this object to a given FileStorage.
963 void LDA::save(FileStorage& fs) const {
965 fs << "num_components" << _num_components;
966 fs << "eigenvalues" << _eigenvalues;
967 fs << "eigenvectors" << _eigenvectors;
970 // Deserializes this object from a given FileStorage.
971 void LDA::load(const FileStorage& fs) {
973 fs["num_components"] >> _num_components;
974 fs["eigenvalues"] >> _eigenvalues;
975 fs["eigenvectors"] >> _eigenvectors;
978 void LDA::lda(InputArrayOfArrays _src, InputArray _lbls) {
980 Mat src = _src.getMat();
982 // safely copy the labels
984 Mat tmp = _lbls.getMat();
985 for(unsigned int i = 0; i < tmp.total(); i++) {
986 labels.push_back(tmp.at<int>(i));
989 // turn into row sampled matrix
991 // ensure working matrix is double precision
992 src.convertTo(data, CV_64FC1);
993 // maps the labels, so they're ascending: [0,1,...,C]
994 vector<int> mapped_labels(labels.size());
995 vector<int> num2label = remove_dups(labels);
996 map<int, int> label2num;
997 for (int i = 0; i < (int)num2label.size(); i++)
998 label2num[num2label[i]] = i;
999 for (size_t i = 0; i < labels.size(); i++)
1000 mapped_labels[i] = label2num[labels[i]];
1001 // get sample size, dimension
1004 // number of unique labels
1005 int C = (int)num2label.size();
1006 // we can't do a LDA on one class, what do you
1007 // want to separate from each other then?
1009 string error_message = "At least two classes are needed to perform a LDA. Reason: Only one class was given!";
1010 CV_Error(CV_StsBadArg, error_message);
1012 // throw error if less labels, than samples
1013 if (labels.size() != static_cast<size_t>(N)) {
1014 string error_message = format("The number of samples must equal the number of labels. Given %d labels, %d samples. ", labels.size(), N);
1015 CV_Error(CV_StsBadArg, error_message);
1017 // warn if within-classes scatter matrix becomes singular
1019 cout << "Warning: Less observations than feature dimension given!"
1020 << "Computation will probably fail."
1023 // clip number of components to be a valid number
1024 if ((_num_components <= 0) || (_num_components > (C - 1))) {
1025 _num_components = (C - 1);
1027 // holds the mean over all classes
1028 Mat meanTotal = Mat::zeros(1, D, data.type());
1029 // holds the mean for each class
1030 vector<Mat> meanClass(C);
1031 vector<int> numClass(C);
1033 for (int i = 0; i < C; i++) {
1035 meanClass[i] = Mat::zeros(1, D, data.type()); //! Dx1 image vector
1038 for (int i = 0; i < N; i++) {
1039 Mat instance = data.row(i);
1040 int classIdx = mapped_labels[i];
1041 add(meanTotal, instance, meanTotal);
1042 add(meanClass[classIdx], instance, meanClass[classIdx]);
1043 numClass[classIdx]++;
1045 // calculate total mean
1046 meanTotal.convertTo(meanTotal, meanTotal.type(), 1.0 / static_cast<double> (N));
1047 // calculate class means
1048 for (int i = 0; i < C; i++) {
1049 meanClass[i].convertTo(meanClass[i], meanClass[i].type(), 1.0 / static_cast<double> (numClass[i]));
1051 // subtract class means
1052 for (int i = 0; i < N; i++) {
1053 int classIdx = mapped_labels[i];
1054 Mat instance = data.row(i);
1055 subtract(instance, meanClass[classIdx], instance);
1057 // calculate within-classes scatter
1058 Mat Sw = Mat::zeros(D, D, data.type());
1059 mulTransposed(data, Sw, true);
1060 // calculate between-classes scatter
1061 Mat Sb = Mat::zeros(D, D, data.type());
1062 for (int i = 0; i < C; i++) {
1064 subtract(meanClass[i], meanTotal, tmp);
1065 mulTransposed(tmp, tmp, true);
1072 gemm(Swi, Sb, 1.0, Mat(), 0.0, M);
1073 EigenvalueDecomposition es(M);
1074 _eigenvalues = es.eigenvalues();
1075 _eigenvectors = es.eigenvectors();
1076 // reshape eigenvalues, so they are stored by column
1077 _eigenvalues = _eigenvalues.reshape(1, 1);
1078 // get sorted indices descending by their eigenvalue
1079 vector<int> sorted_indices = argsort(_eigenvalues, false);
1080 // now sort eigenvalues and eigenvectors accordingly
1081 _eigenvalues = sortMatrixColumnsByIndices(_eigenvalues, sorted_indices);
1082 _eigenvectors = sortMatrixColumnsByIndices(_eigenvectors, sorted_indices);
1083 // and now take only the num_components and we're out!
1084 _eigenvalues = Mat(_eigenvalues, Range::all(), Range(0, _num_components));
1085 _eigenvectors = Mat(_eigenvectors, Range::all(), Range(0, _num_components));
1088 void LDA::compute(InputArrayOfArrays _src, InputArray _lbls) {
1089 switch(_src.kind()) {
1090 case _InputArray::STD_VECTOR_MAT:
1091 lda(asRowMatrix(_src, CV_64FC1), _lbls);
1093 case _InputArray::MAT:
1094 lda(_src.getMat(), _lbls);
1097 string error_message= format("InputArray Datatype %d is not supported.", _src.kind());
1098 CV_Error(CV_StsBadArg, error_message);
1103 // Projects samples into the LDA subspace.
1104 Mat LDA::project(InputArray src) {
1105 return subspaceProject(_eigenvectors, Mat(), _dataAsRow ? src : src.getMat().t());
1108 // Reconstructs projections from the LDA subspace.
1109 Mat LDA::reconstruct(InputArray src) {
1110 return subspaceReconstruct(_eigenvectors, Mat(), _dataAsRow ? src : src.getMat().t());