1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "precomp.hpp"
44 #include "opencv2/core/gpumat.hpp"
45 #include "opencv2/core/opengl_interop.hpp"
46 #include "opencv2/core/opengl_interop_deprecated.hpp"
48 /****************************************************************************************\
49 *                 Mat: creation, ROI handling, and array-proxy operations                *
50 \****************************************************************************************/
// Swaps the headers of two Mat objects in O(1): flags, geometry, data pointers,
// refcount pointer, allocator, and the size/step descriptors. No pixel data moves.
54 void swap( Mat& a, Mat& b )
56 std::swap(a.flags, b.flags);
57 std::swap(a.dims, b.dims);
58 std::swap(a.rows, b.rows);
59 std::swap(a.cols, b.cols);
60 std::swap(a.data, b.data);
61 std::swap(a.refcount, b.refcount);
62 std::swap(a.datastart, b.datastart);
63 std::swap(a.dataend, b.dataend);
64 std::swap(a.datalimit, b.datalimit);
65 std::swap(a.allocator, b.allocator);
67 std::swap(a.size.p, b.size.p);
68 std::swap(a.step.p, b.step.p);
69 std::swap(a.step.buf[0], b.step.buf[0]);
70 std::swap(a.step.buf[1], b.step.buf[1]);
// Fix-up: for 2D matrices step.p points at the inline step.buf; after swapping
// the pointers each object must point at its OWN buffer, not the other's.
72 if( a.step.p == b.step.buf )
74 a.step.p = a.step.buf;
78 if( b.step.p == a.step.buf )
80 b.step.p = b.step.buf;
// Installs a new dimensionality/size/step layout into Mat `m`.
// For >2 dims, size/step arrays are heap-allocated in one fastMalloc block
// (steps first, then sizes, with size.p[-1] holding the dim count).
// With `autoSteps` the steps are derived from element size; otherwise `_steps`
// is copied for all but the innermost dimension, which is always `esz`.
// NOTE(review): several intervening source lines are elided in this extract
// (loop body braces, declaration of `i`/`s`, overflow-check placement).
86 static inline void setSize( Mat& m, int _dims, const int* _sz,
87 const size_t* _steps, bool autoSteps=false )
89 CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM );
// Release any previously heap-allocated descriptor and fall back to the
// inline 2D buffers before (possibly) allocating a new one.
92 if( m.step.p != m.step.buf )
95 m.step.p = m.step.buf;
100 m.step.p = (size_t*)fastMalloc(_dims*sizeof(m.step.p[0]) + (_dims+1)*sizeof(m.size.p[0]));
101 m.size.p = (int*)(m.step.p + _dims) + 1;
102 m.size.p[-1] = _dims;   // stash the dimension count just before the sizes
103 m.rows = m.cols = -1;   // rows/cols are meaningless for n-dim matrices
111 size_t esz = CV_ELEM_SIZE(m.flags), total = esz;
// Walk dimensions from innermost to outermost, filling steps and checking
// that the running total number of bytes fits in size_t.
113 for( i = _dims-1; i >= 0; i-- )
120 m.step.p[i] = i < _dims-1 ? _steps[i] : esz;
124 int64 total1 = (int64)total*s;
125 if( (uint64)total1 != (size_t)total1 )
126 CV_Error( CV_StsOutOfRange, "The total matrix size does not fit to \"size_t\" type" );
127 total = (size_t)total1;
// Recomputes Mat::CONTINUOUS_FLAG: the matrix is continuous when the non-trivial
// dimensions are packed back-to-back (step[j-1] == step[j]*size[j] for all inner
// dims) and the total byte count fits in size_t.
// NOTE(review): the `break` statements inside both scan loops are elided in
// this extract; `i` scans for the first dim with size > 1, `j` scans steps.
139 static void updateContinuityFlag(Mat& m)
142 for( i = 0; i < m.dims; i++ )
148 for( j = m.dims-1; j > i; j-- )
150 if( m.step[j]*m.size[j] < m.step[j-1] )
154 uint64 t = (uint64)m.step[0]*m.size[0];
155 if( j <= i && t == (size_t)t )
156 m.flags |= Mat::CONTINUOUS_FLAG;
158 m.flags &= ~Mat::CONTINUOUS_FLAG;
// Finishes header construction after size/step/data are set: refreshes the
// continuity flag and computes dataend/datalimit from the outermost extent.
// (`d` is declared on a line elided from this extract — presumably m.dims; TODO confirm.)
161 static void finalizeHdr(Mat& m)
163 updateContinuityFlag(m);
166 m.rows = m.cols = -1;
169 m.datalimit = m.datastart + m.size[0]*m.step[0];
// dataend is the last addressable byte + 1: innermost extent plus the
// (size-1)*step contribution of every outer dimension.
172 m.dataend = m.data + m.size[d-1]*m.step[d-1];
173 for( int i = 0; i < d-1; i++ )
174 m.dataend += (m.size[i] - 1)*m.step[i];
177 m.dataend = m.datalimit;
// When there is no data at all, both sentinels are cleared.
180 m.dataend = m.datalimit = 0;
// Allocates (or re-uses) storage for a d-dimensional matrix of the given type.
// If the current buffer already has exactly the requested shape and type, the
// call is a no-op (early returns are on lines elided from this extract).
// Allocation goes through `allocator` when set (e.g. the Tegra allocator),
// otherwise through fastMalloc with the refcount stored after the pixel data.
184 void Mat::create(int d, const int* _sizes, int _type)
187 CV_Assert(0 <= d && d <= CV_MAX_DIM && _sizes);
188 _type = CV_MAT_TYPE(_type);
// Shape/type match check against the existing buffer.
190 if( data && (d == dims || (d == 1 && dims <= 2)) && _type == type() )
192 if( d == 2 && rows == _sizes[0] && cols == _sizes[1] )
194 for( i = 0; i < d; i++ )
195 if( size[i] != _sizes[i] )
197 if( i == d && (d > 1 || size[1] == 1))
204 flags = (_type & CV_MAT_TYPE_MASK) | MAGIC_VAL;
205 setSize(*this, d, _sizes, 0, true);
// Tegra-specific allocator hook (TODO confirm: guarded by HAVE_TEGRA_OPTIMIZATION
// in lines elided here).
210 if( !allocator || allocator == tegra::getAllocator() ) allocator = tegra::getAllocator(d, _sizes, _type);
// Plain path: one fastMalloc block holding pixels + trailing refcount int.
214 size_t totalsize = alignSize(step.p[0]*size.p[0], (int)sizeof(*refcount));
215 data = datastart = (uchar*)fastMalloc(totalsize + (int)sizeof(*refcount));
216 refcount = (int*)(data + totalsize);
// Custom-allocator path; on failure (elided lines) it presumably falls back —
// TODO confirm against the full source.
224 allocator->allocate(dims, size, _type, refcount, datastart, data, step.p);
225 CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
229 size_t totalSize = alignSize(step.p[0]*size.p[0], (int)sizeof(*refcount));
230 data = datastart = (uchar*)fastMalloc(totalSize + (int)sizeof(*refcount));
231 refcount = (int*)(data + totalSize);
235 allocator->allocate(dims, size, _type, refcount, datastart, data, step.p);
236 CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
// Copies the dimensionality and per-dimension size/step values of `m` into
// this header (the per-element copy body is on lines elided from this extract).
244 void Mat::copySize(const Mat& m)
246 setSize(*this, m.dims, 0, 0);
247 for( int i = 0; i < dims; i++ )
// Releases the pixel buffer: delegates to the custom allocator when one is
// set; the fastFree path for the default allocation is on elided lines.
254 void Mat::deallocate()
257 allocator->deallocate(refcount, datastart, data);
260 CV_DbgAssert(refcount != 0);
// Row/column range constructor: builds a submatrix header that shares data
// with `m`. For >2-dim inputs it delegates to the Range* constructor (the
// delegation call itself is on lines elided from this extract).
266 Mat::Mat(const Mat& m, const Range& _rowRange, const Range& _colRange) : size(&rows)
269 CV_Assert( m.dims >= 2 );
272 AutoBuffer<Range> rs(m.dims);
275 for( int i = 2; i < m.dims; i++ )
276 rs[i] = Range::all();
// Narrow the row span: shift `data` by whole rows and mark as submatrix.
282 if( _rowRange != Range::all() && _rowRange != Range(0,rows) )
284 CV_Assert( 0 <= _rowRange.start && _rowRange.start <= _rowRange.end && _rowRange.end <= m.rows );
285 rows = _rowRange.size();
286 data += step*_rowRange.start;
287 flags |= SUBMATRIX_FLAG;
// Narrow the column span: shift `data` by elements; a narrower row can no
// longer be continuous.
290 if( _colRange != Range::all() && _colRange != Range(0,cols) )
292 CV_Assert( 0 <= _colRange.start && _colRange.start <= _colRange.end && _colRange.end <= m.cols );
293 cols = _colRange.size();
294 data += _colRange.start*elemSize();
295 flags &= cols < m.cols ? ~CONTINUOUS_FLAG : -1;
296 flags |= SUBMATRIX_FLAG;
300 flags |= CONTINUOUS_FLAG;  // a single-row view is trivially continuous
302 if( rows <= 0 || cols <= 0 )
// Rectangular ROI constructor: shares data with `m`, offsetting `data` to
// roi's top-left corner and bumping the shared refcount.
310 Mat::Mat(const Mat& m, const Rect& roi)
311 : flags(m.flags), dims(2), rows(roi.height), cols(roi.width),
312 data(m.data + roi.y*m.step[0]), refcount(m.refcount),
313 datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit),
314 allocator(m.allocator), size(&rows)
316 CV_Assert( m.dims <= 2 );
// A view narrower than the parent cannot be continuous; a 1-row view always is.
317 flags &= roi.width < m.cols ? ~CONTINUOUS_FLAG : -1;
318 flags |= roi.height == 1 ? CONTINUOUS_FLAG : 0;
320 size_t esz = CV_ELEM_SIZE(flags);
// x-offset adjustment of `data` occurs on a line elided from this extract.
322 CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols &&
323 0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows );
325 CV_XADD(refcount, 1);  // the ROI shares ownership of the parent's buffer
326 if( roi.width < m.cols || roi.height < m.rows )
327 flags |= SUBMATRIX_FLAG;
329 step[0] = m.step[0]; step[1] = esz;
331 if( rows <= 0 || cols <= 0 )
// User-data constructor: wraps an external buffer without copying or owning it
// (refcount stays null on elided lines), using caller-supplied steps if given.
339 Mat::Mat(int _dims, const int* _sizes, int _type, void* _data, const size_t* _steps) : size(&rows)
342 flags |= CV_MAT_TYPE(_type);
343 data = datastart = (uchar*)_data;
344 setSize(*this, _dims, _sizes, _steps, true);
// n-dimensional range constructor: validates each per-dimension Range, then
// narrows sizes and offsets `data` accordingly; the view shares m's buffer.
// (`d`/`r` declarations and the initial header copy are on elided lines.)
349 Mat::Mat(const Mat& m, const Range* ranges) : size(&rows)
355 for( i = 0; i < d; i++ )
358 CV_Assert( r == Range::all() || (0 <= r.start && r.start < r.end && r.end <= m.size[i]) );
361 for( i = 0; i < d; i++ )
364 if( r != Range::all() && r != Range(0, size.p[i]))
366 size.p[i] = r.end - r.start;
367 data += r.start*step.p[i];
368 flags |= SUBMATRIX_FLAG;
371 updateContinuityFlag(*this);
// Legacy CvMatND wrapper: copies the dim sizes/steps into local arrays and
// installs them via setSize; without copyData the new Mat aliases m's buffer
// (the copyData branch is on lines elided from this extract).
375 Mat::Mat(const CvMatND* m, bool copyData) : size(&rows)
380 data = datastart = m->data.ptr;
381 flags |= CV_MAT_TYPE(m->type);
382 int _sizes[CV_MAX_DIM];
383 size_t _steps[CV_MAX_DIM];
386 for( i = 0; i < d; i++ )
388 _sizes[i] = m->dim[i].size;
389 _steps[i] = m->dim[i].step;
392 setSize(*this, d, _sizes, _steps);
// Returns a column-vector view of the d-th diagonal (d>0: below the main
// diagonal, d<0: above — the data-offset lines for each case are elided).
// The trick: the view's step[0] is parent step + element size, so each
// successive element moves one row down AND one column right.
403 Mat Mat::diag(int d) const
405 CV_Assert( dims <= 2 );
407 size_t esz = elemSize();
412 len = std::min(cols - d, rows);
417 len = std::min(rows + d, cols);
420 CV_DbgAssert( len > 0 );
422 m.size[0] = m.rows = len;
423 m.size[1] = m.cols = 1;
424 m.step[0] += (len > 1 ? esz : 0);
// A multi-element diagonal is never continuous; a single element always is.
427 m.flags &= ~CONTINUOUS_FLAG;
429 m.flags |= CONTINUOUS_FLAG;
431 if( size() != Size(1,1) )
432 m.flags |= SUBMATRIX_FLAG;
// Legacy CvMat wrapper. Without copyData the header aliases m's buffer;
// with copyData a temporary aliasing Mat is deep-copied into *this.
438 Mat::Mat(const CvMat* m, bool copyData) : size(&rows)
447 flags = MAGIC_VAL + (m->type & (CV_MAT_TYPE_MASK|CV_MAT_CONT_FLAG));
451 data = datastart = m->data.ptr;
452 size_t esz = CV_ELEM_SIZE(m->type), minstep = cols*esz, _step = m->step;
// dataend excludes the row-padding of the final row.
455 datalimit = datastart + _step*rows;
456 dataend = datalimit - _step + minstep;
457 step[0] = _step; step[1] = esz;
// copyData branch: reset pointers, then clone via a temporary aliasing header.
461 data = datastart = dataend = 0;
462 Mat(m->rows, m->cols, m->type, m->data.ptr, m->step).copyTo(*this);
// Legacy IplImage wrapper. Handles the full-image case and the ROI case
// (including a selected COI plane for planar images). With copyData and a
// non-plane COI, the chosen channel is extracted via mixChannels.
467 Mat::Mat(const IplImage* img, bool copyData) : size(&rows)
475 CV_DbgAssert(CV_IS_IMAGE(img) && img->imageData != 0);
477 int imgdepth = IPL2CV_DEPTH(img->depth);
479 step[0] = img->widthStep;
// No-ROI path: wrap the whole image.
483 CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL);
484 flags = MAGIC_VAL + CV_MAKETYPE(imgdepth, img->nChannels);
485 rows = img->height; cols = img->width;
486 datastart = data = (uchar*)img->imageData;
487 esz = CV_ELEM_SIZE(flags);
// ROI path: a planar image with COI selects a single-channel plane view.
491 CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL || img->roi->coi != 0);
492 bool selectedPlane = img->roi->coi && img->dataOrder == IPL_DATA_ORDER_PLANE;
493 flags = MAGIC_VAL + CV_MAKETYPE(imgdepth, selectedPlane ? 1 : img->nChannels);
494 rows = img->roi->height; cols = img->roi->width;
495 esz = CV_ELEM_SIZE(flags);
// Offset to the selected plane (if any) plus the ROI's y/x offsets.
496 data = datastart = (uchar*)img->imageData +
497 (selectedPlane ? (img->roi->coi - 1)*step*img->height : 0) +
498 img->roi->yOffset*step[0] + img->roi->xOffset*esz;
500 datalimit = datastart + step.p[0]*rows;
501 dataend = datastart + step.p[0]*(rows-1) + esz*cols;
502 flags |= (cols*esz == step.p[0] || rows == 1 ? CONTINUOUS_FLAG : 0);
// copyData path (elided lines wrap *this in a temporary `m` and clone);
// a pixel-order COI needs mixChannels to pull out the single channel.
509 if( !img->roi || !img->roi->coi ||
510 img->dataOrder == IPL_DATA_ORDER_PLANE)
514 int ch[] = {img->roi->coi - 1, 0};
515 create(m.rows, m.cols, m.type());
516 mixChannels(&m, 1, this, 1, ch, 1);
// Converts a 2D Mat header to an IplImage header sharing the same pixel data
// (no copy; the returned header's lifetime is bound to this Mat's buffer).
522 Mat::operator IplImage() const
524 CV_Assert( dims <= 2 );
526 cvInitImageHeader(&img, size(), cvIplDepth(flags), channels());
527 cvSetData(&img, data, (int)step[0]);
// Removes the last `nelems` "rows" (outermost-dimension entries). Submatrices
// are re-sliced via rowRange; owned matrices just shrink size[0]/dataend.
532 void Mat::pop_back(size_t nelems)
534 CV_Assert( nelems <= (size_t)size.p[0] );
537 *this = rowRange(0, size.p[0] - (int)nelems);
540 size.p[0] -= (int)nelems;
541 dataend -= nelems*step.p[0];
// Historic shortcut kept commented-out in the original source:
542 /*if( size.p[0] <= 1 )
545 flags |= CONTINUOUS_FLAG;
547 updateContinuityFlag(*this);
// Appends one raw element (row) to the matrix, growing the buffer ~1.5x via
// reserve() when capacity is exhausted or when this is a submatrix view.
// (`r` = current row count, declared on an elided line.)
553 void Mat::push_back_(const void* elem)
556 if( isSubmatrix() || dataend + step.p[0] > datalimit )
557 reserve( std::max(r + 1, (r*3+1)/2) );
559 size_t esz = elemSize();
560 memcpy(data + r*step.p[0], elem, esz);
562 dataend += step.p[0];
// Padded rows mean the matrix is no longer continuous.
563 if( esz < step.p[0] )
564 flags &= ~CONTINUOUS_FLAG;
// Ensures capacity for at least `nelems` rows without changing the logical
// row count: allocates a larger matrix, copies the existing rows in, and
// swaps (the copyTo/swap lines are elided in this extract). Small requests
// are padded up so the new buffer is at least MIN_SIZE bytes.
567 void Mat::reserve(size_t nelems)
569 const size_t MIN_SIZE = 64;
571 CV_Assert( (int)nelems >= 0 );
// Fast path: already enough room and not a view.
572 if( !isSubmatrix() && data + step.p[0]*nelems <= datalimit )
577 if( (size_t)r >= nelems )
// Temporarily set size[0] to the requested capacity to size the new buffer.
580 size.p[0] = std::max((int)nelems, 1);
581 size_t newsize = total()*elemSize();
583 if( newsize < MIN_SIZE )
584 size.p[0] = (int)((MIN_SIZE + newsize - 1)*nelems/newsize);
586 Mat m(dims, size.p, type());
590 Mat mpart = m.rowRange(0, r);
// Restore the logical extent: only r rows are "used".
596 dataend = data + step.p[0]*r;
// Changes the logical row count to `nelems`, reserving more storage first
// when growing past capacity (the reserve() call is on an elided line).
600 void Mat::resize(size_t nelems)
602 int saveRows = size.p[0];
603 if( saveRows == (int)nelems )
605 CV_Assert( (int)nelems >= 0 );
607 if( isSubmatrix() || data + step.p[0]*nelems > datalimit )
610 size.p[0] = (int)nelems;
611 dataend += (size.p[0] - saveRows)*step.p[0];
// Continuity update deliberately disabled in the original source:
613 //updateContinuityFlag(*this);
// Resize overload that fills any newly added rows with the scalar `s`
// (the plain resize call and the part-fill are on elided lines).
617 void Mat::resize(size_t nelems, const Scalar& s)
619 int saveRows = size.p[0];
622 if( size.p[0] > saveRows )
624 Mat part = rowRange(saveRows, size.p[0]);
// Appends all rows of `elems` to this matrix. An empty destination simply
// clones the source; otherwise sizes (except dim 0) and type must match.
// Data is copied with one memcpy when both are continuous, else row-range
// copyTo (on an elided line) is used.
629 void Mat::push_back(const Mat& elems)
631 int r = size.p[0], delta = elems.size.p[0];
642 *this = elems.clone();
// Size check trick: temporarily set size[0] equal so operator== compares
// only the remaining dims, then restore (restore line elided).
646 size.p[0] = elems.size.p[0];
647 bool eq = size == elems.size;
650 CV_Error(CV_StsUnmatchedSizes, "");
651 if( type() != elems.type() )
652 CV_Error(CV_StsUnmatchedFormats, "");
654 if( isSubmatrix() || dataend + step.p[0]*delta > datalimit )
655 reserve( std::max(r + delta, (r*3+1)/2) );
658 dataend += step.p[0]*delta;
660 //updateContinuityFlag(*this);
662 if( isContinuous() && elems.isContinuous() )
663 memcpy(data + r*step.p[0], elems.data, elems.total()*elems.elemSize());
666 Mat part = rowRange(r, r + delta);
// Converts any legacy CvArr (CvMat, CvMatND, IplImage, CvSeq) into a cv::Mat.
// coiMode == 0 forbids images with an active COI. Single-block sequences can
// be wrapped without copying; fragmented ones are flattened via
// cvCvtSeqToArray into a fresh buffer.
672 Mat cvarrToMat(const CvArr* arr, bool copyData,
673 bool /*allowND*/, int coiMode)
678 return Mat((const CvMat*)arr, copyData );
679 if( CV_IS_MATND(arr) )
680 return Mat((const CvMatND*)arr, copyData );
681 if( CV_IS_IMAGE(arr) )
683 const IplImage* iplimg = (const IplImage*)arr;
684 if( coiMode == 0 && iplimg->roi && iplimg->roi->coi > 0 )
685 CV_Error(CV_BadCOI, "COI is not supported by the function");
686 return Mat(iplimg, copyData);
// CvSeq path (the CV_IS_SEQ check is on an elided line).
690 CvSeq* seq = (CvSeq*)arr;
691 CV_Assert(seq->total > 0 && CV_ELEM_SIZE(seq->flags) == seq->elem_size);
// A sequence whose storage is a single block can be aliased directly.
692 if(!copyData && seq->first->next == seq->first)
693 return Mat(seq->total, 1, CV_MAT_TYPE(seq->flags), seq->first->data);
694 Mat buf(seq->total, 1, CV_MAT_TYPE(seq->flags));
695 cvCvtSeqToArray(seq, buf.data, CV_WHOLE_SEQ);
698 CV_Error(CV_StsBadArg, "Unknown array type");
// Recovers the size of the parent matrix and this ROI's offset inside it,
// purely from pointer arithmetic on data/datastart/dataend.
702 void Mat::locateROI( Size& wholeSize, Point& ofs ) const
704 CV_Assert( dims <= 2 && step[0] > 0 );
705 size_t esz = elemSize(), minstep;
706 ptrdiff_t delta1 = data - datastart, delta2 = dataend - datastart;
// Offset = bytes-from-start decomposed into whole rows (y) + elements (x).
712 ofs.y = (int)(delta1/step[0]);
713 ofs.x = (int)((delta1 - step[0]*ofs.y)/esz);
714 CV_DbgAssert( data == datastart + ofs.y*step[0] + ofs.x*esz );
716 minstep = (ofs.x + cols)*esz;
717 wholeSize.height = (int)((delta2 - minstep)/step[0] + 1);
718 wholeSize.height = std::max(wholeSize.height, ofs.y + rows);
719 wholeSize.width = (int)((delta2 - step*(wholeSize.height-1))/esz);
720 wholeSize.width = std::max(wholeSize.width, ofs.x + cols);
// Grows/shrinks the current ROI by the given deltas on each side, clamped to
// the parent matrix bounds discovered via locateROI().
723 Mat& Mat::adjustROI( int dtop, int dbottom, int dleft, int dright )
725 CV_Assert( dims <= 2 && step[0] > 0 );
726 Size wholeSize; Point ofs;
727 size_t esz = elemSize();
728 locateROI( wholeSize, ofs );
// Clamp the requested expansion to [0, wholeSize].
729 int row1 = std::max(ofs.y - dtop, 0), row2 = std::min(ofs.y + rows + dbottom, wholeSize.height);
730 int col1 = std::max(ofs.x - dleft, 0), col2 = std::min(ofs.x + cols + dright, wholeSize.width);
731 data += (row1 - ofs.y)*step + (col1 - ofs.x)*esz;
732 rows = row2 - row1; cols = col2 - col1;
733 size.p[0] = rows; size.p[1] = cols;
734 if( esz*cols == step[0] || rows == 1 )
735 flags |= CONTINUOUS_FLAG;
737 flags &= ~CONTINUOUS_FLAG;
// Extracts one channel of a legacy array into `_ch`. coi < 0 (check on an
// elided line) means "use the image's own COI setting".
743 void cv::extractImageCOI(const CvArr* arr, OutputArray _ch, int coi)
745 Mat mat = cvarrToMat(arr, false, true, 1);
746 _ch.create(mat.dims, mat.size, mat.depth());
747 Mat ch = _ch.getMat();
750 CV_Assert( CV_IS_IMAGE(arr) );
751 coi = cvGetImageCOI((const IplImage*)arr)-1;  // IplImage COI is 1-based
753 CV_Assert(0 <= coi && coi < mat.channels());
754 int _pairs[] = { coi, 0 };
755 mixChannels( &mat, 1, &ch, 1, _pairs, 1 );
// Inverse of extractImageCOI: writes single-channel `_ch` into channel `coi`
// of the legacy array (coi < 0 again falls back to the image's COI).
758 void cv::insertImageCOI(InputArray _ch, CvArr* arr, int coi)
760 Mat ch = _ch.getMat(), mat = cvarrToMat(arr, false, true, 1);
763 CV_Assert( CV_IS_IMAGE(arr) );
764 coi = cvGetImageCOI((const IplImage*)arr)-1;
766 CV_Assert(ch.size == mat.size && ch.depth() == mat.depth() && 0 <= coi && coi < mat.channels());
767 int _pairs[] = { 0, coi };
768 mixChannels( &ch, 1, &mat, 1, _pairs, 1 );
// Returns a header (`hdr`, constructed on an elided line) reinterpreting the
// same data with a different channel count and/or row count. No data moves;
// all divisibility constraints are checked and reported as errors.
774 Mat Mat::reshape(int new_cn, int new_rows) const
// n-dim fast path: only the channel count of the innermost dim changes.
779 if( dims > 2 && new_rows == 0 && new_cn != 0 && size[dims-1]*cn % new_cn == 0 )
781 hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
782 hdr.step[dims-1] = CV_ELEM_SIZE(hdr.flags);
783 hdr.size[dims-1] = hdr.size[dims-1]*cn / new_cn;
787 CV_Assert( dims <= 2 );
792 int total_width = cols * cn;
// If the requested cn does not divide a row, infer a new row count instead.
794 if( (new_cn > total_width || total_width % new_cn != 0) && new_rows == 0 )
795 new_rows = rows * total_width / new_cn;
797 if( new_rows != 0 && new_rows != rows )
799 int total_size = total_width * rows;
800 if( !isContinuous() )
801 CV_Error( CV_BadStep,
802 "The matrix is not continuous, thus its number of rows can not be changed" );
804 if( (unsigned)new_rows > (unsigned)total_size )
805 CV_Error( CV_StsOutOfRange, "Bad new number of rows" );
807 total_width = total_size / new_rows;
809 if( total_width * new_rows != total_size )
810 CV_Error( CV_StsBadArg, "The total number of matrix elements "
811 "is not divisible by the new number of rows" );
814 hdr.step[0] = total_width * elemSize1();
817 int new_width = total_width / new_cn;
819 if( new_width * new_cn != total_width )
820 CV_Error( CV_BadNumChannels,
821 "The total width is not divisible by the new number of channels" );
823 hdr.cols = new_width;
824 hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
825 hdr.step[1] = CV_ELEM_SIZE(hdr.flags);
// Returns the number of `_elemChannels`-tuples if this Mat can be viewed as a
// 1D vector of such tuples (accepting 1xN / Nx1 / Nx_elemChannels 2D layouts
// and 1xNxC / Nx1xC 3D layouts), otherwise -1.
830 int Mat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) const
832 return (depth() == _depth || _depth <= 0) &&
833 (isContinuous() || !_requireContinuous) &&
834 ((dims == 2 && (((rows == 1 || cols == 1) && channels() == _elemChannels) ||
835 (cols == _elemChannels && channels() == 1))) ||
836 (dims == 3 && channels() == 1 && size.p[2] == _elemChannels && (size.p[0] == 1 || size.p[1] == 1) &&
837 (isContinuous() || step.p[1] == step.p[2]*size.p[2])))
838 ? (int)(total()*channels()/_elemChannels) : -1;
// Converts a Scalar to a raw, depth-typed buffer: saturate-casts the first
// `cn` channels into `_buf`, then replicates/pads up to `unroll_to` entries
// (the replication assignments after each `for(; i < unroll_to; ...)` are on
// elided lines). Dispatch is a switch on `depth` (case labels elided).
842 void scalarToRawData(const Scalar& s, void* _buf, int type, int unroll_to)
844 int i, depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
// CV_8U
850 uchar* buf = (uchar*)_buf;
851 for(i = 0; i < cn; i++)
852 buf[i] = saturate_cast<uchar>(s.val[i]);
853 for(; i < unroll_to; i++)
// CV_8S
859 schar* buf = (schar*)_buf;
860 for(i = 0; i < cn; i++)
861 buf[i] = saturate_cast<schar>(s.val[i]);
862 for(; i < unroll_to; i++)
// CV_16U
868 ushort* buf = (ushort*)_buf;
869 for(i = 0; i < cn; i++)
870 buf[i] = saturate_cast<ushort>(s.val[i]);
871 for(; i < unroll_to; i++)
// CV_16S
877 short* buf = (short*)_buf;
878 for(i = 0; i < cn; i++)
879 buf[i] = saturate_cast<short>(s.val[i]);
880 for(; i < unroll_to; i++)
// CV_32S
886 int* buf = (int*)_buf;
887 for(i = 0; i < cn; i++)
888 buf[i] = saturate_cast<int>(s.val[i]);
889 for(; i < unroll_to; i++)
// CV_32F
895 float* buf = (float*)_buf;
896 for(i = 0; i < cn; i++)
897 buf[i] = saturate_cast<float>(s.val[i]);
898 for(; i < unroll_to; i++)
// CV_64F
904 double* buf = (double*)_buf;
905 for(i = 0; i < cn; i++)
906 buf[i] = saturate_cast<double>(s.val[i]);
907 for(; i < unroll_to; i++)
// Unknown depth
912 CV_Error(CV_StsUnsupportedFormat,"");
917 /*************************************************************************************************\
919 \*************************************************************************************************/
// _InputArray constructors: each stores only a kind tag in `flags` plus a
// type-erased pointer to the wrapped object — no data is copied.
921 _InputArray::_InputArray() : flags(0), obj(0) {}
// Destructor exported only when ABI may be broken (matching #endif elided).
922 #ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
923 _InputArray::~_InputArray() {}
925 _InputArray::_InputArray(const Mat& m) : flags(MAT), obj((void*)&m) {}
926 _InputArray::_InputArray(const vector<Mat>& vec) : flags(STD_VECTOR_MAT), obj((void*)&vec) {}
927 _InputArray::_InputArray(const double& val) : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&val), sz(Size(1,1)) {}
928 _InputArray::_InputArray(const MatExpr& expr) : flags(FIXED_TYPE + FIXED_SIZE + EXPR), obj((void*)&expr) {}
// Deprecated OpenGL wrappers intentionally store nothing.
930 _InputArray::_InputArray(const GlBuffer&) : flags(0), obj(0) {}
931 _InputArray::_InputArray(const GlTexture&) : flags(0), obj(0) {}
933 _InputArray::_InputArray(const gpu::GpuMat& d_mat) : flags(GPU_MAT), obj((void*)&d_mat) {}
934 _InputArray::_InputArray(const ogl::Buffer& buf) : flags(OPENGL_BUFFER), obj((void*)&buf) {}
935 _InputArray::_InputArray(const ogl::Texture2D& tex) : flags(OPENGL_TEXTURE), obj((void*)&tex) {}
// Materializes the wrapped object as a Mat header (no copy where possible).
// Dispatch on kind() happens on elided lines; `i` selects an element for
// vector-of-something kinds, -1 means "the whole object".
937 Mat _InputArray::getMat(int i) const
943 const Mat* m = (const Mat*)obj;
952 return (Mat)*((const MatExpr*)obj);  // EXPR: force evaluation
958 return Mat(sz, flags, obj);          // MATX: wrap fixed-size data
961 if( k == STD_VECTOR )
964 int t = CV_MAT_TYPE(flags);
965 const vector<uchar>& v = *(const vector<uchar>*)obj;
967 return !v.empty() ? Mat(size(), t, (void*)&v[0]) : Mat();
973 if( k == STD_VECTOR_VECTOR )
976 const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
977 CV_Assert( 0 <= i && i < (int)vv.size() );
978 const vector<uchar>& v = vv[i];
980 return !v.empty() ? Mat(size(i), t, (void*)&v[0]) : Mat();
985 CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
988 CV_Assert( k == STD_VECTOR_MAT );
989 //if( k == STD_VECTOR_MAT )
991 const vector<Mat>& v = *(const vector<Mat>*)obj;
992 CV_Assert( 0 <= i && i < (int)v.size() );
// Splits the wrapped object into a vector of Mat headers, one per outer
// element/row, without copying pixel data (except for EXPR, which must be
// evaluated first). Kind dispatch lines are elided from this extract.
999 void _InputArray::getMatVector(vector<Mat>& mv) const
// MAT: one header per outermost slice (rows for 2D, hyperplanes otherwise).
1005 const Mat& m = *(const Mat*)obj;
1006 int i, n = (int)m.size[0];
1009 for( i = 0; i < n; i++ )
1010 mv[i] = m.dims == 2 ? Mat(1, m.cols, m.type(), (void*)m.ptr(i)) :
1011 Mat(m.dims-1, &m.size[1], m.type(), (void*)m.ptr(i), &m.step[1]);
// EXPR: evaluate, then split rows (row extraction line elided).
1017 Mat m = *(const MatExpr*)obj;
1018 int i, n = m.size[0];
1021 for( i = 0; i < n; i++ )
// MATX: one header per row of the fixed-size data.
1028 size_t i, n = sz.height, esz = CV_ELEM_SIZE(flags);
1031 for( i = 0; i < n; i++ )
1032 mv[i] = Mat(1, sz.width, CV_MAT_TYPE(flags), (uchar*)obj + esz*sz.width*i);
1036 if( k == STD_VECTOR )
1038 const vector<uchar>& v = *(const vector<uchar>*)obj;
1040 size_t i, n = v.size(), esz = CV_ELEM_SIZE(flags);
1041 int t = CV_MAT_DEPTH(flags), cn = CV_MAT_CN(flags);
1044 for( i = 0; i < n; i++ )
1045 mv[i] = Mat(1, cn, t, (void*)(&v[0] + esz*i));
1055 if( k == STD_VECTOR_VECTOR )
1057 const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
1058 int i, n = (int)vv.size();
1059 int t = CV_MAT_TYPE(flags);
1062 for( i = 0; i < n; i++ )
1064 const vector<uchar>& v = vv[i];
1065 mv[i] = Mat(size(i), t, (void*)&v[0]);
1072 CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
1075 CV_Assert( k == STD_VECTOR_MAT );
1076 //if( k == STD_VECTOR_MAT )
1078 const vector<Mat>& v = *(const vector<Mat>*)obj;
1079 mv.resize(v.size());
1080 std::copy(v.begin(), v.end(), mv.begin());
// Deprecated accessor: always raises; the return line only silences compilers.
1085 GlBuffer _InputArray::getGlBuffer() const
1087 CV_Error(CV_StsNotImplemented, "This function in deprecated, do not use it");
1088 return GlBuffer(GlBuffer::ARRAY_BUFFER);
// Deprecated accessor: always raises (unreachable return is on an elided line).
1091 GlTexture _InputArray::getGlTexture() const
1093 CV_Error(CV_StsNotImplemented, "This function in deprecated, do not use it");
// Returns the wrapped GpuMat; only valid when kind() == GPU_MAT.
1097 gpu::GpuMat _InputArray::getGpuMat() const
1101 CV_Assert(k == GPU_MAT);
1103 const gpu::GpuMat* d_mat = (const gpu::GpuMat*)obj;
// Returns the wrapped OpenGL buffer; only valid for OPENGL_BUFFER kind.
1107 ogl::Buffer _InputArray::getOGlBuffer() const
1111 CV_Assert(k == OPENGL_BUFFER);
1113 const ogl::Buffer* gl_buf = (const ogl::Buffer*)obj;
// Returns the wrapped OpenGL texture; only valid for OPENGL_TEXTURE kind.
1117 ogl::Texture2D _InputArray::getOGlTexture2D() const
1121 CV_Assert(k == OPENGL_TEXTURE);
1123 const ogl::Texture2D* gl_tex = (const ogl::Texture2D*)obj;
// Extracts the container-kind tag (MAT, STD_VECTOR, GPU_MAT, ...) from flags.
1127 int _InputArray::kind() const
1129 return flags & KIND_MASK;
// Returns the 2D size of the wrapped object, or of element `i` for
// vector-of-X kinds (i < 0 = the container itself, reported as Nx1).
// Kind-dispatch and several guard lines are elided from this extract.
1132 Size _InputArray::size(int i) const
1139 return ((const Mat*)obj)->size();
1145 return ((const MatExpr*)obj)->size();
1154 if( k == STD_VECTOR )
// Aliasing both as vector<uchar> and vector<int>: equal byte/element counts
// imply a byte vector; otherwise divide byte count by the element size.
1157 const vector<uchar>& v = *(const vector<uchar>*)obj;
1158 const vector<int>& iv = *(const vector<int>*)obj;
1159 size_t szb = v.size(), szi = iv.size();
1160 return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1);
1166 if( k == STD_VECTOR_VECTOR )
1168 const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
1170 return vv.empty() ? Size() : Size((int)vv.size(), 1);
1171 CV_Assert( i < (int)vv.size() );
1172 const vector<vector<int> >& ivv = *(const vector<vector<int> >*)obj;
1174 size_t szb = vv[i].size(), szi = ivv[i].size();
1175 return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1);
1178 if( k == STD_VECTOR_MAT )
1180 const vector<Mat>& vv = *(const vector<Mat>*)obj;
1182 return vv.empty() ? Size() : Size((int)vv.size(), 1);
1183 CV_Assert( i < (int)vv.size() );
1185 return vv[i].size();
1188 if( k == OPENGL_BUFFER )
1191 const ogl::Buffer* buf = (const ogl::Buffer*)obj;
1195 if( k == OPENGL_TEXTURE )
1198 const ogl::Texture2D* tex = (const ogl::Texture2D*)obj;
1204 CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
1207 CV_Assert( k == GPU_MAT );
1208 //if( k == GPU_MAT )
1211 const gpu::GpuMat* d_mat = (const gpu::GpuMat*)obj;
1212 return d_mat->size();
// Total element count of the wrapped object (or of element `i`); falls back
// to size(i).area() for kinds without a dedicated path.
1216 size_t _InputArray::total(int i) const
1223 return ((const Mat*)obj)->total();
1226 if( k == STD_VECTOR_MAT )
1228 const vector<Mat>& vv = *(const vector<Mat>*)obj;
1232 CV_Assert( i < (int)vv.size() );
1233 return vv[i].total();
1236 return size(i).area();
// CV type of the wrapped object. MATX/STD_VECTOR kinds carry the type in
// `flags`; container kinds delegate to the stored object.
1239 int _InputArray::type(int i) const
1244 return ((const Mat*)obj)->type();
1247 return ((const MatExpr*)obj)->type();
1249 if( k == MATX || k == STD_VECTOR || k == STD_VECTOR_VECTOR )
1250 return CV_MAT_TYPE(flags);
1255 if( k == STD_VECTOR_MAT )
1257 const vector<Mat>& vv = *(const vector<Mat>*)obj;
1258 CV_Assert( i < (int)vv.size() );
// i < 0 means "any element"; the first Mat's type is representative.
1260 return vv[i >= 0 ? i : 0].type();
1263 if( k == OPENGL_BUFFER )
1264 return ((const ogl::Buffer*)obj)->type();
1266 CV_Assert( k == GPU_MAT );
1267 //if( k == GPU_MAT )
1268 return ((const gpu::GpuMat*)obj)->type();
// Depth component (CV_8U..CV_64F) of type(i).
1271 int _InputArray::depth(int i) const
1273 return CV_MAT_DEPTH(type(i));
// Channel-count component of type(i).
1276 int _InputArray::channels(int i) const
1278 return CV_MAT_CN(type(i));
// True when the wrapped object holds no elements; dispatches on kind()
// (dispatch/return lines for several kinds are elided in this extract).
1281 bool _InputArray::empty() const
1286 return ((const Mat*)obj)->empty();
1294 if( k == STD_VECTOR )
1296 const vector<uchar>& v = *(const vector<uchar>*)obj;
1303 if( k == STD_VECTOR_VECTOR )
1305 const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
1309 if( k == STD_VECTOR_MAT )
1311 const vector<Mat>& vv = *(const vector<Mat>*)obj;
1315 if( k == OPENGL_BUFFER )
1316 return ((const ogl::Buffer*)obj)->empty();
1318 if( k == OPENGL_TEXTURE )
1319 return ((const ogl::Texture2D*)obj)->empty();
1323 CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
1326 CV_Assert( k == GPU_MAT );
1327 //if( k == GPU_MAT )
1328 return ((const gpu::GpuMat*)obj)->empty();
// _OutputArray constructors: thin wrappers over _InputArray. The const
// overloads additionally set FIXED_SIZE/FIXED_TYPE so create() cannot
// reallocate or retype the caller-owned object.
1332 _OutputArray::_OutputArray() {}
1333 #ifdef OPENCV_CAN_BREAK_BINARY_COMPATIBILITY
1334 _OutputArray::~_OutputArray() {}
1336 _OutputArray::_OutputArray(Mat& m) : _InputArray(m) {}
1337 _OutputArray::_OutputArray(vector<Mat>& vec) : _InputArray(vec) {}
1338 _OutputArray::_OutputArray(gpu::GpuMat& d_mat) : _InputArray(d_mat) {}
1339 _OutputArray::_OutputArray(ogl::Buffer& buf) : _InputArray(buf) {}
1340 _OutputArray::_OutputArray(ogl::Texture2D& tex) : _InputArray(tex) {}
1342 _OutputArray::_OutputArray(const Mat& m) : _InputArray(m) {flags |= FIXED_SIZE|FIXED_TYPE;}
1343 _OutputArray::_OutputArray(const vector<Mat>& vec) : _InputArray(vec) {flags |= FIXED_SIZE;}
1344 _OutputArray::_OutputArray(const gpu::GpuMat& d_mat) : _InputArray(d_mat) {flags |= FIXED_SIZE|FIXED_TYPE;}
1345 _OutputArray::_OutputArray(const ogl::Buffer& buf) : _InputArray(buf) {flags |= FIXED_SIZE|FIXED_TYPE;}
1346 _OutputArray::_OutputArray(const ogl::Texture2D& tex) : _InputArray(tex) {flags |= FIXED_SIZE|FIXED_TYPE;}
// True when create() must not change the destination's size.
1349 bool _OutputArray::fixedSize() const
1351 return (flags & FIXED_SIZE) == FIXED_SIZE;
// True when create() must not change the destination's element type.
1354 bool _OutputArray::fixedType() const
1356 return (flags & FIXED_TYPE) == FIXED_TYPE;
// create(Size) fast paths for the simple cases (plain Mat / GpuMat / OpenGL
// buffer, whole object, no transpose, no depth mask); everything else is
// routed through the general n-dim create() at the bottom.
1359 void _OutputArray::create(Size _sz, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
1362 if( k == MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
// Fixed-size/type destinations must already match the request exactly.
1364 CV_Assert(!fixedSize() || ((Mat*)obj)->size.operator()() == _sz);
1365 CV_Assert(!fixedType() || ((Mat*)obj)->type() == mtype);
1366 ((Mat*)obj)->create(_sz, mtype);
1369 if( k == GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
1371 CV_Assert(!fixedSize() || ((gpu::GpuMat*)obj)->size() == _sz);
1372 CV_Assert(!fixedType() || ((gpu::GpuMat*)obj)->type() == mtype);
1373 ((gpu::GpuMat*)obj)->create(_sz, mtype);
1376 if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 )
1378 CV_Assert(!fixedSize() || ((ogl::Buffer*)obj)->size() == _sz);
1379 CV_Assert(!fixedType() || ((ogl::Buffer*)obj)->type() == mtype);
1380 ((ogl::Buffer*)obj)->create(_sz, mtype);
// General fallback: delegate to the dims/sizes overload.
1383 int sizes[] = {_sz.height, _sz.width};
1384 create(2, sizes, mtype, i, allowTransposed, fixedDepthMask);
// create(rows, cols) — identical structure to the Size overload above:
// fast paths for Mat / GpuMat / OpenGL buffer, general fallback otherwise.
1387 void _OutputArray::create(int rows, int cols, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
1390 if( k == MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
1392 CV_Assert(!fixedSize() || ((Mat*)obj)->size.operator()() == Size(cols, rows));
1393 CV_Assert(!fixedType() || ((Mat*)obj)->type() == mtype);
1394 ((Mat*)obj)->create(rows, cols, mtype);
1397 if( k == GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
1399 CV_Assert(!fixedSize() || ((gpu::GpuMat*)obj)->size() == Size(cols, rows));
1400 CV_Assert(!fixedType() || ((gpu::GpuMat*)obj)->type() == mtype);
1401 ((gpu::GpuMat*)obj)->create(rows, cols, mtype);
1404 if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 )
1406 CV_Assert(!fixedSize() || ((ogl::Buffer*)obj)->size() == Size(cols, rows));
1407 CV_Assert(!fixedType() || ((ogl::Buffer*)obj)->type() == mtype);
1408 ((ogl::Buffer*)obj)->create(rows, cols, mtype);
1411 int sizes[] = {rows, cols};
1412 create(2, sizes, mtype, i, allowTransposed, fixedDepthMask);
// General n-dimensional create(): allocates/retypes the destination to match
// (dims, sizes, mtype), honoring FIXED_SIZE/FIXED_TYPE constraints and the
// fixedDepthMask escape hatch. Handles Mat, MATX, std::vector,
// vector<vector<uchar>>, and vector<Mat> destinations. Many dispatch, brace
// and early-return lines are elided from this extract — read with care.
1415 void _OutputArray::create(int dims, const int* sizes, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
1418 mtype = CV_MAT_TYPE(mtype);
// ---- MAT destination ----
1423 Mat& m = *(Mat*)obj;
1424 if( allowTransposed )
// A non-continuous dst cannot be reused transposed; it must be re-created.
1426 if( !m.isContinuous() )
1428 CV_Assert(!fixedType() && !fixedSize());
// Accept an existing m that is the transpose of the request.
1432 if( dims == 2 && m.dims == 2 && m.data &&
1433 m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
// Fixed type: allow a depth change only when fixedDepthMask permits it.
1439 if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
1442 CV_Assert(CV_MAT_TYPE(mtype) == m.type());
1446 CV_Assert(m.dims == dims);
1447 for(int j = 0; j < dims; ++j)
1448 CV_Assert(m.size[j] == sizes[j]);
1450 m.create(dims, sizes, mtype);
// ---- MATX destination: fixed-size data, only shape/type checks possible ----
1457 int type0 = CV_MAT_TYPE(flags);
1458 CV_Assert( mtype == type0 || (CV_MAT_CN(mtype) == 1 && ((1 << type0) & fixedDepthMask) != 0) );
1459 CV_Assert( dims == 2 && ((sizes[0] == sz.height && sizes[1] == sz.width) ||
1460 (allowTransposed && sizes[0] == sz.width && sizes[1] == sz.height)));
// ---- std::vector / vector<vector<uchar>> destinations ----
1464 if( k == STD_VECTOR || k == STD_VECTOR_VECTOR )
1466 CV_Assert( dims == 2 && (sizes[0] == 1 || sizes[1] == 1 || sizes[0]*sizes[1] == 0) );
1467 size_t len = sizes[0]*sizes[1] > 0 ? sizes[0] + sizes[1] - 1 : 0;
1468 vector<uchar>* v = (vector<uchar>*)obj;
1470 if( k == STD_VECTOR_VECTOR )
1472 vector<vector<uchar> >& vv = *(vector<vector<uchar> >*)obj;
1475 CV_Assert(!fixedSize() || len == vv.size());
1479 CV_Assert( i < (int)vv.size() );
1485 int type0 = CV_MAT_TYPE(flags);
1486 CV_Assert( mtype == type0 || (CV_MAT_CN(mtype) == CV_MAT_CN(type0) && ((1 << type0) & fixedDepthMask) != 0) );
1488 int esz = CV_ELEM_SIZE(type0);
1489 CV_Assert(!fixedSize() || len == ((vector<uchar>*)v)->size() / esz);
// Resize through a vector of the matching element size (switch on esz;
// case labels elided). Vec<int,N> stands in for any N*4-byte element.
1493 ((vector<uchar>*)v)->resize(len);
1496 ((vector<Vec2b>*)v)->resize(len);
1499 ((vector<Vec3b>*)v)->resize(len);
1502 ((vector<int>*)v)->resize(len);
1505 ((vector<Vec3s>*)v)->resize(len);
1508 ((vector<Vec2i>*)v)->resize(len);
1511 ((vector<Vec3i>*)v)->resize(len);
1514 ((vector<Vec4i>*)v)->resize(len);
1517 ((vector<Vec6i>*)v)->resize(len);
1520 ((vector<Vec8i>*)v)->resize(len);
1523 ((vector<Vec<int, 9> >*)v)->resize(len);
1526 ((vector<Vec<int, 12> >*)v)->resize(len);
1529 ((vector<Vec<int, 16> >*)v)->resize(len);
1532 ((vector<Vec<int, 32> >*)v)->resize(len);
1535 ((vector<Vec<int, 64> >*)v)->resize(len);
1538 ((vector<Vec<int, 128> >*)v)->resize(len);
1541 CV_Error_(CV_StsBadArg, ("Vectors with element size %d are not supported. Please, modify OutputArray::create()\n", esz));
1548 CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
1553 CV_Error(CV_StsNullPtr, "create() called for the missing output array" );
// ---- vector<Mat> destination ----
1557 CV_Assert( k == STD_VECTOR_MAT );
1558 //if( k == STD_VECTOR_MAT )
1560 vector<Mat>& v = *(vector<Mat>*)obj;
// i < 0: (re)size the outer vector itself.
1564 CV_Assert( dims == 2 && (sizes[0] == 1 || sizes[1] == 1 || sizes[0]*sizes[1] == 0) );
1565 size_t len = sizes[0]*sizes[1] > 0 ? sizes[0] + sizes[1] - 1 : 0, len0 = v.size();
1567 CV_Assert(!fixedSize() || len == len0);
// Newly appended Mats inherit the wrapper's declared type.
1571 int _type = CV_MAT_TYPE(flags);
1572 for( size_t j = len0; j < len; j++ )
1574 if( v[j].type() == _type )
1576 CV_Assert( v[j].empty() );
1577 v[j].flags = (v[j].flags & ~CV_MAT_TYPE_MASK) | _type;
// i >= 0: create element v[i] (header `m` bound on an elided line);
// same logic as the single-Mat branch above.
1583 CV_Assert( i < (int)v.size() );
1586 if( allowTransposed )
1588 if( !m.isContinuous() )
1590 CV_Assert(!fixedType() && !fixedSize());
1594 if( dims == 2 && m.dims == 2 && m.data &&
1595 m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
1601 if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
1604 CV_Assert(!fixedType() || (CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0));
1608 CV_Assert(m.dims == dims);
1609 for(int j = 0; j < dims; ++j)
1610 CV_Assert(m.size[j] == sizes[j]);
1613 m.create(dims, sizes, mtype);
// _OutputArray::release -- releases whatever destination object the wrapper
// points to, dispatching on the wrapper kind `k` (Mat, gpu::GpuMat, OpenGL
// buffer/texture, std::vector variants). oclMat is not supported and raises
// CV_StsNotImplemented.
// NOTE(review): this is an elided listing -- braces, `return` statements and
// the computation of `k` between the numbered fragments are not shown.
1617 void _OutputArray::release() const
// A fixed-size output array must never be released out from under callers.
1619 CV_Assert(!fixedSize());
1625 ((Mat*)obj)->release();
1631 ((gpu::GpuMat*)obj)->release();
1635 if( k == OPENGL_BUFFER )
1637 ((ogl::Buffer*)obj)->release();
1641 if( k == OPENGL_TEXTURE )
1643 ((ogl::Texture2D*)obj)->release();
1650 if( k == STD_VECTOR )
// For a plain std::vector destination, "release" is an empty re-create.
1652 create(Size(), CV_MAT_TYPE(flags));
1656 if( k == STD_VECTOR_VECTOR )
1658 ((vector<vector<uchar> >*)obj)->clear();
1664 CV_Error(CV_StsNotImplemented, "This method is not implemented for oclMat yet");
// Only STD_VECTOR_MAT is expected to remain at this point.
1667 CV_Assert( k == STD_VECTOR_MAT );
1668 //if( k == STD_VECTOR_MAT )
1670 ((vector<Mat>*)obj)->clear();
// _OutputArray::clear -- empties the destination without deallocating it.
// NOTE(review): elided listing; the kind-dispatch around the Mat branch
// (and any fall-through to release()) is not shown here.
1674 void _OutputArray::clear() const
// Clearing must not change the size of a fixed-size output.
1680 CV_Assert(!fixedSize());
// For a Mat destination, clearing means resizing to zero rows.
1681 ((Mat*)obj)->resize(0);
// needed() -- true unless this is the "no output" placeholder (kind NONE),
// i.e. the caller actually wants this output computed.
1688 bool _OutputArray::needed() const
1690 return kind() != NONE;
// getMatRef(i) -- returns a mutable reference to the destination Mat.
// With i < 0 (presumably; the branch condition itself is elided) the wrapper
// must be a single MAT; otherwise it must be a vector<Mat> and `i` indexes it.
// NOTE(review): elided listing; the actual `return` statements are not shown.
1693 Mat& _OutputArray::getMatRef(int i) const
1698 CV_Assert( k == MAT );
1703 CV_Assert( k == STD_VECTOR_MAT );
1704 vector<Mat>& v = *(vector<Mat>*)obj;
// Index must be inside the vector; no automatic growth here.
1705 CV_Assert( i < (int)v.size() );
// getGpuMatRef -- typed access to the wrapped gpu::GpuMat; asserts the
// wrapper really is of kind GPU_MAT before casting.
1710 gpu::GpuMat& _OutputArray::getGpuMatRef() const
1713 CV_Assert( k == GPU_MAT );
1714 return *(gpu::GpuMat*)obj;
// getOGlBufferRef -- typed access to the wrapped ogl::Buffer; asserts the
// wrapper kind first, mirroring getGpuMatRef above.
1717 ogl::Buffer& _OutputArray::getOGlBufferRef() const
1720 CV_Assert( k == OPENGL_BUFFER );
1721 return *(ogl::Buffer*)obj;
// getOGlTexture2DRef -- typed access to the wrapped ogl::Texture2D,
// guarded by a kind check like the other typed accessors.
1724 ogl::Texture2D& _OutputArray::getOGlTexture2DRef() const
1727 CV_Assert( k == OPENGL_TEXTURE );
1728 return *(ogl::Texture2D*)obj;
// Shared default-constructed sentinel returned by noArray(); callers test it
// via needed() (kind NONE) to skip computing optional outputs.
1731 static _OutputArray _none;
1732 OutputArray noArray() { return _none; }
1736 /*************************************************************************************************\
1738 \*************************************************************************************************/
// hconcat(array form) -- horizontally concatenates `nsrc` matrices into _dst.
// All sources must be non-empty, at most 2-D, and share rows and type with
// src[0]; the output is src[0].rows x (sum of cols).
// NOTE(review): elided listing; the early-return body for the empty-input
// check and loop braces are not shown.
1740 void cv::hconcat(const Mat* src, size_t nsrc, OutputArray _dst)
1742 if( nsrc == 0 || !src )
1748 int totalCols = 0, cols = 0;
// First pass: validate and measure the total width.
1750 for( i = 0; i < nsrc; i++ )
1752 CV_Assert( !src[i].empty() && src[i].dims <= 2 &&
1753 src[i].rows == src[0].rows &&
1754 src[i].type() == src[0].type());
1755 totalCols += src[i].cols;
1757 _dst.create( src[0].rows, totalCols, src[0].type());
1758 Mat dst = _dst.getMat();
// Second pass: copy each source into its column slice of dst.
1759 for( i = 0; i < nsrc; i++ )
1761 Mat dpart = dst(Rect(cols, 0, src[i].cols, src[i].rows));
1762 src[i].copyTo(dpart);
1763 cols += src[i].cols;
// Two-argument convenience overload: delegates to the array form.
1767 void cv::hconcat(InputArray src1, InputArray src2, OutputArray dst)
1769 Mat src[] = {src1.getMat(), src2.getMat()};
1770 hconcat(src, 2, dst);
// Vector-of-matrices overload: unpacks _src and delegates to the array form;
// passes a null pointer with size 0 when the vector is empty.
1773 void cv::hconcat(InputArray _src, OutputArray dst)
1776 _src.getMatVector(src);
1777 hconcat(!src.empty() ? &src[0] : 0, src.size(), dst);
// vconcat(array form) -- vertically concatenates `nsrc` matrices into _dst.
// Mirror image of hconcat: sources must share cols and type with src[0];
// output is (sum of rows) x src[0].cols.
// NOTE(review): elided listing; braces and the empty-input early return
// are not shown.
1780 void cv::vconcat(const Mat* src, size_t nsrc, OutputArray _dst)
1782 if( nsrc == 0 || !src )
1788 int totalRows = 0, rows = 0;
// First pass: validate and measure the total height.
1790 for( i = 0; i < nsrc; i++ )
1792 CV_Assert( !src[i].empty() && src[i].dims <= 2 &&
1793 src[i].cols == src[0].cols &&
1794 src[i].type() == src[0].type());
1795 totalRows += src[i].rows;
1797 _dst.create( totalRows, src[0].cols, src[0].type());
1798 Mat dst = _dst.getMat();
// Second pass: copy each source into its row slice of dst.
1799 for( i = 0; i < nsrc; i++ )
1801 Mat dpart(dst, Rect(0, rows, src[i].cols, src[i].rows));
1802 src[i].copyTo(dpart);
1803 rows += src[i].rows;
// Two-argument convenience overload: delegates to the array form.
1807 void cv::vconcat(InputArray src1, InputArray src2, OutputArray dst)
1809 Mat src[] = {src1.getMat(), src2.getMat()};
1810 vconcat(src, 2, dst);
// Vector-of-matrices overload: unpacks _src and delegates to the array form.
1813 void cv::vconcat(InputArray _src, OutputArray dst)
1816 _src.getMatVector(src);
1817 vconcat(!src.empty() ? &src[0] : 0, src.size(), dst);
1820 //////////////////////////////////////// set identity ////////////////////////////////////////////
// setIdentity -- writes s[0] on the diagonal and 0 elsewhere, with fast
// hand-rolled paths for single-channel float and double matrices.
// NOTE(review): elided listing; the CV_32FC1 inner-loop assignment and the
// generic fallback branch for other types are not visible here.
1821 void cv::setIdentity( InputOutputArray _m, const Scalar& s )
1823 Mat m = _m.getMat();
1824 CV_Assert( m.dims <= 2 );
1825 int i, j, rows = m.rows, cols = m.cols, type = m.type();
1827 if( type == CV_32FC1 )
1829 float* data = (float*)m.data;
1830 float val = (float)s[0];
// step is in elements, not bytes, so `data += step` advances one row.
1831 size_t step = m.step/sizeof(data[0]);
1833 for( i = 0; i < rows; i++, data += step )
1835 for( j = 0; j < cols; j++ )
1841 else if( type == CV_64FC1 )
1843 double* data = (double*)m.data;
1845 size_t step = m.step/sizeof(data[0]);
1847 for( i = 0; i < rows; i++, data += step )
1849 for( j = 0; j < cols; j++ )
// Diagonal element gets val, everything else 0.
1850 data[j] = j == i ? val : 0;
1860 //////////////////////////////////////////// trace ///////////////////////////////////////////
// trace -- sum of the diagonal of a (up to) 2-D matrix. Fast paths walk the
// diagonal directly for CV_32FC1/CV_64FC1 (element step + 1 hops one row down
// and one column right); all other types fall back to sum(m.diag()).
// NOTE(review): elided listing; the accumulator declarations and returns of
// the fast paths are not shown.
1862 cv::Scalar cv::trace( InputArray _m )
1864 Mat m = _m.getMat();
1865 CV_Assert( m.dims <= 2 );
1866 int i, type = m.type();
// Diagonal length of a possibly non-square matrix.
1867 int nm = std::min(m.rows, m.cols);
1869 if( type == CV_32FC1 )
1871 const float* ptr = (const float*)m.data;
// +1 makes ptr[i*step] land on element (i, i).
1872 size_t step = m.step/sizeof(ptr[0]) + 1;
1874 for( i = 0; i < nm; i++ )
1879 if( type == CV_64FC1 )
1881 const double* ptr = (const double*)m.data;
1882 size_t step = m.step/sizeof(ptr[0]) + 1;
1884 for( i = 0; i < nm; i++ )
// Generic fallback: extract the diagonal and sum it.
1889 return cv::sum(m.diag());
1892 ////////////////////////////////////// transpose /////////////////////////////////////////
// transpose_<T> -- out-of-place transpose of an sz.height x sz.width matrix of
// elements T, working on raw byte pointers with byte strides sstep/dstep.
// The main path is 4x4-tile unrolled (under CV_ENABLE_UNROLLED); tail rows
// and columns are handled by the scalar loops below.
// NOTE(review): elided listing; loop braces and some tail-loop headers are
// not shown between the numbered fragments.
1897 template<typename T> static void
1898 transpose_( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz )
1900 int i=0, j, m = sz.width, n = sz.height;
1902 #if CV_ENABLE_UNROLLED
// Process 4 destination rows at a time.
1903 for(; i <= m - 4; i += 4 )
1905 T* d0 = (T*)(dst + dstep*i);
1906 T* d1 = (T*)(dst + dstep*(i+1));
1907 T* d2 = (T*)(dst + dstep*(i+2));
1908 T* d3 = (T*)(dst + dstep*(i+3));
// 4x4 tile: read 4 source rows, scatter into 4 destination rows.
1910 for( j = 0; j <= n - 4; j += 4 )
1912 const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j);
1913 const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1));
1914 const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2));
1915 const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3));
1917 d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0];
1918 d1[j] = s0[1]; d1[j+1] = s1[1]; d1[j+2] = s2[1]; d1[j+3] = s3[1];
1919 d2[j] = s0[2]; d2[j+1] = s1[2]; d2[j+2] = s2[2]; d2[j+3] = s3[2];
1920 d3[j] = s0[3]; d3[j+1] = s1[3]; d3[j+2] = s2[3]; d3[j+3] = s3[3];
// Column tail for the 4-row band.
1925 const T* s0 = (const T*)(src + i*sizeof(T) + j*sstep);
1926 d0[j] = s0[0]; d1[j] = s0[1]; d2[j] = s0[2]; d3[j] = s0[3];
// Remaining destination rows, one at a time.
1932 T* d0 = (T*)(dst + dstep*i);
1934 #if CV_ENABLE_UNROLLED
1935 for(; j <= n - 4; j += 4 )
1937 const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j);
1938 const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1));
1939 const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2));
1940 const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3));
1942 d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0];
// Scalar tail.
1947 const T* s0 = (const T*)(src + i*sizeof(T) + j*sstep);
// transposeI_<T> -- in-place transpose of a square n x n matrix of elements T:
// for each row i, swap the elements above the diagonal (j > i) with their
// mirrored counterparts below it.
1953 template<typename T> static void
1954 transposeI_( uchar* data, size_t step, int n )
1957 for( i = 0; i < n; i++ )
1959 T* row = (T*)(data + step*i);
// data1 points at column i; stepping by `step` walks down that column.
1960 uchar* data1 = data + i*sizeof(T);
1961 for( j = i+1; j < n; j++ )
1962 std::swap( row[j], *(T*)(data1 + step*j) );
// Dispatch machinery for transpose: function-pointer typedefs, a macro that
// stamps out typed wrappers around transpose_/transposeI_, and two tables
// indexed by element size in bytes (hence the 0 gaps at unsupported sizes).
1966 typedef void (*TransposeFunc)( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz );
1967 typedef void (*TransposeInplaceFunc)( uchar* data, size_t step, int n );
1969 #define DEF_TRANSPOSE_FUNC(suffix, type) \
1970 static void transpose_##suffix( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz ) \
1971 { transpose_<type>(src, sstep, dst, dstep, sz); } \
1973 static void transposeI_##suffix( uchar* data, size_t step, int n ) \
1974 { transposeI_<type>(data, step, n); }
// One instantiation per supported element size (1..32 bytes).
1976 DEF_TRANSPOSE_FUNC(8u, uchar)
1977 DEF_TRANSPOSE_FUNC(16u, ushort)
1978 DEF_TRANSPOSE_FUNC(8uC3, Vec3b)
1979 DEF_TRANSPOSE_FUNC(32s, int)
1980 DEF_TRANSPOSE_FUNC(16uC3, Vec3s)
1981 DEF_TRANSPOSE_FUNC(32sC2, Vec2i)
1982 DEF_TRANSPOSE_FUNC(32sC3, Vec3i)
1983 DEF_TRANSPOSE_FUNC(32sC4, Vec4i)
1984 DEF_TRANSPOSE_FUNC(32sC6, Vec6i)
1985 DEF_TRANSPOSE_FUNC(32sC8, Vec8i)
// Table index == element size in bytes; 0 entries mean "unsupported size".
1987 static TransposeFunc transposeTab[] =
1989 0, transpose_8u, transpose_16u, transpose_8uC3, transpose_32s, 0, transpose_16uC3, 0,
1990 transpose_32sC2, 0, 0, 0, transpose_32sC3, 0, 0, 0, transpose_32sC4,
1991 0, 0, 0, 0, 0, 0, 0, transpose_32sC6, 0, 0, 0, 0, 0, 0, 0, transpose_32sC8
1994 static TransposeInplaceFunc transposeInplaceTab[] =
1996 0, transposeI_8u, transposeI_16u, transposeI_8uC3, transposeI_32s, 0, transposeI_16uC3, 0,
1997 transposeI_32sC2, 0, 0, 0, transposeI_32sC3, 0, 0, 0, transposeI_32sC4,
1998 0, 0, 0, 0, 0, 0, 0, transposeI_32sC6, 0, 0, 0, 0, 0, 0, 0, transposeI_32sC8
// cv::transpose -- public entry point. Creates dst as cols x rows, then
// dispatches on element size: in-place table when dst aliases src, otherwise
// the out-of-place table. 1-D (vector-shaped) inputs are treated as a copy.
// NOTE(review): elided listing; the copy/return for the vector-shaped case
// between lines 2015 and 2020 is not shown.
2003 void cv::transpose( InputArray _src, OutputArray _dst )
2005 Mat src = _src.getMat();
2006 size_t esz = src.elemSize();
// Element sizes above 32 bytes have no table entry.
2007 CV_Assert( src.dims <= 2 && esz <= (size_t)32 );
2009 _dst.create(src.cols, src.rows, src.type());
2010 Mat dst = _dst.getMat();
2012 // handle the case of single-column/single-row matrices, stored in STL vectors.
2013 if( src.rows != dst.cols || src.cols != dst.rows )
2015 CV_Assert( src.size() == dst.size() && (src.cols == 1 || src.rows == 1) );
// Aliasing dst == src selects the in-place (square) variant.
2020 if( dst.data == src.data )
2022 TransposeInplaceFunc func = transposeInplaceTab[esz];
2023 CV_Assert( func != 0 );
2024 func( dst.data, dst.step, dst.rows );
2028 TransposeFunc func = transposeTab[esz];
2029 CV_Assert( func != 0 );
2030 func( src.data, src.step, dst.data, dst.step, src.size() );
// completeSymm -- makes a square matrix symmetric by copying one triangle
// over the other. LtoR selects which triangle is the source (lower->upper
// when true, judging by the j0/j1 bounds below). Only 32-bit single-channel
// (int/float treated as raw 32-bit words) and CV_64FC1 are supported.
2035 void cv::completeSymm( InputOutputArray _m, bool LtoR )
2037 Mat m = _m.getMat();
2038 CV_Assert( m.dims <= 2 );
2040 int i, j, nrows = m.rows, type = m.type();
2041 int j0 = 0, j1 = nrows;
2042 CV_Assert( m.rows == m.cols );
// CV_32FC1 shares the int path: elements are copied as raw 32-bit values.
2044 if( type == CV_32FC1 || type == CV_32SC1 )
2046 int* data = (int*)m.data;
2047 size_t step = m.step/sizeof(data[0]);
2048 for( i = 0; i < nrows; i++ )
// Restrict the copy to one triangle: j<i (!LtoR) or j>i (LtoR).
2050 if( !LtoR ) j1 = i; else j0 = i+1;
2051 for( j = j0; j < j1; j++ )
2052 data[i*step + j] = data[j*step + i];
2055 else if( type == CV_64FC1 )
2057 double* data = (double*)m.data;
2058 size_t step = m.step/sizeof(data[0]);
2059 for( i = 0; i < nrows; i++ )
2061 if( !LtoR ) j1 = i; else j0 = i+1;
2062 for( j = j0; j < j1; j++ )
2063 data[i*step + j] = data[j*step + i];
// Any other type is rejected.
2067 CV_Error( CV_StsUnsupportedFormat, "" );
// Mat::cross -- 3-element cross product this x m. Accepts a 3x1 column or a
// 1xN row with 3 total scalars (cols*channels == 3); both operands must match
// in size and type. Float and double paths share the same formula; lda/ldb
// switch between row stride (column vector) and 1 (row vector).
// NOTE(review): elided listing; the `if( d == CV_32F )` header and the final
// `return result;` are not shown.
2071 cv::Mat cv::Mat::cross(InputArray _m) const
2073 Mat m = _m.getMat();
2074 int tp = type(), d = CV_MAT_DEPTH(tp);
2075 CV_Assert( dims <= 2 && m.dims <= 2 && size() == m.size() && tp == m.type() &&
2076 ((rows == 3 && cols == 1) || (cols*channels() == 3 && rows == 1)));
2077 Mat result(rows, cols, tp);
2081 const float *a = (const float*)data, *b = (const float*)m.data;
2082 float* c = (float*)result.data;
// Column vector: stride is one row; row vector: elements are adjacent.
2083 size_t lda = rows > 1 ? step/sizeof(a[0]) : 1;
2084 size_t ldb = rows > 1 ? m.step/sizeof(b[0]) : 1;
// Standard cross product: c = a x b.
2086 c[0] = a[lda] * b[ldb*2] - a[lda*2] * b[ldb];
2087 c[1] = a[lda*2] * b[0] - a[0] * b[ldb*2];
2088 c[2] = a[0] * b[ldb] - a[lda] * b[0];
2090 else if( d == CV_64F )
2092 const double *a = (const double*)data, *b = (const double*)m.data;
2093 double* c = (double*)result.data;
2094 size_t lda = rows > 1 ? step/sizeof(a[0]) : 1;
2095 size_t ldb = rows > 1 ? m.step/sizeof(b[0]) : 1;
2097 c[0] = a[lda] * b[ldb*2] - a[lda*2] * b[ldb];
2098 c[1] = a[lda*2] * b[0] - a[0] * b[ldb*2];
2099 c[2] = a[0] * b[ldb] - a[lda] * b[0];
2106 ////////////////////////////////////////// reduce ////////////////////////////////////////////
// reduceR_<T,ST,Op> -- reduce along rows (output is a single row): fold every
// row of srcmat into a width-sized accumulator buffer with Op, then store the
// buffer into dstmat as ST. WT is Op's working type.
// NOTE(review): elided listing; the first-row initialization and the per-row
// src pointer advance are not visible between the fragments.
2111 template<typename T, typename ST, class Op> static void
2112 reduceR_( const Mat& srcmat, Mat& dstmat )
2114 typedef typename Op::rtype WT;
2115 Size size = srcmat.size();
// Treat channels as extra columns so one loop covers all of them.
2116 size.width *= srcmat.channels();
2117 AutoBuffer<WT> buffer(size.width);
2119 ST* dst = (ST*)dstmat.data;
2120 const T* src = (const T*)srcmat.data;
2121 size_t srcstep = srcmat.step/sizeof(src[0]);
// Seed the accumulator from the first row.
2125 for( i = 0; i < size.width; i++ )
// Fold each remaining row into the accumulator.
2128 for( ; --size.height; )
2132 #if CV_ENABLE_UNROLLED
2133 for(; i <= size.width - 4; i += 4 )
2136 s0 = op(buf[i], (WT)src[i]);
2137 s1 = op(buf[i+1], (WT)src[i+1]);
2138 buf[i] = s0; buf[i+1] = s1;
2140 s0 = op(buf[i+2], (WT)src[i+2]);
2141 s1 = op(buf[i+3], (WT)src[i+3]);
2142 buf[i+2] = s0; buf[i+3] = s1;
2145 for( ; i < size.width; i++ )
2146 buf[i] = op(buf[i], (WT)src[i]);
// Final cast of the accumulator into the destination row.
2149 for( i = 0; i < size.width; i++ )
2150 dst[i] = (ST)buf[i];
// reduceC_<T,ST,Op> -- reduce along columns (output is a single column):
// for each row, fold all pixels into one per-channel value with Op.
// Uses a two-accumulator (a0/a1) unrolled scan per channel.
// NOTE(review): elided listing; the single-pixel fast-path body, the a0/a1
// merge and the final store to dst are not shown.
2154 template<typename T, typename ST, class Op> static void
2155 reduceC_( const Mat& srcmat, Mat& dstmat )
2157 typedef typename Op::rtype WT;
2158 Size size = srcmat.size();
2159 int i, k, cn = srcmat.channels();
2163 for( int y = 0; y < size.height; y++ )
2165 const T* src = (const T*)(srcmat.data + srcmat.step*y);
2166 ST* dst = (ST*)(dstmat.data + dstmat.step*y);
// Single-pixel row: nothing to reduce, just convert per channel.
2167 if( size.width == cn )
2168 for( k = 0; k < cn; k++ )
2172 for( k = 0; k < cn; k++ )
// Two independent accumulators hide the op latency.
2174 WT a0 = src[k], a1 = src[k+cn];
2175 for( i = 2*cn; i <= size.width - 4*cn; i += 4*cn )
2177 a0 = op(a0, (WT)src[i+k]);
2178 a1 = op(a1, (WT)src[i+k+cn]);
2179 a0 = op(a0, (WT)src[i+k+cn*2]);
2180 a1 = op(a1, (WT)src[i+k+cn*3]);
// Scalar tail over the remaining pixels.
2183 for( ; i < size.width; i += cn )
2185 a0 = op(a0, (WT)src[i+k]);
// Reduce dispatch: one macro alias per (operation, src depth, dst depth)
// combination, split into row-wise (R) and column-wise (C) families.
// SUM widens (e.g. 8u accumulates in int), MAX/MIN keep the source type.
2194 typedef void (*ReduceFunc)( const Mat& src, Mat& dst );
2198 #define reduceSumR8u32s reduceR_<uchar, int, OpAdd<int> >
2199 #define reduceSumR8u32f reduceR_<uchar, float, OpAdd<int> >
2200 #define reduceSumR8u64f reduceR_<uchar, double,OpAdd<int> >
2201 #define reduceSumR16u32f reduceR_<ushort,float, OpAdd<float> >
2202 #define reduceSumR16u64f reduceR_<ushort,double,OpAdd<double> >
2203 #define reduceSumR16s32f reduceR_<short, float, OpAdd<float> >
2204 #define reduceSumR16s64f reduceR_<short, double,OpAdd<double> >
2205 #define reduceSumR32f32f reduceR_<float, float, OpAdd<float> >
2206 #define reduceSumR32f64f reduceR_<float, double,OpAdd<double> >
2207 #define reduceSumR64f64f reduceR_<double,double,OpAdd<double> >
2209 #define reduceMaxR8u reduceR_<uchar, uchar, OpMax<uchar> >
2210 #define reduceMaxR16u reduceR_<ushort,ushort,OpMax<ushort> >
2211 #define reduceMaxR16s reduceR_<short, short, OpMax<short> >
2212 #define reduceMaxR32f reduceR_<float, float, OpMax<float> >
2213 #define reduceMaxR64f reduceR_<double,double,OpMax<double> >
2215 #define reduceMinR8u reduceR_<uchar, uchar, OpMin<uchar> >
2216 #define reduceMinR16u reduceR_<ushort,ushort,OpMin<ushort> >
2217 #define reduceMinR16s reduceR_<short, short, OpMin<short> >
2218 #define reduceMinR32f reduceR_<float, float, OpMin<float> >
2219 #define reduceMinR64f reduceR_<double,double,OpMin<double> >
2221 #define reduceSumC8u32s reduceC_<uchar, int, OpAdd<int> >
2222 #define reduceSumC8u32f reduceC_<uchar, float, OpAdd<int> >
2223 #define reduceSumC8u64f reduceC_<uchar, double,OpAdd<int> >
2224 #define reduceSumC16u32f reduceC_<ushort,float, OpAdd<float> >
2225 #define reduceSumC16u64f reduceC_<ushort,double,OpAdd<double> >
2226 #define reduceSumC16s32f reduceC_<short, float, OpAdd<float> >
2227 #define reduceSumC16s64f reduceC_<short, double,OpAdd<double> >
2228 #define reduceSumC32f32f reduceC_<float, float, OpAdd<float> >
2229 #define reduceSumC32f64f reduceC_<float, double,OpAdd<double> >
2230 #define reduceSumC64f64f reduceC_<double,double,OpAdd<double> >
2232 #define reduceMaxC8u reduceC_<uchar, uchar, OpMax<uchar> >
2233 #define reduceMaxC16u reduceC_<ushort,ushort,OpMax<ushort> >
2234 #define reduceMaxC16s reduceC_<short, short, OpMax<short> >
2235 #define reduceMaxC32f reduceC_<float, float, OpMax<float> >
2236 #define reduceMaxC64f reduceC_<double,double,OpMax<double> >
2238 #define reduceMinC8u reduceC_<uchar, uchar, OpMin<uchar> >
2239 #define reduceMinC16u reduceC_<ushort,ushort,OpMin<ushort> >
2240 #define reduceMinC16s reduceC_<short, short, OpMin<short> >
2241 #define reduceMinC32f reduceC_<float, float, OpMin<float> >
2242 #define reduceMinC64f reduceC_<double,double,OpMin<double> >
// cv::reduce -- reduce a 2-D matrix to a single row (dim==0) or column
// (dim==1) using SUM, MAX, MIN or AVG. AVG is implemented as SUM into a
// wider temporary followed by a scaling convertTo at the end. The long
// if/else ladders select the reduceR_/reduceC_ instantiation matching the
// (sdepth, ddepth) pair; unsupported pairs raise CV_StsUnsupportedFormat.
// NOTE(review): elided listing; the dim dispatch, the func(src, temp) call
// site and several closing braces are not shown between fragments.
2244 void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
2246 Mat src = _src.getMat();
2247 CV_Assert( src.dims <= 2 );
2249 int stype = src.type(), sdepth = src.depth(), cn = src.channels();
// Negative dtype means "infer": fixed-type destination wins, else src type.
2251 dtype = _dst.fixedType() ? _dst.type() : stype;
2252 int ddepth = CV_MAT_DEPTH(dtype);
2254 _dst.create(dim == 0 ? 1 : src.rows, dim == 0 ? src.cols : 1,
2255 CV_MAKETYPE(dtype >= 0 ? dtype : stype, cn));
2256 Mat dst = _dst.getMat(), temp = dst;
2258 CV_Assert( op == CV_REDUCE_SUM || op == CV_REDUCE_MAX ||
2259 op == CV_REDUCE_MIN || op == CV_REDUCE_AVG );
2260 CV_Assert( src.channels() == dst.channels() );
// AVG: accumulate in CV_32S when both depths are narrow, divide later.
2262 if( op == CV_REDUCE_AVG )
2265 if( sdepth < CV_32S && ddepth < CV_32S )
2267 temp.create(dst.rows, dst.cols, CV_32SC(cn));
2272 ReduceFunc func = 0;
// --- row-wise (dim == 0) dispatch ---
2275 if( op == CV_REDUCE_SUM )
2277 if(sdepth == CV_8U && ddepth == CV_32S)
2278 func = GET_OPTIMIZED(reduceSumR8u32s);
2279 else if(sdepth == CV_8U && ddepth == CV_32F)
2280 func = reduceSumR8u32f;
2281 else if(sdepth == CV_8U && ddepth == CV_64F)
2282 func = reduceSumR8u64f;
2283 else if(sdepth == CV_16U && ddepth == CV_32F)
2284 func = reduceSumR16u32f;
2285 else if(sdepth == CV_16U && ddepth == CV_64F)
2286 func = reduceSumR16u64f;
2287 else if(sdepth == CV_16S && ddepth == CV_32F)
2288 func = reduceSumR16s32f;
2289 else if(sdepth == CV_16S && ddepth == CV_64F)
2290 func = reduceSumR16s64f;
2291 else if(sdepth == CV_32F && ddepth == CV_32F)
2292 func = GET_OPTIMIZED(reduceSumR32f32f);
2293 else if(sdepth == CV_32F && ddepth == CV_64F)
2294 func = reduceSumR32f64f;
2295 else if(sdepth == CV_64F && ddepth == CV_64F)
2296 func = reduceSumR64f64f;
2298 else if(op == CV_REDUCE_MAX)
2300 if(sdepth == CV_8U && ddepth == CV_8U)
2301 func = GET_OPTIMIZED(reduceMaxR8u);
2302 else if(sdepth == CV_16U && ddepth == CV_16U)
2303 func = reduceMaxR16u;
2304 else if(sdepth == CV_16S && ddepth == CV_16S)
2305 func = reduceMaxR16s;
2306 else if(sdepth == CV_32F && ddepth == CV_32F)
2307 func = GET_OPTIMIZED(reduceMaxR32f);
2308 else if(sdepth == CV_64F && ddepth == CV_64F)
2309 func = reduceMaxR64f;
2311 else if(op == CV_REDUCE_MIN)
2313 if(sdepth == CV_8U && ddepth == CV_8U)
2314 func = GET_OPTIMIZED(reduceMinR8u);
2315 else if(sdepth == CV_16U && ddepth == CV_16U)
2316 func = reduceMinR16u;
2317 else if(sdepth == CV_16S && ddepth == CV_16S)
2318 func = reduceMinR16s;
2319 else if(sdepth == CV_32F && ddepth == CV_32F)
2320 func = GET_OPTIMIZED(reduceMinR32f);
2321 else if(sdepth == CV_64F && ddepth == CV_64F)
2322 func = reduceMinR64f;
// --- column-wise (dim == 1) dispatch ---
2327 if(op == CV_REDUCE_SUM)
2329 if(sdepth == CV_8U && ddepth == CV_32S)
2330 func = GET_OPTIMIZED(reduceSumC8u32s);
2331 else if(sdepth == CV_8U && ddepth == CV_32F)
2332 func = reduceSumC8u32f;
2333 else if(sdepth == CV_8U && ddepth == CV_64F)
2334 func = reduceSumC8u64f;
2335 else if(sdepth == CV_16U && ddepth == CV_32F)
2336 func = reduceSumC16u32f;
2337 else if(sdepth == CV_16U && ddepth == CV_64F)
2338 func = reduceSumC16u64f;
2339 else if(sdepth == CV_16S && ddepth == CV_32F)
2340 func = reduceSumC16s32f;
2341 else if(sdepth == CV_16S && ddepth == CV_64F)
2342 func = reduceSumC16s64f;
2343 else if(sdepth == CV_32F && ddepth == CV_32F)
2344 func = GET_OPTIMIZED(reduceSumC32f32f);
2345 else if(sdepth == CV_32F && ddepth == CV_64F)
2346 func = reduceSumC32f64f;
2347 else if(sdepth == CV_64F && ddepth == CV_64F)
2348 func = reduceSumC64f64f;
2350 else if(op == CV_REDUCE_MAX)
2352 if(sdepth == CV_8U && ddepth == CV_8U)
2353 func = GET_OPTIMIZED(reduceMaxC8u);
2354 else if(sdepth == CV_16U && ddepth == CV_16U)
2355 func = reduceMaxC16u;
2356 else if(sdepth == CV_16S && ddepth == CV_16S)
2357 func = reduceMaxC16s;
2358 else if(sdepth == CV_32F && ddepth == CV_32F)
2359 func = GET_OPTIMIZED(reduceMaxC32f);
2360 else if(sdepth == CV_64F && ddepth == CV_64F)
2361 func = reduceMaxC64f;
2363 else if(op == CV_REDUCE_MIN)
2365 if(sdepth == CV_8U && ddepth == CV_8U)
2366 func = GET_OPTIMIZED(reduceMinC8u);
2367 else if(sdepth == CV_16U && ddepth == CV_16U)
2368 func = reduceMinC16u;
2369 else if(sdepth == CV_16S && ddepth == CV_16S)
2370 func = reduceMinC16s;
2371 else if(sdepth == CV_32F && ddepth == CV_32F)
2372 func = GET_OPTIMIZED(reduceMinC32f);
2373 else if(sdepth == CV_64F && ddepth == CV_64F)
2374 func = reduceMinC64f;
// No matching instantiation for this (op, sdepth, ddepth) combination.
2379 CV_Error( CV_StsUnsupportedFormat,
2380 "Unsupported combination of input and output array formats" );
// AVG: divide the accumulated sum by the number of reduced elements.
2384 if( op0 == CV_REDUCE_AVG )
2385 temp.convertTo(dst, dst.type(), 1./(dim == 0 ? src.rows : src.cols));
2389 //////////////////////////////////////// sort ///////////////////////////////////////////
// sort_<T> -- sorts each row (CV_SORT_EVERY_ROW) or each column of src into
// dst. Ascending via std::sort + LessThan<T>; descending is done by sorting
// ascending and then reversing. Column sorts go through a temporary buffer
// `ptr` that is gathered from / scattered back to the column.
// NOTE(review): elided listing; buffer setup, the in-place shortcut body and
// several loop braces are not shown.
2394 template<typename T> static void sort_( const Mat& src, Mat& dst, int flags )
2399 bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
2400 bool inplace = src.data == dst.data;
2401 bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;
// n = number of independent sorts, len = elements per sort.
2404 n = src.rows, len = src.cols;
2407 n = src.cols, len = src.rows;
2412 for( i = 0; i < n; i++ )
2417 T* dptr = (T*)(dst.data + dst.step*i);
2420 const T* sptr = (const T*)(src.data + src.step*i);
2421 for( j = 0; j < len; j++ )
// Column mode: gather the column into the contiguous buffer first.
2428 for( j = 0; j < len; j++ )
2429 ptr[j] = ((const T*)(src.data + src.step*j))[i];
2431 std::sort( ptr, ptr + len, LessThan<T>() );
// Descending order = ascending sort + reverse.
2432 if( sortDescending )
2433 for( j = 0; j < len/2; j++ )
2434 std::swap(ptr[j], ptr[len-1-j]);
2436 for( j = 0; j < len; j++ )
2437 ((T*)(dst.data + dst.step*j))[i] = ptr[j];
// sortIdx_<T> -- like sort_ but writes the permutation indices (CV_32S) into
// dst instead of the sorted values, using LessThanIdx<T> over the gathered
// values. Requires dst to be a different buffer than src.
// NOTE(review): elided listing; buffer allocation and the index-initialization
// loop bodies are not shown.
2442 template<typename T> static void sortIdx_( const Mat& src, Mat& dst, int flags )
2445 AutoBuffer<int> ibuf;
2449 bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
2450 bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;
// In-place index sort is impossible: values and indices have different types.
2452 CV_Assert( src.data != dst.data );
2455 n = src.rows, len = src.cols;
2458 n = src.cols, len = src.rows;
2465 for( i = 0; i < n; i++ )
2472 ptr = (T*)(src.data + src.step*i);
2473 iptr = (int*)(dst.data + dst.step*i);
// Column mode: gather the column values before sorting their indices.
2477 for( j = 0; j < len; j++ )
2478 ptr[j] = ((const T*)(src.data + src.step*j))[i];
2480 for( j = 0; j < len; j++ )
// Indirect sort: indices ordered by the values they point at.
2482 std::sort( iptr, iptr + len, LessThanIdx<T>(ptr) );
2483 if( sortDescending )
2484 for( j = 0; j < len/2; j++ )
2485 std::swap(iptr[j], iptr[len-1-j]);
2487 for( j = 0; j < len; j++ )
2488 ((int*)(dst.data + dst.step*j))[i] = iptr[j];
// cv::sort -- public entry: depth-indexed dispatch table over sort_<T>
// (single-channel, up to 2-D; CV_64F has the last non-null slot, the trailing
// 0 rejects user types).
2492 typedef void (*SortFunc)(const Mat& src, Mat& dst, int flags);
2496 void cv::sort( InputArray _src, OutputArray _dst, int flags )
2498 static SortFunc tab[] =
2500 sort_<uchar>, sort_<schar>, sort_<ushort>, sort_<short>,
2501 sort_<int>, sort_<float>, sort_<double>, 0
2503 Mat src = _src.getMat();
2504 SortFunc func = tab[src.depth()];
2505 CV_Assert( src.dims <= 2 && src.channels() == 1 && func != 0 );
2506 _dst.create( src.size(), src.type() );
2507 Mat dst = _dst.getMat();
2508 func( src, dst, flags );
// cv::sortIdx -- public entry for index sorting; same dispatch pattern as
// cv::sort but the destination is CV_32S. The dst.data == src.data check
// guards against aliasing before creating the output.
// NOTE(review): elided listing; the branch structure around the aliasing
// check (what runs when dst does NOT alias src) is not fully visible.
2511 void cv::sortIdx( InputArray _src, OutputArray _dst, int flags )
2513 static SortFunc tab[] =
2515 sortIdx_<uchar>, sortIdx_<schar>, sortIdx_<ushort>, sortIdx_<short>,
2516 sortIdx_<int>, sortIdx_<float>, sortIdx_<double>, 0
2518 Mat src = _src.getMat();
2519 SortFunc func = tab[src.depth()];
2520 CV_Assert( src.dims <= 2 && src.channels() == 1 && func != 0 );
2522 Mat dst = _dst.getMat();
2523 if( dst.data == src.data )
// Output indices are always 32-bit signed integers.
2525 _dst.create( src.size(), CV_32S );
2526 dst = _dst.getMat();
2527 func( src, dst, flags );
2531 ////////////////////////////////////////// kmeans ////////////////////////////////////////////
// generateRandomCenter -- fills `center` with one uniform random point drawn
// from each dimension's [min,max] box, widened by a margin of 1/dims on each
// side so centers can land slightly outside the data's bounding box.
2536 static void generateRandomCenter(const vector<Vec2f>& box, float* center, RNG& rng)
2538 size_t j, dims = box.size();
2539 float margin = 1.f/dims;
// (float)rng yields a uniform sample in [0,1).
2540 for( j = 0; j < dims; j++ )
2541 center[j] = ((float)rng*(1.f+margin*2.f)-margin)*(box[j][1] - box[j][0]) + box[j][0];
// KMeansPPDistanceComputer -- ParallelLoopBody used by kmeans++ seeding:
// for each sample i in the range, tdist2[i] becomes the squared L2 distance
// to the candidate center (at byte offset stepci) clamped by the current
// best distance dist[i].
// NOTE(review): elided listing; the constructor initializer list and the
// remaining member declarations are not shown.
2544 class KMeansPPDistanceComputer : public ParallelLoopBody
2547 KMeansPPDistanceComputer( float *_tdist2,
2560 void operator()( const cv::Range& range ) const
2562 const int begin = range.start;
2563 const int end = range.end;
2565 for ( int i = begin; i<end; i++ )
// min(new candidate distance, previous best) per sample.
2567 tdist2[i] = std::min(normL2Sqr_(data + step*i, data + stepci, dims), dist[i]);
2572 KMeansPPDistanceComputer& operator=(const KMeansPPDistanceComputer&); // to quiet MSVC
2579 const size_t stepci;
2583 k-means center initialization using the following algorithm:
2584 Arthur & Vassilvitskii (2007) k-means++: The Advantages of Careful Seeding
// generateCentersPP -- kmeans++ seeding: the first center is a uniformly
// random sample; each subsequent center is drawn with probability
// proportional to its squared distance from the nearest existing center,
// with `trials` candidate draws per center and the one minimizing the total
// distance kept. Finally the chosen sample rows are copied into _out_centers.
// NOTE(review): elided listing; the dist-sum accumulation, the candidate
// scoring/selection body and several braces are not shown.
2586 static void generateCentersPP(const Mat& _data, Mat& _out_centers,
2587 int K, RNG& rng, int trials)
2589 int i, j, k, dims = _data.cols, N = _data.rows;
2590 const float* data = _data.ptr<float>(0);
2591 size_t step = _data.step/sizeof(data[0]);
2592 vector<int> _centers(K);
2593 int* centers = &_centers[0];
// Three N-sized scratch arrays share one allocation: dist, tdist, tdist2.
2594 vector<float> _dist(N*3);
2595 float* dist = &_dist[0], *tdist = dist + N, *tdist2 = tdist + N;
// First center: uniformly random sample index.
2598 centers[0] = (unsigned)rng % N;
2600 for( i = 0; i < N; i++ )
2602 dist[i] = normL2Sqr_(data + step*i, data + step*centers[0], dims);
2606 for( k = 1; k < K; k++ )
2608 double bestSum = DBL_MAX;
2609 int bestCenter = -1;
2611 for( j = 0; j < trials; j++ )
// Sample a candidate index with probability proportional to dist[].
2613 double p = (double)rng*sum0, s = 0;
2614 for( i = 0; i < N-1; i++ )
2615 if( (p -= dist[i]) <= 0 )
2619 parallel_for_(Range(0, N),
2620 KMeansPPDistanceComputer(tdist2, data, dist, dims, step, step*ci));
2621 for( i = 0; i < N; i++ )
2630 std::swap(tdist, tdist2);
2633 centers[k] = bestCenter;
2635 std::swap(dist, tdist);
// Copy the selected sample rows out as the initial centers.
2638 for( k = 0; k < K; k++ )
2640 const float* src = data + step*centers[k];
2641 float* dst = _out_centers.ptr<float>(k);
2642 for( j = 0; j < dims; j++ )
// KMeansDistanceComputer -- ParallelLoopBody for the kmeans assignment step:
// for each sample in the range, finds the nearest center by squared L2
// distance and records that distance in distances[i]. (The label assignment
// itself is presumably inside the elided `if( min_dist > dist )` body --
// confirm against the full source.)
// NOTE(review): elided listing; constructor tail, member declarations and the
// branch body updating the best center are not shown.
2647 class KMeansDistanceComputer : public ParallelLoopBody
2650 KMeansDistanceComputer( double *_distances,
2653 const Mat& _centers )
2654 : distances(_distances),
2661 void operator()( const Range& range ) const
2663 const int begin = range.start;
2664 const int end = range.end;
2665 const int K = centers.rows;
2666 const int dims = centers.cols;
2668 const float *sample;
2669 for( int i = begin; i<end; ++i)
2671 sample = data.ptr<float>(i);
2673 double min_dist = DBL_MAX;
// Linear scan over all K centers.
2675 for( int k = 0; k < K; k++ )
2677 const float* center = centers.ptr<float>(k);
2678 const double dist = normL2Sqr_(sample, center, dims);
2680 if( min_dist > dist )
2687 distances[i] = min_dist;
2693 KMeansDistanceComputer& operator=(const KMeansDistanceComputer&); // to quiet MSVC
// cv::kmeans -- Lloyd's k-means over CV_32F samples (one row per sample, or
// a single multi-channel row). Runs `attempts` independent clusterings and
// keeps the labeling with the smallest compactness (sum of squared distances
// to the assigned centers), which is also the return value. Initialization is
// kmeans++ (KMEANS_PP_CENTERS), random-in-bounding-box, or caller-provided
// labels (KMEANS_USE_INITIAL_LABELS, first attempt only).
// NOTE(review): elided listing -- the iteration loop header, the
// label-assignment call inside the iteration, and many braces/else bodies are
// not shown between the numbered fragments; comments below describe only the
// visible statements.
2703 double cv::kmeans( InputArray _data, int K,
2704 InputOutputArray _bestLabels,
2705 TermCriteria criteria, int attempts,
2706 int flags, OutputArray _centers )
2708 const int SPP_TRIALS = 3;
2709 Mat data = _data.getMat();
// A single multi-channel row is treated as N=cols samples of dims=channels.
2710 bool isrow = data.rows == 1 && data.channels() > 1;
2711 int N = !isrow ? data.rows : data.cols;
2712 int dims = (!isrow ? data.cols : 1)*data.channels();
2713 int type = data.depth();
2715 attempts = std::max(attempts, 1);
2716 CV_Assert( data.dims <= 2 && type == CV_32F && K > 0 );
2717 CV_Assert( N >= K );
2719 _bestLabels.create(N, 1, CV_32S, -1, true);
2721 Mat _labels, best_labels = _bestLabels.getMat();
// With user-supplied initial labels, they must already be a valid N-vector.
2722 if( flags & CV_KMEANS_USE_INITIAL_LABELS )
2724 CV_Assert( (best_labels.cols == 1 || best_labels.rows == 1) &&
2725 best_labels.cols*best_labels.rows == N &&
2726 best_labels.type() == CV_32S &&
2727 best_labels.isContinuous());
2728 best_labels.copyTo(_labels);
// Otherwise (re)create best_labels if its current shape is unusable.
2732 if( !((best_labels.cols == 1 || best_labels.rows == 1) &&
2733 best_labels.cols*best_labels.rows == N &&
2734 best_labels.type() == CV_32S &&
2735 best_labels.isContinuous()))
2736 best_labels.create(N, 1, CV_32S);
2737 _labels.create(best_labels.size(), best_labels.type());
2739 int* labels = _labels.ptr<int>();
2741 Mat centers(K, dims, type), old_centers(K, dims, type), temp(1, dims, type);
2742 vector<int> counters(K);
2743 vector<Vec2f> _box(dims);
2744 Vec2f* box = &_box[0];
2745 double best_compactness = DBL_MAX, compactness = 0;
2746 RNG& rng = theRNG();
2747 int a, iter, i, j, k;
// Normalize termination criteria: squared epsilon, maxCount in [2,100].
2749 if( criteria.type & TermCriteria::EPS )
2750 criteria.epsilon = std::max(criteria.epsilon, 0.);
2752 criteria.epsilon = FLT_EPSILON;
2753 criteria.epsilon *= criteria.epsilon;
2755 if( criteria.type & TermCriteria::COUNT )
2756 criteria.maxCount = std::min(std::max(criteria.maxCount, 2), 100);
2758 criteria.maxCount = 100;
2763 criteria.maxCount = 2;
// Per-dimension bounding box of the data, used for random initialization.
2766 const float* sample = data.ptr<float>(0);
2767 for( j = 0; j < dims; j++ )
2768 box[j] = Vec2f(sample[j], sample[j]);
2770 for( i = 1; i < N; i++ )
2772 sample = data.ptr<float>(i);
2773 for( j = 0; j < dims; j++ )
2775 float v = sample[j];
2776 box[j][0] = std::min(box[j][0], v);
2777 box[j][1] = std::max(box[j][1], v);
// --- outer loop over independent attempts ---
2781 for( a = 0; a < attempts; a++ )
2783 double max_center_shift = DBL_MAX;
2786 swap(centers, old_centers);
// First iteration: choose initial centers (unless reusing caller labels).
2788 if( iter == 0 && (a > 0 || !(flags & KMEANS_USE_INITIAL_LABELS)) )
2790 if( flags & KMEANS_PP_CENTERS )
2791 generateCentersPP(data, centers, K, rng, SPP_TRIALS);
2794 for( k = 0; k < K; k++ )
2795 generateRandomCenter(_box, centers.ptr<float>(k), rng);
2800 if( iter == 0 && a == 0 && (flags & KMEANS_USE_INITIAL_LABELS) )
// Caller-provided labels must all be valid cluster indices.
2802 for( i = 0; i < N; i++ )
2803 CV_Assert( (unsigned)labels[i] < (unsigned)K );
// Update step: accumulate each sample into its cluster's center sum.
2807 centers = Scalar(0);
2808 for( k = 0; k < K; k++ )
2811 for( i = 0; i < N; i++ )
2813 sample = data.ptr<float>(i);
2815 float* center = centers.ptr<float>(k);
2817 #if CV_ENABLE_UNROLLED
2818 for(; j <= dims - 4; j += 4 )
2820 float t0 = center[j] + sample[j];
2821 float t1 = center[j+1] + sample[j+1];
2826 t0 = center[j+2] + sample[j+2];
2827 t1 = center[j+3] + sample[j+3];
2833 for( ; j < dims; j++ )
2834 center[j] += sample[j];
2839 max_center_shift = 0;
2841 for( k = 0; k < K; k++ )
2843 if( counters[k] != 0 )
2846 // if some cluster appeared to be empty then:
2847 // 1. find the biggest cluster
2848 // 2. find the farthest from the center point in the biggest cluster
2849 // 3. exclude the farthest point from the biggest cluster and form a new 1-point cluster.
2851 for( int k1 = 1; k1 < K; k1++ )
2853 if( counters[max_k] < counters[k1] )
2857 double max_dist = 0;
2858 int farthest_i = -1;
2859 float* new_center = centers.ptr<float>(k);
2860 float* old_center = centers.ptr<float>(max_k);
2861 float* _old_center = temp.ptr<float>(); // normalized
2862 float scale = 1.f/counters[max_k];
2863 for( j = 0; j < dims; j++ )
2864 _old_center[j] = old_center[j]*scale;
// Find the sample of the biggest cluster farthest from its center.
2866 for( i = 0; i < N; i++ )
2868 if( labels[i] != max_k )
2870 sample = data.ptr<float>(i);
2871 double dist = normL2Sqr_(sample, _old_center, dims);
2873 if( max_dist <= dist )
// Move the farthest point into the empty cluster k.
2882 labels[farthest_i] = k;
2883 sample = data.ptr<float>(farthest_i);
2885 for( j = 0; j < dims; j++ )
2887 old_center[j] -= sample[j];
2888 new_center[j] += sample[j];
// Finalize centers: divide sums by counts, track the largest center shift.
2892 for( k = 0; k < K; k++ )
2894 float* center = centers.ptr<float>(k);
2895 CV_Assert( counters[k] != 0 );
2897 float scale = 1.f/counters[k];
2898 for( j = 0; j < dims; j++ )
2904 const float* old_center = old_centers.ptr<float>(k);
2905 for( j = 0; j < dims; j++ )
2907 double t = center[j] - old_center[j];
2910 max_center_shift = std::max(max_center_shift, dist);
// Stop on iteration budget or when centers have effectively converged.
2915 if( ++iter == MAX(criteria.maxCount, 2) || max_center_shift <= criteria.epsilon )
// Final assignment pass (parallel) to get per-sample distances.
2919 Mat dists(1, N, CV_64F);
2920 double* dist = dists.ptr<double>(0);
2921 parallel_for_(Range(0, N),
2922 KMeansDistanceComputer(dist, labels, data, centers));
2924 for( i = 0; i < N; i++ )
2926 compactness += dist[i];
// Keep the best attempt's labels/centers.
2930 if( compactness < best_compactness )
2932 best_compactness = compactness;
2933 if( _centers.needed() )
2934 centers.copyTo(_centers);
2935 _labels.copyTo(best_labels);
2939 return best_compactness;
2943 CV_IMPL void cvSetIdentity( CvArr* arr, CvScalar value )
2945 cv::Mat m = cv::cvarrToMat(arr);
2946 cv::setIdentity(m, value);
2950 CV_IMPL CvScalar cvTrace( const CvArr* arr )
2952 return cv::trace(cv::cvarrToMat(arr));
2956 CV_IMPL void cvTranspose( const CvArr* srcarr, CvArr* dstarr )
2958 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
2960 CV_Assert( src.rows == dst.cols && src.cols == dst.rows && src.type() == dst.type() );
2961 transpose( src, dst );
2965 CV_IMPL void cvCompleteSymm( CvMat* matrix, int LtoR )
2968 cv::completeSymm( m, LtoR != 0 );
2972 CV_IMPL void cvCrossProduct( const CvArr* srcAarr, const CvArr* srcBarr, CvArr* dstarr )
2974 cv::Mat srcA = cv::cvarrToMat(srcAarr), dst = cv::cvarrToMat(dstarr);
2976 CV_Assert( srcA.size() == dst.size() && srcA.type() == dst.type() );
2977 srcA.cross(cv::cvarrToMat(srcBarr)).copyTo(dst);
2982 cvReduce( const CvArr* srcarr, CvArr* dstarr, int dim, int op )
2984 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
2987 dim = src.rows > dst.rows ? 0 : src.cols > dst.cols ? 1 : dst.cols == 1;
2990 CV_Error( CV_StsOutOfRange, "The reduced dimensionality index is out of range" );
2992 if( (dim == 0 && (dst.cols != src.cols || dst.rows != 1)) ||
2993 (dim == 1 && (dst.rows != src.rows || dst.cols != 1)) )
2994 CV_Error( CV_StsBadSize, "The output array size is incorrect" );
2996 if( src.channels() != dst.channels() )
2997 CV_Error( CV_StsUnmatchedFormats, "Input and output arrays must have the same number of channels" );
2999 cv::reduce(src, dst, dim, op, dst.type());
// C-API: fill 'arr' with an arithmetic progression running from 'start'
// towards 'end' in row-major order. Only CV_32SC1 and CV_32FC1 matrices are
// supported (see the error at the bottom).
// NOTE(review): this listing has elided lines — declarations of 'val',
// 'rows', 'cols', 'ok', the return type, braces and the continuous-case
// setup are missing; comments describe only the visible code.
cvRange( CvArr* arr, double start, double end )
    CvMat stub, *mat = (CvMat*)arr;

    // normalize the input to a CvMat header
    if( !CV_IS_MAT(mat) )
        mat = cvGetMat( mat, &stub);

    type = CV_MAT_TYPE(mat->type);
    delta = (end-start)/(rows*cols);   // per-element increment

    if( CV_IS_MAT_CONT(mat->type) )
        step = mat->step / CV_ELEM_SIZE(type);   // row stride in elements

    if( type == CV_32SC1 )
        int* idata = mat->data.i;
        int ival = cvRound(val), idelta = cvRound(delta);

        // if both the start value and the increment are (almost) integral,
        // use pure integer arithmetic to avoid per-element rounding
        if( fabs(val - ival) < DBL_EPSILON &&
            fabs(delta - idelta) < DBL_EPSILON )
            for( i = 0; i < rows; i++, idata += step )
                for( j = 0; j < cols; j++, ival += idelta )
            // fallback: accumulate in double and round each element
            for( i = 0; i < rows; i++, idata += step )
                for( j = 0; j < cols; j++, val += delta )
                    idata[j] = cvRound(val);
    else if( type == CV_32FC1 )
        float* fdata = mat->data.fl;
        for( i = 0; i < rows; i++, fdata += step )
            for( j = 0; j < cols; j++, val += delta )
                fdata[j] = (float)val;
        CV_Error( CV_StsUnsupportedFormat, "The function only supports 32sC1 and 32fC1 datatypes" );

    // return the array on success, 0 on failure
    return ok ? arr : 0;
3067 cvSort( const CvArr* _src, CvArr* _dst, CvArr* _idx, int flags )
3069 cv::Mat src = cv::cvarrToMat(_src);
3073 cv::Mat idx0 = cv::cvarrToMat(_idx), idx = idx0;
3074 CV_Assert( src.size() == idx.size() && idx.type() == CV_32S && src.data != idx.data );
3075 cv::sortIdx( src, idx, flags );
3076 CV_Assert( idx0.data == idx.data );
3081 cv::Mat dst0 = cv::cvarrToMat(_dst), dst = dst0;
3082 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
3083 cv::sort( src, dst, flags );
3084 CV_Assert( dst0.data == dst.data );
// C-API wrapper around cv::kmeans. The CvRNG* parameter is accepted for
// backward compatibility but unused.
// NOTE(review): the return type, braces and the `if( _centers )` /
// `if( _compactness )` guards appear to be elided in this listing.
cvKMeans2( const CvArr* _samples, int cluster_count, CvArr* _labels,
           CvTermCriteria termcrit, int attempts, CvRNG*,
           int flags, CvArr* _centers, double* _compactness )
    cv::Mat data = cv::cvarrToMat(_samples), labels = cv::cvarrToMat(_labels), centers;
    centers = cv::cvarrToMat(_centers);
    // multi-channel input is viewed as extra columns of a 1-channel matrix
    centers = centers.reshape(1);
    data = data.reshape(1);

    CV_Assert( !centers.empty() );
    CV_Assert( centers.rows == cluster_count );
    CV_Assert( centers.cols == data.cols );
    CV_Assert( centers.depth() == data.depth() );

    // labels must be a continuous 1xN or Nx1 CV_32S vector, one per sample
    CV_Assert( labels.isContinuous() && labels.type() == CV_32S &&
        (labels.cols == 1 || labels.rows == 1) &&
        labels.cols + labels.rows - 1 == data.rows );

    double compactness = cv::kmeans(data, cluster_count, labels, termcrit, attempts,
        flags, _centers ? cv::_OutputArray(centers) : cv::_OutputArray() );
    *_compactness = compactness;
3118 ///////////////////////////// n-dimensional matrices ////////////////////////////
// Reinterpret the matrix with a different channel count / dimensionality
// without copying data. Only a few cases are implemented; anything else
// raises CV_StsNotImplemented.
// NOTE(review): the inner conditions selecting between the two return
// statements (and the braces) appear to be elided in this listing.
Mat Mat::reshape(int _cn, int _newndims, const int* _newsz) const
    if(_newndims == dims)
        return reshape(_cn);
    return reshape(_cn, _newsz[0]);

    CV_Error(CV_StsNotImplemented, "");
// Convert this Mat to a legacy CvMatND header sharing the same data (no
// copy); per-dimension steps and the continuity flag are carried over.
// NOTE(review): declarations of 'mat', 'i' and 'd' and the return statement
// appear to be elided in this listing.
Mat::operator CvMatND() const
    cvInitMatNDHeader( &mat, dims, size, type(), data );

    for( i = 0; i < d; i++ )
        mat.dim[i].step = (int)step[i];

    // preserve the continuity flag of the source matrix
    mat.type |= flags & CONTINUOUS_FLAG;
3149 NAryMatIterator::NAryMatIterator()
3150 : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
3154 NAryMatIterator::NAryMatIterator(const Mat** _arrays, Mat* _planes, int _narrays)
3155 : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
3157 init(_arrays, _planes, 0, _narrays);
3160 NAryMatIterator::NAryMatIterator(const Mat** _arrays, uchar** _ptrs, int _narrays)
3161 : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
3163 init(_arrays, 0, _ptrs, _narrays);
// Prepare iteration over one or more equally-sized N-D matrices as a
// sequence of planes: determine the common "iteration depth" (the outermost
// dimension that cannot be merged into a contiguous plane), the plane size
// and the number of planes, then set up planes[]/ptrs[] for the first
// position.
// NOTE(review): this listing elides several lines of the original body
// (assignments to narrays/arrays/planes/ptrs, loop bodies, braces);
// comments below describe only the visible code.
void NAryMatIterator::init(const Mat** _arrays, Mat* _planes, uchar** _ptrs, int _narrays)
    CV_Assert( _arrays && (_ptrs || _planes) );
    int i, j, d1=0, i0 = -1, d = -1;

    // when the caller passes a 0-terminated list, count the arrays ourselves
    for( i = 0; _arrays[i] != 0; i++ )
    CV_Assert(narrays <= 1000);

    for( i = 0; i < narrays; i++ )
        CV_Assert(arrays[i] != 0);
        const Mat& A = *arrays[i];

        // find the first dimensionality which is different from 1;
        // in any of the arrays the first "d1" step do not affect the continuity
        for( d1 = 0; d1 < d; d1++ )
            if( A.size[d1] > 1 )

        // all iterated arrays must have identical sizes
        CV_Assert( A.size == arrays[i0]->size );

        if( !A.isContinuous() )
            CV_Assert( A.step[d-1] == A.elemSize() );
            // raise the iteration depth to the outermost dimension where this
            // array has a gap between consecutive sub-slices
            for( j = d-1; j > d1; j-- )
                if( A.step[j]*A.size[j] < A.step[j-1] )
            iterdepth = std::max(iterdepth, j);

    // merge trailing dimensions into one plane as long as the total plane
    // size still fits into an int
    size = arrays[i0]->size[d-1];
    for( j = d-1; j > iterdepth; j-- )
        int64 total1 = (int64)size*arrays[i0]->size[j-1];
        if( total1 != (int)total1 )

    if( iterdepth == d1 )

    // number of planes = product of the remaining (outer) dimensions
    for( j = iterdepth-1; j >= 0; j-- )
        nplanes *= arrays[i0]->size[j];

    for( i = 0; i < narrays; i++ )
        CV_Assert(arrays[i] != 0);
        const Mat& A = *arrays[i];
        // each plane is exposed as a 1 x size matrix over the array's data
        planes[i] = Mat(1, (int)size, A.type(), A.data);
// Advance every plane/pointer to the next plane of the iterated arrays.
// NOTE(review): this listing elides several lines (the idx increment, 'else'
// branches and braces); comments describe only the visible code.
NAryMatIterator& NAryMatIterator::operator ++()
    if( idx >= nplanes-1 )

    // fast path: a single outer dimension, so the plane offset is simply
    // step[0]*idx
    if( iterdepth == 1 )
        for( int i = 0; i < narrays; i++ )
            ptrs[i] = arrays[i]->data + arrays[i]->step[0]*idx;
        for( int i = 0; i < narrays; i++ )
            if( !planes[i].data )
            planes[i].data = arrays[i]->data + arrays[i]->step[0]*idx;

    // general path: decompose the linear plane index into per-dimension
    // indices (innermost first) and accumulate the byte offset per array
    for( int i = 0; i < narrays; i++ )
        const Mat& A = *arrays[i];
        int _idx = (int)idx;
        uchar* data = A.data;
        for( int j = iterdepth-1; j >= 0 && _idx > 0; j-- )
            int szi = A.size[j], t = _idx/szi;
            data += (_idx - t * szi)*A.step[j];   // (_idx % szi) * step[j]
        planes[i].data = data;
3317 NAryMatIterator NAryMatIterator::operator ++(int)
3319 NAryMatIterator it = *this;
3324 ///////////////////////////////////////////////////////////////////////////
3325 // MatConstIterator //
3326 ///////////////////////////////////////////////////////////////////////////
// Return the current 2D position (x, y) of the iterator.
// NOTE(review): an early-out for a null matrix pointer may be elided here.
Point MatConstIterator::pos() const
    CV_DbgAssert(m->dims <= 2);

    ptrdiff_t ofs = ptr - m->data;   // byte offset from the matrix origin
    int y = (int)(ofs/m->step[0]);   // row index = offset / row stride
    // column = (remaining bytes within the row) / element size
    return Point((int)((ofs - y*m->step[0])/elemSize), y);
// Store the current n-D position into _idx, one index per dimension.
// NOTE(review): the statements storing each index and updating 'ofs' appear
// to be elided in this listing.
void MatConstIterator::pos(int* _idx) const
    CV_Assert(m != 0 && _idx);
    ptrdiff_t ofs = ptr - m->data;
    for( int i = 0; i < m->dims; i++ )
        // index along dimension i = remaining byte offset / step[i]
        size_t s = m->step[i], v = ofs/s;
// Return the linear (element) position of the iterator within the matrix.
// NOTE(review): the 2D-vs-nD dispatch condition and the declarations of
// 'i'/'d' appear to be elided in this listing.
ptrdiff_t MatConstIterator::lpos() const
    // continuous storage: plain pointer arithmetic within the single slice
    if( m->isContinuous() )
        return (ptr - sliceStart)/elemSize;
    ptrdiff_t ofs = ptr - m->data;
    // 2D case: row * cols + column
    ptrdiff_t y = ofs/m->step[0];
    return y*m->cols + (ofs - y*m->step[0])/elemSize;
    // general n-D case: fold the per-dimension indices into a linear position
    ptrdiff_t result = 0;
    for( i = 0; i < d; i++ )
        size_t s = m->step[i], v = ofs/s;
        result = result*m->size[i] + v;
// Move the iterator to element position 'ofs' (absolute, or added to the
// current position when 'relative'). Keeps ptr/sliceStart/sliceEnd
// consistent for non-continuous matrices.
// NOTE(review): this listing elides several lines (clamping branches,
// declarations of 'ofs0'/'y', the 2D/n-D dispatch and braces); comments
// describe only the visible code.
void MatConstIterator::seek(ptrdiff_t ofs, bool relative)
    // continuous storage: a single slice, just move (and clamp) the pointer
    if( m->isContinuous() )
        ptr = (relative ? ptr : sliceStart) + ofs*elemSize;
        if( ptr < sliceStart )
        else if( ptr > sliceEnd )

    // 2D case: convert the current pointer to a linear element position,
    // then rebuild the slice bounds for the destination row
    ofs0 = ptr - m->data;
    y = ofs0/m->step[0];
    ofs += y*m->cols + (ofs0 - y*m->step[0])/elemSize;

    // clamp the destination row into [0, rows-1] for the slice bounds
    int y1 = std::min(std::max((int)y, 0), m->rows-1);
    sliceStart = m->data + y1*m->step[0];
    sliceEnd = sliceStart + m->cols*elemSize;
    // out-of-range positions pin the pointer to the slice boundary
    ptr = y < 0 ? sliceStart : y >= m->rows ? sliceEnd :
        sliceStart + (ofs - y*m->cols)*elemSize;

    // n-D case: peel off the innermost index first...
    int szi = m->size[d-1];
    ptrdiff_t t = ofs/szi;
    int v = (int)(ofs - t*szi);
    ptr = m->data + v*elemSize;
    sliceStart = m->data;

    // ...then accumulate the byte offset of the remaining outer indices
    for( int i = d-2; i >= 0; i-- )
        v = (int)(ofs - t*szi);
        sliceStart += v*m->step[i];

    sliceEnd = sliceStart + m->size[d-1]*elemSize;
    ptr = sliceStart + (ptr - m->data);
// Move the iterator to the n-D position given by _idx (absolute or relative).
// NOTE(review): the dimensionality checks and the 2D/n-D dispatch appear to
// be elided in this listing.
void MatConstIterator::seek(const int* _idx, bool relative)
    // 2D fast path: row-major linearization
    ofs = _idx[0]*m->size[1] + _idx[1];
    // general case: fold the indices into a linear element position
    for( i = 0; i < d; i++ )
        ofs = ofs*m->size[i] + _idx[i];
    // delegate to the linear-position seek
    seek(ofs, relative);
// Distance in elements between two iterators over the same matrix.
// NOTE(review): the check that both iterators reference the same matrix
// appears to be elided in this listing.
ptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a)
    // same slice: plain pointer difference
    if( a.sliceEnd == b.sliceEnd )
        return (b.ptr - a.ptr)/b.elemSize;

    // different slices: fall back to comparing linear positions
    return b.lpos() - a.lpos();
3460 //////////////////////////////// SparseMat ////////////////////////////////
3462 template<typename T1, typename T2> void
3463 convertData_(const void* _from, void* _to, int cn)
3465 const T1* from = (const T1*)_from;
3468 *to = saturate_cast<T2>(*from);
3470 for( int i = 0; i < cn; i++ )
3471 to[i] = saturate_cast<T2>(from[i]);
3474 template<typename T1, typename T2> void
3475 convertScaleData_(const void* _from, void* _to, int cn, double alpha, double beta)
3477 const T1* from = (const T1*)_from;
3480 *to = saturate_cast<T2>(*from*alpha + beta);
3482 for( int i = 0; i < cn; i++ )
3483 to[i] = saturate_cast<T2>(from[i]*alpha + beta);
3486 ConvertData getConvertElem(int fromType, int toType)
3488 static ConvertData tab[][8] =
3489 {{ convertData_<uchar, uchar>, convertData_<uchar, schar>,
3490 convertData_<uchar, ushort>, convertData_<uchar, short>,
3491 convertData_<uchar, int>, convertData_<uchar, float>,
3492 convertData_<uchar, double>, 0 },
3494 { convertData_<schar, uchar>, convertData_<schar, schar>,
3495 convertData_<schar, ushort>, convertData_<schar, short>,
3496 convertData_<schar, int>, convertData_<schar, float>,
3497 convertData_<schar, double>, 0 },
3499 { convertData_<ushort, uchar>, convertData_<ushort, schar>,
3500 convertData_<ushort, ushort>, convertData_<ushort, short>,
3501 convertData_<ushort, int>, convertData_<ushort, float>,
3502 convertData_<ushort, double>, 0 },
3504 { convertData_<short, uchar>, convertData_<short, schar>,
3505 convertData_<short, ushort>, convertData_<short, short>,
3506 convertData_<short, int>, convertData_<short, float>,
3507 convertData_<short, double>, 0 },
3509 { convertData_<int, uchar>, convertData_<int, schar>,
3510 convertData_<int, ushort>, convertData_<int, short>,
3511 convertData_<int, int>, convertData_<int, float>,
3512 convertData_<int, double>, 0 },
3514 { convertData_<float, uchar>, convertData_<float, schar>,
3515 convertData_<float, ushort>, convertData_<float, short>,
3516 convertData_<float, int>, convertData_<float, float>,
3517 convertData_<float, double>, 0 },
3519 { convertData_<double, uchar>, convertData_<double, schar>,
3520 convertData_<double, ushort>, convertData_<double, short>,
3521 convertData_<double, int>, convertData_<double, float>,
3522 convertData_<double, double>, 0 },
3524 { 0, 0, 0, 0, 0, 0, 0, 0 }};
3526 ConvertData func = tab[CV_MAT_DEPTH(fromType)][CV_MAT_DEPTH(toType)];
3527 CV_Assert( func != 0 );
3531 ConvertScaleData getConvertScaleElem(int fromType, int toType)
3533 static ConvertScaleData tab[][8] =
3534 {{ convertScaleData_<uchar, uchar>, convertScaleData_<uchar, schar>,
3535 convertScaleData_<uchar, ushort>, convertScaleData_<uchar, short>,
3536 convertScaleData_<uchar, int>, convertScaleData_<uchar, float>,
3537 convertScaleData_<uchar, double>, 0 },
3539 { convertScaleData_<schar, uchar>, convertScaleData_<schar, schar>,
3540 convertScaleData_<schar, ushort>, convertScaleData_<schar, short>,
3541 convertScaleData_<schar, int>, convertScaleData_<schar, float>,
3542 convertScaleData_<schar, double>, 0 },
3544 { convertScaleData_<ushort, uchar>, convertScaleData_<ushort, schar>,
3545 convertScaleData_<ushort, ushort>, convertScaleData_<ushort, short>,
3546 convertScaleData_<ushort, int>, convertScaleData_<ushort, float>,
3547 convertScaleData_<ushort, double>, 0 },
3549 { convertScaleData_<short, uchar>, convertScaleData_<short, schar>,
3550 convertScaleData_<short, ushort>, convertScaleData_<short, short>,
3551 convertScaleData_<short, int>, convertScaleData_<short, float>,
3552 convertScaleData_<short, double>, 0 },
3554 { convertScaleData_<int, uchar>, convertScaleData_<int, schar>,
3555 convertScaleData_<int, ushort>, convertScaleData_<int, short>,
3556 convertScaleData_<int, int>, convertScaleData_<int, float>,
3557 convertScaleData_<int, double>, 0 },
3559 { convertScaleData_<float, uchar>, convertScaleData_<float, schar>,
3560 convertScaleData_<float, ushort>, convertScaleData_<float, short>,
3561 convertScaleData_<float, int>, convertScaleData_<float, float>,
3562 convertScaleData_<float, double>, 0 },
3564 { convertScaleData_<double, uchar>, convertScaleData_<double, schar>,
3565 convertScaleData_<double, ushort>, convertScaleData_<double, short>,
3566 convertScaleData_<double, int>, convertScaleData_<double, float>,
3567 convertScaleData_<double, double>, 0 },
3569 { 0, 0, 0, 0, 0, 0, 0, 0 }};
3571 ConvertScaleData func = tab[CV_MAT_DEPTH(fromType)][CV_MAT_DEPTH(toType)];
3572 CV_Assert( func != 0 );
// Initial sparse-matrix hash table size. Kept a power of two because bucket
// indices are computed as (hashval & (size - 1)).
enum { HASH_SIZE0 = 8 };
3578 static inline void copyElem(const uchar* from, uchar* to, size_t elemSize)
3581 for( i = 0; i + sizeof(int) <= elemSize; i += sizeof(int) )
3582 *(int*)(to + i) = *(const int*)(from + i);
3583 for( ; i < elemSize; i++ )
3587 static inline bool isZeroElem(const uchar* data, size_t elemSize)
3590 for( i = 0; i + sizeof(int) <= elemSize; i += sizeof(int) )
3591 if( *(int*)(data + i) != 0 )
3593 for( ; i < elemSize; i++ )
// Sparse-matrix header constructor: precompute the node memory layout
// (value offset, total node size) and copy the per-dimension sizes.
// NOTE(review): the initializer list (refcount/dims/etc.) and some
// statements appear to be elided in this listing.
SparseMat::Hdr::Hdr( int _dims, const int* _sizes, int _type )
    // node layout: [Node header + extra index slots][value]; the value is
    // aligned to the element's channel size
    valueOffset = (int)alignSize(sizeof(SparseMat::Node) +
        sizeof(int)*std::max(dims - CV_MAX_DIM, 0), CV_ELEM_SIZE1(_type));
    // whole node rounded up to size_t alignment for the pool
    nodeSize = alignSize(valueOffset +
        CV_ELEM_SIZE(_type), (int)sizeof(size_t));

    for( i = 0; i < dims; i++ )
        size[i] = _sizes[i];
    // zero out the unused dimension slots
    for( ; i < CV_MAX_DIM; i++ )
3617 void SparseMat::Hdr::clear()
3620 hashtab.resize(HASH_SIZE0);
3622 pool.resize(nodeSize);
3623 nodeCount = freeList = 0;
// Build a sparse matrix from a dense one: walk all elements in odometer
// order and insert only the non-zero ones.
// NOTE(review): the outer odometer-loop scaffolding (loop header, index
// reset, break conditions, braces) appears partially elided in this listing.
SparseMat::SparseMat(const Mat& m)
: flags(MAGIC_VAL), hdr(0)
    create( m.dims, m.size, m.type() );

    int i, idx[CV_MAX_DIM] = {0}, d = m.dims, lastSize = m.size[d - 1];
    size_t esz = m.elemSize();
    uchar* dptr = m.data;

    // innermost dimension: scan the contiguous run of elements
    for( i = 0; i < lastSize; i++, dptr += esz )
        if( isZeroElem(dptr, esz) )
        // non-zero element: allocate a node and copy the value in
        uchar* to = newNode(idx, hash(idx));
        copyElem( dptr, to, esz );

    // advance the multi-dimensional index (odometer), compensating dptr for
    // any gap between consecutive sub-slices
    for( i = d - 2; i >= 0; i-- )
        dptr += m.step[i] - m.size[i+1]*m.step[i+1];
        if( ++idx[i] < m.size[i] )
// Build a SparseMat from a legacy CvSparseMat: create a matrix with the same
// shape/type, then deep-copy every stored node.
SparseMat::SparseMat(const CvSparseMat* m)
: flags(MAGIC_VAL), hdr(0)
    create( m->dims, &m->size[0], m->type );

    CvSparseMatIterator it;
    CvSparseNode* n = cvInitSparseMatIterator(m, &it);
    size_t esz = elemSize();

    // walk every node of the legacy matrix and clone it
    for( ; n != 0; n = cvGetNextSparseNode(&it) )
        const int* idx = CV_NODE_IDX(m, n);
        uchar* to = newNode(idx, hash(idx));
        copyElem((const uchar*)CV_NODE_VAL(m, n), to, esz);
// (Re)allocate the sparse matrix for the given shape and element type.
// If the current header matches exactly and is uniquely owned it is reused;
// otherwise a fresh header is allocated.
// NOTE(review): the release()/clear() calls and parts of the reuse control
// flow appear to be elided in this listing.
void SparseMat::create(int d, const int* _sizes, int _type)
    CV_Assert( _sizes && 0 < d && d <= CV_MAX_DIM );
    for( i = 0; i < d; i++ )
        CV_Assert( _sizes[i] > 0 );
    _type = CV_MAT_TYPE(_type);
    // try to reuse the current header when type/dims match and we are the
    // only owner
    if( hdr && _type == type() && hdr->dims == d && hdr->refcount == 1 )
        for( i = 0; i < d; i++ )
            if( _sizes[i] != hdr->size[i] )
    flags = MAGIC_VAL | _type;
    hdr = new Hdr(d, _sizes, _type);
// Deep-copy this sparse matrix into m: recreate the destination header with
// the same shape/type, then clone every stored node.
// NOTE(review): a self-copy early-out may be elided in this listing.
void SparseMat::copyTo( SparseMat& m ) const
    m.create( hdr->dims, hdr->size, type() );
    SparseMatConstIterator from = begin();
    size_t i, N = nzcount(), esz = elemSize();

    for( i = 0; i < N; i++, ++from )
        const Node* n = from.node();
        // reuse the source node's cached hash when creating the copy
        uchar* to = m.newNode(n->idx, n->hashval);
        copyElem( from.ptr, to, esz );
// Convert this sparse matrix to a dense one: allocate m with the same
// shape/type and write each stored node's value at its index.
// NOTE(review): the zero-initialization of m (for the absent elements)
// appears to be elided in this listing.
void SparseMat::copyTo( Mat& m ) const
    m.create( dims(), hdr->size, type() );
    SparseMatConstIterator from = begin();
    size_t i, N = nzcount(), esz = elemSize();

    for( i = 0; i < N; i++, ++from )
        const Node* n = from.node();
        copyElem( from.ptr, m.ptr(n->idx), esz);
// Convert the element type (and optionally scale by alpha) into sparse
// matrix m. The channel count is preserved; in-place conversion to a
// different type is routed through a temporary.
// NOTE(review): parts of the control flow (rtype < 0 handling, the
// alpha == 1 / alpha != 1 dispatch, braces) appear elided in this listing.
void SparseMat::convertTo( SparseMat& m, int rtype, double alpha ) const
    int cn = channels();
    rtype = CV_MAKETYPE(rtype, cn);   // conversion never changes channel count
    // converting in place to a different type: go through a temporary
    if( hdr == m.hdr && rtype != type() )
        convertTo(temp, rtype, alpha);

    CV_Assert(hdr != 0);
    m.create( hdr->dims, hdr->size, rtype );

    SparseMatConstIterator from = begin();
    size_t i, N = nzcount();

    // plain conversion (no scaling)
    ConvertData cvtfunc = getConvertElem(type(), rtype);
    for( i = 0; i < N; i++, ++from )
        const Node* n = from.node();
        // in place: overwrite the node value; otherwise create a new node
        uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval);
        cvtfunc( from.ptr, to, cn );

    // scaled conversion: value * alpha (beta fixed at 0)
    ConvertScaleData cvtfunc = getConvertScaleElem(type(), rtype);
    for( i = 0; i < N; i++, ++from )
        const Node* n = from.node();
        uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval);
        cvtfunc( from.ptr, to, cn, alpha, 0 );
// Convert this sparse matrix to a dense one while changing the element type
// and applying the affine map v -> v*alpha + beta.
// NOTE(review): the rtype < 0 handling, the initialization of m's absent
// elements and some braces appear to be elided in this listing.
void SparseMat::convertTo( Mat& m, int rtype, double alpha, double beta ) const
    int cn = channels();
    rtype = CV_MAKETYPE(rtype, cn);   // channel count is preserved
    m.create( dims(), hdr->size, rtype );

    SparseMatConstIterator from = begin();
    size_t i, N = nzcount();

    // fast path: identity transform, plain per-element conversion
    if( alpha == 1 && beta == 0 )
        ConvertData cvtfunc = getConvertElem(type(), rtype);
        for( i = 0; i < N; i++, ++from )
            const Node* n = from.node();
            uchar* to = m.ptr(n->idx);
            cvtfunc( from.ptr, to, cn );
        // general path: scaled conversion
        ConvertScaleData cvtfunc = getConvertScaleElem(type(), rtype);
        for( i = 0; i < N; i++, ++from )
            const Node* n = from.node();
            uchar* to = m.ptr(n->idx);
            cvtfunc( from.ptr, to, cn, alpha, beta );
// Remove all stored elements; the matrix shape and type are preserved.
// NOTE(review): the method body appears to be elided from this listing —
// only the signature line is present.
void SparseMat::clear()
// Create a legacy CvSparseMat deep copy of this matrix.
// NOTE(review): the return statement and braces appear to be elided in this
// listing.
SparseMat::operator CvSparseMat*() const
    CvSparseMat* m = cvCreateSparseMat(hdr->dims, hdr->size, type());

    SparseMatConstIterator from = begin();
    size_t i, N = nzcount(), esz = elemSize();

    for( i = 0; i < N; i++, ++from )
        const Node* n = from.node();
        // cvPtrND with createNode enabled allocates the destination element
        uchar* to = cvPtrND(m, n->idx, 0, -2, 0);
        copyElem(from.ptr, to, esz);
// Return a pointer to element (i0) of a 1-D sparse matrix. 'hashval' lets
// the caller supply a precomputed hash; 'createMissing' inserts the element
// when absent.
// NOTE(review): the hash-chain walk loop, the chain advance and the
// createMissing guard appear partially elided in this listing.
uchar* SparseMat::ptr(int i0, bool createMissing, size_t* hashval)
    CV_Assert( hdr && hdr->dims == 1 );
    size_t h = hashval ? *hashval : hash(i0);
    // bucket index: the table size is a power of two
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
    uchar* pool = &hdr->pool[0];
        Node* elem = (Node*)(pool + nidx);
        // match on both the full hash and the index
        if( elem->hashval == h && elem->idx[0] == i0 )
            return &value<uchar>(elem);
    return newNode( idx, h );
// Return a pointer to element (i0, i1) of a 2-D sparse matrix, optionally
// creating it when absent. 'hashval' may carry a precomputed hash.
// NOTE(review): the hash-chain walk loop and the createMissing guard appear
// partially elided in this listing.
uchar* SparseMat::ptr(int i0, int i1, bool createMissing, size_t* hashval)
    CV_Assert( hdr && hdr->dims == 2 );
    size_t h = hashval ? *hashval : hash(i0, i1);
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
    uchar* pool = &hdr->pool[0];
        Node* elem = (Node*)(pool + nidx);
        // match on the full hash and both indices
        if( elem->hashval == h && elem->idx[0] == i0 && elem->idx[1] == i1 )
            return &value<uchar>(elem);
    int idx[] = { i0, i1 };
    return newNode( idx, h );
// Return a pointer to element (i0, i1, i2) of a 3-D sparse matrix,
// optionally creating it when absent.
// NOTE(review): the hash-chain walk loop and the createMissing guard appear
// partially elided in this listing.
uchar* SparseMat::ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval)
    CV_Assert( hdr && hdr->dims == 3 );
    size_t h = hashval ? *hashval : hash(i0, i1, i2);
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
    uchar* pool = &hdr->pool[0];
        Node* elem = (Node*)(pool + nidx);
        // match on the full hash and all three indices
        if( elem->hashval == h && elem->idx[0] == i0 &&
            elem->idx[1] == i1 && elem->idx[2] == i2 )
            return &value<uchar>(elem);
    int idx[] = { i0, i1, i2 };
    return newNode( idx, h );
// Return a pointer to the element at the n-D index 'idx', creating it when
// 'createMissing' is set (otherwise returns 0 for an absent element).
// NOTE(review): the hash-chain walk loop and some braces appear partially
// elided in this listing.
uchar* SparseMat::ptr(const int* idx, bool createMissing, size_t* hashval)
    int i, d = hdr->dims;
    size_t h = hashval ? *hashval : hash(idx);
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
    uchar* pool = &hdr->pool[0];
        Node* elem = (Node*)(pool + nidx);
        if( elem->hashval == h )
            // equal hashes: confirm by comparing every index
            for( i = 0; i < d; i++ )
                if( elem->idx[i] != idx[i] )
            return &value<uchar>(elem);
    return createMissing ? newNode(idx, h) : 0;
// Remove element (i0, i1) from a 2-D sparse matrix if it is present.
// NOTE(review): the chain-walk loop and the previdx bookkeeping appear
// partially elided in this listing.
void SparseMat::erase(int i0, int i1, size_t* hashval)
    CV_Assert( hdr && hdr->dims == 2 );
    size_t h = hashval ? *hashval : hash(i0, i1);
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
    uchar* pool = &hdr->pool[0];
        Node* elem = (Node*)(pool + nidx);
        if( elem->hashval == h && elem->idx[0] == i0 && elem->idx[1] == i1 )
    // unlink the node from its bucket chain and return it to the free list
    removeNode(hidx, nidx, previdx);
// Remove element (i0, i1, i2) from a 3-D sparse matrix if it is present.
// NOTE(review): the chain-walk loop and the previdx bookkeeping appear
// partially elided in this listing.
void SparseMat::erase(int i0, int i1, int i2, size_t* hashval)
    CV_Assert( hdr && hdr->dims == 3 );
    size_t h = hashval ? *hashval : hash(i0, i1, i2);
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
    uchar* pool = &hdr->pool[0];
        Node* elem = (Node*)(pool + nidx);
        if( elem->hashval == h && elem->idx[0] == i0 &&
            elem->idx[1] == i1 && elem->idx[2] == i2 )
    // unlink the node from its bucket chain and return it to the free list
    removeNode(hidx, nidx, previdx);
// Remove the element at the n-D index 'idx' if it is present.
// NOTE(review): the chain-walk loop, match bookkeeping and some braces
// appear partially elided in this listing.
void SparseMat::erase(const int* idx, size_t* hashval)
    int i, d = hdr->dims;
    size_t h = hashval ? *hashval : hash(idx);
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
    uchar* pool = &hdr->pool[0];
        Node* elem = (Node*)(pool + nidx);
        if( elem->hashval == h )
            // equal hashes: confirm by comparing every index
            for( i = 0; i < d; i++ )
                if( elem->idx[i] != idx[i] )
    // unlink the node from its bucket chain and return it to the free list
    removeNode(hidx, nidx, previdx);
// Resize the hash table to 'newsize' buckets (forced up to a power of two,
// minimum 8) and rehash every existing node into the new table.
// NOTE(review): the zero-initialization of the new table, the chain-walk
// loop and some braces appear partially elided in this listing.
void SparseMat::resizeHashTab(size_t newsize)
    newsize = std::max(newsize, (size_t)8);
    // round up to a power of two so (hash & (size-1)) works as modulo
    if((newsize & (newsize-1)) != 0)
        newsize = (size_t)1 << cvCeil(std::log((double)newsize)/CV_LOG2);

    size_t i, hsize = hdr->hashtab.size();
    vector<size_t> _newh(newsize);
    size_t* newh = &_newh[0];
    for( i = 0; i < newsize; i++ )
    uchar* pool = &hdr->pool[0];
    for( i = 0; i < hsize; i++ )
        size_t nidx = hdr->hashtab[i];
        // walk the old bucket's chain, pushing each node onto its new bucket
        Node* elem = (Node*)(pool + nidx);
        size_t next = elem->next;
        size_t newhidx = elem->hashval & (newsize - 1);
        elem->next = newh[newhidx];
        newh[newhidx] = nidx;
    hdr->hashtab = _newh;
// Allocate a new node for index 'idx' with hash 'hashval', link it into its
// hash bucket and return a pointer to the value storage.
// NOTE(review): parts of the body (braces, the generic zero-fill of the
// value, the return statement) appear to be elided in this listing.
uchar* SparseMat::newNode(const int* idx, size_t hashval)
    const int HASH_MAX_FILL_FACTOR=3;
    size_t hsize = hdr->hashtab.size();
    // keep average chain length bounded: grow the table once the load factor
    // exceeds HASH_MAX_FILL_FACTOR
    if( ++hdr->nodeCount > hsize*HASH_MAX_FILL_FACTOR )
        resizeHashTab(std::max(hsize*2, (size_t)8));
        hsize = hdr->hashtab.size();

    // free list exhausted: grow the node pool and chain the new slots
    if( !hdr->freeList )
        size_t i, nsz = hdr->nodeSize, psize = hdr->pool.size(),
        newpsize = std::max(psize*2, 8*nsz);
        hdr->pool.resize(newpsize);
        uchar* pool = &hdr->pool[0];
        hdr->freeList = std::max(psize, nsz);
        // link the fresh slots into a singly-linked free list
        for( i = hdr->freeList; i < newpsize - nsz; i += nsz )
            ((Node*)(pool + i))->next = i + nsz;
        ((Node*)(pool + i))->next = 0;   // terminate the list

    // pop a node off the free list and push it onto its hash bucket
    size_t nidx = hdr->freeList;
    Node* elem = (Node*)&hdr->pool[nidx];
    hdr->freeList = elem->next;
    elem->hashval = hashval;
    size_t hidx = hashval & (hsize - 1);
    elem->next = hdr->hashtab[hidx];
    hdr->hashtab[hidx] = nidx;

    // store the element's indices
    int i, d = hdr->dims;
    for( i = 0; i < d; i++ )
        elem->idx[i] = idx[i];
    // zero-initialize the value storage (float/double fast paths visible)
    size_t esz = elemSize();
    uchar* p = &value<uchar>(elem);
    if( esz == sizeof(float) )
    else if( esz == sizeof(double) )
// Unlink node 'nidx' from bucket 'hidx' ('previdx' is its predecessor in
// the chain, or the bucket head case applies) and return it to the free
// list.
// NOTE(review): the if/else dispatch between the predecessor and bucket-head
// cases appears to be elided in this listing.
void SparseMat::removeNode(size_t hidx, size_t nidx, size_t previdx)
    Node* n = node(nidx);
        // mid-chain removal: bypass the node in its predecessor
        Node* prev = node(previdx);
        prev->next = n->next;
    // bucket-head removal
    hdr->hashtab[hidx] = n->next;
    // push the freed node onto the free list
    n->next = hdr->freeList;
    hdr->freeList = nidx;
// Construct an iterator positioned at the first stored node of _m; ptr
// stays 0 when the matrix has no stored elements.
// NOTE(review): null-matrix guards and the loop-exit/hashidx bookkeeping
// appear to be elided in this listing.
SparseMatConstIterator::SparseMatConstIterator(const SparseMat* _m)
: m((SparseMat*)_m), hashidx(0), ptr(0)
    SparseMat::Hdr& hdr = *m->hdr;
    const vector<size_t>& htab = hdr.hashtab;
    size_t i, hsize = htab.size();
    // scan for the first non-empty bucket
    for( i = 0; i < hsize; i++ )
        size_t nidx = htab[i];
        // point at the node's value (node start + valueOffset)
        ptr = &hdr.pool[nidx] + hdr.valueOffset;
// Advance to the next stored node: follow the current bucket's chain first,
// then scan subsequent buckets for the next non-empty one.
// NOTE(review): the end-of-iteration handling (setting ptr to 0) and some
// braces appear to be elided in this listing.
SparseMatConstIterator& SparseMatConstIterator::operator ++()
    if( !ptr || !m || !m->hdr )
    SparseMat::Hdr& hdr = *m->hdr;
    // recover the Node header from the value pointer and follow the chain
    size_t next = ((const SparseMat::Node*)(ptr - hdr.valueOffset))->next;
    ptr = &hdr.pool[next] + hdr.valueOffset;
    // chain exhausted: look for the next non-empty bucket
    size_t i = hashidx + 1, sz = hdr.hashtab.size();
    for( ; i < sz; i++ )
        size_t nidx = hdr.hashtab[i];
        ptr = &hdr.pool[nidx] + hdr.valueOffset;
// Compute NORM_INF / NORM_L1 / NORM_L2 over the stored (non-zero) elements
// of a CV_32F or CV_64F sparse matrix.
// NOTE(review): the declaration of the 'result' accumulator and several
// braces/else lines appear to be elided in this listing.
double norm( const SparseMat& src, int normType )
    SparseMatConstIterator it = src.begin();

    size_t i, N = src.nzcount();
    normType &= NORM_TYPE_MASK;
    int type = src.type();

    CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );

    if( type == CV_32F )
        if( normType == NORM_INF )
            for( i = 0; i < N; i++, ++it )
                result = std::max(result, std::abs((double)*(const float*)it.ptr));
        else if( normType == NORM_L1 )
            for( i = 0; i < N; i++, ++it )
                result += std::abs(*(const float*)it.ptr);
            // NORM_L2: accumulate squares; sqrt is applied at the end
            for( i = 0; i < N; i++, ++it )
                double v = *(const float*)it.ptr;
    else if( type == CV_64F )
        if( normType == NORM_INF )
            for( i = 0; i < N; i++, ++it )
                result = std::max(result, std::abs(*(const double*)it.ptr));
        else if( normType == NORM_L1 )
            for( i = 0; i < N; i++, ++it )
                result += std::abs(*(const double*)it.ptr);
            for( i = 0; i < N; i++, ++it )
                double v = *(const double*)it.ptr;
        CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );

    // L2 accumulates squared values; finish with the square root
    if( normType == NORM_L2 )
        result = std::sqrt(result);
// Find the minimum and maximum over the stored (non-zero) elements of a
// CV_32F/CV_64F sparse matrix, optionally reporting their n-D indices.
// NOTE(review): the output-pointer guards (if(_minval) etc.), the comparison
// branches and several braces appear to be elided in this listing.
void minMaxLoc( const SparseMat& src, double* _minval, double* _maxval, int* _minidx, int* _maxidx )
    SparseMatConstIterator it = src.begin();
    size_t i, N = src.nzcount(), d = src.hdr ? src.hdr->dims : 0;
    int type = src.type();
    const int *minidx = 0, *maxidx = 0;

    if( type == CV_32F )
        float minval = FLT_MAX, maxval = -FLT_MAX;
        for( i = 0; i < N; i++, ++it )
            float v = *(const float*)it.ptr;
            // remember where the extrema were found
            minidx = it.node()->idx;
            maxidx = it.node()->idx;
    else if( type == CV_64F )
        double minval = DBL_MAX, maxval = -DBL_MAX;
        for( i = 0; i < N; i++, ++it )
            double v = *(const double*)it.ptr;
            minidx = it.node()->idx;
            maxidx = it.node()->idx;
        CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );

    // copy the found n-D indices into the caller's buffers
    for( i = 0; i < d; i++ )
        _minidx[i] = minidx[i];
    for( i = 0; i < d; i++ )
        _maxidx[i] = maxidx[i];
4248 void normalize( const SparseMat& src, SparseMat& dst, double a, int norm_type )
4251 if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
4253 scale = norm( src, norm_type );
4254 scale = scale > DBL_EPSILON ? a/scale : 0.;
4257 CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );
4259 src.convertTo( dst, -1, scale );
4262 ////////////////////// RotatedRect //////////////////////
4264 void RotatedRect::points(Point2f pt[]) const
4266 double _angle = angle*CV_PI/180.;
4267 float b = (float)cos(_angle)*0.5f;
4268 float a = (float)sin(_angle)*0.5f;
4270 pt[0].x = center.x - a*size.height - b*size.width;
4271 pt[0].y = center.y + b*size.height - a*size.width;
4272 pt[1].x = center.x + a*size.height - b*size.width;
4273 pt[1].y = center.y - b*size.height - a*size.width;
4274 pt[2].x = 2*center.x - pt[0].x;
4275 pt[2].y = 2*center.y - pt[0].y;
4276 pt[3].x = 2*center.x - pt[1].x;
4277 pt[3].y = 2*center.y - pt[1].y;
4280 Rect RotatedRect::boundingRect() const
4284 Rect r(cvFloor(min(min(min(pt[0].x, pt[1].x), pt[2].x), pt[3].x)),
4285 cvFloor(min(min(min(pt[0].y, pt[1].y), pt[2].y), pt[3].y)),
4286 cvCeil(max(max(max(pt[0].x, pt[1].x), pt[2].x), pt[3].x)),
4287 cvCeil(max(max(max(pt[0].y, pt[1].y), pt[2].y), pt[3].y)));
4289 r.height -= r.y - 1;