1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "precomp.hpp"
44 #include "opencv2/core/gpumat.hpp"
45 #include "opencv2/core/opengl_interop.hpp"
47 /****************************************************************************************\
48 * [scaled] Identity matrix initialization *
49 \****************************************************************************************/
// Swaps two Mat headers field-by-field in O(1); no pixel data is copied.
// After swapping step.p/size.p pointers, the two trailing checks re-point
// step.p back at each header's OWN inline buffer when the pointer swap left
// it aimed at the other object's buf (small-dims case).
// NOTE(review): this excerpt is decimated — braces and some statements of the
// original are elided; code lines below are kept verbatim.
53 void swap( Mat& a, Mat& b )
55 std::swap(a.flags, b.flags);
56 std::swap(a.dims, b.dims);
57 std::swap(a.rows, b.rows);
58 std::swap(a.cols, b.cols);
59 std::swap(a.data, b.data);
60 std::swap(a.refcount, b.refcount);
61 std::swap(a.datastart, b.datastart);
62 std::swap(a.dataend, b.dataend);
63 std::swap(a.datalimit, b.datalimit);
64 std::swap(a.allocator, b.allocator);
66 std::swap(a.size.p, b.size.p);
67 std::swap(a.step.p, b.step.p);
68 std::swap(a.step.buf[0], b.step.buf[0]);
69 std::swap(a.step.buf[1], b.step.buf[1]);
// Fix up self-referential step pointers so each Mat owns its own buffer.
71 if( a.step.p == b.step.buf )
73 a.step.p = a.step.buf;
77 if( b.step.p == a.step.buf )
79 b.step.p = b.step.buf;
// Installs a new dimensionality/size/step layout into Mat m.
// For the >2-dims case a single fastMalloc block holds both the step array
// and the size array; size.p[-1] stores the dimension count, and rows/cols
// are set to -1 as the "not a 2D matrix" sentinel.  The final loop computes
// per-dimension steps (or copies caller-provided _steps) and verifies the
// total byte count does not overflow size_t.
// NOTE(review): decimated excerpt — several original lines (braces, the
// small-dims fast path, loop body pieces) are elided; code kept verbatim.
85 static inline void setSize( Mat& m, int _dims, const int* _sz,
86 const size_t* _steps, bool autoSteps=false )
88 CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM );
91 if( m.step.p != m.step.buf )
94 m.step.p = m.step.buf;
// Joint allocation: _dims steps followed by (_dims+1) ints for sizes,
// where the extra leading int stores the dimension count.
99 m.step.p = (size_t*)fastMalloc(_dims*sizeof(m.step.p[0]) + (_dims+1)*sizeof(m.size.p[0]));
100 m.size.p = (int*)(m.step.p + _dims) + 1;
101 m.size.p[-1] = _dims;
102 m.rows = m.cols = -1;
110 size_t esz = CV_ELEM_SIZE(m.flags), total = esz;
112 for( i = _dims-1; i >= 0; i-- )
119 m.step.p[i] = i < _dims-1 ? _steps[i] : esz;
// Guard against size_t overflow of the running total byte count.
123 int64 total1 = (int64)total*s;
124 if( (uint64)total1 != (size_t)total1 )
125 CV_Error( CV_StsOutOfRange, "The total matrix size does not fit to \"size_t\" type" );
126 total = (size_t)total1;
// updateContinuityFlag: sets/clears Mat::CONTINUOUS_FLAG by scanning the
// dims from innermost outward and checking whether each step equals the
// packed product of the inner extents (step[j]*size[j] vs step[j-1]); the
// matrix is continuous only if the scan reaches the outermost dim and the
// total byte span still fits in size_t.
// NOTE(review): decimated excerpt — loop-variable declarations, braces and
// some branch lines are elided; code kept verbatim.
138 static void updateContinuityFlag(Mat& m)
141 for( i = 0; i < m.dims; i++ )
147 for( j = m.dims-1; j > i; j-- )
149 if( m.step[j]*m.size[j] < m.step[j-1] )
153 uint64 t = (uint64)m.step[0]*m.size[0];
154 if( j <= i && t == (size_t)t )
155 m.flags |= Mat::CONTINUOUS_FLAG;
157 m.flags &= ~Mat::CONTINUOUS_FLAG;
// finalizeHdr: finishes header construction after size/step are set —
// refreshes the continuity flag, resets rows/cols for >2 dims, and derives
// datalimit (end of addressable buffer) and dataend (one past the last
// actually referenced byte, accounting for possibly padded steps).
160 static void finalizeHdr(Mat& m)
162 updateContinuityFlag(m);
165 m.rows = m.cols = -1;
168 m.datalimit = m.datastart + m.size[0]*m.step[0];
// dataend = last element's end: innermost extent plus (size-1)*step of the
// outer dims, so trailing row padding is not counted.
171 m.dataend = m.data + m.size[d-1]*m.step[d-1];
172 for( int i = 0; i < d-1; i++ )
173 m.dataend += (m.size[i] - 1)*m.step[i];
176 m.dataend = m.datalimit;
179 m.dataend = m.datalimit = 0;
// Allocates (or re-uses) storage for a d-dimensional matrix of the given
// sizes/type.  If the existing header already matches (same dims, sizes and
// type) the early-out path keeps the current buffer.  Otherwise the data is
// released and re-allocated either through the custom allocator or through
// fastMalloc, with the refcount stored right after the pixel data.
// NOTE(review): `_sizes` is asserted twice in the CV_Assert below — the
// second check is redundant (likely a typo for another condition).
// NOTE(review): decimated excerpt — release/retry control flow between the
// two allocation paths is elided; code kept verbatim.
183 void Mat::create(int d, const int* _sizes, int _type)
186 CV_Assert(0 <= d && _sizes && d <= CV_MAX_DIM && _sizes);
187 _type = CV_MAT_TYPE(_type);
// Fast path: header already describes exactly this shape and type.
189 if( data && (d == dims || (d == 1 && dims <= 2)) && _type == type() )
191 if( d == 2 && rows == _sizes[0] && cols == _sizes[1] )
193 for( i = 0; i < d; i++ )
194 if( size[i] != _sizes[i] )
196 if( i == d && (d > 1 || size[1] == 1))
203 flags = (_type & CV_MAT_TYPE_MASK) | MAGIC_VAL;
204 setSize(*this, d, _sizes, 0, true);
// Tegra-optimized allocator hook (no-op on other platforms).
209 if( !allocator || allocator == tegra::getAllocator() ) allocator = tegra::getAllocator(d, _sizes, _type);
// Plain allocation: refcount lives immediately after the aligned data block.
213 size_t totalsize = alignSize(step.p[0]*size.p[0], (int)sizeof(*refcount));
214 data = datastart = (uchar*)fastMalloc(totalsize + (int)sizeof(*refcount));
215 refcount = (int*)(data + totalsize);
// Custom-allocator path; allocator must produce an elem-size-packed layout.
223 allocator->allocate(dims, size, _type, refcount, datastart, data, step.p);
224 CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
// Second (fallback) allocation attempt — presumably reached when the
// custom allocator throws; TODO confirm against the full source.
228 size_t totalSize = alignSize(step.p[0]*size.p[0], (int)sizeof(*refcount));
229 data = datastart = (uchar*)fastMalloc(totalSize + (int)sizeof(*refcount));
230 refcount = (int*)(data + totalSize);
234 allocator->allocate(dims, size, _type, refcount, datastart, data, step.p);
235 CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
// copySize: copies the dimensionality and per-dimension sizes/steps from m
// into this header (setSize allocates the arrays, the loop fills them —
// loop body elided in this excerpt).
243 void Mat::copySize(const Mat& m)
245 setSize(*this, m.dims, 0, 0);
246 for( int i = 0; i < dims; i++ )
// deallocate: releases the pixel buffer, delegating to the custom allocator
// when present.  NOTE(review): the non-allocator fastFree branch is elided
// in this excerpt; code kept verbatim.
253 void Mat::deallocate()
256 allocator->deallocate(refcount, datastart, data);
259 CV_DbgAssert(refcount != 0);
// Submatrix constructor: builds a header that views the rows/cols of m
// selected by _rowRange/_colRange without copying data.  Range::all() means
// "keep the whole extent".  data is advanced to the ROI origin, SUBMATRIX_FLAG
// is set whenever a proper sub-range is taken, and CONTINUOUS_FLAG is dropped
// when a column sub-range breaks row packing.
// NOTE(review): decimated excerpt — the >2-dims delegation via the Range[]
// ctor and the empty-result cleanup are partially elided; code kept verbatim.
265 Mat::Mat(const Mat& m, const Range& _rowRange, const Range& _colRange) : size(&rows)
268 CV_Assert( m.dims >= 2 );
// For >2 dims, build a Range array and delegate (delegation line elided).
271 AutoBuffer<Range> rs(m.dims);
274 for( int i = 2; i < m.dims; i++ )
275 rs[i] = Range::all();
281 if( _rowRange != Range::all() && _rowRange != Range(0,rows) )
283 CV_Assert( 0 <= _rowRange.start && _rowRange.start <= _rowRange.end && _rowRange.end <= m.rows );
284 rows = _rowRange.size();
285 data += step*_rowRange.start;
286 flags |= SUBMATRIX_FLAG;
289 if( _colRange != Range::all() && _colRange != Range(0,cols) )
291 CV_Assert( 0 <= _colRange.start && _colRange.start <= _colRange.end && _colRange.end <= m.cols );
292 cols = _colRange.size();
293 data += _colRange.start*elemSize();
// Taking a column sub-range breaks continuity unless all columns kept.
294 flags &= cols < m.cols ? ~CONTINUOUS_FLAG : -1;
295 flags |= SUBMATRIX_FLAG;
299 flags |= CONTINUOUS_FLAG;
301 if( rows <= 0 || cols <= 0 )
// Rect-ROI constructor: a non-copying 2D view of m restricted to roi.
// The initializer list advances data by the row offset; the body then
// validates the rectangle, bumps the refcount (shared buffer), adjusts the
// continuity/submatrix flags and inherits m's row step.
// NOTE(review): decimated excerpt — the column-offset addition to data and
// the empty-ROI cleanup tail are elided; code kept verbatim.
309 Mat::Mat(const Mat& m, const Rect& roi)
310 : flags(m.flags), dims(2), rows(roi.height), cols(roi.width),
311 data(m.data + roi.y*m.step[0]), refcount(m.refcount),
312 datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit),
313 allocator(m.allocator), size(&rows)
315 CV_Assert( m.dims <= 2 );
// Narrower-than-parent view breaks continuity; a single row restores it.
316 flags &= roi.width < m.cols ? ~CONTINUOUS_FLAG : -1;
317 flags |= roi.height == 1 ? CONTINUOUS_FLAG : 0;
319 size_t esz = CV_ELEM_SIZE(flags);
321 CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols &&
322 0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows );
// The view shares m's buffer, so take a reference.
324 CV_XADD(refcount, 1);
325 if( roi.width < m.cols || roi.height < m.rows )
326 flags |= SUBMATRIX_FLAG;
328 step[0] = m.step[0]; step[1] = esz;
330 if( rows <= 0 || cols <= 0 )
// User-data constructor: wraps externally-owned memory _data as an
// n-dimensional Mat without copying or taking ownership (no refcount).
// NOTE(review): decimated excerpt — the finalizeHdr call that normally
// completes the header is elided; code kept verbatim.
338 Mat::Mat(int _dims, const int* _sizes, int _type, void* _data, const size_t* _steps) : size(&rows)
341 flags |= CV_MAT_TYPE(_type);
342 data = datastart = (uchar*)_data;
343 setSize(*this, _dims, _sizes, _steps, true);
// Range[] constructor: n-dimensional submatrix view of m.  First pass
// validates every range, second pass shrinks size.p[i] and advances data
// for each restricted dimension, then the continuity flag is refreshed.
348 Mat::Mat(const Mat& m, const Range* ranges) : size(&rows)
354 for( i = 0; i < d; i++ )
357 CV_Assert( r == Range::all() || (0 <= r.start && r.start < r.end && r.end <= m.size[i]) );
360 for( i = 0; i < d; i++ )
363 if( r != Range::all() && r != Range(0, size.p[i]))
365 size.p[i] = r.end - r.start;
366 data += r.start*step.p[i];
367 flags |= SUBMATRIX_FLAG;
370 updateContinuityFlag(*this);
// Legacy-interop constructor: wraps (or, with copyData, duplicates — that
// branch is elided in this excerpt) an old-style CvMatND as a cv::Mat.
// Sizes and steps are copied dimension-by-dimension from m->dim[].
374 Mat::Mat(const CvMatND* m, bool copyData) : size(&rows)
379 data = datastart = m->data.ptr;
380 flags |= CV_MAT_TYPE(m->type);
381 int _sizes[CV_MAX_DIM];
382 size_t _steps[CV_MAX_DIM];
385 for( i = 0; i < d; i++ )
387 _sizes[i] = m->dim[i].size;
388 _steps[i] = m->dim[i].step;
391 setSize(*this, d, _sizes, _steps);
// Returns a single-column view of the d-th diagonal of a 2D matrix: the
// result has len rows, 1 column, and a row step enlarged so each element
// advances one row AND one column of the source.  Positive/negative d
// select diagonals on opposite sides of the main one (the data-offset lines
// distinguishing the two are elided in this excerpt — confirm in the full
// source).  The view is marked SUBMATRIX unless the source is 1x1.
402 Mat Mat::diag(int d) const
404 CV_Assert( dims <= 2 );
406 size_t esz = elemSize();
411 len = std::min(cols - d, rows);
416 len = std::min(rows + d, cols);
419 CV_DbgAssert( len > 0 );
421 m.size[0] = m.rows = len;
422 m.size[1] = m.cols = 1;
// Step becomes row-step + element-size so iteration walks the diagonal.
423 m.step[0] += (len > 1 ? esz : 0);
426 m.flags &= ~CONTINUOUS_FLAG;
428 m.flags |= CONTINUOUS_FLAG;
430 if( size() != Size(1,1) )
431 m.flags |= SUBMATRIX_FLAG;
// Legacy-interop constructor for 2D CvMat.  Without copyData the header
// simply aliases m's buffer (type + continuity bits are taken from m->type,
// datalimit/dataend are derived from the step); with copyData a temporary
// wrapper is deep-copied into *this.
437 Mat::Mat(const CvMat* m, bool copyData) : size(&rows)
446 flags = MAGIC_VAL + (m->type & (CV_MAT_TYPE_MASK|CV_MAT_CONT_FLAG));
450 data = datastart = m->data.ptr;
451 size_t esz = CV_ELEM_SIZE(m->type), minstep = cols*esz, _step = m->step;
454 datalimit = datastart + _step*rows;
// Last row may be shorter than the padded step: end at its true extent.
455 dataend = datalimit - _step + minstep;
456 step[0] = _step; step[1] = esz;
// copyData path: clone through a non-owning wrapper.
460 data = datastart = dataend = 0;
461 Mat(m->rows, m->cols, m->type, m->data.ptr, m->step).copyTo(*this);
// Legacy-interop constructor for IplImage.  Handles two layouts: the whole
// image (no ROI) and an image with a ROI and/or channel-of-interest (COI).
// With a COI on a planar image, a single plane is selected; a COI on an
// interleaved image is extracted at the end via mixChannels (which copies —
// only that path honours coi with dataOrder==PIXEL).
// NOTE(review): decimated excerpt — branch braces and the copyData clone
// path are partially elided; code kept verbatim.
466 Mat::Mat(const IplImage* img, bool copyData) : size(&rows)
474 CV_DbgAssert(CV_IS_IMAGE(img) && img->imageData != 0);
476 int imgdepth = IPL2CV_DEPTH(img->depth);
478 step[0] = img->widthStep;
// No-ROI path: wrap the whole image.
482 CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL);
483 flags = MAGIC_VAL + CV_MAKETYPE(imgdepth, img->nChannels);
484 rows = img->height; cols = img->width;
485 datastart = data = (uchar*)img->imageData;
486 esz = CV_ELEM_SIZE(flags);
// ROI path: view only the ROI rectangle; a planar COI selects one plane.
490 CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL || img->roi->coi != 0);
491 bool selectedPlane = img->roi->coi && img->dataOrder == IPL_DATA_ORDER_PLANE;
492 flags = MAGIC_VAL + CV_MAKETYPE(imgdepth, selectedPlane ? 1 : img->nChannels);
493 rows = img->roi->height; cols = img->roi->width;
494 esz = CV_ELEM_SIZE(flags);
495 data = datastart = (uchar*)img->imageData +
496 (selectedPlane ? (img->roi->coi - 1)*step*img->height : 0) +
497 img->roi->yOffset*step[0] + img->roi->xOffset*esz;
499 datalimit = datastart + step.p[0]*rows;
500 dataend = datastart + step.p[0]*(rows-1) + esz*cols;
501 flags |= (cols*esz == step.p[0] || rows == 1 ? CONTINUOUS_FLAG : 0);
508 if( !img->roi || !img->roi->coi ||
509 img->dataOrder == IPL_DATA_ORDER_PLANE)
// Interleaved COI: extract the selected channel into freshly created data.
513 int ch[] = {img->roi->coi - 1, 0};
514 create(m.rows, m.cols, m.type());
515 mixChannels(&m, 1, this, 1, ch, 1);
// operator IplImage: builds a legacy image header aliasing this Mat's data
// (no copy); only valid for 2D matrices.
521 Mat::operator IplImage() const
523 CV_Assert( dims <= 2 );
525 cvInitImageHeader(&img, size(), cvIplDepth(flags), channels());
526 cvSetData(&img, data, (int)step[0]);
// pop_back: removes the last nelems rows.  For owned, non-submatrix data it
// just shrinks size.p[0] and dataend in place; the rowRange path (taken in
// the elided branch condition) re-creates the header as a row sub-view.
531 void Mat::pop_back(size_t nelems)
533 CV_Assert( nelems <= (size_t)size.p[0] );
536 *this = rowRange(0, size.p[0] - (int)nelems);
539 size.p[0] -= (int)nelems;
540 dataend -= nelems*step.p[0];
541 /*if( size.p[0] <= 1 )
544 flags |= CONTINUOUS_FLAG;
546 updateContinuityFlag(*this);
// push_back_: appends one raw element (row) at the end, growing the buffer
// ~1.5x via reserve() when capacity is exhausted or the matrix is a
// submatrix view.  Continuity is cleared if rows carry padding.
552 void Mat::push_back_(const void* elem)
555 if( isSubmatrix() || dataend + step.p[0] > datalimit )
556 reserve( std::max(r + 1, (r*3+1)/2) );
558 size_t esz = elemSize();
559 memcpy(data + r*step.p[0], elem, esz);
561 dataend += step.p[0];
562 if( esz < step.p[0] )
563 flags &= ~CONTINUOUS_FLAG;
// reserve: guarantees capacity for nelems rows without changing the logical
// row count.  Allocates at least MIN_SIZE bytes, copies the existing r rows
// into the new buffer via a rowRange copy, then restores the old row count.
// NOTE(review): decimated excerpt — the swap/copy tail between the new
// buffer and *this is partially elided; code kept verbatim.
566 void Mat::reserve(size_t nelems)
568 const size_t MIN_SIZE = 64;
570 CV_Assert( (int)nelems >= 0 );
571 if( !isSubmatrix() && data + step.p[0]*nelems <= datalimit )
576 if( (size_t)r >= nelems )
579 size.p[0] = std::max((int)nelems, 1);
580 size_t newsize = total()*elemSize();
// Round the allocation up so tiny matrices still get MIN_SIZE bytes.
582 if( newsize < MIN_SIZE )
583 size.p[0] = (int)((MIN_SIZE + newsize - 1)*nelems/newsize);
585 Mat m(dims, size.p, type());
589 Mat mpart = m.rowRange(0, r);
595 dataend = data + step.p[0]*r;
// resize(nelems): changes the number of rows.  When capacity suffices and
// the matrix owns its data, only size.p[0]/dataend move; otherwise (the
// elided branch) reserve() reallocates first.
599 void Mat::resize(size_t nelems)
601 int saveRows = size.p[0];
602 if( saveRows == (int)nelems )
604 CV_Assert( (int)nelems >= 0 );
606 if( isSubmatrix() || data + step.p[0]*nelems > datalimit )
609 size.p[0] = (int)nelems;
610 dataend += (size.p[0] - saveRows)*step.p[0];
612 //updateContinuityFlag(*this);
// resize(nelems, s): same as above, then fills any newly added rows with
// the scalar s (the setTo call on `part` is elided in this excerpt).
616 void Mat::resize(size_t nelems, const Scalar& s)
618 int saveRows = size.p[0];
621 if( size.p[0] > saveRows )
623 Mat part = rowRange(saveRows, size.p[0]);
// push_back(elems): appends all rows of elems.  An empty destination simply
// clones elems; otherwise the trailing dims/type must match (checked via a
// temporary size.p[0] substitution so `size == elems.size` compares the
// remaining extents).  Data is moved with one memcpy when both are
// continuous, else row-by-row via a rowRange copy.
628 void Mat::push_back(const Mat& elems)
630 int r = size.p[0], delta = elems.size.p[0];
641 *this = elems.clone();
// Temporarily pretend this has elems' row count so the full size vectors
// can be compared; restored by the elided line that follows.
645 size.p[0] = elems.size.p[0];
646 bool eq = size == elems.size;
649 CV_Error(CV_StsUnmatchedSizes, "");
650 if( type() != elems.type() )
651 CV_Error(CV_StsUnmatchedFormats, "");
653 if( isSubmatrix() || dataend + step.p[0]*delta > datalimit )
654 reserve( std::max(r + delta, (r*3+1)/2) );
657 dataend += step.p[0]*delta;
659 //updateContinuityFlag(*this);
661 if( isContinuous() && elems.isContinuous() )
662 memcpy(data + r*step.p[0], elems.data, elems.total()*elems.elemSize());
665 Mat part = rowRange(r, r + delta);
// Converts any legacy CvArr (CvMat, CvMatND, IplImage or CvSeq) into a
// cv::Mat, copying only when requested or required.  coiMode==0 rejects
// images with an active COI.  Single-block sequences can be wrapped without
// a copy; multi-block ones are flattened with cvCvtSeqToArray.
671 Mat cvarrToMat(const CvArr* arr, bool copyData,
672 bool /*allowND*/, int coiMode)
677 return Mat((const CvMat*)arr, copyData );
678 if( CV_IS_MATND(arr) )
679 return Mat((const CvMatND*)arr, copyData );
680 if( CV_IS_IMAGE(arr) )
682 const IplImage* iplimg = (const IplImage*)arr;
683 if( coiMode == 0 && iplimg->roi && iplimg->roi->coi > 0 )
684 CV_Error(CV_BadCOI, "COI is not supported by the function");
685 return Mat(iplimg, copyData);
// CvSeq path: seq->first->next == seq->first means one contiguous block.
689 CvSeq* seq = (CvSeq*)arr;
690 CV_Assert(seq->total > 0 && CV_ELEM_SIZE(seq->flags) == seq->elem_size);
691 if(!copyData && seq->first->next == seq->first)
692 return Mat(seq->total, 1, CV_MAT_TYPE(seq->flags), seq->first->data);
693 Mat buf(seq->total, 1, CV_MAT_TYPE(seq->flags));
694 cvCvtSeqToArray(seq, buf.data, CV_WHOLE_SEQ);
697 CV_Error(CV_StsBadArg, "Unknown array type");
// locateROI: given a submatrix view, recovers the size of the parent matrix
// (wholeSize) and this view's offset inside it (ofs), purely from pointer
// arithmetic on data/datastart/dataend and the row step.
701 void Mat::locateROI( Size& wholeSize, Point& ofs ) const
703 CV_Assert( dims <= 2 && step[0] > 0 );
704 size_t esz = elemSize(), minstep;
705 ptrdiff_t delta1 = data - datastart, delta2 = dataend - datastart;
// Offset = (full rows skipped, remaining bytes / element size).
711 ofs.y = (int)(delta1/step[0]);
712 ofs.x = (int)((delta1 - step[0]*ofs.y)/esz);
713 CV_DbgAssert( data == datastart + ofs.y*step[0] + ofs.x*esz );
715 minstep = (ofs.x + cols)*esz;
716 wholeSize.height = (int)((delta2 - minstep)/step[0] + 1);
717 wholeSize.height = std::max(wholeSize.height, ofs.y + rows);
718 wholeSize.width = (int)((delta2 - step*(wholeSize.height-1))/esz);
719 wholeSize.width = std::max(wholeSize.width, ofs.x + cols);
// adjustROI: grows/shrinks this view in place by the four deltas, clamped to
// the parent matrix bounds found by locateROI; data, rows/cols and the
// continuity flag are updated accordingly.  Returns *this (return elided in
// this excerpt).
722 Mat& Mat::adjustROI( int dtop, int dbottom, int dleft, int dright )
724 CV_Assert( dims <= 2 && step[0] > 0 );
725 Size wholeSize; Point ofs;
726 size_t esz = elemSize();
727 locateROI( wholeSize, ofs );
728 int row1 = std::max(ofs.y - dtop, 0), row2 = std::min(ofs.y + rows + dbottom, wholeSize.height);
729 int col1 = std::max(ofs.x - dleft, 0), col2 = std::min(ofs.x + cols + dright, wholeSize.width);
730 data += (row1 - ofs.y)*step + (col1 - ofs.x)*esz;
731 rows = row2 - row1; cols = col2 - col1;
732 size.p[0] = rows; size.p[1] = cols;
733 if( esz*cols == step[0] || rows == 1 )
734 flags |= CONTINUOUS_FLAG;
736 flags &= ~CONTINUOUS_FLAG;
// extractImageCOI: copies one channel (the COI) of a legacy array into a
// single-channel output.  coi < 0 (elided condition) means "use the image's
// own COI"; the channel copy itself is done by mixChannels.
742 void cv::extractImageCOI(const CvArr* arr, OutputArray _ch, int coi)
744 Mat mat = cvarrToMat(arr, false, true, 1);
745 _ch.create(mat.dims, mat.size, mat.depth());
746 Mat ch = _ch.getMat();
749 CV_Assert( CV_IS_IMAGE(arr) );
750 coi = cvGetImageCOI((const IplImage*)arr)-1;
752 CV_Assert(0 <= coi && coi < mat.channels());
753 int _pairs[] = { coi, 0 };
754 mixChannels( &mat, 1, &ch, 1, _pairs, 1 );
// insertImageCOI: the inverse — writes a single-channel ch into channel coi
// of the legacy destination array, again via mixChannels.
757 void cv::insertImageCOI(InputArray _ch, CvArr* arr, int coi)
759 Mat ch = _ch.getMat(), mat = cvarrToMat(arr, false, true, 1);
762 CV_Assert( CV_IS_IMAGE(arr) );
763 coi = cvGetImageCOI((const IplImage*)arr)-1;
765 CV_Assert(ch.size == mat.size && ch.depth() == mat.depth() && 0 <= coi && coi < mat.channels());
766 int _pairs[] = { 0, coi };
767 mixChannels( &ch, 1, &mat, 1, _pairs, 1 );
// reshape: returns a header with a different channel count and/or row count
// over the SAME data (no copy).  The >2-dims fast path only re-splits the
// innermost dimension's channels.  For 2D, changing rows requires a
// continuous matrix and the total element count must divide evenly; the
// total width must likewise divide by the new channel count.
773 Mat Mat::reshape(int new_cn, int new_rows) const
// n-dims case: fold/unfold channels into the last dimension only.
778 if( dims > 2 && new_rows == 0 && new_cn != 0 && size[dims-1]*cn % new_cn == 0 )
780 hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
781 hdr.step[dims-1] = CV_ELEM_SIZE(hdr.flags);
782 hdr.size[dims-1] = hdr.size[dims-1]*cn / new_cn;
786 CV_Assert( dims <= 2 );
791 int total_width = cols * cn;
// Auto-derive the row count when the requested cn doesn't fit the width.
793 if( (new_cn > total_width || total_width % new_cn != 0) && new_rows == 0 )
794 new_rows = rows * total_width / new_cn;
796 if( new_rows != 0 && new_rows != rows )
798 int total_size = total_width * rows;
799 if( !isContinuous() )
800 CV_Error( CV_BadStep,
801 "The matrix is not continuous, thus its number of rows can not be changed" );
803 if( (unsigned)new_rows > (unsigned)total_size )
804 CV_Error( CV_StsOutOfRange, "Bad new number of rows" );
806 total_width = total_size / new_rows;
808 if( total_width * new_rows != total_size )
809 CV_Error( CV_StsBadArg, "The total number of matrix elements "
810 "is not divisible by the new number of rows" );
813 hdr.step[0] = total_width * elemSize1();
816 int new_width = total_width / new_cn;
818 if( new_width * new_cn != total_width )
819 CV_Error( CV_BadNumChannels,
820 "The total width is not divisible by the new number of channels" );
822 hdr.cols = new_width;
823 hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
824 hdr.step[1] = CV_ELEM_SIZE(hdr.flags);
// checkVector: returns the number of _elemChannels-element "points" this
// matrix represents, or -1 if the layout doesn't qualify.  Accepts a 1xN /
// Nx1 matrix with _elemChannels channels, an Nx_elemChannels matrix, or an
// equivalent single-channel 3D layout; depth and continuity may also be
// constrained by the caller.
829 int Mat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) const
831 return (depth() == _depth || _depth <= 0) &&
832 (isContinuous() || !_requireContinuous) &&
833 ((dims == 2 && (((rows == 1 || cols == 1) && channels() == _elemChannels) || (cols == _elemChannels))) ||
834 (dims == 3 && channels() == 1 && size.p[2] == _elemChannels && (size.p[0] == 1 || size.p[1] == 1) &&
835 (isContinuous() || step.p[1] == step.p[2]*size.p[2])))
836 ? (int)(total()*channels()/_elemChannels) : -1;
// scalarToRawData: converts a Scalar's first cn components into raw pixel
// bytes of the requested depth at _buf, saturating each value, then repeats
// the pattern up to unroll_to elements (so callers can memset-style tile).
// NOTE(review): decimated excerpt — the switch(depth) skeleton, case labels
// and the repeat-fill assignments inside each "for(; i < unroll_to; ...)"
// loop are elided; one branch per depth (8U..64F) is shown verbatim.
840 void scalarToRawData(const Scalar& s, void* _buf, int type, int unroll_to)
842 int i, depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
// CV_8U branch
848 uchar* buf = (uchar*)_buf;
849 for(i = 0; i < cn; i++)
850 buf[i] = saturate_cast<uchar>(s.val[i]);
851 for(; i < unroll_to; i++)
// CV_8S branch
857 schar* buf = (schar*)_buf;
858 for(i = 0; i < cn; i++)
859 buf[i] = saturate_cast<schar>(s.val[i]);
860 for(; i < unroll_to; i++)
// CV_16U branch
866 ushort* buf = (ushort*)_buf;
867 for(i = 0; i < cn; i++)
868 buf[i] = saturate_cast<ushort>(s.val[i]);
869 for(; i < unroll_to; i++)
// CV_16S branch
875 short* buf = (short*)_buf;
876 for(i = 0; i < cn; i++)
877 buf[i] = saturate_cast<short>(s.val[i]);
878 for(; i < unroll_to; i++)
// CV_32S branch
884 int* buf = (int*)_buf;
885 for(i = 0; i < cn; i++)
886 buf[i] = saturate_cast<int>(s.val[i]);
887 for(; i < unroll_to; i++)
// CV_32F branch
893 float* buf = (float*)_buf;
894 for(i = 0; i < cn; i++)
895 buf[i] = saturate_cast<float>(s.val[i]);
896 for(; i < unroll_to; i++)
// CV_64F branch
902 double* buf = (double*)_buf;
903 for(i = 0; i < cn; i++)
904 buf[i] = saturate_cast<double>(s.val[i]);
905 for(; i < unroll_to; i++)
// Unknown depth
910 CV_Error(CV_StsUnsupportedFormat,"");
915 /*************************************************************************************************\
917 \*************************************************************************************************/
919 _InputArray::_InputArray() : flags(0), obj(0) {}
920 _InputArray::~_InputArray() {}
921 _InputArray::_InputArray(const Mat& m) : flags(MAT), obj((void*)&m) {}
922 _InputArray::_InputArray(const vector<Mat>& vec) : flags(STD_VECTOR_MAT), obj((void*)&vec) {}
923 _InputArray::_InputArray(const double& val) : flags(FIXED_TYPE + FIXED_SIZE + MATX + CV_64F), obj((void*)&val), sz(Size(1,1)) {}
924 _InputArray::_InputArray(const MatExpr& expr) : flags(FIXED_TYPE + FIXED_SIZE + EXPR), obj((void*)&expr) {}
925 _InputArray::_InputArray(const GlBuffer& buf) : flags(FIXED_TYPE + FIXED_SIZE + OPENGL_BUFFER), obj((void*)&buf) {}
926 _InputArray::_InputArray(const GlTexture& tex) : flags(FIXED_TYPE + FIXED_SIZE + OPENGL_TEXTURE), obj((void*)&tex) {}
927 _InputArray::_InputArray(const gpu::GpuMat& d_mat) : flags(GPU_MAT), obj((void*)&d_mat) {}
// getMat: materializes the wrapped container as a Mat header (no data copy
// except for the EXPR case, which evaluates the expression).  For vector
// kinds, i selects which inner vector/Mat to wrap; the data pointer of the
// std::vector is aliased directly.
// NOTE(review): decimated excerpt — the kind() dispatch skeleton and several
// branch braces are elided; code kept verbatim.
929 Mat _InputArray::getMat(int i) const
935 const Mat* m = (const Mat*)obj;
// EXPR: evaluate the matrix expression into a real Mat.
944 return (Mat)*((const MatExpr*)obj);
// MATX: wrap the fixed-size matrix data.
950 return Mat(sz, flags, obj);
953 if( k == STD_VECTOR )
956 int t = CV_MAT_TYPE(flags);
957 const vector<uchar>& v = *(const vector<uchar>*)obj;
959 return !v.empty() ? Mat(size(), t, (void*)&v[0]) : Mat();
965 if( k == STD_VECTOR_VECTOR )
968 const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
969 CV_Assert( 0 <= i && i < (int)vv.size() );
970 const vector<uchar>& v = vv[i];
972 return !v.empty() ? Mat(size(i), t, (void*)&v[0]) : Mat();
975 CV_Assert( k == STD_VECTOR_MAT );
976 //if( k == STD_VECTOR_MAT )
978 const vector<Mat>& v = *(const vector<Mat>*)obj;
979 CV_Assert( 0 <= i && i < (int)v.size() );
// getMatVector: expands the wrapped container into a vector of Mat headers,
// one per row / per plane / per inner vector depending on the kind.  For a
// plain Mat, each outer-dimension slice becomes one entry (2D -> rows,
// n-D -> (n-1)-D planes).  Headers alias the original data; only the EXPR
// case evaluates into a temporary.
// NOTE(review): decimated excerpt — kind() dispatch and resize calls are
// partially elided; code kept verbatim.
986 void _InputArray::getMatVector(vector<Mat>& mv) const
// MAT: one header per slice along dimension 0.
992 const Mat& m = *(const Mat*)obj;
993 int i, n = (int)m.size[0];
996 for( i = 0; i < n; i++ )
997 mv[i] = m.dims == 2 ? Mat(1, m.cols, m.type(), (void*)m.ptr(i)) :
998 Mat(m.dims-1, &m.size[1], m.type(), (void*)m.ptr(i), &m.step[1]);
// EXPR: evaluate, then slice the result row-by-row.
1004 Mat m = *(const MatExpr*)obj;
1005 int i, n = m.size[0];
1008 for( i = 0; i < n; i++ )
// MATX: one 1-row header per stored row.
1015 size_t i, n = sz.height, esz = CV_ELEM_SIZE(flags);
1018 for( i = 0; i < n; i++ )
1019 mv[i] = Mat(1, sz.width, CV_MAT_TYPE(flags), (uchar*)obj + esz*sz.width*i);
1023 if( k == STD_VECTOR )
1025 const vector<uchar>& v = *(const vector<uchar>*)obj;
1027 size_t i, n = v.size(), esz = CV_ELEM_SIZE(flags);
1028 int t = CV_MAT_DEPTH(flags), cn = CV_MAT_CN(flags);
1031 for( i = 0; i < n; i++ )
1032 mv[i] = Mat(1, cn, t, (void*)(&v[0] + esz*i));
1042 if( k == STD_VECTOR_VECTOR )
1044 const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
1045 int i, n = (int)vv.size();
1046 int t = CV_MAT_TYPE(flags);
1049 for( i = 0; i < n; i++ )
1051 const vector<uchar>& v = vv[i];
1052 mv[i] = Mat(size(i), t, (void*)&v[0]);
1057 CV_Assert( k == STD_VECTOR_MAT );
1058 //if( k == STD_VECTOR_MAT )
1060 const vector<Mat>& v = *(const vector<Mat>*)obj;
1061 mv.resize(v.size());
1062 std::copy(v.begin(), v.end(), mv.begin());
// getGlBuffer / getGlTexture / getGpuMat: typed accessors that assert the
// wrapped kind matches and return the stored object (the return statements
// are elided in this decimated excerpt; code kept verbatim).
1067 GlBuffer _InputArray::getGlBuffer() const
1071 CV_Assert(k == OPENGL_BUFFER);
1072 //if( k == OPENGL_BUFFER )
1074 const GlBuffer* buf = (const GlBuffer*)obj;
1079 GlTexture _InputArray::getGlTexture() const
1083 CV_Assert(k == OPENGL_TEXTURE);
1084 //if( k == OPENGL_TEXTURE )
1086 const GlTexture* tex = (const GlTexture*)obj;
1091 gpu::GpuMat _InputArray::getGpuMat() const
1095 CV_Assert(k == GPU_MAT);
1096 //if( k == GPU_MAT )
1098 const gpu::GpuMat* d_mat = (const gpu::GpuMat*)obj;
1103 int _InputArray::kind() const
1105 return flags & KIND_MASK;
// size(i): returns the 2D size of the wrapped container, or of element i
// for vector-of-X kinds (i < 0 asks for the outer extent as a Nx1 size).
// For STD_VECTOR kinds, element count is inferred by comparing the byte
// length against CV_ELEM_SIZE(flags) — the aliased uchar/int vector reads
// are a deliberate type-pun on the same object.
// NOTE(review): decimated excerpt — kind() dispatch skeleton and i<0/i>=0
// branch braces are elided; code kept verbatim.
1108 Size _InputArray::size(int i) const
1115 return ((const Mat*)obj)->size();
1121 return ((const MatExpr*)obj)->size();
1130 if( k == STD_VECTOR )
1133 const vector<uchar>& v = *(const vector<uchar>*)obj;
1134 const vector<int>& iv = *(const vector<int>*)obj;
1135 size_t szb = v.size(), szi = iv.size();
// If byte-size == int-size the vector is byte-based; otherwise scale down.
1136 return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1);
1142 if( k == STD_VECTOR_VECTOR )
1144 const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
1146 return vv.empty() ? Size() : Size((int)vv.size(), 1);
1147 CV_Assert( i < (int)vv.size() );
1148 const vector<vector<int> >& ivv = *(const vector<vector<int> >*)obj;
1150 size_t szb = vv[i].size(), szi = ivv[i].size();
1151 return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1);
1154 if( k == STD_VECTOR_MAT )
1156 const vector<Mat>& vv = *(const vector<Mat>*)obj;
1158 return vv.empty() ? Size() : Size((int)vv.size(), 1);
1159 CV_Assert( i < (int)vv.size() );
1161 return vv[i].size();
1164 if( k == OPENGL_BUFFER )
1167 const GlBuffer* buf = (const GlBuffer*)obj;
1171 if( k == OPENGL_TEXTURE )
1174 const GlTexture* tex = (const GlTexture*)obj;
1178 CV_Assert( k == GPU_MAT );
1179 //if( k == GPU_MAT )
1182 const gpu::GpuMat* d_mat = (const gpu::GpuMat*)obj;
1183 return d_mat->size();
1187 size_t _InputArray::total(int i) const
1189 return size(i).area();
// type(i): returns the CV type code of the wrapped container.  For the
// value-like kinds (MATX / STD_VECTOR / STD_VECTOR_VECTOR) the type is
// encoded directly in the proxy's flags; container kinds ask the object.
// NOTE(review): decimated excerpt — dispatch skeleton partially elided.
1192 int _InputArray::type(int i) const
1197 return ((const Mat*)obj)->type();
1200 return ((const MatExpr*)obj)->type();
1202 if( k == MATX || k == STD_VECTOR || k == STD_VECTOR_VECTOR )
1203 return CV_MAT_TYPE(flags);
1208 if( k == STD_VECTOR_MAT )
1210 const vector<Mat>& vv = *(const vector<Mat>*)obj;
1211 CV_Assert( i < (int)vv.size() );
// i < 0 means "any element" — the first Mat's type is representative.
1213 return vv[i >= 0 ? i : 0].type();
1216 if( k == OPENGL_BUFFER )
1217 return ((const GlBuffer*)obj)->type();
1219 if( k == OPENGL_TEXTURE )
1220 return ((const GlTexture*)obj)->type();
1222 CV_Assert( k == GPU_MAT );
1223 //if( k == GPU_MAT )
1224 return ((const gpu::GpuMat*)obj)->type();
1227 int _InputArray::depth(int i) const
1229 return CV_MAT_DEPTH(type(i));
1232 int _InputArray::channels(int i) const
1234 return CV_MAT_CN(type(i));
// empty(): true when the wrapped container holds no data; dispatches on the
// wrapper kind.  NOTE(review): decimated excerpt — the return statements of
// the vector branches are elided; code kept verbatim.
1237 bool _InputArray::empty() const
1242 return ((const Mat*)obj)->empty();
1250 if( k == STD_VECTOR )
1252 const vector<uchar>& v = *(const vector<uchar>*)obj;
1259 if( k == STD_VECTOR_VECTOR )
1261 const vector<vector<uchar> >& vv = *(const vector<vector<uchar> >*)obj;
1265 if( k == STD_VECTOR_MAT )
1267 const vector<Mat>& vv = *(const vector<Mat>*)obj;
1271 if( k == OPENGL_BUFFER )
1272 return ((const GlBuffer*)obj)->empty();
1274 if( k == OPENGL_TEXTURE )
1275 return ((const GlTexture*)obj)->empty();
1277 CV_Assert( k == GPU_MAT );
1278 //if( k == GPU_MAT )
1279 return ((const gpu::GpuMat*)obj)->empty();
1283 _OutputArray::_OutputArray() {}
1284 _OutputArray::~_OutputArray() {}
1285 _OutputArray::_OutputArray(Mat& m) : _InputArray(m) {}
1286 _OutputArray::_OutputArray(vector<Mat>& vec) : _InputArray(vec) {}
1287 _OutputArray::_OutputArray(gpu::GpuMat& d_mat) : _InputArray(d_mat) {}
1289 _OutputArray::_OutputArray(const Mat& m) : _InputArray(m) {flags |= FIXED_SIZE|FIXED_TYPE;}
1290 _OutputArray::_OutputArray(const vector<Mat>& vec) : _InputArray(vec) {flags |= FIXED_SIZE;}
1291 _OutputArray::_OutputArray(const gpu::GpuMat& d_mat) : _InputArray(d_mat) {flags |= FIXED_SIZE|FIXED_TYPE;}
1294 bool _OutputArray::fixedSize() const
1296 return (flags & FIXED_SIZE) == FIXED_SIZE;
1299 bool _OutputArray::fixedType() const
1301 return (flags & FIXED_TYPE) == FIXED_TYPE;
// create(Size, ...): fast path for plain Mat / GpuMat targets — validates
// any FIXED_SIZE/FIXED_TYPE constraints, then forwards to the object's own
// create().  All other kinds fall through to the general n-dims overload.
1304 void _OutputArray::create(Size _sz, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
1307 if( k == MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
1309 CV_Assert(!fixedSize() || ((Mat*)obj)->size.operator()() == _sz);
1310 CV_Assert(!fixedType() || ((Mat*)obj)->type() == mtype);
1311 ((Mat*)obj)->create(_sz, mtype);
1314 if( k == GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
1316 CV_Assert(!fixedSize() || ((gpu::GpuMat*)obj)->size() == _sz);
1317 CV_Assert(!fixedType() || ((gpu::GpuMat*)obj)->type() == mtype);
1318 ((gpu::GpuMat*)obj)->create(_sz, mtype);
1321 int sizes[] = {_sz.height, _sz.width};
1322 create(2, sizes, mtype, i, allowTransposed, fixedDepthMask);
// create(rows, cols, ...): identical structure to the Size overload above.
1325 void _OutputArray::create(int rows, int cols, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
1328 if( k == MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
1330 CV_Assert(!fixedSize() || ((Mat*)obj)->size.operator()() == Size(cols, rows));
1331 CV_Assert(!fixedType() || ((Mat*)obj)->type() == mtype);
1332 ((Mat*)obj)->create(rows, cols, mtype);
1335 if( k == GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
1337 CV_Assert(!fixedSize() || ((gpu::GpuMat*)obj)->size() == Size(cols, rows));
1338 CV_Assert(!fixedType() || ((gpu::GpuMat*)obj)->type() == mtype);
1339 ((gpu::GpuMat*)obj)->create(rows, cols, mtype);
1342 int sizes[] = {rows, cols};
1343 create(2, sizes, mtype, i, allowTransposed, fixedDepthMask);
// General create(): (re)allocates the wrapped output container for the
// requested dims/sizes/type, honoring FIXED_SIZE/FIXED_TYPE constraints.
// Kinds handled: MAT (in place), MATX (validate only — storage is fixed),
// STD_VECTOR / STD_VECTOR_VECTOR (resize a std::vector, dispatching on the
// element byte size), NONE (error), and STD_VECTOR_MAT (resize the vector,
// then create the selected element like the MAT path).
// allowTransposed permits reusing an existing rows x cols buffer for a
// cols x rows request when the data is continuous.
// NOTE(review): decimated excerpt — dispatch skeleton, switch(esz) case
// labels and several braces are elided; code kept verbatim.
1346 void _OutputArray::create(int dims, const int* sizes, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
1349 mtype = CV_MAT_TYPE(mtype);
// ---- MAT target ----
1354 Mat& m = *(Mat*)obj;
1355 if( allowTransposed )
1357 if( !m.isContinuous() )
1359 CV_Assert(!fixedType() && !fixedSize());
// Accept an existing transposed buffer of the same total layout.
1363 if( dims == 2 && m.dims == 2 && m.data &&
1364 m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
1370 if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
1373 CV_Assert(CV_MAT_TYPE(mtype) == m.type());
1377 CV_Assert(m.dims == dims);
1378 for(int j = 0; j < dims; ++j)
1379 CV_Assert(m.size[j] == sizes[j]);
1381 m.create(dims, sizes, mtype);
// ---- MATX target: fixed storage, only validate type and size ----
1388 int type0 = CV_MAT_TYPE(flags);
1389 CV_Assert( mtype == type0 || (CV_MAT_CN(mtype) == 1 && ((1 << type0) & fixedDepthMask) != 0) );
1390 CV_Assert( dims == 2 && ((sizes[0] == sz.height && sizes[1] == sz.width) ||
1391 (allowTransposed && sizes[0] == sz.width && sizes[1] == sz.height)));
// ---- std::vector target(s): must be a 1D request ----
1395 if( k == STD_VECTOR || k == STD_VECTOR_VECTOR )
1397 CV_Assert( dims == 2 && (sizes[0] == 1 || sizes[1] == 1 || sizes[0]*sizes[1] == 0) );
1398 size_t len = sizes[0]*sizes[1] > 0 ? sizes[0] + sizes[1] - 1 : 0;
1399 vector<uchar>* v = (vector<uchar>*)obj;
1401 if( k == STD_VECTOR_VECTOR )
1403 vector<vector<uchar> >& vv = *(vector<vector<uchar> >*)obj;
1406 CV_Assert(!fixedSize() || len == vv.size());
1410 CV_Assert( i < (int)vv.size() );
1416 int type0 = CV_MAT_TYPE(flags);
1417 CV_Assert( mtype == type0 || (CV_MAT_CN(mtype) == CV_MAT_CN(type0) && ((1 << type0) & fixedDepthMask) != 0) );
// Resize via a vector type of the matching element byte size; the switch
// case labels (esz values) are elided in this excerpt.
1419 int esz = CV_ELEM_SIZE(type0);
1420 CV_Assert(!fixedSize() || len == ((vector<uchar>*)v)->size() / esz);
1424 ((vector<uchar>*)v)->resize(len);
1427 ((vector<Vec2b>*)v)->resize(len);
1430 ((vector<Vec3b>*)v)->resize(len);
1433 ((vector<int>*)v)->resize(len);
1436 ((vector<Vec3s>*)v)->resize(len);
1439 ((vector<Vec2i>*)v)->resize(len);
1442 ((vector<Vec3i>*)v)->resize(len);
1445 ((vector<Vec4i>*)v)->resize(len);
1448 ((vector<Vec6i>*)v)->resize(len);
1451 ((vector<Vec8i>*)v)->resize(len);
1454 ((vector<Vec<int, 9> >*)v)->resize(len);
1457 ((vector<Vec<int, 12> >*)v)->resize(len);
1460 ((vector<Vec<int, 16> >*)v)->resize(len);
1463 ((vector<Vec<int, 32> >*)v)->resize(len);
1466 ((vector<Vec<int, 64> >*)v)->resize(len);
1469 ((vector<Vec<int, 128> >*)v)->resize(len);
1472 CV_Error_(CV_StsBadArg, ("Vectors with element size %d are not supported. Please, modify OutputArray::create()\n", esz));
// ---- NONE target: caller passed noArray() but the output is required ----
1479 CV_Error(CV_StsNullPtr, "create() called for the missing output array" );
// ---- vector<Mat> target ----
1483 CV_Assert( k == STD_VECTOR_MAT );
1484 //if( k == STD_VECTOR_MAT )
1486 vector<Mat>& v = *(vector<Mat>*)obj;
// i < 0: resize the outer vector itself (1D request only).
1490 CV_Assert( dims == 2 && (sizes[0] == 1 || sizes[1] == 1 || sizes[0]*sizes[1] == 0) );
1491 size_t len = sizes[0]*sizes[1] > 0 ? sizes[0] + sizes[1] - 1 : 0, len0 = v.size();
1493 CV_Assert(!fixedSize() || len == len0);
// Newly appended empty Mats inherit the proxy's element type.
1497 int _type = CV_MAT_TYPE(flags);
1498 for( size_t j = len0; j < len; j++ )
1500 if( v[i].type() == _type )
1502 CV_Assert( v[i].empty() );
1503 v[i].flags = (v[i].flags & ~CV_MAT_TYPE_MASK) | _type;
// i >= 0: create element i, mirroring the MAT path above.
1509 CV_Assert( i < (int)v.size() );
1512 if( allowTransposed )
1514 if( !m.isContinuous() )
1516 CV_Assert(!fixedType() && !fixedSize());
1520 if( dims == 2 && m.dims == 2 && m.data &&
1521 m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
1527 if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
1530 CV_Assert(!fixedType() || (CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0));
1534 CV_Assert(m.dims == dims);
1535 for(int j = 0; j < dims; ++j)
1536 CV_Assert(m.size[j] == sizes[j]);
1539 m.create(dims, sizes, mtype);
// release(): frees/empties the wrapped output container (forbidden when the
// size is fixed).  Mat/GpuMat release their buffers; vectors are cleared or
// resized to zero via create().
1543 void _OutputArray::release() const
1545 CV_Assert(!fixedSize());
1551 ((Mat*)obj)->release();
1557 ((gpu::GpuMat*)obj)->release();
1564 if( k == STD_VECTOR )
1566 create(Size(), CV_MAT_TYPE(flags));
1570 if( k == STD_VECTOR_VECTOR )
1572 ((vector<vector<uchar> >*)obj)->clear();
1576 CV_Assert( k == STD_VECTOR_MAT );
1577 //if( k == STD_VECTOR_MAT )
1579 ((vector<Mat>*)obj)->clear();
// clear(): empties the container without freeing the Mat buffer (resize(0)
// keeps capacity); falls through to release() for other kinds (fall-through
// elided in this excerpt).
1583 void _OutputArray::clear() const
1589 CV_Assert(!fixedSize());
1590 ((Mat*)obj)->resize(0);
// True when the caller supplied a real destination (kind() != NONE),
// i.e. did not pass noArray().
1597 bool _OutputArray::needed() const
1599 return kind() != NONE;
// Returns a writable reference to the wrapped Mat. One code path requires the
// wrapper to be a single MAT; the other requires a vector<Mat> with a valid
// index i (range-checked). The branch selection lines are elided here —
// presumably i < 0 selects the single-Mat path; confirm against the full file.
1602 Mat& _OutputArray::getMatRef(int i) const
1607 CV_Assert( k == MAT );
1612 CV_Assert( k == STD_VECTOR_MAT );
1613 vector<Mat>& v = *(vector<Mat>*)obj;
1614 CV_Assert( i < (int)v.size() );
// Returns a writable reference to the wrapped gpu::GpuMat;
// only valid for GPU_MAT wrappers (asserted).
1619 gpu::GpuMat& _OutputArray::getGpuMatRef() const
1622 CV_Assert( k == GPU_MAT );
1623 return *(gpu::GpuMat*)obj;
// Shared default-constructed sentinel returned by noArray(); passing it to an
// API means "this output is not wanted" (tested by callers via needed()).
1626 static _OutputArray _none;
1627 OutputArray noArray() { return _none; }
1631 /*************************************************************************************************\
                                         Matrix Operations
1633 \*************************************************************************************************/
// Horizontally concatenates nsrc matrices into _dst. All inputs must be
// non-empty, at most 2-D, and share the row count and type of src[0];
// the result is src[0].rows x sum(cols). Early-out when nsrc==0 or src==0
// (the return statement is elided in this chunk).
1635 void cv::hconcat(const Mat* src, size_t nsrc, OutputArray _dst)
1637 if( nsrc == 0 || !src )
1643 int totalCols = 0, cols = 0;
// First pass: validate inputs and compute the total width.
1645 for( i = 0; i < nsrc; i++ )
1647 CV_Assert( !src[i].empty() && src[i].dims <= 2 &&
1648 src[i].rows == src[0].rows &&
1649 src[i].type() == src[0].type());
1650 totalCols += src[i].cols;
1652 _dst.create( src[0].rows, totalCols, src[0].type());
1653 Mat dst = _dst.getMat();
// Second pass: copy each source into its column slice of dst.
1654 for( i = 0; i < nsrc; i++ )
1656 Mat dpart = dst(Rect(cols, 0, src[i].cols, src[i].rows));
1657 src[i].copyTo(dpart);
1658 cols += src[i].cols;
// Convenience overloads: the two-matrix and the array-of-matrices forms both
// forward to the pointer-based hconcat above.
1662 void cv::hconcat(InputArray src1, InputArray src2, OutputArray dst)
1664 Mat src[] = {src1.getMat(), src2.getMat()};
1665 hconcat(src, 2, dst);
1668 void cv::hconcat(InputArray _src, OutputArray dst)
1671 _src.getMatVector(src);
// Pass a null pointer for an empty vector; hconcat handles nsrc==0.
1672 hconcat(!src.empty() ? &src[0] : 0, src.size(), dst);
// Vertically concatenates nsrc matrices into _dst. All inputs must be
// non-empty, at most 2-D, and share the column count and type of src[0];
// the result is sum(rows) x src[0].cols. Early-out when nsrc==0 or src==0
// (the return statement is elided in this chunk).
1675 void cv::vconcat(const Mat* src, size_t nsrc, OutputArray _dst)
1677 if( nsrc == 0 || !src )
1683 int totalRows = 0, rows = 0;
// First pass: validate inputs and compute the total height.
1685 for( i = 0; i < nsrc; i++ )
1687 CV_Assert( !src[i].empty() && src[i].dims <= 2 &&
1688 src[i].cols == src[0].cols &&
1689 src[i].type() == src[0].type());
1690 totalRows += src[i].rows;
1692 _dst.create( totalRows, src[0].cols, src[0].type());
1693 Mat dst = _dst.getMat();
// Second pass: copy each source into its row slice of dst.
1694 for( i = 0; i < nsrc; i++ )
1696 Mat dpart(dst, Rect(0, rows, src[i].cols, src[i].rows));
1697 src[i].copyTo(dpart);
1698 rows += src[i].rows;
// Convenience overloads: the two-matrix and the array-of-matrices forms both
// forward to the pointer-based vconcat above.
1702 void cv::vconcat(InputArray src1, InputArray src2, OutputArray dst)
1704 Mat src[] = {src1.getMat(), src2.getMat()};
1705 vconcat(src, 2, dst);
1708 void cv::vconcat(InputArray _src, OutputArray dst)
1711 _src.getMatVector(src);
// Pass a null pointer for an empty vector; vconcat handles nsrc==0.
1712 vconcat(!src.empty() ? &src[0] : 0, src.size(), dst);
1715 //////////////////////////////////////// set identity ////////////////////////////////////////////
// Sets m to a scaled identity: m(i,j) = s[0] on the diagonal, 0 elsewhere
// (visible in the CV_64FC1 branch). Fast direct-pointer paths exist for
// CV_32FC1 and CV_64FC1; `step` is the row stride in elements, not bytes.
// The fallback for other types is elided in this chunk.
1716 void cv::setIdentity( InputOutputArray _m, const Scalar& s )
1718 Mat m = _m.getMat();
1719 CV_Assert( m.dims <= 2 );
1720 int i, j, rows = m.rows, cols = m.cols, type = m.type();
1722 if( type == CV_32FC1 )
1724 float* data = (float*)m.data;
1725 float val = (float)s[0];
1726 size_t step = m.step/sizeof(data[0]);
1728 for( i = 0; i < rows; i++, data += step )
1730 for( j = 0; j < cols; j++ )
1736 else if( type == CV_64FC1 )
1738 double* data = (double*)m.data;
1740 size_t step = m.step/sizeof(data[0]);
1742 for( i = 0; i < rows; i++, data += step )
1744 for( j = 0; j < cols; j++ )
1745 data[j] = j == i ? val : 0;
1755 //////////////////////////////////////////// trace ///////////////////////////////////////////
// Returns the sum of the diagonal elements of m (first min(rows, cols) of
// them). CV_32FC1/CV_64FC1 use a direct pointer walk where step+1 advances
// one row *and* one column per iteration; every other type falls back to
// summing the diagonal view.
1757 cv::Scalar cv::trace( InputArray _m )
1759 Mat m = _m.getMat();
1760 CV_Assert( m.dims <= 2 );
1761 int i, type = m.type();
1762 int nm = std::min(m.rows, m.cols);
1764 if( type == CV_32FC1 )
1766 const float* ptr = (const float*)m.data;
// +1 moves diagonally: next row plus next column.
1767 size_t step = m.step/sizeof(ptr[0]) + 1;
1769 for( i = 0; i < nm; i++ )
1774 if( type == CV_64FC1 )
1776 const double* ptr = (const double*)m.data;
1777 size_t step = m.step/sizeof(ptr[0]) + 1;
1779 for( i = 0; i < nm; i++ )
// Generic fallback for all other element types.
1784 return cv::sum(m.diag());
1787 ////////////////////////////////////// transpose /////////////////////////////////////////
// Out-of-place transpose kernel for element type T. src/dst are raw byte
// pointers with byte strides sstep/dstep; sz is the source size, so
// m = sz.width (source columns == destination rows) and n = sz.height.
// When CV_ENABLE_UNROLLED is set, the main loop processes 4 destination
// rows x 4 source rows per iteration; scalar tails handle the remainder.
1792 template<typename T> static void
1793 transpose_( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz )
1795 int i=0, j, m = sz.width, n = sz.height;
1797 #if CV_ENABLE_UNROLLED
// Unrolled block: 4 destination rows at a time.
1798 for(; i <= m - 4; i += 4 )
1800 T* d0 = (T*)(dst + dstep*i);
1801 T* d1 = (T*)(dst + dstep*(i+1));
1802 T* d2 = (T*)(dst + dstep*(i+2));
1803 T* d3 = (T*)(dst + dstep*(i+3));
1805 for( j = 0; j <= n - 4; j += 4 )
1807 const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j);
1808 const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1));
1809 const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2));
1810 const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3));
// 4x4 tile: dst row r, col c gets src row c, col r.
1812 d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0];
1813 d1[j] = s0[1]; d1[j+1] = s1[1]; d1[j+2] = s2[1]; d1[j+3] = s3[1];
1814 d2[j] = s0[2]; d2[j+1] = s1[2]; d2[j+2] = s2[2]; d2[j+3] = s3[2];
1815 d3[j] = s0[3]; d3[j+1] = s1[3]; d3[j+2] = s2[3]; d3[j+3] = s3[3];
// Column tail of the 4-row block.
1820 const T* s0 = (const T*)(src + i*sizeof(T) + j*sstep);
1821 d0[j] = s0[0]; d1[j] = s0[1]; d2[j] = s0[2]; d3[j] = s0[3];
// Remaining destination rows, one at a time.
1827 T* d0 = (T*)(dst + dstep*i);
1829 #if CV_ENABLE_UNROLLED
1830 for(; j <= n - 4; j += 4 )
1832 const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j);
1833 const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1));
1834 const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2));
1835 const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3));
1837 d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0];
1842 const T* s0 = (const T*)(src + i*sizeof(T) + j*sstep);
// In-place transpose of an n x n matrix of element type T: for every pair
// above the diagonal (j > i), swaps element (i,j) with (j,i). `step` is the
// row stride in bytes.
1848 template<typename T> static void
1849 transposeI_( uchar* data, size_t step, int n )
1852 for( i = 0; i < n; i++ )
1854 T* row = (T*)(data + step*i);
// data1 points at column i, walked down the rows via step*j.
1855 uchar* data1 = data + i*sizeof(T);
1856 for( j = i+1; j < n; j++ )
1857 std::swap( row[j], *(T*)(data1 + step*j) );
// Kernel generation and dispatch for cv::transpose. DEF_TRANSPOSE_FUNC stamps
// out a non-template out-of-place and in-place kernel per element type; the
// two tables below are indexed by element size in bytes (1,2,3,4,6,8,12,16,
// 24,32 — zero entries mark unsupported sizes).
1861 typedef void (*TransposeFunc)( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz );
1862 typedef void (*TransposeInplaceFunc)( uchar* data, size_t step, int n );
1864 #define DEF_TRANSPOSE_FUNC(suffix, type) \
1865 static void transpose_##suffix( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz ) \
1866 { transpose_<type>(src, sstep, dst, dstep, sz); } \
1868 static void transposeI_##suffix( uchar* data, size_t step, int n ) \
1869 { transposeI_<type>(data, step, n); }
1871 DEF_TRANSPOSE_FUNC(8u, uchar)
1872 DEF_TRANSPOSE_FUNC(16u, ushort)
1873 DEF_TRANSPOSE_FUNC(8uC3, Vec3b)
1874 DEF_TRANSPOSE_FUNC(32s, int)
1875 DEF_TRANSPOSE_FUNC(16uC3, Vec3s)
1876 DEF_TRANSPOSE_FUNC(32sC2, Vec2i)
1877 DEF_TRANSPOSE_FUNC(32sC3, Vec3i)
1878 DEF_TRANSPOSE_FUNC(32sC4, Vec4i)
1879 DEF_TRANSPOSE_FUNC(32sC6, Vec6i)
1880 DEF_TRANSPOSE_FUNC(32sC8, Vec8i)
// Out-of-place kernels, indexed by element size in bytes.
1882 static TransposeFunc transposeTab[] =
1884 0, transpose_8u, transpose_16u, transpose_8uC3, transpose_32s, 0, transpose_16uC3, 0,
1885 transpose_32sC2, 0, 0, 0, transpose_32sC3, 0, 0, 0, transpose_32sC4,
1886 0, 0, 0, 0, 0, 0, 0, transpose_32sC6, 0, 0, 0, 0, 0, 0, 0, transpose_32sC8
// In-place (square) kernels, same indexing.
1889 static TransposeInplaceFunc transposeInplaceTab[] =
1891 0, transposeI_8u, transposeI_16u, transposeI_8uC3, transposeI_32s, 0, transposeI_16uC3, 0,
1892 transposeI_32sC2, 0, 0, 0, transposeI_32sC3, 0, 0, 0, transposeI_32sC4,
1893 0, 0, 0, 0, 0, 0, 0, transposeI_32sC6, 0, 0, 0, 0, 0, 0, 0, transposeI_32sC8
// Transposes src into dst (dst becomes cols x rows of the same type).
// The kernel is selected from the tables above by element size (must be
// <= 32 bytes and have a non-null table entry). When dst aliases src the
// in-place square kernel is used instead of the out-of-place one.
1898 void cv::transpose( InputArray _src, OutputArray _dst )
1900 Mat src = _src.getMat();
1901 size_t esz = src.elemSize();
1902 CV_Assert( src.dims <= 2 && esz <= (size_t)32 );
1904 _dst.create(src.cols, src.rows, src.type());
1905 Mat dst = _dst.getMat();
// In-place path: transpose within the shared buffer.
1907 if( dst.data == src.data )
1909 TransposeInplaceFunc func = transposeInplaceTab[esz];
1910 CV_Assert( func != 0 );
1911 func( dst.data, dst.step, dst.rows );
// Out-of-place path.
1915 TransposeFunc func = transposeTab[esz];
1916 CV_Assert( func != 0 );
1917 func( src.data, src.step, dst.data, dst.step, src.size() );
// Makes a square matrix symmetric by copying one triangle onto the other:
// each element (i,j) in the target triangle is overwritten with (j,i).
// LtoR selects which triangle is treated as the source (j0/j1 bound the
// copied columns per row). 32-bit single-channel types (CV_32F and CV_32S
// share the int-copy path) and CV_64FC1 are supported; others error out.
1922 void cv::completeSymm( InputOutputArray _m, bool LtoR )
1924 Mat m = _m.getMat();
1925 CV_Assert( m.dims <= 2 );
1927 int i, j, nrows = m.rows, type = m.type();
1928 int j0 = 0, j1 = nrows;
1929 CV_Assert( m.rows == m.cols );
// 32-bit elements are copied as raw ints (bit pattern), so CV_32F works too.
1931 if( type == CV_32FC1 || type == CV_32SC1 )
1933 int* data = (int*)m.data;
1934 size_t step = m.step/sizeof(data[0]);
1935 for( i = 0; i < nrows; i++ )
// Per row, copy either columns [0, i) or (i, nrows) from the mirror element.
1937 if( !LtoR ) j1 = i; else j0 = i+1;
1938 for( j = j0; j < j1; j++ )
1939 data[i*step + j] = data[j*step + i];
1942 else if( type == CV_64FC1 )
1944 double* data = (double*)m.data;
1945 size_t step = m.step/sizeof(data[0]);
1946 for( i = 0; i < nrows; i++ )
1948 if( !LtoR ) j1 = i; else j0 = i+1;
1949 for( j = j0; j < j1; j++ )
1950 data[i*step + j] = data[j*step + i];
1954 CV_Error( CV_StsUnsupportedFormat, "" );
// 3-element cross product: returns this x m for CV_32F/CV_64F 3-vectors
// (3x1 column or 1x3 row, possibly multi-channel totalling 3 elements).
// lda/ldb are element strides between vector components: the row stride for
// column vectors, 1 for row vectors.
1958 cv::Mat cv::Mat::cross(InputArray _m) const
1960 Mat m = _m.getMat();
1961 int tp = type(), d = CV_MAT_DEPTH(tp);
1962 CV_Assert( dims <= 2 && m.dims <= 2 && size() == m.size() && tp == m.type() &&
1963 ((rows == 3 && cols == 1) || (cols*channels() == 3 && rows == 1)));
1964 Mat result(rows, cols, tp);
1968 const float *a = (const float*)data, *b = (const float*)m.data;
1969 float* c = (float*)result.data;
1970 size_t lda = rows > 1 ? step/sizeof(a[0]) : 1;
1971 size_t ldb = rows > 1 ? m.step/sizeof(b[0]) : 1;
// c = a x b, expanded component-wise.
1973 c[0] = a[lda] * b[ldb*2] - a[lda*2] * b[ldb];
1974 c[1] = a[lda*2] * b[0] - a[0] * b[ldb*2];
1975 c[2] = a[0] * b[ldb] - a[lda] * b[0];
1977 else if( d == CV_64F )
1979 const double *a = (const double*)data, *b = (const double*)m.data;
1980 double* c = (double*)result.data;
1981 size_t lda = rows > 1 ? step/sizeof(a[0]) : 1;
1982 size_t ldb = rows > 1 ? m.step/sizeof(b[0]) : 1;
1984 c[0] = a[lda] * b[ldb*2] - a[lda*2] * b[ldb];
1985 c[1] = a[lda*2] * b[0] - a[0] * b[ldb*2];
1986 c[2] = a[0] * b[ldb] - a[lda] * b[0];
1993 ////////////////////////////////////////// reduce ////////////////////////////////////////////
// Row-direction reduction kernel: folds every row of srcmat into a single
// row. Accumulates in Op::rtype (WT) inside a width-sized buffer, applying
// Op element-wise row by row, then converts the buffer to the destination
// type ST. Width is multiplied by channel count so channels reduce
// independently.
1998 template<typename T, typename ST, class Op> static void
1999 reduceR_( const Mat& srcmat, Mat& dstmat )
2001 typedef typename Op::rtype WT;
2002 Size size = srcmat.size();
2003 size.width *= srcmat.channels();
2004 AutoBuffer<WT> buffer(size.width);
2006 ST* dst = (ST*)dstmat.data;
2007 const T* src = (const T*)srcmat.data;
2008 size_t srcstep = srcmat.step/sizeof(src[0]);
// Seed the accumulator buffer from the first row (loop body elided here).
2012 for( i = 0; i < size.width; i++ )
// Fold the remaining rows into the buffer.
2015 for( ; --size.height; )
2019 #if CV_ENABLE_UNROLLED
2020 for(; i <= size.width - 4; i += 4 )
2023 s0 = op(buf[i], (WT)src[i]);
2024 s1 = op(buf[i+1], (WT)src[i+1]);
2025 buf[i] = s0; buf[i+1] = s1;
2027 s0 = op(buf[i+2], (WT)src[i+2]);
2028 s1 = op(buf[i+3], (WT)src[i+3]);
2029 buf[i+2] = s0; buf[i+3] = s1;
2032 for( ; i < size.width; i++ )
2033 buf[i] = op(buf[i], (WT)src[i]);
// Store the accumulated row as the destination type.
2036 for( i = 0; i < size.width; i++ )
2037 dst[i] = (ST)buf[i];
// Column-direction reduction kernel: for each row, folds all pixels into a
// single cn-channel value using Op, accumulating in Op::rtype (WT). The main
// loop keeps two accumulators (a0/a1) and steps 4 pixels at a time for
// instruction-level parallelism; a scalar tail finishes the row.
2041 template<typename T, typename ST, class Op> static void
2042 reduceC_( const Mat& srcmat, Mat& dstmat )
2044 typedef typename Op::rtype WT;
2045 Size size = srcmat.size();
2046 int i, k, cn = srcmat.channels();
2050 for( int y = 0; y < size.height; y++ )
2052 const T* src = (const T*)(srcmat.data + srcmat.step*y);
2053 ST* dst = (ST*)(dstmat.data + dstmat.step*y);
// Single-pixel rows: nothing to reduce, just convert.
2054 if( size.width == cn )
2055 for( k = 0; k < cn; k++ )
// Reduce each channel independently across the row.
2059 for( k = 0; k < cn; k++ )
2061 WT a0 = src[k], a1 = src[k+cn];
2062 for( i = 2*cn; i <= size.width - 4*cn; i += 4*cn )
2064 a0 = op(a0, (WT)src[i+k]);
2065 a1 = op(a1, (WT)src[i+k+cn]);
2066 a0 = op(a0, (WT)src[i+k+cn*2]);
2067 a1 = op(a1, (WT)src[i+k+cn*3]);
2070 for( ; i < size.width; i += cn )
2072 a0 = op(a0, (WT)src[i+k]);
// Per-type instantiations of reduceR_/reduceC_ used by cv::reduce's dispatch
// below. Naming: reduce<Op><R|C><srcDepth><dstDepth>.
// NOTE(review): the 8u SUM variants accumulate in int even when the
// destination is float/double — uchar sums fit an int for realistic sizes,
// but verify the acceptable input-height range if changing this.
2081 typedef void (*ReduceFunc)( const Mat& src, Mat& dst );
2085 #define reduceSumR8u32s  reduceR_<uchar, int, OpAdd<int> >
2086 #define reduceSumR8u32f  reduceR_<uchar, float, OpAdd<int> >
2087 #define reduceSumR8u64f  reduceR_<uchar, double,OpAdd<int> >
2088 #define reduceSumR16u32f reduceR_<ushort,float, OpAdd<float> >
2089 #define reduceSumR16u64f reduceR_<ushort,double,OpAdd<double> >
2090 #define reduceSumR16s32f reduceR_<short, float, OpAdd<float> >
2091 #define reduceSumR16s64f reduceR_<short, double,OpAdd<double> >
2092 #define reduceSumR32f32f reduceR_<float, float, OpAdd<float> >
2093 #define reduceSumR32f64f reduceR_<float, double,OpAdd<double> >
2094 #define reduceSumR64f64f reduceR_<double,double,OpAdd<double> >
2096 #define reduceMaxR8u  reduceR_<uchar, uchar, OpMax<uchar> >
2097 #define reduceMaxR16u reduceR_<ushort,ushort,OpMax<ushort> >
2098 #define reduceMaxR16s reduceR_<short, short, OpMax<short> >
2099 #define reduceMaxR32f reduceR_<float, float, OpMax<float> >
2100 #define reduceMaxR64f reduceR_<double,double,OpMax<double> >
2102 #define reduceMinR8u  reduceR_<uchar, uchar, OpMin<uchar> >
2103 #define reduceMinR16u reduceR_<ushort,ushort,OpMin<ushort> >
2104 #define reduceMinR16s reduceR_<short, short, OpMin<short> >
2105 #define reduceMinR32f reduceR_<float, float, OpMin<float> >
2106 #define reduceMinR64f reduceR_<double,double,OpMin<double> >
2108 #define reduceSumC8u32s  reduceC_<uchar, int, OpAdd<int> >
2109 #define reduceSumC8u32f  reduceC_<uchar, float, OpAdd<int> >
2110 #define reduceSumC8u64f  reduceC_<uchar, double,OpAdd<int> >
2111 #define reduceSumC16u32f reduceC_<ushort,float, OpAdd<float> >
2112 #define reduceSumC16u64f reduceC_<ushort,double,OpAdd<double> >
2113 #define reduceSumC16s32f reduceC_<short, float, OpAdd<float> >
2114 #define reduceSumC16s64f reduceC_<short, double,OpAdd<double> >
2115 #define reduceSumC32f32f reduceC_<float, float, OpAdd<float> >
2116 #define reduceSumC32f64f reduceC_<float, double,OpAdd<double> >
2117 #define reduceSumC64f64f reduceC_<double,double,OpAdd<double> >
2119 #define reduceMaxC8u  reduceC_<uchar, uchar, OpMax<uchar> >
2120 #define reduceMaxC16u reduceC_<ushort,ushort,OpMax<ushort> >
2121 #define reduceMaxC16s reduceC_<short, short, OpMax<short> >
2122 #define reduceMaxC32f reduceC_<float, float, OpMax<float> >
2123 #define reduceMaxC64f reduceC_<double,double,OpMax<double> >
2125 #define reduceMinC8u  reduceC_<uchar, uchar, OpMin<uchar> >
2126 #define reduceMinC16u reduceC_<ushort,ushort,OpMin<ushort> >
2127 #define reduceMinC16s reduceC_<short, short, OpMin<short> >
2128 #define reduceMinC32f reduceC_<float, float, OpMin<float> >
2129 #define reduceMinC64f reduceC_<double,double,OpMin<double> >
// Reduces a 2-D matrix to a single row (dim==0) or column (dim==1) using
// SUM, MAX, MIN or AVG. dtype < 0 keeps the source type (or the fixed output
// type). AVG is implemented as SUM into a wider temp followed by a scaled
// convertTo at the end. The kernel is picked from the reduce* macro
// instantiations above by (op, dim, sdepth, ddepth); unsupported combinations
// raise CV_StsUnsupportedFormat.
2131 void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
2133 Mat src = _src.getMat();
2134 CV_Assert( src.dims <= 2 );
2136 int stype = src.type(), sdepth = src.depth(), cn = src.channels();
// Negative dtype: inherit from the destination's fixed type or the source.
2138 dtype = _dst.fixedType() ? _dst.type() : stype;
2139 int ddepth = CV_MAT_DEPTH(dtype);
2141 _dst.create(dim == 0 ? 1 : src.rows, dim == 0 ? src.cols : 1,
2142 CV_MAKETYPE(dtype >= 0 ? dtype : stype, cn));
2143 Mat dst = _dst.getMat(), temp = dst;
2145 CV_Assert( op == CV_REDUCE_SUM || op == CV_REDUCE_MAX ||
2146 op == CV_REDUCE_MIN || op == CV_REDUCE_AVG );
2147 CV_Assert( src.channels() == dst.channels() );
// AVG = SUM into a wider accumulator, divided at the end.
2149 if( op == CV_REDUCE_AVG )
2152 if( sdepth < CV_32S && ddepth < CV_32S )
2154 temp.create(dst.rows, dst.cols, CV_32SC(cn));
2159 ReduceFunc func = 0;
// dim == 0: reduce rows (R-kernels).
2162 if( op == CV_REDUCE_SUM )
2164 if(sdepth == CV_8U && ddepth == CV_32S)
2165 func = GET_OPTIMIZED(reduceSumR8u32s);
2166 else if(sdepth == CV_8U && ddepth == CV_32F)
2167 func = reduceSumR8u32f;
2168 else if(sdepth == CV_8U && ddepth == CV_64F)
2169 func = reduceSumR8u64f;
2170 else if(sdepth == CV_16U && ddepth == CV_32F)
2171 func = reduceSumR16u32f;
2172 else if(sdepth == CV_16U && ddepth == CV_64F)
2173 func = reduceSumR16u64f;
2174 else if(sdepth == CV_16S && ddepth == CV_32F)
2175 func = reduceSumR16s32f;
2176 else if(sdepth == CV_16S && ddepth == CV_64F)
2177 func = reduceSumR16s64f;
2178 else if(sdepth == CV_32F && ddepth == CV_32F)
2179 func = GET_OPTIMIZED(reduceSumR32f32f);
2180 else if(sdepth == CV_32F && ddepth == CV_64F)
2181 func = reduceSumR32f64f;
2182 else if(sdepth == CV_64F && ddepth == CV_64F)
2183 func = reduceSumR64f64f;
2185 else if(op == CV_REDUCE_MAX)
2187 if(sdepth == CV_8U && ddepth == CV_8U)
2188 func = GET_OPTIMIZED(reduceMaxR8u);
2189 else if(sdepth == CV_16U && ddepth == CV_16U)
2190 func = reduceMaxR16u;
2191 else if(sdepth == CV_16S && ddepth == CV_16S)
2192 func = reduceMaxR16s;
2193 else if(sdepth == CV_32F && ddepth == CV_32F)
2194 func = GET_OPTIMIZED(reduceMaxR32f);
2195 else if(sdepth == CV_64F && ddepth == CV_64F)
2196 func = reduceMaxR64f;
2198 else if(op == CV_REDUCE_MIN)
2200 if(sdepth == CV_8U && ddepth == CV_8U)
2201 func = GET_OPTIMIZED(reduceMinR8u);
2202 else if(sdepth == CV_16U && ddepth == CV_16U)
2203 func = reduceMinR16u;
2204 else if(sdepth == CV_16S && ddepth == CV_16S)
2205 func = reduceMinR16s;
2206 else if(sdepth == CV_32F && ddepth == CV_32F)
2207 func = GET_OPTIMIZED(reduceMinR32f);
2208 else if(sdepth == CV_64F && ddepth == CV_64F)
2209 func = reduceMinR64f;
// dim == 1: reduce columns (C-kernels).
2214 if(op == CV_REDUCE_SUM)
2216 if(sdepth == CV_8U && ddepth == CV_32S)
2217 func = GET_OPTIMIZED(reduceSumC8u32s);
2218 else if(sdepth == CV_8U && ddepth == CV_32F)
2219 func = reduceSumC8u32f;
2220 else if(sdepth == CV_8U && ddepth == CV_64F)
2221 func = reduceSumC8u64f;
2222 else if(sdepth == CV_16U && ddepth == CV_32F)
2223 func = reduceSumC16u32f;
2224 else if(sdepth == CV_16U && ddepth == CV_64F)
2225 func = reduceSumC16u64f;
2226 else if(sdepth == CV_16S && ddepth == CV_32F)
2227 func = reduceSumC16s32f;
2228 else if(sdepth == CV_16S && ddepth == CV_64F)
2229 func = reduceSumC16s64f;
2230 else if(sdepth == CV_32F && ddepth == CV_32F)
2231 func = GET_OPTIMIZED(reduceSumC32f32f);
2232 else if(sdepth == CV_32F && ddepth == CV_64F)
2233 func = reduceSumC32f64f;
2234 else if(sdepth == CV_64F && ddepth == CV_64F)
2235 func = reduceSumC64f64f;
2237 else if(op == CV_REDUCE_MAX)
2239 if(sdepth == CV_8U && ddepth == CV_8U)
2240 func = GET_OPTIMIZED(reduceMaxC8u);
2241 else if(sdepth == CV_16U && ddepth == CV_16U)
2242 func = reduceMaxC16u;
2243 else if(sdepth == CV_16S && ddepth == CV_16S)
2244 func = reduceMaxC16s;
2245 else if(sdepth == CV_32F && ddepth == CV_32F)
2246 func = GET_OPTIMIZED(reduceMaxC32f);
2247 else if(sdepth == CV_64F && ddepth == CV_64F)
2248 func = reduceMaxC64f;
2250 else if(op == CV_REDUCE_MIN)
2252 if(sdepth == CV_8U && ddepth == CV_8U)
2253 func = GET_OPTIMIZED(reduceMinC8u);
2254 else if(sdepth == CV_16U && ddepth == CV_16U)
2255 func = reduceMinC16u;
2256 else if(sdepth == CV_16S && ddepth == CV_16S)
2257 func = reduceMinC16s;
2258 else if(sdepth == CV_32F && ddepth == CV_32F)
2259 func = GET_OPTIMIZED(reduceMinC32f);
2260 else if(sdepth == CV_64F && ddepth == CV_64F)
2261 func = reduceMinC64f;
2266 CV_Error( CV_StsUnsupportedFormat,
2267 "Unsupported combination of input and output array formats" );
// Final AVG division: sum / number of reduced elements.
2271 if( op0 == CV_REDUCE_AVG )
2272 temp.convertTo(dst, dst.type(), 1./(dim == 0 ? src.rows : src.cols));
2276 //////////////////////////////////////// sort ///////////////////////////////////////////
// Per-type sort kernel: sorts every row (CV_SORT_EVERY_ROW) or column of a
// single-channel matrix ascending with std::sort; descending output is
// produced by reversing the sorted run. Column sort gathers the column into
// a temporary buffer, sorts it, and scatters it back.
2281 template<typename T> static void sort_( const Mat& src, Mat& dst, int flags )
2286 bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
2287 bool inplace = src.data == dst.data;
2288 bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;
2291 n = src.rows, len = src.cols;
2294 n = src.cols, len = src.rows;
2299 for( i = 0; i < n; i++ )
// Row mode: sort directly in the destination row (copied first if needed).
2304 T* dptr = (T*)(dst.data + dst.step*i);
2307 const T* sptr = (const T*)(src.data + src.step*i);
2308 for( j = 0; j < len; j++ )
// Column mode: gather column i into the temporary buffer.
2315 for( j = 0; j < len; j++ )
2316 ptr[j] = ((const T*)(src.data + src.step*j))[i];
2318 std::sort( ptr, ptr + len, LessThan<T>() );
// Descending order = ascending sort + reverse.
2319 if( sortDescending )
2320 for( j = 0; j < len/2; j++ )
2321 std::swap(ptr[j], ptr[len-1-j]);
// Scatter the sorted column back (row mode writes in place).
2323 for( j = 0; j < len; j++ )
2324 ((T*)(dst.data + dst.step*j))[i] = ptr[j];
// Per-type argsort kernel: like sort_ above but writes CV_32S indices of the
// sorted order instead of values, using LessThanIdx over the source data.
// src and dst must not alias (asserted) since indices overwrite dst while
// values are still read from src.
2329 template<typename T> static void sortIdx_( const Mat& src, Mat& dst, int flags )
2332 AutoBuffer<int> ibuf;
2336 bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
2337 bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;
2339 CV_Assert( src.data != dst.data );
2342 n = src.rows, len = src.cols;
2345 n = src.cols, len = src.rows;
2352 for( i = 0; i < n; i++ )
// Row mode: compare against the source row, write indices into dst row.
2359 ptr = (T*)(src.data + src.step*i);
2360 iptr = (int*)(dst.data + dst.step*i);
// Column mode: gather the column values first.
2364 for( j = 0; j < len; j++ )
2365 ptr[j] = ((const T*)(src.data + src.step*j))[i];
// Initialize identity indices 0..len-1 (body elided in this chunk).
2367 for( j = 0; j < len; j++ )
2369 std::sort( iptr, iptr + len, LessThanIdx<T>(ptr) );
2370 if( sortDescending )
2371 for( j = 0; j < len/2; j++ )
2372 std::swap(iptr[j], iptr[len-1-j]);
// Scatter the index column back into dst.
2374 for( j = 0; j < len; j++ )
2375 ((int*)(dst.data + dst.step*j))[i] = iptr[j];
// Public sort entry point: dispatches on source depth to sort_<T>.
// Only 2-D single-channel arrays are supported; dst gets the source
// size/type.
2379 typedef void (*SortFunc)(const Mat& src, Mat& dst, int flags);
2383 void cv::sort( InputArray _src, OutputArray _dst, int flags )
2385 static SortFunc tab[] =
2387 sort_<uchar>, sort_<schar>, sort_<ushort>, sort_<short>,
2388 sort_<int>, sort_<float>, sort_<double>, 0
2390 Mat src = _src.getMat();
2391 SortFunc func = tab[src.depth()];
2392 CV_Assert( src.dims <= 2 && src.channels() == 1 && func != 0 );
2393 _dst.create( src.size(), src.type() );
2394 Mat dst = _dst.getMat();
2395 func( src, dst, flags );
// Public argsort entry point: dispatches on source depth to sortIdx_<T> and
// writes a CV_32S index array. Because sortIdx_ forbids aliasing, a dst that
// shares data with src is re-created before the kernel runs (the line between
// the alias check and create() is elided here).
2398 void cv::sortIdx( InputArray _src, OutputArray _dst, int flags )
2400 static SortFunc tab[] =
2402 sortIdx_<uchar>, sortIdx_<schar>, sortIdx_<ushort>, sortIdx_<short>,
2403 sortIdx_<int>, sortIdx_<float>, sortIdx_<double>, 0
2405 Mat src = _src.getMat();
2406 SortFunc func = tab[src.depth()];
2407 CV_Assert( src.dims <= 2 && src.channels() == 1 && func != 0 );
2409 Mat dst = _dst.getMat();
2410 if( dst.data == src.data )
2412 _dst.create( src.size(), CV_32S );
2413 dst = _dst.getMat();
2414 func( src, dst, flags );
2418 ////////////////////////////////////////// kmeans ////////////////////////////////////////////
// Draws one random k-means center uniformly inside the per-dimension data
// bounding box `box` (box[j] = (min, max)), expanded by a margin of 1/dims
// on each side.
2423 static void generateRandomCenter(const vector<Vec2f>& box, float* center, RNG& rng)
2425 size_t j, dims = box.size();
2426 float margin = 1.f/dims;
2427 for( j = 0; j < dims; j++ )
2428 center[j] = ((float)rng*(1.f+margin*2.f)-margin)*(box[j][1] - box[j][0]) + box[j][0];
// parallel_for body used by k-means++ seeding: for each sample i in the
// range, tdist2[i] = min(squared distance to the candidate center at byte
// offset stepci, current best dist[i]). Several constructor parameters and
// data members are elided in this chunk.
2431 class KMeansPPDistanceComputer
2434 KMeansPPDistanceComputer( float *_tdist2,
2447 void operator()( const cv::BlockedRange& range ) const
2449 const int begin = range.begin();
2450 const int end = range.end();
2452 for ( int i = begin; i<end; i++ )
2454 tdist2[i] = std::min(normL2Sqr_(data + step*i, data + stepci, dims), dist[i]);
// Copy assignment intentionally undeclared/suppressed.
2459 KMeansPPDistanceComputer& operator=(const KMeansPPDistanceComputer&); // to quiet MSVC
2466 const size_t stepci;
2470 k-means center initialization using the following algorithm:
2471 Arthur & Vassilvitskii (2007) k-means++: The Advantages of Careful Seeding
// k-means++ center initialization (Arthur & Vassilvitskii 2007, cited in the
// banner above): the first center is uniform over the N samples; each
// subsequent center is sampled with probability proportional to the squared
// distance to the nearest already-chosen center. `trials` candidates are
// drawn per step and the one minimizing the total distance sum is kept.
// Finally the chosen sample rows are copied into _out_centers.
2473 static void generateCentersPP(const Mat& _data, Mat& _out_centers,
2474 int K, RNG& rng, int trials)
2476 int i, j, k, dims = _data.cols, N = _data.rows;
2477 const float* data = _data.ptr<float>(0);
2478 size_t step = _data.step/sizeof(data[0]);
2479 vector<int> _centers(K);
2480 int* centers = &_centers[0];
// dist = current best distances; tdist/tdist2 = trial scratch buffers.
2481 vector<float> _dist(N*3);
2482 float* dist = &_dist[0], *tdist = dist + N, *tdist2 = tdist + N;
// First center: uniform random sample index.
2485 centers[0] = (unsigned)rng % N;
2487 for( i = 0; i < N; i++ )
2489 dist[i] = normL2Sqr_(data + step*i, data + step*centers[0], dims);
2493 for( k = 1; k < K; k++ )
2495 double bestSum = DBL_MAX;
2496 int bestCenter = -1;
2498 for( j = 0; j < trials; j++ )
// Sample candidate ci with probability proportional to dist[] (weighted by p).
2500 double p = (double)rng*sum0, s = 0;
2501 for( i = 0; i < N-1; i++ )
2502 if( (p -= dist[i]) <= 0 )
// Update trial distances against the candidate in parallel.
2506 parallel_for(BlockedRange(0, N),
2507 KMeansPPDistanceComputer(tdist2, data, dist, dims, step, step*ci));
2508 for( i = 0; i < N; i++ )
2517 std::swap(tdist, tdist2);
2520 centers[k] = bestCenter;
2522 std::swap(dist, tdist);
// Emit the chosen samples as the initial centers.
2525 for( k = 0; k < K; k++ )
2527 const float* src = data + step*centers[k];
2528 float* dst = _out_centers.ptr<float>(k);
2529 for( j = 0; j < dims; j++ )
// parallel_for body for the k-means assignment step: for every sample in the
// range, finds the nearest center by squared Euclidean distance and stores
// it in distances[i]. NOTE(review): the call site also passes a labels
// buffer; the member/assignment lines are elided here — presumably the best
// center index is written to labels[i] as well; confirm against the full file.
2534 class KMeansDistanceComputer
2537 KMeansDistanceComputer( double *_distances,
2540 const Mat& _centers )
2541 : distances(_distances),
2546 CV_DbgAssert(centers.cols == data.cols);
2549 void operator()( const BlockedRange& range ) const
2551 const int begin = range.begin();
2552 const int end = range.end();
2553 const int K = centers.rows;
2554 const int dims = centers.cols;
2556 const float *sample;
2557 for( int i = begin; i<end; ++i)
2559 sample = data.ptr<float>(i);
2561 double min_dist = DBL_MAX;
// Linear scan over all K centers.
2563 for( int k = 0; k < K; k++ )
2565 const float* center = centers.ptr<float>(k);
2566 const double dist = normL2Sqr_(sample, center, dims);
2568 if( min_dist > dist )
2575 distances[i] = min_dist;
// Copy assignment intentionally undeclared/suppressed.
2581 KMeansDistanceComputer& operator=(const KMeansDistanceComputer&); // to quiet MSVC
// Full Lloyd's k-means with `attempts` random restarts. Input must be CV_32F
// with N >= K samples (a 1 x N multi-channel row is treated as N samples of
// `channels` dims). Initialization is k-means++ (KMEANS_PP_CENTERS), random
// within the data bounding box, or caller-provided labels
// (KMEANS_USE_INITIAL_LABELS, first attempt only). Returns the best
// compactness (sum of squared sample-to-center distances) over all attempts;
// best labels and optionally centers are written out.
2591 double cv::kmeans( InputArray _data, int K,
2592 InputOutputArray _bestLabels,
2593 TermCriteria criteria, int attempts,
2594 int flags, OutputArray _centers )
// Number of candidate centers tried per k-means++ step.
2596 const int SPP_TRIALS = 3;
2597 Mat data = _data.getMat();
// A single multi-channel row is reinterpreted as N 1-D channel samples.
2598 bool isrow = data.rows == 1 && data.channels() > 1;
2599 int N = !isrow ? data.rows : data.cols;
2600 int dims = (!isrow ? data.cols : 1)*data.channels();
2601 int type = data.depth();
2603 attempts = std::max(attempts, 1);
2604 CV_Assert( data.dims <= 2 && type == CV_32F && K > 0 );
2605 CV_Assert( N >= K );
2607 _bestLabels.create(N, 1, CV_32S, -1, true);
2609 Mat _labels, best_labels = _bestLabels.getMat();
// Initial labels supplied by caller: validate shape/type and copy them in.
2610 if( flags & CV_KMEANS_USE_INITIAL_LABELS )
2612 CV_Assert( (best_labels.cols == 1 || best_labels.rows == 1) &&
2613 best_labels.cols*best_labels.rows == N &&
2614 best_labels.type() == CV_32S &&
2615 best_labels.isContinuous());
2616 best_labels.copyTo(_labels);
// Otherwise (re)allocate a conforming label vector.
2620 if( !((best_labels.cols == 1 || best_labels.rows == 1) &&
2621 best_labels.cols*best_labels.rows == N &&
2622 best_labels.type() == CV_32S &&
2623 best_labels.isContinuous()))
2624 best_labels.create(N, 1, CV_32S);
2625 _labels.create(best_labels.size(), best_labels.type());
2627 int* labels = _labels.ptr<int>();
2629 Mat centers(K, dims, type), old_centers(K, dims, type), temp(1, dims, type);
2630 vector<int> counters(K);
2631 vector<Vec2f> _box(dims);
2632 Vec2f* box = &_box[0];
2633 double best_compactness = DBL_MAX, compactness = 0;
2634 RNG& rng = theRNG();
2635 int a, iter, i, j, k;
// Normalize the termination criteria (epsilon is compared against squared
// center shift, hence the squaring below).
2637 if( criteria.type & TermCriteria::EPS )
2638 criteria.epsilon = std::max(criteria.epsilon, 0.);
2640 criteria.epsilon = FLT_EPSILON;
2641 criteria.epsilon *= criteria.epsilon;
2643 if( criteria.type & TermCriteria::COUNT )
2644 criteria.maxCount = std::min(std::max(criteria.maxCount, 2), 100);
2646 criteria.maxCount = 100;
2651 criteria.maxCount = 2;
// Compute the per-dimension bounding box of the data (used for random init).
2654 const float* sample = data.ptr<float>(0);
2655 for( j = 0; j < dims; j++ )
2656 box[j] = Vec2f(sample[j], sample[j]);
2658 for( i = 1; i < N; i++ )
2660 sample = data.ptr<float>(i);
2661 for( j = 0; j < dims; j++ )
2663 float v = sample[j];
2664 box[j][0] = std::min(box[j][0], v);
2665 box[j][1] = std::max(box[j][1], v);
// Outer restart loop: each attempt runs Lloyd iterations to convergence.
2669 for( a = 0; a < attempts; a++ )
2671 double max_center_shift = DBL_MAX;
2674 swap(centers, old_centers);
// Center initialization on the first iteration of each attempt.
2676 if( iter == 0 && (a > 0 || !(flags & KMEANS_USE_INITIAL_LABELS)) )
2678 if( flags & KMEANS_PP_CENTERS )
2679 generateCentersPP(data, centers, K, rng, SPP_TRIALS);
2682 for( k = 0; k < K; k++ )
2683 generateRandomCenter(_box, centers.ptr<float>(k), rng);
// Caller-supplied labels are only honored on the very first attempt.
2688 if( iter == 0 && a == 0 && (flags & KMEANS_USE_INITIAL_LABELS) )
2690 for( i = 0; i < N; i++ )
2691 CV_Assert( (unsigned)labels[i] < (unsigned)K )
// Update step: accumulate each sample into its assigned center.
2695 centers = Scalar(0);
2696 for( k = 0; k < K; k++ )
2699 for( i = 0; i < N; i++ )
2701 sample = data.ptr<float>(i);
2703 float* center = centers.ptr<float>(k);
2705 #if CV_ENABLE_UNROLLED
2706 for(; j <= dims - 4; j += 4 )
2708 float t0 = center[j] + sample[j];
2709 float t1 = center[j+1] + sample[j+1];
2714 t0 = center[j+2] + sample[j+2];
2715 t1 = center[j+3] + sample[j+3];
2721 for( ; j < dims; j++ )
2722 center[j] += sample[j];
2727 max_center_shift = 0;
2729 for( k = 0; k < K; k++ )
2731 if( counters[k] != 0 )
2734 // if some cluster appeared to be empty then:
2735 //   1. find the biggest cluster
2736 //   2. find the farthest from the center point in the biggest cluster
2737 //   3. exclude the farthest point from the biggest cluster and form a new 1-point cluster.
2739 for( int k1 = 1; k1 < K; k1++ )
2741 if( counters[max_k] < counters[k1] )
2745 double max_dist = 0;
2746 int farthest_i = -1;
2747 float* new_center = centers.ptr<float>(k);
2748 float* old_center = centers.ptr<float>(max_k);
2749 float* _old_center = temp.ptr<float>(); // normalized
2750 float scale = 1.f/counters[max_k];
2751 for( j = 0; j < dims; j++ )
2752 _old_center[j] = old_center[j]*scale;
// Find the sample of the biggest cluster farthest from its (mean) center.
2754 for( i = 0; i < N; i++ )
2756 if( labels[i] != max_k )
2758 sample = data.ptr<float>(i);
2759 double dist = normL2Sqr_(sample, _old_center, dims);
2761 if( max_dist <= dist )
// Move that sample into the empty cluster k.
2770 labels[farthest_i] = k;
2771 sample = data.ptr<float>(farthest_i);
2773 for( j = 0; j < dims; j++ )
2775 old_center[j] -= sample[j];
2776 new_center[j] += sample[j];
// Normalize centers by their population and track the max center shift.
2780 for( k = 0; k < K; k++ )
2782 float* center = centers.ptr<float>(k);
2783 CV_Assert( counters[k] != 0 );
2785 float scale = 1.f/counters[k];
2786 for( j = 0; j < dims; j++ )
2792 const float* old_center = old_centers.ptr<float>(k);
2793 for( j = 0; j < dims; j++ )
2795 double t = center[j] - old_center[j];
2798 max_center_shift = std::max(max_center_shift, dist);
// Termination: iteration budget exhausted or centers barely moved.
2803 if( ++iter == MAX(criteria.maxCount, 2) || max_center_shift <= criteria.epsilon )
// Assignment step (parallel): nearest center per sample + distances.
2807 Mat dists(1, N, CV_64F);
2808 double* dist = dists.ptr<double>(0);
2809 parallel_for(BlockedRange(0, N),
2810 KMeansDistanceComputer(dist, labels, data, centers));
2812 for( i = 0; i < N; i++ )
2814 compactness += dist[i];
// Keep the best attempt seen so far.
2818 if( compactness < best_compactness )
2820 best_compactness = compactness;
2821 if( _centers.needed() )
2822 centers.copyTo(_centers);
2823 _labels.copyTo(best_labels);
2827 return best_compactness;
// Legacy C-API shims: wrap the CvArr arguments as cv::Mat headers (no data
// copy) and forward to the corresponding C++ implementations above.
2831 CV_IMPL void cvSetIdentity( CvArr* arr, CvScalar value )
2833 cv::Mat m = cv::cvarrToMat(arr);
2834 cv::setIdentity(m, value);
2838 CV_IMPL CvScalar cvTrace( const CvArr* arr )
2840 return cv::trace(cv::cvarrToMat(arr));
2844 CV_IMPL void cvTranspose( const CvArr* srcarr, CvArr* dstarr )
2846 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
// Destination must already have the transposed shape and same type.
2848 CV_Assert( src.rows == dst.cols && src.cols == dst.rows && src.type() == dst.type() );
2849 transpose( src, dst );
2853 CV_IMPL void cvCompleteSymm( CvMat* matrix, int LtoR )
2856 cv::completeSymm( m, LtoR != 0 );
2860 CV_IMPL void cvCrossProduct( const CvArr* srcAarr, const CvArr* srcBarr, CvArr* dstarr )
2862 cv::Mat srcA = cv::cvarrToMat(srcAarr), dst = cv::cvarrToMat(dstarr);
2864 CV_Assert( srcA.size() == dst.size() && srcA.type() == dst.type() );
2865 srcA.cross(cv::cvarrToMat(srcBarr)).copyTo(dst);
// Legacy C shim for cv::reduce: a negative dim is inferred from the src/dst
// shapes; the destination size and channel count are validated before
// forwarding (the dtype is taken from the pre-allocated dst).
2870 cvReduce( const CvArr* srcarr, CvArr* dstarr, int dim, int op )
2872 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
// Infer the reduced dimension from which size collapsed to 1.
2875 dim = src.rows > dst.rows ? 0 : src.cols > dst.cols ? 1 : dst.cols == 1;
2878 CV_Error( CV_StsOutOfRange, "The reduced dimensionality index is out of range" );
2880 if( (dim == 0 && (dst.cols != src.cols || dst.rows != 1)) ||
2881 (dim == 1 && (dst.rows != src.rows || dst.cols != 1)) )
2882 CV_Error( CV_StsBadSize, "The output array size is incorrect" );
2884 if( src.channels() != dst.channels() )
2885 CV_Error( CV_StsUnmatchedFormats, "Input and output arrays must have the same number of channels" );
2887 cv::reduce(src, dst, dim, op, dst.type());
// Fills a CV_32SC1 or CV_32FC1 array with the arithmetic sequence
// start, start+delta, ... where delta = (end-start)/(rows*cols).
// The integer path is taken only when start and delta are (near-)exact
// integers, avoiding cvRound in the inner loop. Returns the array on
// success, 0 otherwise.
2892 cvRange( CvArr* arr, double start, double end )
2896 CvMat stub, *mat = (CvMat*)arr;
2903 if( !CV_IS_MAT(mat) )
2904 mat = cvGetMat( mat, &stub);
2908 type = CV_MAT_TYPE(mat->type);
2909 delta = (end-start)/(rows*cols);
// Continuous data can be treated as a single long row (setup elided here).
2911 if( CV_IS_MAT_CONT(mat->type) )
2918 step = mat->step / CV_ELEM_SIZE(type);
2920 if( type == CV_32SC1 )
2922 int* idata = mat->data.i;
2923 int ival = cvRound(val), idelta = cvRound(delta);
// Exact-integer fast path: pure int increments, no rounding per element.
2925 if( fabs(val - ival) < DBL_EPSILON &&
2926 fabs(delta - idelta) < DBL_EPSILON )
2928 for( i = 0; i < rows; i++, idata += step )
2929 for( j = 0; j < cols; j++, ival += idelta )
// Non-integral start/delta: accumulate in double and round each value.
2934 for( i = 0; i < rows; i++, idata += step )
2935 for( j = 0; j < cols; j++, val += delta )
2936 idata[j] = cvRound(val);
2939 else if( type == CV_32FC1 )
2941 float* fdata = mat->data.fl;
2942 for( i = 0; i < rows; i++, fdata += step )
2943 for( j = 0; j < cols; j++, val += delta )
2944 fdata[j] = (float)val;
2947 CV_Error( CV_StsUnsupportedFormat, "The function only supports 32sC1 and 32fC1 datatypes" );
2950 return ok ? arr : 0;
// Legacy C shim for cv::sort / cv::sortIdx: optionally writes sorted values
// to _dst and/or sorted indices to _idx. The post-call asserts verify the
// C++ functions did not reallocate, i.e. results really landed in the
// caller-provided storage.
2955 cvSort( const CvArr* _src, CvArr* _dst, CvArr* _idx, int flags )
2957 cv::Mat src = cv::cvarrToMat(_src);
2961 cv::Mat idx0 = cv::cvarrToMat(_idx), idx = idx0;
2962 CV_Assert( src.size() == idx.size() && idx.type() == CV_32S && src.data != idx.data );
2963 cv::sortIdx( src, idx, flags );
2964 CV_Assert( idx0.data == idx.data );
2969 cv::Mat dst0 = cv::cvarrToMat(_dst), dst = dst0;
2970 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
2971 cv::sort( src, dst, flags );
2972 CV_Assert( dst0.data == dst.data );
// Legacy C-API wrapper for cv::kmeans. Reshapes samples/centers to
// single-channel, validates label/center buffers, runs kmeans, and optionally
// reports the resulting compactness. The CvRNG parameter is unused.
2978 cvKMeans2( const CvArr* _samples, int cluster_count, CvArr* _labels,
2979 CvTermCriteria termcrit, int attempts, CvRNG*,
2980 int flags, CvArr* _centers, double* _compactness )
2982 cv::Mat data = cv::cvarrToMat(_samples), labels = cv::cvarrToMat(_labels), centers;
2985 centers = cv::cvarrToMat(_centers);
// kmeans works on 1-channel row-per-sample layout.
2987 centers = centers.reshape(1);
2988 data = data.reshape(1);
2990 CV_Assert( !centers.empty() );
2991 CV_Assert( centers.rows == cluster_count );
2992 CV_Assert( centers.cols == data.cols );
2993 CV_Assert( centers.depth() == data.depth() );
// Labels must be a continuous CV_32S vector with one entry per sample.
2995 CV_Assert( labels.isContinuous() && labels.type() == CV_32S &&
2996 (labels.cols == 1 || labels.rows == 1) &&
2997 labels.cols + labels.rows - 1 == data.rows );
// Pass an empty OutputArray when the caller does not want centers back.
2999 double compactness = cv::kmeans(data, cluster_count, labels, termcrit, attempts,
3000 flags, _centers ? cv::_OutputArray(centers) : cv::_OutputArray() );
3002 *_compactness = compactness;
3006 ///////////////////////////// n-dimensional matrices ////////////////////////////
// N-dimensional reshape. Only the cases that map onto the 2D reshape
// overloads are implemented; any other dimensionality change raises
// CV_StsNotImplemented.
3011 Mat Mat::reshape(int _cn, int _newndims, const int* _newsz) const
3013 if(_newndims == dims)
// Same dimensionality: only the channel count changes.
3016 return reshape(_cn);
// NOTE(review): surrounding condition lines are elided in this extract;
// presumably this is the 1-D/2-D target case — confirm against full source.
3018 return reshape(_cn, _newsz[0]);
3021 CV_Error(CV_StsNotImplemented, "");
// Conversion to the legacy CvMatND header: shares the data pointer (no copy),
// copies per-dimension steps, and carries over the continuity flag.
3026 Mat::operator CvMatND() const
3029 cvInitMatNDHeader( &mat, dims, size, type(), data );
3031 for( i = 0; i < d; i++ )
3032 mat.dim[i].step = (int)step[i];
3033 mat.type |= flags & CONTINUOUS_FLAG;
// Default constructor: leaves the iterator unbound (all members zeroed).
3037 NAryMatIterator::NAryMatIterator()
3038 : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
// Constructor binding the iterator to `_narrays` matrices with per-array
// plane headers (Mat-based iteration); delegates to init().
3042 NAryMatIterator::NAryMatIterator(const Mat** _arrays, Mat* _planes, int _narrays)
3043 : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
3045 init(_arrays, _planes, 0, _narrays);
// Constructor binding the iterator to raw data pointers instead of plane
// headers (lighter-weight iteration); delegates to init().
3048 NAryMatIterator::NAryMatIterator(const Mat** _arrays, uchar** _ptrs, int _narrays)
3049 : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
3051 init(_arrays, 0, _ptrs, _narrays);
// Core setup for n-ary iteration: validates that all arrays share the same
// size, finds the deepest dimension level (`iterdepth`) above which at least
// one array is non-continuous, then flattens everything below that level
// into 1 x size "planes". nplanes is the product of the remaining dims.
// NOTE(review): many statements (narrays/arrays assignment, continuity
// collapse, plane setup branches) are elided in this extract.
3054 void NAryMatIterator::init(const Mat** _arrays, Mat* _planes, uchar** _ptrs, int _narrays)
3056 CV_Assert( _arrays && (_ptrs || _planes) );
3057 int i, j, d1=0, i0 = -1, d = -1;
// If the caller passed a negative count, count arrays up to the NULL sentinel.
3068 for( i = 0; _arrays[i] != 0; i++ )
3071 CV_Assert(narrays <= 1000);
3076 for( i = 0; i < narrays; i++ )
3078 CV_Assert(arrays[i] != 0);
3079 const Mat& A = *arrays[i];
3091 // find the first dimensionality which is different from 1;
3092 // in any of the arrays the first "d1" step do not affect the continuity
3093 for( d1 = 0; d1 < d; d1++ )
3094 if( A.size[d1] > 1 )
// All arrays must have identical sizes (compared against the first one, i0).
3098 CV_Assert( A.size == arrays[i0]->size )
3100 if( !A.isContinuous() )
// Elements must be densely packed along the last dimension.
3102 CV_Assert( A.step[d-1] == A.elemSize() );
// Walk up the dims; the first "gap" in the step chain bounds how many
// trailing dims can be merged into one continuous plane.
3103 for( j = d-1; j > d1; j-- )
3104 if( A.step[j]*A.size[j] < A.step[j-1] )
3106 iterdepth = std::max(iterdepth, j);
// Merge trailing dimensions into a single plane length, guarding against
// int overflow of the merged size.
3112 size = arrays[i0]->size[d-1];
3113 for( j = d-1; j > iterdepth; j-- )
3115 int64 total1 = (int64)size*arrays[i0]->size[j-1];
3116 if( total1 != (int)total1 )
3122 if( iterdepth == d1 )
// Remaining (outer) dims determine the number of planes to iterate.
3126 for( j = iterdepth-1; j >= 0; j-- )
3127 nplanes *= arrays[i0]->size[j];
3137 for( i = 0; i < narrays; i++ )
3139 CV_Assert(arrays[i] != 0);
3140 const Mat& A = *arrays[i];
// Each plane is exposed as a 1 x size header sharing the array's data.
3148 planes[i] = Mat(1, (int)size, A.type(), A.data);
// Advances all bound arrays to the next plane. iterdepth == 1 is the fast
// path (single outer dimension: offset = step[0]*idx); the general path
// decodes the flat plane index `idx` into per-dimension offsets.
// NOTE(review): idx increment and early-out lines are elided in this extract.
3153 NAryMatIterator& NAryMatIterator::operator ++()
3155 if( idx >= nplanes-1 )
3159 if( iterdepth == 1 )
3163 for( int i = 0; i < narrays; i++ )
// Raw-pointer mode: recompute each data pointer directly.
3167 ptrs[i] = arrays[i]->data + arrays[i]->step[0]*idx;
3172 for( int i = 0; i < narrays; i++ )
3174 if( !planes[i].data )
// Plane mode: repoint the plane header at the new row.
3176 planes[i].data = arrays[i]->data + arrays[i]->step[0]*idx;
3182 for( int i = 0; i < narrays; i++ )
3184 const Mat& A = *arrays[i];
3187 int _idx = (int)idx;
3188 uchar* data = A.data;
// Mixed-radix decode of _idx across the outer dimensions, accumulating
// the corresponding byte offsets.
3189 for( int j = iterdepth-1; j >= 0 && _idx > 0; j-- )
3191 int szi = A.size[j], t = _idx/szi;
3192 data += (_idx - t * szi)*A.step[j];
3198 planes[i].data = data;
// Post-increment: snapshots the iterator before advancing.
// NOTE(review): the ++*this / return lines are elided in this extract.
3205 NAryMatIterator NAryMatIterator::operator ++(int)
3207 NAryMatIterator it = *this;
3212 ///////////////////////////////////////////////////////////////////////////
3213 // MatConstIterator //
3214 ///////////////////////////////////////////////////////////////////////////
// Returns the current (x, y) position for a 2D matrix, derived from the
// byte offset of `ptr` within the matrix data.
3216 Point MatConstIterator::pos() const
3220 CV_DbgAssert(m->dims <= 2);
3222 ptrdiff_t ofs = ptr - m->data;
// Row = offset / row stride; column = remainder / element size.
3223 int y = (int)(ofs/m->step[0]);
3224 return Point((int)((ofs - y*m->step[0])/elemSize), y);
// Writes the current n-dimensional index into _idx by successively dividing
// the byte offset by each dimension's step.
// NOTE(review): the lines storing v into _idx / reducing ofs are elided here.
3227 void MatConstIterator::pos(int* _idx) const
3229 CV_Assert(m != 0 && _idx);
3230 ptrdiff_t ofs = ptr - m->data;
3231 for( int i = 0; i < m->dims; i++ )
3233 size_t s = m->step[i], v = ofs/s;
// Returns the current linear element position. Continuous matrices use a
// direct pointer difference; otherwise the position is reconstructed from
// per-dimension offsets (2D fast path, then the general n-D loop).
3239 ptrdiff_t MatConstIterator::lpos() const
3243 if( m->isContinuous() )
3244 return (ptr - sliceStart)/elemSize;
3245 ptrdiff_t ofs = ptr - m->data;
// 2D case: linear index = y*cols + x.
3249 ptrdiff_t y = ofs/m->step[0];
3250 return y*m->cols + (ofs - y*m->step[0])/elemSize;
// General n-D case: mixed-radix accumulation over all dimensions.
3252 ptrdiff_t result = 0;
3253 for( i = 0; i < d; i++ )
3255 size_t s = m->step[i], v = ofs/s;
3257 result = result*m->size[i] + v;
// Moves the iterator by/to `ofs` elements (relative or absolute). Continuous
// matrices are simple pointer arithmetic with clamping; non-continuous 2D
// matrices recompute the row slice; n-D matrices decode ofs dimension by
// dimension to rebuild sliceStart/sliceEnd/ptr.
// NOTE(review): some clamp/early-return lines are elided in this extract.
3262 void MatConstIterator::seek(ptrdiff_t ofs, bool relative)
3264 if( m->isContinuous() )
3266 ptr = (relative ? ptr : sliceStart) + ofs*elemSize;
// Clamp to the valid [sliceStart, sliceEnd] range.
3267 if( ptr < sliceStart )
3269 else if( ptr > sliceEnd )
// 2D path: convert the current pointer to a linear position, add ofs,
// then rebuild the row slice from the resulting row index.
3280 ofs0 = ptr - m->data;
3281 y = ofs0/m->step[0];
3282 ofs += y*m->cols + (ofs0 - y*m->step[0])/elemSize;
3285 int y1 = std::min(std::max((int)y, 0), m->rows-1);
3286 sliceStart = m->data + y1*m->step[0];
3287 sliceEnd = sliceStart + m->cols*elemSize;
// Out-of-range targets pin the pointer to the slice boundary.
3288 ptr = y < 0 ? sliceStart : y >= m->rows ? sliceEnd :
3289 sliceStart + (ofs - y*m->cols)*elemSize;
// n-D path: peel off the innermost coordinate first...
3299 int szi = m->size[d-1];
3300 ptrdiff_t t = ofs/szi;
3301 int v = (int)(ofs - t*szi);
3303 ptr = m->data + v*elemSize;
3304 sliceStart = m->data;
// ...then accumulate the outer-dimension offsets into sliceStart.
3306 for( int i = d-2; i >= 0; i-- )
3310 v = (int)(ofs - t*szi);
3312 sliceStart += v*m->step[i];
3315 sliceEnd = sliceStart + m->size[d-1]*elemSize;
3319 ptr = sliceStart + (ptr - m->data);
// Seeks by a multi-dimensional index: folds _idx into a single linear offset
// (2D fast path, then general mixed-radix loop) and delegates to the
// ptrdiff_t overload.
3322 void MatConstIterator::seek(const int* _idx, bool relative)
3329 ofs = _idx[0]*m->size[1] + _idx[1];
3332 for( i = 0; i < d; i++ )
3333 ofs = ofs*m->size[i] + _idx[i];
3335 seek(ofs, relative);
// Distance between two iterators over the same matrix. Same-slice iterators
// use direct pointer subtraction; otherwise fall back to linear positions.
3338 ptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a)
3342 if( a.sliceEnd == b.sliceEnd )
3343 return (b.ptr - a.ptr)/b.elemSize;
3345 return b.lpos() - a.lpos();
3348 //////////////////////////////// SparseMat ////////////////////////////////
// Converts `cn` channels of one element from T1 to T2 with saturation.
// The single-channel case is special-cased ahead of the per-channel loop.
3350 template<typename T1, typename T2> void
3351 convertData_(const void* _from, void* _to, int cn)
3353 const T1* from = (const T1*)_from;
3356 *to = saturate_cast<T2>(*from);
3358 for( int i = 0; i < cn; i++ )
3359 to[i] = saturate_cast<T2>(from[i]);
// Like convertData_, but applies the affine transform v*alpha + beta before
// the saturating cast.
3362 template<typename T1, typename T2> void
3363 convertScaleData_(const void* _from, void* _to, int cn, double alpha, double beta)
3365 const T1* from = (const T1*)_from;
3368 *to = saturate_cast<T2>(*from*alpha + beta);
3370 for( int i = 0; i < cn; i++ )
3371 to[i] = saturate_cast<T2>(from[i]*alpha + beta);
// Returns the per-element conversion function for a (fromType, toType) depth
// pair from an 8x8 static dispatch table (7 depths + an unused slot).
// Asserts that the pair is supported.
3374 ConvertData getConvertElem(int fromType, int toType)
3376 static ConvertData tab[][8] =
3377 {{ convertData_<uchar, uchar>, convertData_<uchar, schar>,
3378 convertData_<uchar, ushort>, convertData_<uchar, short>,
3379 convertData_<uchar, int>, convertData_<uchar, float>,
3380 convertData_<uchar, double>, 0 },
3382 { convertData_<schar, uchar>, convertData_<schar, schar>,
3383 convertData_<schar, ushort>, convertData_<schar, short>,
3384 convertData_<schar, int>, convertData_<schar, float>,
3385 convertData_<schar, double>, 0 },
3387 { convertData_<ushort, uchar>, convertData_<ushort, schar>,
3388 convertData_<ushort, ushort>, convertData_<ushort, short>,
3389 convertData_<ushort, int>, convertData_<ushort, float>,
3390 convertData_<ushort, double>, 0 },
3392 { convertData_<short, uchar>, convertData_<short, schar>,
3393 convertData_<short, ushort>, convertData_<short, short>,
3394 convertData_<short, int>, convertData_<short, float>,
3395 convertData_<short, double>, 0 },
3397 { convertData_<int, uchar>, convertData_<int, schar>,
3398 convertData_<int, ushort>, convertData_<int, short>,
3399 convertData_<int, int>, convertData_<int, float>,
3400 convertData_<int, double>, 0 },
3402 { convertData_<float, uchar>, convertData_<float, schar>,
3403 convertData_<float, ushort>, convertData_<float, short>,
3404 convertData_<float, int>, convertData_<float, float>,
3405 convertData_<float, double>, 0 },
3407 { convertData_<double, uchar>, convertData_<double, schar>,
3408 convertData_<double, ushort>, convertData_<double, short>,
3409 convertData_<double, int>, convertData_<double, float>,
3410 convertData_<double, double>, 0 },
3412 { 0, 0, 0, 0, 0, 0, 0, 0 }};
// Index by depth only; channel count is handled by the returned function.
3414 ConvertData func = tab[CV_MAT_DEPTH(fromType)][CV_MAT_DEPTH(toType)];
3415 CV_Assert( func != 0 );
// Scaled counterpart of getConvertElem: returns the per-element
// convert-with-alpha/beta function for a depth pair from a static table.
3419 ConvertScaleData getConvertScaleElem(int fromType, int toType)
3421 static ConvertScaleData tab[][8] =
3422 {{ convertScaleData_<uchar, uchar>, convertScaleData_<uchar, schar>,
3423 convertScaleData_<uchar, ushort>, convertScaleData_<uchar, short>,
3424 convertScaleData_<uchar, int>, convertScaleData_<uchar, float>,
3425 convertScaleData_<uchar, double>, 0 },
3427 { convertScaleData_<schar, uchar>, convertScaleData_<schar, schar>,
3428 convertScaleData_<schar, ushort>, convertScaleData_<schar, short>,
3429 convertScaleData_<schar, int>, convertScaleData_<schar, float>,
3430 convertScaleData_<schar, double>, 0 },
3432 { convertScaleData_<ushort, uchar>, convertScaleData_<ushort, schar>,
3433 convertScaleData_<ushort, ushort>, convertScaleData_<ushort, short>,
3434 convertScaleData_<ushort, int>, convertScaleData_<ushort, float>,
3435 convertScaleData_<ushort, double>, 0 },
3437 { convertScaleData_<short, uchar>, convertScaleData_<short, schar>,
3438 convertScaleData_<short, ushort>, convertScaleData_<short, short>,
3439 convertScaleData_<short, int>, convertScaleData_<short, float>,
3440 convertScaleData_<short, double>, 0 },
3442 { convertScaleData_<int, uchar>, convertScaleData_<int, schar>,
3443 convertScaleData_<int, ushort>, convertScaleData_<int, short>,
3444 convertScaleData_<int, int>, convertScaleData_<int, float>,
3445 convertScaleData_<int, double>, 0 },
3447 { convertScaleData_<float, uchar>, convertScaleData_<float, schar>,
3448 convertScaleData_<float, ushort>, convertScaleData_<float, short>,
3449 convertScaleData_<float, int>, convertScaleData_<float, float>,
3450 convertScaleData_<float, double>, 0 },
3452 { convertScaleData_<double, uchar>, convertScaleData_<double, schar>,
3453 convertScaleData_<double, ushort>, convertScaleData_<double, short>,
3454 convertScaleData_<double, int>, convertScaleData_<double, float>,
3455 convertScaleData_<double, double>, 0 },
3457 { 0, 0, 0, 0, 0, 0, 0, 0 }};
3459 ConvertScaleData func = tab[CV_MAT_DEPTH(fromType)][CV_MAT_DEPTH(toType)];
3460 CV_Assert( func != 0 );
// Initial (and minimum) SparseMat hash table size; must be a power of two
// because bucket selection uses `hash & (size - 1)`.
3464 enum { HASH_SIZE0 = 8 };
// Copies one sparse-matrix element of `elemSize` bytes: int-sized chunks
// first, then a byte-wise tail for the remainder.
// NOTE(review): the byte-copy statement of the tail loop is elided here.
3466 static inline void copyElem(const uchar* from, uchar* to, size_t elemSize)
3469 for( i = 0; (int)i <= (int)(elemSize - sizeof(int)); i += sizeof(int) )
3470 *(int*)(to + i) = *(const int*)(from + i);
3471 for( ; i < elemSize; i++ )
// Tests whether all `elemSize` bytes of an element are zero, scanning
// int-sized chunks first and then the byte tail.
3475 static inline bool isZeroElem(const uchar* data, size_t elemSize)
3478 for( i = 0; i <= elemSize - sizeof(int); i += sizeof(int) )
3479 if( *(int*)(data + i) != 0 )
3481 for( ; i < elemSize; i++ )
// SparseMat header constructor: computes node layout (value offset aligned to
// the element type, node size aligned to size_t) and records the dimension
// sizes, zero-padding the unused dims up to CV_MAX_DIM.
3487 SparseMat::Hdr::Hdr( int _dims, const int* _sizes, int _type )
// Extra index slots are needed when dims exceeds the inline CV_MAX_DIM array.
3492 valueOffset = (int)alignSize(sizeof(SparseMat::Node) +
3493 sizeof(int)*std::max(dims - CV_MAX_DIM, 0), CV_ELEM_SIZE1(_type));
3494 nodeSize = alignSize(valueOffset +
3495 CV_ELEM_SIZE(_type), (int)sizeof(size_t));
3498 for( i = 0; i < dims; i++ )
3499 size[i] = _sizes[i];
3500 for( ; i < CV_MAX_DIM; i++ )
// Resets the header to the empty state: hash table back to its initial size,
// node pool shrunk to a single (unused) node slot, counters zeroed.
3505 void SparseMat::Hdr::clear()
3508 hashtab.resize(HASH_SIZE0);
3510 pool.resize(nodeSize);
3511 nodeCount = freeList = 0;
// Builds a sparse matrix from a dense one: walks the dense data in
// row-major order (tracking the n-D index in `idx`) and inserts a node for
// every non-zero element.
// NOTE(review): some loop-control lines (continue, outer-loop exit) are
// elided in this extract.
3515 SparseMat::SparseMat(const Mat& m)
3516 : flags(MAGIC_VAL), hdr(0)
3518 create( m.dims, m.size, m.type() );
3520 int i, idx[CV_MAX_DIM] = {0}, d = m.dims, lastSize = m.size[d - 1];
3521 size_t esz = m.elemSize();
3522 uchar* dptr = m.data;
// Innermost dimension: scan contiguous elements.
3526 for( i = 0; i < lastSize; i++, dptr += esz )
3528 if( isZeroElem(dptr, esz) )
3531 uchar* to = newNode(idx, hash(idx));
3532 copyElem( dptr, to, esz );
// Carry the n-D index like an odometer and re-align dptr across the
// (possibly non-contiguous) outer-dimension strides.
3535 for( i = d - 2; i >= 0; i-- )
3537 dptr += m.step[i] - m.size[i+1]*m.step[i+1];
3538 if( ++idx[i] < m.size[i] )
// Builds a SparseMat from a legacy CvSparseMat by iterating its nodes and
// copying each element under the same multi-dimensional index.
3547 SparseMat::SparseMat(const CvSparseMat* m)
3548 : flags(MAGIC_VAL), hdr(0)
3551 create( m->dims, &m->size[0], m->type );
3553 CvSparseMatIterator it;
3554 CvSparseNode* n = cvInitSparseMatIterator(m, &it);
3555 size_t esz = elemSize();
3557 for( ; n != 0; n = cvGetNextSparseNode(&it) )
3559 const int* idx = CV_NODE_IDX(m, n);
3560 uchar* to = newNode(idx, hash(idx));
3561 copyElem((const uchar*)CV_NODE_VAL(m, n), to, esz);
// (Re)allocates the sparse matrix for the given dims/sizes/type. If the
// existing header matches and is uniquely owned it can be reused; otherwise
// a fresh Hdr is allocated.
// NOTE(review): the reuse/clear branch body and the release of the old
// header are elided in this extract.
3565 void SparseMat::create(int d, const int* _sizes, int _type)
3568 CV_Assert( _sizes && 0 < d && d <= CV_MAX_DIM );
3569 for( i = 0; i < d; i++ )
3570 CV_Assert( _sizes[i] > 0 );
3571 _type = CV_MAT_TYPE(_type);
// Reuse path: same type/dims and sole ownership of the header.
3572 if( hdr && _type == type() && hdr->dims == d && hdr->refcount == 1 )
3574 for( i = 0; i < d; i++ )
3575 if( _sizes[i] != hdr->size[i] )
3584 flags = MAGIC_VAL | _type;
3585 hdr = new Hdr(d, _sizes, _type);
// Deep copy into another SparseMat: recreate the destination with the same
// geometry, then clone every non-zero node (index, hash, and value bytes).
3588 void SparseMat::copyTo( SparseMat& m ) const
3597 m.create( hdr->dims, hdr->size, type() );
3598 SparseMatConstIterator from = begin();
3599 size_t i, N = nzcount(), esz = elemSize();
3601 for( i = 0; i < N; i++, ++from )
3603 const Node* n = from.node();
// Reuse the stored hash value to avoid recomputing it.
3604 uchar* to = m.newNode(n->idx, n->hashval);
3605 copyElem( from.ptr, to, esz );
// Converts to a dense Mat: allocate the dense array, then scatter each
// non-zero element to its position. NOTE(review): the zero-fill of the dense
// matrix is not visible in this extract — presumably done between create()
// and the loop; confirm against full source.
3609 void SparseMat::copyTo( Mat& m ) const
3612 m.create( dims(), hdr->size, type() );
3615 SparseMatConstIterator from = begin();
3616 size_t i, N = nzcount(), esz = elemSize();
3618 for( i = 0; i < N; i++, ++from )
3620 const Node* n = from.node();
3621 copyElem( from.ptr, m.ptr(n->idx), esz);
// Converts element type (and optionally scales by alpha) into another
// SparseMat. In-place conversion with a type change goes through a
// temporary; the alpha == 1 path uses the unscaled converter.
// NOTE(review): rtype<0 default handling and branch braces are elided here.
3626 void SparseMat::convertTo( SparseMat& m, int rtype, double alpha ) const
3628 int cn = channels();
3631 rtype = CV_MAKETYPE(rtype, cn);
// In-place with a different type: convert into a temp, then adopt it.
3632 if( hdr == m.hdr && rtype != type() )
3635 convertTo(temp, rtype, alpha);
3640 CV_Assert(hdr != 0);
3642 m.create( hdr->dims, hdr->size, rtype );
3644 SparseMatConstIterator from = begin();
3645 size_t i, N = nzcount();
// alpha == 1: plain depth conversion, no scaling.
3649 ConvertData cvtfunc = getConvertElem(type(), rtype);
3650 for( i = 0; i < N; i++, ++from )
3652 const Node* n = from.node();
// In-place same-type conversion writes over the existing value;
// otherwise a new node is created in the destination.
3653 uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval);
3654 cvtfunc( from.ptr, to, cn );
// Scaled path: v*alpha + 0.
3659 ConvertScaleData cvtfunc = getConvertScaleElem(type(), rtype);
3660 for( i = 0; i < N; i++, ++from )
3662 const Node* n = from.node();
3663 uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval);
3664 cvtfunc( from.ptr, to, cn, alpha, 0 );
// Converts to a dense Mat with optional affine transform alpha*v + beta.
// NOTE(review): the initialization of the dense matrix background (to beta)
// is not visible in this extract; confirm against full source.
3670 void SparseMat::convertTo( Mat& m, int rtype, double alpha, double beta ) const
3672 int cn = channels();
3675 rtype = CV_MAKETYPE(rtype, cn);
3678 m.create( dims(), hdr->size, rtype );
3681 SparseMatConstIterator from = begin();
3682 size_t i, N = nzcount();
// Identity transform: use the faster unscaled converter.
3684 if( alpha == 1 && beta == 0 )
3686 ConvertData cvtfunc = getConvertElem(type(), rtype);
3687 for( i = 0; i < N; i++, ++from )
3689 const Node* n = from.node();
3690 uchar* to = m.ptr(n->idx);
3691 cvtfunc( from.ptr, to, cn );
3696 ConvertScaleData cvtfunc = getConvertScaleElem(type(), rtype);
3697 for( i = 0; i < N; i++, ++from )
3699 const Node* n = from.node();
3700 uchar* to = m.ptr(n->idx);
3701 cvtfunc( from.ptr, to, cn, alpha, beta );
// Removes all elements (body elided in this extract; presumably delegates
// to hdr->clear() — confirm against full source).
3706 void SparseMat::clear()
// Conversion to the legacy CvSparseMat: allocates a new legacy matrix and
// copies every non-zero element into it (caller owns the result).
3712 SparseMat::operator CvSparseMat*() const
3716 CvSparseMat* m = cvCreateSparseMat(hdr->dims, hdr->size, type());
3718 SparseMatConstIterator from = begin();
3719 size_t i, N = nzcount(), esz = elemSize();
3721 for( i = 0; i < N; i++, ++from )
3723 const Node* n = from.node();
// createNode=-2: legacy API creates the node if missing.
3724 uchar* to = cvPtrND(m, n->idx, 0, -2, 0);
3725 copyElem(from.ptr, to, esz);
// 1-D element access: hashes i0, walks the bucket chain comparing hash and
// index, and returns the value pointer. When not found and createMissing is
// set, a new node is inserted.
// NOTE(review): the chain-advance (nidx = elem->next) and idx[] setup lines
// are elided in this extract.
3730 uchar* SparseMat::ptr(int i0, bool createMissing, size_t* hashval)
3732 CV_Assert( hdr && hdr->dims == 1 );
// Caller may pass a precomputed hash to skip rehashing.
3733 size_t h = hashval ? *hashval : hash(i0);
// Table size is a power of two, so `& (size-1)` selects the bucket.
3734 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
3735 uchar* pool = &hdr->pool[0];
3738 Node* elem = (Node*)(pool + nidx);
3739 if( elem->hashval == h && elem->idx[0] == i0 )
3740 return &value<uchar>(elem);
3747 return newNode( idx, h );
// 2-D element access: same bucket-chain lookup as the 1-D overload, keyed on
// (i0, i1); inserts a new node when missing and createMissing is set.
3752 uchar* SparseMat::ptr(int i0, int i1, bool createMissing, size_t* hashval)
3754 CV_Assert( hdr && hdr->dims == 2 );
3755 size_t h = hashval ? *hashval : hash(i0, i1);
3756 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
3757 uchar* pool = &hdr->pool[0];
3760 Node* elem = (Node*)(pool + nidx);
// Compare the cheap hash first, then the full index.
3761 if( elem->hashval == h && elem->idx[0] == i0 && elem->idx[1] == i1 )
3762 return &value<uchar>(elem);
3768 int idx[] = { i0, i1 };
3769 return newNode( idx, h );
// 3-D element access: bucket-chain lookup keyed on (i0, i1, i2); inserts a
// new node when missing and createMissing is set.
3774 uchar* SparseMat::ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval)
3776 CV_Assert( hdr && hdr->dims == 3 );
3777 size_t h = hashval ? *hashval : hash(i0, i1, i2);
3778 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
3779 uchar* pool = &hdr->pool[0];
3782 Node* elem = (Node*)(pool + nidx);
3783 if( elem->hashval == h && elem->idx[0] == i0 &&
3784 elem->idx[1] == i1 && elem->idx[2] == i2 )
3785 return &value<uchar>(elem);
3791 int idx[] = { i0, i1, i2 };
3792 return newNode( idx, h );
// General n-D element access: bucket-chain lookup comparing the full index
// array after a hash match; optionally creates the node when absent.
3797 uchar* SparseMat::ptr(const int* idx, bool createMissing, size_t* hashval)
3800 int i, d = hdr->dims;
3801 size_t h = hashval ? *hashval : hash(idx);
3802 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
3803 uchar* pool = &hdr->pool[0];
3806 Node* elem = (Node*)(pool + nidx);
3807 if( elem->hashval == h )
// Hash matched: verify every index component before accepting.
3809 for( i = 0; i < d; i++ )
3810 if( elem->idx[i] != idx[i] )
3813 return &value<uchar>(elem);
3818 return createMissing ? newNode(idx, h) : 0;
// Removes the (i0, i1) element if present: finds the node in its bucket
// chain (tracking the predecessor in previdx) and unlinks it via removeNode.
// NOTE(review): chain-advance lines are elided in this extract.
3821 void SparseMat::erase(int i0, int i1, size_t* hashval)
3823 CV_Assert( hdr && hdr->dims == 2 );
3824 size_t h = hashval ? *hashval : hash(i0, i1);
3825 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
3826 uchar* pool = &hdr->pool[0];
3829 Node* elem = (Node*)(pool + nidx);
3830 if( elem->hashval == h && elem->idx[0] == i0 && elem->idx[1] == i1 )
3837 removeNode(hidx, nidx, previdx);
// Removes the (i0, i1, i2) element if present; same chain walk + unlink
// scheme as the 2-D overload.
3840 void SparseMat::erase(int i0, int i1, int i2, size_t* hashval)
3842 CV_Assert( hdr && hdr->dims == 3 );
3843 size_t h = hashval ? *hashval : hash(i0, i1, i2);
3844 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
3845 uchar* pool = &hdr->pool[0];
3848 Node* elem = (Node*)(pool + nidx);
3849 if( elem->hashval == h && elem->idx[0] == i0 &&
3850 elem->idx[1] == i1 && elem->idx[2] == i2 )
3857 removeNode(hidx, nidx, previdx);
// Removes the element at an arbitrary n-D index if present, comparing the
// full index array after a hash match.
3860 void SparseMat::erase(const int* idx, size_t* hashval)
3863 int i, d = hdr->dims;
3864 size_t h = hashval ? *hashval : hash(idx);
3865 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
3866 uchar* pool = &hdr->pool[0];
3869 Node* elem = (Node*)(pool + nidx);
3870 if( elem->hashval == h )
3872 for( i = 0; i < d; i++ )
3873 if( elem->idx[i] != idx[i] )
3883 removeNode(hidx, nidx, previdx);
// Grows (or sets) the hash table to `newsize` buckets, rounded to a power of
// two (minimum 8), and rehashes every existing node into the new table by
// prepending it to its new bucket chain.
3886 void SparseMat::resizeHashTab(size_t newsize)
3888 newsize = std::max(newsize, (size_t)8);
// Round non-power-of-two sizes up via log2 (bucket masking requires 2^k).
3889 if((newsize & (newsize-1)) != 0)
3890 newsize = (size_t)1 << cvCeil(std::log((double)newsize)/CV_LOG2);
3892 size_t i, hsize = hdr->hashtab.size();
3893 vector<size_t> _newh(newsize);
3894 size_t* newh = &_newh[0];
3895 for( i = 0; i < newsize; i++ )
3897 uchar* pool = &hdr->pool[0];
3898 for( i = 0; i < hsize; i++ )
3900 size_t nidx = hdr->hashtab[i];
3903 Node* elem = (Node*)(pool + nidx);
// Save `next` before relinking — the node moves to a new chain head.
3904 size_t next = elem->next;
3905 size_t newhidx = elem->hashval & (newsize - 1);
3906 elem->next = newh[newhidx];
3907 newh[newhidx] = nidx;
3911 hdr->hashtab = _newh;
// Allocates and links a new node for `idx` with the given hash: grows the
// hash table when the load factor exceeds HASH_MAX_FILL_FACTOR, refills the
// free list (doubling the pool) when empty, links the node at the head of
// its bucket, stores the index, and returns the zero-initialized value slot.
// NOTE(review): value zeroing for non-float/double sizes is elided here.
3914 uchar* SparseMat::newNode(const int* idx, size_t hashval)
3916 const int HASH_MAX_FILL_FACTOR=3;
3918 size_t hsize = hdr->hashtab.size();
// Keep average chain length bounded: grow when count > 3 * buckets.
3919 if( ++hdr->nodeCount > hsize*HASH_MAX_FILL_FACTOR )
3921 resizeHashTab(std::max(hsize*2, (size_t)8));
3922 hsize = hdr->hashtab.size();
3925 if( !hdr->freeList )
// Double the pool and thread the new slots into a free list. Offset 0
// is reserved (0 means "no node"), hence freeList starts at max(psize, nsz).
3927 size_t i, nsz = hdr->nodeSize, psize = hdr->pool.size(),
3928 newpsize = std::max(psize*2, 8*nsz);
3929 hdr->pool.resize(newpsize);
3930 uchar* pool = &hdr->pool[0];
3931 hdr->freeList = std::max(psize, nsz);
3932 for( i = hdr->freeList; i < newpsize - nsz; i += nsz )
3933 ((Node*)(pool + i))->next = i + nsz;
3934 ((Node*)(pool + i))->next = 0;
// Pop a node off the free list and push it onto its bucket chain.
3936 size_t nidx = hdr->freeList;
3937 Node* elem = (Node*)&hdr->pool[nidx];
3938 hdr->freeList = elem->next;
3939 elem->hashval = hashval;
3940 size_t hidx = hashval & (hsize - 1);
3941 elem->next = hdr->hashtab[hidx];
3942 hdr->hashtab[hidx] = nidx;
3944 int i, d = hdr->dims;
3945 for( i = 0; i < d; i++ )
3946 elem->idx[i] = idx[i];
3947 size_t esz = elemSize();
3948 uchar* p = &value<uchar>(elem);
// Fast zero-initialization for the common 4/8-byte element sizes.
3949 if( esz == sizeof(float) )
3951 else if( esz == sizeof(double) )
// Unlinks node `nidx` from bucket `hidx` (via its predecessor `previdx`, or
// the bucket head when it is first in the chain) and returns it to the
// free list. NOTE(review): nodeCount decrement is not visible in this extract.
3960 void SparseMat::removeNode(size_t hidx, size_t nidx, size_t previdx)
3962 Node* n = node(nidx);
3965 Node* prev = node(previdx);
3966 prev->next = n->next;
// No predecessor: the bucket head pointed at this node.
3969 hdr->hashtab[hidx] = n->next;
3970 n->next = hdr->freeList;
3971 hdr->freeList = nidx;
// Constructs a begin() iterator: scans the hash table for the first
// non-empty bucket and points `ptr` at that node's value slot.
3976 SparseMatConstIterator::SparseMatConstIterator(const SparseMat* _m)
3977 : m((SparseMat*)_m), hashidx(0), ptr(0)
3981 SparseMat::Hdr& hdr = *m->hdr;
3982 const vector<size_t>& htab = hdr.hashtab;
3983 size_t i, hsize = htab.size();
3984 for( i = 0; i < hsize; i++ )
3986 size_t nidx = htab[i];
// Node value lives `valueOffset` bytes past the node header in the pool.
3990 ptr = &hdr.pool[nidx] + hdr.valueOffset;
// Advances to the next non-zero element: follow the current node's chain
// link if any, otherwise scan forward through the hash buckets for the next
// non-empty one. NOTE(review): end-of-iteration handling is elided here.
3996 SparseMatConstIterator& SparseMatConstIterator::operator ++()
3998 if( !ptr || !m || !m->hdr )
4000 SparseMat::Hdr& hdr = *m->hdr;
// Recover the node header from the value pointer to read its `next` link.
4001 size_t next = ((const SparseMat::Node*)(ptr - hdr.valueOffset))->next;
4004 ptr = &hdr.pool[next] + hdr.valueOffset;
4007 size_t i = hashidx + 1, sz = hdr.hashtab.size();
4008 for( ; i < sz; i++ )
4010 size_t nidx = hdr.hashtab[i];
4014 ptr = &hdr.pool[nidx] + hdr.valueOffset;
// Computes NORM_INF / NORM_L1 / NORM_L2 over the non-zero elements of a
// CV_32F or CV_64F sparse matrix (zeros contribute nothing to any of these
// norms, so iterating only stored nodes is sufficient).
4024 double norm( const SparseMat& src, int normType )
4026 SparseMatConstIterator it = src.begin();
4028 size_t i, N = src.nzcount();
4029 normType &= NORM_TYPE_MASK;
4030 int type = src.type();
4033 CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
4035 if( type == CV_32F )
4037 if( normType == NORM_INF )
4038 for( i = 0; i < N; i++, ++it )
4039 result = std::max(result, std::abs((double)*(const float*)it.ptr));
4040 else if( normType == NORM_L1 )
4041 for( i = 0; i < N; i++, ++it )
4042 result += std::abs(*(const float*)it.ptr);
// L2: accumulate squares; the sqrt is applied once at the end.
4044 for( i = 0; i < N; i++, ++it )
4046 double v = *(const float*)it.ptr;
4050 else if( type == CV_64F )
4052 if( normType == NORM_INF )
4053 for( i = 0; i < N; i++, ++it )
4054 result = std::max(result, std::abs(*(const double*)it.ptr));
4055 else if( normType == NORM_L1 )
4056 for( i = 0; i < N; i++, ++it )
4057 result += std::abs(*(const double*)it.ptr);
4059 for( i = 0; i < N; i++, ++it )
4061 double v = *(const double*)it.ptr;
4066 CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );
4068 if( normType == NORM_L2 )
4069 result = std::sqrt(result);
// Finds the min/max values among the NON-ZERO elements of a CV_32F/CV_64F
// sparse matrix, optionally reporting their n-D indices. All output
// pointers are optional.
// NOTE(review): comparison/update lines and output-null checks are partially
// elided in this extract.
4073 void minMaxLoc( const SparseMat& src, double* _minval, double* _maxval, int* _minidx, int* _maxidx )
4075 SparseMatConstIterator it = src.begin();
4076 size_t i, N = src.nzcount(), d = src.hdr ? src.hdr->dims : 0;
4077 int type = src.type();
// Track the winning nodes' index arrays; copied out at the end.
4078 const int *minidx = 0, *maxidx = 0;
4080 if( type == CV_32F )
4082 float minval = FLT_MAX, maxval = -FLT_MAX;
4083 for( i = 0; i < N; i++, ++it )
4085 float v = *(const float*)it.ptr;
4089 minidx = it.node()->idx;
4094 maxidx = it.node()->idx;
4102 else if( type == CV_64F )
4104 double minval = DBL_MAX, maxval = -DBL_MAX;
4105 for( i = 0; i < N; i++, ++it )
4107 double v = *(const double*)it.ptr;
4111 minidx = it.node()->idx;
4116 maxidx = it.node()->idx;
4125 CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );
4128 for( i = 0; i < d; i++ )
4129 _minidx[i] = minidx[i];
4131 for( i = 0; i < d; i++ )
4132 _maxidx[i] = maxidx[i];
// Normalizes a sparse matrix so its chosen norm (L1/L2/C) equals `a`:
// computes the norm, derives the scale factor (0 if the norm is ~0 to avoid
// division blow-up), and applies it via convertTo with the same type (-1).
4136 void normalize( const SparseMat& src, SparseMat& dst, double a, int norm_type )
4139 if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
4141 scale = norm( src, norm_type );
4142 scale = scale > DBL_EPSILON ? a/scale : 0.;
4145 CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );
4147 src.convertTo( dst, -1, scale );
4150 ////////////////////// RotatedRect //////////////////////
// Computes the 4 corner points of the rotated rectangle. a/b are the
// half-extent projections of the rotation (sin/cos scaled by 0.5); corners
// 2 and 3 are the point reflections of 0 and 1 through the center.
4152 void RotatedRect::points(Point2f pt[]) const
4154 double _angle = angle*CV_PI/180.;
4155 float b = (float)cos(_angle)*0.5f;
4156 float a = (float)sin(_angle)*0.5f;
4158 pt[0].x = center.x - a*size.height - b*size.width;
4159 pt[0].y = center.y + b*size.height - a*size.width;
4160 pt[1].x = center.x + a*size.height - b*size.width;
4161 pt[1].y = center.y - b*size.height - a*size.width;
// Opposite corners by symmetry about the center.
4162 pt[2].x = 2*center.x - pt[0].x;
4163 pt[2].y = 2*center.y - pt[0].y;
4164 pt[3].x = 2*center.x - pt[1].x;
4165 pt[3].y = 2*center.y - pt[1].y;
4168 Rect RotatedRect::boundingRect() const
4172 Rect r(cvFloor(min(min(min(pt[0].x, pt[1].x), pt[2].x), pt[3].x)),
4173 cvFloor(min(min(min(pt[0].y, pt[1].y), pt[2].y), pt[3].y)),
4174 cvCeil(max(max(max(pt[0].x, pt[1].x), pt[2].x), pt[3].x)),
4175 cvCeil(max(max(max(pt[0].y, pt[1].y), pt[2].y), pt[3].y)));
4177 r.height -= r.y - 1;