// Returns a short human-readable name ("8u", "8s", ..., "64f") for the matrix
// depth encoded in `type`; CV_MAT_DEPTH extracts the depth index used to
// address the static name table.
10 const char* getTypeName( int type )
12 static const char* type_names[] = { "8u", "8s", "16u", "16s", "32s", "32f", "64f", "ptr" };
13 return type_names[CV_MAT_DEPTH(type)];
// Inverse of getTypeName(): linear scan over all depths comparing names.
// NOTE(review): the return statements are elided in this listing (embedded
// line-number gaps) -- presumably returns the matching depth index, or a
// sentinel when no name matches; confirm against the full source.
16 int typeByName( const char* name )
19 for( i = 0; i < CV_DEPTH_MAX; i++ )
20 if( strcmp(name, getTypeName(i)) == 0 )
// Formats an integer vector as a string: each element is printed with sprintf
// into a small temporary buffer and appended to the result.
// NOTE(review): the separator-insertion line is elided here -- presumably `sep`
// is appended between consecutive elements; confirm against the full source.
25 string vec2str( const string& sep, const int* v, size_t nelems )
29 for( size_t i = 0; i < nelems; i++ )
31 sprintf(buf, "%d", v[i]);
32 result += string(buf);
// Picks a random 2D size with log-uniform distribution: the height's log
// budget is reduced by the width's so the total area stays bounded by
// exp(maxSizeLog); a coin flip then swaps the two logs so neither axis is
// systematically the larger one.
40 Size randomSize(RNG& rng, double maxSizeLog)
42 double width_log = rng.uniform(0., maxSizeLog);
43 double height_log = rng.uniform(0., maxSizeLog - width_log);
// (unsigned)rng draws a raw random integer; odd values trigger the swap.
44 if( (unsigned)rng % 2 != 0 )
45 std::swap(width_log, height_log);
47 sz.width = cvRound(exp(width_log));
48 sz.height = cvRound(exp(height_log));
// N-dimensional variant: chooses a random dimensionality in
// [minDims, maxDims], draws each extent log-uniformly, then applies `dims`
// random transpositions so no particular axis is biased toward being large.
52 void randomSize(RNG& rng, int minDims, int maxDims, double maxSizeLog, vector<int>& sz)
54 int i, dims = rng.uniform(minDims, maxDims+1);
56 for( i = 0; i < dims; i++ )
58 double v = rng.uniform(0., maxSizeLog);
60 sz[i] = cvRound(exp(v));
// Random shuffle by repeated transposition of two random positions.
62 for( i = 0; i < dims; i++ )
64 int j = rng.uniform(0, dims);
65 int k = rng.uniform(0, dims);
66 std::swap(sz[j], sz[k]);
// Builds a random matrix type: channel count is uniform in
// [minChannels, maxChannels]; the depth is rejection-sampled until its bit is
// set in `typeMask`. The assert guarantees at least one depth bit is set,
// so the (elided) sampling loop terminates.
70 int randomType(RNG& rng, int typeMask, int minChannels, int maxChannels)
72 int channels = rng.uniform(minChannels, maxChannels+1);
74 CV_Assert((typeMask & DEPTH_MASK_ALL) != 0);
77 depth = rng.uniform(CV_8U, CV_64F+1);
// Keep the drawn depth only if its corresponding mask bit is enabled.
78 if( ((1 << depth) & typeMask) != 0 )
81 return CV_MAKETYPE(depth, channels);
// Minimum representable value for a matrix depth, as a double. Unsigned depths
// map to 0; float/double map to -FLT_MAX/-DBL_MAX (most-negative finite).
// -1 is a sentinel for an unknown depth -- presumably checked by an elided
// CV_Assert, mirroring getMaxVal() below; confirm against the full source.
84 double getMinVal(int depth)
86 depth = CV_MAT_DEPTH(depth);
87 double val = depth == CV_8U ? 0 : depth == CV_8S ? SCHAR_MIN : depth == CV_16U ? 0 :
88 depth == CV_16S ? SHRT_MIN : depth == CV_32S ? INT_MIN :
89 depth == CV_32F ? -FLT_MAX : depth == CV_64F ? -DBL_MAX : -1;
// Maximum representable value for a matrix depth, as a double.
// -1 is the unknown-depth sentinel; the assert rejects it.
94 double getMaxVal(int depth)
96 depth = CV_MAT_DEPTH(depth);
97 double val = depth == CV_8U ? UCHAR_MAX : depth == CV_8S ? SCHAR_MAX : depth == CV_16U ? USHRT_MAX :
98 depth == CV_16S ? SHRT_MAX : depth == CV_32S ? INT_MAX :
99 depth == CV_32F ? FLT_MAX : depth == CV_64F ? DBL_MAX : -1;
100 CV_Assert(val != -1);
// Creates a random matrix of the given size/type filled uniformly in
// [minVal, maxVal]. The enclosing matrix `size0` is padded by up to 5 extra
// rows/cols (when useRoi is set -- the guarding condition is elided here) so
// the returned view is a centered ROI of a larger allocation, exercising
// non-contiguous-step code paths in the functions under test.
104 Mat randomMat(RNG& rng, Size size, int type, double minVal, double maxVal, bool useRoi)
109 size0.width += std::max(rng.uniform(0, 10) - 5, 0);
110 size0.height += std::max(rng.uniform(0, 10) - 5, 0);
115 rng.fill(m, RNG::UNIFORM, Scalar::all(minVal), Scalar::all(maxVal));
// Centered sub-rectangle of the padded allocation.
118 return m(Rect((size0.width-size.width)/2, (size0.height-size.height)/2, size.width, size.height));
// N-dimensional variant of randomMat(): pads each extent by up to 2 (ROI
// case; the useRoi guard is elided in this listing), records the centered
// Range per dimension, fills the padded matrix uniformly, and -- presumably,
// via elided lines -- returns either the whole matrix (eqsize) or the m(&r[0])
// sub-volume; confirm against the full source.
121 Mat randomMat(RNG& rng, const vector<int>& size, int type, double minVal, double maxVal, bool useRoi)
123 int i, dims = (int)size.size();
124 vector<int> size0(dims);
125 vector<Range> r(dims);
127 for( i = 0; i < dims; i++ )
133 size0[i] += std::max(rng.uniform(0, 5) - 2, 0);
134 r[i] = Range((size0[i] - size[i])/2, (size0[i] - size[i])/2 + size[i]);
// eqsize stays true only if no dimension was padded.
136 eqsize = eqsize && size[i] == size0[i];
139 Mat m(dims, &size0[0], type);
141 rng.fill(m, RNG::UNIFORM, Scalar::all(minVal), Scalar::all(maxVal));
// Reference implementation of c = alpha*a + beta*b + gamma (optionally
// |...| when calcAbs) computed entirely in double precision, used as ground
// truth for testing the optimized cv::add/addWeighted. Works plane-by-plane
// via NAryMatIterator, and within each plane in chunks of at most `maxsize`
// elements buffered as CV_64F.
147 void add(const Mat& _a, double alpha, const Mat& _b, double beta,
148 Scalar gamma, Mat& c, int ctype, bool calcAbs)
// If one operand is missing/zero-weighted, the (elided) lines presumably
// swap roles so `a` is always the active operand; at least one input must
// be non-empty to define the output geometry.
151 if( a.empty() || alpha == 0 )
153 // both alpha and beta can be 0, but at least one of a and b must be non-empty array,
154 // otherwise we do not know the size of the output (and may be type of the output, when ctype<0)
155 CV_Assert( !a.empty() || !b.empty() );
164 if( b.empty() || beta == 0 )
170 CV_Assert(a.size == b.size);
// Output keeps a's channel count; only the depth comes from ctype.
174 ctype = CV_MAKETYPE(CV_MAT_DEPTH(ctype), a.channels());
175 c.create(a.dims, &a.size[0], ctype);
176 const Mat *arrays[] = {&a, &b, &c, 0};
177 Mat planes[3], buf[3];
179 NAryMatIterator it(arrays, planes);
180 size_t i, nplanes = it.nplanes;
// Chunk size caps the double-precision scratch buffers (~12x12 elements).
182 int total = (int)planes[0].total(), maxsize = std::min(12*12*std::max(12/cn, 1), total);
184 CV_Assert(planes[0].rows == 1);
185 buf[0].create(1, maxsize, CV_64FC(cn));
187 buf[1].create(1, maxsize, CV_64FC(cn));
188 buf[2].create(1, maxsize, CV_64FC(cn));
// buf[2] holds gamma replicated across the whole chunk.
189 scalarToRawData(gamma, buf[2].data, CV_64FC(cn), (int)(maxsize*cn));
191 for( i = 0; i < nplanes; i++, ++it)
193 for( int j = 0; j < total; j += maxsize )
195 int j2 = std::min(j + maxsize, total);
196 Mat apart0 = planes[0].colRange(j, j2);
197 Mat cpart0 = planes[2].colRange(j, j2);
198 Mat apart = buf[0].colRange(0, j2 - j);
// apart = alpha * a-chunk, widened to double.
200 apart0.convertTo(apart, apart.type(), alpha);
201 size_t k, n = (j2 - j)*cn;
202 double* aptr = (double*)apart.data;
203 const double* gptr = (const double*)buf[2].data;
// (Elided branch) presumably: when b is absent, add only gamma here.
207 for( k = 0; k < n; k++ )
212 Mat bpart0 = planes[1].colRange((int)j, (int)j2);
213 Mat bpart = buf[1].colRange(0, (int)(j2 - j));
214 bpart0.convertTo(bpart, bpart.type(), beta);
215 const double* bptr = (const double*)bpart.data;
// Accumulate beta*b + gamma into the alpha*a buffer.
217 for( k = 0; k < n; k++ )
218 aptr[k] += bptr[k] + gptr[k];
// (Elided guard) presumably: only when calcAbs is set.
221 for( k = 0; k < n; k++ )
222 aptr[k] = fabs(aptr[k]);
// Saturating conversion back to the output depth.
223 apart.convertTo(cpart0, cpart0.type(), 1, 0);
// Element-wise conversion src -> dst with saturation: dst[i] =
// saturate_cast<_Tp2>(src[i]*alpha + beta). Three specialized loops (the
// branch keywords between them are elided in this listing) avoid the
// multiply/add when alpha==1 and/or beta==0.
229 template<typename _Tp1, typename _Tp2> inline void
230 convert_(const _Tp1* src, _Tp2* dst, size_t total, double alpha, double beta)
233 if( alpha == 1 && beta == 0 )
234 for( i = 0; i < total; i++ )
235 dst[i] = saturate_cast<_Tp2>(src[i]);
237 for( i = 0; i < total; i++ )
238 dst[i] = saturate_cast<_Tp2>(src[i]*alpha);
240 for( i = 0; i < total; i++ )
241 dst[i] = saturate_cast<_Tp2>(src[i]*alpha + beta);
// Dispatches convert_<_Tp, DstT>() on the destination depth; `dst` is a raw
// untyped buffer reinterpreted per case. (The `case CV_xx:` labels and
// `break`s are elided in this listing.)
244 template<typename _Tp> inline void
245 convertTo(const _Tp* src, void* dst, int dtype, size_t total, double alpha, double beta)
247 switch( CV_MAT_DEPTH(dtype) )
250 convert_(src, (uchar*)dst, total, alpha, beta);
253 convert_(src, (schar*)dst, total, alpha, beta);
256 convert_(src, (ushort*)dst, total, alpha, beta);
259 convert_(src, (short*)dst, total, alpha, beta);
262 convert_(src, (int*)dst, total, alpha, beta);
265 convert_(src, (float*)dst, total, alpha, beta);
268 convert_(src, (double*)dst, total, alpha, beta);
// Reference implementation of Mat::convertTo: dst = src*alpha + beta with
// saturation, keeping src's channel count and taking only the depth from
// dtype. Iterates plane-by-plane and dispatches on the SOURCE depth (the
// destination depth is dispatched inside convertTo()).
275 void convert(const Mat& src, Mat& dst, int dtype, double alpha, double beta)
277 dtype = CV_MAKETYPE(CV_MAT_DEPTH(dtype), src.channels());
278 dst.create(src.dims, &src.size[0], dtype);
// (Elided guard) presumably handles the alpha==0 degenerate case by
// filling dst with beta; confirm against the full source.
281 set( dst, Scalar::all(beta) );
// Identity conversion short-circuits to a plain copy (elided body).
284 if( dtype == src.type() && alpha == 1 && beta == 0 )
290 const Mat *arrays[]={&src, &dst, 0};
293 NAryMatIterator it(arrays, planes);
294 size_t total = planes[0].total()*planes[0].channels();
295 size_t i, nplanes = it.nplanes;
297 for( i = 0; i < nplanes; i++, ++it)
299 const uchar* sptr = planes[0].data;
300 uchar* dptr = planes[1].data;
302 switch( src.depth() )
305 convertTo((const uchar*)sptr, dptr, dtype, total, alpha, beta);
308 convertTo((const schar*)sptr, dptr, dtype, total, alpha, beta);
311 convertTo((const ushort*)sptr, dptr, dtype, total, alpha, beta);
314 convertTo((const short*)sptr, dptr, dtype, total, alpha, beta);
317 convertTo((const int*)sptr, dptr, dtype, total, alpha, beta);
320 convertTo((const float*)sptr, dptr, dtype, total, alpha, beta);
323 convertTo((const double*)sptr, dptr, dtype, total, alpha, beta);
// Reference Mat::copyTo with optional mask. Without a mask (elided guard),
// each plane is bulk-memcpy'd; with a mask, elements are copied byte-by-byte
// where mask is nonzero -- XORed with invertMask so the same loop also
// implements "copy where mask is zero".
330 void copy(const Mat& src, Mat& dst, const Mat& mask, bool invertMask)
332 dst.create(src.dims, &src.size[0], src.type());
336 const Mat* arrays[] = {&src, &dst, 0};
338 NAryMatIterator it(arrays, planes);
339 size_t i, nplanes = it.nplanes;
340 size_t planeSize = planes[0].total()*src.elemSize();
// Unmasked fast path: whole planes at once.
342 for( i = 0; i < nplanes; i++, ++it )
343 memcpy(planes[1].data, planes[0].data, planeSize);
// Masked path: mask must be CV_8U and match src's geometry.
348 CV_Assert( src.size == mask.size && mask.type() == CV_8U );
350 const Mat *arrays[]={&src, &dst, &mask, 0};
353 NAryMatIterator it(arrays, planes);
354 size_t j, k, elemSize = src.elemSize(), total = planes[0].total();
355 size_t i, nplanes = it.nplanes;
357 for( i = 0; i < nplanes; i++, ++it)
359 const uchar* sptr = planes[0].data;
360 uchar* dptr = planes[1].data;
361 const uchar* mptr = planes[2].data;
363 for( j = 0; j < total; j++, sptr += elemSize, dptr += elemSize )
// Copy the element iff (mask nonzero) XOR invertMask.
365 if( (mptr[j] != 0) ^ invertMask )
366 for( k = 0; k < elemSize; k++ )
// Reference Mat::setTo: fills dst with scalar `gamma`, optionally under a
// CV_8U mask. The scalar is first rendered into raw element bytes; if all
// bytes of one element are equal ("uniform"), whole planes are memset,
// otherwise the element pattern is stamped byte-by-byte.
373 void set(Mat& dst, const Scalar& gamma, const Mat& mask)
376 scalarToRawData(gamma, &buf, dst.type(), dst.channels());
377 const uchar* gptr = (const uchar*)&buf[0];
381 const Mat* arrays[] = {&dst, 0};
383 NAryMatIterator it(arrays, &plane);
384 size_t i, nplanes = it.nplanes;
385 size_t j, k, elemSize = dst.elemSize(), planeSize = plane.total()*elemSize;
// Scan the raw element: if any byte differs from the first, memset won't do.
387 for( k = 1; k < elemSize; k++ )
388 if( gptr[k] != gptr[0] )
390 bool uniform = k >= elemSize;
392 for( i = 0; i < nplanes; i++, ++it )
394 uchar* dptr = plane.data;
396 memset( dptr, gptr[0], planeSize );
// Non-uniform: stamp the element pattern once per element...
399 for( j = 0; j < planeSize; j += elemSize, dptr += elemSize )
400 for( k = 0; k < elemSize; k++ )
// ...then (elided surrounding lines) presumably replicate the first filled
// plane into the remaining planes via memcpy; confirm against full source.
404 memcpy(dptr, dst.data, planeSize);
// Masked path.
409 CV_Assert( dst.size == mask.size && mask.type() == CV_8U );
411 const Mat *arrays[]={&dst, &mask, 0};
414 NAryMatIterator it(arrays, planes);
415 size_t j, k, elemSize = dst.elemSize(), total = planes[0].total();
416 size_t i, nplanes = it.nplanes;
418 for( i = 0; i < nplanes; i++, ++it)
420 uchar* dptr = planes[0].data;
421 const uchar* mptr = planes[1].data;
423 for( j = 0; j < total; j++, dptr += elemSize )
// (Elided mask guard) presumably: if( mptr[j] ) stamp the element.
426 for( k = 0; k < elemSize; k++ )
// Reference insertChannel: copies single-channel `src` into channel `coi` of
// multi-channel `dst`. size0/size1 are the source/destination element byte
// sizes; dptr starts offset by coi source-elements so each byte-copy lands in
// the right channel slot.
433 void insert(const Mat& src, Mat& dst, int coi)
435 CV_Assert( dst.size == src.size && src.depth() == dst.depth() &&
436 0 <= coi && coi < dst.channels() );
438 const Mat* arrays[] = {&src, &dst, 0};
440 NAryMatIterator it(arrays, planes);
441 size_t i, nplanes = it.nplanes;
442 size_t j, k, size0 = src.elemSize(), size1 = dst.elemSize(), total = planes[0].total();
444 for( i = 0; i < nplanes; i++, ++it )
446 const uchar* sptr = planes[0].data;
447 uchar* dptr = planes[1].data + coi*size0;
449 for( j = 0; j < total; j++, sptr += size0, dptr += size1 )
451 for( k = 0; k < size0; k++ )
// Reference extractChannel: the mirror of insert() -- copies channel `coi` of
// multi-channel `src` into single-channel `dst` (created here with src's
// depth). Here size0 is the larger source element, size1 the destination's.
458 void extract(const Mat& src, Mat& dst, int coi)
460 dst.create( src.dims, &src.size[0], src.depth() );
461 CV_Assert( 0 <= coi && coi < src.channels() );
463 const Mat* arrays[] = {&src, &dst, 0};
465 NAryMatIterator it(arrays, planes);
466 size_t i, nplanes = it.nplanes;
467 size_t j, k, size0 = src.elemSize(), size1 = dst.elemSize(), total = planes[0].total();
469 for( i = 0; i < nplanes; i++, ++it )
471 const uchar* sptr = planes[0].data + coi*size1;
472 uchar* dptr = planes[1].data;
474 for( j = 0; j < total; j++, sptr += size0, dptr += size1 )
476 for( k = 0; k < size1; k++ )
// Reference 2D transpose for arbitrary element sizes: walks dst row-by-row,
// reading src column i (stride src.step[0] between elements) and copying each
// element byte-by-byte -- type-agnostic, unlike a templated per-type swap.
483 void transpose(const Mat& src, Mat& dst)
485 CV_Assert(src.dims == 2);
486 dst.create(src.cols, src.rows, src.type());
487 int i, j, k, esz = (int)src.elemSize();
489 for( i = 0; i < dst.rows; i++ )
491 const uchar* sptr = src.ptr(0) + i*esz;
492 uchar* dptr = dst.ptr(i);
494 for( j = 0; j < dst.cols; j++, sptr += src.step[0], dptr += esz )
496 for( k = 0; k < esz; k++ )
// Fills integer-typed data with uniformly distributed values using a raw
// 32-bit draw per element: floor(randInt*scale + delta), saturated to _Tp.
// scale/delta are per-channel, indexed by k.
503 template<typename _Tp> static void
504 randUniInt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, const Scalar& delta)
506 for( size_t i = 0; i < total; i += cn )
507 for( int k = 0; k < cn; k++ )
509 int val = cvFloor( randInt(rng)*scale[k] + delta[k] );
510 data[i + k] = saturate_cast<_Tp>(val);
// Floating-point counterpart of randUniInt_: uses randReal (a [0,1) double)
// scaled and shifted per channel, then saturated to _Tp.
515 template<typename _Tp> static void
516 randUniFlt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, const Scalar& delta)
518 for( size_t i = 0; i < total; i += cn )
519 for( int k = 0; k < cn; k++ )
521 double val = randReal(rng)*scale[k] + delta[k];
522 data[i + k] = saturate_cast<_Tp>(val);
// Reference uniform fill for Mat `a` in the range [param0, param1): rewrites
// the per-channel (scale, delta) pair so the typed helpers can do a single
// multiply-add, then dispatches on depth plane-by-plane.
527 void randUni( RNG& rng, Mat& a, const Scalar& param0, const Scalar& param1 )
529 Scalar scale = param0;
530 Scalar delta = param1;
// C compensates for the 2^-64 scaling of the raw integer draw on integer
// depths; float depths use randReal directly, hence 1.
531 double C = a.depth() < CV_32F ? 1./(65536.*65536.) : 1.;
533 for( int k = 0; k < 4; k++ )
535 double s = scale.val[k] - delta.val[k];
// (Elided lines) presumably normalize so that value = draw*scale + delta
// spans [param0, param1); here delta becomes the lower bound.
540 delta.val[k] = scale.val[k];
546 const Mat *arrays[]={&a, 0};
549 NAryMatIterator it(arrays, &plane);
550 size_t i, nplanes = it.nplanes;
551 int depth = a.depth(), cn = a.channels();
552 size_t total = plane.total()*cn;
554 for( i = 0; i < nplanes; i++, ++it )
559 randUniInt_(rng, plane.ptr<uchar>(), total, cn, scale, delta);
562 randUniInt_(rng, plane.ptr<schar>(), total, cn, scale, delta);
565 randUniInt_(rng, plane.ptr<ushort>(), total, cn, scale, delta);
568 randUniInt_(rng, plane.ptr<short>(), total, cn, scale, delta);
571 randUniInt_(rng, plane.ptr<int>(), total, cn, scale, delta);
574 randUniFlt_(rng, plane.ptr<float>(), total, cn, scale, delta);
577 randUniFlt_(rng, plane.ptr<double>(), total, cn, scale, delta);
// Typed erosion kernel: for each output pixel takes the MINIMUM of the source
// values at the precomputed flat offsets `ofsvec` (one offset per nonzero
// structuring-element cell). `src` is pre-padded, so x+ofs never leaves it.
586 template<typename _Tp> static void
587 erode_(const Mat& src, Mat& dst, const vector<int>& ofsvec)
589 int width = dst.cols*src.channels(), n = (int)ofsvec.size();
590 const int* ofs = &ofsvec[0];
592 for( int y = 0; y < dst.rows; y++ )
594 const _Tp* sptr = src.ptr<_Tp>(y);
595 _Tp* dptr = dst.ptr<_Tp>(y);
597 for( int x = 0; x < width; x++ )
599 _Tp result = sptr[x + ofs[0]];
600 for( int i = 1; i < n; i++ )
601 result = std::min(result, sptr[x + ofs[i]]);
// Typed dilation kernel: identical structure to erode_ but takes the MAXIMUM
// over the structuring-element offsets.
608 template<typename _Tp> static void
609 dilate_(const Mat& src, Mat& dst, const vector<int>& ofsvec)
611 int width = dst.cols*src.channels(), n = (int)ofsvec.size();
612 const int* ofs = &ofsvec[0];
614 for( int y = 0; y < dst.rows; y++ )
616 const _Tp* sptr = src.ptr<_Tp>(y);
617 _Tp* dptr = dst.ptr<_Tp>(y);
619 for( int x = 0; x < width; x++ )
621 _Tp result = sptr[x + ofs[0]];
622 for( int i = 1; i < n; i++ )
623 result = std::max(result, sptr[x + ofs[i]]);
// Reference cv::erode. Pads the source with copyMakeBorder so the typed
// kernel needs no boundary checks; for a constant border the pad value is the
// type's MAXIMUM (the neutral element of min). An empty kernel (elided guard)
// defaults to a 3x3 all-ones structuring element.
630 void erode(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
631 int borderType, const Scalar& _borderValue)
633 //if( _src.type() == CV_16UC3 && _src.size() == Size(1, 2) )
635 Mat kernel = _kernel, src;
636 Scalar borderValue = _borderValue;
638 kernel = Mat::ones(3, 3, CV_8U);
641 CV_Assert( kernel.type() == CV_8U );
643 if( anchor == Point(-1,-1) )
644 anchor = Point(kernel.cols/2, kernel.rows/2);
645 if( borderType == IPL_BORDER_CONSTANT )
646 borderValue = getMaxVal(src.depth());
647 copyMakeBorder(_src, src, anchor.y, kernel.rows - anchor.y - 1,
648 anchor.x, kernel.cols - anchor.x - 1,
649 borderType, borderValue);
650 dst.create( _src.size(), src.type() );
// Flat offsets (in elements, not bytes) of every nonzero kernel cell.
653 int step = (int)(src.step/src.elemSize1()), cn = src.channels();
654 for( int i = 0; i < kernel.rows; i++ )
655 for( int j = 0; j < kernel.cols; j++ )
656 if( kernel.at<uchar>(i, j) != 0 )
657 ofs.push_back(i*step + j*cn);
// (Elided guard) presumably: if ofs is empty (all-zero kernel), fall back
// to the anchor position alone so the op degenerates to identity.
659 ofs.push_back(anchor.y*step + anchor.x*cn);
661 switch( src.depth() )
664 erode_<uchar>(src, dst, ofs);
667 erode_<schar>(src, dst, ofs);
670 erode_<ushort>(src, dst, ofs);
673 erode_<short>(src, dst, ofs);
676 erode_<int>(src, dst, ofs);
679 erode_<float>(src, dst, ofs);
682 erode_<double>(src, dst, ofs);
// Reference cv::dilate -- mirror of erode() above: constant borders are padded
// with the type's MINIMUM value (the neutral element of max) and the typed
// dilate_ helper takes per-pixel maxima over the kernel offsets.
689 void dilate(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
690 int borderType, const Scalar& _borderValue)
692 Mat kernel = _kernel, src;
693 Scalar borderValue = _borderValue;
// (Elided guard) presumably: empty kernel defaults to 3x3 all-ones.
695 kernel = Mat::ones(3, 3, CV_8U);
698 CV_Assert( kernel.type() == CV_8U );
700 if( anchor == Point(-1,-1) )
701 anchor = Point(kernel.cols/2, kernel.rows/2);
702 if( borderType == IPL_BORDER_CONSTANT )
703 borderValue = getMinVal(src.depth());
704 copyMakeBorder(_src, src, anchor.y, kernel.rows - anchor.y - 1,
705 anchor.x, kernel.cols - anchor.x - 1,
706 borderType, borderValue);
707 dst.create( _src.size(), src.type() );
710 int step = (int)(src.step/src.elemSize1()), cn = src.channels();
711 for( int i = 0; i < kernel.rows; i++ )
712 for( int j = 0; j < kernel.cols; j++ )
713 if( kernel.at<uchar>(i, j) != 0 )
714 ofs.push_back(i*step + j*cn);
// (Elided guard) presumably: anchor fallback for an all-zero kernel.
716 ofs.push_back(anchor.y*step + anchor.x*cn);
718 switch( src.depth() )
721 dilate_<uchar>(src, dst, ofs);
724 dilate_<schar>(src, dst, ofs);
727 dilate_<ushort>(src, dst, ofs);
730 dilate_<short>(src, dst, ofs);
733 dilate_<int>(src, dst, ofs);
736 dilate_<float>(src, dst, ofs);
739 dilate_<double>(src, dst, ofs);
// Typed correlation kernel: each output pixel is the double-precision dot
// product of source samples (at precomputed flat offsets) with the kernel
// coefficients. Output rows are CV_64F regardless of the input type.
747 template<typename _Tp> static void
748 filter2D_(const Mat& src, Mat& dst, const vector<int>& ofsvec, const vector<double>& coeffvec)
750 const int* ofs = &ofsvec[0];
751 const double* coeff = &coeffvec[0];
752 int width = dst.cols*dst.channels(), ncoeffs = (int)ofsvec.size();
754 for( int y = 0; y < dst.rows; y++ )
756 const _Tp* sptr = src.ptr<_Tp>(y);
757 double* dptr = dst.ptr<double>(y);
759 for( int x = 0; x < width; x++ )
762 for( int i = 0; i < ncoeffs; i++ )
763 s += sptr[x + ofs[i]]*coeff[i];
// Reference cv::filter2D. Pads the source around the anchor, converts the
// kernel to doubles, correlates into a CV_64F intermediate `_dst`, then
// converts to the requested depth adding `delta` in the final conversion.
// NOTE(review): constant borders use getMinVal here -- unlike the real
// cv::filter2D which uses the user's borderValue; presumably intentional for
// the test harness, but worth confirming against the full source.
770 void filter2D(const Mat& _src, Mat& dst, int ddepth, const Mat& kernel,
771 Point anchor, double delta, int borderType, const Scalar& _borderValue)
774 Scalar borderValue = _borderValue;
775 CV_Assert( kernel.type() == CV_32F || kernel.type() == CV_64F );
776 if( anchor == Point(-1,-1) )
777 anchor = Point(kernel.cols/2, kernel.rows/2);
778 if( borderType == IPL_BORDER_CONSTANT )
779 borderValue = getMinVal(src.depth());
780 copyMakeBorder(_src, src, anchor.y, kernel.rows - anchor.y - 1,
781 anchor.x, kernel.cols - anchor.x - 1,
782 borderType, borderValue);
783 _dst.create( _src.size(), CV_MAKETYPE(CV_64F, src.channels()) );
// Kernel coefficients flattened row-major into doubles (cmat aliases coeff).
786 vector<double> coeff(kernel.rows*kernel.cols);
787 Mat cmat(kernel.rows, kernel.cols, CV_64F, &coeff[0]);
788 convert(kernel, cmat, cmat.type());
// Every kernel cell contributes (no sparsity filtering, unlike erode/dilate).
790 int step = (int)(src.step/src.elemSize1()), cn = src.channels();
791 for( int i = 0; i < kernel.rows; i++ )
792 for( int j = 0; j < kernel.cols; j++ )
793 ofs.push_back(i*step + j*cn);
795 switch( src.depth() )
798 filter2D_<uchar>(src, _dst, ofs, coeff);
801 filter2D_<schar>(src, _dst, ofs, coeff);
804 filter2D_<ushort>(src, _dst, ofs, coeff);
807 filter2D_<short>(src, _dst, ofs, coeff);
810 filter2D_<int>(src, _dst, ofs, coeff);
813 filter2D_<float>(src, _dst, ofs, coeff);
816 filter2D_<double>(src, _dst, ofs, coeff);
// delta is folded into the final depth conversion.
822 convert(_dst, dst, ddepth, 1, delta);
// Maps an out-of-range coordinate p into [0, len) according to the border
// mode (file-local reimplementation so the reference code does not depend on
// the implementation under test). REFLECT_101 differs from REFLECT by not
// repeating the edge pixel (delta = 1). The do/while keeps folding until p is
// in range, handling borders wider than the image.
826 static int borderInterpolate( int p, int len, int borderType )
828 if( (unsigned)p < (unsigned)len )
830 else if( borderType == IPL_BORDER_REPLICATE )
831 p = p < 0 ? 0 : len - 1;
832 else if( borderType == IPL_BORDER_REFLECT || borderType == IPL_BORDER_REFLECT_101 )
834 int delta = borderType == IPL_BORDER_REFLECT_101;
// (Elided branch) presumably mirrors negative p as -p - 1 + delta.
842 p = len - 1 - (p - len) - delta;
844 while( (unsigned)p >= (unsigned)len );
846 else if( borderType == IPL_BORDER_WRAP )
849 p -= ((p-len+1)/len)*len;
// CONSTANT borders have no source pixel; the (elided) return is presumably a
// sentinel such as -1, handled by the caller.
853 else if( borderType == IPL_BORDER_CONSTANT )
856 CV_Error( CV_StsBadArg, "Unknown/unsupported border type" );
// Reference cv::copyMakeBorder. Two regimes:
//  * CONSTANT: a full row of border-value bytes is prebuilt (valvec) and used
//    for the left/right margins of every row and the whole top/bottom bands.
//  * interpolated modes: per-column byte-index tables ltab/rtab are built once
//    with borderInterpolate(), then applied to every row; top/bottom rows are
//    whole-row copies of already-written dst rows.
861 void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int right,
862 int borderType, const Scalar& borderValue)
864 dst.create(src.rows + top + bottom, src.cols + left + right, src.type());
865 int i, j, k, esz = (int)src.elemSize();
866 int width = src.cols*esz, width1 = dst.cols*esz;
868 if( borderType == IPL_BORDER_CONSTANT )
// One full destination row of the scalar, replicated element pattern.
870 vector<uchar> valvec((src.cols + left + right)*esz);
871 uchar* val = &valvec[0];
872 scalarToRawData(borderValue, val, src.type(), (src.cols + left + right)*src.channels());
876 for( i = 0; i < src.rows; i++ )
878 const uchar* sptr = src.ptr(i);
879 uchar* dptr = dst.ptr(i + top) + left;
880 for( j = 0; j < left; j++ )
881 dptr[j - left] = val[j];
// (Elided line) presumably copies the row interior: dptr[j] = sptr[j].
883 for( j = 0; j < width; j++ )
885 for( j = 0; j < right; j++ )
886 dptr[j + width] = val[j];
// Top and bottom bands are pure border-value rows.
889 for( i = 0; i < top; i++ )
891 uchar* dptr = dst.ptr(i);
892 for( j = 0; j < width1; j++ )
896 for( i = 0; i < bottom; i++ )
898 uchar* dptr = dst.ptr(i + top + src.rows);
899 for( j = 0; j < width1; j++ )
// Interpolated modes: build left/right per-byte source-index tables once.
905 vector<int> tabvec((left + right)*esz + 1);
906 int* ltab = &tabvec[0];
907 int* rtab = &tabvec[left*esz];
908 for( i = 0; i < left; i++ )
910 j = borderInterpolate(i - left, src.cols, borderType)*esz;
911 for( k = 0; k < esz; k++ )
912 ltab[i*esz + k] = j + k;
914 for( i = 0; i < right; i++ )
916 j = borderInterpolate(src.cols + i, src.cols, borderType)*esz;
917 for( k = 0; k < esz; k++ )
918 rtab[i*esz + k] = j + k;
923 for( i = 0; i < src.rows; i++ )
925 const uchar* sptr = src.ptr(i);
926 uchar* dptr = dst.ptr(i + top);
928 for( j = 0; j < left; j++ )
929 dptr[j] = sptr[ltab[j]];
// Skip the interior copy when src and dst alias the same row in place.
930 if( dptr + left != sptr )
932 for( j = 0; j < width; j++ )
933 dptr[j + left] = sptr[j];
935 for( j = 0; j < right; j++ )
936 dptr[j + left + width] = sptr[rtab[j]];
// Vertical borders copy whole already-completed dst rows.
939 for( i = 0; i < top; i++ )
941 j = borderInterpolate(i - top, src.rows, borderType);
942 const uchar* sptr = dst.ptr(j + top);
943 uchar* dptr = dst.ptr(i);
945 for( k = 0; k < width1; k++ )
949 for( i = 0; i < bottom; i++ )
951 j = borderInterpolate(i + src.rows, src.rows, borderType);
952 const uchar* sptr = dst.ptr(j + top);
953 uchar* dptr = dst.ptr(i + top + src.rows);
955 for( k = 0; k < width1; k++ )
// Typed min/max scan over one plane, accumulating into caller-provided
// running values/positions (so multi-plane scans compose). startidx offsets
// positions into the global flat index space. Two loops: unmasked, and (the
// masked variant visible below) considering only elements with mask[i] != 0.
962 template<typename _Tp> static void
963 minMaxLoc_(const _Tp* src, size_t total, size_t startidx,
964 double* _minval, double* _maxval,
965 size_t* _minpos, size_t* _maxpos,
// Running extrema are narrowed to _Tp for cheap comparisons in the loop.
968 _Tp maxval = saturate_cast<_Tp>(*_maxval), minval = saturate_cast<_Tp>(*_minval);
969 size_t minpos = *_minpos, maxpos = *_maxpos;
973 for( size_t i = 0; i < total; i++ )
979 minpos = startidx + i;
984 maxpos = startidx + i;
// Masked variant.
990 for( size_t i = 0; i < total; i++ )
993 if( minval > val && mask[i] )
996 minpos = startidx + i;
998 if( maxval < val && mask[i] )
1001 maxpos = startidx + i;
// Converts a flat element index into per-dimension coordinates, iterating
// from the innermost dimension outward; the last dimension's extent is
// multiplied by the channel count so the index addresses individual channels.
1013 static void setpos( const Mat& mtx, vector<int>& pos, size_t idx )
1015 pos.resize(mtx.dims);
1019 for( int i = mtx.dims-1; i >= 0; i-- )
1021 int sz = mtx.size[i]*(i == mtx.dims-1 ? mtx.channels() : 1);
1022 pos[i] = (int)(idx % sz);
// (Elided lines) presumably idx /= sz here, and the second loop below handles
// the idx == 0 sentinel case (e.g. clearing positions); confirm upstream.
1028 for( int i = mtx.dims-1; i >= 0; i-- )
// Reference cv::minMaxLoc for single-channel matrices of any dimensionality,
// with optional CV_8U mask. Scans plane-by-plane, tracking global flat
// indices (startidx begins at 1 so 0 can mean "nothing found"), then converts
// the winning indices to coordinates with setpos().
1033 void minMaxLoc(const Mat& src, double* _minval, double* _maxval,
1034 vector<int>* _minloc, vector<int>* _maxloc,
1037 CV_Assert( src.channels() == 1 );
1038 const Mat *arrays[]={&src, &mask, 0};
1041 NAryMatIterator it(arrays, planes);
1042 size_t startidx = 1, total = planes[0].total();
1043 size_t i, nplanes = it.nplanes;
1044 int depth = src.depth();
// Seed extrema so any real element beats them; integer depths use INT
// limits since all integer values fit in a double exactly.
1045 double maxval = depth < CV_32F ? INT_MIN : depth == CV_32F ? -FLT_MAX : -DBL_MAX;
1046 double minval = depth < CV_32F ? INT_MAX : depth == CV_32F ? FLT_MAX : DBL_MAX;
1047 size_t maxidx = 0, minidx = 0;
1049 for( i = 0; i < nplanes; i++, ++it, startidx += total )
1051 const uchar* sptr = planes[0].data;
1052 const uchar* mptr = planes[1].data;
1057 minMaxLoc_((const uchar*)sptr, total, startidx,
1058 &minval, &maxval, &minidx, &maxidx, mptr);
1061 minMaxLoc_((const schar*)sptr, total, startidx,
1062 &minval, &maxval, &minidx, &maxidx, mptr);
1065 minMaxLoc_((const ushort*)sptr, total, startidx,
1066 &minval, &maxval, &minidx, &maxidx, mptr);
1069 minMaxLoc_((const short*)sptr, total, startidx,
1070 &minval, &maxval, &minidx, &maxidx, mptr);
1073 minMaxLoc_((const int*)sptr, total, startidx,
1074 &minval, &maxval, &minidx, &maxidx, mptr);
1077 minMaxLoc_((const float*)sptr, total, startidx,
1078 &minval, &maxval, &minidx, &maxidx, mptr);
1081 minMaxLoc_((const double*)sptr, total, startidx,
1082 &minval, &maxval, &minidx, &maxidx, mptr);
// Nothing matched (e.g. fully-masked input): report zeros.
1090 minval = maxval = 0;
1097 setpos( src, *_maxloc, maxidx );
1099 setpos( src, *_minloc, minidx );
// Hamming-style norm over a byte buffer: each byte is split into cells of
// cellSize bits (1, 2 or 4; `mask` selects one cell) and every NONZERO cell
// contributes 1 -- i.e. popcount for cellSize==1, and the NORM_HAMMING2-style
// pair count for cellSize==2.
1104 normHamming(const uchar* src, size_t total, int cellSize)
1107 int mask = cellSize == 1 ? 1 : cellSize == 2 ? 3 : cellSize == 4 ? 15 : -1;
1108 CV_Assert( mask >= 0 );
1110 for( size_t i = 0; i < total; i++ )
1112 unsigned a = src[i];
// Shift one cell per iteration; loop exits early once all set bits consumed.
1113 for( ; a != 0; a >>= cellSize )
1114 result += (a & mask) != 0;
// Typed single-array norm accumulator: folds one plane into a running result
// (startval) so multi-plane scans compose. Handles NORM_INF (running max of
// |x|), NORM_L1 (sum of |x|) and, in the final (elided `else`) branch, L2 as
// sum of squares -- the sqrt is applied by the caller. Each norm has an
// unmasked loop and a masked per-channel loop (mask is per-pixel, hence the
// separate channel stride c).
1120 template<typename _Tp> static double
1121 norm_(const _Tp* src, size_t total, int cn, int normType, double startval, const uchar* mask)
1124 double result = startval;
1128 if( normType == NORM_INF )
1131 for( i = 0; i < total; i++ )
1132 result = std::max(result, (double)std::abs(0+src[i]));// trick with 0 used to quiet gcc warning
1134 for( int c = 0; c < cn; c++ )
1136 for( i = 0; i < total; i++ )
// (Elided mask guard) presumably: if( mask[i] ) ...
1138 result = std::max(result, (double)std::abs(0+src[i*cn + c]));
1141 else if( normType == NORM_L1 )
1144 for( i = 0; i < total; i++ )
1145 result += std::abs(0+src[i]);
1147 for( int c = 0; c < cn; c++ )
1149 for( i = 0; i < total; i++ )
1151 result += std::abs(0+src[i*cn + c]);
// L2 branch: accumulate v*v (the squaring lines are elided here).
1157 for( i = 0; i < total; i++ )
1163 for( int c = 0; c < cn; c++ )
1165 for( i = 0; i < total; i++ )
1168 double v = src[i*cn + c];
// Two-array overload: same structure as the single-array norm_ above, but
// accumulates over the element-wise DIFFERENCE src1[i] - src2[i]
// (INF: max |d|, L1: sum |d|, L2: sum d*d with the caller applying sqrt).
1177 template<typename _Tp> static double
1178 norm_(const _Tp* src1, const _Tp* src2, size_t total, int cn, int normType, double startval, const uchar* mask)
1181 double result = startval;
1185 if( normType == NORM_INF )
1188 for( i = 0; i < total; i++ )
1189 result = std::max(result, (double)std::abs(src1[i] - src2[i]));
1191 for( int c = 0; c < cn; c++ )
1193 for( i = 0; i < total; i++ )
// (Elided mask guard) presumably: if( mask[i] ) ...
1195 result = std::max(result, (double)std::abs(src1[i*cn + c] - src2[i*cn + c]));
1198 else if( normType == NORM_L1 )
1201 for( i = 0; i < total; i++ )
1202 result += std::abs(src1[i] - src2[i]);
1204 for( int c = 0; c < cn; c++ )
1206 for( i = 0; i < total; i++ )
1208 result += std::abs(src1[i*cn + c] - src2[i*cn + c]);
// L2 branch (squaring/accumulation lines elided).
1214 for( i = 0; i < total; i++ )
1216 double v = src1[i] - src2[i];
1220 for( int c = 0; c < cn; c++ )
1222 for( i = 0; i < total; i++ )
1225 double v = src1[i*cn + c] - src2[i*cn + c];
// Reference cv::norm(src). Hamming norms take a separate path: a non-empty
// mask is applied with bitwise_and and the function recurses; otherwise
// normHamming() is summed over planes (cellSize 1 for NORM_HAMMING, 2 for
// NORM_HAMMING2). Other norms are accumulated by norm_() plane-by-plane;
// NORM_L2SQR reuses the L2 accumulation and simply skips the final sqrt.
1234 double norm(const Mat& src, int normType, const Mat& mask)
1236 if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
1241 bitwise_and(src, mask, temp);
1242 return norm(temp, normType, Mat());
1245 CV_Assert( src.depth() == CV_8U );
1247 const Mat *arrays[]={&src, 0};
1250 NAryMatIterator it(arrays, planes);
1251 size_t total = planes[0].total();
1252 size_t i, nplanes = it.nplanes;
1254 int cellSize = normType == NORM_HAMMING ? 1 : 2;
1256 for( i = 0; i < nplanes; i++, ++it )
1257 result += normHamming(planes[0].data, total, cellSize);
// Non-Hamming path: L2SQR is computed as L2 without the final sqrt.
1260 int normType0 = normType;
1261 normType = normType == NORM_L2SQR ? NORM_L2 : normType;
1263 CV_Assert( mask.empty() || (src.size == mask.size && mask.type() == CV_8U) );
1264 CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
1266 const Mat *arrays[]={&src, &mask, 0};
1269 NAryMatIterator it(arrays, planes);
1270 size_t total = planes[0].total();
1271 size_t i, nplanes = it.nplanes;
1272 int depth = src.depth(), cn = planes[0].channels();
1275 for( i = 0; i < nplanes; i++, ++it )
1277 const uchar* sptr = planes[0].data;
1278 const uchar* mptr = planes[1].data;
1283 result = norm_((const uchar*)sptr, total, cn, normType, result, mptr);
1286 result = norm_((const schar*)sptr, total, cn, normType, result, mptr);
1289 result = norm_((const ushort*)sptr, total, cn, normType, result, mptr);
1292 result = norm_((const short*)sptr, total, cn, normType, result, mptr);
1295 result = norm_((const int*)sptr, total, cn, normType, result, mptr);
1298 result = norm_((const float*)sptr, total, cn, normType, result, mptr);
1301 result = norm_((const double*)sptr, total, cn, normType, result, mptr);
1304 CV_Error(CV_StsUnsupportedFormat, "");
// Only true L2 (not L2SQR) takes the square root.
1307 if( normType0 == NORM_L2 )
1308 result = sqrt(result);
// Reference cv::norm(src1, src2): the difference-norm counterpart of the
// single-array overload above. Hamming norms are computed on the XOR of the
// two inputs (masked via bitwise_and when a mask is given); other norms use
// the two-array norm_() accumulator over the element-wise differences.
1313 double norm(const Mat& src1, const Mat& src2, int normType, const Mat& mask)
1315 if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
// Differing bits of src1/src2 become set bits of temp.
1318 bitwise_xor(src1, src2, temp);
1320 bitwise_and(temp, mask, temp);
1322 CV_Assert( temp.depth() == CV_8U );
1324 const Mat *arrays[]={&temp, 0};
1327 NAryMatIterator it(arrays, planes);
1328 size_t total = planes[0].total();
1329 size_t i, nplanes = it.nplanes;
1331 int cellSize = normType == NORM_HAMMING ? 1 : 2;
1333 for( i = 0; i < nplanes; i++, ++it )
1334 result += normHamming(planes[0].data, total, cellSize);
// Non-Hamming path mirrors the single-array overload (L2SQR = L2, no sqrt).
1337 int normType0 = normType;
1338 normType = normType == NORM_L2SQR ? NORM_L2 : normType;
1340 CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
1341 CV_Assert( mask.empty() || (src1.size == mask.size && mask.type() == CV_8U) );
1342 CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
1343 const Mat *arrays[]={&src1, &src2, &mask, 0};
1346 NAryMatIterator it(arrays, planes);
1347 size_t total = planes[0].total();
1348 size_t i, nplanes = it.nplanes;
1349 int depth = src1.depth(), cn = planes[0].channels();
1352 for( i = 0; i < nplanes; i++, ++it )
1354 const uchar* sptr1 = planes[0].data;
1355 const uchar* sptr2 = planes[1].data;
1356 const uchar* mptr = planes[2].data;
1361 result = norm_((const uchar*)sptr1, (const uchar*)sptr2, total, cn, normType, result, mptr);
1364 result = norm_((const schar*)sptr1, (const schar*)sptr2, total, cn, normType, result, mptr);
1367 result = norm_((const ushort*)sptr1, (const ushort*)sptr2, total, cn, normType, result, mptr);
1370 result = norm_((const short*)sptr1, (const short*)sptr2, total, cn, normType, result, mptr);
1373 result = norm_((const int*)sptr1, (const int*)sptr2, total, cn, normType, result, mptr);
1376 result = norm_((const float*)sptr1, (const float*)sptr2, total, cn, normType, result, mptr);
1379 result = norm_((const double*)sptr1, (const double*)sptr2, total, cn, normType, result, mptr);
1382 CV_Error(CV_StsUnsupportedFormat, "");
1385 if( normType0 == NORM_L2 )
1386 result = sqrt(result);
// Typed dot product of two buffers accumulated in double precision
// (the (double) cast prevents integer overflow for integer _Tp).
1391 template<typename _Tp> static double
1392 crossCorr_(const _Tp* src1, const _Tp* src2, size_t total)
1395 for( size_t i = 0; i < total; i++ )
1396 result += (double)src1[i]*src2[i];
// Reference cross-correlation (full dot product) of two same-shaped matrices,
// summed across all planes and channels via the typed crossCorr_ helper.
1400 double crossCorr(const Mat& src1, const Mat& src2)
1402 CV_Assert( src1.size == src2.size && src1.type() == src2.type() );
1403 const Mat *arrays[]={&src1, &src2, 0};
1406 NAryMatIterator it(arrays, planes);
1407 size_t total = planes[0].total()*planes[0].channels();
1408 size_t i, nplanes = it.nplanes;
1409 int depth = src1.depth();
1412 for( i = 0; i < nplanes; i++, ++it )
1414 const uchar* sptr1 = planes[0].data;
1415 const uchar* sptr2 = planes[1].data;
1420 result += crossCorr_((const uchar*)sptr1, (const uchar*)sptr2, total);
1423 result += crossCorr_((const schar*)sptr1, (const schar*)sptr2, total);
1426 result += crossCorr_((const ushort*)sptr1, (const ushort*)sptr2, total);
1429 result += crossCorr_((const short*)sptr1, (const short*)sptr2, total);
1432 result += crossCorr_((const int*)sptr1, (const int*)sptr2, total);
1435 result += crossCorr_((const float*)sptr1, (const float*)sptr2, total);
1438 result += crossCorr_((const double*)sptr1, (const double*)sptr2, total);
1441 CV_Error(CV_StsUnsupportedFormat, "");
// Byte-wise binary logic op between two buffers; `c` selects AND / OR / XOR
// (the branch keywords between the loops are elided in this listing).
1449 logicOp_(const uchar* src1, const uchar* src2, uchar* dst, size_t total, char c)
1453 for( i = 0; i < total; i++ )
1454 dst[i] = src1[i] & src2[i];
1456 for( i = 0; i < total; i++ )
1457 dst[i] = src1[i] | src2[i];
1459 for( i = 0; i < total; i++ )
1460 dst[i] = src1[i] ^ src2[i];
// Byte-wise logic op between a buffer and a repeating 96-byte scalar pattern:
// the data is processed in blockSize chunks so `scalar` (prebuilt to cover
// one block) can be indexed directly with j. The final loop (elided operator)
// presumably implements the '~' case: dst[i] = ~src[i].
1464 logicOpS_(const uchar* src, const uchar* scalar, uchar* dst, size_t total, char c)
// 96 is a common multiple of all element sizes up to 32 bytes, so the scalar
// pattern stays element-aligned across blocks -- TODO confirm rationale.
1466 const size_t blockSize = 96;
1469 for( i = 0; i < total; i += blockSize, dst += blockSize, src += blockSize )
1471 size_t sz = MIN(total - i, blockSize);
1472 for( j = 0; j < sz; j++ )
1473 dst[j] = src[j] & scalar[j];
1476 for( i = 0; i < total; i += blockSize, dst += blockSize, src += blockSize )
1478 size_t sz = MIN(total - i, blockSize);
1479 for( j = 0; j < sz; j++ )
1480 dst[j] = src[j] | scalar[j];
1484 for( i = 0; i < total; i += blockSize, dst += blockSize, src += blockSize )
1486 size_t sz = MIN(total - i, blockSize);
1487 for( j = 0; j < sz; j++ )
1488 dst[j] = src[j] ^ scalar[j];
1492 for( i = 0; i < total; i++ )
// Reference bitwise_and/or/xor on two same-typed matrices; works on raw bytes
// (total counts bytes, not elements) so it is type-agnostic.
1497 void logicOp( const Mat& src1, const Mat& src2, Mat& dst, char op )
1499 CV_Assert( op == '&' || op == '|' || op == '^' );
1500 CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
1501 dst.create( src1.dims, &src1.size[0], src1.type() );
1502 const Mat *arrays[]={&src1, &src2, &dst, 0};
1505 NAryMatIterator it(arrays, planes);
1506 size_t total = planes[0].total()*planes[0].elemSize();
1507 size_t i, nplanes = it.nplanes;
1509 for( i = 0; i < nplanes; i++, ++it )
1511 const uchar* sptr1 = planes[0].data;
1512 const uchar* sptr2 = planes[1].data;
1513 uchar* dptr = planes[2].data;
1515 logicOp_(sptr1, sptr2, dptr, total, op);
// Scalar variant: bitwise op ('&','|','^') between a matrix and scalar `s`,
// plus '~' (bitwise NOT, scalar ignored). The scalar is rasterized into a
// 96-byte repeating pattern (matching logicOpS_'s blockSize) before the
// plane loop.
1520 void logicOp(const Mat& src, const Scalar& s, Mat& dst, char op)
1522 CV_Assert( op == '&' || op == '|' || op == '^' || op == '~' );
1523 dst.create( src.dims, &src.size[0], src.type() );
1524 const Mat *arrays[]={&src, &dst, 0};
1527 NAryMatIterator it(arrays, planes);
1528 size_t total = planes[0].total()*planes[0].elemSize();
1529 size_t i, nplanes = it.nplanes;
// Fill buf with enough scalar copies to span one 96-byte block.
1531 scalarToRawData(s, buf, src.type(), (int)(96/planes[0].elemSize1()));
1533 for( i = 0; i < nplanes; i++, ++it )
1535 const uchar* sptr = planes[0].data;
1536 uchar* dptr = planes[1].data;
1538 logicOpS_(sptr, (uchar*)&buf[0], dptr, total, op);
// Typed element-wise comparison of two buffers producing a CV_8U mask
// (255 where the predicate holds, 0 elsewhere). The cmpop switch covers
// LT/LE/EQ/NE/GE/GT (case labels elided in this listing).
1543 template<typename _Tp> static void
1544 compare_(const _Tp* src1, const _Tp* src2, uchar* dst, size_t total, int cmpop)
1550 for( i = 0; i < total; i++ )
1551 dst[i] = src1[i] < src2[i] ? 255 : 0;
1554 for( i = 0; i < total; i++ )
1555 dst[i] = src1[i] <= src2[i] ? 255 : 0;
1558 for( i = 0; i < total; i++ )
1559 dst[i] = src1[i] == src2[i] ? 255 : 0;
1562 for( i = 0; i < total; i++ )
1563 dst[i] = src1[i] != src2[i] ? 255 : 0;
1566 for( i = 0; i < total; i++ )
1567 dst[i] = src1[i] >= src2[i] ? 255 : 0;
1570 for( i = 0; i < total; i++ )
1571 dst[i] = src1[i] > src2[i] ? 255 : 0;
1574 CV_Error(CV_StsBadArg, "Unknown comparison operation");
// Reference element-wise comparison of a typed buffer against a single value
// (value type _WTp may be wider than _Tp, e.g. int for 8/16-bit data).
// dst[i] = 255 where the predicate holds, 0 otherwise; case labels for cmpop
// are elided in this listing, unknown cmpop falls through to CV_Error.
1579 template<typename _Tp, typename _WTp> static void
1580 compareS_(const _Tp* src1, _WTp value, uchar* dst, size_t total, int cmpop)
1586 for( i = 0; i < total; i++ )
1587 dst[i] = src1[i] < value ? 255 : 0;
1590 for( i = 0; i < total; i++ )
1591 dst[i] = src1[i] <= value ? 255 : 0;
1594 for( i = 0; i < total; i++ )
1595 dst[i] = src1[i] == value ? 255 : 0;
1598 for( i = 0; i < total; i++ )
1599 dst[i] = src1[i] != value ? 255 : 0;
1602 for( i = 0; i < total; i++ )
1603 dst[i] = src1[i] >= value ? 255 : 0;
1606 for( i = 0; i < total; i++ )
1607 dst[i] = src1[i] > value ? 255 : 0;
1610 CV_Error(CV_StsBadArg, "Unknown comparison operation");
// Reference cv::compare for two single-channel arrays of the same type/size:
// dst becomes CV_8U with 255/0 per element. The switch(depth)/case labels
// between the compare_ calls are elided in this listing.
1615 void compare(const Mat& src1, const Mat& src2, Mat& dst, int cmpop)
1617 CV_Assert( src1.type() == src2.type() && src1.channels() == 1 && src1.size == src2.size );
1618 dst.create( src1.dims, &src1.size[0], CV_8U );
1619 const Mat *arrays[]={&src1, &src2, &dst, 0};
1622 NAryMatIterator it(arrays, planes);
1623 size_t total = planes[0].total();
1624 size_t i, nplanes = it.nplanes;
1625 int depth = src1.depth();
1627 for( i = 0; i < nplanes; i++, ++it )
1629 const uchar* sptr1 = planes[0].data;
1630 const uchar* sptr2 = planes[1].data;
1631 uchar* dptr = planes[2].data;
// per-depth dispatch: CV_8U..CV_64F
1636 compare_((const uchar*)sptr1, (const uchar*)sptr2, dptr, total, cmpop);
1639 compare_((const schar*)sptr1, (const schar*)sptr2, dptr, total, cmpop);
1642 compare_((const ushort*)sptr1, (const ushort*)sptr2, dptr, total, cmpop);
1645 compare_((const short*)sptr1, (const short*)sptr2, dptr, total, cmpop);
1648 compare_((const int*)sptr1, (const int*)sptr2, dptr, total, cmpop);
1651 compare_((const float*)sptr1, (const float*)sptr2, dptr, total, cmpop);
1654 compare_((const double*)sptr1, (const double*)sptr2, dptr, total, cmpop);
1657 CV_Error(CV_StsUnsupportedFormat, "");
// Reference cv::compare of a single-channel array against a scalar value.
// Integer depths compare against saturate_cast<int>(value); float/double
// depths compare against the raw double. Depth case labels are elided.
1662 void compare(const Mat& src, double value, Mat& dst, int cmpop)
1664 CV_Assert( src.channels() == 1 );
1665 dst.create( src.dims, &src.size[0], CV_8U );
1666 const Mat *arrays[]={&src, &dst, 0};
1669 NAryMatIterator it(arrays, planes);
1670 size_t total = planes[0].total();
1671 size_t i, nplanes = it.nplanes;
1672 int depth = src.depth();
// saturated once up front so all integer depths see the same threshold
1673 int ivalue = saturate_cast<int>(value);
1675 for( i = 0; i < nplanes; i++, ++it )
1677 const uchar* sptr = planes[0].data;
1678 uchar* dptr = planes[1].data;
1683 compareS_((const uchar*)sptr, ivalue, dptr, total, cmpop);
1686 compareS_((const schar*)sptr, ivalue, dptr, total, cmpop);
1689 compareS_((const ushort*)sptr, ivalue, dptr, total, cmpop);
1692 compareS_((const short*)sptr, ivalue, dptr, total, cmpop);
1695 compareS_((const int*)sptr, ivalue, dptr, total, cmpop);
1698 compareS_((const float*)sptr, value, dptr, total, cmpop);
1701 compareS_((const double*)sptr, value, dptr, total, cmpop);
1704 CV_Error(CV_StsUnsupportedFormat, "");
// Scans two integer buffers and tracks the maximum absolute difference;
// records in idx the 1-based global position (startidx + i, presumably —
// the assignment line is elided) of the first element whose difference
// exceeds imaxdiff. Returns the max difference (return statement elided).
1710 template<typename _Tp> double
1711 cmpUlpsInt_(const _Tp* src1, const _Tp* src2, size_t total, int imaxdiff,
1712 size_t startidx, size_t& idx)
1715 int realmaxdiff = 0;
1716 for( i = 0; i < total; i++ )
// safe in int for all sub-int _Tp (uchar..short): no overflow possible
1718 int diff = std::abs(src1[i] - src2[i]);
1719 if( realmaxdiff < diff )
1722 if( diff > imaxdiff && idx == 0 )
// Specialization for CV_32S data: int-int can overflow 32-bit arithmetic
// (e.g. INT_MAX - INT_MIN), so differences are computed in double precision.
1730 template<> double cmpUlpsInt_<int>(const int* src1, const int* src2,
1731 size_t total, int imaxdiff,
1732 size_t startidx, size_t& idx)
1735 double realmaxdiff = 0;
1736 for( i = 0; i < total; i++ )
1738 double diff = fabs((double)src1[i] - (double)src2[i]);
1739 if( realmaxdiff < diff )
1742 if( diff > imaxdiff && idx == 0 )
// ULP-style comparison of float data viewed through its int32 bit pattern.
// Negative values are remapped with a ^= 0x7fffffff so the int ordering is
// monotonic with the float ordering; the integer distance then approximates
// the distance in ULPs. Max-diff tracking/return lines are elided here.
1751 cmpUlpsFlt_(const int* src1, const int* src2, size_t total, int imaxdiff, size_t startidx, size_t& idx)
1753 const int C = 0x7fffffff;
1754 int realmaxdiff = 0;
1756 for( i = 0; i < total; i++ )
1758 int a = src1[i], b = src2[i];
1759 if( a < 0 ) a ^= C; if( b < 0 ) b ^= C;
1760 int diff = std::abs(a - b);
1761 if( realmaxdiff < diff )
1764 if( diff > imaxdiff && idx == 0 )
// 64-bit counterpart of cmpUlpsFlt_ for double data viewed as int64 bit
// patterns; the ULP distance itself is accumulated in double since int64
// differences may not be exactly representable but only a threshold test
// and a max are needed.
1773 cmpUlpsFlt_(const int64* src1, const int64* src2, size_t total, int imaxdiff, size_t startidx, size_t& idx)
1775 const int64 C = CV_BIG_INT(0x7fffffffffffffff);
1776 double realmaxdiff = 0;
1778 for( i = 0; i < total; i++ )
1780 int64 a = src1[i], b = src2[i];
1781 if( a < 0 ) a ^= C; if( b < 0 ) b ^= C;
1782 double diff = fabs((double)a - (double)b);
1783 if( realmaxdiff < diff )
1786 if( diff > imaxdiff && idx == 0 )
// Compares two arrays within imaxDiff units: absolute difference for integer
// depths, ULP distance for CV_32F/CV_64F (their data is reinterpreted as
// int32/int64). On mismatch, idx holds the 1-based flattened position of the
// first offending element and loc (if given) receives its coordinates via
// setpos. Returns presumably idx == 0 — the return statement is elided.
1793 bool cmpUlps(const Mat& src1, const Mat& src2, int imaxDiff, double* _realmaxdiff, vector<int>* loc)
1795 CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
1796 const Mat *arrays[]={&src1, &src2, 0};
1798 NAryMatIterator it(arrays, planes);
1799 size_t total = planes[0].total()*planes[0].channels();
1800 size_t i, nplanes = it.nplanes;
1801 int depth = src1.depth();
// startidx is 1-based so that idx == 0 can mean "no mismatch found yet"
1802 size_t startidx = 1, idx = 0;
1806 for( i = 0; i < nplanes; i++, ++it, startidx += total )
1808 const uchar* sptr1 = planes[0].data;
1809 const uchar* sptr2 = planes[1].data;
1810 double realmaxdiff = 0;
1815 realmaxdiff = cmpUlpsInt_((const uchar*)sptr1, (const uchar*)sptr2, total, imaxDiff, startidx, idx);
1818 realmaxdiff = cmpUlpsInt_((const schar*)sptr1, (const schar*)sptr2, total, imaxDiff, startidx, idx);
1821 realmaxdiff = cmpUlpsInt_((const ushort*)sptr1, (const ushort*)sptr2, total, imaxDiff, startidx, idx);
1824 realmaxdiff = cmpUlpsInt_((const short*)sptr1, (const short*)sptr2, total, imaxDiff, startidx, idx);
1827 realmaxdiff = cmpUlpsInt_((const int*)sptr1, (const int*)sptr2, total, imaxDiff, startidx, idx);
// float/double planes are compared bitwise via the ULP helpers
1830 realmaxdiff = cmpUlpsFlt_((const int*)sptr1, (const int*)sptr2, total, imaxDiff, startidx, idx);
1833 realmaxdiff = cmpUlpsFlt_((const int64*)sptr1, (const int64*)sptr2, total, imaxDiff, startidx, idx);
1836 CV_Error(CV_StsUnsupportedFormat, "");
1840 *_realmaxdiff = std::max(*_realmaxdiff, realmaxdiff);
1843 setpos(src1, *loc, idx);
// Scans an integer buffer for a value outside [imin, imax]; the lines that
// read the element into val and record idx = startidx + i on the first
// violation are elided in this listing.
1848 template<typename _Tp> static void
1849 checkInt_(const _Tp* a, size_t total, int imin, int imax, size_t startidx, size_t& idx)
1851 for( size_t i = 0; i < total; i++ )
1854 if( val < imin || val > imax )
// Floating-point counterpart of checkInt_: additionally rejects NaN and Inf
// before the range test; idx-recording lines are elided in this listing.
1863 template<typename _Tp> static void
1864 checkFlt_(const _Tp* a, size_t total, double fmin, double fmax, size_t startidx, size_t& idx)
1866 for( size_t i = 0; i < total; i++ )
1869 if( cvIsNaN(val) || cvIsInf(val) || val < fmin || val > fmax )
1878 // checks that the array does not have NaNs and/or Infs and all the elements are
1879 // within [min_val,max_val). idx is the index of the first "bad" element.
// Returns 0 on success, -1 on the first out-of-range/NaN/Inf element, whose
// coordinates are written into *_idx (via setpos) when _idx is non-null.
1880 int check( const Mat& a, double fmin, double fmax, vector<int>* _idx )
1882 const Mat *arrays[]={&a, 0};
1884 NAryMatIterator it(arrays, &plane);
1885 size_t total = plane.total()*plane.channels();
1886 size_t i, nplanes = it.nplanes;
1887 int depth = a.depth();
// startidx is 1-based so idx == 0 means "all elements valid so far"
1888 size_t startidx = 1, idx = 0;
1889 int imin = 0, imax = 0;
// for integer depths, round the float bounds inward to integer bounds
1891 if( depth <= CV_32S )
1893 imin = cvCeil(fmin);
1894 imax = cvFloor(fmax);
1897 for( i = 0; i < nplanes; i++, ++it, startidx += total )
1899 const uchar* aptr = plane.data;
1904 checkInt_((const uchar*)aptr, total, imin, imax, startidx, idx);
1907 checkInt_((const schar*)aptr, total, imin, imax, startidx, idx);
1910 checkInt_((const ushort*)aptr, total, imin, imax, startidx, idx);
1913 checkInt_((const short*)aptr, total, imin, imax, startidx, idx);
1916 checkInt_((const int*)aptr, total, imin, imax, startidx, idx);
1919 checkFlt_((const float*)aptr, total, fmin, fmax, startidx, idx);
1922 checkFlt_((const double*)aptr, total, fmin, fmax, startidx, idx);
1925 CV_Error(CV_StsUnsupportedFormat, "");
1932 if(idx != 0 && _idx)
1933 setpos(a, *_idx, idx);
1934 return idx == 0 ? 0 : -1;
1937 #define CMP_EPS_OK 0
1938 #define CMP_EPS_BIG_DIFF -1
1939 #define CMP_EPS_INVALID_TEST_DATA -2 // there is NaN or Inf value in test data
1940 #define CMP_EPS_INVALID_REF_DATA -3 // there is NaN or Inf value in reference data
1942 // compares two arrays. max_diff is the maximum actual difference,
1943 // success_err_level is maximum allowed difference, idx is the index of the first
1944 // element for which difference is >success_err_level
1945 // (or index of element with the maximum difference)
// For integer depths the check is an absolute-difference test (cmpUlpsInt_);
// for float/double it is a relative-error test, either per-element
// (|b|+1 threshold) or against the global inf-norm of the reference array.
1946 int cmpEps( const Mat& arr, const Mat& refarr, double* _realmaxdiff,
1947 double success_err_level, vector<int>* _idx,
1948 bool element_wise_relative_error )
1950 CV_Assert( arr.type() == refarr.type() && arr.size == refarr.size );
// integer depths use a floor'ed integer error level; floats keep the double
1952 int ilevel = refarr.depth() <= CV_32S ? cvFloor(success_err_level) : 0;
1953 int result = CMP_EPS_OK;
1955 const Mat *arrays[]={&arr, &refarr, 0};
1957 NAryMatIterator it(arrays, planes);
1958 size_t total = planes[0].total()*planes[0].channels(), j = total;
1959 size_t i, nplanes = it.nplanes;
1960 int depth = arr.depth();
1961 size_t startidx = 1, idx = 0;
1962 double realmaxdiff = 0, maxval = 0;
// global-relative mode: normalize by the reference inf-norm (at least 1)
1967 if( refarr.depth() >= CV_32F && !element_wise_relative_error )
1969 maxval = cvtest::norm( refarr, NORM_INF );
1970 maxval = MAX(maxval, 1.);
1973 for( i = 0; i < nplanes; i++, ++it, startidx += total )
1975 const uchar* sptr1 = planes[0].data;
1976 const uchar* sptr2 = planes[1].data;
1981 realmaxdiff = cmpUlpsInt_((const uchar*)sptr1, (const uchar*)sptr2, total, ilevel, startidx, idx);
1984 realmaxdiff = cmpUlpsInt_((const schar*)sptr1, (const schar*)sptr2, total, ilevel, startidx, idx);
1987 realmaxdiff = cmpUlpsInt_((const ushort*)sptr1, (const ushort*)sptr2, total, ilevel, startidx, idx);
1990 realmaxdiff = cmpUlpsInt_((const short*)sptr1, (const short*)sptr2, total, ilevel, startidx, idx);
1993 realmaxdiff = cmpUlpsInt_((const int*)sptr1, (const int*)sptr2, total, ilevel, startidx, idx);
// CV_32F path: per-element relative-difference check
1996 for( j = 0; j < total; j++ )
1998 double a_val = ((float*)sptr1)[j];
1999 double b_val = ((float*)sptr2)[j];
// identical bit patterns are always equal (also makes NaN == same NaN pass)
2001 if( ((int*)sptr1)[j] == ((int*)sptr2)[j] )
2003 if( cvIsNaN(a_val) || cvIsInf(a_val) )
2005 result = CMP_EPS_INVALID_TEST_DATA;
2009 if( cvIsNaN(b_val) || cvIsInf(b_val) )
2011 result = CMP_EPS_INVALID_REF_DATA;
2015 a_val = fabs(a_val - b_val);
// +1 keeps the per-element threshold sane near zero-valued references
2016 threshold = element_wise_relative_error ? fabs(b_val) + 1 : maxval;
2017 if( a_val > threshold*success_err_level )
2019 realmaxdiff = a_val/threshold;
// CV_64F path: same logic with int64 bit-pattern shortcut
2027 for( j = 0; j < total; j++ )
2029 double a_val = ((double*)sptr1)[j];
2030 double b_val = ((double*)sptr2)[j];
2032 if( ((int64*)sptr1)[j] == ((int64*)sptr2)[j] )
2034 if( cvIsNaN(a_val) || cvIsInf(a_val) )
2036 result = CMP_EPS_INVALID_TEST_DATA;
2040 if( cvIsNaN(b_val) || cvIsInf(b_val) )
2042 result = CMP_EPS_INVALID_REF_DATA;
2046 a_val = fabs(a_val - b_val);
2047 threshold = element_wise_relative_error ? fabs(b_val) + 1 : maxval;
2048 if( a_val > threshold*success_err_level )
2050 realmaxdiff = a_val/threshold;
2058 return CMP_EPS_BIG_DIFF;
2061 *_realmaxdiff = MAX(*_realmaxdiff, realmaxdiff);
2066 if( result == 0 && idx != 0 )
2067 result = CMP_EPS_BIG_DIFF;
// invalid data: report an "infinite" difference (exp(1000) overflows to inf)
2069 if( result < -1 && _realmaxdiff )
2070 *_realmaxdiff = exp(1000.);
2072 setpos(arr, *_idx, idx);
// Wrapper around cmpEps that translates its result codes into TS failure
// codes and logs a human-readable message (with the offending element's
// position) through the test system.
2078 int cmpEps2( TS* ts, const Mat& a, const Mat& b, double success_err_level,
2079 bool element_wise_relative_error, const char* desc )
2084 int code = cmpEps( a, b, &diff, success_err_level, &idx, element_wise_relative_error );
2088 case CMP_EPS_BIG_DIFF:
2089 sprintf( msg, "%s: Too big difference (=%g)", desc, diff );
2090 code = TS::FAIL_BAD_ACCURACY;
2092 case CMP_EPS_INVALID_TEST_DATA:
2093 sprintf( msg, "%s: Invalid output", desc );
2094 code = TS::FAIL_INVALID_OUTPUT;
2096 case CMP_EPS_INVALID_REF_DATA:
2097 sprintf( msg, "%s: Invalid reference output", desc );
2098 code = TS::FAIL_INVALID_OUTPUT;
// log position info in a form appropriate for scalars, vectors and arrays
2106 if( a.total() == 1 )
2108 ts->printf( TS::LOG, "%s\n", msg );
// 1-D (row or column) case: a single linear index is enough
2110 else if( a.dims == 2 && (a.rows == 1 || a.cols == 1) )
2112 ts->printf( TS::LOG, "%s at element %d\n", msg, idx[0] + idx[1] );
2116 string idxstr = vec2str(", ", &idx[0], idx.size());
2117 ts->printf( TS::LOG, "%s at (%s)\n", msg, idxstr.c_str() );
// Convenience overload for raw double arrays: wraps them as 1 x len CV_64F
// headers (no copy) and runs cmpEps2 with element-wise relative error.
2125 int cmpEps2_64f( TS* ts, const double* val, const double* refval, int len,
2126 double eps, const char* param_name )
2128 Mat _val(1, len, CV_64F, (void*)val);
2129 Mat _refval(1, len, CV_64F, (void*)refval);
2131 return cmpEps2( ts, _val, _refval, eps, true, param_name );
// Reference generalized matrix multiply: D = alpha*A*B + beta*C, computed
// naively in double precision. cn selects real (cn==1) vs complex (cn==2,
// interleaved re/im) arithmetic; steps/deltas are in elements and allow the
// caller to express transposition by swapping them. Pointer-advance lines
// inside the k-loops are elided in this listing.
2135 template<typename _Tp> static void
2136 GEMM_(const _Tp* a_data0, int a_step, int a_delta,
2137 const _Tp* b_data0, int b_step, int b_delta,
2138 const _Tp* c_data0, int c_step, int c_delta,
2139 _Tp* d_data, int d_step,
2140 int d_rows, int d_cols, int a_cols, int cn,
2141 double alpha, double beta)
2143 for( int i = 0; i < d_rows; i++, d_data += d_step, c_data0 += c_step, a_data0 += a_step )
2145 for( int j = 0; j < d_cols; j++ )
2147 const _Tp* a_data = a_data0;
2148 const _Tp* b_data = b_data0 + j*b_delta;
2149 const _Tp* c_data = c_data0 + j*c_delta;
// real case: plain dot product accumulated in double
2154 for( int k = 0; k < a_cols; k++ )
2156 s += ((double)a_data[0])*b_data[0];
2160 d_data[j] = (_Tp)(s*alpha + (c_data ? c_data[0]*beta : 0));
// complex case: accumulate re/im parts of the complex dot product
2164 double s_re = 0, s_im = 0;
2166 for( int k = 0; k < a_cols; k++ )
2168 s_re += ((double)a_data[0])*b_data[0] - ((double)a_data[1])*b_data[1];
2169 s_im += ((double)a_data[0])*b_data[1] + ((double)a_data[1])*b_data[0];
2179 s_re += c_data[0]*beta;
2180 s_im += c_data[1]*beta;
2183 d_data[j*2] = (_Tp)s_re;
2184 d_data[j*2+1] = (_Tp)s_im;
// Reference cv::gemm: d = alpha*op(a)*op(b) + beta*op(c), where the
// GEMM_1_T/2_T/3_T flags transpose the respective operand. Transposition is
// implemented without copying, by swapping the row-step and element-delta
// passed to GEMM_. The branches that clone inputs aliasing d (lines 2196,
// 2199, 2202) have their clone statements elided in this listing.
2191 void gemm( const Mat& _a, const Mat& _b, double alpha,
2192 const Mat& _c, double beta, Mat& d, int flags )
2194 Mat a = _a, b = _b, c = _c;
2196 if( a.data == d.data )
2199 if( b.data == d.data )
2202 if( !c.empty() && c.data == d.data && (flags & cv::GEMM_3_T) )
2205 int a_rows = a.rows, a_cols = a.cols, b_rows = b.rows, b_cols = b.cols;
2206 int cn = a.channels();
// steps are in elements (step1), deltas in channels
2207 int a_step = (int)a.step1(), a_delta = cn;
2208 int b_step = (int)b.step1(), b_delta = cn;
2209 int c_rows = 0, c_cols = 0, c_step = 0, c_delta = 0;
2211 CV_Assert( a.type() == b.type() && a.dims == 2 && b.dims == 2 && cn <= 2 );
2213 if( flags & cv::GEMM_1_T )
2215 std::swap( a_rows, a_cols );
2216 std::swap( a_step, a_delta );
2219 if( flags & cv::GEMM_2_T )
2221 std::swap( b_rows, b_cols );
2222 std::swap( b_step, b_delta );
2229 c_step = (int)c.step1();
2232 if( flags & cv::GEMM_3_T )
2234 std::swap( c_rows, c_cols );
2235 std::swap( c_step, c_delta );
2238 CV_Assert( c.dims == 2 && c.type() == a.type() && c_rows == a_rows && c_cols == b_cols );
2241 d.create(a_rows, b_cols, a.type());
2243 if( a.depth() == CV_32F )
2244 GEMM_(a.ptr<float>(), a_step, a_delta, b.ptr<float>(), b_step, b_delta,
2245 !c.empty() ? c.ptr<float>() : 0, c_step, c_delta, d.ptr<float>(),
2246 (int)d.step1(), a_rows, b_cols, a_cols, cn, alpha, beta );
2248 GEMM_(a.ptr<double>(), a_step, a_delta, b.ptr<double>(), b_step, b_delta,
2249 !c.empty() ? c.ptr<double>() : 0, c_step, c_delta, d.ptr<double>(),
2250 (int)d.step1(), a_rows, b_cols, a_cols, cn, alpha, beta );
// Applies a dcn x (scn+1) affine transform to every scn-channel element:
// dptr[j] = sum_k mat[j][k]*sptr[k] + mat[j][scn], with saturation to _Tp.
// The matrix is stored row-major with the shift in the last column.
2254 template<typename _Tp> static void
2255 transform_(const _Tp* sptr, _Tp* dptr, size_t total, int scn, int dcn, const double* mat)
2257 for( size_t i = 0; i < total; i++, sptr += scn, dptr += dcn )
2259 for( int j = 0; j < dcn; j++ )
// start from the shift term (last column of row j)
2261 double s = mat[j*(scn + 1) + scn];
2262 for( int k = 0; k < scn; k++ )
2263 s += mat[j*(scn + 1) + k]*sptr[k];
2264 dptr[j] = saturate_cast<_Tp>(s);
// Reference cv::transform: applies transmat (CV_32F or CV_64F) plus an
// optional shift vector to each pixel of src, writing into dst. The
// transform matrix is first copied into a local double mat[] in
// cn x (cn+1) layout (shift in the last column, 0 when no shift is given).
// Depth case labels in the dispatch are elided in this listing.
2270 void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& _shift )
2274 int scn = src.channels();
2275 int dcn = dst.channels();
2276 int depth = src.depth();
2277 int mattype = transmat.depth();
// reshape to single-channel so at<>() indexing below is uniform
2278 Mat shift = _shift.reshape(1, 0);
2279 bool haveShift = !shift.empty();
2281 CV_Assert( scn <= 4 && dcn <= 4 &&
2282 (mattype == CV_32F || mattype == CV_64F) &&
2283 (!haveShift || (shift.type() == mattype && (shift.rows == 1 || shift.cols == 1))) );
2285 // prepare cn x (cn + 1) transform matrix
2286 if( mattype == CV_32F )
2288 for( int i = 0; i < transmat.rows; i++ )
2290 mat[i*(scn+1)+scn] = 0.;
2291 for( int j = 0; j < transmat.cols; j++ )
2292 mat[i*(scn+1)+j] = transmat.at<float>(i,j);
// overwrite the zero shift only when a shift vector was supplied
2294 mat[i*(scn+1)+scn] = shift.at<float>(i);
2299 for( int i = 0; i < transmat.rows; i++ )
2301 mat[i*(scn+1)+scn] = 0.;
2302 for( int j = 0; j < transmat.cols; j++ )
2303 mat[i*(scn+1)+j] = transmat.at<double>(i,j);
2305 mat[i*(scn+1)+scn] = shift.at<double>(i);
2309 const Mat *arrays[]={&src, &dst, 0};
2311 NAryMatIterator it(arrays, planes);
2312 size_t total = planes[0].total();
2313 size_t i, nplanes = it.nplanes;
2315 for( i = 0; i < nplanes; i++, ++it )
2317 const uchar* sptr = planes[0].data;
2318 uchar* dptr = planes[1].data;
2323 transform_((const uchar*)sptr, (uchar*)dptr, total, scn, dcn, mat);
2326 transform_((const schar*)sptr, (schar*)dptr, total, scn, dcn, mat);
2329 transform_((const ushort*)sptr, (ushort*)dptr, total, scn, dcn, mat);
2332 transform_((const short*)sptr, (short*)dptr, total, scn, dcn, mat);
2335 transform_((const int*)sptr, (int*)dptr, total, scn, dcn, mat);
2338 transform_((const float*)sptr, (float*)dptr, total, scn, dcn, mat);
2341 transform_((const double*)sptr, (double*)dptr, total, scn, dcn, mat);
2344 CV_Error(CV_StsUnsupportedFormat, "");
// Element-wise max ('M') or min ('m') of two typed buffers; the if(op...)
// selection lines around the two loops are elided in this listing.
2349 template<typename _Tp> static void
2350 minmax_(const _Tp* src1, const _Tp* src2, _Tp* dst, size_t total, char op)
2353 for( size_t i = 0; i < total; i++ )
2354 dst[i] = std::max(src1[i], src2[i]);
2356 for( size_t i = 0; i < total; i++ )
2357 dst[i] = std::min(src1[i], src2[i]);
// Reference element-wise min/max of two same-type, same-size arrays;
// dispatches per depth (case labels elided) and iterates plane-by-plane.
2360 static void minmax(const Mat& src1, const Mat& src2, Mat& dst, char op)
2362 dst.create(src1.dims, src1.size, src1.type());
2363 CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
2364 const Mat *arrays[]={&src1, &src2, &dst, 0};
2367 NAryMatIterator it(arrays, planes);
2368 size_t total = planes[0].total()*planes[0].channels();
2369 size_t i, nplanes = it.nplanes, depth = src1.depth();
2371 for( i = 0; i < nplanes; i++, ++it )
2373 const uchar* sptr1 = planes[0].data;
2374 const uchar* sptr2 = planes[1].data;
2375 uchar* dptr = planes[2].data;
2380 minmax_((const uchar*)sptr1, (const uchar*)sptr2, (uchar*)dptr, total, op);
2383 minmax_((const schar*)sptr1, (const schar*)sptr2, (schar*)dptr, total, op);
2386 minmax_((const ushort*)sptr1, (const ushort*)sptr2, (ushort*)dptr, total, op);
2389 minmax_((const short*)sptr1, (const short*)sptr2, (short*)dptr, total, op);
2392 minmax_((const int*)sptr1, (const int*)sptr2, (int*)dptr, total, op);
2395 minmax_((const float*)sptr1, (const float*)sptr2, (float*)dptr, total, op);
2398 minmax_((const double*)sptr1, (const double*)sptr2, (double*)dptr, total, op);
2401 CV_Error(CV_StsUnsupportedFormat, "");
// public thin wrappers selecting the op character
2407 void min(const Mat& src1, const Mat& src2, Mat& dst)
2409 minmax( src1, src2, dst, 'm' );
2412 void max(const Mat& src1, const Mat& src2, Mat& dst)
2414 minmax( src1, src2, dst, 'M' );
// Element-wise max ('M') or min ('m') of a buffer against a single value;
// the op-selection lines around the two loops are elided in this listing.
2418 template<typename _Tp> static void
2419 minmax_(const _Tp* src1, _Tp val, _Tp* dst, size_t total, char op)
2422 for( size_t i = 0; i < total; i++ )
2423 dst[i] = std::max(src1[i], val);
2425 for( size_t i = 0; i < total; i++ )
2426 dst[i] = std::min(src1[i], val);
// Reference element-wise min/max of an array against a scalar. Integer
// depths use the value saturated to int (then to the element type); float
// and double depths use the raw double. Depth case labels are elided.
2429 static void minmax(const Mat& src1, double val, Mat& dst, char op)
2431 dst.create(src1.dims, src1.size, src1.type());
2432 const Mat *arrays[]={&src1, &dst, 0};
2435 NAryMatIterator it(arrays, planes);
2436 size_t total = planes[0].total()*planes[0].channels();
2437 size_t i, nplanes = it.nplanes, depth = src1.depth();
2438 int ival = saturate_cast<int>(val);
2440 for( i = 0; i < nplanes; i++, ++it )
2442 const uchar* sptr1 = planes[0].data;
2443 uchar* dptr = planes[1].data;
2448 minmax_((const uchar*)sptr1, saturate_cast<uchar>(ival), (uchar*)dptr, total, op);
2451 minmax_((const schar*)sptr1, saturate_cast<schar>(ival), (schar*)dptr, total, op);
2454 minmax_((const ushort*)sptr1, saturate_cast<ushort>(ival), (ushort*)dptr, total, op);
2457 minmax_((const short*)sptr1, saturate_cast<short>(ival), (short*)dptr, total, op);
2460 minmax_((const int*)sptr1, saturate_cast<int>(ival), (int*)dptr, total, op);
2463 minmax_((const float*)sptr1, saturate_cast<float>(val), (float*)dptr, total, op);
2466 minmax_((const double*)sptr1, saturate_cast<double>(val), (double*)dptr, total, op);
2469 CV_Error(CV_StsUnsupportedFormat, "");
// public thin wrappers selecting the op character
2475 void min(const Mat& src1, double val, Mat& dst)
2477 minmax( src1, val, dst, 'm' );
2480 void max(const Mat& src1, double val, Mat& dst)
2482 minmax( src1, val, dst, 'M' );
// Element-wise multiply/divide kernel (op-selection lines elided):
//   '*' : dst = saturate(scale*src1*src2)
//   '/' : dst = saturate(scale*src1/src2), 0 where src2 == 0
//   3rd loop: dst = saturate(scale/src2) — the reciprocal path, presumably
//   taken when src1 is null/empty; confirm against the elided condition.
2486 template<typename _Tp> static void
2487 muldiv_(const _Tp* src1, const _Tp* src2, _Tp* dst, size_t total, double scale, char op)
2490 for( size_t i = 0; i < total; i++ )
2491 dst[i] = saturate_cast<_Tp>((scale*src1[i])*src2[i]);
2493 for( size_t i = 0; i < total; i++ )
2494 dst[i] = src2[i] ? saturate_cast<_Tp>((scale*src1[i])/src2[i]) : 0;
2496 for( size_t i = 0; i < total; i++ )
2497 dst[i] = src2[i] ? saturate_cast<_Tp>(scale/src2[i]) : 0;
// Reference element-wise multiply/divide with scale. src1 may be empty, in
// which case (per muldiv_) only scale/src2 is computed. Sizes/types are
// taken from src2 accordingly. Depth case labels are elided in this listing.
2500 static void muldiv(const Mat& src1, const Mat& src2, Mat& dst, double scale, char op)
2502 dst.create(src2.dims, src2.size, src2.type());
2503 CV_Assert( src1.empty() || (src1.type() == src2.type() && src1.size == src2.size) );
2504 const Mat *arrays[]={&src1, &src2, &dst, 0};
2507 NAryMatIterator it(arrays, planes);
// note: plane 1 (src2) drives the element count, since src1 may be empty
2508 size_t total = planes[1].total()*planes[1].channels();
2509 size_t i, nplanes = it.nplanes, depth = src2.depth();
2511 for( i = 0; i < nplanes; i++, ++it )
2513 const uchar* sptr1 = planes[0].data;
2514 const uchar* sptr2 = planes[1].data;
2515 uchar* dptr = planes[2].data;
2520 muldiv_((const uchar*)sptr1, (const uchar*)sptr2, (uchar*)dptr, total, scale, op);
2523 muldiv_((const schar*)sptr1, (const schar*)sptr2, (schar*)dptr, total, scale, op);
2526 muldiv_((const ushort*)sptr1, (const ushort*)sptr2, (ushort*)dptr, total, scale, op);
2529 muldiv_((const short*)sptr1, (const short*)sptr2, (short*)dptr, total, scale, op);
2532 muldiv_((const int*)sptr1, (const int*)sptr2, (int*)dptr, total, scale, op);
2535 muldiv_((const float*)sptr1, (const float*)sptr2, (float*)dptr, total, scale, op);
2538 muldiv_((const double*)sptr1, (const double*)sptr2, (double*)dptr, total, scale, op);
2541 CV_Error(CV_StsUnsupportedFormat, "");
// public thin wrappers selecting the op character
2547 void multiply(const Mat& src1, const Mat& src2, Mat& dst, double scale)
2549 muldiv( src1, src2, dst, scale, '*' );
2552 void divide(const Mat& src1, const Mat& src2, Mat& dst, double scale)
2554 muldiv( src1, src2, dst, scale, '/' );
// Accumulates per-channel sums into 'sum' and counts contributing pixels in
// 'nz'. First loop: unmasked path (total is elements*channels, stepping by
// cn). Second loop: masked path (total is pixel count; the mask test and the
// nz increments are elided in this listing).
2558 template<typename _Tp> static void
2559 mean_(const _Tp* src, const uchar* mask, size_t total, int cn, Scalar& sum, int& nz)
2565 for( size_t i = 0; i < total; i += cn )
2567 for( int c = 0; c < cn; c++ )
2568 sum[c] += src[i + c];
2573 for( size_t i = 0; i < total; i++ )
2577 for( int c = 0; c < cn; c++ )
2578 sum[c] += src[i*cn + c];
// Reference cv::mean with optional CV_8U mask: per-channel sum divided by
// the number of counted pixels (max'ed with 1 to avoid division by zero on
// an empty mask). Depth case labels are elided in this listing.
2583 Scalar mean(const Mat& src, const Mat& mask)
2585 CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.size == src.size));
2589 const Mat *arrays[]={&src, &mask, 0};
2592 NAryMatIterator it(arrays, planes);
2593 size_t total = planes[0].total();
2594 size_t i, nplanes = it.nplanes;
2595 int depth = src.depth(), cn = src.channels();
2597 for( i = 0; i < nplanes; i++, ++it )
2599 const uchar* sptr = planes[0].data;
2600 const uchar* mptr = planes[1].data;
2605 mean_((const uchar*)sptr, mptr, total, cn, sum, nz);
2608 mean_((const schar*)sptr, mptr, total, cn, sum, nz);
2611 mean_((const ushort*)sptr, mptr, total, cn, sum, nz);
2614 mean_((const short*)sptr, mptr, total, cn, sum, nz);
2617 mean_((const int*)sptr, mptr, total, cn, sum, nz);
2620 mean_((const float*)sptr, mptr, total, cn, sum, nz);
2623 mean_((const double*)sptr, mptr, total, cn, sum, nz);
2626 CV_Error(CV_StsUnsupportedFormat, "");
2630 return sum * (1./std::max(nz, 1));
// In-place: replaces every element whose magnitude is below 'level' (the
// replacement assignments after each fabs test are elided in this listing).
// Used to keep reference math away from near-zero divisors/singularities.
2634 void patchZeros( Mat& mat, double level )
2636 int j, ncols = mat.cols * mat.channels();
2637 int depth = mat.depth();
2638 CV_Assert( depth == CV_32F || depth == CV_64F );
2640 for( int i = 0; i < mat.rows; i++ )
2642 if( depth == CV_32F )
2644 float* data = mat.ptr<float>(i);
2645 for( j = 0; j < ncols; j++ )
2646 if( fabs(data[j]) < level )
2651 double* data = mat.ptr<double>(i);
2652 for( j = 0; j < ncols; j++ )
2653 if( fabs(data[j]) < level )
// Builds a 1-D derivative kernel of the given size and derivative order.
// Negative aperture selects the fixed 3-tap Scharr kernels (smooth {3,10,3}
// or derivative {-1,0,1}). Otherwise the kernel is built iteratively:
// (size-order-1) smoothing passes (pairwise sums — Pascal's triangle rows)
// followed by 'order' differencing passes. Several initialization/update
// lines inside the loops are elided in this listing.
2660 static void calcSobelKernel1D( int order, int _aperture_size, int size, vector<int>& kernel )
2662 int i, j, oldval, newval;
// one extra slot used as scratch during the in-place pairwise passes
2663 kernel.resize(size + 1);
2665 if( _aperture_size < 0 )
2667 static const int scharr[] = { 3, 10, 3, -1, 0, 1 };
2668 assert( size == 3 );
// row 0 = smoothing kernel, row 1 = first-derivative kernel
2669 for( i = 0; i < size; i++ )
2670 kernel[i] = scharr[order*3 + i];
2674 for( i = 1; i <= size; i++ )
// smoothing passes: each adds one row of Pascal's triangle
2678 for( i = 0; i < size - order - 1; i++ )
2681 for( j = 1; j <= size; j++ )
2683 newval = kernel[j] + kernel[j-1];
2684 kernel[j-1] = oldval;
// differencing passes: convert smoothing taps into derivative taps
2689 for( i = 0; i < order; i++ )
2691 oldval = -kernel[0];
2692 for( j = 1; j <= size; j++ )
2694 newval = kernel[j-1] - kernel[j];
2695 kernel[j-1] = oldval;
// Builds the 2-D Sobel/Scharr kernel as the outer product of the 1-D
// x-derivative kernel kx and y-derivative kernel ky. When origin != 0 and
// the y-derivative order is odd, the y part is negated (image flipped
// vertically). aperture -1 means Scharr (3x3).
2702 Mat calcSobelKernel2D( int dx, int dy, int _aperture_size, int origin )
2704 CV_Assert( (_aperture_size == -1 || (_aperture_size >= 1 && _aperture_size % 2 == 1)) &&
2705 dx >= 0 && dy >= 0 && dx + dy <= 3 );
// aperture 1 gives a degenerate 3x1 / 1x3 kernel along the derivative axis
2706 Size ksize = _aperture_size == -1 ? Size(3,3) : _aperture_size > 1 ?
2707 Size(_aperture_size, _aperture_size) : dx > 0 ? Size(3, 1) : Size(1, 3);
2709 Mat kernel(ksize, CV_32F);
2712 calcSobelKernel1D( dx, _aperture_size, ksize.width, kx );
2713 calcSobelKernel1D( dy, _aperture_size, ksize.height, ky );
2715 for( int i = 0; i < kernel.rows; i++ )
2717 float ay = (float)ky[i]*(origin && (dy & 1) ? -1 : 1);
2718 for( int j = 0; j < kernel.cols; j++ )
2719 kernel.at<float>(i, j) = kx[j]*ay;
// Builds the 2-D Laplacian kernel: kx is the 1-D second-derivative kernel,
// ky is either the matching smoothing kernel (aperture > 1) or the identity
// [0 1 0]; the result is the symmetric sum kx(j)*ky(i) + kx(i)*ky(j),
// i.e. d2/dx2 + d2/dy2.
2726 Mat calcLaplaceKernel2D( int aperture_size )
2728 int ksize = aperture_size == 1 ? 3 : aperture_size;
2729 Mat kernel(ksize, ksize, CV_32F);
2733 calcSobelKernel1D( 2, aperture_size, ksize, kx );
2734 if( aperture_size > 1 )
2735 calcSobelKernel1D( 0, aperture_size, ksize, ky );
// aperture 1: no smoothing in the orthogonal direction
2739 ky[0] = ky[2] = 0; ky[1] = 1;
2742 for( int i = 0; i < ksize; i++ )
2743 for( int j = 0; j < ksize; j++ )
2744 kernel.at<float>(i, j) = (float)(kx[j]*ky[i] + kx[i]*ky[j]);
// Builds forward distortion maps (mapx, mapy) for remap-based undistortion
// from camera matrix _a0 (3x3) and distortion coefficients _k0 (up to 5:
// k1,k2,p1,p2,k3 per the standard OpenCV model). For each destination pixel
// (u,v): unproject to normalized coords, apply radial (k[0],k[1],k[4]) and
// tangential (k[2],k[3]) distortion, and reproject with fx/fy, cx/cy.
// NOTE(review): cxn/cyn initialization is elided in this listing —
// presumably cxn = cx, cyn = cy; confirm against the full source.
2750 void initUndistortMap( const Mat& _a0, const Mat& _k0, Size sz, Mat& _mapx, Mat& _mapy )
2752 _mapx.create(sz, CV_32F);
2753 _mapy.create(sz, CV_32F);
// k defaults to all-zero so fewer than 5 coefficients are handled
2755 double a[9], k[5]={0,0,0,0,0};
2756 Mat _a(3, 3, CV_64F, a);
2757 Mat _k(_k0.rows,_k0.cols, CV_MAKETYPE(CV_64F,_k0.channels()),k);
2758 double fx, fy, cx, cy, ifx, ify, cxn, cyn;
2760 _a0.convertTo(_a, CV_64F);
2761 _k0.convertTo(_k, CV_64F);
2762 fx = a[0]; fy = a[4]; cx = a[2]; cy = a[5];
2763 ifx = 1./fx; ify = 1./fy;
2767 for( int v = 0; v < sz.height; v++ )
2769 for( int u = 0; u < sz.width; u++ )
2771 double x = (u - cxn)*ifx;
2772 double y = (v - cyn)*ify;
2773 double x2 = x*x, y2 = y*y;
2774 double r2 = x2 + y2;
// radial factor: 1 + k1*r^2 + k2*r^4 + k3*r^6 (Horner form)
2775 double cdist = 1 + (k[0] + (k[1] + k[4]*r2)*r2)*r2;
// tangential terms use p1 = k[2], p2 = k[3]
2776 double x1 = x*cdist + k[2]*2*x*y + k[3]*(r2 + 2*x2);
2777 double y1 = y*cdist + k[3]*2*x*y + k[2]*(r2 + 2*y2);
2779 _mapy.at<float>(v, u) = (float)(y1*fy + cy);
2780 _mapx.at<float>(v, u) = (float)(x1*fx + cx);
// Pretty-prints matrix metadata, e.g. "32fC3 2-dim (480 x 640)"; the
// empty-matrix branch body is elided in this listing.
2786 std::ostream& operator << (std::ostream& out, const MatInfo& m)
2788 if( !m.m || m.m->empty() )
2792 static const char* depthstr[] = {"8u", "8s", "16u", "16s", "32s", "32f", "64f", "?"};
2793 out << depthstr[m.m->depth()] << "C" << m.m->channels() << " " << m.m->dims << "-dim (";
2794 for( int i = 0; i < m.m->dims; i++ )
2795 out << m.m->size[i] << (i < m.m->dims-1 ? " x " : ")");
// Extracts a sub-matrix around position ofs0 with up to 'border' elements of
// context on each side (clipped to the matrix bounds). On return ofs holds
// the position of the original element inside the extracted patch. The
// negative-border early-out and the final submatrix construction are elided.
2801 static Mat getSubArray(const Mat& m, int border, vector<int>& ofs0, vector<int>& ofs)
2803 ofs.resize(ofs0.size());
2806 std::copy(ofs0.begin(), ofs0.end(), ofs.begin());
2810 CV_Assert(d == (int)ofs.size());
2812 for( i = 0; i < d; i++ )
2814 r[i].start = std::max(0, ofs0[i] - border);
2815 r[i].end = std::min(ofs0[i] + 1 + border, m.size[i]);
// offset of the element of interest within the clipped patch
2816 ofs[i] = std::min(ofs0[i], border);
// Writes nelems elements of 'data' (stored as _Tp, printed as _WTp so that
// 8/16-bit types print as numbers, not characters) separated by ", ";
// the element at 'starpos' is presumably highlighted (marker lines elided).
2821 template<typename _Tp, typename _WTp> static void
2822 writeElems(std::ostream& out, const void* data, int nelems, int starpos)
2824 for(int i = 0; i < nelems; i++)
2828 out << (_WTp)((_Tp*)data)[i];
2831 out << (i+1 < nelems ? ", " : "");
// Non-template front-end: dispatches on CV depth and, for float/double,
// temporarily raises the stream precision (restored via the saved 'pp').
2836 static void writeElems(std::ostream& out, const void* data, int nelems, int depth, int starpos)
2839 writeElems<uchar, int>(out, data, nelems, starpos);
2840 else if(depth == CV_8S)
2841 writeElems<schar, int>(out, data, nelems, starpos);
2842 else if(depth == CV_16U)
2843 writeElems<ushort, int>(out, data, nelems, starpos);
2844 else if(depth == CV_16S)
2845 writeElems<short, int>(out, data, nelems, starpos);
2846 else if(depth == CV_32S)
2847 writeElems<int, int>(out, data, nelems, starpos);
2848 else if(depth == CV_32F)
2850 std::streamsize pp = out.precision();
2852 writeElems<float, float>(out, data, nelems, starpos);
2855 else if(depth == CV_64F)
2857 std::streamsize pp = out.precision();
2859 writeElems<double, double>(out, data, nelems, starpos);
2863 CV_Error(CV_StsUnsupportedFormat, "");
// MatPart pairs a matrix patch with the (row, col) of the element to
// highlight; the struct header and the 'm' member declaration are elided in
// this listing (only the ctor and the 'loc' member are visible).
2869 MatPart(const Mat& _m, const vector<int>* _loc)
2870 : m(&_m), loc(_loc) {}
2872 const vector<int>* loc;
// Streams the patch row by row ("a, b; \n c, d"), passing the highlighted
// column to writeElems for the row containing the element of interest.
2875 static std::ostream& operator << (std::ostream& out, const MatPart& m)
2877 CV_Assert( !m.loc || ((int)m.loc->size() == m.m->dims && m.m->dims <= 2) );
2882 int i, depth = m.m->depth(), cn = m.m->channels(), width = m.m->cols*cn;
2883 for( i = 0; i < m.m->rows; i++ )
2885 writeElems(out, m.m->ptr(i), width, depth, i == (*m.loc)[0] ? (*m.loc)[1] : -1);
2886 out << (i < m.m->rows-1 ? ";\n" : "");
// gtest predicate-formatter comparing two matrices via cmpEps (element-wise
// relative error, threshold 'maxdiff'); on failure it prints both matrices'
// info and a small neighborhood ('context' elements wide) around the first
// offending element.
2892 MatComparator::MatComparator(double _maxdiff, int _context)
2893 : maxdiff(_maxdiff), context(_context) {}
2895 ::testing::AssertionResult
2896 MatComparator::operator()(const char* expr1, const char* expr2,
2897 const Mat& m1, const Mat& m2)
2899 if( m1.type() != m2.type() || m1.size != m2.size )
2900 return ::testing::AssertionFailure()
2901 << "The reference and the actual output arrays have different type or size:\n"
2902 << expr1 << " ~ " << MatInfo(m1) << "\n"
2903 << expr2 << " ~ " << MatInfo(m2) << "\n";
2905 //bool ok = cvtest::cmpUlps(m1, m2, maxdiff, &realmaxdiff, &loc0);
2906 int code = cmpEps( m1, m2, &realmaxdiff, maxdiff, &loc0, true);
2909 return ::testing::AssertionSuccess();
// work on single-channel views so loc0 indexes individual scalars
2911 Mat m[] = {m1.reshape(1,0), m2.reshape(1,0)};
2912 int dims = m[0].dims;
// context only printed for 2-D matrices
2914 int border = dims <= 2 ? context : 0;
// no context requested: show just the offending element itself
2920 m1part = Mat(1, 1, m[0].depth(), m[0].ptr(&loc[0]));
2921 m2part = Mat(1, 1, m[1].depth(), m[1].ptr(&loc[0]));
2925 m1part = getSubArray(m[0], border, loc0, loc);
2926 m2part = getSubArray(m[1], border, loc0, loc);
2929 return ::testing::AssertionFailure()
2930 << "too big relative difference (" << realmaxdiff << " > "
2931 << maxdiff << ") between "
2932 << MatInfo(m1) << " '" << expr1 << "' and '" << expr2 << "' at " << Mat(loc0) << ".\n\n"
2933 << "'" << expr1 << "': " << MatPart(m1part, border > 0 ? &loc : 0) << ".\n\n"
2934 << "'" << expr2 << "': " << MatPart(m2part, border > 0 ? &loc : 0) << ".\n";
// Records OpenCV version/build information as gtest properties and
// optionally echoes them to stdout. The version-control and inner-version
// lines are scraped out of cv::getBuildInformation() by substring search.
// NOTE(review): the stray trailing '\' on the find("\n") lines is a
// line-continuation splicing the following line — harmless but worth
// removing in a real cleanup pass.
2937 void printVersionInfo(bool useStdOut)
2939 ::testing::Test::RecordProperty("CV_VERSION", CV_VERSION);
2940 if(useStdOut) std::cout << "OpenCV version: " << CV_VERSION << std::endl;
2942 std::string buildInfo( cv::getBuildInformation() );
2944 size_t pos1 = buildInfo.find("Version control");
2945 size_t pos2 = buildInfo.find("\n", pos1);\
2946 if(pos1 != std::string::npos && pos2 != std::string::npos)
2948 std::string ver( buildInfo.substr(pos1, pos2-pos1) );
2949 ::testing::Test::RecordProperty("Version_control", ver);
2950 if(useStdOut) std::cout << ver << std::endl;
2953 pos1 = buildInfo.find("inner version");
2954 pos2 = buildInfo.find("\n", pos1);\
2955 if(pos1 != std::string::npos && pos2 != std::string::npos)
2957 std::string ver( buildInfo.substr(pos1, pos2-pos1) );
2958 ::testing::Test::RecordProperty("inner_version", ver);
2959 if(useStdOut) std::cout << ver << std::endl;
2963 } //namespace cvtest
// Legacy C-API shims: wrap CvMat arguments as cv::Mat headers (no copy) and
// forward to the cvtest:: reference implementations.
2965 void cvTsConvert( const CvMat* src, CvMat* dst )
2967 Mat _src = cvarrToMat(src), _dst = cvarrToMat(dst);
2968 cvtest::convert(_src, _dst, _dst.depth());
2971 void cvTsZero( CvMat* dst, const CvMat* mask )
2973 Mat _dst = cvarrToMat(dst), _mask = mask ? cvarrToMat(mask) : Mat();
2974 cvtest::set(_dst, Scalar::all(0), _mask);