4 #include "opencv2/imgproc/types_c.h"
6 #ifdef HAVE_TEGRA_OPTIMIZATION
15 const char* getTypeName( int type )
17 static const char* type_names[] = { "8u", "8s", "16u", "16s", "32s", "32f", "64f", "ptr" };
18 return type_names[CV_MAT_DEPTH(type)];
21 int typeByName( const char* name )
24 for( i = 0; i < CV_DEPTH_MAX; i++ )
25 if( strcmp(name, getTypeName(i)) == 0 )
30 string vec2str( const string& sep, const int* v, size_t nelems )
34 for( size_t i = 0; i < nelems; i++ )
36 sprintf(buf, "%d", v[i]);
37 result += string(buf);
45 Size randomSize(RNG& rng, double maxSizeLog)
47 double width_log = rng.uniform(0., maxSizeLog);
48 double height_log = rng.uniform(0., maxSizeLog - width_log);
49 if( (unsigned)rng % 2 != 0 )
50 std::swap(width_log, height_log);
52 sz.width = cvRound(exp(width_log));
53 sz.height = cvRound(exp(height_log));
57 void randomSize(RNG& rng, int minDims, int maxDims, double maxSizeLog, vector<int>& sz)
59 int i, dims = rng.uniform(minDims, maxDims+1);
61 for( i = 0; i < dims; i++ )
63 double v = rng.uniform(0., maxSizeLog);
65 sz[i] = cvRound(exp(v));
67 for( i = 0; i < dims; i++ )
69 int j = rng.uniform(0, dims);
70 int k = rng.uniform(0, dims);
71 std::swap(sz[j], sz[k]);
75 int randomType(RNG& rng, int typeMask, int minChannels, int maxChannels)
77 int channels = rng.uniform(minChannels, maxChannels+1);
79 CV_Assert((typeMask & _OutputArray::DEPTH_MASK_ALL) != 0);
82 depth = rng.uniform(CV_8U, CV_64F+1);
83 if( ((1 << depth) & typeMask) != 0 )
86 return CV_MAKETYPE(depth, channels);
89 double getMinVal(int depth)
91 depth = CV_MAT_DEPTH(depth);
92 double val = depth == CV_8U ? 0 : depth == CV_8S ? SCHAR_MIN : depth == CV_16U ? 0 :
93 depth == CV_16S ? SHRT_MIN : depth == CV_32S ? INT_MIN :
94 depth == CV_32F ? -FLT_MAX : depth == CV_64F ? -DBL_MAX : -1;
99 double getMaxVal(int depth)
101 depth = CV_MAT_DEPTH(depth);
102 double val = depth == CV_8U ? UCHAR_MAX : depth == CV_8S ? SCHAR_MAX : depth == CV_16U ? USHRT_MAX :
103 depth == CV_16S ? SHRT_MAX : depth == CV_32S ? INT_MAX :
104 depth == CV_32F ? FLT_MAX : depth == CV_64F ? DBL_MAX : -1;
105 CV_Assert(val != -1);
109 Mat randomMat(RNG& rng, Size size, int type, double minVal, double maxVal, bool useRoi)
114 size0.width += std::max(rng.uniform(0, 10) - 5, 0);
115 size0.height += std::max(rng.uniform(0, 10) - 5, 0);
120 rng.fill(m, RNG::UNIFORM, minVal, maxVal);
123 return m(Rect((size0.width-size.width)/2, (size0.height-size.height)/2, size.width, size.height));
126 Mat randomMat(RNG& rng, const vector<int>& size, int type, double minVal, double maxVal, bool useRoi)
128 int i, dims = (int)size.size();
129 vector<int> size0(dims);
130 vector<Range> r(dims);
132 for( i = 0; i < dims; i++ )
138 size0[i] += std::max(rng.uniform(0, 5) - 2, 0);
139 r[i] = Range((size0[i] - size[i])/2, (size0[i] - size[i])/2 + size[i]);
141 eqsize = eqsize && size[i] == size0[i];
144 Mat m(dims, &size0[0], type);
146 rng.fill(m, RNG::UNIFORM, minVal, maxVal);
152 void add(const Mat& _a, double alpha, const Mat& _b, double beta,
153 Scalar gamma, Mat& c, int ctype, bool calcAbs)
156 if( a.empty() || alpha == 0 )
158 // both alpha and beta can be 0, but at least one of a and b must be non-empty array,
159 // otherwise we do not know the size of the output (and may be type of the output, when ctype<0)
160 CV_Assert( !a.empty() || !b.empty() );
169 if( b.empty() || beta == 0 )
175 CV_Assert(a.size == b.size);
179 ctype = CV_MAKETYPE(CV_MAT_DEPTH(ctype), a.channels());
180 c.create(a.dims, &a.size[0], ctype);
181 const Mat *arrays[] = {&a, &b, &c, 0};
182 Mat planes[3], buf[3];
184 NAryMatIterator it(arrays, planes);
185 size_t i, nplanes = it.nplanes;
187 int total = (int)planes[0].total(), maxsize = std::min(12*12*std::max(12/cn, 1), total);
189 CV_Assert(planes[0].rows == 1);
190 buf[0].create(1, maxsize, CV_64FC(cn));
192 buf[1].create(1, maxsize, CV_64FC(cn));
193 buf[2].create(1, maxsize, CV_64FC(cn));
194 scalarToRawData(gamma, buf[2].ptr(), CV_64FC(cn), (int)(maxsize*cn));
196 for( i = 0; i < nplanes; i++, ++it)
198 for( int j = 0; j < total; j += maxsize )
200 int j2 = std::min(j + maxsize, total);
201 Mat apart0 = planes[0].colRange(j, j2);
202 Mat cpart0 = planes[2].colRange(j, j2);
203 Mat apart = buf[0].colRange(0, j2 - j);
205 apart0.convertTo(apart, apart.type(), alpha);
206 size_t k, n = (j2 - j)*cn;
207 double* aptr = apart.ptr<double>();
208 const double* gptr = buf[2].ptr<double>();
212 for( k = 0; k < n; k++ )
217 Mat bpart0 = planes[1].colRange((int)j, (int)j2);
218 Mat bpart = buf[1].colRange(0, (int)(j2 - j));
219 bpart0.convertTo(bpart, bpart.type(), beta);
220 const double* bptr = bpart.ptr<double>();
222 for( k = 0; k < n; k++ )
223 aptr[k] += bptr[k] + gptr[k];
226 for( k = 0; k < n; k++ )
227 aptr[k] = fabs(aptr[k]);
228 apart.convertTo(cpart0, cpart0.type(), 1, 0);
234 template<typename _Tp1, typename _Tp2> inline void
235 convert_(const _Tp1* src, _Tp2* dst, size_t total, double alpha, double beta)
238 if( alpha == 1 && beta == 0 )
239 for( i = 0; i < total; i++ )
240 dst[i] = saturate_cast<_Tp2>(src[i]);
242 for( i = 0; i < total; i++ )
243 dst[i] = saturate_cast<_Tp2>(src[i]*alpha);
245 for( i = 0; i < total; i++ )
246 dst[i] = saturate_cast<_Tp2>(src[i]*alpha + beta);
249 template<typename _Tp> inline void
250 convertTo(const _Tp* src, void* dst, int dtype, size_t total, double alpha, double beta)
252 switch( CV_MAT_DEPTH(dtype) )
255 convert_(src, (uchar*)dst, total, alpha, beta);
258 convert_(src, (schar*)dst, total, alpha, beta);
261 convert_(src, (ushort*)dst, total, alpha, beta);
264 convert_(src, (short*)dst, total, alpha, beta);
267 convert_(src, (int*)dst, total, alpha, beta);
270 convert_(src, (float*)dst, total, alpha, beta);
273 convert_(src, (double*)dst, total, alpha, beta);
280 void convert(const Mat& src, cv::OutputArray _dst, int dtype, double alpha, double beta)
282 if (dtype < 0) dtype = _dst.depth();
284 dtype = CV_MAKETYPE(CV_MAT_DEPTH(dtype), src.channels());
285 _dst.create(src.dims, &src.size[0], dtype);
286 Mat dst = _dst.getMat();
289 set( dst, Scalar::all(beta) );
292 if( dtype == src.type() && alpha == 1 && beta == 0 )
298 const Mat *arrays[]={&src, &dst, 0};
301 NAryMatIterator it(arrays, planes);
302 size_t total = planes[0].total()*planes[0].channels();
303 size_t i, nplanes = it.nplanes;
305 for( i = 0; i < nplanes; i++, ++it)
307 const uchar* sptr = planes[0].ptr();
308 uchar* dptr = planes[1].ptr();
310 switch( src.depth() )
313 convertTo((const uchar*)sptr, dptr, dtype, total, alpha, beta);
316 convertTo((const schar*)sptr, dptr, dtype, total, alpha, beta);
319 convertTo((const ushort*)sptr, dptr, dtype, total, alpha, beta);
322 convertTo((const short*)sptr, dptr, dtype, total, alpha, beta);
325 convertTo((const int*)sptr, dptr, dtype, total, alpha, beta);
328 convertTo((const float*)sptr, dptr, dtype, total, alpha, beta);
331 convertTo((const double*)sptr, dptr, dtype, total, alpha, beta);
338 void copy(const Mat& src, Mat& dst, const Mat& mask, bool invertMask)
340 dst.create(src.dims, &src.size[0], src.type());
344 const Mat* arrays[] = {&src, &dst, 0};
346 NAryMatIterator it(arrays, planes);
347 size_t i, nplanes = it.nplanes;
348 size_t planeSize = planes[0].total()*src.elemSize();
350 for( i = 0; i < nplanes; i++, ++it )
351 memcpy(planes[1].ptr(), planes[0].ptr(), planeSize);
356 int mcn = mask.channels();
357 CV_Assert( src.size == mask.size && mask.depth() == CV_8U
358 && (mcn == 1 || mcn == src.channels()) );
360 const Mat *arrays[]={&src, &dst, &mask, 0};
363 NAryMatIterator it(arrays, planes);
364 size_t j, k, elemSize = src.elemSize(), maskElemSize = mask.elemSize(), total = planes[0].total();
365 size_t i, nplanes = it.nplanes;
366 size_t elemSize1 = src.elemSize1();
368 for( i = 0; i < nplanes; i++, ++it)
370 const uchar* sptr = planes[0].ptr();
371 uchar* dptr = planes[1].ptr();
372 const uchar* mptr = planes[2].ptr();
373 for( j = 0; j < total; j++, sptr += elemSize, dptr += elemSize, mptr += maskElemSize )
377 if( (mptr[0] != 0) ^ invertMask )
378 for( k = 0; k < elemSize; k++ )
383 for( int c = 0; c < mcn; c++ )
384 if( (mptr[c] != 0) ^ invertMask )
385 for( k = 0; k < elemSize1; k++ )
386 dptr[k + c * elemSize1] = sptr[k + c * elemSize1];
393 void set(Mat& dst, const Scalar& gamma, const Mat& mask)
396 scalarToRawData(gamma, &buf, dst.type(), dst.channels());
397 const uchar* gptr = (const uchar*)&buf[0];
401 const Mat* arrays[] = {&dst, 0};
403 NAryMatIterator it(arrays, &plane);
404 size_t i, nplanes = it.nplanes;
405 size_t j, k, elemSize = dst.elemSize(), planeSize = plane.total()*elemSize;
407 for( k = 1; k < elemSize; k++ )
408 if( gptr[k] != gptr[0] )
410 bool uniform = k >= elemSize;
412 for( i = 0; i < nplanes; i++, ++it )
414 uchar* dptr = plane.ptr();
416 memset( dptr, gptr[0], planeSize );
419 for( j = 0; j < planeSize; j += elemSize, dptr += elemSize )
420 for( k = 0; k < elemSize; k++ )
424 memcpy(dptr, dst.ptr(), planeSize);
429 int cn = dst.channels(), mcn = mask.channels();
430 CV_Assert( dst.size == mask.size && (mcn == 1 || mcn == cn) );
432 const Mat *arrays[]={&dst, &mask, 0};
435 NAryMatIterator it(arrays, planes);
436 size_t j, k, elemSize = dst.elemSize(), maskElemSize = mask.elemSize(), total = planes[0].total();
437 size_t i, nplanes = it.nplanes;
438 size_t elemSize1 = dst.elemSize1();
440 for( i = 0; i < nplanes; i++, ++it)
442 uchar* dptr = planes[0].ptr();
443 const uchar* mptr = planes[1].ptr();
445 for( j = 0; j < total; j++, dptr += elemSize, mptr += maskElemSize )
450 for( k = 0; k < elemSize; k++ )
455 for( int c = 0; c < mcn; c++ )
457 for( k = 0; k < elemSize1; k++ )
458 dptr[k + c * elemSize1] = gptr[k + c * elemSize1];
465 void insert(const Mat& src, Mat& dst, int coi)
467 CV_Assert( dst.size == src.size && src.depth() == dst.depth() &&
468 0 <= coi && coi < dst.channels() );
470 const Mat* arrays[] = {&src, &dst, 0};
472 NAryMatIterator it(arrays, planes);
473 size_t i, nplanes = it.nplanes;
474 size_t j, k, size0 = src.elemSize(), size1 = dst.elemSize(), total = planes[0].total();
476 for( i = 0; i < nplanes; i++, ++it )
478 const uchar* sptr = planes[0].ptr();
479 uchar* dptr = planes[1].ptr() + coi*size0;
481 for( j = 0; j < total; j++, sptr += size0, dptr += size1 )
483 for( k = 0; k < size0; k++ )
490 void extract(const Mat& src, Mat& dst, int coi)
492 dst.create( src.dims, &src.size[0], src.depth() );
493 CV_Assert( 0 <= coi && coi < src.channels() );
495 const Mat* arrays[] = {&src, &dst, 0};
497 NAryMatIterator it(arrays, planes);
498 size_t i, nplanes = it.nplanes;
499 size_t j, k, size0 = src.elemSize(), size1 = dst.elemSize(), total = planes[0].total();
501 for( i = 0; i < nplanes; i++, ++it )
503 const uchar* sptr = planes[0].ptr() + coi*size1;
504 uchar* dptr = planes[1].ptr();
506 for( j = 0; j < total; j++, sptr += size0, dptr += size1 )
508 for( k = 0; k < size1; k++ )
515 void transpose(const Mat& src, Mat& dst)
517 CV_Assert(src.data != dst.data && "Inplace is not support in cvtest::transpose");
518 CV_Assert(src.dims == 2);
519 dst.create(src.cols, src.rows, src.type());
520 int i, j, k, esz = (int)src.elemSize();
522 for( i = 0; i < dst.rows; i++ )
524 const uchar* sptr = src.ptr(0) + i*esz;
525 uchar* dptr = dst.ptr(i);
527 for( j = 0; j < dst.cols; j++, sptr += src.step[0], dptr += esz )
529 for( k = 0; k < esz; k++ )
536 template<typename _Tp> static void
537 randUniInt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, const Scalar& delta)
539 for( size_t i = 0; i < total; i += cn )
540 for( int k = 0; k < cn; k++ )
542 int val = cvFloor( randInt(rng)*scale[k] + delta[k] );
543 data[i + k] = saturate_cast<_Tp>(val);
548 template<typename _Tp> static void
549 randUniFlt_(RNG& rng, _Tp* data, size_t total, int cn, const Scalar& scale, const Scalar& delta)
551 for( size_t i = 0; i < total; i += cn )
552 for( int k = 0; k < cn; k++ )
554 double val = randReal(rng)*scale[k] + delta[k];
555 data[i + k] = saturate_cast<_Tp>(val);
560 void randUni( RNG& rng, Mat& a, const Scalar& param0, const Scalar& param1 )
562 Scalar scale = param0;
563 Scalar delta = param1;
564 double C = a.depth() < CV_32F ? 1./(65536.*65536.) : 1.;
566 for( int k = 0; k < 4; k++ )
568 double s = scale.val[k] - delta.val[k];
573 delta.val[k] = scale.val[k];
579 const Mat *arrays[]={&a, 0};
582 NAryMatIterator it(arrays, &plane);
583 size_t i, nplanes = it.nplanes;
584 int depth = a.depth(), cn = a.channels();
585 size_t total = plane.total()*cn;
587 for( i = 0; i < nplanes; i++, ++it )
592 randUniInt_(rng, plane.ptr<uchar>(), total, cn, scale, delta);
595 randUniInt_(rng, plane.ptr<schar>(), total, cn, scale, delta);
598 randUniInt_(rng, plane.ptr<ushort>(), total, cn, scale, delta);
601 randUniInt_(rng, plane.ptr<short>(), total, cn, scale, delta);
604 randUniInt_(rng, plane.ptr<int>(), total, cn, scale, delta);
607 randUniFlt_(rng, plane.ptr<float>(), total, cn, scale, delta);
610 randUniFlt_(rng, plane.ptr<double>(), total, cn, scale, delta);
619 template<typename _Tp> static void
620 erode_(const Mat& src, Mat& dst, const vector<int>& ofsvec)
622 int width = dst.cols*src.channels(), n = (int)ofsvec.size();
623 const int* ofs = &ofsvec[0];
625 for( int y = 0; y < dst.rows; y++ )
627 const _Tp* sptr = src.ptr<_Tp>(y);
628 _Tp* dptr = dst.ptr<_Tp>(y);
630 for( int x = 0; x < width; x++ )
632 _Tp result = sptr[x + ofs[0]];
633 for( int i = 1; i < n; i++ )
634 result = std::min(result, sptr[x + ofs[i]]);
641 template<typename _Tp> static void
642 dilate_(const Mat& src, Mat& dst, const vector<int>& ofsvec)
644 int width = dst.cols*src.channels(), n = (int)ofsvec.size();
645 const int* ofs = &ofsvec[0];
647 for( int y = 0; y < dst.rows; y++ )
649 const _Tp* sptr = src.ptr<_Tp>(y);
650 _Tp* dptr = dst.ptr<_Tp>(y);
652 for( int x = 0; x < width; x++ )
654 _Tp result = sptr[x + ofs[0]];
655 for( int i = 1; i < n; i++ )
656 result = std::max(result, sptr[x + ofs[i]]);
663 void erode(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
664 int borderType, const Scalar& _borderValue)
666 //if( _src.type() == CV_16UC3 && _src.size() == Size(1, 2) )
668 Mat kernel = _kernel, src;
669 Scalar borderValue = _borderValue;
671 kernel = Mat::ones(3, 3, CV_8U);
674 CV_Assert( kernel.type() == CV_8U );
676 if( anchor == Point(-1,-1) )
677 anchor = Point(kernel.cols/2, kernel.rows/2);
678 if( borderType == BORDER_CONSTANT )
679 borderValue = getMaxVal(src.depth());
680 copyMakeBorder(_src, src, anchor.y, kernel.rows - anchor.y - 1,
681 anchor.x, kernel.cols - anchor.x - 1,
682 borderType, borderValue);
683 dst.create( _src.size(), src.type() );
686 int step = (int)(src.step/src.elemSize1()), cn = src.channels();
687 for( int i = 0; i < kernel.rows; i++ )
688 for( int j = 0; j < kernel.cols; j++ )
689 if( kernel.at<uchar>(i, j) != 0 )
690 ofs.push_back(i*step + j*cn);
692 ofs.push_back(anchor.y*step + anchor.x*cn);
694 switch( src.depth() )
697 erode_<uchar>(src, dst, ofs);
700 erode_<schar>(src, dst, ofs);
703 erode_<ushort>(src, dst, ofs);
706 erode_<short>(src, dst, ofs);
709 erode_<int>(src, dst, ofs);
712 erode_<float>(src, dst, ofs);
715 erode_<double>(src, dst, ofs);
722 void dilate(const Mat& _src, Mat& dst, const Mat& _kernel, Point anchor,
723 int borderType, const Scalar& _borderValue)
725 Mat kernel = _kernel, src;
726 Scalar borderValue = _borderValue;
728 kernel = Mat::ones(3, 3, CV_8U);
731 CV_Assert( kernel.type() == CV_8U );
733 if( anchor == Point(-1,-1) )
734 anchor = Point(kernel.cols/2, kernel.rows/2);
735 if( borderType == BORDER_CONSTANT )
736 borderValue = getMinVal(src.depth());
737 copyMakeBorder(_src, src, anchor.y, kernel.rows - anchor.y - 1,
738 anchor.x, kernel.cols - anchor.x - 1,
739 borderType, borderValue);
740 dst.create( _src.size(), src.type() );
743 int step = (int)(src.step/src.elemSize1()), cn = src.channels();
744 for( int i = 0; i < kernel.rows; i++ )
745 for( int j = 0; j < kernel.cols; j++ )
746 if( kernel.at<uchar>(i, j) != 0 )
747 ofs.push_back(i*step + j*cn);
749 ofs.push_back(anchor.y*step + anchor.x*cn);
751 switch( src.depth() )
754 dilate_<uchar>(src, dst, ofs);
757 dilate_<schar>(src, dst, ofs);
760 dilate_<ushort>(src, dst, ofs);
763 dilate_<short>(src, dst, ofs);
766 dilate_<int>(src, dst, ofs);
769 dilate_<float>(src, dst, ofs);
772 dilate_<double>(src, dst, ofs);
780 template<typename _Tp> static void
781 filter2D_(const Mat& src, Mat& dst, const vector<int>& ofsvec, const vector<double>& coeffvec)
783 const int* ofs = &ofsvec[0];
784 const double* coeff = &coeffvec[0];
785 int width = dst.cols*dst.channels(), ncoeffs = (int)ofsvec.size();
787 for( int y = 0; y < dst.rows; y++ )
789 const _Tp* sptr = src.ptr<_Tp>(y);
790 double* dptr = dst.ptr<double>(y);
792 for( int x = 0; x < width; x++ )
795 for( int i = 0; i < ncoeffs; i++ )
796 s += sptr[x + ofs[i]]*coeff[i];
803 void filter2D(const Mat& _src, Mat& dst, int ddepth, const Mat& kernel,
804 Point anchor, double delta, int borderType, const Scalar& _borderValue)
807 Scalar borderValue = _borderValue;
808 CV_Assert( kernel.type() == CV_32F || kernel.type() == CV_64F );
809 if( anchor == Point(-1,-1) )
810 anchor = Point(kernel.cols/2, kernel.rows/2);
811 if( borderType == BORDER_CONSTANT )
812 borderValue = getMinVal(src.depth());
813 copyMakeBorder(_src, src, anchor.y, kernel.rows - anchor.y - 1,
814 anchor.x, kernel.cols - anchor.x - 1,
815 borderType, borderValue);
816 _dst.create( _src.size(), CV_MAKETYPE(CV_64F, src.channels()) );
819 vector<double> coeff(kernel.rows*kernel.cols);
820 Mat cmat(kernel.rows, kernel.cols, CV_64F, &coeff[0]);
821 convert(kernel, cmat, cmat.type());
823 int step = (int)(src.step/src.elemSize1()), cn = src.channels();
824 for( int i = 0; i < kernel.rows; i++ )
825 for( int j = 0; j < kernel.cols; j++ )
826 ofs.push_back(i*step + j*cn);
828 switch( src.depth() )
831 filter2D_<uchar>(src, _dst, ofs, coeff);
834 filter2D_<schar>(src, _dst, ofs, coeff);
837 filter2D_<ushort>(src, _dst, ofs, coeff);
840 filter2D_<short>(src, _dst, ofs, coeff);
843 filter2D_<int>(src, _dst, ofs, coeff);
846 filter2D_<float>(src, _dst, ofs, coeff);
849 filter2D_<double>(src, _dst, ofs, coeff);
855 convert(_dst, dst, ddepth, 1, delta);
859 static int borderInterpolate( int p, int len, int borderType )
861 if( (unsigned)p < (unsigned)len )
863 else if( borderType == BORDER_REPLICATE )
864 p = p < 0 ? 0 : len - 1;
865 else if( borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101 )
867 int delta = borderType == BORDER_REFLECT_101;
875 p = len - 1 - (p - len) - delta;
877 while( (unsigned)p >= (unsigned)len );
879 else if( borderType == BORDER_WRAP )
882 p -= ((p-len+1)/len)*len;
886 else if( borderType == BORDER_CONSTANT )
889 CV_Error( Error::StsBadArg, "Unknown/unsupported border type" );
894 void copyMakeBorder(const Mat& src, Mat& dst, int top, int bottom, int left, int right,
895 int borderType, const Scalar& borderValue)
897 dst.create(src.rows + top + bottom, src.cols + left + right, src.type());
898 int i, j, k, esz = (int)src.elemSize();
899 int width = src.cols*esz, width1 = dst.cols*esz;
901 if( borderType == BORDER_CONSTANT )
903 vector<uchar> valvec((src.cols + left + right)*esz);
904 uchar* val = &valvec[0];
905 scalarToRawData(borderValue, val, src.type(), (src.cols + left + right)*src.channels());
909 for( i = 0; i < src.rows; i++ )
911 const uchar* sptr = src.ptr(i);
912 uchar* dptr = dst.ptr(i + top) + left;
913 for( j = 0; j < left; j++ )
914 dptr[j - left] = val[j];
916 for( j = 0; j < width; j++ )
918 for( j = 0; j < right; j++ )
919 dptr[j + width] = val[j];
922 for( i = 0; i < top; i++ )
924 uchar* dptr = dst.ptr(i);
925 for( j = 0; j < width1; j++ )
929 for( i = 0; i < bottom; i++ )
931 uchar* dptr = dst.ptr(i + top + src.rows);
932 for( j = 0; j < width1; j++ )
938 vector<int> tabvec((left + right)*esz + 1);
939 int* ltab = &tabvec[0];
940 int* rtab = &tabvec[left*esz];
941 for( i = 0; i < left; i++ )
943 j = borderInterpolate(i - left, src.cols, borderType)*esz;
944 for( k = 0; k < esz; k++ )
945 ltab[i*esz + k] = j + k;
947 for( i = 0; i < right; i++ )
949 j = borderInterpolate(src.cols + i, src.cols, borderType)*esz;
950 for( k = 0; k < esz; k++ )
951 rtab[i*esz + k] = j + k;
956 for( i = 0; i < src.rows; i++ )
958 const uchar* sptr = src.ptr(i);
959 uchar* dptr = dst.ptr(i + top);
961 for( j = 0; j < left; j++ )
962 dptr[j] = sptr[ltab[j]];
963 if( dptr + left != sptr )
965 for( j = 0; j < width; j++ )
966 dptr[j + left] = sptr[j];
968 for( j = 0; j < right; j++ )
969 dptr[j + left + width] = sptr[rtab[j]];
972 for( i = 0; i < top; i++ )
974 j = borderInterpolate(i - top, src.rows, borderType);
975 const uchar* sptr = dst.ptr(j + top);
976 uchar* dptr = dst.ptr(i);
978 for( k = 0; k < width1; k++ )
982 for( i = 0; i < bottom; i++ )
984 j = borderInterpolate(i + src.rows, src.rows, borderType);
985 const uchar* sptr = dst.ptr(j + top);
986 uchar* dptr = dst.ptr(i + top + src.rows);
988 for( k = 0; k < width1; k++ )
995 template<typename _Tp> static void
996 minMaxLoc_(const _Tp* src, size_t total, size_t startidx,
997 double* _minval, double* _maxval,
998 size_t* _minpos, size_t* _maxpos,
1001 _Tp maxval = saturate_cast<_Tp>(*_maxval), minval = saturate_cast<_Tp>(*_minval);
1002 size_t minpos = *_minpos, maxpos = *_maxpos;
1006 for( size_t i = 0; i < total; i++ )
1009 if( minval > val || !minpos )
1012 minpos = startidx + i;
1014 if( maxval < val || !maxpos )
1017 maxpos = startidx + i;
1023 for( size_t i = 0; i < total; i++ )
1026 if( (minval > val || !minpos) && mask[i] )
1029 minpos = startidx + i;
1031 if( (maxval < val || !maxpos) && mask[i] )
1034 maxpos = startidx + i;
1046 static void setpos( const Mat& mtx, vector<int>& pos, size_t idx )
1048 pos.resize(mtx.dims);
1052 for( int i = mtx.dims-1; i >= 0; i-- )
1054 int sz = mtx.size[i]*(i == mtx.dims-1 ? mtx.channels() : 1);
1055 pos[i] = (int)(idx % sz);
1061 for( int i = mtx.dims-1; i >= 0; i-- )
1066 void minMaxLoc(const Mat& src, double* _minval, double* _maxval,
1067 vector<int>* _minloc, vector<int>* _maxloc,
1070 CV_Assert( src.channels() == 1 );
1071 const Mat *arrays[]={&src, &mask, 0};
1074 NAryMatIterator it(arrays, planes);
1075 size_t startidx = 1, total = planes[0].total();
1076 size_t i, nplanes = it.nplanes;
1077 int depth = src.depth();
1080 size_t maxidx = 0, minidx = 0;
1082 for( i = 0; i < nplanes; i++, ++it, startidx += total )
1084 const uchar* sptr = planes[0].ptr();
1085 const uchar* mptr = planes[1].ptr();
1090 minMaxLoc_((const uchar*)sptr, total, startidx,
1091 &minval, &maxval, &minidx, &maxidx, mptr);
1094 minMaxLoc_((const schar*)sptr, total, startidx,
1095 &minval, &maxval, &minidx, &maxidx, mptr);
1098 minMaxLoc_((const ushort*)sptr, total, startidx,
1099 &minval, &maxval, &minidx, &maxidx, mptr);
1102 minMaxLoc_((const short*)sptr, total, startidx,
1103 &minval, &maxval, &minidx, &maxidx, mptr);
1106 minMaxLoc_((const int*)sptr, total, startidx,
1107 &minval, &maxval, &minidx, &maxidx, mptr);
1110 minMaxLoc_((const float*)sptr, total, startidx,
1111 &minval, &maxval, &minidx, &maxidx, mptr);
1114 minMaxLoc_((const double*)sptr, total, startidx,
1115 &minval, &maxval, &minidx, &maxidx, mptr);
1127 setpos( src, *_maxloc, maxidx );
1129 setpos( src, *_minloc, minidx );
1134 normHamming(const uchar* src, size_t total, int cellSize)
1137 int mask = cellSize == 1 ? 1 : cellSize == 2 ? 3 : cellSize == 4 ? 15 : -1;
1138 CV_Assert( mask >= 0 );
1140 for( size_t i = 0; i < total; i++ )
1142 unsigned a = src[i];
1143 for( ; a != 0; a >>= cellSize )
1144 result += (a & mask) != 0;
1150 template<typename _Tp> static double
1151 norm_(const _Tp* src, size_t total, int cn, int normType, double startval, const uchar* mask)
1154 double result = startval;
1158 if( normType == NORM_INF )
1161 for( i = 0; i < total; i++ )
1162 result = std::max(result, (double)std::abs(0+src[i]));// trick with 0 used to quiet gcc warning
1164 for( int c = 0; c < cn; c++ )
1166 for( i = 0; i < total; i++ )
1168 result = std::max(result, (double)std::abs(0+src[i*cn + c]));
1171 else if( normType == NORM_L1 )
1174 for( i = 0; i < total; i++ )
1175 result += std::abs(0+src[i]);
1177 for( int c = 0; c < cn; c++ )
1179 for( i = 0; i < total; i++ )
1181 result += std::abs(0+src[i*cn + c]);
1187 for( i = 0; i < total; i++ )
1193 for( int c = 0; c < cn; c++ )
1195 for( i = 0; i < total; i++ )
1198 double v = src[i*cn + c];
1207 template<typename _Tp> static double
1208 norm_(const _Tp* src1, const _Tp* src2, size_t total, int cn, int normType, double startval, const uchar* mask)
1211 double result = startval;
1215 if( normType == NORM_INF )
1218 for( i = 0; i < total; i++ )
1219 result = std::max(result, (double)std::abs(src1[i] - src2[i]));
1221 for( int c = 0; c < cn; c++ )
1223 for( i = 0; i < total; i++ )
1225 result = std::max(result, (double)std::abs(src1[i*cn + c] - src2[i*cn + c]));
1228 else if( normType == NORM_L1 )
1231 for( i = 0; i < total; i++ )
1232 result += std::abs(src1[i] - src2[i]);
1234 for( int c = 0; c < cn; c++ )
1236 for( i = 0; i < total; i++ )
1238 result += std::abs(src1[i*cn + c] - src2[i*cn + c]);
1244 for( i = 0; i < total; i++ )
1246 double v = src1[i] - src2[i];
1250 for( int c = 0; c < cn; c++ )
1252 for( i = 0; i < total; i++ )
1255 double v = src1[i*cn + c] - src2[i*cn + c];
1264 double norm(InputArray _src, int normType, InputArray _mask)
1266 Mat src = _src.getMat(), mask = _mask.getMat();
1267 if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
1272 bitwise_and(src, mask, temp);
1273 return cvtest::norm(temp, normType, Mat());
1276 CV_Assert( src.depth() == CV_8U );
1278 const Mat *arrays[]={&src, 0};
1281 NAryMatIterator it(arrays, planes);
1282 size_t total = planes[0].total();
1283 size_t i, nplanes = it.nplanes;
1285 int cellSize = normType == NORM_HAMMING ? 1 : 2;
1287 for( i = 0; i < nplanes; i++, ++it )
1288 result += normHamming(planes[0].ptr(), total, cellSize);
1291 int normType0 = normType;
1292 normType = normType == NORM_L2SQR ? NORM_L2 : normType;
1294 CV_Assert( mask.empty() || (src.size == mask.size && mask.type() == CV_8U) );
1295 CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
1297 const Mat *arrays[]={&src, &mask, 0};
1300 NAryMatIterator it(arrays, planes);
1301 size_t total = planes[0].total();
1302 size_t i, nplanes = it.nplanes;
1303 int depth = src.depth(), cn = planes[0].channels();
1306 for( i = 0; i < nplanes; i++, ++it )
1308 const uchar* sptr = planes[0].ptr();
1309 const uchar* mptr = planes[1].ptr();
1314 result = norm_((const uchar*)sptr, total, cn, normType, result, mptr);
1317 result = norm_((const schar*)sptr, total, cn, normType, result, mptr);
1320 result = norm_((const ushort*)sptr, total, cn, normType, result, mptr);
1323 result = norm_((const short*)sptr, total, cn, normType, result, mptr);
1326 result = norm_((const int*)sptr, total, cn, normType, result, mptr);
1329 result = norm_((const float*)sptr, total, cn, normType, result, mptr);
1332 result = norm_((const double*)sptr, total, cn, normType, result, mptr);
1335 CV_Error(Error::StsUnsupportedFormat, "");
1338 if( normType0 == NORM_L2 )
1339 result = sqrt(result);
1344 double norm(InputArray _src1, InputArray _src2, int normType, InputArray _mask)
1346 Mat src1 = _src1.getMat(), src2 = _src2.getMat(), mask = _mask.getMat();
1347 bool isRelative = (normType & NORM_RELATIVE) != 0;
1348 normType &= ~NORM_RELATIVE;
1350 if( normType == NORM_HAMMING || normType == NORM_HAMMING2 )
1353 bitwise_xor(src1, src2, temp);
1355 bitwise_and(temp, mask, temp);
1357 CV_Assert( temp.depth() == CV_8U );
1359 const Mat *arrays[]={&temp, 0};
1362 NAryMatIterator it(arrays, planes);
1363 size_t total = planes[0].total();
1364 size_t i, nplanes = it.nplanes;
1366 int cellSize = normType == NORM_HAMMING ? 1 : 2;
1368 for( i = 0; i < nplanes; i++, ++it )
1369 result += normHamming(planes[0].ptr(), total, cellSize);
1372 int normType0 = normType;
1373 normType = normType == NORM_L2SQR ? NORM_L2 : normType;
1375 CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
1376 CV_Assert( mask.empty() || (src1.size == mask.size && mask.type() == CV_8U) );
1377 CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
1378 const Mat *arrays[]={&src1, &src2, &mask, 0};
1381 NAryMatIterator it(arrays, planes);
1382 size_t total = planes[0].total();
1383 size_t i, nplanes = it.nplanes;
1384 int depth = src1.depth(), cn = planes[0].channels();
1387 for( i = 0; i < nplanes; i++, ++it )
1389 const uchar* sptr1 = planes[0].ptr();
1390 const uchar* sptr2 = planes[1].ptr();
1391 const uchar* mptr = planes[2].ptr();
1396 result = norm_((const uchar*)sptr1, (const uchar*)sptr2, total, cn, normType, result, mptr);
1399 result = norm_((const schar*)sptr1, (const schar*)sptr2, total, cn, normType, result, mptr);
1402 result = norm_((const ushort*)sptr1, (const ushort*)sptr2, total, cn, normType, result, mptr);
1405 result = norm_((const short*)sptr1, (const short*)sptr2, total, cn, normType, result, mptr);
1408 result = norm_((const int*)sptr1, (const int*)sptr2, total, cn, normType, result, mptr);
1411 result = norm_((const float*)sptr1, (const float*)sptr2, total, cn, normType, result, mptr);
1414 result = norm_((const double*)sptr1, (const double*)sptr2, total, cn, normType, result, mptr);
1417 CV_Error(Error::StsUnsupportedFormat, "");
1420 if( normType0 == NORM_L2 )
1421 result = sqrt(result);
1422 return isRelative ? result / (cvtest::norm(src2, normType) + DBL_EPSILON) : result;
1425 double PSNR(InputArray _src1, InputArray _src2)
1427 CV_Assert( _src1.depth() == CV_8U );
1428 double diff = std::sqrt(cvtest::norm(_src1, _src2, NORM_L2SQR)/(_src1.total()*_src1.channels()));
1429 return 20*log10(255./(diff+DBL_EPSILON));
// Dot product of two buffers accumulated in double precision.
template<typename _Tp> static double
crossCorr_(const _Tp* src1, const _Tp* src2, size_t total)
{
    double result = 0;
    for( size_t i = 0; i < total; i++ )
        result += (double)src1[i]*src2[i];
    return result;
}
1441 double crossCorr(const Mat& src1, const Mat& src2)
1443 CV_Assert( src1.size == src2.size && src1.type() == src2.type() );
1444 const Mat *arrays[]={&src1, &src2, 0};
1447 NAryMatIterator it(arrays, planes);
1448 size_t total = planes[0].total()*planes[0].channels();
1449 size_t i, nplanes = it.nplanes;
1450 int depth = src1.depth();
1453 for( i = 0; i < nplanes; i++, ++it )
1455 const uchar* sptr1 = planes[0].ptr();
1456 const uchar* sptr2 = planes[1].ptr();
1461 result += crossCorr_((const uchar*)sptr1, (const uchar*)sptr2, total);
1464 result += crossCorr_((const schar*)sptr1, (const schar*)sptr2, total);
1467 result += crossCorr_((const ushort*)sptr1, (const ushort*)sptr2, total);
1470 result += crossCorr_((const short*)sptr1, (const short*)sptr2, total);
1473 result += crossCorr_((const int*)sptr1, (const int*)sptr2, total);
1476 result += crossCorr_((const float*)sptr1, (const float*)sptr2, total);
1479 result += crossCorr_((const double*)sptr1, (const double*)sptr2, total);
1482 CV_Error(Error::StsUnsupportedFormat, "");
1490 logicOp_(const uchar* src1, const uchar* src2, uchar* dst, size_t total, char c)
1494 for( i = 0; i < total; i++ )
1495 dst[i] = src1[i] & src2[i];
1497 for( i = 0; i < total; i++ )
1498 dst[i] = src1[i] | src2[i];
1500 for( i = 0; i < total; i++ )
1501 dst[i] = src1[i] ^ src2[i];
1505 logicOpS_(const uchar* src, const uchar* scalar, uchar* dst, size_t total, char c)
1507 const size_t blockSize = 96;
1510 for( i = 0; i < total; i += blockSize, dst += blockSize, src += blockSize )
1512 size_t sz = MIN(total - i, blockSize);
1513 for( j = 0; j < sz; j++ )
1514 dst[j] = src[j] & scalar[j];
1517 for( i = 0; i < total; i += blockSize, dst += blockSize, src += blockSize )
1519 size_t sz = MIN(total - i, blockSize);
1520 for( j = 0; j < sz; j++ )
1521 dst[j] = src[j] | scalar[j];
1525 for( i = 0; i < total; i += blockSize, dst += blockSize, src += blockSize )
1527 size_t sz = MIN(total - i, blockSize);
1528 for( j = 0; j < sz; j++ )
1529 dst[j] = src[j] ^ scalar[j];
1533 for( i = 0; i < total; i++ )
// Reference element-wise bitwise AND/OR/XOR of two arrays of the same
// type and size. Works on arbitrary-dimensional matrices by iterating
// plane-by-plane with NAryMatIterator.
1538 void logicOp( const Mat& src1, const Mat& src2, Mat& dst, char op )
1540     CV_Assert( op == '&' || op == '|' || op == '^' );
1541     CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
1542     dst.create( src1.dims, &src1.size[0], src1.type() );
1543     const Mat *arrays[]={&src1, &src2, &dst, 0};
1546     NAryMatIterator it(arrays, planes);
     // total is in BYTES (elemSize) — bitwise ops are type-agnostic
1547     size_t total = planes[0].total()*planes[0].elemSize();
1548     size_t i, nplanes = it.nplanes;
1550     for( i = 0; i < nplanes; i++, ++it )
1552         const uchar* sptr1 = planes[0].ptr();
1553         const uchar* sptr2 = planes[1].ptr();
1554         uchar* dptr = planes[2].ptr();
1556         logicOp_(sptr1, sptr2, dptr, total, op);
// Bitwise op between an array and a scalar; op == '~' means bitwise NOT of
// src (the scalar is then ignored by logicOpS_). The scalar is replicated
// into a raw ~96-byte pattern so the worker can run over fixed-size blocks.
1561 void logicOp(const Mat& src, const Scalar& s, Mat& dst, char op)
1563     CV_Assert( op == '&' || op == '|' || op == '^' || op == '~' );
1564     dst.create( src.dims, &src.size[0], src.type() );
1565     const Mat *arrays[]={&src, &dst, 0};
1568     NAryMatIterator it(arrays, planes);
1569     size_t total = planes[0].total()*planes[0].elemSize();
1570     size_t i, nplanes = it.nplanes;
     // replicate s across as many elements as fit in 96 bytes
1572     scalarToRawData(s, buf, src.type(), (int)(96/planes[0].elemSize1()));
1574     for( i = 0; i < nplanes; i++, ++it )
1576         const uchar* sptr = planes[0].ptr();
1577         uchar* dptr = planes[1].ptr();
1579         logicOpS_(sptr, (uchar*)&buf[0], dptr, total, op);
1584 template<typename _Tp> static void
1585 compare_(const _Tp* src1, const _Tp* src2, uchar* dst, size_t total, int cmpop)
1591 for( i = 0; i < total; i++ )
1592 dst[i] = src1[i] < src2[i] ? 255 : 0;
1595 for( i = 0; i < total; i++ )
1596 dst[i] = src1[i] <= src2[i] ? 255 : 0;
1599 for( i = 0; i < total; i++ )
1600 dst[i] = src1[i] == src2[i] ? 255 : 0;
1603 for( i = 0; i < total; i++ )
1604 dst[i] = src1[i] != src2[i] ? 255 : 0;
1607 for( i = 0; i < total; i++ )
1608 dst[i] = src1[i] >= src2[i] ? 255 : 0;
1611 for( i = 0; i < total; i++ )
1612 dst[i] = src1[i] > src2[i] ? 255 : 0;
1615 CV_Error(Error::StsBadArg, "Unknown comparison operation");
1620 template<typename _Tp, typename _WTp> static void
1621 compareS_(const _Tp* src1, _WTp value, uchar* dst, size_t total, int cmpop)
1627 for( i = 0; i < total; i++ )
1628 dst[i] = src1[i] < value ? 255 : 0;
1631 for( i = 0; i < total; i++ )
1632 dst[i] = src1[i] <= value ? 255 : 0;
1635 for( i = 0; i < total; i++ )
1636 dst[i] = src1[i] == value ? 255 : 0;
1639 for( i = 0; i < total; i++ )
1640 dst[i] = src1[i] != value ? 255 : 0;
1643 for( i = 0; i < total; i++ )
1644 dst[i] = src1[i] >= value ? 255 : 0;
1647 for( i = 0; i < total; i++ )
1648 dst[i] = src1[i] > value ? 255 : 0;
1651 CV_Error(Error::StsBadArg, "Unknown comparison operation");
// Reference implementation of cv::compare for two single-channel arrays:
// dst is CV_8U with 255 where the predicate holds and 0 elsewhere.
1656 void compare(const Mat& src1, const Mat& src2, Mat& dst, int cmpop)
1658     CV_Assert( src1.type() == src2.type() && src1.channels() == 1 && src1.size == src2.size );
1659     dst.create( src1.dims, &src1.size[0], CV_8U );
1660     const Mat *arrays[]={&src1, &src2, &dst, 0};
1663     NAryMatIterator it(arrays, planes);
1664     size_t total = planes[0].total();
1665     size_t i, nplanes = it.nplanes;
1666     int depth = src1.depth();
1668     for( i = 0; i < nplanes; i++, ++it )
1670         const uchar* sptr1 = planes[0].ptr();
1671         const uchar* sptr2 = planes[1].ptr();
1672         uchar* dptr = planes[2].ptr();
         // dispatch on the source depth
1677             compare_((const uchar*)sptr1, (const uchar*)sptr2, dptr, total, cmpop);
1680             compare_((const schar*)sptr1, (const schar*)sptr2, dptr, total, cmpop);
1683             compare_((const ushort*)sptr1, (const ushort*)sptr2, dptr, total, cmpop);
1686             compare_((const short*)sptr1, (const short*)sptr2, dptr, total, cmpop);
1689             compare_((const int*)sptr1, (const int*)sptr2, dptr, total, cmpop);
1692             compare_((const float*)sptr1, (const float*)sptr2, dptr, total, cmpop);
1695             compare_((const double*)sptr1, (const double*)sptr2, dptr, total, cmpop);
1698             CV_Error(Error::StsUnsupportedFormat, "");
// Reference cv::compare against a scalar. Integer depths compare against
// the value saturated to int; float depths use the raw double value.
1703 void compare(const Mat& src, double value, Mat& dst, int cmpop)
1705     CV_Assert( src.channels() == 1 );
1706     dst.create( src.dims, &src.size[0], CV_8U );
1707     const Mat *arrays[]={&src, &dst, 0};
1710     NAryMatIterator it(arrays, planes);
1711     size_t total = planes[0].total();
1712     size_t i, nplanes = it.nplanes;
1713     int depth = src.depth();
1714     int ivalue = saturate_cast<int>(value);
1716     for( i = 0; i < nplanes; i++, ++it )
1718         const uchar* sptr = planes[0].ptr();
1719         uchar* dptr = planes[1].ptr();
1724             compareS_((const uchar*)sptr, ivalue, dptr, total, cmpop);
1727             compareS_((const schar*)sptr, ivalue, dptr, total, cmpop);
1730             compareS_((const ushort*)sptr, ivalue, dptr, total, cmpop);
1733             compareS_((const short*)sptr, ivalue, dptr, total, cmpop);
1736             compareS_((const int*)sptr, ivalue, dptr, total, cmpop);
1739             compareS_((const float*)sptr, value, dptr, total, cmpop);
1742             compareS_((const double*)sptr, value, dptr, total, cmpop);
1745             CV_Error(Error::StsUnsupportedFormat, "");
// Scans two integer buffers, returning the largest absolute per-element
// difference. The first position whose difference exceeds imaxdiff is
// recorded into idx (offset by startidx, 1-based across planes) — but only
// if idx is still 0, i.e. no earlier plane already reported a bad element.
template<typename _Tp> double
cmpUlpsInt_(const _Tp* src1, const _Tp* src2, size_t total, int imaxdiff,
            size_t startidx, size_t& idx)
{
    int worst = 0;
    for( size_t k = 0; k < total; k++ )
    {
        const int d = std::abs((int)src1[k] - (int)src2[k]);
        worst = std::max(worst, d);
        if( idx == 0 && d > imaxdiff )
            idx = k + startidx;
    }
    return (double)worst;
}
1771 template<> double cmpUlpsInt_<int>(const int* src1, const int* src2,
1772 size_t total, int imaxdiff,
1773 size_t startidx, size_t& idx)
1776 double realmaxdiff = 0;
1777 for( i = 0; i < total; i++ )
1779 double diff = fabs((double)src1[i] - (double)src2[i]);
1780 if( realmaxdiff < diff )
1783 if( diff > imaxdiff && idx == 0 )
// ULP comparison of two float buffers reinterpreted as 32-bit ints.
// Negative IEEE-754 patterns are mapped with x ^ 0x7fffffff so the bit
// patterns form a monotonic integer scale; |a - b| is then the number of
// representable floats between the two values.
static double
cmpUlpsFlt_(const int* src1, const int* src2, size_t total, int imaxdiff, size_t startidx, size_t& idx)
{
    const int C = 0x7fffffff;
    int worst = 0;
    for( size_t k = 0; k < total; k++ )
    {
        int a = src1[k], b = src2[k];
        if( a < 0 ) a ^= C;
        if( b < 0 ) b ^= C;
        const int d = std::abs(a - b);
        if( worst < d )
            worst = d;
        if( idx == 0 && d > imaxdiff )
            idx = k + startidx;
    }
    return worst;
}
1815 cmpUlpsFlt_(const int64* src1, const int64* src2, size_t total, int imaxdiff, size_t startidx, size_t& idx)
1817 const int64 C = CV_BIG_INT(0x7fffffffffffffff);
1818 double realmaxdiff = 0;
1820 for( i = 0; i < total; i++ )
1822 int64 a = src1[i], b = src2[i];
1825 double diff = fabs((double)a - (double)b);
1826 if( realmaxdiff < diff )
1829 if( diff > imaxdiff && idx == 0 )
// Compares two arrays in ULPs: integer depths use plain absolute difference,
// while CV_32F/CV_64F planes are reinterpreted as int/int64 bit patterns and
// compared with cmpUlpsFlt_. Returns true iff no element differs by more than
// imaxDiff; the first offending position is reported via loc (through setpos)
// and the largest difference via _realmaxdiff.
1836 bool cmpUlps(const Mat& src1, const Mat& src2, int imaxDiff, double* _realmaxdiff, vector<int>* loc)
1838     CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
1839     const Mat *arrays[]={&src1, &src2, 0};
1841     NAryMatIterator it(arrays, planes);
1842     size_t total = planes[0].total()*planes[0].channels();
1843     size_t i, nplanes = it.nplanes;
1844     int depth = src1.depth();
     // startidx is 1-based so idx == 0 can mean "no bad element found yet"
1845     size_t startidx = 1, idx = 0;
1849     for( i = 0; i < nplanes; i++, ++it, startidx += total )
1851         const uchar* sptr1 = planes[0].ptr();
1852         const uchar* sptr2 = planes[1].ptr();
1853         double realmaxdiff = 0;
1858             realmaxdiff = cmpUlpsInt_((const uchar*)sptr1, (const uchar*)sptr2, total, imaxDiff, startidx, idx);
1861             realmaxdiff = cmpUlpsInt_((const schar*)sptr1, (const schar*)sptr2, total, imaxDiff, startidx, idx);
1864             realmaxdiff = cmpUlpsInt_((const ushort*)sptr1, (const ushort*)sptr2, total, imaxDiff, startidx, idx);
1867             realmaxdiff = cmpUlpsInt_((const short*)sptr1, (const short*)sptr2, total, imaxDiff, startidx, idx);
1870             realmaxdiff = cmpUlpsInt_((const int*)sptr1, (const int*)sptr2, total, imaxDiff, startidx, idx);
1873             realmaxdiff = cmpUlpsFlt_((const int*)sptr1, (const int*)sptr2, total, imaxDiff, startidx, idx);
1876             realmaxdiff = cmpUlpsFlt_((const int64*)sptr1, (const int64*)sptr2, total, imaxDiff, startidx, idx);
1879             CV_Error(Error::StsUnsupportedFormat, "");
1883             *_realmaxdiff = std::max(*_realmaxdiff, realmaxdiff);
1886             setpos(src1, *loc, idx);
// Finds the first element outside [imin, imax]; its 1-based position
// (offset by startidx) is stored into idx. idx is left untouched when all
// elements are within range.
template<typename _Tp> static void
checkInt_(const _Tp* a, size_t total, int imin, int imax, size_t startidx, size_t& idx)
{
    for( size_t k = 0; k < total; k++ )
    {
        const int v = a[k];
        if( !(v >= imin && v <= imax) )
        {
            idx = k + startidx;
            break;
        }
    }
}
1906 template<typename _Tp> static void
1907 checkFlt_(const _Tp* a, size_t total, double fmin, double fmax, size_t startidx, size_t& idx)
1909 for( size_t i = 0; i < total; i++ )
1912 if( cvIsNaN(val) || cvIsInf(val) || val < fmin || val > fmax )
1921 // checks that the array does not have NaNs and/or Infs and all the elements are
1922 // within [min_val,max_val). idx is the index of the first "bad" element.
// Returns 0 when all elements pass, -1 otherwise (with the failing position
// written into *_idx via setpos when requested).
1923 int check( const Mat& a, double fmin, double fmax, vector<int>* _idx )
1925     const Mat *arrays[]={&a, 0};
1927     NAryMatIterator it(arrays, &plane);
1928     size_t total = plane.total()*plane.channels();
1929     size_t i, nplanes = it.nplanes;
1930     int depth = a.depth();
1931     size_t startidx = 1, idx = 0;
1932     int imin = 0, imax = 0;
     // integer depths compare against the integer bounds of [fmin, fmax]
1934     if( depth <= CV_32S )
1936         imin = cvCeil(fmin);
1937         imax = cvFloor(fmax);
1940     for( i = 0; i < nplanes; i++, ++it, startidx += total )
1942         const uchar* aptr = plane.ptr();
1947             checkInt_((const uchar*)aptr, total, imin, imax, startidx, idx);
1950             checkInt_((const schar*)aptr, total, imin, imax, startidx, idx);
1953             checkInt_((const ushort*)aptr, total, imin, imax, startidx, idx);
1956             checkInt_((const short*)aptr, total, imin, imax, startidx, idx);
1959             checkInt_((const int*)aptr, total, imin, imax, startidx, idx);
1962             checkFlt_((const float*)aptr, total, fmin, fmax, startidx, idx);
1965             checkFlt_((const double*)aptr, total, fmin, fmax, startidx, idx);
1968             CV_Error(Error::StsUnsupportedFormat, "");
1975     if(idx != 0 && _idx)
1976         setpos(a, *_idx, idx);
1977     return idx == 0 ? 0 : -1;
// Result codes of cmpEps()/cmpEps2(); negative values indicate failure.
1980 #define CMP_EPS_OK 0
1981 #define CMP_EPS_BIG_DIFF -1
1982 #define CMP_EPS_INVALID_TEST_DATA -2 // there is NaN or Inf value in test data
1983 #define CMP_EPS_INVALID_REF_DATA -3 // there is NaN or Inf value in reference data
1985 // compares two arrays. max_diff is the maximum actual difference,
1986 // success_err_level is maximum allowed difference, idx is the index of the first
1987 // element for which difference is >success_err_level
1988 // (or index of element with the maximum difference)
// Integer depths are compared with absolute differences (tolerance floor of
// success_err_level); float/double are compared relatively, either per
// element (|b|+1 as denominator) or against the global inf-norm of refarr.
1989 int cmpEps( const Mat& arr, const Mat& refarr, double* _realmaxdiff,
1990             double success_err_level, vector<int>* _idx,
1991             bool element_wise_relative_error )
1993     CV_Assert( arr.type() == refarr.type() && arr.size == refarr.size );
1995     int ilevel = refarr.depth() <= CV_32S ? cvFloor(success_err_level) : 0;
1996     int result = CMP_EPS_OK;
1998     const Mat *arrays[]={&arr, &refarr, 0};
2000     NAryMatIterator it(arrays, planes);
2001     size_t total = planes[0].total()*planes[0].channels(), j = total;
2002     size_t i, nplanes = it.nplanes;
2003     int depth = arr.depth();
2004     size_t startidx = 1, idx = 0;
2005     double realmaxdiff = 0, maxval = 0;
     // for non-element-wise float comparison, normalize by the global max
2010     if( refarr.depth() >= CV_32F && !element_wise_relative_error )
2012         maxval = cvtest::norm( refarr, NORM_INF );
2013         maxval = MAX(maxval, 1.);
2016     for( i = 0; i < nplanes; i++, ++it, startidx += total )
2018         const uchar* sptr1 = planes[0].ptr();
2019         const uchar* sptr2 = planes[1].ptr();
2024             realmaxdiff = cmpUlpsInt_((const uchar*)sptr1, (const uchar*)sptr2, total, ilevel, startidx, idx);
2027             realmaxdiff = cmpUlpsInt_((const schar*)sptr1, (const schar*)sptr2, total, ilevel, startidx, idx);
2030             realmaxdiff = cmpUlpsInt_((const ushort*)sptr1, (const ushort*)sptr2, total, ilevel, startidx, idx);
2033             realmaxdiff = cmpUlpsInt_((const short*)sptr1, (const short*)sptr2, total, ilevel, startidx, idx);
2036             realmaxdiff = cmpUlpsInt_((const int*)sptr1, (const int*)sptr2, total, ilevel, startidx, idx);
2039             for( j = 0; j < total; j++ )
2041                 double a_val = ((float*)sptr1)[j];
2042                 double b_val = ((float*)sptr2)[j];
             // identical bit patterns are always accepted (handles equal NaNs/Infs)
2044                 if( ((int*)sptr1)[j] == ((int*)sptr2)[j] )
2046                 if( cvIsNaN(a_val) || cvIsInf(a_val) )
2048                     result = CMP_EPS_INVALID_TEST_DATA;
2052                 if( cvIsNaN(b_val) || cvIsInf(b_val) )
2054                     result = CMP_EPS_INVALID_REF_DATA;
2058                 a_val = fabs(a_val - b_val);
2059                 threshold = element_wise_relative_error ? fabs(b_val) + 1 : maxval;
2060                 if( a_val > threshold*success_err_level )
2062                     realmaxdiff = a_val/threshold;
2070             for( j = 0; j < total; j++ )
2072                 double a_val = ((double*)sptr1)[j];
2073                 double b_val = ((double*)sptr2)[j];
2075                 if( ((int64*)sptr1)[j] == ((int64*)sptr2)[j] )
2077                 if( cvIsNaN(a_val) || cvIsInf(a_val) )
2079                     result = CMP_EPS_INVALID_TEST_DATA;
2083                 if( cvIsNaN(b_val) || cvIsInf(b_val) )
2085                     result = CMP_EPS_INVALID_REF_DATA;
2089                 a_val = fabs(a_val - b_val);
2090                 threshold = element_wise_relative_error ? fabs(b_val) + 1 : maxval;
2091                 if( a_val > threshold*success_err_level )
2093                     realmaxdiff = a_val/threshold;
2101             return CMP_EPS_BIG_DIFF;
2104             *_realmaxdiff = MAX(*_realmaxdiff, realmaxdiff);
2109     if( result == 0 && idx != 0 )
2110         result = CMP_EPS_BIG_DIFF;
     // sentinel "huge" value signals invalid (NaN/Inf) input data
2112     if( result < -1 && _realmaxdiff )
2113         *_realmaxdiff = exp(1000.);
2115         setpos(arr, *_idx, idx);
// Wrapper around cmpEps() that logs a human-readable failure message to the
// test system and converts CMP_EPS_* codes into TS::FAIL_* codes.
2121 int cmpEps2( TS* ts, const Mat& a, const Mat& b, double success_err_level,
2122              bool element_wise_relative_error, const char* desc )
2127     int code = cmpEps( a, b, &diff, success_err_level, &idx, element_wise_relative_error );
2131     case CMP_EPS_BIG_DIFF:
2132         sprintf( msg, "%s: Too big difference (=%g > %g)", desc, diff, success_err_level );
2133         code = TS::FAIL_BAD_ACCURACY;
2135     case CMP_EPS_INVALID_TEST_DATA:
2136         sprintf( msg, "%s: Invalid output", desc );
2137         code = TS::FAIL_INVALID_OUTPUT;
2139     case CMP_EPS_INVALID_REF_DATA:
2140         sprintf( msg, "%s: Invalid reference output", desc );
2141         code = TS::FAIL_INVALID_OUTPUT;
     // report the failing location in a form matching the array shape
2149     if( a.total() == 1 )
2151         ts->printf( TS::LOG, "%s\n", msg );
2153     else if( a.dims == 2 && (a.rows == 1 || a.cols == 1) )
2155         ts->printf( TS::LOG, "%s at element %d\n", msg, idx[0] + idx[1] );
2159         string idxstr = vec2str(", ", &idx[0], idx.size());
2160         ts->printf( TS::LOG, "%s at (%s)\n", msg, idxstr.c_str() );
// Convenience overload of cmpEps2 for plain double arrays: wraps both
// buffers in 1 x len CV_64F headers (no copy) and compares element-wise.
2168 int cmpEps2_64f( TS* ts, const double* val, const double* refval, int len,
2169                  double eps, const char* param_name )
2171     Mat _val(1, len, CV_64F, (void*)val);
2172     Mat _refval(1, len, CV_64F, (void*)refval);
2174     return cmpEps2( ts, _val, _refval, eps, true, param_name );
// Reference GEMM kernel: D = alpha*A*B + beta*C, element type _Tp, with
// accumulation in double. cn == 1 is the real case; cn == 2 treats each
// element pair as a complex number. The *_step/*_delta pairs let callers
// express transposition by swapping row and element strides.
2178 template<typename _Tp> static void
2179 GEMM_(const _Tp* a_data0, int a_step, int a_delta,
2180       const _Tp* b_data0, int b_step, int b_delta,
2181       const _Tp* c_data0, int c_step, int c_delta,
2182       _Tp* d_data, int d_step,
2183       int d_rows, int d_cols, int a_cols, int cn,
2184       double alpha, double beta)
2186     for( int i = 0; i < d_rows; i++, d_data += d_step, c_data0 += c_step, a_data0 += a_step )
2188         for( int j = 0; j < d_cols; j++ )
2190             const _Tp* a_data = a_data0;
2191             const _Tp* b_data = b_data0 + j*b_delta;
2192             const _Tp* c_data = c_data0 + j*c_delta;
         // real-valued dot product accumulated in double
2197                 for( int k = 0; k < a_cols; k++ )
2199                     s += ((double)a_data[0])*b_data[0];
2203                 d_data[j] = (_Tp)(s*alpha + (c_data ? c_data[0]*beta : 0));
         // complex-valued case: interleaved (re, im) pairs
2207                 double s_re = 0, s_im = 0;
2209                 for( int k = 0; k < a_cols; k++ )
2211                     s_re += ((double)a_data[0])*b_data[0] - ((double)a_data[1])*b_data[1];
2212                     s_im += ((double)a_data[0])*b_data[1] + ((double)a_data[1])*b_data[0];
2222                     s_re += c_data[0]*beta;
2223                     s_im += c_data[1]*beta;
2226                 d_data[j*2] = (_Tp)s_re;
2227                 d_data[j*2+1] = (_Tp)s_im;
// Reference implementation of cv::gemm: d = alpha*op(a)*op(b) + beta*op(c),
// where GEMM_1_T/2_T/3_T flags transpose the corresponding operand.
// Transposition is implemented without copying, by swapping the (step,
// delta) stride pair passed down to GEMM_.
2234 void gemm( const Mat& _a, const Mat& _b, double alpha,
2235            const Mat& _c, double beta, Mat& d, int flags )
2237     Mat a = _a, b = _b, c = _c;
     // in-place calls: clone inputs that alias the destination
2239     if( a.data == d.data )
2242     if( b.data == d.data )
2245     if( !c.empty() && c.data == d.data && (flags & cv::GEMM_3_T) )
2248     int a_rows = a.rows, a_cols = a.cols, b_rows = b.rows, b_cols = b.cols;
2249     int cn = a.channels();
2250     int a_step = (int)a.step1(), a_delta = cn;
2251     int b_step = (int)b.step1(), b_delta = cn;
2252     int c_rows = 0, c_cols = 0, c_step = 0, c_delta = 0;
2254     CV_Assert( a.type() == b.type() && a.dims == 2 && b.dims == 2 && cn <= 2 );
2256     if( flags & cv::GEMM_1_T )
2258         std::swap( a_rows, a_cols );
2259         std::swap( a_step, a_delta );
2262     if( flags & cv::GEMM_2_T )
2264         std::swap( b_rows, b_cols );
2265         std::swap( b_step, b_delta );
2272         c_step = (int)c.step1();
2275         if( flags & cv::GEMM_3_T )
2277             std::swap( c_rows, c_cols );
2278             std::swap( c_step, c_delta );
2281         CV_Assert( c.dims == 2 && c.type() == a.type() && c_rows == a_rows && c_cols == b_cols );
2284     d.create(a_rows, b_cols, a.type());
2286     if( a.depth() == CV_32F )
2287         GEMM_(a.ptr<float>(), a_step, a_delta, b.ptr<float>(), b_step, b_delta,
2288               !c.empty() ? c.ptr<float>() : 0, c_step, c_delta, d.ptr<float>(),
2289               (int)d.step1(), a_rows, b_cols, a_cols, cn, alpha, beta );
2291         GEMM_(a.ptr<double>(), a_step, a_delta, b.ptr<double>(), b_step, b_delta,
2292               !c.empty() ? c.ptr<double>() : 0, c_step, c_delta, d.ptr<double>(),
2293               (int)d.step1(), a_rows, b_cols, a_cols, cn, alpha, beta );
2297 template<typename _Tp> static void
2298 transform_(const _Tp* sptr, _Tp* dptr, size_t total, int scn, int dcn, const double* mat)
2300 for( size_t i = 0; i < total; i++, sptr += scn, dptr += dcn )
2302 for( int j = 0; j < dcn; j++ )
2304 double s = mat[j*(scn + 1) + scn];
2305 for( int k = 0; k < scn; k++ )
2306 s += mat[j*(scn + 1) + k]*sptr[k];
2307 dptr[j] = saturate_cast<_Tp>(s);
// Reference cv::transform: applies the channel-mixing matrix transmat (plus
// optional per-channel shift) to every element of src. The matrix and shift
// are first flattened into a dense scn x (scn+1) double buffer 'mat' that
// transform_ consumes.
2313 void transform( const Mat& src, Mat& dst, const Mat& transmat, const Mat& _shift )
2317     int scn = src.channels();
2318     int dcn = dst.channels();
2319     int depth = src.depth();
2320     int mattype = transmat.depth();
2321     Mat shift = _shift.reshape(1, 0);
2322     bool haveShift = !shift.empty();
2324     CV_Assert( scn <= 4 && dcn <= 4 &&
2325                (mattype == CV_32F || mattype == CV_64F) &&
2326                (!haveShift || (shift.type() == mattype && (shift.rows == 1 || shift.cols == 1))) );
2328     // prepare cn x (cn + 1) transform matrix
2329     if( mattype == CV_32F )
2331         for( int i = 0; i < transmat.rows; i++ )
2333             mat[i*(scn+1)+scn] = 0.;
2334             for( int j = 0; j < transmat.cols; j++ )
2335                 mat[i*(scn+1)+j] = transmat.at<float>(i,j);
2337                 mat[i*(scn+1)+scn] = shift.at<float>(i);
     // same flattening for a CV_64F matrix
2342         for( int i = 0; i < transmat.rows; i++ )
2344             mat[i*(scn+1)+scn] = 0.;
2345             for( int j = 0; j < transmat.cols; j++ )
2346                 mat[i*(scn+1)+j] = transmat.at<double>(i,j);
2348                 mat[i*(scn+1)+scn] = shift.at<double>(i);
2352     const Mat *arrays[]={&src, &dst, 0};
2354     NAryMatIterator it(arrays, planes);
2355     size_t total = planes[0].total();
2356     size_t i, nplanes = it.nplanes;
2358     for( i = 0; i < nplanes; i++, ++it )
2360         const uchar* sptr = planes[0].ptr();
2361         uchar* dptr = planes[1].ptr();
2366             transform_((const uchar*)sptr, (uchar*)dptr, total, scn, dcn, mat);
2369             transform_((const schar*)sptr, (schar*)dptr, total, scn, dcn, mat);
2372             transform_((const ushort*)sptr, (ushort*)dptr, total, scn, dcn, mat);
2375             transform_((const short*)sptr, (short*)dptr, total, scn, dcn, mat);
2378             transform_((const int*)sptr, (int*)dptr, total, scn, dcn, mat);
2381             transform_((const float*)sptr, (float*)dptr, total, scn, dcn, mat);
2384             transform_((const double*)sptr, (double*)dptr, total, scn, dcn, mat);
2387             CV_Error(Error::StsUnsupportedFormat, "");
// Element-wise min/max of two buffers: op == 'M' selects the maximum,
// anything else (the callers pass 'm') selects the minimum.
template<typename _Tp> static void
minmax_(const _Tp* src1, const _Tp* src2, _Tp* dst, size_t total, char op)
{
    const bool takeMax = op == 'M';
    for( size_t k = 0; k < total; k++ )
        dst[k] = takeMax ? std::max(src1[k], src2[k]) : std::min(src1[k], src2[k]);
}
// Shared worker for the reference cv::min/cv::max on two arrays; op is
// 'm' (min) or 'M' (max) and is forwarded to the typed minmax_ kernel.
2403 static void minmax(const Mat& src1, const Mat& src2, Mat& dst, char op)
2405     dst.create(src1.dims, src1.size, src1.type());
2406     CV_Assert( src1.type() == src2.type() && src1.size == src2.size );
2407     const Mat *arrays[]={&src1, &src2, &dst, 0};
2410     NAryMatIterator it(arrays, planes);
2411     size_t total = planes[0].total()*planes[0].channels();
2412     size_t i, nplanes = it.nplanes, depth = src1.depth();
2414     for( i = 0; i < nplanes; i++, ++it )
2416         const uchar* sptr1 = planes[0].ptr();
2417         const uchar* sptr2 = planes[1].ptr();
2418         uchar* dptr = planes[2].ptr();
2423             minmax_((const uchar*)sptr1, (const uchar*)sptr2, (uchar*)dptr, total, op);
2426             minmax_((const schar*)sptr1, (const schar*)sptr2, (schar*)dptr, total, op);
2429             minmax_((const ushort*)sptr1, (const ushort*)sptr2, (ushort*)dptr, total, op);
2432             minmax_((const short*)sptr1, (const short*)sptr2, (short*)dptr, total, op);
2435             minmax_((const int*)sptr1, (const int*)sptr2, (int*)dptr, total, op);
2438             minmax_((const float*)sptr1, (const float*)sptr2, (float*)dptr, total, op);
2441             minmax_((const double*)sptr1, (const double*)sptr2, (double*)dptr, total, op);
2444             CV_Error(Error::StsUnsupportedFormat, "");
// Public reference element-wise min/max over the shared minmax() worker.
2450 void min(const Mat& src1, const Mat& src2, Mat& dst)
2452     minmax( src1, src2, dst, 'm' );
2455 void max(const Mat& src1, const Mat& src2, Mat& dst)
2457     minmax( src1, src2, dst, 'M' );
// Element-wise min/max of a buffer against a single value: op == 'M'
// selects the maximum, anything else (the callers pass 'm') the minimum.
template<typename _Tp> static void
minmax_(const _Tp* src1, _Tp val, _Tp* dst, size_t total, char op)
{
    const bool takeMax = op == 'M';
    for( size_t k = 0; k < total; k++ )
        dst[k] = takeMax ? std::max(src1[k], val) : std::min(src1[k], val);
}
// Shared worker for reference min/max against a scalar. The double value is
// saturated to each integer depth before comparison; float depths compare
// against the value cast directly.
2472 static void minmax(const Mat& src1, double val, Mat& dst, char op)
2474     dst.create(src1.dims, src1.size, src1.type());
2475     const Mat *arrays[]={&src1, &dst, 0};
2478     NAryMatIterator it(arrays, planes);
2479     size_t total = planes[0].total()*planes[0].channels();
2480     size_t i, nplanes = it.nplanes, depth = src1.depth();
2481     int ival = saturate_cast<int>(val);
2483     for( i = 0; i < nplanes; i++, ++it )
2485         const uchar* sptr1 = planes[0].ptr();
2486         uchar* dptr = planes[1].ptr();
2491             minmax_((const uchar*)sptr1, saturate_cast<uchar>(ival), (uchar*)dptr, total, op);
2494             minmax_((const schar*)sptr1, saturate_cast<schar>(ival), (schar*)dptr, total, op);
2497             minmax_((const ushort*)sptr1, saturate_cast<ushort>(ival), (ushort*)dptr, total, op);
2500             minmax_((const short*)sptr1, saturate_cast<short>(ival), (short*)dptr, total, op);
2503             minmax_((const int*)sptr1, saturate_cast<int>(ival), (int*)dptr, total, op);
2506             minmax_((const float*)sptr1, saturate_cast<float>(val), (float*)dptr, total, op);
2509             minmax_((const double*)sptr1, saturate_cast<double>(val), (double*)dptr, total, op);
2512             CV_Error(Error::StsUnsupportedFormat, "");
// Public reference min/max of an array against a scalar value.
2518 void min(const Mat& src1, double val, Mat& dst)
2520     minmax( src1, val, dst, 'm' );
2523 void max(const Mat& src1, double val, Mat& dst)
2525     minmax( src1, val, dst, 'M' );
2529 template<typename _Tp> static void
2530 muldiv_(const _Tp* src1, const _Tp* src2, _Tp* dst, size_t total, double scale, char op)
2533 for( size_t i = 0; i < total; i++ )
2534 dst[i] = saturate_cast<_Tp>((scale*src1[i])*src2[i]);
2536 for( size_t i = 0; i < total; i++ )
2537 dst[i] = src2[i] ? saturate_cast<_Tp>((scale*src1[i])/src2[i]) : 0;
2539 for( size_t i = 0; i < total; i++ )
2540 dst[i] = src2[i] ? saturate_cast<_Tp>(scale/src2[i]) : 0;
// Shared worker for reference multiply/divide. src1 may be empty, in which
// case muldiv_ receives a null first pointer and computes scale/src2 — hence
// all sizing is taken from src2 (planes[1]).
2543 static void muldiv(const Mat& src1, const Mat& src2, Mat& dst, double scale, char op)
2545     dst.create(src2.dims, src2.size, src2.type());
2546     CV_Assert( src1.empty() || (src1.type() == src2.type() && src1.size == src2.size) );
2547     const Mat *arrays[]={&src1, &src2, &dst, 0};
2550     NAryMatIterator it(arrays, planes);
2551     size_t total = planes[1].total()*planes[1].channels();
2552     size_t i, nplanes = it.nplanes, depth = src2.depth();
2554     for( i = 0; i < nplanes; i++, ++it )
2556         const uchar* sptr1 = planes[0].ptr();
2557         const uchar* sptr2 = planes[1].ptr();
2558         uchar* dptr = planes[2].ptr();
2563             muldiv_((const uchar*)sptr1, (const uchar*)sptr2, (uchar*)dptr, total, scale, op);
2566             muldiv_((const schar*)sptr1, (const schar*)sptr2, (schar*)dptr, total, scale, op);
2569             muldiv_((const ushort*)sptr1, (const ushort*)sptr2, (ushort*)dptr, total, scale, op);
2572             muldiv_((const short*)sptr1, (const short*)sptr2, (short*)dptr, total, scale, op);
2575             muldiv_((const int*)sptr1, (const int*)sptr2, (int*)dptr, total, scale, op);
2578             muldiv_((const float*)sptr1, (const float*)sptr2, (float*)dptr, total, scale, op);
2581             muldiv_((const double*)sptr1, (const double*)sptr2, (double*)dptr, total, scale, op);
2584             CV_Error(Error::StsUnsupportedFormat, "");
// Public reference element-wise scaled multiply/divide over muldiv().
2590 void multiply(const Mat& src1, const Mat& src2, Mat& dst, double scale)
2592     muldiv( src1, src2, dst, scale, '*' );
2595 void divide(const Mat& src1, const Mat& src2, Mat& dst, double scale)
2597     muldiv( src1, src2, dst, scale, '/' );
// Accumulates per-channel sums into 'sum' and the count of contributing
// pixels into 'nz'. With no mask all pixels contribute; with a mask, 'total'
// is the pixel count and only pixels with a non-zero mask byte contribute.
2601 template<typename _Tp> static void
2602 mean_(const _Tp* src, const uchar* mask, size_t total, int cn, Scalar& sum, int& nz)
     // unmasked path: 'total' counts individual channel values, step by cn
2608         for( size_t i = 0; i < total; i += cn )
2610             for( int c = 0; c < cn; c++ )
2611                 sum[c] += src[i + c];
     // masked path: 'i' indexes pixels, mask has one byte per pixel
2616         for( size_t i = 0; i < total; i++ )
2620                 for( int c = 0; c < cn; c++ )
2621                     sum[c] += src[i*cn + c];
// Reference cv::mean: per-channel average of the (optionally masked) array.
// Returns Scalar() semantics of cv::mean — sum divided by the number of
// counted pixels (at least 1 to avoid division by zero).
2626 Scalar mean(const Mat& src, const Mat& mask)
2628     CV_Assert(mask.empty() || (mask.type() == CV_8U && mask.size == src.size));
2632     const Mat *arrays[]={&src, &mask, 0};
2635     NAryMatIterator it(arrays, planes);
2636     size_t total = planes[0].total();
2637     size_t i, nplanes = it.nplanes;
2638     int depth = src.depth(), cn = src.channels();
2640     for( i = 0; i < nplanes; i++, ++it )
2642         const uchar* sptr = planes[0].ptr();
2643         const uchar* mptr = planes[1].ptr();
2648             mean_((const uchar*)sptr, mptr, total, cn, sum, nz);
2651             mean_((const schar*)sptr, mptr, total, cn, sum, nz);
2654             mean_((const ushort*)sptr, mptr, total, cn, sum, nz);
2657             mean_((const short*)sptr, mptr, total, cn, sum, nz);
2660             mean_((const int*)sptr, mptr, total, cn, sum, nz);
2663             mean_((const float*)sptr, mptr, total, cn, sum, nz);
2666             mean_((const double*)sptr, mptr, total, cn, sum, nz);
2669             CV_Error(Error::StsUnsupportedFormat, "");
2673     return sum * (1./std::max(nz, 1));
// Replaces near-zero elements (|x| < level) of a CV_32F/CV_64F matrix
// in place, so later divisions/logs on the data are well-defined.
2677 void patchZeros( Mat& mat, double level )
2679     int j, ncols = mat.cols * mat.channels();
2680     int depth = mat.depth();
2681     CV_Assert( depth == CV_32F || depth == CV_64F );
2683     for( int i = 0; i < mat.rows; i++ )
2685         if( depth == CV_32F )
2687             float* data = mat.ptr<float>(i);
2688             for( j = 0; j < ncols; j++ )
2689                 if( fabs(data[j]) < level )
2694             double* data = mat.ptr<double>(i);
2695             for( j = 0; j < ncols; j++ )
2696                 if( fabs(data[j]) < level )
// Builds a 1-D derivative kernel of the requested 'order' and length 'size'.
// A negative aperture selects the fixed 3-tap Scharr kernels; otherwise the
// kernel is built from Pascal's triangle (repeated smoothing convolutions)
// followed by 'order' discrete differentiations.
2703 static void calcSobelKernel1D( int order, int _aperture_size, int size, vector<int>& kernel )
2705     int i, j, oldval, newval;
2706     kernel.resize(size + 1);
2708     if( _aperture_size < 0 )
2710         static const int scharr[] = { 3, 10, 3, -1, 0, 1 };
2711         assert( size == 3 );
2712         for( i = 0; i < size; i++ )
2713             kernel[i] = scharr[order*3 + i];
2717     for( i = 1; i <= size; i++ )
     // smoothing passes: convolve with [1 1]
2721     for( i = 0; i < size - order - 1; i++ )
2724         for( j = 1; j <= size; j++ )
2726             newval = kernel[j] + kernel[j-1];
2727             kernel[j-1] = oldval;
     // differentiation passes: convolve with [-1 1]
2732     for( i = 0; i < order; i++ )
2734         oldval = -kernel[0];
2735         for( j = 1; j <= size; j++ )
2737             newval = kernel[j-1] - kernel[j];
2738             kernel[j-1] = oldval;
// Builds the 2-D Sobel/Scharr kernel as the outer product of the two 1-D
// derivative kernels (dx along columns, dy along rows). A non-zero 'origin'
// flips the sign of odd vertical derivatives to match flipped-image layouts.
2745 Mat calcSobelKernel2D( int dx, int dy, int _aperture_size, int origin )
2747     CV_Assert( (_aperture_size == -1 || (_aperture_size >= 1 && _aperture_size % 2 == 1)) &&
2748                dx >= 0 && dy >= 0 && dx + dy <= 3 );
2749     Size ksize = _aperture_size == -1 ? Size(3,3) : _aperture_size > 1 ?
2750         Size(_aperture_size, _aperture_size) : dx > 0 ? Size(3, 1) : Size(1, 3);
2752     Mat kernel(ksize, CV_32F);
2755     calcSobelKernel1D( dx, _aperture_size, ksize.width, kx );
2756     calcSobelKernel1D( dy, _aperture_size, ksize.height, ky );
2758     for( int i = 0; i < kernel.rows; i++ )
2760         float ay = (float)ky[i]*(origin && (dy & 1) ? -1 : 1);
2761         for( int j = 0; j < kernel.cols; j++ )
2762             kernel.at<float>(i, j) = kx[j]*ay;
// Builds the 2-D Laplacian kernel d2/dx2 + d2/dy2 from 1-D second-derivative
// and smoothing kernels; aperture_size == 1 uses the minimal 3x3 stencil
// (smoothing kernel reduced to the identity [0 1 0]).
2769 Mat calcLaplaceKernel2D( int aperture_size )
2771     int ksize = aperture_size == 1 ? 3 : aperture_size;
2772     Mat kernel(ksize, ksize, CV_32F);
2776     calcSobelKernel1D( 2, aperture_size, ksize, kx );
2777     if( aperture_size > 1 )
2778         calcSobelKernel1D( 0, aperture_size, ksize, ky );
2782         ky[0] = ky[2] = 0; ky[1] = 1;
2785     for( int i = 0; i < ksize; i++ )
2786         for( int j = 0; j < ksize; j++ )
2787             kernel.at<float>(i, j) = (float)(kx[j]*ky[i] + kx[i]*ky[j]);
// Builds reference undistortion maps: for every destination pixel (u, v)
// the matching distorted source coordinate is computed from the camera
// matrix _a0 and distortion coefficients _k0 (k1, k2, p1, p2, k3) and
// written into the CV_32F maps _mapx/_mapy for use with cv::remap.
2793 void initUndistortMap( const Mat& _a0, const Mat& _k0, Size sz, Mat& _mapx, Mat& _mapy )
2795     _mapx.create(sz, CV_32F);
2796     _mapy.create(sz, CV_32F);
2798     double a[9], k[5]={0,0,0,0,0};
2799     Mat _a(3, 3, CV_64F, a);
2800     Mat _k(_k0.rows,_k0.cols, CV_MAKETYPE(CV_64F,_k0.channels()),k);
2801     double fx, fy, cx, cy, ifx, ify, cxn, cyn;
2803     _a0.convertTo(_a, CV_64F);
2804     _k0.convertTo(_k, CV_64F);
2805     fx = a[0]; fy = a[4]; cx = a[2]; cy = a[5];
2806     ifx = 1./fx; ify = 1./fy;
2810     for( int v = 0; v < sz.height; v++ )
2812         for( int u = 0; u < sz.width; u++ )
         // normalized camera coordinates of the destination pixel
2814             double x = (u - cxn)*ifx;
2815             double y = (v - cyn)*ify;
2816             double x2 = x*x, y2 = y*y;
2817             double r2 = x2 + y2;
         // radial (k1,k2,k3) and tangential (p1,p2) distortion model
2818             double cdist = 1 + (k[0] + (k[1] + k[4]*r2)*r2)*r2;
2819             double x1 = x*cdist + k[2]*2*x*y + k[3]*(r2 + 2*x2);
2820             double y1 = y*cdist + k[3]*2*x*y + k[2]*(r2 + 2*y2);
2822             _mapy.at<float>(v, u) = (float)(y1*fy + cy);
2823             _mapx.at<float>(v, u) = (float)(x1*fx + cx);
// Streams a short human-readable description of a matrix ("32fC3 2-dim
// (480 x 640)") for use in test failure messages.
2829 std::ostream& operator << (std::ostream& out, const MatInfo& m)
2831     if( !m.m || m.m->empty() )
2835         static const char* depthstr[] = {"8u", "8s", "16u", "16s", "32s", "32f", "64f", "?"};
2836         out << depthstr[m.m->depth()] << "C" << m.m->channels() << " " << m.m->dims << "-dim (";
2837         for( int i = 0; i < m.m->dims; i++ )
2838             out << m.m->size[i] << (i < m.m->dims-1 ? " x " : ")");
// Extracts the sub-matrix of m around position ofs0 with the given border
// (clipped at the array edges) and returns the position of ofs0 inside the
// extracted patch via 'ofs'. Used to print the neighborhood of a failing
// element in MatComparator.
2844 static Mat getSubArray(const Mat& m, int border, vector<int>& ofs0, vector<int>& ofs)
2846     ofs.resize(ofs0.size());
2849         std::copy(ofs0.begin(), ofs0.end(), ofs.begin());
2853     CV_Assert(d == (int)ofs.size());
2855     for( i = 0; i < d; i++ )
2857         r[i].start = std::max(0, ofs0[i] - border);
2858         r[i].end = std::min(ofs0[i] + 1 + border, m.size[i]);
2859         ofs[i] = std::min(ofs0[i], border);
// Streams 'nelems' values of type _Tp (printed as _WTp, e.g. uchar as int),
// comma-separated; 'starpos' marks one element as the highlighted (failing)
// position.
2864 template<typename _Tp, typename _WTp> static void
2865 writeElems(std::ostream& out, const void* data, int nelems, int starpos)
2867     for(int i = 0; i < nelems; i++)
2871         out << (_WTp)((_Tp*)data)[i];
2874         out << (i+1 < nelems ? ", " : "");
// Depth-dispatching wrapper over the typed writeElems<>; floating-point
// depths are printed with temporarily raised stream precision (restored
// afterwards via the saved 'pp').
2879 static void writeElems(std::ostream& out, const void* data, int nelems, int depth, int starpos)
2882         writeElems<uchar, int>(out, data, nelems, starpos);
2883     else if(depth == CV_8S)
2884         writeElems<schar, int>(out, data, nelems, starpos);
2885     else if(depth == CV_16U)
2886         writeElems<ushort, int>(out, data, nelems, starpos);
2887     else if(depth == CV_16S)
2888         writeElems<short, int>(out, data, nelems, starpos);
2889     else if(depth == CV_32S)
2890         writeElems<int, int>(out, data, nelems, starpos);
2891     else if(depth == CV_32F)
2893         std::streamsize pp = out.precision();
2895         writeElems<float, float>(out, data, nelems, starpos);
2898     else if(depth == CV_64F)
2900         std::streamsize pp = out.precision();
2902         writeElems<double, double>(out, data, nelems, starpos);
2906         CV_Error(Error::StsUnsupportedFormat, "");
// Lightweight printable view of a matrix patch; 'loc' (optional) is the
// in-patch position of the failing element to highlight when streaming.
2912     MatPart(const Mat& _m, const vector<int>* _loc)
2913     : m(&_m), loc(_loc) {}
2915     const vector<int>* loc;
// Streams a (at most 2-D) matrix patch row by row, forwarding the failing
// element's column to writeElems for the row that contains it.
2918 static std::ostream& operator << (std::ostream& out, const MatPart& m)
2920     CV_Assert( !m.loc || ((int)m.loc->size() == m.m->dims && m.m->dims <= 2) );
2925         int i, depth = m.m->depth(), cn = m.m->channels(), width = m.m->cols*cn;
2926         for( i = 0; i < m.m->rows; i++ )
2928             writeElems(out, m.m->ptr(i), width, depth, i == (*m.loc)[0] ? (*m.loc)[1] : -1);
2929             out << (i < m.m->rows-1 ? ";\n" : "");
// maxdiff: allowed relative difference; context: half-size of the
// neighborhood printed around a failing element.
2935 MatComparator::MatComparator(double _maxdiff, int _context)
2936     : maxdiff(_maxdiff), realmaxdiff(DBL_MAX), context(_context) {}
// GoogleTest predicate-formatter: succeeds when m1 and m2 match within
// maxdiff (element-wise relative error, via cmpEps); on failure prints
// both matrices' info and the neighborhood of the first offending element.
2938 ::testing::AssertionResult
2939 MatComparator::operator()(const char* expr1, const char* expr2,
2940                           const Mat& m1, const Mat& m2)
2942     if( m1.type() != m2.type() || m1.size != m2.size )
2943         return ::testing::AssertionFailure()
2944         << "The reference and the actual output arrays have different type or size:\n"
2945         << expr1 << " ~ " << MatInfo(m1) << "\n"
2946         << expr2 << " ~ " << MatInfo(m2) << "\n";
2948     //bool ok = cvtest::cmpUlps(m1, m2, maxdiff, &realmaxdiff, &loc0);
2949     int code = cmpEps( m1, m2, &realmaxdiff, maxdiff, &loc0, true);
2952         return ::testing::AssertionSuccess();
     // flatten to single-channel so loc0 indexes scalar elements
2954     Mat m[] = {m1.reshape(1,0), m2.reshape(1,0)};
2955     int dims = m[0].dims;
     // only 2-D arrays get a context window around the failing element
2957     int border = dims <= 2 ? context : 0;
2963         m1part = Mat(1, 1, m[0].depth(), m[0].ptr(&loc[0]));
2964         m2part = Mat(1, 1, m[1].depth(), m[1].ptr(&loc[0]));
2968         m1part = getSubArray(m[0], border, loc0, loc);
2969         m2part = getSubArray(m[1], border, loc0, loc);
2972     return ::testing::AssertionFailure()
2973     << "too big relative difference (" << realmaxdiff << " > "
2974     << maxdiff << ") between "
2975     << MatInfo(m1) << " '" << expr1 << "' and '" << expr2 << "' at " << Mat(loc0).t() << ".\n"
2976     << "- " << expr1 << ":\n" << MatPart(m1part, border > 0 ? &loc : 0) << ".\n"
2977     << "- " << expr2 << ":\n" << MatPart(m2part, border > 0 ? &loc : 0) << ".\n";
// Records OpenCV version, build configuration and runtime CPU/backend
// capabilities as GTest test properties (for CI dashboards) and, when
// useStdOut is true, echoes the same information to stdout.
// NOTE(review): this listing is lossy -- braces and most of the
// per-feature `#if CV_*` / `#endif` preprocessor guards are missing
// from this chunk; code lines are kept byte-identical.
2980 void printVersionInfo(bool useStdOut)
2982 // Tell CTest not to discard any output
2983 if(useStdOut) std::cout << "CTEST_FULL_OUTPUT" << std::endl;
2985 ::testing::Test::RecordProperty("cv_version", CV_VERSION);
2986 if(useStdOut) std::cout << "OpenCV version: " << CV_VERSION << std::endl;
2988 std::string buildInfo( cv::getBuildInformation() );
// Extract the VCS revision from the "Version control" line of the
// build-info dump: the value is the last space-separated token.
2990 size_t pos1 = buildInfo.find("Version control");
2991 size_t pos2 = buildInfo.find('\n', pos1);
2992 if(pos1 != std::string::npos && pos2 != std::string::npos)
2994 size_t value_start = buildInfo.rfind(' ', pos2) + 1;
2995 std::string ver( buildInfo.substr(value_start, pos2 - value_start) );
2996 ::testing::Test::RecordProperty("cv_vcs_version", ver);
2997 if (useStdOut) std::cout << "OpenCV VCS version: " << ver << std::endl;
// Same parse for the optional "inner version" line.
3000 pos1 = buildInfo.find("inner version");
3001 pos2 = buildInfo.find('\n', pos1);
3002 if(pos1 != std::string::npos && pos2 != std::string::npos)
3004 size_t value_start = buildInfo.rfind(' ', pos2) + 1;
3005 std::string ver( buildInfo.substr(value_start, pos2 - value_start) );
3006 ::testing::Test::RecordProperty("cv_inner_vcs_version", ver);
3007 if(useStdOut) std::cout << "Inner VCS version: " << ver << std::endl;
// Build type (the #if/#else lines selecting the value are missing from
// this listing).
3010 const char * build_type =
3017 ::testing::Test::RecordProperty("cv_build_type", build_type);
3018 if (useStdOut) std::cout << "Build type: " << build_type << std::endl;
3020 const char* parallel_framework = currentParallelFramework();
3022 if (parallel_framework) {
3023 ::testing::Test::RecordProperty("cv_parallel_framework", parallel_framework);
3024 if (useStdOut) std::cout << "Parallel framework: " << parallel_framework << std::endl;
// Build a space-separated list of CPU features detected at runtime.
// Each check below is compiled only when the corresponding CV_* macro
// is defined (guards missing from this listing, cf. #if at line 3077).
3027 std::string cpu_features;
3030 if (checkHardwareSupport(CV_CPU_POPCNT)) cpu_features += " popcnt";
3033 if (checkHardwareSupport(CV_CPU_MMX)) cpu_features += " mmx";
3036 if (checkHardwareSupport(CV_CPU_SSE)) cpu_features += " sse";
3039 if (checkHardwareSupport(CV_CPU_SSE2)) cpu_features += " sse2";
3042 if (checkHardwareSupport(CV_CPU_SSE3)) cpu_features += " sse3";
3045 if (checkHardwareSupport(CV_CPU_SSSE3)) cpu_features += " ssse3";
3048 if (checkHardwareSupport(CV_CPU_SSE4_1)) cpu_features += " sse4.1";
3051 if (checkHardwareSupport(CV_CPU_SSE4_2)) cpu_features += " sse4.2";
3054 if (checkHardwareSupport(CV_CPU_AVX)) cpu_features += " avx";
3057 if (checkHardwareSupport(CV_CPU_AVX2)) cpu_features += " avx2";
3060 if (checkHardwareSupport(CV_CPU_FMA3)) cpu_features += " fma3";
3063 if (checkHardwareSupport(CV_CPU_AVX_512F)) cpu_features += " avx-512f";
3066 if (checkHardwareSupport(CV_CPU_AVX_512BW)) cpu_features += " avx-512bw";
3069 if (checkHardwareSupport(CV_CPU_AVX_512CD)) cpu_features += " avx-512cd";
3072 if (checkHardwareSupport(CV_CPU_AVX_512DQ)) cpu_features += " avx-512dq";
3075 if (checkHardwareSupport(CV_CPU_AVX_512ER)) cpu_features += " avx-512er";
3077 #if CV_AVX_512IFMA512
3078 if (checkHardwareSupport(CV_CPU_AVX_512IFMA512)) cpu_features += " avx-512ifma512";
3081 if (checkHardwareSupport(CV_CPU_AVX_512PF)) cpu_features += " avx-512pf";
3084 if (checkHardwareSupport(CV_CPU_AVX_512VBMI)) cpu_features += " avx-512vbmi";
3087 if (checkHardwareSupport(CV_CPU_AVX_512VL)) cpu_features += " avx-512vl";
3090 if (checkHardwareSupport(CV_CPU_NEON)) cpu_features += " neon";
3093 if (checkHardwareSupport(CV_CPU_FP16)) cpu_features += " fp16";
3096 if (checkHardwareSupport(CV_CPU_VSX)) cpu_features += " VSX";
3099 cpu_features.erase(0, 1); // erase initial space
3101 ::testing::Test::RecordProperty("cv_cpu_features", cpu_features);
3102 if (useStdOut) std::cout << "CPU features: " << cpu_features << std::endl;
// Optional acceleration backends: Tegra and Intel IPP.
3104 #ifdef HAVE_TEGRA_OPTIMIZATION
3105 const char * tegra_optimization = tegra::useTegra() && tegra::isDeviceSupported() ? "enabled" : "disabled";
3106 ::testing::Test::RecordProperty("cv_tegra_optimization", tegra_optimization);
3107 if (useStdOut) std::cout << "Tegra optimization: " << tegra_optimization << std::endl;
3111 const char * ipp_optimization = cv::ipp::useIPP()? "enabled" : "disabled";
3112 ::testing::Test::RecordProperty("cv_ipp_optimization", ipp_optimization);
3113 if (useStdOut) std::cout << "Intel(R) IPP optimization: " << ipp_optimization << std::endl;
3115 cv::String ippVer = cv::ipp::getIppVersion();
3116 ::testing::Test::RecordProperty("cv_ipp_version", ippVer);
3117 if(useStdOut) std::cout << "Intel(R) IPP version: " << ippVer.c_str() << std::endl;
3123 void threshold( const Mat& _src, Mat& _dst,
3124 double thresh, double maxval, int thresh_type )
3127 int depth = _src.depth(), cn = _src.channels();
3128 int width_n = _src.cols*cn, height = _src.rows;
3129 int ithresh = cvFloor(thresh);
3130 int imaxval, ithresh2;
3132 if( depth == CV_8U )
3134 ithresh2 = saturate_cast<uchar>(ithresh);
3135 imaxval = saturate_cast<uchar>(maxval);
3137 else if( depth == CV_16S )
3139 ithresh2 = saturate_cast<short>(ithresh);
3140 imaxval = saturate_cast<short>(maxval);
3144 ithresh2 = cvRound(ithresh);
3145 imaxval = cvRound(maxval);
3148 assert( depth == CV_8U || depth == CV_16S || depth == CV_32F );
3150 switch( thresh_type )
3152 case CV_THRESH_BINARY:
3153 for( i = 0; i < height; i++ )
3155 if( depth == CV_8U )
3157 const uchar* src = _src.ptr<uchar>(i);
3158 uchar* dst = _dst.ptr<uchar>(i);
3159 for( j = 0; j < width_n; j++ )
3160 dst[j] = (uchar)(src[j] > ithresh ? imaxval : 0);
3162 else if( depth == CV_16S )
3164 const short* src = _src.ptr<short>(i);
3165 short* dst = _dst.ptr<short>(i);
3166 for( j = 0; j < width_n; j++ )
3167 dst[j] = (short)(src[j] > ithresh ? imaxval : 0);
3171 const float* src = _src.ptr<float>(i);
3172 float* dst = _dst.ptr<float>(i);
3173 for( j = 0; j < width_n; j++ )
3174 dst[j] = (float)((double)src[j] > thresh ? maxval : 0.f);
3178 case CV_THRESH_BINARY_INV:
3179 for( i = 0; i < height; i++ )
3181 if( depth == CV_8U )
3183 const uchar* src = _src.ptr<uchar>(i);
3184 uchar* dst = _dst.ptr<uchar>(i);
3185 for( j = 0; j < width_n; j++ )
3186 dst[j] = (uchar)(src[j] > ithresh ? 0 : imaxval);
3188 else if( depth == CV_16S )
3190 const short* src = _src.ptr<short>(i);
3191 short* dst = _dst.ptr<short>(i);
3192 for( j = 0; j < width_n; j++ )
3193 dst[j] = (short)(src[j] > ithresh ? 0 : imaxval);
3197 const float* src = _src.ptr<float>(i);
3198 float* dst = _dst.ptr<float>(i);
3199 for( j = 0; j < width_n; j++ )
3200 dst[j] = (float)((double)src[j] > thresh ? 0.f : maxval);
3204 case CV_THRESH_TRUNC:
3205 for( i = 0; i < height; i++ )
3207 if( depth == CV_8U )
3209 const uchar* src = _src.ptr<uchar>(i);
3210 uchar* dst = _dst.ptr<uchar>(i);
3211 for( j = 0; j < width_n; j++ )
3214 dst[j] = (uchar)(s > ithresh ? ithresh2 : s);
3217 else if( depth == CV_16S )
3219 const short* src = _src.ptr<short>(i);
3220 short* dst = _dst.ptr<short>(i);
3221 for( j = 0; j < width_n; j++ )
3224 dst[j] = (short)(s > ithresh ? ithresh2 : s);
3229 const float* src = _src.ptr<float>(i);
3230 float* dst = _dst.ptr<float>(i);
3231 for( j = 0; j < width_n; j++ )
3234 dst[j] = (float)(s > thresh ? thresh : s);
3239 case CV_THRESH_TOZERO:
3240 for( i = 0; i < height; i++ )
3242 if( depth == CV_8U )
3244 const uchar* src = _src.ptr<uchar>(i);
3245 uchar* dst = _dst.ptr<uchar>(i);
3246 for( j = 0; j < width_n; j++ )
3249 dst[j] = (uchar)(s > ithresh ? s : 0);
3252 else if( depth == CV_16S )
3254 const short* src = _src.ptr<short>(i);
3255 short* dst = _dst.ptr<short>(i);
3256 for( j = 0; j < width_n; j++ )
3259 dst[j] = (short)(s > ithresh ? s : 0);
3264 const float* src = _src.ptr<float>(i);
3265 float* dst = _dst.ptr<float>(i);
3266 for( j = 0; j < width_n; j++ )
3269 dst[j] = s > thresh ? s : 0.f;
3274 case CV_THRESH_TOZERO_INV:
3275 for( i = 0; i < height; i++ )
3277 if( depth == CV_8U )
3279 const uchar* src = _src.ptr<uchar>(i);
3280 uchar* dst = _dst.ptr<uchar>(i);
3281 for( j = 0; j < width_n; j++ )
3284 dst[j] = (uchar)(s > ithresh ? 0 : s);
3287 else if( depth == CV_16S )
3289 const short* src = _src.ptr<short>(i);
3290 short* dst = _dst.ptr<short>(i);
3291 for( j = 0; j < width_n; j++ )
3294 dst[j] = (short)(s > ithresh ? 0 : s);
3299 const float* src = _src.ptr<float>(i);
3300 float* dst = _dst.ptr<float>(i);
3301 for( j = 0; j < width_n; j++ )
3304 dst[j] = s > thresh ? 0.f : s;
// Finds the minimum and maximum of a flat float array, optionally
// restricted to elements with a non-zero mask byte. The flat index of
// each extremum is reported offset by 'startIdx' (callers use this to
// make indices 1-based or to process the array in chunks).
// Output pointers may be NULL; if every element is masked out the
// results stay at their FLT_MAX / -FLT_MAX sentinels with index 0.
// The lossy listing had dropped the function header, the mask/no-mask
// branch and the output writes; they are restored here.
static void
_minMaxIdx( const float* src, const uchar* mask, double* _minVal, double* _maxVal,
            size_t* _minIdx, size_t* _maxIdx, int len, size_t startIdx )
{
    double minVal = FLT_MAX, maxVal = -FLT_MAX;
    size_t minIdx = 0, maxIdx = 0;

    if( !mask )
    {
        for( int i = 0; i < len; i++ )
        {
            float val = src[i];
            if( val < minVal )
            {
                minVal = val;
                minIdx = startIdx + i;
            }
            if( val > maxVal )
            {
                maxVal = val;
                maxIdx = startIdx + i;
            }
        }
    }
    else
    {
        for( int i = 0; i < len; i++ )
        {
            float val = src[i];
            if( mask[i] && val < minVal )
            {
                minVal = val;
                minIdx = startIdx + i;
            }
            if( mask[i] && val > maxVal )
            {
                maxVal = val;
                maxIdx = startIdx + i;
            }
        }
    }

    if( _minVal )
        *_minVal = minVal;
    if( _maxVal )
        *_maxVal = maxVal;
    if( _minIdx )
        *_minIdx = minIdx;
    if( _maxIdx )
        *_maxIdx = maxIdx;
}
// Test-framework wrapper: finds the global min/max values and their
// locations in a 2D array, optionally restricted by a mask.
// NOTE(review): the listing is lossy -- the `if(minLoc)` / `if(maxLoc)`
// guards that presumably precede the swaps, and any element-type
// assertion, are missing from this chunk; code lines are byte-identical.
3368 void minMaxIdx( InputArray _img, double* minVal, double* maxVal,
3369                Point* minLoc, Point* maxLoc, InputArray _mask )
3371 Mat img = _img.getMat();
3372 Mat mask = _mask.getMat();
// Only 2D data is supported. The raw-pointer call below reads the data
// as a flat float array, which assumes a continuous CV_32F matrix --
// TODO confirm callers guarantee this.
3373 CV_Assert(img.dims <= 2);
// NOTE(review): casting Point* (two 32-bit ints) to size_t* type-puns
// the flat element index across the x/y fields on LP64 targets, and
// startIdx=1 makes the stored index 1-based. This looks deliberate but
// fragile -- verify against the upstream file before relying on it.
3375 _minMaxIdx((const float*)img.data, mask.data, minVal, maxVal, (size_t*)minLoc, (size_t*)maxLoc, (int)img.total(),1);
// Swap the punned halves so the reported location matches the expected
// (x,y) convention -- the exact convention cannot be confirmed from
// this chunk.
3377 std::swap(minLoc->x, minLoc->y);
3379 std::swap(maxLoc->x, maxLoc->y);