1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 //   * Redistributions of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 //   * Redistributions in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "precomp.hpp"
46 * This file includes code contributed by Simon Perreault
47 * (the function medianBlur_8u_O1)
49 * Constant-time median filtering -- http://nomis80.org/ctmf.html
50 * Copyright (C) 2006 Simon Perreault
53 * Laboratoire de vision et systemes numeriques
54 * Pavillon Adrien-Pouliot
56 * Sainte-Foy, Quebec, Canada
59 * perreaul@gel.ulaval.ca
65 /****************************************************************************************\
66                                          Box Filter
67 \****************************************************************************************/
69 template<typename T, typename ST> struct RowSum : public BaseRowFilter
71 RowSum( int _ksize, int _anchor )
77 void operator()(const uchar* src, uchar* dst, int width, int cn)
79 const T* S = (const T*)src;
81 int i = 0, k, ksz_cn = ksize*cn;
83 width = (width - 1)*cn;
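// Running-sum scheme: the first ksize samples of each channel seed the window
// sum; after that every output is obtained by adding the sample entering the
// window and subtracting the one leaving it, i.e. O(1) work per output pixel.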
84 for( k = 0; k < cn; k++, S++, D++ )
87 for( i = 0; i < ksz_cn; i += cn )
90 for( i = 0; i < width; i += cn )
92 s += S[i + ksz_cn] - S[i];
100 template<typename ST, typename T> struct ColumnSum : public BaseColumnFilter
102 ColumnSum( int _ksize, int _anchor, double _scale )
110 void reset() { sumCount = 0; }
112 void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
116 bool haveScale = scale != 1;
117 double _scale = scale;
119 if( width != (int)sum.size() )
128 for( i = 0; i < width; i++ )
130 for( ; sumCount < ksize - 1; sumCount++, src++ )
132 const ST* Sp = (const ST*)src[0];
133 for( i = 0; i <= width - 2; i += 2 )
135 ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1];
136 SUM[i] = s0; SUM[i+1] = s1;
139 for( ; i < width; i++ )
145 CV_Assert( sumCount == ksize-1 );
149 for( ; count--; src++ )
151 const ST* Sp = (const ST*)src[0];
152 const ST* Sm = (const ST*)src[1-ksize];
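// Sp is the newest source row being added to the column sums; Sm is the oldest
// row of the window (ksize-1 rows back), subtracted once the output row is written.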
156 for( i = 0; i <= width - 2; i += 2 )
158 ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1];
159 D[i] = saturate_cast<T>(s0*_scale);
160 D[i+1] = saturate_cast<T>(s1*_scale);
161 s0 -= Sm[i]; s1 -= Sm[i+1];
162 SUM[i] = s0; SUM[i+1] = s1;
165 for( ; i < width; i++ )
167 ST s0 = SUM[i] + Sp[i];
168 D[i] = saturate_cast<T>(s0*_scale);
174 for( i = 0; i <= width - 2; i += 2 )
176 ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1];
177 D[i] = saturate_cast<T>(s0);
178 D[i+1] = saturate_cast<T>(s1);
179 s0 -= Sm[i]; s1 -= Sm[i+1];
180 SUM[i] = s0; SUM[i+1] = s1;
183 for( ; i < width; i++ )
185 ST s0 = SUM[i] + Sp[i];
186 D[i] = saturate_cast<T>(s0);
202 cv::Ptr<cv::BaseRowFilter> cv::getRowSumFilter(int srcType, int sumType, int ksize, int anchor)
204 int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
205 CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(srcType) );
210 if( sdepth == CV_8U && ddepth == CV_32S )
211 return Ptr<BaseRowFilter>(new RowSum<uchar, int>(ksize, anchor));
212 if( sdepth == CV_8U && ddepth == CV_64F )
213 return Ptr<BaseRowFilter>(new RowSum<uchar, double>(ksize, anchor));
214 if( sdepth == CV_16U && ddepth == CV_32S )
215 return Ptr<BaseRowFilter>(new RowSum<ushort, int>(ksize, anchor));
216 if( sdepth == CV_16U && ddepth == CV_64F )
217 return Ptr<BaseRowFilter>(new RowSum<ushort, double>(ksize, anchor));
218 if( sdepth == CV_16S && ddepth == CV_32S )
219 return Ptr<BaseRowFilter>(new RowSum<short, int>(ksize, anchor));
220 if( sdepth == CV_32S && ddepth == CV_32S )
221 return Ptr<BaseRowFilter>(new RowSum<int, int>(ksize, anchor));
222 if( sdepth == CV_16S && ddepth == CV_64F )
223 return Ptr<BaseRowFilter>(new RowSum<short, double>(ksize, anchor));
224 if( sdepth == CV_32F && ddepth == CV_64F )
225 return Ptr<BaseRowFilter>(new RowSum<float, double>(ksize, anchor));
226 if( sdepth == CV_64F && ddepth == CV_64F )
227 return Ptr<BaseRowFilter>(new RowSum<double, double>(ksize, anchor));
229 CV_Error_( CV_StsNotImplemented,
230 ("Unsupported combination of source format (=%d), and buffer format (=%d)",
233 return Ptr<BaseRowFilter>(0);
237 cv::Ptr<cv::BaseColumnFilter> cv::getColumnSumFilter(int sumType, int dstType, int ksize,
238 int anchor, double scale)
240 int sdepth = CV_MAT_DEPTH(sumType), ddepth = CV_MAT_DEPTH(dstType);
241 CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(dstType) );
246 if( ddepth == CV_8U && sdepth == CV_32S )
247 return Ptr<BaseColumnFilter>(new ColumnSum<int, uchar>(ksize, anchor, scale));
248 if( ddepth == CV_8U && sdepth == CV_64F )
249 return Ptr<BaseColumnFilter>(new ColumnSum<double, uchar>(ksize, anchor, scale));
250 if( ddepth == CV_16U && sdepth == CV_32S )
251 return Ptr<BaseColumnFilter>(new ColumnSum<int, ushort>(ksize, anchor, scale));
252 if( ddepth == CV_16U && sdepth == CV_64F )
253 return Ptr<BaseColumnFilter>(new ColumnSum<double, ushort>(ksize, anchor, scale));
254 if( ddepth == CV_16S && sdepth == CV_32S )
255 return Ptr<BaseColumnFilter>(new ColumnSum<int, short>(ksize, anchor, scale));
256 if( ddepth == CV_16S && sdepth == CV_64F )
257 return Ptr<BaseColumnFilter>(new ColumnSum<double, short>(ksize, anchor, scale));
258 if( ddepth == CV_32S && sdepth == CV_32S )
259 return Ptr<BaseColumnFilter>(new ColumnSum<int, int>(ksize, anchor, scale));
260 if( ddepth == CV_32F && sdepth == CV_32S )
261 return Ptr<BaseColumnFilter>(new ColumnSum<int, float>(ksize, anchor, scale));
262 if( ddepth == CV_32F && sdepth == CV_64F )
263 return Ptr<BaseColumnFilter>(new ColumnSum<double, float>(ksize, anchor, scale));
264 if( ddepth == CV_64F && sdepth == CV_32S )
265 return Ptr<BaseColumnFilter>(new ColumnSum<int, double>(ksize, anchor, scale));
266 if( ddepth == CV_64F && sdepth == CV_64F )
267 return Ptr<BaseColumnFilter>(new ColumnSum<double, double>(ksize, anchor, scale));
269 CV_Error_( CV_StsNotImplemented,
270 ("Unsupported combination of sum format (=%d), and destination format (=%d)",
273 return Ptr<BaseColumnFilter>(0);
277 cv::Ptr<cv::FilterEngine> cv::createBoxFilter( int srcType, int dstType, Size ksize,
278 Point anchor, bool normalize, int borderType )
280 int sdepth = CV_MAT_DEPTH(srcType);
281 int cn = CV_MAT_CN(srcType), sumType = CV_64F;
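// CV_64F accumulation is the safe default; the check below switches to 32-bit
// integer sums when they cannot overflow (for 8-bit input, 255 * 2^23 ~= 2.14e9
// still fits in a signed 32-bit int, hence the 1<<23 area bound).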
282 if( sdepth <= CV_32S && (!normalize ||
283 ksize.width*ksize.height <= (sdepth == CV_8U ? (1<<23) :
284 sdepth == CV_16U ? (1 << 15) : (1 << 16))) )
286 sumType = CV_MAKETYPE( sumType, cn );
288 Ptr<BaseRowFilter> rowFilter = getRowSumFilter(srcType, sumType, ksize.width, anchor.x );
289 Ptr<BaseColumnFilter> columnFilter = getColumnSumFilter(sumType,
290 dstType, ksize.height, anchor.y, normalize ? 1./(ksize.width*ksize.height) : 1);
292 return Ptr<FilterEngine>(new FilterEngine(Ptr<BaseFilter>(0), rowFilter, columnFilter,
293 srcType, dstType, sumType, borderType ));
297 void cv::boxFilter( InputArray _src, OutputArray _dst, int ddepth,
298 Size ksize, Point anchor,
299 bool normalize, int borderType )
301 Mat src = _src.getMat();
302 int sdepth = src.depth(), cn = src.channels();
305 _dst.create( src.size(), CV_MAKETYPE(ddepth, cn) );
306 Mat dst = _dst.getMat();
307 if( borderType != BORDER_CONSTANT && normalize )
314 #ifdef HAVE_TEGRA_OPTIMIZATION
315 if ( tegra::box(src, dst, ksize, anchor, normalize, borderType) )
319 Ptr<FilterEngine> f = createBoxFilter( src.type(), dst.type(),
320 ksize, anchor, normalize, borderType );
321 f->apply( src, dst );
324 void cv::blur( InputArray src, OutputArray dst,
325 Size ksize, Point anchor, int borderType )
327 boxFilter( src, dst, -1, ksize, anchor, true, borderType );
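/*
 * Minimal usage sketch (illustrative only, not part of the original file):
 *
 *     cv::Mat img(480, 640, CV_8UC3, cv::Scalar::all(0)), out1, out2;
 *     cv::blur(img, out1, cv::Size(5, 5));                        // normalized box filter
 *     cv::boxFilter(img, out2, -1, cv::Size(5, 5),
 *                   cv::Point(-1, -1), true, cv::BORDER_DEFAULT); // equivalent call
 */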
330 /****************************************************************************************\
331                                       Gaussian Blur
332 \****************************************************************************************/
334 cv::Mat cv::getGaussianKernel( int n, double sigma, int ktype )
336 const int SMALL_GAUSSIAN_SIZE = 7;
337 static const float small_gaussian_tab[][SMALL_GAUSSIAN_SIZE] =
340 {0.25f, 0.5f, 0.25f},
341 {0.0625f, 0.25f, 0.375f, 0.25f, 0.0625f},
342 {0.03125f, 0.109375f, 0.21875f, 0.28125f, 0.21875f, 0.109375f, 0.03125f}
345 const float* fixed_kernel = n % 2 == 1 && n <= SMALL_GAUSSIAN_SIZE && sigma <= 0 ?
346 small_gaussian_tab[n>>1] : 0;
348 CV_Assert( ktype == CV_32F || ktype == CV_64F );
349 Mat kernel(n, 1, ktype);
350 float* cf = (float*)kernel.data;
351 double* cd = (double*)kernel.data;
353 double sigmaX = sigma > 0 ? sigma : ((n-1)*0.5 - 1)*0.3 + 0.8;
354 double scale2X = -0.5/(sigmaX*sigmaX);
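// When sigma <= 0 it is derived from the aperture size, e.g. for n = 7:
// sigmaX = ((7-1)*0.5 - 1)*0.3 + 0.8 = 1.4.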
358 for( i = 0; i < n; i++ )
360 double x = i - (n-1)*0.5;
361 double t = fixed_kernel ? (double)fixed_kernel[i] : std::exp(scale2X*x*x);
362 if( ktype == CV_32F )
375 for( i = 0; i < n; i++ )
377 if( ktype == CV_32F )
378 cf[i] = (float)(cf[i]*sum);
387 cv::Ptr<cv::FilterEngine> cv::createGaussianFilter( int type, Size ksize,
388 double sigma1, double sigma2,
391 int depth = CV_MAT_DEPTH(type);
395 // automatic detection of kernel size from sigma
396 if( ksize.width <= 0 && sigma1 > 0 )
397 ksize.width = cvRound(sigma1*(depth == CV_8U ? 3 : 4)*2 + 1)|1;
398 if( ksize.height <= 0 && sigma2 > 0 )
399 ksize.height = cvRound(sigma2*(depth == CV_8U ? 3 : 4)*2 + 1)|1;
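// e.g. for an 8-bit image with sigma1 = 2: cvRound(2*3*2 + 1) | 1 = 13, so the
// kernel extends roughly 3*sigma to each side.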
401 CV_Assert( ksize.width > 0 && ksize.width % 2 == 1 &&
402 ksize.height > 0 && ksize.height % 2 == 1 );
404 sigma1 = std::max( sigma1, 0. );
405 sigma2 = std::max( sigma2, 0. );
407 Mat kx = getGaussianKernel( ksize.width, sigma1, std::max(depth, CV_32F) );
409 if( ksize.height == ksize.width && std::abs(sigma1 - sigma2) < DBL_EPSILON )
412 ky = getGaussianKernel( ksize.height, sigma2, std::max(depth, CV_32F) );
414 return createSeparableLinearFilter( type, type, kx, ky, Point(-1,-1), 0, borderType );
418 void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize,
419 double sigma1, double sigma2,
422 Mat src = _src.getMat();
423 _dst.create( src.size(), src.type() );
424 Mat dst = _dst.getMat();
426 if( borderType != BORDER_CONSTANT )
434 if( ksize.width == 1 && ksize.height == 1 )
440 #ifdef HAVE_TEGRA_OPTIMIZATION
441 if(sigma1 == 0 && sigma2 == 0 && tegra::gaussian(src, dst, ksize, borderType))
445 Ptr<FilterEngine> f = createGaussianFilter( src.type(), ksize, sigma1, sigma2, borderType );
446 f->apply( src, dst );
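/*
 * Minimal usage sketch (illustrative only, not part of the original file):
 *
 *     cv::Mat img(480, 640, CV_8UC1, cv::Scalar::all(0)), out;
 *     cv::GaussianBlur(img, out, cv::Size(5, 5), 1.5);   // sigmaY defaults to sigmaX
 *     cv::GaussianBlur(img, out, cv::Size(), 2.0);       // kernel size derived from sigma
 */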
450 /****************************************************************************************\
451                                       Median Filter
452 \****************************************************************************************/
459 * This structure represents a two-tier histogram. The first tier (known as the
460 * "coarse" level) is 4 bit wide and the second tier (known as the "fine" level)
461 * is 8 bit wide. Pixels inserted in the fine level also get inserted into the
462 * coarse bucket designated by the 4 MSBs of the fine bucket value.
464 * The structure is aligned on 16 bytes, which is a prerequisite for SIMD
465 * instructions. Each bucket is 16 bits wide, which means that extra care must be
466 * taken to prevent overflow.
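*
* Example: a pixel of value 0xB7 (183) increments coarse bucket 0xB (the value's
* 4 MSBs) and fine bucket 0x7 of the 16-entry fine histogram selected by that
* coarse index; a median search therefore first locates the right coarse bucket
* and then scans only its 16 fine entries.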
476 #define MEDIAN_HAVE_SIMD 1
478 static inline void histogram_add_simd( const HT x[16], HT y[16] )
480 const __m128i* rx = (const __m128i*)x;
481 __m128i* ry = (__m128i*)y;
482 __m128i r0 = _mm_add_epi16(_mm_load_si128(ry+0),_mm_load_si128(rx+0));
483 __m128i r1 = _mm_add_epi16(_mm_load_si128(ry+1),_mm_load_si128(rx+1));
484 _mm_store_si128(ry+0, r0);
485 _mm_store_si128(ry+1, r1);
488 static inline void histogram_sub_simd( const HT x[16], HT y[16] )
490 const __m128i* rx = (const __m128i*)x;
491 __m128i* ry = (__m128i*)y;
492 __m128i r0 = _mm_sub_epi16(_mm_load_si128(ry+0),_mm_load_si128(rx+0));
493 __m128i r1 = _mm_sub_epi16(_mm_load_si128(ry+1),_mm_load_si128(rx+1));
494 _mm_store_si128(ry+0, r0);
495 _mm_store_si128(ry+1, r1);
499 #define MEDIAN_HAVE_SIMD 0
503 static inline void histogram_add( const HT x[16], HT y[16] )
506 for( i = 0; i < 16; ++i )
507 y[i] = (HT)(y[i] + x[i]);
510 static inline void histogram_sub( const HT x[16], HT y[16] )
513 for( i = 0; i < 16; ++i )
514 y[i] = (HT)(y[i] - x[i]);
517 static inline void histogram_muladd( int a, const HT x[16],
520 for( int i = 0; i < 16; ++i )
521 y[i] = (HT)(y[i] + a * x[i]);
525 medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
528 * HOP is short for Histogram OPeration. This macro applies the operation \a op to
529 * histogram \a h for pixel value \a x. It takes care of handling both levels.
531 #define HOP(h,x,op) \
533 *((HT*)h.fine + x) op
535 #define COP(c,j,x,op) \
536 h_coarse[ 16*(n*c+j) + (x>>4) ] op, \
537 h_fine[ 16 * (n*(16*c+(x>>4)) + j) + (x & 0xF) ] op
539 int cn = _dst.channels(), m = _dst.rows, r = (ksize-1)/2;
540 size_t sstep = _src.step, dstep = _dst.step;
541 Histogram CV_DECL_ALIGNED(16) H[4];
542 HT CV_DECL_ALIGNED(16) luc[4][16];
544 int STRIPE_SIZE = std::min( _dst.cols, 512/cn );
546 vector<HT> _h_coarse(1 * 16 * (STRIPE_SIZE + 2*r) * cn + 16);
547 vector<HT> _h_fine(16 * 16 * (STRIPE_SIZE + 2*r) * cn + 16);
548 HT* h_coarse = alignPtr(&_h_coarse[0], 16);
549 HT* h_fine = alignPtr(&_h_fine[0], 16);
551 volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
554 for( int x = 0; x < _dst.cols; x += STRIPE_SIZE )
556 int i, j, k, c, n = std::min(_dst.cols - x, STRIPE_SIZE) + r*2;
557 const uchar* src = _src.data + x*cn;
558 uchar* dst = _dst.data + (x - r)*cn;
560 memset( h_coarse, 0, 16*n*cn*sizeof(h_coarse[0]) );
561 memset( h_fine, 0, 16*16*n*cn*sizeof(h_fine[0]) );
563 // First row initialization
564 for( c = 0; c < cn; c++ )
566 for( j = 0; j < n; j++ )
567 COP( c, j, src[cn*j+c], += (cv::HT)(r+2) );
569 for( i = 1; i < r; i++ )
571 const uchar* p = src + sstep*std::min(i, m-1);
572 for ( j = 0; j < n; j++ )
573 COP( c, j, p[cn*j+c], ++ );
577 for( i = 0; i < m; i++ )
579 const uchar* p0 = src + sstep * std::max( 0, i-r-1 );
580 const uchar* p1 = src + sstep * std::min( m-1, i+r );
582 memset( H, 0, cn*sizeof(H[0]) );
583 memset( luc, 0, cn*sizeof(luc[0]) );
584 for( c = 0; c < cn; c++ )
586 // Update column histograms for the entire row.
587 for( j = 0; j < n; j++ )
589 COP( c, j, p0[j*cn + c], -- );
590 COP( c, j, p1[j*cn + c], ++ );
593 // First column initialization
594 for( k = 0; k < 16; ++k )
595 histogram_muladd( 2*r+1, &h_fine[16*n*(16*c+k)], &H[c].fine[k][0] );
600 for( j = 0; j < 2*r; ++j )
601 histogram_add_simd( &h_coarse[16*(n*c+j)], H[c].coarse );
603 for( j = r; j < n-r; j++ )
605 int t = 2*r*r + 2*r, b, sum = 0;
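// t is the zero-based rank of the median within the (2r+1)x(2r+1) window:
// ((2r+1)^2 - 1)/2 = 2*r*r + 2*r, e.g. 12 (the 13th of 25 samples) for r = 2.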
608 histogram_add_simd( &h_coarse[16*(n*c + std::min(j+r,n-1))], H[c].coarse );
610 // Find median at coarse level
611 for ( k = 0; k < 16 ; ++k )
613 sum += H[c].coarse[k];
616 sum -= H[c].coarse[k];
622 /* Update corresponding histogram segment */
623 if ( luc[c][k] <= j-r )
625 memset( &H[c].fine[k], 0, 16 * sizeof(HT) );
626 for ( luc[c][k] = cv::HT(j-r); luc[c][k] < MIN(j+r+1,n); ++luc[c][k] )
627 histogram_add_simd( &h_fine[16*(n*(16*c+k)+luc[c][k])], H[c].fine[k] );
629 if ( luc[c][k] < j+r+1 )
631 histogram_muladd( j+r+1 - n, &h_fine[16*(n*(16*c+k)+(n-1))], &H[c].fine[k][0] );
632 luc[c][k] = (HT)(j+r+1);
637 for ( ; luc[c][k] < j+r+1; ++luc[c][k] )
639 histogram_sub_simd( &h_fine[16*(n*(16*c+k)+MAX(luc[c][k]-2*r-1,0))], H[c].fine[k] );
640 histogram_add_simd( &h_fine[16*(n*(16*c+k)+MIN(luc[c][k],n-1))], H[c].fine[k] );
644 histogram_sub_simd( &h_coarse[16*(n*c+MAX(j-r,0))], H[c].coarse );
646 /* Find median in segment */
647 segment = H[c].fine[k];
648 for ( b = 0; b < 16 ; b++ )
653 dst[dstep*i+cn*j+c] = (uchar)(16*k + b);
663 for( j = 0; j < 2*r; ++j )
664 histogram_add( &h_coarse[16*(n*c+j)], H[c].coarse );
666 for( j = r; j < n-r; j++ )
668 int t = 2*r*r + 2*r, b, sum = 0;
671 histogram_add( &h_coarse[16*(n*c + std::min(j+r,n-1))], H[c].coarse );
673 // Find median at coarse level
674 for ( k = 0; k < 16 ; ++k )
676 sum += H[c].coarse[k];
679 sum -= H[c].coarse[k];
685 /* Update corresponding histogram segment */
686 if ( luc[c][k] <= j-r )
688 memset( &H[c].fine[k], 0, 16 * sizeof(HT) );
689 for ( luc[c][k] = cv::HT(j-r); luc[c][k] < MIN(j+r+1,n); ++luc[c][k] )
690 histogram_add( &h_fine[16*(n*(16*c+k)+luc[c][k])], H[c].fine[k] );
692 if ( luc[c][k] < j+r+1 )
694 histogram_muladd( j+r+1 - n, &h_fine[16*(n*(16*c+k)+(n-1))], &H[c].fine[k][0] );
695 luc[c][k] = (HT)(j+r+1);
700 for ( ; luc[c][k] < j+r+1; ++luc[c][k] )
702 histogram_sub( &h_fine[16*(n*(16*c+k)+MAX(luc[c][k]-2*r-1,0))], H[c].fine[k] );
703 histogram_add( &h_fine[16*(n*(16*c+k)+MIN(luc[c][k],n-1))], H[c].fine[k] );
707 histogram_sub( &h_coarse[16*(n*c+MAX(j-r,0))], H[c].coarse );
709 /* Find median in segment */
710 segment = H[c].fine[k];
711 for ( b = 0; b < 16 ; b++ )
716 dst[dstep*i+cn*j+c] = (uchar)(16*k + b);
732 medianBlur_8u_Om( const Mat& _src, Mat& _dst, int m )
739 Size size = _dst.size();
740 const uchar* src = _src.data;
741 uchar* dst = _dst.data;
742 int src_step = (int)_src.step, dst_step = (int)_dst.step;
743 int cn = _src.channels();
744 const uchar* src_max = src + size.height*src_step;
746 #define UPDATE_ACC01( pix, cn, op ) \
750 zone0[cn][p >> 4] op; \
753 //CV_Assert( size.height >= nx && size.width >= nx );
754 for( x = 0; x < size.width; x++, src += cn, dst += cn )
756 uchar* dst_cur = dst;
757 const uchar* src_top = src;
758 const uchar* src_bottom = src;
760 int src_step1 = src_step, dst_step1 = dst_step;
764 src_bottom = src_top += src_step*(size.height-1);
765 dst_cur += dst_step*(size.height-1);
766 src_step1 = -src_step1;
767 dst_step1 = -dst_step1;
771 memset( zone0, 0, sizeof(zone0[0])*cn );
772 memset( zone1, 0, sizeof(zone1[0])*cn );
774 for( y = 0; y <= m/2; y++ )
776 for( c = 0; c < cn; c++ )
780 for( k = 0; k < m*cn; k += cn )
781 UPDATE_ACC01( src_bottom[k+c], c, ++ );
785 for( k = 0; k < m*cn; k += cn )
786 UPDATE_ACC01( src_bottom[k+c], c, += m/2+1 );
790 if( (src_step1 > 0 && y < size.height-1) ||
791 (src_step1 < 0 && size.height-y-1 > 0) )
792 src_bottom += src_step1;
795 for( y = 0; y < size.height; y++, dst_cur += dst_step1 )
798 for( c = 0; c < cn; c++ )
803 int t = s + zone0[c][k];
814 dst_cur[c] = (uchar)k;
817 if( y+1 == size.height )
822 for( k = 0; k < m; k++ )
825 int q = src_bottom[k];
834 for( k = 0; k < m*3; k += 3 )
836 UPDATE_ACC01( src_top[k], 0, -- );
837 UPDATE_ACC01( src_top[k+1], 1, -- );
838 UPDATE_ACC01( src_top[k+2], 2, -- );
840 UPDATE_ACC01( src_bottom[k], 0, ++ );
841 UPDATE_ACC01( src_bottom[k+1], 1, ++ );
842 UPDATE_ACC01( src_bottom[k+2], 2, ++ );
848 for( k = 0; k < m*4; k += 4 )
850 UPDATE_ACC01( src_top[k], 0, -- );
851 UPDATE_ACC01( src_top[k+1], 1, -- );
852 UPDATE_ACC01( src_top[k+2], 2, -- );
853 UPDATE_ACC01( src_top[k+3], 3, -- );
855 UPDATE_ACC01( src_bottom[k], 0, ++ );
856 UPDATE_ACC01( src_bottom[k+1], 1, ++ );
857 UPDATE_ACC01( src_bottom[k+2], 2, ++ );
858 UPDATE_ACC01( src_bottom[k+3], 3, ++ );
862 if( (src_step1 > 0 && src_bottom + src_step1 < src_max) ||
863 (src_step1 < 0 && src_bottom + src_step1 >= src) )
864 src_bottom += src_step1;
867 src_top += src_step1;
877 typedef uchar value_type;
878 typedef int arg_type;
880 arg_type load(const uchar* ptr) { return *ptr; }
881 void store(uchar* ptr, arg_type val) { *ptr = (uchar)val; }
882 void operator()(arg_type& a, arg_type& b) const
884 int t = CV_FAST_CAST_8U(a - b);
891 typedef ushort value_type;
892 typedef int arg_type;
894 arg_type load(const ushort* ptr) { return *ptr; }
895 void store(ushort* ptr, arg_type val) { *ptr = (ushort)val; }
896 void operator()(arg_type& a, arg_type& b) const
906 typedef short value_type;
907 typedef int arg_type;
909 arg_type load(const short* ptr) { return *ptr; }
910 void store(short* ptr, arg_type val) { *ptr = (short)val; }
911 void operator()(arg_type& a, arg_type& b) const
921 typedef float value_type;
922 typedef float arg_type;
924 arg_type load(const float* ptr) { return *ptr; }
925 void store(float* ptr, arg_type val) { *ptr = val; }
926 void operator()(arg_type& a, arg_type& b) const
938 typedef uchar value_type;
939 typedef __m128i arg_type;
941 arg_type load(const uchar* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
942 void store(uchar* ptr, arg_type val) { _mm_storeu_si128((__m128i*)ptr, val); }
943 void operator()(arg_type& a, arg_type& b) const
946 a = _mm_min_epu8(a, b);
947 b = _mm_max_epu8(b, t);
954 typedef ushort value_type;
955 typedef __m128i arg_type;
957 arg_type load(const ushort* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
958 void store(ushort* ptr, arg_type val) { _mm_storeu_si128((__m128i*)ptr, val); }
959 void operator()(arg_type& a, arg_type& b) const
961 arg_type t = _mm_subs_epu16(a, b);
962 a = _mm_subs_epu16(a, t);
963 b = _mm_adds_epu16(b, t);
970 typedef short value_type;
971 typedef __m128i arg_type;
973 arg_type load(const short* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
974 void store(short* ptr, arg_type val) { _mm_storeu_si128((__m128i*)ptr, val); }
975 void operator()(arg_type& a, arg_type& b) const
978 a = _mm_min_epi16(a, b);
979 b = _mm_max_epi16(b, t);
986 typedef float value_type;
987 typedef __m128 arg_type;
989 arg_type load(const float* ptr) { return _mm_loadu_ps(ptr); }
990 void store(float* ptr, arg_type val) { _mm_storeu_ps(ptr, val); }
991 void operator()(arg_type& a, arg_type& b) const
994 a = _mm_min_ps(a, b);
995 b = _mm_max_ps(b, t);
1002 typedef MinMax8u MinMaxVec8u;
1003 typedef MinMax16u MinMaxVec16u;
1004 typedef MinMax16s MinMaxVec16s;
1005 typedef MinMax32f MinMaxVec32f;
1009 template<class Op, class VecOp>
1011 medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
1013 typedef typename Op::value_type T;
1014 typedef typename Op::arg_type WT;
1015 typedef typename VecOp::arg_type VT;
1017 const T* src = (const T*)_src.data;
1018 T* dst = (T*)_dst.data;
1019 int sstep = (int)(_src.step/sizeof(T));
1020 int dstep = (int)(_dst.step/sizeof(T));
1021 Size size = _dst.size();
1022 int i, j, k, cn = _src.channels();
1025 volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
1029 if( size.width == 1 || size.height == 1 )
1031 int len = size.width + size.height - 1;
1032 int sdelta = size.height == 1 ? cn : sstep;
1033 int sdelta0 = size.height == 1 ? 0 : sstep - cn;
1034 int ddelta = size.height == 1 ? cn : dstep;
1036 for( i = 0; i < len; i++, src += sdelta0, dst += ddelta )
1037 for( j = 0; j < cn; j++, src++ )
1039 WT p0 = src[i > 0 ? -sdelta : 0];
1041 WT p2 = src[i < len - 1 ? sdelta : 0];
1043 op(p0, p1); op(p1, p2); op(p0, p1);
1050 for( i = 0; i < size.height; i++, dst += dstep )
1052 const T* row0 = src + std::max(i - 1, 0)*sstep;
1053 const T* row1 = src + i*sstep;
1054 const T* row2 = src + std::min(i + 1, size.height-1)*sstep;
1055 int limit = useSIMD ? cn : size.width;
1059 for( ; j < limit; j++ )
1061 int j0 = j >= cn ? j - cn : j;
1062 int j2 = j < size.width - cn ? j + cn : j;
1063 WT p0 = row0[j0], p1 = row0[j], p2 = row0[j2];
1064 WT p3 = row1[j0], p4 = row1[j], p5 = row1[j2];
1065 WT p6 = row2[j0], p7 = row2[j], p8 = row2[j2];
1067 op(p1, p2); op(p4, p5); op(p7, p8); op(p0, p1);
1068 op(p3, p4); op(p6, p7); op(p1, p2); op(p4, p5);
1069 op(p7, p8); op(p0, p3); op(p5, p8); op(p4, p7);
1070 op(p3, p6); op(p1, p4); op(p2, p5); op(p4, p7);
1071 op(p4, p2); op(p6, p4); op(p4, p2);
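// After this 19-exchange sorting network p4 holds the median of the nine
// samples p0..p8, i.e. the 3x3 neighborhood median for column j.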
1075 if( limit == size.width )
1078 for( ; j <= size.width - VecOp::SIZE - cn; j += VecOp::SIZE )
1080 VT p0 = vop.load(row0+j-cn), p1 = vop.load(row0+j), p2 = vop.load(row0+j+cn);
1081 VT p3 = vop.load(row1+j-cn), p4 = vop.load(row1+j), p5 = vop.load(row1+j+cn);
1082 VT p6 = vop.load(row2+j-cn), p7 = vop.load(row2+j), p8 = vop.load(row2+j+cn);
1084 vop(p1, p2); vop(p4, p5); vop(p7, p8); vop(p0, p1);
1085 vop(p3, p4); vop(p6, p7); vop(p1, p2); vop(p4, p5);
1086 vop(p7, p8); vop(p0, p3); vop(p5, p8); vop(p4, p7);
1087 vop(p3, p6); vop(p1, p4); vop(p2, p5); vop(p4, p7);
1088 vop(p4, p2); vop(p6, p4); vop(p4, p2);
1089 vop.store(dst+j, p4);
1098 if( size.width == 1 || size.height == 1 )
1100 int len = size.width + size.height - 1;
1101 int sdelta = size.height == 1 ? cn : sstep;
1102 int sdelta0 = size.height == 1 ? 0 : sstep - cn;
1103 int ddelta = size.height == 1 ? cn : dstep;
1105 for( i = 0; i < len; i++, src += sdelta0, dst += ddelta )
1106 for( j = 0; j < cn; j++, src++ )
1108 int i1 = i > 0 ? -sdelta : 0;
1109 int i0 = i > 1 ? -sdelta*2 : i1;
1110 int i3 = i < len-1 ? sdelta : 0;
1111 int i4 = i < len-2 ? sdelta*2 : i3;
1112 WT p0 = src[i0], p1 = src[i1], p2 = src[0], p3 = src[i3], p4 = src[i4];
1114 op(p0, p1); op(p3, p4); op(p2, p3); op(p3, p4); op(p0, p2);
1115 op(p2, p4); op(p1, p3); op(p1, p2);
1122 for( i = 0; i < size.height; i++, dst += dstep )
1125 row[0] = src + std::max(i - 2, 0)*sstep;
1126 row[1] = src + std::max(i - 1, 0)*sstep;
1127 row[2] = src + i*sstep;
1128 row[3] = src + std::min(i + 1, size.height-1)*sstep;
1129 row[4] = src + std::min(i + 2, size.height-1)*sstep;
1130 int limit = useSIMD ? cn*2 : size.width;
1134 for( ; j < limit; j++ )
1137 int j1 = j >= cn ? j - cn : j;
1138 int j0 = j >= cn*2 ? j - cn*2 : j1;
1139 int j3 = j < size.width - cn ? j + cn : j;
1140 int j4 = j < size.width - cn*2 ? j + cn*2 : j3;
1141 for( k = 0; k < 5; k++ )
1143 const T* rowk = row[k];
1144 p[k*5] = rowk[j0]; p[k*5+1] = rowk[j1];
1145 p[k*5+2] = rowk[j]; p[k*5+3] = rowk[j3];
1146 p[k*5+4] = rowk[j4];
1149 op(p[1], p[2]); op(p[0], p[1]); op(p[1], p[2]); op(p[4], p[5]); op(p[3], p[4]);
1150 op(p[4], p[5]); op(p[0], p[3]); op(p[2], p[5]); op(p[2], p[3]); op(p[1], p[4]);
1151 op(p[1], p[2]); op(p[3], p[4]); op(p[7], p[8]); op(p[6], p[7]); op(p[7], p[8]);
1152 op(p[10], p[11]); op(p[9], p[10]); op(p[10], p[11]); op(p[6], p[9]); op(p[8], p[11]);
1153 op(p[8], p[9]); op(p[7], p[10]); op(p[7], p[8]); op(p[9], p[10]); op(p[0], p[6]);
1154 op(p[4], p[10]); op(p[4], p[6]); op(p[2], p[8]); op(p[2], p[4]); op(p[6], p[8]);
1155 op(p[1], p[7]); op(p[5], p[11]); op(p[5], p[7]); op(p[3], p[9]); op(p[3], p[5]);
1156 op(p[7], p[9]); op(p[1], p[2]); op(p[3], p[4]); op(p[5], p[6]); op(p[7], p[8]);
1157 op(p[9], p[10]); op(p[13], p[14]); op(p[12], p[13]); op(p[13], p[14]); op(p[16], p[17]);
1158 op(p[15], p[16]); op(p[16], p[17]); op(p[12], p[15]); op(p[14], p[17]); op(p[14], p[15]);
1159 op(p[13], p[16]); op(p[13], p[14]); op(p[15], p[16]); op(p[19], p[20]); op(p[18], p[19]);
1160 op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[21], p[23]); op(p[22], p[24]);
1161 op(p[22], p[23]); op(p[18], p[21]); op(p[20], p[23]); op(p[20], p[21]); op(p[19], p[22]);
1162 op(p[22], p[24]); op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[12], p[18]);
1163 op(p[16], p[22]); op(p[16], p[18]); op(p[14], p[20]); op(p[20], p[24]); op(p[14], p[16]);
1164 op(p[18], p[20]); op(p[22], p[24]); op(p[13], p[19]); op(p[17], p[23]); op(p[17], p[19]);
1165 op(p[15], p[21]); op(p[15], p[17]); op(p[19], p[21]); op(p[13], p[14]); op(p[15], p[16]);
1166 op(p[17], p[18]); op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[0], p[12]);
1167 op(p[8], p[20]); op(p[8], p[12]); op(p[4], p[16]); op(p[16], p[24]); op(p[12], p[16]);
1168 op(p[2], p[14]); op(p[10], p[22]); op(p[10], p[14]); op(p[6], p[18]); op(p[6], p[10]);
1169 op(p[10], p[12]); op(p[1], p[13]); op(p[9], p[21]); op(p[9], p[13]); op(p[5], p[17]);
1170 op(p[13], p[17]); op(p[3], p[15]); op(p[11], p[23]); op(p[11], p[15]); op(p[7], p[19]);
1171 op(p[7], p[11]); op(p[11], p[13]); op(p[11], p[12]);
1175 if( limit == size.width )
1178 for( ; j <= size.width - VecOp::SIZE - cn*2; j += VecOp::SIZE )
1181 for( k = 0; k < 5; k++ )
1183 const T* rowk = row[k];
1184 p[k*5] = vop.load(rowk+j-cn*2); p[k*5+1] = vop.load(rowk+j-cn);
1185 p[k*5+2] = vop.load(rowk+j); p[k*5+3] = vop.load(rowk+j+cn);
1186 p[k*5+4] = vop.load(rowk+j+cn*2);
1189 vop(p[1], p[2]); vop(p[0], p[1]); vop(p[1], p[2]); vop(p[4], p[5]); vop(p[3], p[4]);
1190 vop(p[4], p[5]); vop(p[0], p[3]); vop(p[2], p[5]); vop(p[2], p[3]); vop(p[1], p[4]);
1191 vop(p[1], p[2]); vop(p[3], p[4]); vop(p[7], p[8]); vop(p[6], p[7]); vop(p[7], p[8]);
1192 vop(p[10], p[11]); vop(p[9], p[10]); vop(p[10], p[11]); vop(p[6], p[9]); vop(p[8], p[11]);
1193 vop(p[8], p[9]); vop(p[7], p[10]); vop(p[7], p[8]); vop(p[9], p[10]); vop(p[0], p[6]);
1194 vop(p[4], p[10]); vop(p[4], p[6]); vop(p[2], p[8]); vop(p[2], p[4]); vop(p[6], p[8]);
1195 vop(p[1], p[7]); vop(p[5], p[11]); vop(p[5], p[7]); vop(p[3], p[9]); vop(p[3], p[5]);
1196 vop(p[7], p[9]); vop(p[1], p[2]); vop(p[3], p[4]); vop(p[5], p[6]); vop(p[7], p[8]);
1197 vop(p[9], p[10]); vop(p[13], p[14]); vop(p[12], p[13]); vop(p[13], p[14]); vop(p[16], p[17]);
1198 vop(p[15], p[16]); vop(p[16], p[17]); vop(p[12], p[15]); vop(p[14], p[17]); vop(p[14], p[15]);
1199 vop(p[13], p[16]); vop(p[13], p[14]); vop(p[15], p[16]); vop(p[19], p[20]); vop(p[18], p[19]);
1200 vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[21], p[23]); vop(p[22], p[24]);
1201 vop(p[22], p[23]); vop(p[18], p[21]); vop(p[20], p[23]); vop(p[20], p[21]); vop(p[19], p[22]);
1202 vop(p[22], p[24]); vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[12], p[18]);
1203 vop(p[16], p[22]); vop(p[16], p[18]); vop(p[14], p[20]); vop(p[20], p[24]); vop(p[14], p[16]);
1204 vop(p[18], p[20]); vop(p[22], p[24]); vop(p[13], p[19]); vop(p[17], p[23]); vop(p[17], p[19]);
1205 vop(p[15], p[21]); vop(p[15], p[17]); vop(p[19], p[21]); vop(p[13], p[14]); vop(p[15], p[16]);
1206 vop(p[17], p[18]); vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[0], p[12]);
1207 vop(p[8], p[20]); vop(p[8], p[12]); vop(p[4], p[16]); vop(p[16], p[24]); vop(p[12], p[16]);
1208 vop(p[2], p[14]); vop(p[10], p[22]); vop(p[10], p[14]); vop(p[6], p[18]); vop(p[6], p[10]);
1209 vop(p[10], p[12]); vop(p[1], p[13]); vop(p[9], p[21]); vop(p[9], p[13]); vop(p[5], p[17]);
1210 vop(p[13], p[17]); vop(p[3], p[15]); vop(p[11], p[23]); vop(p[11], p[15]); vop(p[7], p[19]);
1211 vop(p[7], p[11]); vop(p[11], p[13]); vop(p[11], p[12]);
1212 vop.store(dst+j, p[12]);
1223 void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
1225 Mat src0 = _src0.getMat();
1226 _dst.create( src0.size(), src0.type() );
1227 Mat dst = _dst.getMat();
1235 CV_Assert( ksize % 2 == 1 );
1237 #ifdef HAVE_TEGRA_OPTIMIZATION
1238 if (tegra::medianBlur(src0, dst, ksize))
1242 bool useSortNet = ksize == 3 || (ksize == 5
1244 && src0.depth() > CV_8U
1251 if( dst.data != src0.data )
1256 if( src.depth() == CV_8U )
1257 medianBlur_SortNet<MinMax8u, MinMaxVec8u>( src, dst, ksize );
1258 else if( src.depth() == CV_16U )
1259 medianBlur_SortNet<MinMax16u, MinMaxVec16u>( src, dst, ksize );
1260 else if( src.depth() == CV_16S )
1261 medianBlur_SortNet<MinMax16s, MinMaxVec16s>( src, dst, ksize );
1262 else if( src.depth() == CV_32F )
1263 medianBlur_SortNet<MinMax32f, MinMaxVec32f>( src, dst, ksize );
1265 CV_Error(CV_StsUnsupportedFormat, "");
1271 cv::copyMakeBorder( src0, src, 0, 0, ksize/2, ksize/2, BORDER_REPLICATE );
1273 int cn = src0.channels();
1274 CV_Assert( src.depth() == CV_8U && (cn == 1 || cn == 3 || cn == 4) );
1276 double img_size_mp = (double)(src0.total())/(1 << 20);
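// Heuristic dispatch: the O(m)-per-pixel histogram method is preferred for small
// apertures; the threshold shrinks for larger images and when SSE2 is available,
// since the constant-time method is SIMD-accelerated.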
1277 if( ksize <= 3 + (img_size_mp < 1 ? 12 : img_size_mp < 4 ? 6 : 2)*(MEDIAN_HAVE_SIMD && checkHardwareSupport(CV_CPU_SSE2) ? 1 : 3))
1278 medianBlur_8u_Om( src, dst, ksize );
1280 medianBlur_8u_O1( src, dst, ksize );
1284 /****************************************************************************************\
1285                                    Bilateral Filtering
1286 \****************************************************************************************/
1293 class BilateralFilter_8u_Invoker :
1294 public ParallelLoopBody
1297 BilateralFilter_8u_Invoker(Mat& _dest, const Mat& _temp, int _radius, int _maxk,
1298 int* _space_ofs, float *_space_weight, float *_color_weight) :
1299 temp(&_temp), dest(&_dest), radius(_radius),
1300 maxk(_maxk), space_ofs(_space_ofs), space_weight(_space_weight), color_weight(_color_weight)
1304 virtual void operator() (const Range& range) const
1306 int i, j, cn = dest->channels(), k;
1307 Size size = dest->size();
1309 int CV_DECL_ALIGNED(16) buf[4];
1310 float CV_DECL_ALIGNED(16) bufSum[4];
1311 static const int CV_DECL_ALIGNED(16) bufSignMask[] = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
1312 bool haveSSE3 = checkHardwareSupport(CV_CPU_SSE3);
1315 for( i = range.start; i < range.end; i++ )
1317 const uchar* sptr = temp->ptr(i+radius) + radius*cn;
1318 uchar* dptr = dest->ptr(i);
1322 for( j = 0; j < size.width; j++ )
1324 float sum = 0, wsum = 0;
1330 __m128 _val0 = _mm_set1_ps(static_cast<float>(val0));
1331 const __m128 _signMask = _mm_load_ps((const float*)bufSignMask);
1333 for( ; k <= maxk - 4; k += 4 )
1335 __m128 _valF = _mm_set_ps(sptr[j + space_ofs[k+3]], sptr[j + space_ofs[k+2]],
1336 sptr[j + space_ofs[k+1]], sptr[j + space_ofs[k]]);
1338 __m128 _val = _mm_andnot_ps(_signMask, _mm_sub_ps(_valF, _val0));
1339 _mm_store_si128((__m128i*)buf, _mm_cvtps_epi32(_val));
1341 __m128 _cw = _mm_set_ps(color_weight[buf[3]],color_weight[buf[2]],
1342 color_weight[buf[1]],color_weight[buf[0]]);
1343 __m128 _sw = _mm_loadu_ps(space_weight+k);
1344 __m128 _w = _mm_mul_ps(_cw, _sw);
1345 _cw = _mm_mul_ps(_w, _valF);
1347 _sw = _mm_hadd_ps(_w, _cw);
1348 _sw = _mm_hadd_ps(_sw, _sw);
1349 _mm_storel_pi((__m64*)bufSum, _sw);
1356 for( ; k < maxk; k++ )
1358 int val = sptr[j + space_ofs[k]];
1359 float w = space_weight[k]*color_weight[std::abs(val - val0)];
1363 // overflow is not possible here => there is no need to use CV_CAST_8U
1364 dptr[j] = (uchar)cvRound(sum/wsum);
1370 for( j = 0; j < size.width*3; j += 3 )
1372 float sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0;
1373 int b0 = sptr[j], g0 = sptr[j+1], r0 = sptr[j+2];
1378 const __m128 _b0 = _mm_set1_ps(static_cast<float>(b0));
1379 const __m128 _g0 = _mm_set1_ps(static_cast<float>(g0));
1380 const __m128 _r0 = _mm_set1_ps(static_cast<float>(r0));
1381 const __m128 _signMask = _mm_load_ps((const float*)bufSignMask);
1383 for( ; k <= maxk - 4; k += 4 )
1385 const uchar* sptr_k = sptr + j + space_ofs[k];
1386 const uchar* sptr_k1 = sptr + j + space_ofs[k+1];
1387 const uchar* sptr_k2 = sptr + j + space_ofs[k+2];
1388 const uchar* sptr_k3 = sptr + j + space_ofs[k+3];
1390 __m128 _b = _mm_set_ps(sptr_k3[0],sptr_k2[0],sptr_k1[0],sptr_k[0]);
1391 __m128 _g = _mm_set_ps(sptr_k3[1],sptr_k2[1],sptr_k1[1],sptr_k[1]);
1392 __m128 _r = _mm_set_ps(sptr_k3[2],sptr_k2[2],sptr_k1[2],sptr_k[2]);
1394 __m128 bt = _mm_andnot_ps(_signMask, _mm_sub_ps(_b,_b0));
1395 __m128 gt = _mm_andnot_ps(_signMask, _mm_sub_ps(_g,_g0));
1396 __m128 rt = _mm_andnot_ps(_signMask, _mm_sub_ps(_r,_r0));
1398 bt =_mm_add_ps(rt, _mm_add_ps(bt, gt));
1399 _mm_store_si128((__m128i*)buf, _mm_cvtps_epi32(bt));
1401 __m128 _w = _mm_set_ps(color_weight[buf[3]],color_weight[buf[2]],
1402 color_weight[buf[1]],color_weight[buf[0]]);
1403 __m128 _sw = _mm_loadu_ps(space_weight+k);
1405 _w = _mm_mul_ps(_w,_sw);
1406 _b = _mm_mul_ps(_b, _w);
1407 _g = _mm_mul_ps(_g, _w);
1408 _r = _mm_mul_ps(_r, _w);
1410 _w = _mm_hadd_ps(_w, _b);
1411 _g = _mm_hadd_ps(_g, _r);
1413 _w = _mm_hadd_ps(_w, _g);
1414 _mm_store_ps(bufSum, _w);
1424 for( ; k < maxk; k++ )
1426 const uchar* sptr_k = sptr + j + space_ofs[k];
1427 int b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
1428 float w = space_weight[k]*color_weight[std::abs(b - b0) +
1429 std::abs(g - g0) + std::abs(r - r0)];
1430 sum_b += b*w; sum_g += g*w; sum_r += r*w;
1434 b0 = cvRound(sum_b*wsum);
1435 g0 = cvRound(sum_g*wsum);
1436 r0 = cvRound(sum_r*wsum);
1437 dptr[j] = (uchar)b0; dptr[j+1] = (uchar)g0; dptr[j+2] = (uchar)r0;
1446 int radius, maxk, *space_ofs;
1447 float *space_weight, *color_weight;
1451 bilateralFilter_8u( const Mat& src, Mat& dst, int d,
1452 double sigma_color, double sigma_space,
1456 int cn = src.channels();
1457 int i, j, maxk, radius;
1458 Size size = src.size();
1460 CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) &&
1461 src.type() == dst.type() && src.size() == dst.size() &&
1462 src.data != dst.data );
1464 if( sigma_color <= 0 )
1466 if( sigma_space <= 0 )
1469 double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
1470 double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
1473 radius = cvRound(sigma_space*1.5);
1476 radius = MAX(radius, 1);
1480 copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
1482 vector<float> _color_weight(cn*256);
1483 vector<float> _space_weight(d*d);
1484 vector<int> _space_ofs(d*d);
1485 float* color_weight = &_color_weight[0];
1486 float* space_weight = &_space_weight[0];
1487 int* space_ofs = &_space_ofs[0];
1489 // initialize color-related bilateral filter coefficients
1491 for( i = 0; i < 256*cn; i++ )
1492 color_weight[i] = (float)std::exp(i*i*gauss_color_coeff);
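// color_weight[d] = exp(-d^2 / (2*sigma_color^2)) for an absolute intensity
// difference d; the full bilateral weight is this value multiplied by the
// spatial Gaussian stored in space_weight[].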
1494 // initialize space-related bilateral filter coefficients
1495 for( i = -radius, maxk = 0; i <= radius; i++ )
1499 for( ;j <= radius; j++ )
1501 double r = std::sqrt((double)i*i + (double)j*j);
1504 space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
1505 space_ofs[maxk++] = (int)(i*temp.step + j*cn);
1509 BilateralFilter_8u_Invoker body(dst, temp, radius, maxk, space_ofs, space_weight, color_weight);
1510 parallel_for_(Range(0, size.height), body);
1514 class BilateralFilter_32f_Invoker :
1515 public ParallelLoopBody
1519 BilateralFilter_32f_Invoker(int _cn, int _radius, int _maxk, int *_space_ofs,
1520 const Mat& _temp, Mat& _dest, float _scale_index, float *_space_weight, float *_expLUT) :
1521 cn(_cn), radius(_radius), maxk(_maxk), space_ofs(_space_ofs),
1522 temp(&_temp), dest(&_dest), scale_index(_scale_index), space_weight(_space_weight), expLUT(_expLUT)
1526 virtual void operator() (const Range& range) const
1529 Size size = dest->size();
1531 int CV_DECL_ALIGNED(16) idxBuf[4];
1532 float CV_DECL_ALIGNED(16) bufSum32[4];
1533 static const int CV_DECL_ALIGNED(16) bufSignMask[] = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
1534 bool haveSSE3 = checkHardwareSupport(CV_CPU_SSE3);
1537 for( i = range.start; i < range.end; i++ )
1539 const float* sptr = temp->ptr<float>(i+radius) + radius*cn;
1540 float* dptr = dest->ptr<float>(i);
1544 for( j = 0; j < size.width; j++ )
1546 float sum = 0, wsum = 0;
1547 float val0 = sptr[j];
1552 const __m128 _val0 = _mm_set1_ps(sptr[j]);
1553 const __m128 _scale_index = _mm_set1_ps(scale_index);
1554 const __m128 _signMask = _mm_load_ps((const float*)bufSignMask);
1556 for( ; k <= maxk - 4 ; k += 4 )
1558 __m128 _sw = _mm_loadu_ps(space_weight + k);
1559 __m128 _val = _mm_set_ps(sptr[j + space_ofs[k+3]], sptr[j + space_ofs[k+2]],
1560 sptr[j + space_ofs[k+1]], sptr[j + space_ofs[k]]);
1561 __m128 _alpha = _mm_mul_ps(_mm_andnot_ps( _signMask, _mm_sub_ps(_val,_val0)), _scale_index);
1563 __m128i _idx = _mm_cvtps_epi32(_alpha);
1564 _mm_store_si128((__m128i*)idxBuf, _idx);
1565 _alpha = _mm_sub_ps(_alpha, _mm_cvtepi32_ps(_idx));
1567 __m128 _explut = _mm_set_ps(expLUT[idxBuf[3]], expLUT[idxBuf[2]],
1568 expLUT[idxBuf[1]], expLUT[idxBuf[0]]);
1569 __m128 _explut1 = _mm_set_ps(expLUT[idxBuf[3]+1], expLUT[idxBuf[2]+1],
1570 expLUT[idxBuf[1]+1], expLUT[idxBuf[0]+1]);
1572 __m128 _w = _mm_mul_ps(_sw, _mm_add_ps(_explut, _mm_mul_ps(_alpha, _mm_sub_ps(_explut1, _explut))));
1573 _val = _mm_mul_ps(_w, _val);
1575 _sw = _mm_hadd_ps(_w, _val);
1576 _sw = _mm_hadd_ps(_sw, _sw);
1577 _mm_storel_pi((__m64*)bufSum32, _sw);
1580 wsum += bufSum32[0];
1585 for( ; k < maxk; k++ )
1587 float val = sptr[j + space_ofs[k]];
1588 float alpha = (float)(std::abs(val - val0)*scale_index);
1589 int idx = cvFloor(alpha);
1591 float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx]));
1595 dptr[j] = (float)(sum/wsum);
1601 for( j = 0; j < size.width*3; j += 3 )
1603 float sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0;
1604 float b0 = sptr[j], g0 = sptr[j+1], r0 = sptr[j+2];
1609 const __m128 _b0 = _mm_set1_ps(b0);
1610 const __m128 _g0 = _mm_set1_ps(g0);
1611 const __m128 _r0 = _mm_set1_ps(r0);
1612 const __m128 _scale_index = _mm_set1_ps(scale_index);
1613 const __m128 _signMask = _mm_load_ps((const float*)bufSignMask);
1615 for( ; k <= maxk-4; k += 4 )
1617 __m128 _sw = _mm_loadu_ps(space_weight + k);
1619 const float* sptr_k = sptr + j + space_ofs[k];
1620 const float* sptr_k1 = sptr + j + space_ofs[k+1];
1621 const float* sptr_k2 = sptr + j + space_ofs[k+2];
1622 const float* sptr_k3 = sptr + j + space_ofs[k+3];
1624 __m128 _b = _mm_set_ps(sptr_k3[0], sptr_k2[0], sptr_k1[0], sptr_k[0]);
1625 __m128 _g = _mm_set_ps(sptr_k3[1], sptr_k2[1], sptr_k1[1], sptr_k[1]);
1626 __m128 _r = _mm_set_ps(sptr_k3[2], sptr_k2[2], sptr_k1[2], sptr_k[2]);
1628 __m128 _bt = _mm_andnot_ps(_signMask,_mm_sub_ps(_b,_b0));
1629 __m128 _gt = _mm_andnot_ps(_signMask,_mm_sub_ps(_g,_g0));
1630 __m128 _rt = _mm_andnot_ps(_signMask,_mm_sub_ps(_r,_r0));
1632 __m128 _alpha = _mm_mul_ps(_scale_index, _mm_add_ps(_rt,_mm_add_ps(_bt, _gt)));
1634 __m128i _idx = _mm_cvtps_epi32(_alpha);
1635 _mm_store_si128((__m128i*)idxBuf, _idx);
1636 _alpha = _mm_sub_ps(_alpha, _mm_cvtepi32_ps(_idx));
1638 __m128 _explut = _mm_set_ps(expLUT[idxBuf[3]], expLUT[idxBuf[2]], expLUT[idxBuf[1]], expLUT[idxBuf[0]]);
1639 __m128 _explut1 = _mm_set_ps(expLUT[idxBuf[3]+1], expLUT[idxBuf[2]+1], expLUT[idxBuf[1]+1], expLUT[idxBuf[0]+1]);
1641 __m128 _w = _mm_mul_ps(_sw, _mm_add_ps(_explut, _mm_mul_ps(_alpha, _mm_sub_ps(_explut1, _explut))));
1643 _b = _mm_mul_ps(_b, _w);
1644 _g = _mm_mul_ps(_g, _w);
1645 _r = _mm_mul_ps(_r, _w);
1647 _w = _mm_hadd_ps(_w, _b);
1648 _g = _mm_hadd_ps(_g, _r);
1650 _w = _mm_hadd_ps(_w, _g);
1651 _mm_store_ps(bufSum32, _w);
1653 wsum += bufSum32[0];
1654 sum_b += bufSum32[1];
1655 sum_g += bufSum32[2];
1656 sum_r += bufSum32[3];
1662 for(; k < maxk; k++ )
1664 const float* sptr_k = sptr + j + space_ofs[k];
1665 float b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
1666 float alpha = (float)((std::abs(b - b0) +
1667 std::abs(g - g0) + std::abs(r - r0))*scale_index);
1668 int idx = cvFloor(alpha);
1670 float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx]));
1671 sum_b += b*w; sum_g += g*w; sum_r += r*w;
1678 dptr[j] = b0; dptr[j+1] = g0; dptr[j+2] = r0;
1685 int cn, radius, maxk, *space_ofs;
1688 float scale_index, *space_weight, *expLUT;
1693 bilateralFilter_32f( const Mat& src, Mat& dst, int d,
1694 double sigma_color, double sigma_space,
1697 int cn = src.channels();
1698 int i, j, maxk, radius;
1699 double minValSrc=-1, maxValSrc=1;
1700 const int kExpNumBinsPerChannel = 1 << 12;
1701 int kExpNumBins = 0;
1702 float lastExpVal = 1.f;
1703 float len, scale_index;
1704 Size size = src.size();
1706 CV_Assert( (src.type() == CV_32FC1 || src.type() == CV_32FC3) &&
1707 src.type() == dst.type() && src.size() == dst.size() &&
1708 src.data != dst.data );
1710 if( sigma_color <= 0 )
1712 if( sigma_space <= 0 )
1715 double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
1716 double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
1719 radius = cvRound(sigma_space*1.5);
1722 radius = MAX(radius, 1);
1724 // compute the min/max range for the input image (even if multichannel)
1726 minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc );
1727 if(std::abs(minValSrc - maxValSrc) < FLT_EPSILON)
1733 // temporary copy of the image with borders for easy processing
1735 copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
1736 const double insteadNaNValue = -5. * sigma_color;
1737 patchNaNs( temp, insteadNaNValue ); // this replacement of NaNs makes the assumption that depth values are nonnegative
1738 // TODO: make insteadNaNValue available in the outer function interface to control the cases breaking the assumption
1739 // allocate lookup tables
1740 vector<float> _space_weight(d*d);
1741 vector<int> _space_ofs(d*d);
1742 float* space_weight = &_space_weight[0];
1743 int* space_ofs = &_space_ofs[0];
1745 // assign a length which is slightly more than needed
1746 len = (float)(maxValSrc - minValSrc) * cn;
1747 kExpNumBins = kExpNumBinsPerChannel * cn;
1748 vector<float> _expLUT(kExpNumBins+2);
1749 float* expLUT = &_expLUT[0];
1751 scale_index = kExpNumBins/len;
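// The LUT covers color distances in [0, len] with kExpNumBins entries;
// scale_index converts a distance into a fractional bin position, and the filter
// loops interpolate linearly between expLUT[idx] and expLUT[idx+1].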
1753 // initialize the exp LUT
1754 for( i = 0; i < kExpNumBins+2; i++ )
1756 if( lastExpVal > 0.f )
1758 double val = i / scale_index;
1759 expLUT[i] = (float)std::exp(val * val * gauss_color_coeff);
1760 lastExpVal = expLUT[i];
1766 // initialize space-related bilateral filter coefficients
1767 for( i = -radius, maxk = 0; i <= radius; i++ )
1768 for( j = -radius; j <= radius; j++ )
1770 double r = std::sqrt((double)i*i + (double)j*j);
1773 space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
1774 space_ofs[maxk++] = (int)(i*(temp.step/sizeof(float)) + j*cn);
1777 // parallel_for usage
1779 BilateralFilter_32f_Invoker body(cn, radius, maxk, space_ofs, temp, dst, scale_index, space_weight, expLUT);
1780 parallel_for_(Range(0, size.height), body);
1785 void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d,
1786 double sigmaColor, double sigmaSpace,
1789 Mat src = _src.getMat();
1790 _dst.create( src.size(), src.type() );
1791 Mat dst = _dst.getMat();
1793 if( src.depth() == CV_8U )
1794 bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType );
1795 else if( src.depth() == CV_32F )
1796 bilateralFilter_32f( src, dst, d, sigmaColor, sigmaSpace, borderType );
1798 CV_Error( CV_StsUnsupportedFormat,
1799 "Bilateral filtering is only implemented for 8u and 32f images" );
1802 //////////////////////////////////////////////////////////////////////////////////////////
1805 cvSmooth( const void* srcarr, void* dstarr, int smooth_type,
1806 int param1, int param2, double param3, double param4 )
1808 cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0;
1810 CV_Assert( dst.size() == src.size() &&
1811 (smooth_type == CV_BLUR_NO_SCALE || dst.type() == src.type()) );
1816 if( smooth_type == CV_BLUR || smooth_type == CV_BLUR_NO_SCALE )
1817 cv::boxFilter( src, dst, dst.depth(), cv::Size(param1, param2), cv::Point(-1,-1),
1818 smooth_type == CV_BLUR, cv::BORDER_REPLICATE );
1819 else if( smooth_type == CV_GAUSSIAN )
1820 cv::GaussianBlur( src, dst, cv::Size(param1, param2), param3, param4, cv::BORDER_REPLICATE );
1821 else if( smooth_type == CV_MEDIAN )
1822 cv::medianBlur( src, dst, param1 );
1824 cv::bilateralFilter( src, dst, param1, param3, param4, cv::BORDER_REPLICATE );
1826 if( dst.data != dst0.data )
1827 CV_Error( CV_StsUnmatchedFormats, "The destination image does not have the proper type" );