/*M///////////////////////////////////////////////////////////////////////////////////////
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
// For Open Source Computer Vision Library
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//M*/
#include "precomp.hpp"
#include "opencl_kernels.hpp"

/*
 * This file includes the code contributed by Simon Perreault
 * (the function medianBlur_8u_O1)
 *
 * Constant-time median filtering -- http://nomis80.org/ctmf.html
 * Copyright (C) 2006 Simon Perreault
 *
 * Laboratoire de vision et systemes numeriques
 * Pavillon Adrien-Pouliot
 * Sainte-Foy, Quebec, Canada
 *
 * perreaul@gel.ulaval.ca
 */
/****************************************************************************************\
                                         Box Filter
\****************************************************************************************/
70 template<typename T, typename ST>
74 RowSum( int _ksize, int _anchor ) :
81 virtual void operator()(const uchar* src, uchar* dst, int width, int cn)
83 const T* S = (const T*)src;
85 int i = 0, k, ksz_cn = ksize*cn;
87 width = (width - 1)*cn;
88 for( k = 0; k < cn; k++, S++, D++ )
91 for( i = 0; i < ksz_cn; i += cn )
94 for( i = 0; i < width; i += cn )
96 s += S[i + ksz_cn] - S[i];
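/*
 * Editor's sketch (illustrative, not part of the original source): RowSum
 * keeps a horizontal sliding-window sum, so each output costs one addition
 * and one subtraction regardless of ksize.  Single-channel analogue of the
 * loop above (the helper name rowSumSketch is hypothetical):
 *
 *     static void rowSumSketch(const int* src, int* dst, int width, int ksize)
 *     {
 *         int s = 0;
 *         for (int j = 0; j < ksize; j++)          // seed the first window
 *             s += src[j];
 *         dst[0] = s;
 *         for (int j = 0; j < width - ksize; j++)
 *         {
 *             s += src[j + ksize] - src[j];        // slide: one in, one out
 *             dst[j + 1] = s;
 *         }
 *     }
 */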
104 template<typename ST, typename T>
106 public BaseColumnFilter
108 ColumnSum( int _ksize, int _anchor, double _scale ) :
117 virtual void reset() { sumCount = 0; }
119 virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
123 bool haveScale = scale != 1;
124 double _scale = scale;
126 if( width != (int)sum.size() )
135 for( i = 0; i < width; i++ )
137 for( ; sumCount < ksize - 1; sumCount++, src++ )
139 const ST* Sp = (const ST*)src[0];
140 for( i = 0; i <= width - 2; i += 2 )
142 ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1];
143 SUM[i] = s0; SUM[i+1] = s1;
146 for( ; i < width; i++ )
152 CV_Assert( sumCount == ksize-1 );
156 for( ; count--; src++ )
158 const ST* Sp = (const ST*)src[0];
159 const ST* Sm = (const ST*)src[1-ksize];
163 for( i = 0; i <= width - 2; i += 2 )
165 ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1];
166 D[i] = saturate_cast<T>(s0*_scale);
167 D[i+1] = saturate_cast<T>(s1*_scale);
168 s0 -= Sm[i]; s1 -= Sm[i+1];
169 SUM[i] = s0; SUM[i+1] = s1;
172 for( ; i < width; i++ )
174 ST s0 = SUM[i] + Sp[i];
175 D[i] = saturate_cast<T>(s0*_scale);
181 for( i = 0; i <= width - 2; i += 2 )
183 ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1];
184 D[i] = saturate_cast<T>(s0);
185 D[i+1] = saturate_cast<T>(s1);
186 s0 -= Sm[i]; s1 -= Sm[i+1];
187 SUM[i] = s0; SUM[i+1] = s1;
190 for( ; i < width; i++ )
192 ST s0 = SUM[i] + Sp[i];
193 D[i] = saturate_cast<T>(s0);
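/*
 * Editor's sketch (illustrative): ColumnSum is the vertical half of the
 * separable box filter.  SUM[i] carries the sum of the previous ksize-1 row
 * sums for column i; one step of the main loop above, written scalar-style
 * (columnSumStepSketch is a hypothetical name):
 *
 *     static void columnSumStepSketch(int* SUM, const int* Sp, const int* Sm,
 *                                     int* out, int width, double scale)
 *     {
 *         for (int i = 0; i < width; i++)
 *         {
 *             int s = SUM[i] + Sp[i];            // complete ksize-row window
 *             out[i] = (int)(s * scale + 0.5);   // normalized value (the real
 *                                                // code uses saturate_cast)
 *             SUM[i] = s - Sm[i];                // drop the oldest row
 *         }
 *     }
 *
 * Together with RowSum this makes the filter O(1) per pixel in the kernel area.
 */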
208 struct ColumnSum<int, uchar> :
209 public BaseColumnFilter
211 ColumnSum( int _ksize, int _anchor, double _scale ) :
220 virtual void reset() { sumCount = 0; }
222 virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
226 bool haveScale = scale != 1;
227 double _scale = scale;
230 bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
233 if( width != (int)sum.size() )
242 memset((void*)SUM, 0, width*sizeof(int));
243 for( ; sumCount < ksize - 1; sumCount++, src++ )
245 const int* Sp = (const int*)src[0];
250 for( ; i < width-4; i+=4 )
252 __m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i));
253 __m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i));
254 _mm_storeu_si128((__m128i*)(SUM+i),_mm_add_epi32(_sum, _sp));
258 for( ; i < width; i++ )
264 CV_Assert( sumCount == ksize-1 );
268 for( ; count--; src++ )
270 const int* Sp = (const int*)src[0];
271 const int* Sm = (const int*)src[1-ksize];
272 uchar* D = (uchar*)dst;
279 const __m128 scale4 = _mm_set1_ps((float)_scale);
280 for( ; i < width-8; i+=8 )
282 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
283 __m128i _sm1 = _mm_loadu_si128((const __m128i*)(Sm+i+4));
285 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
286 _mm_loadu_si128((const __m128i*)(Sp+i)));
287 __m128i _s01 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i+4)),
288 _mm_loadu_si128((const __m128i*)(Sp+i+4)));
290 __m128i _s0T = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s0)));
291 __m128i _s0T1 = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s01)));
293 _s0T = _mm_packs_epi32(_s0T, _s0T1);
295 _mm_storel_epi64((__m128i*)(D+i), _mm_packus_epi16(_s0T, _s0T));
297 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
298 _mm_storeu_si128((__m128i*)(SUM+i+4),_mm_sub_epi32(_s01,_sm1));
302 for( ; i < width; i++ )
304 int s0 = SUM[i] + Sp[i];
305 D[i] = saturate_cast<uchar>(s0*_scale);
315 for( ; i < width-8; i+=8 )
317 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
318 __m128i _sm1 = _mm_loadu_si128((const __m128i*)(Sm+i+4));
320 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
321 _mm_loadu_si128((const __m128i*)(Sp+i)));
322 __m128i _s01 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i+4)),
323 _mm_loadu_si128((const __m128i*)(Sp+i+4)));
325 __m128i _s0T = _mm_packs_epi32(_s0, _s01);
327 _mm_storel_epi64((__m128i*)(D+i), _mm_packus_epi16(_s0T, _s0T));
329 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
330 _mm_storeu_si128((__m128i*)(SUM+i+4),_mm_sub_epi32(_s01,_sm1));
335 for( ; i < width; i++ )
337 int s0 = SUM[i] + Sp[i];
338 D[i] = saturate_cast<uchar>(s0);
348 std::vector<int> sum;
352 struct ColumnSum<int, short> :
353 public BaseColumnFilter
355 ColumnSum( int _ksize, int _anchor, double _scale ) :
364 virtual void reset() { sumCount = 0; }
366 virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
370 bool haveScale = scale != 1;
371 double _scale = scale;
374 bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
377 if( width != (int)sum.size() )
385 memset((void*)SUM, 0, width*sizeof(int));
386 for( ; sumCount < ksize - 1; sumCount++, src++ )
388 const int* Sp = (const int*)src[0];
393 for( ; i < width-4; i+=4 )
395 __m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i));
396 __m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i));
397 _mm_storeu_si128((__m128i*)(SUM+i),_mm_add_epi32(_sum, _sp));
401 for( ; i < width; i++ )
407 CV_Assert( sumCount == ksize-1 );
411 for( ; count--; src++ )
413 const int* Sp = (const int*)src[0];
414 const int* Sm = (const int*)src[1-ksize];
415 short* D = (short*)dst;
422 const __m128 scale4 = _mm_set1_ps((float)_scale);
423 for( ; i < width-8; i+=8 )
425 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
426 __m128i _sm1 = _mm_loadu_si128((const __m128i*)(Sm+i+4));
428 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
429 _mm_loadu_si128((const __m128i*)(Sp+i)));
430 __m128i _s01 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i+4)),
431 _mm_loadu_si128((const __m128i*)(Sp+i+4)));
433 __m128i _s0T = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s0)));
434 __m128i _s0T1 = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s01)));
436 _mm_storeu_si128((__m128i*)(D+i), _mm_packs_epi32(_s0T, _s0T1));
438 _mm_storeu_si128((__m128i*)(SUM+i),_mm_sub_epi32(_s0,_sm));
439 _mm_storeu_si128((__m128i*)(SUM+i+4), _mm_sub_epi32(_s01,_sm1));
443 for( ; i < width; i++ )
445 int s0 = SUM[i] + Sp[i];
446 D[i] = saturate_cast<short>(s0*_scale);
456 for( ; i < width-8; i+=8 )
459 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
460 __m128i _sm1 = _mm_loadu_si128((const __m128i*)(Sm+i+4));
462 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
463 _mm_loadu_si128((const __m128i*)(Sp+i)));
464 __m128i _s01 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i+4)),
465 _mm_loadu_si128((const __m128i*)(Sp+i+4)));
467 _mm_storeu_si128((__m128i*)(D+i), _mm_packs_epi32(_s0, _s01));
469 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
470 _mm_storeu_si128((__m128i*)(SUM+i+4),_mm_sub_epi32(_s01,_sm1));
475 for( ; i < width; i++ )
477 int s0 = SUM[i] + Sp[i];
478 D[i] = saturate_cast<short>(s0);
488 std::vector<int> sum;
493 struct ColumnSum<int, ushort> :
494 public BaseColumnFilter
496 ColumnSum( int _ksize, int _anchor, double _scale ) :
505 virtual void reset() { sumCount = 0; }
507 virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
511 bool haveScale = scale != 1;
512 double _scale = scale;
514 bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
517 if( width != (int)sum.size() )
525 memset((void*)SUM, 0, width*sizeof(int));
526 for( ; sumCount < ksize - 1; sumCount++, src++ )
528 const int* Sp = (const int*)src[0];
533 for( ; i < width-4; i+=4 )
535 __m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i));
536 __m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i));
537 _mm_storeu_si128((__m128i*)(SUM+i), _mm_add_epi32(_sum, _sp));
541 for( ; i < width; i++ )
547 CV_Assert( sumCount == ksize-1 );
551 for( ; count--; src++ )
553 const int* Sp = (const int*)src[0];
554 const int* Sm = (const int*)src[1-ksize];
555 ushort* D = (ushort*)dst;
562 const __m128 scale4 = _mm_set1_ps((float)_scale);
563 const __m128i delta0 = _mm_set1_epi32(0x8000);
564 const __m128i delta1 = _mm_set1_epi32(0x80008000);
566 for( ; i < width-4; i+=4)
568 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
569 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
570 _mm_loadu_si128((const __m128i*)(Sp+i)));
572 __m128i _res = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s0)));
574 _res = _mm_sub_epi32(_res, delta0);
575 _res = _mm_add_epi16(_mm_packs_epi32(_res, _res), delta1);
577 _mm_storel_epi64((__m128i*)(D+i), _res);
578 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
582 for( ; i < width; i++ )
584 int s0 = SUM[i] + Sp[i];
585 D[i] = saturate_cast<ushort>(s0*_scale);
595 const __m128i delta0 = _mm_set1_epi32(0x8000);
596 const __m128i delta1 = _mm_set1_epi32(0x80008000);
598 for( ; i < width-4; i+=4 )
600 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
601 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
602 _mm_loadu_si128((const __m128i*)(Sp+i)));
604 __m128i _res = _mm_sub_epi32(_s0, delta0);
605 _res = _mm_add_epi16(_mm_packs_epi32(_res, _res), delta1);
607 _mm_storel_epi64((__m128i*)(D+i), _res);
608 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
613 for( ; i < width; i++ )
615 int s0 = SUM[i] + Sp[i];
616 D[i] = saturate_cast<ushort>(s0);
626 std::vector<int> sum;
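/*
 * Editor's note (illustrative): SSE2 has no unsigned-saturating pack from
 * 32-bit to 16-bit, so the ushort specialization above biases by 0x8000,
 * packs with signed saturation (_mm_packs_epi32), then adds 0x8000 back per
 * 16-bit lane (delta1 = 0x80008000).  Scalar equivalent of one lane
 * (packUnsigned16 is a hypothetical helper):
 *
 *     static unsigned short packUnsigned16(int v)
 *     {
 *         int t = v - 0x8000;                   // bias
 *         if (t < -0x8000) t = -0x8000;         // signed 16-bit saturation
 *         if (t >  0x7fff) t =  0x7fff;
 *         return (unsigned short)(t + 0x8000);  // un-bias => clamp to [0, 65535]
 *     }
 */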
#define DIVUP(total, grain) (((total) + (grain) - 1) / (grain))
633 static bool ocl_boxFilter( InputArray _src, OutputArray _dst, int ddepth,
634 Size ksize, Point anchor, int borderType, bool normalize, bool sqr = false )
636 int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), esz = CV_ELEM_SIZE(type);
637 bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
642 if (cn > 4 || (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F)) ||
643 _src.offset() % esz != 0 || _src.step() % esz != 0)
647 anchor.x = ksize.width / 2;
649 anchor.y = ksize.height / 2;
651 int computeUnits = ocl::Device::getDefault().maxComputeUnits();
652 float alpha = 1.0f / (ksize.height * ksize.width);
653 Size size = _src.size(), wholeSize;
654 bool isolated = (borderType & BORDER_ISOLATED) != 0;
655 borderType &= ~BORDER_ISOLATED;
656 int wdepth = std::max(CV_32F, std::max(ddepth, sdepth));
658 const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
659 size_t globalsize[2] = { size.width, size.height };
660 size_t localsize[2] = { 0, 1 };
662 UMat src = _src.getUMat();
666 src.locateROI(wholeSize, ofs);
669 int h = isolated ? size.height : wholeSize.height;
670 int w = isolated ? size.width : wholeSize.width;
672 size_t maxWorkItemSizes[32];
673 ocl::Device::getDefault().maxWorkItemSizes(maxWorkItemSizes);
674 int tryWorkItems = (int)maxWorkItemSizes[0];
679 int BLOCK_SIZE_X = tryWorkItems, BLOCK_SIZE_Y = std::min(ksize.height * 10, size.height);
681 while (BLOCK_SIZE_X > 32 && BLOCK_SIZE_X >= ksize.width * 2 && BLOCK_SIZE_X > size.width * 2)
683 while (BLOCK_SIZE_Y < BLOCK_SIZE_X / 8 && BLOCK_SIZE_Y * computeUnits * 32 < size.height)
686 if (ksize.width > BLOCK_SIZE_X || w < ksize.width || h < ksize.height)
690 String opts = format("-D LOCAL_SIZE_X=%d -D BLOCK_SIZE_Y=%d -D ST=%s -D DT=%s -D WT=%s -D convertToDT=%s -D convertToWT=%s"
691 " -D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d -D %s%s%s%s%s"
692 " -D ST1=%s -D DT1=%s -D cn=%d",
693 BLOCK_SIZE_X, BLOCK_SIZE_Y, ocl::typeToStr(type), ocl::typeToStr(CV_MAKE_TYPE(ddepth, cn)),
694 ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
695 ocl::convertTypeStr(wdepth, ddepth, cn, cvt[0]),
696 ocl::convertTypeStr(sdepth, wdepth, cn, cvt[1]),
697 anchor.x, anchor.y, ksize.width, ksize.height, borderMap[borderType],
698 isolated ? " -D BORDER_ISOLATED" : "", doubleSupport ? " -D DOUBLE_SUPPORT" : "",
699 normalize ? " -D NORMALIZE" : "", sqr ? " -D SQR" : "",
700 ocl::typeToStr(sdepth), ocl::typeToStr(ddepth), cn);
702 localsize[0] = BLOCK_SIZE_X;
703 globalsize[0] = DIVUP(size.width, BLOCK_SIZE_X - (ksize.width - 1)) * BLOCK_SIZE_X;
704 globalsize[1] = DIVUP(size.height, BLOCK_SIZE_Y);
706 kernel.create("boxFilter", cv::ocl::imgproc::boxFilter_oclsrc, opts);
710 size_t kernelWorkGroupSize = kernel.workGroupSize();
711 if (localsize[0] <= kernelWorkGroupSize)
713 if (BLOCK_SIZE_X < (int)kernelWorkGroupSize)
716 tryWorkItems = (int)kernelWorkGroupSize;
719 _dst.create(size, CV_MAKETYPE(ddepth, cn));
720 UMat dst = _dst.getUMat();
722 int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
723 idxArg = kernel.set(idxArg, (int)src.step);
724 int srcOffsetX = (int)((src.offset % src.step) / src.elemSize());
725 int srcOffsetY = (int)(src.offset / src.step);
726 int srcEndX = isolated ? srcOffsetX + size.width : wholeSize.width;
727 int srcEndY = isolated ? srcOffsetY + size.height : wholeSize.height;
728 idxArg = kernel.set(idxArg, srcOffsetX);
729 idxArg = kernel.set(idxArg, srcOffsetY);
730 idxArg = kernel.set(idxArg, srcEndX);
731 idxArg = kernel.set(idxArg, srcEndY);
732 idxArg = kernel.set(idxArg, ocl::KernelArg::WriteOnly(dst));
734 idxArg = kernel.set(idxArg, (float)alpha);
736 return kernel.run(2, globalsize, localsize, false);
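/*
 * Editor's note (illustrative arithmetic, assumed values): the launch grid
 * above deliberately over-allocates columns, because each work-group of
 * BLOCK_SIZE_X items can emit only BLOCK_SIZE_X - (ksize.width - 1) output
 * columns; the remaining items read the horizontal halo.  For example, with
 * size.width = 1000, BLOCK_SIZE_X = 256 and ksize.width = 5:
 *
 *     useful columns per group = 256 - 4 = 252
 *     groups along x           = DIVUP(1000, 252) = 4
 *     globalsize[0]            = 4 * 256 = 1024 work-items
 *     globalsize[1]            = DIVUP(size.height, BLOCK_SIZE_Y) group rows
 */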
744 cv::Ptr<cv::BaseRowFilter> cv::getRowSumFilter(int srcType, int sumType, int ksize, int anchor)
746 int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
747 CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(srcType) );
752 if( sdepth == CV_8U && ddepth == CV_32S )
753 return makePtr<RowSum<uchar, int> >(ksize, anchor);
754 if( sdepth == CV_8U && ddepth == CV_64F )
755 return makePtr<RowSum<uchar, double> >(ksize, anchor);
756 if( sdepth == CV_16U && ddepth == CV_32S )
757 return makePtr<RowSum<ushort, int> >(ksize, anchor);
758 if( sdepth == CV_16U && ddepth == CV_64F )
759 return makePtr<RowSum<ushort, double> >(ksize, anchor);
760 if( sdepth == CV_16S && ddepth == CV_32S )
761 return makePtr<RowSum<short, int> >(ksize, anchor);
762 if( sdepth == CV_32S && ddepth == CV_32S )
763 return makePtr<RowSum<int, int> >(ksize, anchor);
764 if( sdepth == CV_16S && ddepth == CV_64F )
765 return makePtr<RowSum<short, double> >(ksize, anchor);
766 if( sdepth == CV_32F && ddepth == CV_64F )
767 return makePtr<RowSum<float, double> >(ksize, anchor);
768 if( sdepth == CV_64F && ddepth == CV_64F )
769 return makePtr<RowSum<double, double> >(ksize, anchor);
771 CV_Error_( CV_StsNotImplemented,
772 ("Unsupported combination of source format (=%d), and buffer format (=%d)",
775 return Ptr<BaseRowFilter>();
779 cv::Ptr<cv::BaseColumnFilter> cv::getColumnSumFilter(int sumType, int dstType, int ksize,
780 int anchor, double scale)
782 int sdepth = CV_MAT_DEPTH(sumType), ddepth = CV_MAT_DEPTH(dstType);
783 CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(dstType) );
788 if( ddepth == CV_8U && sdepth == CV_32S )
789 return makePtr<ColumnSum<int, uchar> >(ksize, anchor, scale);
790 if( ddepth == CV_8U && sdepth == CV_64F )
791 return makePtr<ColumnSum<double, uchar> >(ksize, anchor, scale);
792 if( ddepth == CV_16U && sdepth == CV_32S )
793 return makePtr<ColumnSum<int, ushort> >(ksize, anchor, scale);
794 if( ddepth == CV_16U && sdepth == CV_64F )
795 return makePtr<ColumnSum<double, ushort> >(ksize, anchor, scale);
796 if( ddepth == CV_16S && sdepth == CV_32S )
797 return makePtr<ColumnSum<int, short> >(ksize, anchor, scale);
798 if( ddepth == CV_16S && sdepth == CV_64F )
799 return makePtr<ColumnSum<double, short> >(ksize, anchor, scale);
800 if( ddepth == CV_32S && sdepth == CV_32S )
801 return makePtr<ColumnSum<int, int> >(ksize, anchor, scale);
802 if( ddepth == CV_32F && sdepth == CV_32S )
803 return makePtr<ColumnSum<int, float> >(ksize, anchor, scale);
804 if( ddepth == CV_32F && sdepth == CV_64F )
805 return makePtr<ColumnSum<double, float> >(ksize, anchor, scale);
806 if( ddepth == CV_64F && sdepth == CV_32S )
807 return makePtr<ColumnSum<int, double> >(ksize, anchor, scale);
808 if( ddepth == CV_64F && sdepth == CV_64F )
809 return makePtr<ColumnSum<double, double> >(ksize, anchor, scale);
811 CV_Error_( CV_StsNotImplemented,
812 ("Unsupported combination of sum format (=%d), and destination format (=%d)",
815 return Ptr<BaseColumnFilter>();
819 cv::Ptr<cv::FilterEngine> cv::createBoxFilter( int srcType, int dstType, Size ksize,
820 Point anchor, bool normalize, int borderType )
822 int sdepth = CV_MAT_DEPTH(srcType);
823 int cn = CV_MAT_CN(srcType), sumType = CV_64F;
824 if( sdepth <= CV_32S && (!normalize ||
825 ksize.width*ksize.height <= (sdepth == CV_8U ? (1<<23) :
826 sdepth == CV_16U ? (1 << 15) : (1 << 16))) )
828 sumType = CV_MAKETYPE( sumType, cn );
830 Ptr<BaseRowFilter> rowFilter = getRowSumFilter(srcType, sumType, ksize.width, anchor.x );
831 Ptr<BaseColumnFilter> columnFilter = getColumnSumFilter(sumType,
832 dstType, ksize.height, anchor.y, normalize ? 1./(ksize.width*ksize.height) : 1);
834 return makePtr<FilterEngine>(Ptr<BaseFilter>(), rowFilter, columnFilter,
835 srcType, dstType, sumType, borderType );
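/*
 * Editor's note on the kernel-area bounds above (worked arithmetic): for 8-bit
 * input the partial sums fit a 32-bit accumulator as long as the window holds
 * at most 2^23 pixels, since
 *
 *     255 * 2^23 = 2,139,095,040  <  2^31 - 1 = 2,147,483,647 ,
 *
 * and similarly 65535 * 2^15 < 2^31 - 1 for 16-bit input.  Larger windows (or
 * deeper inputs) keep sumType at CV_64F and use the double-based ColumnSum
 * instantiations instead.
 */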
839 void cv::boxFilter( InputArray _src, OutputArray _dst, int ddepth,
840 Size ksize, Point anchor,
841 bool normalize, int borderType )
843 CV_OCL_RUN(_dst.isUMat(), ocl_boxFilter(_src, _dst, ddepth, ksize, anchor, borderType, normalize))
845 Mat src = _src.getMat();
846 int stype = src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
849 _dst.create( src.size(), CV_MAKETYPE(ddepth, cn) );
850 Mat dst = _dst.getMat();
851 if( borderType != BORDER_CONSTANT && normalize && (borderType & BORDER_ISOLATED) != 0 )
858 #ifdef HAVE_TEGRA_OPTIMIZATION
859 if ( tegra::box(src, dst, ksize, anchor, normalize, borderType) )
863 #if defined(HAVE_IPP)
864 int ippBorderType = borderType & ~BORDER_ISOLATED;
865 Point ocvAnchor, ippAnchor;
866 ocvAnchor.x = anchor.x < 0 ? ksize.width / 2 : anchor.x;
867 ocvAnchor.y = anchor.y < 0 ? ksize.height / 2 : anchor.y;
868 ippAnchor.x = ksize.width / 2 - (ksize.width % 2 == 0 ? 1 : 0);
869 ippAnchor.y = ksize.height / 2 - (ksize.height % 2 == 0 ? 1 : 0);
871 if (normalize && !src.isSubmatrix() && ddepth == sdepth &&
872 (/*ippBorderType == BORDER_REPLICATE ||*/ /* returns ippStsStepErr: Step value is not valid */
873 ippBorderType == BORDER_CONSTANT) && ocvAnchor == ippAnchor )
876 IppiSize roiSize = { dst.cols, dst.rows }, maskSize = { ksize.width, ksize.height };
878 #define IPP_FILTER_BOX_BORDER(ippType, ippDataType, flavor) \
881 if (ippiFilterBoxBorderGetBufferSize(roiSize, maskSize, ippDataType, cn, &bufSize) >= 0) \
883 Ipp8u * buffer = ippsMalloc_8u(bufSize); \
884 ippType borderValue[4] = { 0, 0, 0, 0 }; \
885 ippBorderType = ippBorderType == BORDER_CONSTANT ? ippBorderConst : ippBorderRepl; \
886 IppStatus status = ippiFilterBoxBorder_##flavor((const ippType *)src.data, (int)src.step, (ippType *)dst.data, \
887 (int)dst.step, roiSize, maskSize, \
888 (IppiBorderType)ippBorderType, borderValue, buffer); \
893 setIppErrorStatus(); \
896 if (stype == CV_8UC1)
897 IPP_FILTER_BOX_BORDER(Ipp8u, ipp8u, 8u_C1R);
898 else if (stype == CV_8UC3)
899 IPP_FILTER_BOX_BORDER(Ipp8u, ipp8u, 8u_C3R);
900 else if (stype == CV_8UC4)
901 IPP_FILTER_BOX_BORDER(Ipp8u, ipp8u, 8u_C4R);
903 else if (stype == CV_16UC1)
904 IPP_FILTER_BOX_BORDER(Ipp16u, ipp16u, 16u_C1R);
905 else if (stype == CV_16UC3)
906 IPP_FILTER_BOX_BORDER(Ipp16u, ipp16u, 16u_C3R);
907 else if (stype == CV_16UC4)
908 IPP_FILTER_BOX_BORDER(Ipp16u, ipp16u, 16u_C4R);
910 else if (stype == CV_16SC1)
911 IPP_FILTER_BOX_BORDER(Ipp16s, ipp16s, 16s_C1R);
912 else if (stype == CV_16SC3)
913 IPP_FILTER_BOX_BORDER(Ipp16s, ipp16s, 16s_C3R);
914 else if (stype == CV_16SC4)
915 IPP_FILTER_BOX_BORDER(Ipp16s, ipp16s, 16s_C4R);
917 else if (stype == CV_32FC1)
918 IPP_FILTER_BOX_BORDER(Ipp32f, ipp32f, 32f_C1R);
919 else if (stype == CV_32FC3)
920 IPP_FILTER_BOX_BORDER(Ipp32f, ipp32f, 32f_C3R);
921 else if (stype == CV_32FC4)
922 IPP_FILTER_BOX_BORDER(Ipp32f, ipp32f, 32f_C4R);
924 #undef IPP_FILTER_BOX_BORDER
927 Ptr<FilterEngine> f = createBoxFilter( src.type(), dst.type(),
928 ksize, anchor, normalize, borderType );
929 f->apply( src, dst );
932 void cv::blur( InputArray src, OutputArray dst,
933 Size ksize, Point anchor, int borderType )
935 boxFilter( src, dst, -1, ksize, anchor, true, borderType );
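/*
 * Usage sketch (illustrative; the file name is hypothetical): cv::blur is the
 * normalized box filter with the destination depth taken from the source.
 *
 *     cv::Mat src = cv::imread("input.png"), dst1, dst2;
 *     cv::blur(src, dst1, cv::Size(5, 5));                          // 5x5 mean
 *     cv::boxFilter(src, dst2, -1, cv::Size(5, 5),
 *                   cv::Point(-1, -1), true, cv::BORDER_DEFAULT);   // same result
 */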
/****************************************************************************************\
                                    Squared Box Filter
\****************************************************************************************/
946 template<typename T, typename ST>
950 SqrRowSum( int _ksize, int _anchor ) :
957 virtual void operator()(const uchar* src, uchar* dst, int width, int cn)
959 const T* S = (const T*)src;
961 int i = 0, k, ksz_cn = ksize*cn;
963 width = (width - 1)*cn;
964 for( k = 0; k < cn; k++, S++, D++ )
967 for( i = 0; i < ksz_cn; i += cn )
973 for( i = 0; i < width; i += cn )
975 ST val0 = (ST)S[i], val1 = (ST)S[i + ksz_cn];
976 s += val1*val1 - val0*val0;
983 static Ptr<BaseRowFilter> getSqrRowSumFilter(int srcType, int sumType, int ksize, int anchor)
985 int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
986 CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(srcType) );
991 if( sdepth == CV_8U && ddepth == CV_32S )
992 return makePtr<SqrRowSum<uchar, int> >(ksize, anchor);
993 if( sdepth == CV_8U && ddepth == CV_64F )
994 return makePtr<SqrRowSum<uchar, double> >(ksize, anchor);
995 if( sdepth == CV_16U && ddepth == CV_64F )
996 return makePtr<SqrRowSum<ushort, double> >(ksize, anchor);
997 if( sdepth == CV_16S && ddepth == CV_64F )
998 return makePtr<SqrRowSum<short, double> >(ksize, anchor);
999 if( sdepth == CV_32F && ddepth == CV_64F )
1000 return makePtr<SqrRowSum<float, double> >(ksize, anchor);
1001 if( sdepth == CV_64F && ddepth == CV_64F )
1002 return makePtr<SqrRowSum<double, double> >(ksize, anchor);
1004 CV_Error_( CV_StsNotImplemented,
1005 ("Unsupported combination of source format (=%d), and buffer format (=%d)",
1008 return Ptr<BaseRowFilter>();
1013 void cv::sqrBoxFilter( InputArray _src, OutputArray _dst, int ddepth,
1014 Size ksize, Point anchor,
1015 bool normalize, int borderType )
1017 int srcType = _src.type(), sdepth = CV_MAT_DEPTH(srcType), cn = CV_MAT_CN(srcType);
1018 Size size = _src.size();
1021 ddepth = sdepth < CV_32F ? CV_32F : CV_64F;
1023 if( borderType != BORDER_CONSTANT && normalize )
1025 if( size.height == 1 )
1027 if( size.width == 1 )
1031 CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
1032 ocl_boxFilter(_src, _dst, ddepth, ksize, anchor, borderType, normalize, true))
1034 int sumDepth = CV_64F;
1035 if( sdepth == CV_8U )
1037 int sumType = CV_MAKETYPE( sumDepth, cn ), dstType = CV_MAKETYPE(ddepth, cn);
1039 Mat src = _src.getMat();
1040 _dst.create( size, dstType );
1041 Mat dst = _dst.getMat();
1043 Ptr<BaseRowFilter> rowFilter = getSqrRowSumFilter(srcType, sumType, ksize.width, anchor.x );
1044 Ptr<BaseColumnFilter> columnFilter = getColumnSumFilter(sumType,
1045 dstType, ksize.height, anchor.y,
1046 normalize ? 1./(ksize.width*ksize.height) : 1);
1048 Ptr<FilterEngine> f = makePtr<FilterEngine>(Ptr<BaseFilter>(), rowFilter, columnFilter,
1049 srcType, dstType, sumType, borderType );
1050 f->apply( src, dst );
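/*
 * Editor's sketch (illustrative): a common use of sqrBoxFilter is the local
 * variance Var = E[x^2] - (E[x])^2 over the same window, assuming an input
 * image "img" is available:
 *
 *     cv::Mat mu, mu2, variance;
 *     cv::boxFilter(img, mu, CV_32F, cv::Size(7, 7));       // E[x]
 *     cv::sqrBoxFilter(img, mu2, CV_32F, cv::Size(7, 7));   // E[x^2]
 *     cv::multiply(mu, mu, mu);                             // (E[x])^2, in place
 *     cv::subtract(mu2, mu, variance);                      // E[x^2] - (E[x])^2
 */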
/****************************************************************************************\
                                     Gaussian Blur
\****************************************************************************************/
1058 cv::Mat cv::getGaussianKernel( int n, double sigma, int ktype )
1060 const int SMALL_GAUSSIAN_SIZE = 7;
1061 static const float small_gaussian_tab[][SMALL_GAUSSIAN_SIZE] =
1064 {0.25f, 0.5f, 0.25f},
1065 {0.0625f, 0.25f, 0.375f, 0.25f, 0.0625f},
1066 {0.03125f, 0.109375f, 0.21875f, 0.28125f, 0.21875f, 0.109375f, 0.03125f}
1069 const float* fixed_kernel = n % 2 == 1 && n <= SMALL_GAUSSIAN_SIZE && sigma <= 0 ?
1070 small_gaussian_tab[n>>1] : 0;
1072 CV_Assert( ktype == CV_32F || ktype == CV_64F );
1073 Mat kernel(n, 1, ktype);
1074 float* cf = (float*)kernel.data;
1075 double* cd = (double*)kernel.data;
1077 double sigmaX = sigma > 0 ? sigma : ((n-1)*0.5 - 1)*0.3 + 0.8;
1078 double scale2X = -0.5/(sigmaX*sigmaX);
1082 for( i = 0; i < n; i++ )
1084 double x = i - (n-1)*0.5;
1085 double t = fixed_kernel ? (double)fixed_kernel[i] : std::exp(scale2X*x*x);
1086 if( ktype == CV_32F )
1099 for( i = 0; i < n; i++ )
1101 if( ktype == CV_32F )
1102 cf[i] = (float)(cf[i]*sum);
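/*
 * Editor's summary of getGaussianKernel (same math as the loops above): for
 * sigma > 0 the coefficients are
 *
 *     kernel[i] = alpha * exp( -(i - (n-1)/2)^2 / (2*sigma^2) ),  i = 0..n-1,
 *
 * with alpha chosen so the coefficients sum to 1.  For sigma <= 0 the value
 * sigma = 0.3*((n-1)*0.5 - 1) + 0.8 is derived from the aperture, and small
 * odd apertures (n <= 7) return the hard-coded small_gaussian_tab row instead,
 * e.g. {0.25, 0.5, 0.25} for n = 3.
 *
 * Usage sketch:
 *     cv::Mat k = cv::getGaussianKernel(7, 1.5, CV_64F);   // 7x1 column vector
 */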
1112 static void createGaussianKernels( Mat & kx, Mat & ky, int type, Size ksize,
1113 double sigma1, double sigma2 )
1115 int depth = CV_MAT_DEPTH(type);
1119 // automatic detection of kernel size from sigma
1120 if( ksize.width <= 0 && sigma1 > 0 )
1121 ksize.width = cvRound(sigma1*(depth == CV_8U ? 3 : 4)*2 + 1)|1;
1122 if( ksize.height <= 0 && sigma2 > 0 )
1123 ksize.height = cvRound(sigma2*(depth == CV_8U ? 3 : 4)*2 + 1)|1;
1125 CV_Assert( ksize.width > 0 && ksize.width % 2 == 1 &&
1126 ksize.height > 0 && ksize.height % 2 == 1 );
1128 sigma1 = std::max( sigma1, 0. );
1129 sigma2 = std::max( sigma2, 0. );
1131 kx = getGaussianKernel( ksize.width, sigma1, std::max(depth, CV_32F) );
1132 if( ksize.height == ksize.width && std::abs(sigma1 - sigma2) < DBL_EPSILON )
1135 ky = getGaussianKernel( ksize.height, sigma2, std::max(depth, CV_32F) );
1140 cv::Ptr<cv::FilterEngine> cv::createGaussianFilter( int type, Size ksize,
1141 double sigma1, double sigma2,
1145 createGaussianKernels(kx, ky, type, ksize, sigma1, sigma2);
1147 return createSeparableLinearFilter( type, type, kx, ky, Point(-1,-1), 0, borderType );
1151 void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize,
1152 double sigma1, double sigma2,
1155 int type = _src.type();
1156 Size size = _src.size();
1157 _dst.create( size, type );
1159 if( borderType != BORDER_CONSTANT && (borderType & BORDER_ISOLATED) != 0 )
1161 if( size.height == 1 )
1163 if( size.width == 1 )
1167 if( ksize.width == 1 && ksize.height == 1 )
1173 #ifdef HAVE_TEGRA_OPTIMIZATION
1174 if(sigma1 == 0 && sigma2 == 0 && tegra::gaussian(_src.getMat(), _dst.getMat(), ksize, borderType))
1178 #if IPP_VERSION_X100 >= 801 && 0 // these functions are slower in IPP 8.1
1179 int depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
1181 if ((depth == CV_8U || depth == CV_16U || depth == CV_16S || depth == CV_32F) && (cn == 1 || cn == 3) &&
1182 sigma1 == sigma2 && ksize.width == ksize.height && sigma1 != 0.0 )
1184 IppiBorderType ippBorder = ippiGetBorderType(borderType);
1185 if (ippBorderConst == ippBorder || ippBorderRepl == ippBorder)
1187 Mat src = _src.getMat(), dst = _dst.getMat();
1188 IppiSize roiSize = { src.cols, src.rows };
1189 IppDataType dataType = ippiGetDataType(depth);
1190 Ipp32s specSize = 0, bufferSize = 0;
1192 if (ippiFilterGaussianGetBufferSize(roiSize, (Ipp32u)ksize.width, dataType, cn, &specSize, &bufferSize) >= 0)
1194 IppFilterGaussianSpec * pSpec = (IppFilterGaussianSpec *)ippMalloc(specSize);
1195 Ipp8u * pBuffer = (Ipp8u*)ippMalloc(bufferSize);
1197 if (ippiFilterGaussianInit(roiSize, (Ipp32u)ksize.width, (Ipp32f)sigma1, ippBorder, dataType, 1, pSpec, pBuffer) >= 0)
1199 #define IPP_FILTER_GAUSS(ippfavor, ippcn) \
1202 typedef Ipp##ippfavor ippType; \
1203 ippType borderValues[] = { 0, 0, 0 }; \
1204 IppStatus status = ippcn == 1 ? \
1205 ippiFilterGaussianBorder_##ippfavor##_C1R((const ippType *)src.data, (int)src.step, \
1206 (ippType *)dst.data, (int)dst.step, roiSize, borderValues[0], pSpec, pBuffer) : \
1207 ippiFilterGaussianBorder_##ippfavor##_C3R((const ippType *)src.data, (int)src.step, \
1208 (ippType *)dst.data, (int)dst.step, roiSize, borderValues, pSpec, pBuffer); \
1213 } while ((void)0, 0)
1215 if (type == CV_8UC1)
1216 IPP_FILTER_GAUSS(8u, 1);
1217 else if (type == CV_8UC3)
1218 IPP_FILTER_GAUSS(8u, 3);
1219 else if (type == CV_16UC1)
1220 IPP_FILTER_GAUSS(16u, 1);
1221 else if (type == CV_16UC3)
1222 IPP_FILTER_GAUSS(16u, 3);
1223 else if (type == CV_16SC1)
1224 IPP_FILTER_GAUSS(16s, 1);
1225 else if (type == CV_16SC3)
1226 IPP_FILTER_GAUSS(16s, 3);
1227 else if (type == CV_32FC1)
1228 IPP_FILTER_GAUSS(32f, 1);
1229 else if (type == CV_32FC3)
1230 IPP_FILTER_GAUSS(32f, 3);
1231 #undef IPP_FILTER_GAUSS
1234 setIppErrorStatus();
1240 createGaussianKernels(kx, ky, type, ksize, sigma1, sigma2);
1241 sepFilter2D(_src, _dst, CV_MAT_DEPTH(type), kx, ky, Point(-1,-1), 0, borderType );
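/*
 * Editor's sketch (illustrative, assumes an 8-bit image "src" is available):
 * the fallback path above is equivalent to building the 1-D kernels and
 * calling sepFilter2D directly; other backends (OpenCL/IPP/Tegra) may differ
 * only in rounding.
 *
 *     cv::Mat dst1, dst2;
 *     cv::GaussianBlur(src, dst1, cv::Size(5, 5), 1.2, 1.2);
 *
 *     cv::Mat k = cv::getGaussianKernel(5, 1.2, CV_32F);
 *     cv::sepFilter2D(src, dst2, -1, k, k, cv::Point(-1, -1), 0, cv::BORDER_DEFAULT);
 */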
/****************************************************************************************\
                                      Median Filter
\****************************************************************************************/
/**
 * This structure represents a two-tier histogram. The first tier (known as the
 * "coarse" level) is 4 bit wide and the second tier (known as the "fine" level)
 * is 8 bit wide. Pixels inserted in the fine level also get inserted into the
 * coarse bucket designated by the 4 MSBs of the fine bucket value.
 *
 * The structure is aligned on 16 bytes, which is a prerequisite for SIMD
 * instructions. Each bucket is 16 bit wide, which means that extra care must be
 * taken to prevent overflow.
 */
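/*
 * Editor's sketch (illustrative, scalar): the coarse/fine split lets the
 * median be located in at most 16 + 16 bucket reads.  findMedian below is a
 * hypothetical helper showing the search the loops further down perform while
 * also maintaining the histograms incrementally; t is the zero-based rank of
 * the median within the window (2*r*r + 2*r for a (2r+1)x(2r+1) window).
 *
 *     static int findMedian(const int h_coarse[16], const int h_fine[256], int t)
 *     {
 *         int k = 0, sum = 0;
 *         for (; k < 16; k++)                    // locate the coarse band
 *         {
 *             if (sum + h_coarse[k] > t) break;
 *             sum += h_coarse[k];
 *         }
 *         int b = 16 * k;
 *         for (; b < 16 * k + 16; b++)           // refine within that band
 *         {
 *             sum += h_fine[b];
 *             if (sum > t) break;
 *         }
 *         return b;                              // the median pixel value
 *     }
 */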
1270 #define MEDIAN_HAVE_SIMD 1
1272 static inline void histogram_add_simd( const HT x[16], HT y[16] )
1274 const __m128i* rx = (const __m128i*)x;
1275 __m128i* ry = (__m128i*)y;
1276 __m128i r0 = _mm_add_epi16(_mm_load_si128(ry+0),_mm_load_si128(rx+0));
1277 __m128i r1 = _mm_add_epi16(_mm_load_si128(ry+1),_mm_load_si128(rx+1));
1278 _mm_store_si128(ry+0, r0);
1279 _mm_store_si128(ry+1, r1);
1282 static inline void histogram_sub_simd( const HT x[16], HT y[16] )
1284 const __m128i* rx = (const __m128i*)x;
1285 __m128i* ry = (__m128i*)y;
1286 __m128i r0 = _mm_sub_epi16(_mm_load_si128(ry+0),_mm_load_si128(rx+0));
1287 __m128i r1 = _mm_sub_epi16(_mm_load_si128(ry+1),_mm_load_si128(rx+1));
1288 _mm_store_si128(ry+0, r0);
1289 _mm_store_si128(ry+1, r1);
1293 #define MEDIAN_HAVE_SIMD 0
1297 static inline void histogram_add( const HT x[16], HT y[16] )
1300 for( i = 0; i < 16; ++i )
1301 y[i] = (HT)(y[i] + x[i]);
1304 static inline void histogram_sub( const HT x[16], HT y[16] )
1307 for( i = 0; i < 16; ++i )
1308 y[i] = (HT)(y[i] - x[i]);
1311 static inline void histogram_muladd( int a, const HT x[16],
1314 for( int i = 0; i < 16; ++i )
1315 y[i] = (HT)(y[i] + a * x[i]);
1319 medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
1322 * HOP is short for Histogram OPeration. This macro makes an operation \a op on
1323 * histogram \a h for pixel value \a x. It takes care of handling both levels.
1325 #define HOP(h,x,op) \
1326 h.coarse[x>>4] op, \
1327 *((HT*)h.fine + x) op
1329 #define COP(c,j,x,op) \
1330 h_coarse[ 16*(n*c+j) + (x>>4) ] op, \
1331 h_fine[ 16 * (n*(16*c+(x>>4)) + j) + (x & 0xF) ] op
1333 int cn = _dst.channels(), m = _dst.rows, r = (ksize-1)/2;
1334 size_t sstep = _src.step, dstep = _dst.step;
1335 Histogram CV_DECL_ALIGNED(16) H[4];
1336 HT CV_DECL_ALIGNED(16) luc[4][16];
1338 int STRIPE_SIZE = std::min( _dst.cols, 512/cn );
1340 std::vector<HT> _h_coarse(1 * 16 * (STRIPE_SIZE + 2*r) * cn + 16);
1341 std::vector<HT> _h_fine(16 * 16 * (STRIPE_SIZE + 2*r) * cn + 16);
1342 HT* h_coarse = alignPtr(&_h_coarse[0], 16);
1343 HT* h_fine = alignPtr(&_h_fine[0], 16);
1344 #if MEDIAN_HAVE_SIMD
1345 volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
1348 for( int x = 0; x < _dst.cols; x += STRIPE_SIZE )
1350 int i, j, k, c, n = std::min(_dst.cols - x, STRIPE_SIZE) + r*2;
1351 const uchar* src = _src.data + x*cn;
1352 uchar* dst = _dst.data + (x - r)*cn;
1354 memset( h_coarse, 0, 16*n*cn*sizeof(h_coarse[0]) );
1355 memset( h_fine, 0, 16*16*n*cn*sizeof(h_fine[0]) );
1357 // First row initialization
1358 for( c = 0; c < cn; c++ )
1360 for( j = 0; j < n; j++ )
1361 COP( c, j, src[cn*j+c], += (cv::HT)(r+2) );
1363 for( i = 1; i < r; i++ )
1365 const uchar* p = src + sstep*std::min(i, m-1);
1366 for ( j = 0; j < n; j++ )
1367 COP( c, j, p[cn*j+c], ++ );
1371 for( i = 0; i < m; i++ )
1373 const uchar* p0 = src + sstep * std::max( 0, i-r-1 );
1374 const uchar* p1 = src + sstep * std::min( m-1, i+r );
1376 memset( H, 0, cn*sizeof(H[0]) );
1377 memset( luc, 0, cn*sizeof(luc[0]) );
1378 for( c = 0; c < cn; c++ )
1380 // Update column histograms for the entire row.
1381 for( j = 0; j < n; j++ )
1383 COP( c, j, p0[j*cn + c], -- );
1384 COP( c, j, p1[j*cn + c], ++ );
1387 // First column initialization
1388 for( k = 0; k < 16; ++k )
1389 histogram_muladd( 2*r+1, &h_fine[16*n*(16*c+k)], &H[c].fine[k][0] );
1391 #if MEDIAN_HAVE_SIMD
1394 for( j = 0; j < 2*r; ++j )
1395 histogram_add_simd( &h_coarse[16*(n*c+j)], H[c].coarse );
1397 for( j = r; j < n-r; j++ )
1399 int t = 2*r*r + 2*r, b, sum = 0;
1402 histogram_add_simd( &h_coarse[16*(n*c + std::min(j+r,n-1))], H[c].coarse );
1404 // Find median at coarse level
1405 for ( k = 0; k < 16 ; ++k )
1407 sum += H[c].coarse[k];
1410 sum -= H[c].coarse[k];
1416 /* Update corresponding histogram segment */
1417 if ( luc[c][k] <= j-r )
1419 memset( &H[c].fine[k], 0, 16 * sizeof(HT) );
1420 for ( luc[c][k] = cv::HT(j-r); luc[c][k] < MIN(j+r+1,n); ++luc[c][k] )
1421 histogram_add_simd( &h_fine[16*(n*(16*c+k)+luc[c][k])], H[c].fine[k] );
1423 if ( luc[c][k] < j+r+1 )
1425 histogram_muladd( j+r+1 - n, &h_fine[16*(n*(16*c+k)+(n-1))], &H[c].fine[k][0] );
1426 luc[c][k] = (HT)(j+r+1);
1431 for ( ; luc[c][k] < j+r+1; ++luc[c][k] )
1433 histogram_sub_simd( &h_fine[16*(n*(16*c+k)+MAX(luc[c][k]-2*r-1,0))], H[c].fine[k] );
1434 histogram_add_simd( &h_fine[16*(n*(16*c+k)+MIN(luc[c][k],n-1))], H[c].fine[k] );
1438 histogram_sub_simd( &h_coarse[16*(n*c+MAX(j-r,0))], H[c].coarse );
1440 /* Find median in segment */
1441 segment = H[c].fine[k];
1442 for ( b = 0; b < 16 ; b++ )
1447 dst[dstep*i+cn*j+c] = (uchar)(16*k + b);
1457 for( j = 0; j < 2*r; ++j )
1458 histogram_add( &h_coarse[16*(n*c+j)], H[c].coarse );
1460 for( j = r; j < n-r; j++ )
1462 int t = 2*r*r + 2*r, b, sum = 0;
1465 histogram_add( &h_coarse[16*(n*c + std::min(j+r,n-1))], H[c].coarse );
1467 // Find median at coarse level
1468 for ( k = 0; k < 16 ; ++k )
1470 sum += H[c].coarse[k];
1473 sum -= H[c].coarse[k];
1479 /* Update corresponding histogram segment */
1480 if ( luc[c][k] <= j-r )
1482 memset( &H[c].fine[k], 0, 16 * sizeof(HT) );
1483 for ( luc[c][k] = cv::HT(j-r); luc[c][k] < MIN(j+r+1,n); ++luc[c][k] )
1484 histogram_add( &h_fine[16*(n*(16*c+k)+luc[c][k])], H[c].fine[k] );
1486 if ( luc[c][k] < j+r+1 )
1488 histogram_muladd( j+r+1 - n, &h_fine[16*(n*(16*c+k)+(n-1))], &H[c].fine[k][0] );
1489 luc[c][k] = (HT)(j+r+1);
1494 for ( ; luc[c][k] < j+r+1; ++luc[c][k] )
1496 histogram_sub( &h_fine[16*(n*(16*c+k)+MAX(luc[c][k]-2*r-1,0))], H[c].fine[k] );
1497 histogram_add( &h_fine[16*(n*(16*c+k)+MIN(luc[c][k],n-1))], H[c].fine[k] );
1501 histogram_sub( &h_coarse[16*(n*c+MAX(j-r,0))], H[c].coarse );
1503 /* Find median in segment */
1504 segment = H[c].fine[k];
1505 for ( b = 0; b < 16 ; b++ )
1510 dst[dstep*i+cn*j+c] = (uchar)(16*k + b);
1526 medianBlur_8u_Om( const Mat& _src, Mat& _dst, int m )
1533 Size size = _dst.size();
1534 const uchar* src = _src.data;
1535 uchar* dst = _dst.data;
1536 int src_step = (int)_src.step, dst_step = (int)_dst.step;
1537 int cn = _src.channels();
1538 const uchar* src_max = src + size.height*src_step;
1540 #define UPDATE_ACC01( pix, cn, op ) \
1544 zone0[cn][p >> 4] op; \
1547 //CV_Assert( size.height >= nx && size.width >= nx );
1548 for( x = 0; x < size.width; x++, src += cn, dst += cn )
1550 uchar* dst_cur = dst;
1551 const uchar* src_top = src;
1552 const uchar* src_bottom = src;
1554 int src_step1 = src_step, dst_step1 = dst_step;
1558 src_bottom = src_top += src_step*(size.height-1);
1559 dst_cur += dst_step*(size.height-1);
1560 src_step1 = -src_step1;
1561 dst_step1 = -dst_step1;
1565 memset( zone0, 0, sizeof(zone0[0])*cn );
1566 memset( zone1, 0, sizeof(zone1[0])*cn );
1568 for( y = 0; y <= m/2; y++ )
1570 for( c = 0; c < cn; c++ )
1574 for( k = 0; k < m*cn; k += cn )
1575 UPDATE_ACC01( src_bottom[k+c], c, ++ );
1579 for( k = 0; k < m*cn; k += cn )
1580 UPDATE_ACC01( src_bottom[k+c], c, += m/2+1 );
1584 if( (src_step1 > 0 && y < size.height-1) ||
1585 (src_step1 < 0 && size.height-y-1 > 0) )
1586 src_bottom += src_step1;
1589 for( y = 0; y < size.height; y++, dst_cur += dst_step1 )
1592 for( c = 0; c < cn; c++ )
1597 int t = s + zone0[c][k];
1608 dst_cur[c] = (uchar)k;
1611 if( y+1 == size.height )
1616 for( k = 0; k < m; k++ )
1619 int q = src_bottom[k];
1628 for( k = 0; k < m*3; k += 3 )
1630 UPDATE_ACC01( src_top[k], 0, -- );
1631 UPDATE_ACC01( src_top[k+1], 1, -- );
1632 UPDATE_ACC01( src_top[k+2], 2, -- );
1634 UPDATE_ACC01( src_bottom[k], 0, ++ );
1635 UPDATE_ACC01( src_bottom[k+1], 1, ++ );
1636 UPDATE_ACC01( src_bottom[k+2], 2, ++ );
1642 for( k = 0; k < m*4; k += 4 )
1644 UPDATE_ACC01( src_top[k], 0, -- );
1645 UPDATE_ACC01( src_top[k+1], 1, -- );
1646 UPDATE_ACC01( src_top[k+2], 2, -- );
1647 UPDATE_ACC01( src_top[k+3], 3, -- );
1649 UPDATE_ACC01( src_bottom[k], 0, ++ );
1650 UPDATE_ACC01( src_bottom[k+1], 1, ++ );
1651 UPDATE_ACC01( src_bottom[k+2], 2, ++ );
1652 UPDATE_ACC01( src_bottom[k+3], 3, ++ );
1656 if( (src_step1 > 0 && src_bottom + src_step1 < src_max) ||
1657 (src_step1 < 0 && src_bottom + src_step1 >= src) )
1658 src_bottom += src_step1;
1661 src_top += src_step1;
1671 typedef uchar value_type;
1672 typedef int arg_type;
1674 arg_type load(const uchar* ptr) { return *ptr; }
1675 void store(uchar* ptr, arg_type val) { *ptr = (uchar)val; }
1676 void operator()(arg_type& a, arg_type& b) const
1678 int t = CV_FAST_CAST_8U(a - b);
1685 typedef ushort value_type;
1686 typedef int arg_type;
1688 arg_type load(const ushort* ptr) { return *ptr; }
1689 void store(ushort* ptr, arg_type val) { *ptr = (ushort)val; }
1690 void operator()(arg_type& a, arg_type& b) const
1700 typedef short value_type;
1701 typedef int arg_type;
1703 arg_type load(const short* ptr) { return *ptr; }
1704 void store(short* ptr, arg_type val) { *ptr = (short)val; }
1705 void operator()(arg_type& a, arg_type& b) const
1715 typedef float value_type;
1716 typedef float arg_type;
1718 arg_type load(const float* ptr) { return *ptr; }
1719 void store(float* ptr, arg_type val) { *ptr = val; }
1720 void operator()(arg_type& a, arg_type& b) const
1732 typedef uchar value_type;
1733 typedef __m128i arg_type;
1735 arg_type load(const uchar* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
1736 void store(uchar* ptr, arg_type val) { _mm_storeu_si128((__m128i*)ptr, val); }
1737 void operator()(arg_type& a, arg_type& b) const
1740 a = _mm_min_epu8(a, b);
1741 b = _mm_max_epu8(b, t);
1748 typedef ushort value_type;
1749 typedef __m128i arg_type;
1751 arg_type load(const ushort* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
1752 void store(ushort* ptr, arg_type val) { _mm_storeu_si128((__m128i*)ptr, val); }
1753 void operator()(arg_type& a, arg_type& b) const
1755 arg_type t = _mm_subs_epu16(a, b);
1756 a = _mm_subs_epu16(a, t);
1757 b = _mm_adds_epu16(b, t);
1764 typedef short value_type;
1765 typedef __m128i arg_type;
1767 arg_type load(const short* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
1768 void store(short* ptr, arg_type val) { _mm_storeu_si128((__m128i*)ptr, val); }
1769 void operator()(arg_type& a, arg_type& b) const
1772 a = _mm_min_epi16(a, b);
1773 b = _mm_max_epi16(b, t);
1780 typedef float value_type;
1781 typedef __m128 arg_type;
1783 arg_type load(const float* ptr) { return _mm_loadu_ps(ptr); }
1784 void store(float* ptr, arg_type val) { _mm_storeu_ps(ptr, val); }
1785 void operator()(arg_type& a, arg_type& b) const
1788 a = _mm_min_ps(a, b);
1789 b = _mm_max_ps(b, t);
1796 typedef MinMax8u MinMaxVec8u;
1797 typedef MinMax16u MinMaxVec16u;
1798 typedef MinMax16s MinMaxVec16s;
1799 typedef MinMax32f MinMaxVec32f;
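/*
 * Editor's note (illustrative): each MinMax / MinMaxVec functor is a
 * compare-exchange: after op(a, b), a holds min(a, b) and b holds max(a, b).
 * Chaining exchanges in a fixed pattern forms a sorting network, which is
 * branch-free and runs unchanged on SSE2 vectors.  Scalar example of the
 * 3-element case used below (cmpx and median3 are hypothetical names):
 *
 *     static inline void cmpx(int& a, int& b) { if (a > b) std::swap(a, b); }
 *
 *     static int median3(int p0, int p1, int p2)
 *     {
 *         cmpx(p0, p1); cmpx(p1, p2); cmpx(p0, p1);
 *         return p1;                 // middle element after the network
 *     }
 */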
1803 template<class Op, class VecOp>
1805 medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
1807 typedef typename Op::value_type T;
1808 typedef typename Op::arg_type WT;
1809 typedef typename VecOp::arg_type VT;
1811 const T* src = (const T*)_src.data;
1812 T* dst = (T*)_dst.data;
1813 int sstep = (int)(_src.step/sizeof(T));
1814 int dstep = (int)(_dst.step/sizeof(T));
1815 Size size = _dst.size();
1816 int i, j, k, cn = _src.channels();
1819 volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
1823 if( size.width == 1 || size.height == 1 )
1825 int len = size.width + size.height - 1;
1826 int sdelta = size.height == 1 ? cn : sstep;
1827 int sdelta0 = size.height == 1 ? 0 : sstep - cn;
1828 int ddelta = size.height == 1 ? cn : dstep;
1830 for( i = 0; i < len; i++, src += sdelta0, dst += ddelta )
1831 for( j = 0; j < cn; j++, src++ )
1833 WT p0 = src[i > 0 ? -sdelta : 0];
1835 WT p2 = src[i < len - 1 ? sdelta : 0];
1837 op(p0, p1); op(p1, p2); op(p0, p1);
1844 for( i = 0; i < size.height; i++, dst += dstep )
1846 const T* row0 = src + std::max(i - 1, 0)*sstep;
1847 const T* row1 = src + i*sstep;
1848 const T* row2 = src + std::min(i + 1, size.height-1)*sstep;
1849 int limit = useSIMD ? cn : size.width;
1853 for( ; j < limit; j++ )
1855 int j0 = j >= cn ? j - cn : j;
1856 int j2 = j < size.width - cn ? j + cn : j;
1857 WT p0 = row0[j0], p1 = row0[j], p2 = row0[j2];
1858 WT p3 = row1[j0], p4 = row1[j], p5 = row1[j2];
1859 WT p6 = row2[j0], p7 = row2[j], p8 = row2[j2];
1861 op(p1, p2); op(p4, p5); op(p7, p8); op(p0, p1);
1862 op(p3, p4); op(p6, p7); op(p1, p2); op(p4, p5);
1863 op(p7, p8); op(p0, p3); op(p5, p8); op(p4, p7);
1864 op(p3, p6); op(p1, p4); op(p2, p5); op(p4, p7);
1865 op(p4, p2); op(p6, p4); op(p4, p2);
1869 if( limit == size.width )
1872 for( ; j <= size.width - VecOp::SIZE - cn; j += VecOp::SIZE )
1874 VT p0 = vop.load(row0+j-cn), p1 = vop.load(row0+j), p2 = vop.load(row0+j+cn);
1875 VT p3 = vop.load(row1+j-cn), p4 = vop.load(row1+j), p5 = vop.load(row1+j+cn);
1876 VT p6 = vop.load(row2+j-cn), p7 = vop.load(row2+j), p8 = vop.load(row2+j+cn);
1878 vop(p1, p2); vop(p4, p5); vop(p7, p8); vop(p0, p1);
1879 vop(p3, p4); vop(p6, p7); vop(p1, p2); vop(p4, p5);
1880 vop(p7, p8); vop(p0, p3); vop(p5, p8); vop(p4, p7);
1881 vop(p3, p6); vop(p1, p4); vop(p2, p5); vop(p4, p7);
1882 vop(p4, p2); vop(p6, p4); vop(p4, p2);
1883 vop.store(dst+j, p4);
1892 if( size.width == 1 || size.height == 1 )
1894 int len = size.width + size.height - 1;
1895 int sdelta = size.height == 1 ? cn : sstep;
1896 int sdelta0 = size.height == 1 ? 0 : sstep - cn;
1897 int ddelta = size.height == 1 ? cn : dstep;
1899 for( i = 0; i < len; i++, src += sdelta0, dst += ddelta )
1900 for( j = 0; j < cn; j++, src++ )
1902 int i1 = i > 0 ? -sdelta : 0;
1903 int i0 = i > 1 ? -sdelta*2 : i1;
1904 int i3 = i < len-1 ? sdelta : 0;
1905 int i4 = i < len-2 ? sdelta*2 : i3;
1906 WT p0 = src[i0], p1 = src[i1], p2 = src[0], p3 = src[i3], p4 = src[i4];
1908 op(p0, p1); op(p3, p4); op(p2, p3); op(p3, p4); op(p0, p2);
1909 op(p2, p4); op(p1, p3); op(p1, p2);
1916 for( i = 0; i < size.height; i++, dst += dstep )
1919 row[0] = src + std::max(i - 2, 0)*sstep;
1920 row[1] = src + std::max(i - 1, 0)*sstep;
1921 row[2] = src + i*sstep;
1922 row[3] = src + std::min(i + 1, size.height-1)*sstep;
1923 row[4] = src + std::min(i + 2, size.height-1)*sstep;
1924 int limit = useSIMD ? cn*2 : size.width;
1928 for( ; j < limit; j++ )
1931 int j1 = j >= cn ? j - cn : j;
1932 int j0 = j >= cn*2 ? j - cn*2 : j1;
1933 int j3 = j < size.width - cn ? j + cn : j;
1934 int j4 = j < size.width - cn*2 ? j + cn*2 : j3;
1935 for( k = 0; k < 5; k++ )
1937 const T* rowk = row[k];
1938 p[k*5] = rowk[j0]; p[k*5+1] = rowk[j1];
1939 p[k*5+2] = rowk[j]; p[k*5+3] = rowk[j3];
1940 p[k*5+4] = rowk[j4];
1943 op(p[1], p[2]); op(p[0], p[1]); op(p[1], p[2]); op(p[4], p[5]); op(p[3], p[4]);
1944 op(p[4], p[5]); op(p[0], p[3]); op(p[2], p[5]); op(p[2], p[3]); op(p[1], p[4]);
1945 op(p[1], p[2]); op(p[3], p[4]); op(p[7], p[8]); op(p[6], p[7]); op(p[7], p[8]);
1946 op(p[10], p[11]); op(p[9], p[10]); op(p[10], p[11]); op(p[6], p[9]); op(p[8], p[11]);
1947 op(p[8], p[9]); op(p[7], p[10]); op(p[7], p[8]); op(p[9], p[10]); op(p[0], p[6]);
1948 op(p[4], p[10]); op(p[4], p[6]); op(p[2], p[8]); op(p[2], p[4]); op(p[6], p[8]);
1949 op(p[1], p[7]); op(p[5], p[11]); op(p[5], p[7]); op(p[3], p[9]); op(p[3], p[5]);
1950 op(p[7], p[9]); op(p[1], p[2]); op(p[3], p[4]); op(p[5], p[6]); op(p[7], p[8]);
1951 op(p[9], p[10]); op(p[13], p[14]); op(p[12], p[13]); op(p[13], p[14]); op(p[16], p[17]);
1952 op(p[15], p[16]); op(p[16], p[17]); op(p[12], p[15]); op(p[14], p[17]); op(p[14], p[15]);
1953 op(p[13], p[16]); op(p[13], p[14]); op(p[15], p[16]); op(p[19], p[20]); op(p[18], p[19]);
1954 op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[21], p[23]); op(p[22], p[24]);
1955 op(p[22], p[23]); op(p[18], p[21]); op(p[20], p[23]); op(p[20], p[21]); op(p[19], p[22]);
1956 op(p[22], p[24]); op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[12], p[18]);
1957 op(p[16], p[22]); op(p[16], p[18]); op(p[14], p[20]); op(p[20], p[24]); op(p[14], p[16]);
1958 op(p[18], p[20]); op(p[22], p[24]); op(p[13], p[19]); op(p[17], p[23]); op(p[17], p[19]);
1959 op(p[15], p[21]); op(p[15], p[17]); op(p[19], p[21]); op(p[13], p[14]); op(p[15], p[16]);
1960 op(p[17], p[18]); op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[0], p[12]);
1961 op(p[8], p[20]); op(p[8], p[12]); op(p[4], p[16]); op(p[16], p[24]); op(p[12], p[16]);
1962 op(p[2], p[14]); op(p[10], p[22]); op(p[10], p[14]); op(p[6], p[18]); op(p[6], p[10]);
1963 op(p[10], p[12]); op(p[1], p[13]); op(p[9], p[21]); op(p[9], p[13]); op(p[5], p[17]);
1964 op(p[13], p[17]); op(p[3], p[15]); op(p[11], p[23]); op(p[11], p[15]); op(p[7], p[19]);
1965 op(p[7], p[11]); op(p[11], p[13]); op(p[11], p[12]);
1969 if( limit == size.width )
1972 for( ; j <= size.width - VecOp::SIZE - cn*2; j += VecOp::SIZE )
1975 for( k = 0; k < 5; k++ )
1977 const T* rowk = row[k];
1978 p[k*5] = vop.load(rowk+j-cn*2); p[k*5+1] = vop.load(rowk+j-cn);
1979 p[k*5+2] = vop.load(rowk+j); p[k*5+3] = vop.load(rowk+j+cn);
1980 p[k*5+4] = vop.load(rowk+j+cn*2);
1983 vop(p[1], p[2]); vop(p[0], p[1]); vop(p[1], p[2]); vop(p[4], p[5]); vop(p[3], p[4]);
1984 vop(p[4], p[5]); vop(p[0], p[3]); vop(p[2], p[5]); vop(p[2], p[3]); vop(p[1], p[4]);
1985 vop(p[1], p[2]); vop(p[3], p[4]); vop(p[7], p[8]); vop(p[6], p[7]); vop(p[7], p[8]);
1986 vop(p[10], p[11]); vop(p[9], p[10]); vop(p[10], p[11]); vop(p[6], p[9]); vop(p[8], p[11]);
1987 vop(p[8], p[9]); vop(p[7], p[10]); vop(p[7], p[8]); vop(p[9], p[10]); vop(p[0], p[6]);
1988 vop(p[4], p[10]); vop(p[4], p[6]); vop(p[2], p[8]); vop(p[2], p[4]); vop(p[6], p[8]);
1989 vop(p[1], p[7]); vop(p[5], p[11]); vop(p[5], p[7]); vop(p[3], p[9]); vop(p[3], p[5]);
1990 vop(p[7], p[9]); vop(p[1], p[2]); vop(p[3], p[4]); vop(p[5], p[6]); vop(p[7], p[8]);
1991 vop(p[9], p[10]); vop(p[13], p[14]); vop(p[12], p[13]); vop(p[13], p[14]); vop(p[16], p[17]);
1992 vop(p[15], p[16]); vop(p[16], p[17]); vop(p[12], p[15]); vop(p[14], p[17]); vop(p[14], p[15]);
1993 vop(p[13], p[16]); vop(p[13], p[14]); vop(p[15], p[16]); vop(p[19], p[20]); vop(p[18], p[19]);
1994 vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[21], p[23]); vop(p[22], p[24]);
1995 vop(p[22], p[23]); vop(p[18], p[21]); vop(p[20], p[23]); vop(p[20], p[21]); vop(p[19], p[22]);
1996 vop(p[22], p[24]); vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[12], p[18]);
1997 vop(p[16], p[22]); vop(p[16], p[18]); vop(p[14], p[20]); vop(p[20], p[24]); vop(p[14], p[16]);
1998 vop(p[18], p[20]); vop(p[22], p[24]); vop(p[13], p[19]); vop(p[17], p[23]); vop(p[17], p[19]);
1999 vop(p[15], p[21]); vop(p[15], p[17]); vop(p[19], p[21]); vop(p[13], p[14]); vop(p[15], p[16]);
2000 vop(p[17], p[18]); vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[0], p[12]);
2001 vop(p[8], p[20]); vop(p[8], p[12]); vop(p[4], p[16]); vop(p[16], p[24]); vop(p[12], p[16]);
2002 vop(p[2], p[14]); vop(p[10], p[22]); vop(p[10], p[14]); vop(p[6], p[18]); vop(p[6], p[10]);
2003 vop(p[10], p[12]); vop(p[1], p[13]); vop(p[9], p[21]); vop(p[9], p[13]); vop(p[5], p[17]);
2004 vop(p[13], p[17]); vop(p[3], p[15]); vop(p[11], p[23]); vop(p[11], p[15]); vop(p[7], p[19]);
2005 vop(p[7], p[11]); vop(p[11], p[13]); vop(p[11], p[12]);
2006 vop.store(dst+j, p[12]);
2017 static bool ocl_medianFilter(InputArray _src, OutputArray _dst, int m)
2019 size_t localsize[2] = { 16, 16 };
2020 size_t globalsize[2];
2021 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
2023 if ( !((depth == CV_8U || depth == CV_16U || depth == CV_16S || depth == CV_32F) && cn <= 4 && (m == 3 || m == 5)) )
2026 Size imgSize = _src.size();
2027 bool useOptimized = (1 == cn) &&
2028 (size_t)imgSize.width >= localsize[0] * 8 &&
2029 (size_t)imgSize.height >= localsize[1] * 8 &&
2030 imgSize.width % 4 == 0 &&
2031 imgSize.height % 4 == 0 &&
2032 (ocl::Device::getDefault().isIntel());
2034 cv::String kname = format( useOptimized ? "medianFilter%d_u" : "medianFilter%d", m) ;
2035 cv::String kdefs = useOptimized ?
2036 format("-D T=%s -D T1=%s -D T4=%s%d -D cn=%d -D USE_4OPT", ocl::typeToStr(type),
2037 ocl::typeToStr(depth), ocl::typeToStr(depth), cn*4, cn)
2039 format("-D T=%s -D T1=%s -D cn=%d", ocl::typeToStr(type), ocl::typeToStr(depth), cn) ;
2041 ocl::Kernel k(kname.c_str(), ocl::imgproc::medianFilter_oclsrc, kdefs.c_str() );
2046 UMat src = _src.getUMat();
2047 _dst.create(src.size(), type);
2048 UMat dst = _dst.getUMat();
2050 k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnly(dst));
2054 globalsize[0] = DIVUP(src.cols / 4, localsize[0]) * localsize[0];
2055 globalsize[1] = DIVUP(src.rows / 4, localsize[1]) * localsize[1];
2059 globalsize[0] = (src.cols + localsize[0] + 2) / localsize[0] * localsize[0];
2060 globalsize[1] = (src.rows + localsize[1] - 1) / localsize[1] * localsize[1];
2063 return k.run(2, globalsize, localsize, false);
2070 void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
2072 CV_Assert( (ksize % 2 == 1) && (_src0.dims() <= 2 ));
2080 CV_OCL_RUN(_dst.isUMat(),
2081 ocl_medianFilter(_src0,_dst, ksize))
2083 Mat src0 = _src0.getMat();
2084 _dst.create( src0.size(), src0.type() );
2085 Mat dst = _dst.getMat();
2087 #if IPP_VERSION_X100 >= 801
2088 #define IPP_FILTER_MEDIAN_BORDER(ippType, ippDataType, flavor) \
2091 if (ippiFilterMedianBorderGetBufferSize(dstRoiSize, maskSize, \
2092 ippDataType, CV_MAT_CN(type), &bufSize) >= 0) \
2094 Ipp8u * buffer = ippsMalloc_8u(bufSize); \
2095 IppStatus status = ippiFilterMedianBorder_##flavor((const ippType *)src0.data, (int)src0.step, \
2096 (ippType *)dst.data, (int)dst.step, dstRoiSize, maskSize, \
2097 ippBorderRepl, (ippType)0, buffer); \
2102 setIppErrorStatus(); \
2107 IppiSize dstRoiSize = ippiSize(dst.cols, dst.rows), maskSize = ippiSize(ksize, ksize);
2109 int type = src0.type();
2110 if (type == CV_8UC1)
2111 IPP_FILTER_MEDIAN_BORDER(Ipp8u, ipp8u, 8u_C1R);
2112 else if (type == CV_16UC1)
2113 IPP_FILTER_MEDIAN_BORDER(Ipp16u, ipp16u, 16u_C1R);
2114 else if (type == CV_16SC1)
2115 IPP_FILTER_MEDIAN_BORDER(Ipp16s, ipp16s, 16s_C1R);
2116 else if (type == CV_32FC1)
2117 IPP_FILTER_MEDIAN_BORDER(Ipp32f, ipp32f, 32f_C1R);
2118 #undef IPP_FILTER_MEDIAN_BORDER
2121 #ifdef HAVE_TEGRA_OPTIMIZATION
2122 if (tegra::medianBlur(src0, dst, ksize))
2126 bool useSortNet = ksize == 3 || (ksize == 5
2128 && src0.depth() > CV_8U
2135 if( dst.data != src0.data )
2140 if( src.depth() == CV_8U )
2141 medianBlur_SortNet<MinMax8u, MinMaxVec8u>( src, dst, ksize );
2142 else if( src.depth() == CV_16U )
2143 medianBlur_SortNet<MinMax16u, MinMaxVec16u>( src, dst, ksize );
2144 else if( src.depth() == CV_16S )
2145 medianBlur_SortNet<MinMax16s, MinMaxVec16s>( src, dst, ksize );
2146 else if( src.depth() == CV_32F )
2147 medianBlur_SortNet<MinMax32f, MinMaxVec32f>( src, dst, ksize );
2149 CV_Error(CV_StsUnsupportedFormat, "");
2155 cv::copyMakeBorder( src0, src, 0, 0, ksize/2, ksize/2, BORDER_REPLICATE );
2157 int cn = src0.channels();
2158 CV_Assert( src.depth() == CV_8U && (cn == 1 || cn == 3 || cn == 4) );
2160 double img_size_mp = (double)(src0.total())/(1 << 20);
2161 if( ksize <= 3 + (img_size_mp < 1 ? 12 : img_size_mp < 4 ? 6 : 2)*(MEDIAN_HAVE_SIMD && checkHardwareSupport(CV_CPU_SSE2) ? 1 : 3))
2162 medianBlur_8u_Om( src, dst, ksize );
2164 medianBlur_8u_O1( src, dst, ksize );
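/*
 * Usage sketch (illustrative; the file name is hypothetical).  The aperture
 * must be odd; 3x3 (and some 5x5) apertures take the branch-free
 * sorting-network path, while the remaining 8-bit cases use the histogram
 * paths above (medianBlur_8u_Om for moderate apertures, the constant-time
 * medianBlur_8u_O1 for large ones).
 *
 *     cv::Mat src = cv::imread("noisy.png"), dst;
 *     cv::medianBlur(src, dst, 5);
 */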
/****************************************************************************************\
                                   Bilateral Filtering
\****************************************************************************************/
2175 class BilateralFilter_8u_Invoker :
2176 public ParallelLoopBody
2179 BilateralFilter_8u_Invoker(Mat& _dest, const Mat& _temp, int _radius, int _maxk,
2180 int* _space_ofs, float *_space_weight, float *_color_weight) :
2181 temp(&_temp), dest(&_dest), radius(_radius),
2182 maxk(_maxk), space_ofs(_space_ofs), space_weight(_space_weight), color_weight(_color_weight)
2186 virtual void operator() (const Range& range) const
2188 int i, j, cn = dest->channels(), k;
2189 Size size = dest->size();
2191 int CV_DECL_ALIGNED(16) buf[4];
2192 float CV_DECL_ALIGNED(16) bufSum[4];
2193 static const int CV_DECL_ALIGNED(16) bufSignMask[] = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
2194 bool haveSSE3 = checkHardwareSupport(CV_CPU_SSE3);
2197 for( i = range.start; i < range.end; i++ )
2199 const uchar* sptr = temp->ptr(i+radius) + radius*cn;
2200 uchar* dptr = dest->ptr(i);
2204 for( j = 0; j < size.width; j++ )
2206 float sum = 0, wsum = 0;
2212 __m128 _val0 = _mm_set1_ps(static_cast<float>(val0));
2213 const __m128 _signMask = _mm_load_ps((const float*)bufSignMask);
2215 for( ; k <= maxk - 4; k += 4 )
2217 __m128 _valF = _mm_set_ps(sptr[j + space_ofs[k+3]], sptr[j + space_ofs[k+2]],
2218 sptr[j + space_ofs[k+1]], sptr[j + space_ofs[k]]);
2220 __m128 _val = _mm_andnot_ps(_signMask, _mm_sub_ps(_valF, _val0));
2221 _mm_store_si128((__m128i*)buf, _mm_cvtps_epi32(_val));
2223 __m128 _cw = _mm_set_ps(color_weight[buf[3]],color_weight[buf[2]],
2224 color_weight[buf[1]],color_weight[buf[0]]);
2225 __m128 _sw = _mm_loadu_ps(space_weight+k);
2226 __m128 _w = _mm_mul_ps(_cw, _sw);
2227 _cw = _mm_mul_ps(_w, _valF);
2229 _sw = _mm_hadd_ps(_w, _cw);
2230 _sw = _mm_hadd_ps(_sw, _sw);
2231 _mm_storel_pi((__m64*)bufSum, _sw);
2238 for( ; k < maxk; k++ )
2240 int val = sptr[j + space_ofs[k]];
2241 float w = space_weight[k]*color_weight[std::abs(val - val0)];
2245 // overflow is not possible here => there is no need to use cv::saturate_cast
2246 dptr[j] = (uchar)cvRound(sum/wsum);
2252 for( j = 0; j < size.width*3; j += 3 )
2254 float sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0;
2255 int b0 = sptr[j], g0 = sptr[j+1], r0 = sptr[j+2];
2260 const __m128i izero = _mm_setzero_si128();
2261 const __m128 _b0 = _mm_set1_ps(static_cast<float>(b0));
2262 const __m128 _g0 = _mm_set1_ps(static_cast<float>(g0));
2263 const __m128 _r0 = _mm_set1_ps(static_cast<float>(r0));
2264 const __m128 _signMask = _mm_load_ps((const float*)bufSignMask);
2266 for( ; k <= maxk - 4; k += 4 )
2268 const int* const sptr_k0 = reinterpret_cast<const int*>(sptr + j + space_ofs[k]);
2269 const int* const sptr_k1 = reinterpret_cast<const int*>(sptr + j + space_ofs[k+1]);
2270 const int* const sptr_k2 = reinterpret_cast<const int*>(sptr + j + space_ofs[k+2]);
2271 const int* const sptr_k3 = reinterpret_cast<const int*>(sptr + j + space_ofs[k+3]);
2273 __m128 _b = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_unpacklo_epi8(_mm_cvtsi32_si128(sptr_k0[0]), izero), izero));
2274 __m128 _g = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_unpacklo_epi8(_mm_cvtsi32_si128(sptr_k1[0]), izero), izero));
2275 __m128 _r = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_unpacklo_epi8(_mm_cvtsi32_si128(sptr_k2[0]), izero), izero));
2276 __m128 _z = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_unpacklo_epi8(_mm_cvtsi32_si128(sptr_k3[0]), izero), izero));
2278 _MM_TRANSPOSE4_PS(_b, _g, _r, _z);
2280 __m128 bt = _mm_andnot_ps(_signMask, _mm_sub_ps(_b,_b0));
2281 __m128 gt = _mm_andnot_ps(_signMask, _mm_sub_ps(_g,_g0));
2282 __m128 rt = _mm_andnot_ps(_signMask, _mm_sub_ps(_r,_r0));
2284 bt =_mm_add_ps(rt, _mm_add_ps(bt, gt));
2285 _mm_store_si128((__m128i*)buf, _mm_cvtps_epi32(bt));
2287 __m128 _w = _mm_set_ps(color_weight[buf[3]],color_weight[buf[2]],
2288 color_weight[buf[1]],color_weight[buf[0]]);
2289 __m128 _sw = _mm_loadu_ps(space_weight+k);
2291 _w = _mm_mul_ps(_w,_sw);
2292 _b = _mm_mul_ps(_b, _w);
2293 _g = _mm_mul_ps(_g, _w);
2294 _r = _mm_mul_ps(_r, _w);
2296 _w = _mm_hadd_ps(_w, _b);
2297 _g = _mm_hadd_ps(_g, _r);
2299 _w = _mm_hadd_ps(_w, _g);
2300 _mm_store_ps(bufSum, _w);
2310 for( ; k < maxk; k++ )
2312 const uchar* sptr_k = sptr + j + space_ofs[k];
2313 int b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
2314 float w = space_weight[k]*color_weight[std::abs(b - b0) +
2315 std::abs(g - g0) + std::abs(r - r0)];
2316 sum_b += b*w; sum_g += g*w; sum_r += r*w;
2320 b0 = cvRound(sum_b*wsum);
2321 g0 = cvRound(sum_g*wsum);
2322 r0 = cvRound(sum_r*wsum);
2323 dptr[j] = (uchar)b0; dptr[j+1] = (uchar)g0; dptr[j+2] = (uchar)r0;
2332 int radius, maxk, *space_ofs;
2333 float *space_weight, *color_weight;
2336 #if defined (HAVE_IPP) && !defined(HAVE_IPP_ICV_ONLY) && 0
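// NOTE: the trailing "&& 0" keeps this IPP-based bilateral invoker compiled
// out; it is kept here for reference only.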
2337 class IPPBilateralFilter_8u_Invoker :
2338 public ParallelLoopBody
2341 IPPBilateralFilter_8u_Invoker(Mat &_src, Mat &_dst, double _sigma_color, double _sigma_space, int _radius, bool *_ok) :
2342 ParallelLoopBody(), src(_src), dst(_dst), sigma_color(_sigma_color), sigma_space(_sigma_space), radius(_radius), ok(_ok)
2347 virtual void operator() (const Range& range) const
2349 int d = radius * 2 + 1;
2350 IppiSize kernel = {d, d};
2351 IppiSize roi={dst.cols, range.end - range.start};
2353 if (0 > ippiFilterBilateralGetBufSize_8u_C1R( ippiFilterBilateralGauss, roi, kernel, &bufsize))
2358 AutoBuffer<uchar> buf(bufsize);
2359 IppiFilterBilateralSpec *pSpec = (IppiFilterBilateralSpec *)alignPtr(&buf[0], 32);
2360 if (0 > ippiFilterBilateralInit_8u_C1R( ippiFilterBilateralGauss, kernel, (Ipp32f)sigma_color, (Ipp32f)sigma_space, 1, pSpec ))
2365 if (0 > ippiFilterBilateral_8u_C1R( src.ptr<uchar>(range.start) + radius * ((int)src.step[0] + 1), (int)src.step[0], dst.ptr<uchar>(range.start), (int)dst.step[0], roi, kernel, pSpec ))
2375 const IPPBilateralFilter_8u_Invoker& operator= (const IPPBilateralFilter_8u_Invoker&);
2381 static bool ocl_bilateralFilter_8u(InputArray _src, OutputArray _dst, int d,
2382 double sigma_color, double sigma_space,
2385 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
2386 int i, j, maxk, radius;
2388 if (depth != CV_8U || cn > 4)
2391 if (sigma_color <= 0)
2393 if (sigma_space <= 0)
2396 double gauss_color_coeff = -0.5 / (sigma_color * sigma_color);
2397 double gauss_space_coeff = -0.5 / (sigma_space * sigma_space);
2400 radius = cvRound(sigma_space * 1.5);
2403 radius = MAX(radius, 1);
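// When the caller passes a non-positive diameter d, the radius is derived
// from sigma_space (roughly 1.5 * sigma) and clamped to at least 1; the
// effective diameter used for the weight tables below is 2*radius + 1.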
2406 UMat src = _src.getUMat(), dst = _dst.getUMat(), temp;
2410 copyMakeBorder(src, temp, radius, radius, radius, radius, borderType);
2411 std::vector<float> _space_weight(d * d);
2412 std::vector<int> _space_ofs(d * d);
2413 float * const space_weight = &_space_weight[0];
2414 int * const space_ofs = &_space_ofs[0];
2416 // initialize space-related bilateral filter coefficients
2417 for( i = -radius, maxk = 0; i <= radius; i++ )
2418 for( j = -radius; j <= radius; j++ )
2420 double r = std::sqrt((double)i * i + (double)j * j);
2423 space_weight[maxk] = (float)std::exp(r * r * gauss_space_coeff);
2424 space_ofs[maxk++] = (int)(i * temp.step + j * cn);
2428 String cnstr = cn > 1 ? format("%d", cn) : "";
2429 String kernelName("bilateral");
2431 if ((ocl::Device::getDefault().isIntel()) &&
2432 (ocl::Device::getDefault().type() == ocl::Device::TYPE_GPU))
2435 if (dst.cols % 4 == 0 && cn == 1) // For single channel x4 sized images.
2437 kernelName = "bilateral_float4";
2441 ocl::Kernel k(kernelName.c_str(), ocl::imgproc::bilateral_oclsrc,
2442 format("-D radius=%d -D maxk=%d -D cn=%d -D int_t=%s -D uint_t=uint%s -D convert_int_t=%s"
2443 " -D uchar_t=%s -D float_t=%s -D convert_float_t=%s -D convert_uchar_t=%s -D gauss_color_coeff=%f",
2444 radius, maxk, cn, ocl::typeToStr(CV_32SC(cn)), cnstr.c_str(),
2445 ocl::convertTypeStr(CV_8U, CV_32S, cn, cvt[0]),
2446 ocl::typeToStr(type), ocl::typeToStr(CV_32FC(cn)),
2447 ocl::convertTypeStr(CV_32S, CV_32F, cn, cvt[1]),
2448 ocl::convertTypeStr(CV_32F, CV_8U, cn, cvt[2]), gauss_color_coeff));
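// All filter parameters (radius, maxk, cn, the element/convert type strings
// and gauss_color_coeff) are baked into the OpenCL program as compile-time
// defines; only the spatial weight and offset tables are uploaded below, so
// the colour weight can be evaluated on the device from gauss_color_coeff.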
2452 Mat mspace_weight(1, d * d, CV_32FC1, space_weight);
2453 Mat mspace_ofs(1, d * d, CV_32SC1, space_ofs);
2454 UMat ucolor_weight, uspace_weight, uspace_ofs;
2456 mspace_weight.copyTo(uspace_weight);
2457 mspace_ofs.copyTo(uspace_ofs);
2459 k.args(ocl::KernelArg::ReadOnlyNoSize(temp), ocl::KernelArg::WriteOnly(dst),
2460 ocl::KernelArg::PtrReadOnly(uspace_weight),
2461 ocl::KernelArg::PtrReadOnly(uspace_ofs));
2463 size_t globalsize[2] = { dst.cols / sizeDiv, dst.rows };
2464 return k.run(2, globalsize, NULL, false);
2469 bilateralFilter_8u( const Mat& src, Mat& dst, int d,
2470 double sigma_color, double sigma_space,
2473 int cn = src.channels();
2474 int i, j, maxk, radius;
2475 Size size = src.size();
2477 CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) && src.data != dst.data );
2479 if( sigma_color <= 0 )
2481 if( sigma_space <= 0 )
2484 double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
2485 double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
2488 radius = cvRound(sigma_space*1.5);
2491 radius = MAX(radius, 1);
2495 copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
2497 #if defined HAVE_IPP && (IPP_VERSION_MAJOR >= 7) && 0
2501 IPPBilateralFilter_8u_Invoker body(temp, dst, sigma_color * sigma_color, sigma_space * sigma_space, radius, &ok );
2502 parallel_for_(Range(0, dst.rows), body, dst.total()/(double)(1<<16));
2505 setIppErrorStatus();
2509 std::vector<float> _color_weight(cn*256);
2510 std::vector<float> _space_weight(d*d);
2511 std::vector<int> _space_ofs(d*d);
2512 float* color_weight = &_color_weight[0];
2513 float* space_weight = &_space_weight[0];
2514 int* space_ofs = &_space_ofs[0];
2516 // initialize color-related bilateral filter coefficients
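// color_weight[i] = exp(-i*i / (2*sigma_color^2)), where i is the absolute
// intensity difference summed over all channels -- hence 256*cn entries.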
2518 for( i = 0; i < 256*cn; i++ )
2519 color_weight[i] = (float)std::exp(i*i*gauss_color_coeff);
2521 // initialize space-related bilateral filter coefficients
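// space_weight[k] = exp(-r^2 / (2*sigma_space^2)) for every offset (i, j)
// within the radius; space_ofs[k] is the matching offset i*temp.step + j*cn
// into the border-padded temp image, so a neighbour is reached from the
// centre pointer with a single addition.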
2522 for( i = -radius, maxk = 0; i <= radius; i++ )
2526 for( ; j <= radius; j++ )
2528 double r = std::sqrt((double)i*i + (double)j*j);
2531 space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
2532 space_ofs[maxk++] = (int)(i*temp.step + j*cn);
2536 BilateralFilter_8u_Invoker body(dst, temp, radius, maxk, space_ofs, space_weight, color_weight);
2537 parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16));
2541 class BilateralFilter_32f_Invoker :
2542 public ParallelLoopBody
2546 BilateralFilter_32f_Invoker(int _cn, int _radius, int _maxk, int *_space_ofs,
2547 const Mat& _temp, Mat& _dest, float _scale_index, float *_space_weight, float *_expLUT) :
2548 cn(_cn), radius(_radius), maxk(_maxk), space_ofs(_space_ofs),
2549 temp(&_temp), dest(&_dest), scale_index(_scale_index), space_weight(_space_weight), expLUT(_expLUT)
2553 virtual void operator() (const Range& range) const
2556 Size size = dest->size();
2558 int CV_DECL_ALIGNED(16) idxBuf[4];
2559 float CV_DECL_ALIGNED(16) bufSum32[4];
2560 static const unsigned int CV_DECL_ALIGNED(16) bufSignMask[] = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
2561 bool haveSSE3 = checkHardwareSupport(CV_CPU_SSE3);
2564 for( i = range.start; i < range.end; i++ )
2566 const float* sptr = temp->ptr<float>(i+radius) + radius*cn;
2567 float* dptr = dest->ptr<float>(i);
2571 for( j = 0; j < size.width; j++ )
2573 float sum = 0, wsum = 0;
2574 float val0 = sptr[j];
2579 __m128 psum = _mm_setzero_ps();
2580 const __m128 _val0 = _mm_set1_ps(sptr[j]);
2581 const __m128 _scale_index = _mm_set1_ps(scale_index);
2582 const __m128 _signMask = _mm_load_ps((const float*)bufSignMask);
2584 for( ; k <= maxk - 4 ; k += 4 )
2586 __m128 _sw = _mm_loadu_ps(space_weight + k);
2587 __m128 _val = _mm_set_ps(sptr[j + space_ofs[k+3]], sptr[j + space_ofs[k+2]],
2588 sptr[j + space_ofs[k+1]], sptr[j + space_ofs[k]]);
2589 __m128 _alpha = _mm_mul_ps(_mm_andnot_ps( _signMask, _mm_sub_ps(_val,_val0)), _scale_index);
2591 __m128i _idx = _mm_cvtps_epi32(_alpha);
2592 _mm_store_si128((__m128i*)idxBuf, _idx);
2593 _alpha = _mm_sub_ps(_alpha, _mm_cvtepi32_ps(_idx));
2595 __m128 _explut = _mm_set_ps(expLUT[idxBuf[3]], expLUT[idxBuf[2]],
2596 expLUT[idxBuf[1]], expLUT[idxBuf[0]]);
2597 __m128 _explut1 = _mm_set_ps(expLUT[idxBuf[3]+1], expLUT[idxBuf[2]+1],
2598 expLUT[idxBuf[1]+1], expLUT[idxBuf[0]+1]);
2600 __m128 _w = _mm_mul_ps(_sw, _mm_add_ps(_explut, _mm_mul_ps(_alpha, _mm_sub_ps(_explut1, _explut))));
2601 _val = _mm_mul_ps(_w, _val);
2603 _sw = _mm_hadd_ps(_w, _val);
2604 _sw = _mm_hadd_ps(_sw, _sw);
2605 psum = _mm_add_ps(_sw, psum);
2607 _mm_storel_pi((__m64*)bufSum32, psum);
2614 for( ; k < maxk; k++ )
2616 float val = sptr[j + space_ofs[k]];
2617 float alpha = (float)(std::abs(val - val0)*scale_index);
2618 int idx = cvFloor(alpha);
2620 float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx]));
2624 dptr[j] = (float)(sum/wsum);
2629 CV_Assert( cn == 3 );
2630 for( j = 0; j < size.width*3; j += 3 )
2632 float sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0;
2633 float b0 = sptr[j], g0 = sptr[j+1], r0 = sptr[j+2];
2638 __m128 sum = _mm_setzero_ps();
2639 const __m128 _b0 = _mm_set1_ps(b0);
2640 const __m128 _g0 = _mm_set1_ps(g0);
2641 const __m128 _r0 = _mm_set1_ps(r0);
2642 const __m128 _scale_index = _mm_set1_ps(scale_index);
2643 const __m128 _signMask = _mm_load_ps((const float*)bufSignMask);
2645 for( ; k <= maxk-4; k += 4 )
2647 __m128 _sw = _mm_loadu_ps(space_weight + k);
2649 const float* const sptr_k0 = sptr + j + space_ofs[k];
2650 const float* const sptr_k1 = sptr + j + space_ofs[k+1];
2651 const float* const sptr_k2 = sptr + j + space_ofs[k+2];
2652 const float* const sptr_k3 = sptr + j + space_ofs[k+3];
2654 __m128 _b = _mm_loadu_ps(sptr_k0);
2655 __m128 _g = _mm_loadu_ps(sptr_k1);
2656 __m128 _r = _mm_loadu_ps(sptr_k2);
2657 __m128 _z = _mm_loadu_ps(sptr_k3);
2658 _MM_TRANSPOSE4_PS(_b, _g, _r, _z);
2660 __m128 _bt = _mm_andnot_ps(_signMask,_mm_sub_ps(_b,_b0));
2661 __m128 _gt = _mm_andnot_ps(_signMask,_mm_sub_ps(_g,_g0));
2662 __m128 _rt = _mm_andnot_ps(_signMask,_mm_sub_ps(_r,_r0));
2664 __m128 _alpha = _mm_mul_ps(_scale_index, _mm_add_ps(_rt,_mm_add_ps(_bt, _gt)));
2666 __m128i _idx = _mm_cvtps_epi32(_alpha);
2667 _mm_store_si128((__m128i*)idxBuf, _idx);
2668 _alpha = _mm_sub_ps(_alpha, _mm_cvtepi32_ps(_idx));
2670 __m128 _explut = _mm_set_ps(expLUT[idxBuf[3]], expLUT[idxBuf[2]], expLUT[idxBuf[1]], expLUT[idxBuf[0]]);
2671 __m128 _explut1 = _mm_set_ps(expLUT[idxBuf[3]+1], expLUT[idxBuf[2]+1], expLUT[idxBuf[1]+1], expLUT[idxBuf[0]+1]);
2673 __m128 _w = _mm_mul_ps(_sw, _mm_add_ps(_explut, _mm_mul_ps(_alpha, _mm_sub_ps(_explut1, _explut))));
2675 _b = _mm_mul_ps(_b, _w);
2676 _g = _mm_mul_ps(_g, _w);
2677 _r = _mm_mul_ps(_r, _w);
2679 _w = _mm_hadd_ps(_w, _b);
2680 _g = _mm_hadd_ps(_g, _r);
2682 _w = _mm_hadd_ps(_w, _g);
2683 sum = _mm_add_ps(sum, _w);
2685 _mm_store_ps(bufSum32, sum);
2687 sum_b = bufSum32[1];
2688 sum_g = bufSum32[2];
2689 sum_r = bufSum32[3];
2693 for(; k < maxk; k++ )
2695 const float* sptr_k = sptr + j + space_ofs[k];
2696 float b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
2697 float alpha = (float)((std::abs(b - b0) +
2698 std::abs(g - g0) + std::abs(r - r0))*scale_index);
2699 int idx = cvFloor(alpha);
2701 float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx]));
2702 sum_b += b*w; sum_g += g*w; sum_r += r*w;
2709 dptr[j] = b0; dptr[j+1] = g0; dptr[j+2] = r0;
2716 int cn, radius, maxk, *space_ofs;
2719 float scale_index, *space_weight, *expLUT;
2724 bilateralFilter_32f( const Mat& src, Mat& dst, int d,
2725 double sigma_color, double sigma_space,
2728 int cn = src.channels();
2729 int i, j, maxk, radius;
2730 double minValSrc=-1, maxValSrc=1;
2731 const int kExpNumBinsPerChannel = 1 << 12;
2732 int kExpNumBins = 0;
2733 float lastExpVal = 1.f;
2734 float len, scale_index;
2735 Size size = src.size();
2737 CV_Assert( (src.type() == CV_32FC1 || src.type() == CV_32FC3) && src.data != dst.data );
2739 if( sigma_color <= 0 )
2741 if( sigma_space <= 0 )
2744 double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
2745 double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
2748 radius = cvRound(sigma_space*1.5);
2751 radius = MAX(radius, 1);
2753 // compute the min/max range for the input image (even if multichannel)
2755 minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc );
2756 if(std::abs(minValSrc - maxValSrc) < FLT_EPSILON)
2762 // temporary copy of the image with borders for easy processing
2764 copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
2765 const double insteadNaNValue = -5. * sigma_color;
2766 patchNaNs( temp, insteadNaNValue ); // this replacement of NaNs makes the assumption that depth values are nonnegative
2767 // TODO: make insteadNaNValue available in the outside function interface to control the cases breaking the assumption
2768 // allocate lookup tables
2769 std::vector<float> _space_weight(d*d);
2770 std::vector<int> _space_ofs(d*d);
2771 float* space_weight = &_space_weight[0];
2772 int* space_ofs = &_space_ofs[0];
2774 // assign a length which is slightly more than needed
2775 len = (float)(maxValSrc - minValSrc) * cn;
2776 kExpNumBins = kExpNumBinsPerChannel * cn;
2777 std::vector<float> _expLUT(kExpNumBins+2);
2778 float* expLUT = &_expLUT[0];
2780 scale_index = kExpNumBins/len;
2782 // initialize the exp LUT
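// Unlike the 8-bit path there is no fixed 0..255 range, so the colour weights
// are tabulated over the observed dynamic range: len = (maxValSrc -
// minValSrc)*cn, kExpNumBins bins, with scale_index mapping an absolute
// difference onto a fractional bin index. The per-pixel loops interpolate
// linearly between expLUT[idx] and expLUT[idx+1]; once exp() underflows to
// zero (tracked via lastExpVal) the remaining entries stay zero.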
2783 for( i = 0; i < kExpNumBins+2; i++ )
2785 if( lastExpVal > 0.f )
2787 double val = i / scale_index;
2788 expLUT[i] = (float)std::exp(val * val * gauss_color_coeff);
2789 lastExpVal = expLUT[i];
2795 // initialize space-related bilateral filter coefficients
2796 for( i = -radius, maxk = 0; i <= radius; i++ )
2797 for( j = -radius; j <= radius; j++ )
2799 double r = std::sqrt((double)i*i + (double)j*j);
2802 space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
2803 space_ofs[maxk++] = (int)(i*(temp.step/sizeof(float)) + j*cn);
2806 // run the row-wise filtering in parallel
2808 BilateralFilter_32f_Invoker body(cn, radius, maxk, space_ofs, temp, dst, scale_index, space_weight, expLUT);
2809 parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16));
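// Public entry point. d is the diameter of the pixel neighbourhood; a
// non-positive value lets it be computed from sigmaSpace. Illustrative use
// (the parameter values are only an example):
//
//     cv::Mat src = cv::imread("input.png"), dst;
//     cv::bilateralFilter(src, dst, 9, 75.0, 75.0);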
2814 void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d,
2815 double sigmaColor, double sigmaSpace,
2818 _dst.create( _src.size(), _src.type() );
2820 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
2821 ocl_bilateralFilter_8u(_src, _dst, d, sigmaColor, sigmaSpace, borderType))
2823 Mat src = _src.getMat(), dst = _dst.getMat();
2825 if( src.depth() == CV_8U )
2826 bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType );
2827 else if( src.depth() == CV_32F )
2828 bilateralFilter_32f( src, dst, d, sigmaColor, sigmaSpace, borderType );
2830 CV_Error( CV_StsUnsupportedFormat,
2831 "Bilateral filtering is only implemented for 8u and 32f images" );
2834 //////////////////////////////////////////////////////////////////////////////////////////
2837 cvSmooth( const void* srcarr, void* dstarr, int smooth_type,
2838 int param1, int param2, double param3, double param4 )
2840 cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0;
2842 CV_Assert( dst.size() == src.size() &&
2843 (smooth_type == CV_BLUR_NO_SCALE || dst.type() == src.type()) );
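// Legacy parameter mapping: param1/param2 give the aperture width/height for
// the box and Gaussian modes (param3/param4 the Gaussian sigmas); CV_MEDIAN
// uses only param1 as the aperture size, and CV_BILATERAL takes param1 as the
// neighbourhood diameter with param3/param4 acting as sigmaColor/sigmaSpace,
// as reflected in the calls below.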
2848 if( smooth_type == CV_BLUR || smooth_type == CV_BLUR_NO_SCALE )
2849 cv::boxFilter( src, dst, dst.depth(), cv::Size(param1, param2), cv::Point(-1,-1),
2850 smooth_type == CV_BLUR, cv::BORDER_REPLICATE );
2851 else if( smooth_type == CV_GAUSSIAN )
2852 cv::GaussianBlur( src, dst, cv::Size(param1, param2), param3, param4, cv::BORDER_REPLICATE );
2853 else if( smooth_type == CV_MEDIAN )
2854 cv::medianBlur( src, dst, param1 );
2856 cv::bilateralFilter( src, dst, param1, param3, param4, cv::BORDER_REPLICATE );
2858 if( dst.data != dst0.data )
2859 CV_Error( CV_StsUnmatchedFormats, "The destination image does not have the proper type" );