1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "precomp.hpp"
47 /****************************************************************************************\
48 Basic Morphological Operations: Erosion & Dilation
49 \****************************************************************************************/
// Elementary per-pixel reduction functors: MinOp implements erosion,
// MaxOp implements dilation. NOTE(review): the struct bodies (rtype
// typedef etc.) are only partially visible in this excerpt.
54 template<typename T> struct MinOp
59 T operator ()(const T a, const T b) const { return std::min(a, b); }
62 template<typename T> struct MaxOp
67 T operator ()(const T a, const T b) const { return std::max(a, b); }
// Branch-free 8-bit min/max: CV_FAST_CAST_8U clamps the difference, so
// a - clamp(a-b) == min(a,b) and a + clamp(b-a) == max(a,b) for uchar.
72 #define CV_MIN_8U(a,b) ((a) - CV_FAST_CAST_8U((a) - (b)))
73 #define CV_MAX_8U(a,b) ((a) + CV_FAST_CAST_8U((b) - (a)))
// uchar specializations route through the branch-free macros above.
75 template<> inline uchar MinOp<uchar>::operator ()(const uchar a, const uchar b) const { return CV_MIN_8U(a, b); }
76 template<> inline uchar MaxOp<uchar>::operator ()(const uchar a, const uchar b) const { return CV_MAX_8U(a, b); }
// Fallback "no vectorization" operators. Returning 0 tells the scalar
// filter code that zero pixels were handled by the vector path.
// NOTE(review): the struct headers for MorphRowNoVec and MorphNoVec are
// not visible in this excerpt.
80 MorphRowNoVec(int, int) {}
81 int operator()(const uchar*, uchar*, int, int) const { return 0; }
84 struct MorphColumnNoVec
86 MorphColumnNoVec(int, int) {}
87 int operator()(const uchar**, uchar*, int, int, int) const { return 0; }
92 int operator()(uchar**, int, uchar*, int) const { return 0; }
// SSE2 horizontal (row) filter for integer element types. VecUpdate is a
// packed min (erode) or max (dilate) functor; taps are cn bytes apart so
// channels stay independent. Returns the number of bytes processed; the
// scalar caller finishes the remainder.
97 template<class VecUpdate> struct MorphRowIVec
99 enum { ESZ = VecUpdate::ESZ };
101 MorphRowIVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
102 int operator()(const uchar* src, uchar* dst, int width, int cn) const
// Process nothing unless SSE2 is available at runtime.
104 if( !checkHardwareSupport(CV_CPU_SSE2) )
108 int i, k, _ksize = ksize*cn;
// Restrict the vector loops to a multiple-of-4 pixel count.
109 width = (width & -4)*cn;
// Main loop: 16 bytes per iteration, folding every kernel tap into s.
112 for( i = 0; i <= width - 16; i += 16 )
114 __m128i s = _mm_loadu_si128((const __m128i*)(src + i));
115 for( k = cn; k < _ksize; k += cn )
117 __m128i x = _mm_loadu_si128((const __m128i*)(src + i + k));
120 _mm_storeu_si128((__m128i*)(dst + i), s);
// Tail loop: 4 bytes at a time through 32-bit scalar loads/stores.
123 for( ; i < width; i += 4 )
125 __m128i s = _mm_cvtsi32_si128(*(const int*)(src + i));
126 for( k = cn; k < _ksize; k += cn )
128 __m128i x = _mm_cvtsi32_si128(*(const int*)(src + i + k));
131 *(int*)(dst + i) = _mm_cvtsi128_si32(s);
// SSE horizontal (row) filter for 32-bit float rows; same structure as
// MorphRowIVec but processing 4 floats per step with _mm_loadu_ps.
141 template<class VecUpdate> struct MorphRowFVec
143 MorphRowFVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
144 int operator()(const uchar* src, uchar* dst, int width, int cn) const
// Process nothing unless SSE is available at runtime.
146 if( !checkHardwareSupport(CV_CPU_SSE) )
149 int i, k, _ksize = ksize*cn;
150 width = (width & -4)*cn;
153 for( i = 0; i < width; i += 4 )
155 __m128 s = _mm_loadu_ps((const float*)src + i);
156 for( k = cn; k < _ksize; k += cn )
158 __m128 x = _mm_loadu_ps((const float*)src + i + k);
161 _mm_storeu_ps((float*)dst + i, s);
// SSE2 vertical (column) filter for integer types. src is an array of
// pointers to consecutive buffered rows; the filter emits two output rows
// per outer iteration because their ksize-tall windows overlap in
// _ksize-1 rows (shared partial result s0/s1).
171 template<class VecUpdate> struct MorphColumnIVec
173 enum { ESZ = VecUpdate::ESZ };
175 MorphColumnIVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
176 int operator()(const uchar** src, uchar* dst, int dststep, int count, int width) const
178 if( !checkHardwareSupport(CV_CPU_SSE2) )
181 int i = 0, k, _ksize = ksize;
// Aligned loads (_mm_load_si128) below require each row buffer to be
// 16-byte aligned; verified here for every row that will be touched.
185 for( i = 0; i < count + ksize - 1; i++ )
186 CV_Assert( ((size_t)src[i] & 15) == 0 );
// Paired-row loop: fold the _ksize-2 fully shared rows once, then
// combine with row 0 (first output row) and row _ksize (second).
188 for( ; _ksize > 1 && count > 1; count -= 2, dst += dststep*2, src += 2 )
190 for( i = 0; i <= width - 32; i += 32 )
192 const uchar* sptr = src[1] + i;
193 __m128i s0 = _mm_load_si128((const __m128i*)sptr);
194 __m128i s1 = _mm_load_si128((const __m128i*)(sptr + 16));
197 for( k = 2; k < _ksize; k++ )
200 x0 = _mm_load_si128((const __m128i*)sptr);
201 x1 = _mm_load_si128((const __m128i*)(sptr + 16));
202 s0 = updateOp(s0, x0);
203 s1 = updateOp(s1, x1);
207 x0 = _mm_load_si128((const __m128i*)sptr);
208 x1 = _mm_load_si128((const __m128i*)(sptr + 16));
209 _mm_storeu_si128((__m128i*)(dst + i), updateOp(s0, x0));
210 _mm_storeu_si128((__m128i*)(dst + i + 16), updateOp(s1, x1));
213 x0 = _mm_load_si128((const __m128i*)sptr);
214 x1 = _mm_load_si128((const __m128i*)(sptr + 16));
215 _mm_storeu_si128((__m128i*)(dst + dststep + i), updateOp(s0, x0));
216 _mm_storeu_si128((__m128i*)(dst + dststep + i + 16), updateOp(s1, x1));
// 8-byte tail of the paired-row loop.
219 for( ; i <= width - 8; i += 8 )
221 __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[1] + i)), x0;
223 for( k = 2; k < _ksize; k++ )
225 x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
226 s0 = updateOp(s0, x0);
229 x0 = _mm_loadl_epi64((const __m128i*)(src[0] + i));
230 _mm_storel_epi64((__m128i*)(dst + i), updateOp(s0, x0));
231 x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
232 _mm_storel_epi64((__m128i*)(dst + dststep + i), updateOp(s0, x0));
// Remainder: one output row at a time (also handles _ksize == 1).
236 for( ; count > 0; count--, dst += dststep, src++ )
238 for( i = 0; i <= width - 32; i += 32 )
240 const uchar* sptr = src[0] + i;
241 __m128i s0 = _mm_load_si128((const __m128i*)sptr);
242 __m128i s1 = _mm_load_si128((const __m128i*)(sptr + 16));
245 for( k = 1; k < _ksize; k++ )
248 x0 = _mm_load_si128((const __m128i*)sptr);
249 x1 = _mm_load_si128((const __m128i*)(sptr + 16));
250 s0 = updateOp(s0, x0);
251 s1 = updateOp(s1, x1);
253 _mm_storeu_si128((__m128i*)(dst + i), s0);
254 _mm_storeu_si128((__m128i*)(dst + i + 16), s1);
257 for( ; i <= width - 8; i += 8 )
259 __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[0] + i)), x0;
261 for( k = 1; k < _ksize; k++ )
263 x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
264 s0 = updateOp(s0, x0);
266 _mm_storel_epi64((__m128i*)(dst + i), s0);
277 template<class VecUpdate> struct MorphColumnFVec
279 MorphColumnFVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
280 int operator()(const uchar** _src, uchar* _dst, int dststep, int count, int width) const
282 if( !checkHardwareSupport(CV_CPU_SSE) )
285 int i = 0, k, _ksize = ksize;
288 for( i = 0; i < count + ksize - 1; i++ )
289 CV_Assert( ((size_t)_src[i] & 15) == 0 );
291 const float** src = (const float**)_src;
292 float* dst = (float*)_dst;
293 dststep /= sizeof(dst[0]);
295 for( ; _ksize > 1 && count > 1; count -= 2, dst += dststep*2, src += 2 )
297 for( i = 0; i <= width - 16; i += 16 )
299 const float* sptr = src[1] + i;
300 __m128 s0 = _mm_load_ps(sptr);
301 __m128 s1 = _mm_load_ps(sptr + 4);
302 __m128 s2 = _mm_load_ps(sptr + 8);
303 __m128 s3 = _mm_load_ps(sptr + 12);
304 __m128 x0, x1, x2, x3;
306 for( k = 2; k < _ksize; k++ )
309 x0 = _mm_load_ps(sptr);
310 x1 = _mm_load_ps(sptr + 4);
311 s0 = updateOp(s0, x0);
312 s1 = updateOp(s1, x1);
313 x2 = _mm_load_ps(sptr + 8);
314 x3 = _mm_load_ps(sptr + 12);
315 s2 = updateOp(s2, x2);
316 s3 = updateOp(s3, x3);
320 x0 = _mm_load_ps(sptr);
321 x1 = _mm_load_ps(sptr + 4);
322 x2 = _mm_load_ps(sptr + 8);
323 x3 = _mm_load_ps(sptr + 12);
324 _mm_storeu_ps(dst + i, updateOp(s0, x0));
325 _mm_storeu_ps(dst + i + 4, updateOp(s1, x1));
326 _mm_storeu_ps(dst + i + 8, updateOp(s2, x2));
327 _mm_storeu_ps(dst + i + 12, updateOp(s3, x3));
330 x0 = _mm_load_ps(sptr);
331 x1 = _mm_load_ps(sptr + 4);
332 x2 = _mm_load_ps(sptr + 8);
333 x3 = _mm_load_ps(sptr + 12);
334 _mm_storeu_ps(dst + dststep + i, updateOp(s0, x0));
335 _mm_storeu_ps(dst + dststep + i + 4, updateOp(s1, x1));
336 _mm_storeu_ps(dst + dststep + i + 8, updateOp(s2, x2));
337 _mm_storeu_ps(dst + dststep + i + 12, updateOp(s3, x3));
340 for( ; i <= width - 4; i += 4 )
342 __m128 s0 = _mm_load_ps(src[1] + i), x0;
344 for( k = 2; k < _ksize; k++ )
346 x0 = _mm_load_ps(src[k] + i);
347 s0 = updateOp(s0, x0);
350 x0 = _mm_load_ps(src[0] + i);
351 _mm_storeu_ps(dst + i, updateOp(s0, x0));
352 x0 = _mm_load_ps(src[k] + i);
353 _mm_storeu_ps(dst + dststep + i, updateOp(s0, x0));
357 for( ; count > 0; count--, dst += dststep, src++ )
359 for( i = 0; i <= width - 16; i += 16 )
361 const float* sptr = src[0] + i;
362 __m128 s0 = _mm_load_ps(sptr);
363 __m128 s1 = _mm_load_ps(sptr + 4);
364 __m128 s2 = _mm_load_ps(sptr + 8);
365 __m128 s3 = _mm_load_ps(sptr + 12);
366 __m128 x0, x1, x2, x3;
368 for( k = 1; k < _ksize; k++ )
371 x0 = _mm_load_ps(sptr);
372 x1 = _mm_load_ps(sptr + 4);
373 s0 = updateOp(s0, x0);
374 s1 = updateOp(s1, x1);
375 x2 = _mm_load_ps(sptr + 8);
376 x3 = _mm_load_ps(sptr + 12);
377 s2 = updateOp(s2, x2);
378 s3 = updateOp(s3, x3);
380 _mm_storeu_ps(dst + i, s0);
381 _mm_storeu_ps(dst + i + 4, s1);
382 _mm_storeu_ps(dst + i + 8, s2);
383 _mm_storeu_ps(dst + i + 12, s3);
386 for( i = 0; i <= width - 4; i += 4 )
388 __m128 s0 = _mm_load_ps(src[0] + i), x0;
389 for( k = 1; k < _ksize; k++ )
391 x0 = _mm_load_ps(src[k] + i);
392 s0 = updateOp(s0, x0);
394 _mm_storeu_ps(dst + i, s0);
// SSE2 2D filter for arbitrary (non-rectangular) structuring elements on
// integer types: src holds nz pointers, one per non-zero kernel element,
// already offset to the current row. Unaligned loads, since the offsets
// are arbitrary.
405 template<class VecUpdate> struct MorphIVec
407 enum { ESZ = VecUpdate::ESZ };
409 int operator()(uchar** src, int nz, uchar* dst, int width) const
411 if( !checkHardwareSupport(CV_CPU_SSE2) )
// Main loop: 32 bytes per iteration folded across all nz taps.
418 for( i = 0; i <= width - 32; i += 32 )
420 const uchar* sptr = src[0] + i;
421 __m128i s0 = _mm_loadu_si128((const __m128i*)sptr);
422 __m128i s1 = _mm_loadu_si128((const __m128i*)(sptr + 16));
425 for( k = 1; k < nz; k++ )
428 x0 = _mm_loadu_si128((const __m128i*)sptr);
429 x1 = _mm_loadu_si128((const __m128i*)(sptr + 16));
430 s0 = updateOp(s0, x0);
431 s1 = updateOp(s1, x1);
433 _mm_storeu_si128((__m128i*)(dst + i), s0);
434 _mm_storeu_si128((__m128i*)(dst + i + 16), s1);
// 8-byte tail.
437 for( ; i <= width - 8; i += 8 )
439 __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[0] + i)), x0;
441 for( k = 1; k < nz; k++ )
443 x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
444 s0 = updateOp(s0, x0);
446 _mm_storel_epi64((__m128i*)(dst + i), s0);
// SSE 2D filter for arbitrary structuring elements on 32-bit floats:
// float analogue of MorphIVec, finishing the row down to single elements
// with scalar _mm_load_ss/_mm_store_ss, so it handles the full width.
454 template<class VecUpdate> struct MorphFVec
456 int operator()(uchar** _src, int nz, uchar* _dst, int width) const
458 if( !checkHardwareSupport(CV_CPU_SSE) )
461 const float** src = (const float**)_src;
462 float* dst = (float*)_dst;
// Main loop: 16 floats per iteration across all nz taps.
466 for( i = 0; i <= width - 16; i += 16 )
468 const float* sptr = src[0] + i;
469 __m128 s0 = _mm_loadu_ps(sptr);
470 __m128 s1 = _mm_loadu_ps(sptr + 4);
471 __m128 s2 = _mm_loadu_ps(sptr + 8);
472 __m128 s3 = _mm_loadu_ps(sptr + 12);
473 __m128 x0, x1, x2, x3;
475 for( k = 1; k < nz; k++ )
478 x0 = _mm_loadu_ps(sptr);
479 x1 = _mm_loadu_ps(sptr + 4);
480 x2 = _mm_loadu_ps(sptr + 8);
481 x3 = _mm_loadu_ps(sptr + 12);
482 s0 = updateOp(s0, x0);
483 s1 = updateOp(s1, x1);
484 s2 = updateOp(s2, x2);
485 s3 = updateOp(s3, x3);
487 _mm_storeu_ps(dst + i, s0);
488 _mm_storeu_ps(dst + i + 4, s1);
489 _mm_storeu_ps(dst + i + 8, s2);
490 _mm_storeu_ps(dst + i + 12, s3);
// 4-float tail.
493 for( ; i <= width - 4; i += 4 )
495 __m128 s0 = _mm_loadu_ps(src[0] + i), x0;
497 for( k = 1; k < nz; k++ )
499 x0 = _mm_loadu_ps(src[k] + i);
500 s0 = updateOp(s0, x0);
502 _mm_storeu_ps(dst + i, s0);
// Scalar tail: one float at a time via the low SSE lane.
505 for( ; i < width; i++ )
507 __m128 s0 = _mm_load_ss(src[0] + i), x0;
509 for( k = 1; k < nz; k++ )
511 x0 = _mm_load_ss(src[k] + i);
512 s0 = updateOp(s0, x0);
514 _mm_store_ss(dst + i, s0);
// Packed SSE2 update operators plugged into the templates above as
// VecUpdate. NOTE(review): the struct headers (VMin8u/VMax8u/VMin16u/
// VMax16u/VMin16s/VMax16s) and their ESZ enums are not visible here.
524 __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_min_epu8(a,b); }
529 __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_max_epu8(a,b); }
// SSE2 has no unsigned 16-bit min/max; emulate with saturating ops:
// min(a,b) = a - sat(a-b), max(a,b) = sat(a-b) + b.
534 __m128i operator()(const __m128i& a, const __m128i& b) const
535 { return _mm_subs_epu16(a,_mm_subs_epu16(a,b)); }
540 __m128i operator()(const __m128i& a, const __m128i& b) const
541 { return _mm_adds_epu16(_mm_subs_epu16(a,b), b); }
// Signed 16-bit min/max exist natively in SSE2.
546 __m128i operator()(const __m128i& a, const __m128i& b) const
547 { return _mm_min_epi16(a, b); }
552 __m128i operator()(const __m128i& a, const __m128i& b) const
553 { return _mm_max_epi16(a, b); }
// Packed single-precision minimum (4 lanes) — the SSE update operator
// used by the 32f erosion kernels.
// NOTE: this line carried a stray listing line-number ("555 ") that made
// it invalid C++; the definition itself is unchanged.
struct VMin32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_min_ps(a,b); }};
// Packed single-precision maximum (4 lanes) — the SSE update operator
// used by the 32f dilation kernels.
// NOTE: this line carried a stray listing line-number ("556 ") that made
// it invalid C++; the definition itself is unchanged.
struct VMax32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_max_ps(a,b); }};
// Bind the SSE kernels to concrete erode/dilate vector-operator types,
// one per supported depth, for rows, columns and arbitrary 2D elements.
558 typedef MorphRowIVec<VMin8u> ErodeRowVec8u;
559 typedef MorphRowIVec<VMax8u> DilateRowVec8u;
560 typedef MorphRowIVec<VMin16u> ErodeRowVec16u;
561 typedef MorphRowIVec<VMax16u> DilateRowVec16u;
562 typedef MorphRowIVec<VMin16s> ErodeRowVec16s;
563 typedef MorphRowIVec<VMax16s> DilateRowVec16s;
564 typedef MorphRowFVec<VMin32f> ErodeRowVec32f;
565 typedef MorphRowFVec<VMax32f> DilateRowVec32f;
567 typedef MorphColumnIVec<VMin8u> ErodeColumnVec8u;
568 typedef MorphColumnIVec<VMax8u> DilateColumnVec8u;
569 typedef MorphColumnIVec<VMin16u> ErodeColumnVec16u;
570 typedef MorphColumnIVec<VMax16u> DilateColumnVec16u;
571 typedef MorphColumnIVec<VMin16s> ErodeColumnVec16s;
572 typedef MorphColumnIVec<VMax16s> DilateColumnVec16s;
573 typedef MorphColumnFVec<VMin32f> ErodeColumnVec32f;
574 typedef MorphColumnFVec<VMax32f> DilateColumnVec32f;
576 typedef MorphIVec<VMin8u> ErodeVec8u;
577 typedef MorphIVec<VMax8u> DilateVec8u;
578 typedef MorphIVec<VMin16u> ErodeVec16u;
579 typedef MorphIVec<VMax16u> DilateVec16u;
580 typedef MorphIVec<VMin16s> ErodeVec16s;
581 typedef MorphIVec<VMax16s> DilateVec16s;
582 typedef MorphFVec<VMin32f> ErodeVec32f;
583 typedef MorphFVec<VMax32f> DilateVec32f;
// Non-SSE build: pick either the Tegra-optimized 8-bit kernels or the
// no-op fallbacks. NOTE(review): this region is inside preprocessor
// conditionals whose #else/#endif lines are not visible in this excerpt.
587 #ifdef HAVE_TEGRA_OPTIMIZATION
588 using tegra::ErodeRowVec8u;
589 using tegra::DilateRowVec8u;
591 using tegra::ErodeColumnVec8u;
592 using tegra::DilateColumnVec8u;
594 typedef MorphRowNoVec ErodeRowVec8u;
595 typedef MorphRowNoVec DilateRowVec8u;
597 typedef MorphColumnNoVec ErodeColumnVec8u;
598 typedef MorphColumnNoVec DilateColumnVec8u;
601 typedef MorphRowNoVec ErodeRowVec16u;
602 typedef MorphRowNoVec DilateRowVec16u;
603 typedef MorphRowNoVec ErodeRowVec16s;
604 typedef MorphRowNoVec DilateRowVec16s;
605 typedef MorphRowNoVec ErodeRowVec32f;
606 typedef MorphRowNoVec DilateRowVec32f;
608 typedef MorphColumnNoVec ErodeColumnVec16u;
609 typedef MorphColumnNoVec DilateColumnVec16u;
610 typedef MorphColumnNoVec ErodeColumnVec16s;
611 typedef MorphColumnNoVec DilateColumnVec16s;
612 typedef MorphColumnNoVec ErodeColumnVec32f;
613 typedef MorphColumnNoVec DilateColumnVec32f;
615 typedef MorphNoVec ErodeVec8u;
616 typedef MorphNoVec DilateVec8u;
617 typedef MorphNoVec ErodeVec16u;
618 typedef MorphNoVec DilateVec16u;
619 typedef MorphNoVec ErodeVec16s;
620 typedef MorphNoVec DilateVec16s;
621 typedef MorphNoVec ErodeVec32f;
622 typedef MorphNoVec DilateVec32f;
// 64f depth is never vectorized, regardless of build flags.
626 typedef MorphRowNoVec ErodeRowVec64f;
627 typedef MorphRowNoVec DilateRowVec64f;
628 typedef MorphColumnNoVec ErodeColumnVec64f;
629 typedef MorphColumnNoVec DilateColumnVec64f;
630 typedef MorphNoVec ErodeVec64f;
631 typedef MorphNoVec DilateVec64f;
// Generic scalar row filter: Op is MinOp/MaxOp, VecOp the matching SIMD
// kernel. vecOp handles a prefix of the row (returning i0); the scalar
// code then processes two output pixels per iteration, reusing the
// overlap of their windows, and a single-pixel tail.
634 template<class Op, class VecOp> struct MorphRowFilter : public BaseRowFilter
636 typedef typename Op::rtype T;
638 MorphRowFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
644 void operator()(const uchar* src, uchar* dst, int width, int cn)
646 int i, j, k, _ksize = ksize*cn;
647 const T* S = (const T*)src;
// Degenerate kernel: plain copy loop (body not visible in this excerpt).
653 for( i = 0; i < width*cn; i++ )
// Vector path first; i0 is where the scalar code must resume.
658 int i0 = vecOp(src, dst, width, cn);
// Per-channel scalar pass so taps stay cn elements apart.
661 for( k = 0; k < cn; k++, S++, D++ )
663 for( i = i0; i <= width - cn*2; i += cn*2 )
667 for( j = cn*2; j < _ksize; j += cn )
670 D[i+cn] = op(m, s[j]);
673 for( ; i < width; i += cn )
677 for( j = cn; j < _ksize; j += cn )
// Generic scalar column filter: mirrors the SIMD column kernels —
// two output rows per outer iteration (their ksize-tall windows share
// _ksize-1 rows), then a one-row-at-a-time remainder pass.
688 template<class Op, class VecOp> struct MorphColumnFilter : public BaseColumnFilter
690 typedef typename Op::rtype T;
692 MorphColumnFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
698 void operator()(const uchar** _src, uchar* dst, int dststep, int count, int width)
700 int i, k, _ksize = ksize;
701 const T** src = (const T**)_src;
// Vector path first; i0 marks where the scalar loops must resume.
705 int i0 = vecOp(_src, dst, dststep, count, width);
// dststep arrives in bytes; convert to elements of T.
706 dststep /= sizeof(D[0]);
// Paired-row pass: fold rows 1.._ksize-1 once, combine with row 0 and
// row _ksize for the two outputs.
708 for( ; _ksize > 1 && count > 1; count -= 2, D += dststep*2, src += 2 )
711 #if CV_ENABLE_UNROLLED
712 for( ; i <= width - 4; i += 4 )
714 const T* sptr = src[1] + i;
715 T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
717 for( k = 2; k < _ksize; k++ )
720 s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
721 s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
725 D[i] = op(s0, sptr[0]);
726 D[i+1] = op(s1, sptr[1]);
727 D[i+2] = op(s2, sptr[2]);
728 D[i+3] = op(s3, sptr[3]);
731 D[i+dststep] = op(s0, sptr[0]);
732 D[i+dststep+1] = op(s1, sptr[1]);
733 D[i+dststep+2] = op(s2, sptr[2]);
734 D[i+dststep+3] = op(s3, sptr[3]);
737 for( ; i < width; i++ )
741 for( k = 2; k < _ksize; k++ )
742 s0 = op(s0, src[k][i]);
744 D[i] = op(s0, src[0][i]);
745 D[i+dststep] = op(s0, src[k][i]);
// Remainder: one output row at a time (also handles _ksize == 1).
749 for( ; count > 0; count--, D += dststep, src++ )
752 #if CV_ENABLE_UNROLLED
753 for( ; i <= width - 4; i += 4 )
755 const T* sptr = src[0] + i;
756 T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
758 for( k = 1; k < _ksize; k++ )
761 s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
762 s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
765 D[i] = s0; D[i+1] = s1;
766 D[i+2] = s2; D[i+3] = s3;
769 for( ; i < width; i++ )
772 for( k = 1; k < _ksize; k++ )
773 s0 = op(s0, src[k][i]);
// Generic 2D filter for arbitrary (non-separable) structuring elements.
// preprocess2DKernel extracts the coordinates of the kernel's non-zero
// elements; per output row, ptrs/kp is refreshed to point at the source
// pixel under each non-zero element.
783 template<class Op, class VecOp> struct MorphFilter : BaseFilter
785 typedef typename Op::rtype T;
787 MorphFilter( const Mat& _kernel, Point _anchor )
790 ksize = _kernel.size();
791 CV_Assert( _kernel.type() == CV_8U );
793 vector<uchar> coeffs; // we do not really need the values of non-zero
794 // kernel elements, just their locations
795 preprocess2DKernel( _kernel, coords, coeffs );
796 ptrs.resize( coords.size() );
799 void operator()(const uchar** src, uchar* dst, int dststep, int count, int width, int cn)
801 const Point* pt = &coords[0];
802 const T** kp = (const T**)&ptrs[0];
803 int i, k, nz = (int)coords.size();
807 for( ; count > 0; count--, dst += dststep, src++ )
// Recompute the tap pointers for the current output row.
811 for( k = 0; k < nz; k++ )
812 kp[k] = (const T*)src[pt[k].y] + pt[k].x*cn;
// Vector path first; scalar loops resume at i.
814 i = vecOp(&ptrs[0], nz, dst, width);
815 #if CV_ENABLE_UNROLLED
816 for( ; i <= width - 4; i += 4 )
818 const T* sptr = kp[0] + i;
819 T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
821 for( k = 1; k < nz; k++ )
824 s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
825 s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
828 D[i] = s0; D[i+1] = s1;
829 D[i+2] = s2; D[i+3] = s3;
832 for( ; i < width; i++ )
835 for( k = 1; k < nz; k++ )
836 s0 = op(s0, kp[k][i]);
842 vector<Point> coords;
849 /////////////////////////////////// External Interface /////////////////////////////////////
// Factory: builds the 1D horizontal morphology filter for the given
// operation (MORPH_ERODE/MORPH_DILATE) and depth, pairing the scalar Op
// with the depth-specific vector kernel. Unsupported depths raise
// CV_StsNotImplemented.
851 cv::Ptr<cv::BaseRowFilter> cv::getMorphologyRowFilter(int op, int type, int ksize, int anchor)
853 int depth = CV_MAT_DEPTH(type);
856 CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
857 if( op == MORPH_ERODE )
860 return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<uchar>,
861 ErodeRowVec8u>(ksize, anchor));
862 if( depth == CV_16U )
863 return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<ushort>,
864 ErodeRowVec16u>(ksize, anchor));
865 if( depth == CV_16S )
866 return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<short>,
867 ErodeRowVec16s>(ksize, anchor));
868 if( depth == CV_32F )
869 return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<float>,
870 ErodeRowVec32f>(ksize, anchor));
871 if( depth == CV_64F )
872 return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<double>,
873 ErodeRowVec64f>(ksize, anchor));
// Dilation branch: same dispatch with MaxOp.
878 return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<uchar>,
879 DilateRowVec8u>(ksize, anchor));
880 if( depth == CV_16U )
881 return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<ushort>,
882 DilateRowVec16u>(ksize, anchor));
883 if( depth == CV_16S )
884 return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<short>,
885 DilateRowVec16s>(ksize, anchor));
886 if( depth == CV_32F )
887 return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<float>,
888 DilateRowVec32f>(ksize, anchor));
889 if( depth == CV_64F )
890 return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<double>,
891 DilateRowVec64f>(ksize, anchor));
894 CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
895 return Ptr<BaseRowFilter>(0);
// Factory: builds the 1D vertical morphology filter; identical dispatch
// structure to getMorphologyRowFilter but with the column kernels.
898 cv::Ptr<cv::BaseColumnFilter> cv::getMorphologyColumnFilter(int op, int type, int ksize, int anchor)
900 int depth = CV_MAT_DEPTH(type);
903 CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
904 if( op == MORPH_ERODE )
907 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<uchar>,
908 ErodeColumnVec8u>(ksize, anchor));
909 if( depth == CV_16U )
910 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<ushort>,
911 ErodeColumnVec16u>(ksize, anchor));
912 if( depth == CV_16S )
913 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<short>,
914 ErodeColumnVec16s>(ksize, anchor));
915 if( depth == CV_32F )
916 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<float>,
917 ErodeColumnVec32f>(ksize, anchor));
918 if( depth == CV_64F )
919 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<double>,
920 ErodeColumnVec64f>(ksize, anchor));
// Dilation branch: same dispatch with MaxOp.
925 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<uchar>,
926 DilateColumnVec8u>(ksize, anchor));
927 if( depth == CV_16U )
928 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<ushort>,
929 DilateColumnVec16u>(ksize, anchor));
930 if( depth == CV_16S )
931 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<short>,
932 DilateColumnVec16s>(ksize, anchor));
933 if( depth == CV_32F )
934 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<float>,
935 DilateColumnVec32f>(ksize, anchor));
936 if( depth == CV_64F )
937 return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<double>,
938 DilateColumnVec64f>(ksize, anchor));
941 CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
942 return Ptr<BaseColumnFilter>(0);
// Factory: builds the 2D morphology filter used for non-rectangular
// structuring elements; dispatches on op and depth like the 1D factories.
946 cv::Ptr<cv::BaseFilter> cv::getMorphologyFilter(int op, int type, InputArray _kernel, Point anchor)
948 Mat kernel = _kernel.getMat();
949 int depth = CV_MAT_DEPTH(type);
// Resolve a default/negative anchor to the kernel center.
950 anchor = normalizeAnchor(anchor, kernel.size());
951 CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
952 if( op == MORPH_ERODE )
955 return Ptr<BaseFilter>(new MorphFilter<MinOp<uchar>, ErodeVec8u>(kernel, anchor));
956 if( depth == CV_16U )
957 return Ptr<BaseFilter>(new MorphFilter<MinOp<ushort>, ErodeVec16u>(kernel, anchor));
958 if( depth == CV_16S )
959 return Ptr<BaseFilter>(new MorphFilter<MinOp<short>, ErodeVec16s>(kernel, anchor));
960 if( depth == CV_32F )
961 return Ptr<BaseFilter>(new MorphFilter<MinOp<float>, ErodeVec32f>(kernel, anchor));
962 if( depth == CV_64F )
963 return Ptr<BaseFilter>(new MorphFilter<MinOp<double>, ErodeVec64f>(kernel, anchor));
// Dilation branch: same dispatch with MaxOp.
968 return Ptr<BaseFilter>(new MorphFilter<MaxOp<uchar>, DilateVec8u>(kernel, anchor));
969 if( depth == CV_16U )
970 return Ptr<BaseFilter>(new MorphFilter<MaxOp<ushort>, DilateVec16u>(kernel, anchor));
971 if( depth == CV_16S )
972 return Ptr<BaseFilter>(new MorphFilter<MaxOp<short>, DilateVec16s>(kernel, anchor));
973 if( depth == CV_32F )
974 return Ptr<BaseFilter>(new MorphFilter<MaxOp<float>, DilateVec32f>(kernel, anchor));
975 if( depth == CV_64F )
976 return Ptr<BaseFilter>(new MorphFilter<MaxOp<double>, DilateVec64f>(kernel, anchor));
979 CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
980 return Ptr<BaseFilter>(0);
// Builds a complete FilterEngine for the requested morphology op.
// A fully dense (all-non-zero) kernel is separable into a row pass and a
// column pass; anything else uses the general 2D filter.
984 cv::Ptr<cv::FilterEngine> cv::createMorphologyFilter( int op, int type, InputArray _kernel,
985 Point anchor, int _rowBorderType, int _columnBorderType,
986 const Scalar& _borderValue )
988 Mat kernel = _kernel.getMat();
989 anchor = normalizeAnchor(anchor, kernel.size());
991 Ptr<BaseRowFilter> rowFilter;
992 Ptr<BaseColumnFilter> columnFilter;
993 Ptr<BaseFilter> filter2D;
995 if( countNonZero(kernel) == kernel.rows*kernel.cols )
997 // rectangular structuring element
998 rowFilter = getMorphologyRowFilter(op, type, kernel.cols, anchor.x);
999 columnFilter = getMorphologyColumnFilter(op, type, kernel.rows, anchor.y);
1002 filter2D = getMorphologyFilter(op, type, kernel, anchor);
1004 Scalar borderValue = _borderValue;
// The "magic" default border value is replaced by the operation's
// neutral element: +max for erode (min), -max/min for dilate (max),
// so constant borders never affect the result.
1005 if( (_rowBorderType == BORDER_CONSTANT || _columnBorderType == BORDER_CONSTANT) &&
1006 borderValue == morphologyDefaultBorderValue() )
1008 int depth = CV_MAT_DEPTH(type);
1009 CV_Assert( depth == CV_8U || depth == CV_16U || depth == CV_16S ||
1010 depth == CV_32F || depth == CV_64F );
1011 if( op == MORPH_ERODE )
1012 borderValue = Scalar::all( depth == CV_8U ? (double)UCHAR_MAX :
1013 depth == CV_16U ? (double)USHRT_MAX :
1014 depth == CV_16S ? (double)SHRT_MAX :
1015 depth == CV_32F ? (double)FLT_MAX : DBL_MAX);
1017 borderValue = Scalar::all( depth == CV_8U || depth == CV_16U ?
1019 depth == CV_16S ? (double)SHRT_MIN :
1020 depth == CV_32F ? (double)-FLT_MAX : -DBL_MAX);
1023 return Ptr<FilterEngine>(new FilterEngine(filter2D, rowFilter, columnFilter,
1024 type, type, type, _rowBorderType, _columnBorderType, borderValue ));
// Returns a CV_8U mask for MORPH_RECT (all ones), MORPH_CROSS (anchor
// row + anchor column), or MORPH_ELLIPSE (inscribed ellipse, computed
// per row from the ellipse equation).
1028 cv::Mat cv::getStructuringElement(int shape, Size ksize, Point anchor)
1034 CV_Assert( shape == MORPH_RECT || shape == MORPH_CROSS || shape == MORPH_ELLIPSE );
1036 anchor = normalizeAnchor(anchor, ksize);
// A 1x1 element is shape-independent (handled specially; body not
// visible in this excerpt).
1038 if( ksize == Size(1,1) )
1041 if( shape == MORPH_ELLIPSE )
// inv_r2 guards against division by zero for a degenerate radius.
1045 inv_r2 = r ? 1./((double)r*r) : 0;
1048 Mat elem(ksize, CV_8U);
1050 for( i = 0; i < ksize.height; i++ )
1052 uchar* ptr = elem.data + i*elem.step;
// [j1, j2) is the run of ones in the current row.
1055 if( shape == MORPH_RECT || (shape == MORPH_CROSS && i == anchor.y) )
1057 else if( shape == MORPH_CROSS )
1058 j1 = anchor.x, j2 = j1 + 1;
1062 if( std::abs(dy) <= r )
// Ellipse: half-width of the row at vertical offset dy.
1064 int dx = saturate_cast<int>(c*std::sqrt((r*r - dy*dy)*inv_r2));
1065 j1 = std::max( c - dx, 0 );
1066 j2 = std::min( c + dx + 1, ksize.width );
// Fill: zeros, then ones in [j1, j2), then zeros.
1070 for( j = 0; j < j1; j++ )
1072 for( ; j < j2; j++ )
1074 for( ; j < ksize.width; j++ )
// ParallelLoopBody that applies the morphology filter to horizontal
// stripes of the image; each invocation builds its own FilterEngine so
// stripes are processed independently.
1084 class MorphologyRunner : public ParallelLoopBody
1087 MorphologyRunner(Mat _src, Mat _dst, int _nStripes, int _iterations,
1088 int _op, Mat _kernel, Point _anchor,
1089 int _rowBorderType, int _columnBorderType, const Scalar& _borderValue) :
1090 borderValue(_borderValue)
1095 nStripes = _nStripes;
1096 iterations = _iterations;
1101 rowBorderType = _rowBorderType;
1102 columnBorderType = _columnBorderType;
1105 void operator () ( const Range& range ) const
// Map the abstract range onto pixel rows; clamp to the image height.
1107 int row0 = min(cvRound(range.start * src.rows / nStripes), src.rows);
1108 int row1 = min(cvRound(range.end * src.rows / nStripes), src.rows);
1111 printf("Size = (%d, %d), range[%d,%d), row0 = %d, row1 = %d\n",
1112 src.rows, src.cols, range.start, range.end, row0, row1);*/
1114 Mat srcStripe = src.rowRange(row0, row1);
1115 Mat dstStripe = dst.rowRange(row0, row1);
1117 Ptr<FilterEngine> f = createMorphologyFilter(op, src.type(), kernel, anchor,
1118 rowBorderType, columnBorderType, borderValue );
1120 f->apply( srcStripe, dstStripe );
// Iterations beyond the first run dst -> dst in place on the stripe.
1121 for( int i = 1; i < iterations; i++ )
1122 f->apply( dstStripe, dstStripe );
1135 int columnBorderType;
// Shared implementation behind cv::erode/cv::dilate. Normalizes the
// kernel/anchor, folds multiple iterations of a rectangular kernel into
// one equivalent larger kernel, and runs stripes in parallel.
1139 static void morphOp( int op, InputArray _src, OutputArray _dst,
1141 Point anchor, int iterations,
1142 int borderType, const Scalar& borderValue )
1144 Mat src = _src.getMat(), kernel = _kernel.getMat();
// Empty kernel means the default 3x3 rectangle.
1145 Size ksize = kernel.data ? kernel.size() : Size(3,3);
1146 anchor = normalizeAnchor(anchor, ksize);
1148 CV_Assert( anchor.inside(Rect(0, 0, ksize.width, ksize.height)) );
1150 _dst.create( src.size(), src.type() );
1151 Mat dst = _dst.getMat();
// Zero iterations or a 1x1 kernel degenerate to a copy (handling not
// fully visible in this excerpt).
1153 if( iterations == 0 || kernel.rows*kernel.cols == 1 )
// No kernel supplied: N iterations of 3x3 == one (2N+1)x(2N+1) rect.
1161 kernel = getStructuringElement(MORPH_RECT, Size(1+iterations*2,1+iterations*2));
1162 anchor = Point(iterations, iterations);
// Dense rectangular kernel: same folding trick for N > 1 iterations.
1165 else if( iterations > 1 && countNonZero(kernel) == kernel.rows*kernel.cols )
1167 anchor = Point(anchor.x*iterations, anchor.y*iterations);
1168 kernel = getStructuringElement(MORPH_RECT,
1169 Size(ksize.width + (iterations-1)*(ksize.width-1),
1170 ksize.height + (iterations-1)*(ksize.height-1)),
1176 #if defined HAVE_TEGRA_OPTIMIZATION
1177 if (src.data != dst.data && iterations == 1 && //NOTE: threads are not used for inplace processing
1178 (borderType & BORDER_ISOLATED) == 0 && //TODO: check border types
1179 src.rows >= 64 ) //NOTE: just heuristics
// Parallel striped execution (nStripes computed in lines not visible).
1183 parallel_for_(Range(0, nStripes),
1184 MorphologyRunner(src, dst, nStripes, iterations, op, kernel, anchor, borderType, borderType, borderValue));
1186 //Ptr<FilterEngine> f = createMorphologyFilter(op, src.type(),
1187 //    kernel, anchor, borderType, borderType, borderValue );
1189 //f->apply( src, dst );
1190 //for( int i = 1; i < iterations; i++ )
1191 //    f->apply( dst, dst );
1194 template<> void Ptr<IplConvKernel>::delete_obj()
1195 { cvReleaseStructuringElement(&obj); }
// Public erosion entry point: thin forwarder to morphOp with MORPH_ERODE.
1199 void cv::erode( InputArray src, OutputArray dst, InputArray kernel,
1200 Point anchor, int iterations,
1201 int borderType, const Scalar& borderValue )
1203 morphOp( MORPH_ERODE, src, dst, kernel, anchor, iterations, borderType, borderValue );
// Public dilation entry point: thin forwarder to morphOp with MORPH_DILATE.
1207 void cv::dilate( InputArray src, OutputArray dst, InputArray kernel,
1208 Point anchor, int iterations,
1209 int borderType, const Scalar& borderValue )
1211 morphOp( MORPH_DILATE, src, dst, kernel, anchor, iterations, borderType, borderValue );
// Compound morphology: composes erode/dilate into open, close, gradient,
// top-hat and black-hat. A temp buffer is used where src and dst alias
// (the combining steps are not visible in this excerpt).
1215 void cv::morphologyEx( InputArray _src, OutputArray _dst, int op,
1216 InputArray kernel, Point anchor, int iterations,
1217 int borderType, const Scalar& borderValue )
1219 Mat src = _src.getMat(), temp;
1220 _dst.create(src.size(), src.type());
1221 Mat dst = _dst.getMat();
1226 erode( src, dst, kernel, anchor, iterations, borderType, borderValue );
1229 dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
// Opening: erode then dilate.
1232 erode( src, dst, kernel, anchor, iterations, borderType, borderValue );
1233 dilate( dst, dst, kernel, anchor, iterations, borderType, borderValue );
// Closing: dilate then erode.
1236 dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
1237 erode( dst, dst, kernel, anchor, iterations, borderType, borderValue );
// Gradient: dilate(src) - erode(src).
1239 case CV_MOP_GRADIENT:
1240 erode( src, temp, kernel, anchor, iterations, borderType, borderValue );
1241 dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
// Top-hat: src - open(src).
1245 if( src.data != dst.data )
1247 erode( src, temp, kernel, anchor, iterations, borderType, borderValue );
1248 dilate( temp, temp, kernel, anchor, iterations, borderType, borderValue );
// Black-hat: close(src) - src.
1251 case CV_MOP_BLACKHAT:
1252 if( src.data != dst.data )
1254 dilate( src, temp, kernel, anchor, iterations, borderType, borderValue );
1255 erode( temp, temp, kernel, anchor, iterations, borderType, borderValue );
1259 CV_Error( CV_StsBadArg, "unknown morphological operation" );
// Legacy C API: allocate an IplConvKernel and fill its values either
// from a caller-supplied mask (CV_SHAPE_CUSTOM) or from the C++
// getStructuringElement for the standard shapes.
1263 CV_IMPL IplConvKernel *
1264 cvCreateStructuringElementEx( int cols, int rows,
1265 int anchorX, int anchorY,
1266 int shape, int *values )
1268 cv::Size ksize = cv::Size(cols, rows);
1269 cv::Point anchor = cv::Point(anchorX, anchorY);
1270 CV_Assert( cols > 0 && rows > 0 && anchor.inside(cv::Rect(0,0,cols,rows)) &&
1271 (shape != CV_SHAPE_CUSTOM || values != 0));
// Header and values are carved from one allocation; +32 appears to leave
// slack for alignment (exact layout lines are not visible here).
1273 int i, size = rows * cols;
1274 int element_size = sizeof(IplConvKernel) + size*sizeof(int);
1275 IplConvKernel *element = (IplConvKernel*)cvAlloc(element_size + 32);
1277 element->nCols = cols;
1278 element->nRows = rows;
1279 element->anchorX = anchorX;
1280 element->anchorY = anchorY;
// nShiftR historically stores the shape code in this legacy struct.
1281 element->nShiftR = shape < CV_SHAPE_ELLIPSE ? shape : CV_SHAPE_CUSTOM;
1282 element->values = (int*)(element + 1);
1284 if( shape == CV_SHAPE_CUSTOM )
1286 for( i = 0; i < size; i++ )
1287 element->values[i] = values[i];
1291 cv::Mat elem = cv::getStructuringElement(shape, ksize, anchor);
1292 for( i = 0; i < size; i++ )
1293 element->values[i] = elem.data[i];
// Legacy C API: free a structuring element created by
// cvCreateStructuringElementEx; a null double-pointer is an error.
// NOTE(review): the function body is only partially visible here.
1301 cvReleaseStructuringElement( IplConvKernel ** element )
1304 CV_Error( CV_StsNullPtr, "" );
// Converts a legacy IplConvKernel into a CV_8U cv::Mat mask plus anchor.
// A null src yields the default 3x3/anchor(1,1) configuration (the
// null-handling lines are not fully visible in this excerpt).
1309 static void convertConvKernel( const IplConvKernel* src, cv::Mat& dst, cv::Point& anchor )
1313 anchor = cv::Point(1,1);
1317 anchor = cv::Point(src->anchorX, src->anchorY);
1318 dst.create(src->nRows, src->nCols, CV_8U);
// Narrow the int mask values down to uchar for the C++ filters.
1320 int i, size = src->nRows*src->nCols;
1321 for( i = 0; i < size; i++ )
1322 dst.data[i] = (uchar)src->values[i];
// Legacy C API erosion: wraps cv::erode with BORDER_REPLICATE, matching
// the old library's border behavior.
1327 cvErode( const CvArr* srcarr, CvArr* dstarr, IplConvKernel* element, int iterations )
1329 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
1330 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
1332 convertConvKernel( element, kernel, anchor );
1333 cv::erode( src, dst, kernel, anchor, iterations, cv::BORDER_REPLICATE );
// Legacy C API dilation: wraps cv::dilate with BORDER_REPLICATE.
1338 cvDilate( const CvArr* srcarr, CvArr* dstarr, IplConvKernel* element, int iterations )
1340 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
1341 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
1343 convertConvKernel( element, kernel, anchor );
1344 cv::dilate( src, dst, kernel, anchor, iterations, cv::BORDER_REPLICATE );
// Legacy C API compound morphology: substitutes a default 3x3 element
// when none is given, converts it, then forwards to cv::morphologyEx.
1349 cvMorphologyEx( const void* srcarr, void* dstarr, void*,
1350 IplConvKernel* element, int op, int iterations )
1352 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
1353 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
1355 IplConvKernel* temp_element = NULL;
1358 temp_element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT);
1360 temp_element = element;
1362 convertConvKernel( temp_element, kernel, anchor );
// Only release the element if it was created here as a default.
1365 cvReleaseStructuringElement(&temp_element);
1367 cv::morphologyEx( src, dst, op, kernel, anchor, iterations, cv::BORDER_REPLICATE );