1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "precomp.hpp"
45 #include "opencl_kernels_imgproc.hpp"
47 /****************************************************************************************\
48 Basic Morphological Operations: Erosion & Dilation
49 \****************************************************************************************/
// Pixel-wise minimum functor: the erosion operation for the generic
// (scalar) morphology filter templates below. This chunk dropped the
// struct's body delimiters; the complete definition is restored here.
// rtype is consumed by MorphRowFilter/MorphColumnFilter/MorphFilter
// via `typename Op::rtype`.
template<typename T> struct MinOp
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    // Returns the smaller of the two operands.
    T operator ()(const T a, const T b) const { return std::min(a, b); }
};
// Pixel-wise maximum functor: the dilation operation for the generic
// (scalar) morphology filter templates below. This chunk dropped the
// struct's body delimiters; the complete definition is restored here.
// rtype is consumed by MorphRowFilter/MorphColumnFilter/MorphFilter
// via `typename Op::rtype`.
template<typename T> struct MaxOp
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    // Returns the larger of the two operands.
    T operator ()(const T a, const T b) const { return std::max(a, b); }
};
// Branchless 8-bit min/max built on CV_FAST_CAST_8U (presumably a
// saturate-to-[0,255] cast of the difference -- TODO(review): confirm its
// exact semantics); avoids a per-pixel conditional in the hot loops.
#define CV_MIN_8U(a,b) ((a) - CV_FAST_CAST_8U((a) - (b)))
#define CV_MAX_8U(a,b) ((a) + CV_FAST_CAST_8U((b) - (a)))

// Specialize the generic MinOp/MaxOp functors for uchar so the 8-bit
// scalar path uses the branchless macros instead of std::min/std::max.
template<> inline uchar MinOp<uchar>::operator ()(const uchar a, const uchar b) const { return CV_MIN_8U(a, b); }
template<> inline uchar MaxOp<uchar>::operator ()(const uchar a, const uchar b) const { return CV_MAX_8U(a, b); }
80 MorphRowNoVec(int, int) {}
81 int operator()(const uchar*, uchar*, int, int) const { return 0; }
84 struct MorphColumnNoVec
86 MorphColumnNoVec(int, int) {}
87 int operator()(const uchar**, uchar*, int, int, int) const { return 0; }
92 int operator()(uchar**, int, uchar*, int) const { return 0; }
97 template<class VecUpdate> struct MorphRowIVec
99 enum { ESZ = VecUpdate::ESZ };
101 MorphRowIVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
102 int operator()(const uchar* src, uchar* dst, int width, int cn) const
104 if( !checkHardwareSupport(CV_CPU_SSE2) )
108 int i, k, _ksize = ksize*cn;
109 width = (width & -4)*cn;
112 for( i = 0; i <= width - 16; i += 16 )
114 __m128i s = _mm_loadu_si128((const __m128i*)(src + i));
115 for( k = cn; k < _ksize; k += cn )
117 __m128i x = _mm_loadu_si128((const __m128i*)(src + i + k));
120 _mm_storeu_si128((__m128i*)(dst + i), s);
123 for( ; i < width; i += 4 )
125 __m128i s = _mm_cvtsi32_si128(*(const int*)(src + i));
126 for( k = cn; k < _ksize; k += cn )
128 __m128i x = _mm_cvtsi32_si128(*(const int*)(src + i + k));
131 *(int*)(dst + i) = _mm_cvtsi128_si32(s);
141 template<class VecUpdate> struct MorphRowFVec
143 MorphRowFVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
144 int operator()(const uchar* src, uchar* dst, int width, int cn) const
146 if( !checkHardwareSupport(CV_CPU_SSE) )
149 int i, k, _ksize = ksize*cn;
150 width = (width & -4)*cn;
153 for( i = 0; i < width; i += 4 )
155 __m128 s = _mm_loadu_ps((const float*)src + i);
156 for( k = cn; k < _ksize; k += cn )
158 __m128 x = _mm_loadu_ps((const float*)src + i + k);
161 _mm_storeu_ps((float*)dst + i, s);
171 template<class VecUpdate> struct MorphColumnIVec
173 enum { ESZ = VecUpdate::ESZ };
175 MorphColumnIVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
176 int operator()(const uchar** src, uchar* dst, int dststep, int count, int width) const
178 if( !checkHardwareSupport(CV_CPU_SSE2) )
181 int i = 0, k, _ksize = ksize;
185 for( i = 0; i < count + ksize - 1; i++ )
186 CV_Assert( ((size_t)src[i] & 15) == 0 );
188 for( ; _ksize > 1 && count > 1; count -= 2, dst += dststep*2, src += 2 )
190 for( i = 0; i <= width - 32; i += 32 )
192 const uchar* sptr = src[1] + i;
193 __m128i s0 = _mm_load_si128((const __m128i*)sptr);
194 __m128i s1 = _mm_load_si128((const __m128i*)(sptr + 16));
197 for( k = 2; k < _ksize; k++ )
200 x0 = _mm_load_si128((const __m128i*)sptr);
201 x1 = _mm_load_si128((const __m128i*)(sptr + 16));
202 s0 = updateOp(s0, x0);
203 s1 = updateOp(s1, x1);
207 x0 = _mm_load_si128((const __m128i*)sptr);
208 x1 = _mm_load_si128((const __m128i*)(sptr + 16));
209 _mm_storeu_si128((__m128i*)(dst + i), updateOp(s0, x0));
210 _mm_storeu_si128((__m128i*)(dst + i + 16), updateOp(s1, x1));
213 x0 = _mm_load_si128((const __m128i*)sptr);
214 x1 = _mm_load_si128((const __m128i*)(sptr + 16));
215 _mm_storeu_si128((__m128i*)(dst + dststep + i), updateOp(s0, x0));
216 _mm_storeu_si128((__m128i*)(dst + dststep + i + 16), updateOp(s1, x1));
219 for( ; i <= width - 8; i += 8 )
221 __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[1] + i)), x0;
223 for( k = 2; k < _ksize; k++ )
225 x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
226 s0 = updateOp(s0, x0);
229 x0 = _mm_loadl_epi64((const __m128i*)(src[0] + i));
230 _mm_storel_epi64((__m128i*)(dst + i), updateOp(s0, x0));
231 x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
232 _mm_storel_epi64((__m128i*)(dst + dststep + i), updateOp(s0, x0));
236 for( ; count > 0; count--, dst += dststep, src++ )
238 for( i = 0; i <= width - 32; i += 32 )
240 const uchar* sptr = src[0] + i;
241 __m128i s0 = _mm_load_si128((const __m128i*)sptr);
242 __m128i s1 = _mm_load_si128((const __m128i*)(sptr + 16));
245 for( k = 1; k < _ksize; k++ )
248 x0 = _mm_load_si128((const __m128i*)sptr);
249 x1 = _mm_load_si128((const __m128i*)(sptr + 16));
250 s0 = updateOp(s0, x0);
251 s1 = updateOp(s1, x1);
253 _mm_storeu_si128((__m128i*)(dst + i), s0);
254 _mm_storeu_si128((__m128i*)(dst + i + 16), s1);
257 for( ; i <= width - 8; i += 8 )
259 __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[0] + i)), x0;
261 for( k = 1; k < _ksize; k++ )
263 x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
264 s0 = updateOp(s0, x0);
266 _mm_storel_epi64((__m128i*)(dst + i), s0);
277 template<class VecUpdate> struct MorphColumnFVec
279 MorphColumnFVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
280 int operator()(const uchar** _src, uchar* _dst, int dststep, int count, int width) const
282 if( !checkHardwareSupport(CV_CPU_SSE) )
285 int i = 0, k, _ksize = ksize;
288 for( i = 0; i < count + ksize - 1; i++ )
289 CV_Assert( ((size_t)_src[i] & 15) == 0 );
291 const float** src = (const float**)_src;
292 float* dst = (float*)_dst;
293 dststep /= sizeof(dst[0]);
295 for( ; _ksize > 1 && count > 1; count -= 2, dst += dststep*2, src += 2 )
297 for( i = 0; i <= width - 16; i += 16 )
299 const float* sptr = src[1] + i;
300 __m128 s0 = _mm_load_ps(sptr);
301 __m128 s1 = _mm_load_ps(sptr + 4);
302 __m128 s2 = _mm_load_ps(sptr + 8);
303 __m128 s3 = _mm_load_ps(sptr + 12);
304 __m128 x0, x1, x2, x3;
306 for( k = 2; k < _ksize; k++ )
309 x0 = _mm_load_ps(sptr);
310 x1 = _mm_load_ps(sptr + 4);
311 s0 = updateOp(s0, x0);
312 s1 = updateOp(s1, x1);
313 x2 = _mm_load_ps(sptr + 8);
314 x3 = _mm_load_ps(sptr + 12);
315 s2 = updateOp(s2, x2);
316 s3 = updateOp(s3, x3);
320 x0 = _mm_load_ps(sptr);
321 x1 = _mm_load_ps(sptr + 4);
322 x2 = _mm_load_ps(sptr + 8);
323 x3 = _mm_load_ps(sptr + 12);
324 _mm_storeu_ps(dst + i, updateOp(s0, x0));
325 _mm_storeu_ps(dst + i + 4, updateOp(s1, x1));
326 _mm_storeu_ps(dst + i + 8, updateOp(s2, x2));
327 _mm_storeu_ps(dst + i + 12, updateOp(s3, x3));
330 x0 = _mm_load_ps(sptr);
331 x1 = _mm_load_ps(sptr + 4);
332 x2 = _mm_load_ps(sptr + 8);
333 x3 = _mm_load_ps(sptr + 12);
334 _mm_storeu_ps(dst + dststep + i, updateOp(s0, x0));
335 _mm_storeu_ps(dst + dststep + i + 4, updateOp(s1, x1));
336 _mm_storeu_ps(dst + dststep + i + 8, updateOp(s2, x2));
337 _mm_storeu_ps(dst + dststep + i + 12, updateOp(s3, x3));
340 for( ; i <= width - 4; i += 4 )
342 __m128 s0 = _mm_load_ps(src[1] + i), x0;
344 for( k = 2; k < _ksize; k++ )
346 x0 = _mm_load_ps(src[k] + i);
347 s0 = updateOp(s0, x0);
350 x0 = _mm_load_ps(src[0] + i);
351 _mm_storeu_ps(dst + i, updateOp(s0, x0));
352 x0 = _mm_load_ps(src[k] + i);
353 _mm_storeu_ps(dst + dststep + i, updateOp(s0, x0));
357 for( ; count > 0; count--, dst += dststep, src++ )
359 for( i = 0; i <= width - 16; i += 16 )
361 const float* sptr = src[0] + i;
362 __m128 s0 = _mm_load_ps(sptr);
363 __m128 s1 = _mm_load_ps(sptr + 4);
364 __m128 s2 = _mm_load_ps(sptr + 8);
365 __m128 s3 = _mm_load_ps(sptr + 12);
366 __m128 x0, x1, x2, x3;
368 for( k = 1; k < _ksize; k++ )
371 x0 = _mm_load_ps(sptr);
372 x1 = _mm_load_ps(sptr + 4);
373 s0 = updateOp(s0, x0);
374 s1 = updateOp(s1, x1);
375 x2 = _mm_load_ps(sptr + 8);
376 x3 = _mm_load_ps(sptr + 12);
377 s2 = updateOp(s2, x2);
378 s3 = updateOp(s3, x3);
380 _mm_storeu_ps(dst + i, s0);
381 _mm_storeu_ps(dst + i + 4, s1);
382 _mm_storeu_ps(dst + i + 8, s2);
383 _mm_storeu_ps(dst + i + 12, s3);
386 for( i = 0; i <= width - 4; i += 4 )
388 __m128 s0 = _mm_load_ps(src[0] + i), x0;
389 for( k = 1; k < _ksize; k++ )
391 x0 = _mm_load_ps(src[k] + i);
392 s0 = updateOp(s0, x0);
394 _mm_storeu_ps(dst + i, s0);
405 template<class VecUpdate> struct MorphIVec
407 enum { ESZ = VecUpdate::ESZ };
409 int operator()(uchar** src, int nz, uchar* dst, int width) const
411 if( !checkHardwareSupport(CV_CPU_SSE2) )
418 for( i = 0; i <= width - 32; i += 32 )
420 const uchar* sptr = src[0] + i;
421 __m128i s0 = _mm_loadu_si128((const __m128i*)sptr);
422 __m128i s1 = _mm_loadu_si128((const __m128i*)(sptr + 16));
425 for( k = 1; k < nz; k++ )
428 x0 = _mm_loadu_si128((const __m128i*)sptr);
429 x1 = _mm_loadu_si128((const __m128i*)(sptr + 16));
430 s0 = updateOp(s0, x0);
431 s1 = updateOp(s1, x1);
433 _mm_storeu_si128((__m128i*)(dst + i), s0);
434 _mm_storeu_si128((__m128i*)(dst + i + 16), s1);
437 for( ; i <= width - 8; i += 8 )
439 __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[0] + i)), x0;
441 for( k = 1; k < nz; k++ )
443 x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
444 s0 = updateOp(s0, x0);
446 _mm_storel_epi64((__m128i*)(dst + i), s0);
454 template<class VecUpdate> struct MorphFVec
456 int operator()(uchar** _src, int nz, uchar* _dst, int width) const
458 if( !checkHardwareSupport(CV_CPU_SSE) )
461 const float** src = (const float**)_src;
462 float* dst = (float*)_dst;
466 for( i = 0; i <= width - 16; i += 16 )
468 const float* sptr = src[0] + i;
469 __m128 s0 = _mm_loadu_ps(sptr);
470 __m128 s1 = _mm_loadu_ps(sptr + 4);
471 __m128 s2 = _mm_loadu_ps(sptr + 8);
472 __m128 s3 = _mm_loadu_ps(sptr + 12);
473 __m128 x0, x1, x2, x3;
475 for( k = 1; k < nz; k++ )
478 x0 = _mm_loadu_ps(sptr);
479 x1 = _mm_loadu_ps(sptr + 4);
480 x2 = _mm_loadu_ps(sptr + 8);
481 x3 = _mm_loadu_ps(sptr + 12);
482 s0 = updateOp(s0, x0);
483 s1 = updateOp(s1, x1);
484 s2 = updateOp(s2, x2);
485 s3 = updateOp(s3, x3);
487 _mm_storeu_ps(dst + i, s0);
488 _mm_storeu_ps(dst + i + 4, s1);
489 _mm_storeu_ps(dst + i + 8, s2);
490 _mm_storeu_ps(dst + i + 12, s3);
493 for( ; i <= width - 4; i += 4 )
495 __m128 s0 = _mm_loadu_ps(src[0] + i), x0;
497 for( k = 1; k < nz; k++ )
499 x0 = _mm_loadu_ps(src[k] + i);
500 s0 = updateOp(s0, x0);
502 _mm_storeu_ps(dst + i, s0);
505 for( ; i < width; i++ )
507 __m128 s0 = _mm_load_ss(src[0] + i), x0;
509 for( k = 1; k < nz; k++ )
511 x0 = _mm_load_ss(src[k] + i);
512 s0 = updateOp(s0, x0);
514 _mm_store_ss(dst + i, s0);
524 __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_min_epu8(a,b); }
529 __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_max_epu8(a,b); }
534 __m128i operator()(const __m128i& a, const __m128i& b) const
535 { return _mm_subs_epu16(a,_mm_subs_epu16(a,b)); }
540 __m128i operator()(const __m128i& a, const __m128i& b) const
541 { return _mm_adds_epu16(_mm_subs_epu16(a,b), b); }
546 __m128i operator()(const __m128i& a, const __m128i& b) const
547 { return _mm_min_epi16(a, b); }
552 __m128i operator()(const __m128i& a, const __m128i& b) const
553 { return _mm_max_epi16(a, b); }
// Element-wise minimum of two packed 4 x float32 SSE registers.
struct VMin32f
{
    __m128 operator()(const __m128& a, const __m128& b) const
    {
        return _mm_min_ps(a, b);
    }
};
// Element-wise maximum of two packed 4 x float32 SSE registers.
struct VMax32f
{
    __m128 operator()(const __m128& a, const __m128& b) const
    {
        return _mm_max_ps(a, b);
    }
};
// Aliases binding each (operation, depth) pair to its SSE implementation.
// Row filters sweep horizontally within a single row of the image.
typedef MorphRowIVec<VMin8u> ErodeRowVec8u;
typedef MorphRowIVec<VMax8u> DilateRowVec8u;
typedef MorphRowIVec<VMin16u> ErodeRowVec16u;
typedef MorphRowIVec<VMax16u> DilateRowVec16u;
typedef MorphRowIVec<VMin16s> ErodeRowVec16s;
typedef MorphRowIVec<VMax16s> DilateRowVec16s;
typedef MorphRowFVec<VMin32f> ErodeRowVec32f;
typedef MorphRowFVec<VMax32f> DilateRowVec32f;

// Column filters reduce vertically across the buffered source rows.
typedef MorphColumnIVec<VMin8u> ErodeColumnVec8u;
typedef MorphColumnIVec<VMax8u> DilateColumnVec8u;
typedef MorphColumnIVec<VMin16u> ErodeColumnVec16u;
typedef MorphColumnIVec<VMax16u> DilateColumnVec16u;
typedef MorphColumnIVec<VMin16s> ErodeColumnVec16s;
typedef MorphColumnIVec<VMax16s> DilateColumnVec16s;
typedef MorphColumnFVec<VMin32f> ErodeColumnVec32f;
typedef MorphColumnFVec<VMax32f> DilateColumnVec32f;

// 2D variants used by MorphFilter for arbitrary (non-rectangular) kernels.
typedef MorphIVec<VMin8u> ErodeVec8u;
typedef MorphIVec<VMax8u> DilateVec8u;
typedef MorphIVec<VMin16u> ErodeVec16u;
typedef MorphIVec<VMax16u> DilateVec16u;
typedef MorphIVec<VMin16s> ErodeVec16s;
typedef MorphIVec<VMax16s> DilateVec16s;
typedef MorphFVec<VMin32f> ErodeVec32f;
typedef MorphFVec<VMax32f> DilateVec32f;
587 #ifdef HAVE_TEGRA_OPTIMIZATION
588 using tegra::ErodeRowVec8u;
589 using tegra::DilateRowVec8u;
591 using tegra::ErodeColumnVec8u;
592 using tegra::DilateColumnVec8u;
594 typedef MorphRowNoVec ErodeRowVec8u;
595 typedef MorphRowNoVec DilateRowVec8u;
597 typedef MorphColumnNoVec ErodeColumnVec8u;
598 typedef MorphColumnNoVec DilateColumnVec8u;
601 typedef MorphRowNoVec ErodeRowVec16u;
602 typedef MorphRowNoVec DilateRowVec16u;
603 typedef MorphRowNoVec ErodeRowVec16s;
604 typedef MorphRowNoVec DilateRowVec16s;
605 typedef MorphRowNoVec ErodeRowVec32f;
606 typedef MorphRowNoVec DilateRowVec32f;
608 typedef MorphColumnNoVec ErodeColumnVec16u;
609 typedef MorphColumnNoVec DilateColumnVec16u;
610 typedef MorphColumnNoVec ErodeColumnVec16s;
611 typedef MorphColumnNoVec DilateColumnVec16s;
612 typedef MorphColumnNoVec ErodeColumnVec32f;
613 typedef MorphColumnNoVec DilateColumnVec32f;
615 typedef MorphNoVec ErodeVec8u;
616 typedef MorphNoVec DilateVec8u;
617 typedef MorphNoVec ErodeVec16u;
618 typedef MorphNoVec DilateVec16u;
619 typedef MorphNoVec ErodeVec16s;
620 typedef MorphNoVec DilateVec16s;
621 typedef MorphNoVec ErodeVec32f;
622 typedef MorphNoVec DilateVec32f;
// 64-bit float morphology has no vectorized path here: all CV_64F
// aliases map unconditionally to the scalar no-op stubs, so the generic
// scalar loops do the work.
typedef MorphRowNoVec ErodeRowVec64f;
typedef MorphRowNoVec DilateRowVec64f;
typedef MorphColumnNoVec ErodeColumnVec64f;
typedef MorphColumnNoVec DilateColumnVec64f;
typedef MorphNoVec ErodeVec64f;
typedef MorphNoVec DilateVec64f;
634 template<class Op, class VecOp> struct MorphRowFilter : public BaseRowFilter
636 typedef typename Op::rtype T;
638 MorphRowFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
644 void operator()(const uchar* src, uchar* dst, int width, int cn)
646 int i, j, k, _ksize = ksize*cn;
647 const T* S = (const T*)src;
653 for( i = 0; i < width*cn; i++ )
658 int i0 = vecOp(src, dst, width, cn);
661 for( k = 0; k < cn; k++, S++, D++ )
663 for( i = i0; i <= width - cn*2; i += cn*2 )
667 for( j = cn*2; j < _ksize; j += cn )
670 D[i+cn] = op(m, s[j]);
673 for( ; i < width; i += cn )
677 for( j = cn; j < _ksize; j += cn )
688 template<class Op, class VecOp> struct MorphColumnFilter : public BaseColumnFilter
690 typedef typename Op::rtype T;
692 MorphColumnFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
698 void operator()(const uchar** _src, uchar* dst, int dststep, int count, int width)
700 int i, k, _ksize = ksize;
701 const T** src = (const T**)_src;
705 int i0 = vecOp(_src, dst, dststep, count, width);
706 dststep /= sizeof(D[0]);
708 for( ; _ksize > 1 && count > 1; count -= 2, D += dststep*2, src += 2 )
711 #if CV_ENABLE_UNROLLED
712 for( ; i <= width - 4; i += 4 )
714 const T* sptr = src[1] + i;
715 T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
717 for( k = 2; k < _ksize; k++ )
720 s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
721 s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
725 D[i] = op(s0, sptr[0]);
726 D[i+1] = op(s1, sptr[1]);
727 D[i+2] = op(s2, sptr[2]);
728 D[i+3] = op(s3, sptr[3]);
731 D[i+dststep] = op(s0, sptr[0]);
732 D[i+dststep+1] = op(s1, sptr[1]);
733 D[i+dststep+2] = op(s2, sptr[2]);
734 D[i+dststep+3] = op(s3, sptr[3]);
737 for( ; i < width; i++ )
741 for( k = 2; k < _ksize; k++ )
742 s0 = op(s0, src[k][i]);
744 D[i] = op(s0, src[0][i]);
745 D[i+dststep] = op(s0, src[k][i]);
749 for( ; count > 0; count--, D += dststep, src++ )
752 #if CV_ENABLE_UNROLLED
753 for( ; i <= width - 4; i += 4 )
755 const T* sptr = src[0] + i;
756 T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
758 for( k = 1; k < _ksize; k++ )
761 s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
762 s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
765 D[i] = s0; D[i+1] = s1;
766 D[i+2] = s2; D[i+3] = s3;
769 for( ; i < width; i++ )
772 for( k = 1; k < _ksize; k++ )
773 s0 = op(s0, src[k][i]);
783 template<class Op, class VecOp> struct MorphFilter : BaseFilter
785 typedef typename Op::rtype T;
// Builds a 2D (non-separable) morphology filter from an arbitrary CV_8U
// kernel: records the coordinates of the kernel's non-zero elements in
// `coords` and pre-sizes the per-element source-pointer cache (`ptrs`)
// that operator() fills for every output row.
MorphFilter( const Mat& _kernel, Point _anchor )
ksize = _kernel.size();
CV_Assert( _kernel.type() == CV_8U );
std::vector<uchar> coeffs; // we do not really need the values of non-zero
// kernel elements, just their locations
preprocess2DKernel( _kernel, coords, coeffs );
ptrs.resize( coords.size() );
799 void operator()(const uchar** src, uchar* dst, int dststep, int count, int width, int cn)
801 const Point* pt = &coords[0];
802 const T** kp = (const T**)&ptrs[0];
803 int i, k, nz = (int)coords.size();
807 for( ; count > 0; count--, dst += dststep, src++ )
811 for( k = 0; k < nz; k++ )
812 kp[k] = (const T*)src[pt[k].y] + pt[k].x*cn;
814 i = vecOp(&ptrs[0], nz, dst, width);
815 #if CV_ENABLE_UNROLLED
816 for( ; i <= width - 4; i += 4 )
818 const T* sptr = kp[0] + i;
819 T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
821 for( k = 1; k < nz; k++ )
824 s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
825 s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
828 D[i] = s0; D[i+1] = s1;
829 D[i+2] = s2; D[i+3] = s3;
832 for( ; i < width; i++ )
835 for( k = 1; k < nz; k++ )
836 s0 = op(s0, kp[k][i]);
842 std::vector<Point> coords;
843 std::vector<uchar*> ptrs;
849 /////////////////////////////////// External Interface /////////////////////////////////////
851 cv::Ptr<cv::BaseRowFilter> cv::getMorphologyRowFilter(int op, int type, int ksize, int anchor)
853 int depth = CV_MAT_DEPTH(type);
856 CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
857 if( op == MORPH_ERODE )
860 return makePtr<MorphRowFilter<MinOp<uchar>,
861 ErodeRowVec8u> >(ksize, anchor);
862 if( depth == CV_16U )
863 return makePtr<MorphRowFilter<MinOp<ushort>,
864 ErodeRowVec16u> >(ksize, anchor);
865 if( depth == CV_16S )
866 return makePtr<MorphRowFilter<MinOp<short>,
867 ErodeRowVec16s> >(ksize, anchor);
868 if( depth == CV_32F )
869 return makePtr<MorphRowFilter<MinOp<float>,
870 ErodeRowVec32f> >(ksize, anchor);
871 if( depth == CV_64F )
872 return makePtr<MorphRowFilter<MinOp<double>,
873 ErodeRowVec64f> >(ksize, anchor);
878 return makePtr<MorphRowFilter<MaxOp<uchar>,
879 DilateRowVec8u> >(ksize, anchor);
880 if( depth == CV_16U )
881 return makePtr<MorphRowFilter<MaxOp<ushort>,
882 DilateRowVec16u> >(ksize, anchor);
883 if( depth == CV_16S )
884 return makePtr<MorphRowFilter<MaxOp<short>,
885 DilateRowVec16s> >(ksize, anchor);
886 if( depth == CV_32F )
887 return makePtr<MorphRowFilter<MaxOp<float>,
888 DilateRowVec32f> >(ksize, anchor);
889 if( depth == CV_64F )
890 return makePtr<MorphRowFilter<MaxOp<double>,
891 DilateRowVec64f> >(ksize, anchor);
894 CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
895 return Ptr<BaseRowFilter>();
898 cv::Ptr<cv::BaseColumnFilter> cv::getMorphologyColumnFilter(int op, int type, int ksize, int anchor)
900 int depth = CV_MAT_DEPTH(type);
903 CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
904 if( op == MORPH_ERODE )
907 return makePtr<MorphColumnFilter<MinOp<uchar>,
908 ErodeColumnVec8u> >(ksize, anchor);
909 if( depth == CV_16U )
910 return makePtr<MorphColumnFilter<MinOp<ushort>,
911 ErodeColumnVec16u> >(ksize, anchor);
912 if( depth == CV_16S )
913 return makePtr<MorphColumnFilter<MinOp<short>,
914 ErodeColumnVec16s> >(ksize, anchor);
915 if( depth == CV_32F )
916 return makePtr<MorphColumnFilter<MinOp<float>,
917 ErodeColumnVec32f> >(ksize, anchor);
918 if( depth == CV_64F )
919 return makePtr<MorphColumnFilter<MinOp<double>,
920 ErodeColumnVec64f> >(ksize, anchor);
925 return makePtr<MorphColumnFilter<MaxOp<uchar>,
926 DilateColumnVec8u> >(ksize, anchor);
927 if( depth == CV_16U )
928 return makePtr<MorphColumnFilter<MaxOp<ushort>,
929 DilateColumnVec16u> >(ksize, anchor);
930 if( depth == CV_16S )
931 return makePtr<MorphColumnFilter<MaxOp<short>,
932 DilateColumnVec16s> >(ksize, anchor);
933 if( depth == CV_32F )
934 return makePtr<MorphColumnFilter<MaxOp<float>,
935 DilateColumnVec32f> >(ksize, anchor);
936 if( depth == CV_64F )
937 return makePtr<MorphColumnFilter<MaxOp<double>,
938 DilateColumnVec64f> >(ksize, anchor);
941 CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
942 return Ptr<BaseColumnFilter>();
946 cv::Ptr<cv::BaseFilter> cv::getMorphologyFilter(int op, int type, InputArray _kernel, Point anchor)
948 Mat kernel = _kernel.getMat();
949 int depth = CV_MAT_DEPTH(type);
950 anchor = normalizeAnchor(anchor, kernel.size());
951 CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
952 if( op == MORPH_ERODE )
955 return makePtr<MorphFilter<MinOp<uchar>, ErodeVec8u> >(kernel, anchor);
956 if( depth == CV_16U )
957 return makePtr<MorphFilter<MinOp<ushort>, ErodeVec16u> >(kernel, anchor);
958 if( depth == CV_16S )
959 return makePtr<MorphFilter<MinOp<short>, ErodeVec16s> >(kernel, anchor);
960 if( depth == CV_32F )
961 return makePtr<MorphFilter<MinOp<float>, ErodeVec32f> >(kernel, anchor);
962 if( depth == CV_64F )
963 return makePtr<MorphFilter<MinOp<double>, ErodeVec64f> >(kernel, anchor);
968 return makePtr<MorphFilter<MaxOp<uchar>, DilateVec8u> >(kernel, anchor);
969 if( depth == CV_16U )
970 return makePtr<MorphFilter<MaxOp<ushort>, DilateVec16u> >(kernel, anchor);
971 if( depth == CV_16S )
972 return makePtr<MorphFilter<MaxOp<short>, DilateVec16s> >(kernel, anchor);
973 if( depth == CV_32F )
974 return makePtr<MorphFilter<MaxOp<float>, DilateVec32f> >(kernel, anchor);
975 if( depth == CV_64F )
976 return makePtr<MorphFilter<MaxOp<double>, DilateVec64f> >(kernel, anchor);
979 CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
980 return Ptr<BaseFilter>();
984 cv::Ptr<cv::FilterEngine> cv::createMorphologyFilter( int op, int type, InputArray _kernel,
985 Point anchor, int _rowBorderType, int _columnBorderType,
986 const Scalar& _borderValue )
988 Mat kernel = _kernel.getMat();
989 anchor = normalizeAnchor(anchor, kernel.size());
991 Ptr<BaseRowFilter> rowFilter;
992 Ptr<BaseColumnFilter> columnFilter;
993 Ptr<BaseFilter> filter2D;
995 if( countNonZero(kernel) == kernel.rows*kernel.cols )
997 // rectangular structuring element
998 rowFilter = getMorphologyRowFilter(op, type, kernel.cols, anchor.x);
999 columnFilter = getMorphologyColumnFilter(op, type, kernel.rows, anchor.y);
1002 filter2D = getMorphologyFilter(op, type, kernel, anchor);
1004 Scalar borderValue = _borderValue;
1005 if( (_rowBorderType == BORDER_CONSTANT || _columnBorderType == BORDER_CONSTANT) &&
1006 borderValue == morphologyDefaultBorderValue() )
1008 int depth = CV_MAT_DEPTH(type);
1009 CV_Assert( depth == CV_8U || depth == CV_16U || depth == CV_16S ||
1010 depth == CV_32F || depth == CV_64F );
1011 if( op == MORPH_ERODE )
1012 borderValue = Scalar::all( depth == CV_8U ? (double)UCHAR_MAX :
1013 depth == CV_16U ? (double)USHRT_MAX :
1014 depth == CV_16S ? (double)SHRT_MAX :
1015 depth == CV_32F ? (double)FLT_MAX : DBL_MAX);
1017 borderValue = Scalar::all( depth == CV_8U || depth == CV_16U ?
1019 depth == CV_16S ? (double)SHRT_MIN :
1020 depth == CV_32F ? (double)-FLT_MAX : -DBL_MAX);
1023 return makePtr<FilterEngine>(filter2D, rowFilter, columnFilter,
1024 type, type, type, _rowBorderType, _columnBorderType, borderValue );
1028 cv::Mat cv::getStructuringElement(int shape, Size ksize, Point anchor)
1034 CV_Assert( shape == MORPH_RECT || shape == MORPH_CROSS || shape == MORPH_ELLIPSE );
1036 anchor = normalizeAnchor(anchor, ksize);
1038 if( ksize == Size(1,1) )
1041 if( shape == MORPH_ELLIPSE )
1045 inv_r2 = r ? 1./((double)r*r) : 0;
1048 Mat elem(ksize, CV_8U);
1050 for( i = 0; i < ksize.height; i++ )
1052 uchar* ptr = elem.ptr(i);
1055 if( shape == MORPH_RECT || (shape == MORPH_CROSS && i == anchor.y) )
1057 else if( shape == MORPH_CROSS )
1058 j1 = anchor.x, j2 = j1 + 1;
1062 if( std::abs(dy) <= r )
1064 int dx = saturate_cast<int>(c*std::sqrt((r*r - dy*dy)*inv_r2));
1065 j1 = std::max( c - dx, 0 );
1066 j2 = std::min( c + dx + 1, ksize.width );
1070 for( j = 0; j < j1; j++ )
1072 for( ; j < j2; j++ )
1074 for( ; j < ksize.width; j++ )
1084 class MorphologyRunner : public ParallelLoopBody
1087 MorphologyRunner(Mat _src, Mat _dst, int _nStripes, int _iterations,
1088 int _op, Mat _kernel, Point _anchor,
1089 int _rowBorderType, int _columnBorderType, const Scalar& _borderValue) :
1090 borderValue(_borderValue)
1095 nStripes = _nStripes;
1096 iterations = _iterations;
1101 rowBorderType = _rowBorderType;
1102 columnBorderType = _columnBorderType;
1105 void operator () ( const Range& range ) const
1107 int row0 = std::min(cvRound(range.start * src.rows / nStripes), src.rows);
1108 int row1 = std::min(cvRound(range.end * src.rows / nStripes), src.rows);
1111 printf("Size = (%d, %d), range[%d,%d), row0 = %d, row1 = %d\n",
1112 src.rows, src.cols, range.start, range.end, row0, row1);*/
1114 Mat srcStripe = src.rowRange(row0, row1);
1115 Mat dstStripe = dst.rowRange(row0, row1);
1117 Ptr<FilterEngine> f = createMorphologyFilter(op, src.type(), kernel, anchor,
1118 rowBorderType, columnBorderType, borderValue );
1120 f->apply( srcStripe, dstStripe );
1121 for( int i = 1; i < iterations; i++ )
1122 f->apply( dstStripe, dstStripe );
1135 int columnBorderType;
1139 #if IPP_VERSION_X100 >= 801
1140 static bool IPPMorphReplicate(int op, const Mat &src, Mat &dst, const Mat &kernel,
1141 const Size& ksize, const Point &anchor, bool rectKernel)
1143 int type = src.type();
1144 const Mat* _src = &src;
1146 if (src.data == dst.data)
1152 IppiSize roiSize = {src.cols, src.rows};
1153 IppiSize kernelSize = {ksize.width, ksize.height};
1158 if (((kernel.cols - 1) / 2 != anchor.x) || ((kernel.rows - 1) / 2 != anchor.y))
1160 #define IPP_MORPH_CASE(cvtype, flavor, data_type) \
1163 int specSize = 0, bufferSize = 0;\
1164 if (0 > ippiMorphologyBorderGetSize_##flavor(roiSize.width, kernelSize, &specSize, &bufferSize))\
1166 IppiMorphState *pSpec = (IppiMorphState*)ippMalloc(specSize);\
1167 Ipp8u *pBuffer = (Ipp8u*)ippMalloc(bufferSize);\
1168 if (0 > ippiMorphologyBorderInit_##flavor(roiSize.width, kernel.ptr(), kernelSize, pSpec, pBuffer))\
1175 if (op == MORPH_ERODE)\
1176 ok = (0 <= ippiErodeBorder_##flavor(_src->ptr<Ipp##data_type>(), (int)_src->step[0], dst.ptr<Ipp##data_type>(), (int)dst.step[0],\
1177 roiSize, ippBorderRepl, 0, pSpec, pBuffer));\
1179 ok = (0 <= ippiDilateBorder_##flavor(_src->ptr<Ipp##data_type>(), (int)_src->step[0], dst.ptr<Ipp##data_type>(), (int)dst.step[0],\
1180 roiSize, ippBorderRepl, 0, pSpec, pBuffer));\
1187 IppiPoint point = {anchor.x, anchor.y};
// This is the case that can be used with an anchor not at the center of the
// kernel; however ippiMorphologyGetSize_, ippiErodeBorderReplicate_ and
// ippiDilateBorderReplicate_ are deprecated.
// Deprecated-IPP dispatch case: erode/dilate with replicate border using the
// legacy ippiMorphology* API (supports a non-centered anchor).
// Fix: the original had `kernel.ptr() kernelSize` -- a missing comma in the
// ippiMorphologyGetSize_ argument list, which cannot compile if this branch
// is ever instantiated. NOTE(review): surrounding case label/braces were
// reconstructed to match the other IPP_MORPH_CASE definitions in this file.
#define IPP_MORPH_CASE(cvtype, flavor, data_type) \
    case cvtype: \
        {\
            int specSize = 0, bufferSize = 0;\
            (void)bufferSize;\
            if (0 > ippiMorphologyGetSize_##flavor( roiSize.width, kernel.ptr(), kernelSize, &specSize))\
                return false;\
            bool ok = false;\
            IppiMorphState* pState = (IppiMorphState*)ippMalloc(specSize);\
            if (ippiMorphologyInit_##flavor(roiSize.width, kernel.ptr(), kernelSize, point, pState) >= 0)\
            {\
                if (op == MORPH_ERODE)\
                    ok = ippiErodeBorderReplicate_##flavor(_src->ptr<Ipp##data_type>(), (int)_src->step[0],\
                        dst.ptr<Ipp##data_type>(), (int)dst.step[0],\
                        roiSize, ippBorderRepl, pState ) >= 0;\
                else\
                    ok = ippiDilateBorderReplicate_##flavor(_src->ptr<Ipp##data_type>(), (int)_src->step[0],\
                        dst.ptr<Ipp##data_type>(), (int)dst.step[0],\
                        roiSize, ippBorderRepl, pState ) >= 0;\
            }\
            ippFree(pState);\
            return ok;\
        }
1217 IPP_MORPH_CASE(CV_8UC1, 8u_C1R, 8u);
1218 IPP_MORPH_CASE(CV_8UC3, 8u_C3R, 8u);
1219 IPP_MORPH_CASE(CV_8UC4, 8u_C4R, 8u);
1220 IPP_MORPH_CASE(CV_32FC1, 32f_C1R, 32f);
1221 IPP_MORPH_CASE(CV_32FC3, 32f_C3R, 32f);
1222 IPP_MORPH_CASE(CV_32FC4, 32f_C4R, 32f);
1227 #undef IPP_MORPH_CASE
1231 IppiPoint point = {anchor.x, anchor.y};
1233 #define IPP_MORPH_CASE(cvtype, flavor, data_type) \
1237 if (0 > ippiFilterMinGetBufferSize_##flavor(src.cols, kernelSize, &bufSize))\
1239 AutoBuffer<uchar> buf(bufSize + 64);\
1240 uchar* buffer = alignPtr((uchar*)buf, 32);\
1241 if (op == MORPH_ERODE)\
1242 return (0 <= ippiFilterMinBorderReplicate_##flavor(_src->ptr<Ipp##data_type>(), (int)_src->step[0], dst.ptr<Ipp##data_type>(), (int)dst.step[0], roiSize, kernelSize, point, buffer));\
1243 return (0 <= ippiFilterMaxBorderReplicate_##flavor(_src->ptr<Ipp##data_type>(), (int)_src->step[0], dst.ptr<Ipp##data_type>(), (int)dst.step[0], roiSize, kernelSize, point, buffer));\
1249 IPP_MORPH_CASE(CV_8UC1, 8u_C1R, 8u);
1250 IPP_MORPH_CASE(CV_8UC3, 8u_C3R, 8u);
1251 IPP_MORPH_CASE(CV_8UC4, 8u_C4R, 8u);
1252 IPP_MORPH_CASE(CV_32FC1, 32f_C1R, 32f);
1253 IPP_MORPH_CASE(CV_32FC3, 32f_C3R, 32f);
1254 IPP_MORPH_CASE(CV_32FC4, 32f_C4R, 32f);
1258 #undef IPP_MORPH_CASE
// IPP-accelerated entry point for plain erode/dilate. Validates that the
// request fits the constraints of IPPMorphReplicate, folds iterations into
// an equivalent single-pass kernel where possible, and delegates. Returns
// false when IPP cannot handle the case so the caller falls back to the
// generic implementation.
1263 static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst,
1264 const Mat& _kernel, Point anchor, int iterations,
1265 int borderType, const Scalar &borderValue)
1267 Mat src = _src.getMat(), kernel = _kernel;
1268 int type = src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
// Reject anything IPP does not support here: only 8U/32F depths with
// 1/3/4 channels; border must be REPLICATE, or CONSTANT with the default
// border value and a 3x3 kernel; only MORPH_ERODE/MORPH_DILATE; no ROIs.
1270 if( !( depth == CV_8U || depth == CV_32F ) || !(cn == 1 || cn == 3 || cn == 4) ||
1271 !( borderType == cv::BORDER_REPLICATE || (borderType == cv::BORDER_CONSTANT && borderValue == morphologyDefaultBorderValue() &&
1272 kernel.size() == Size(3,3)) ) || !( op == MORPH_DILATE || op == MORPH_ERODE) || _src.isSubmatrix() )
1275 // In case BORDER_CONSTANT, IPPMorphReplicate works correct with kernels of size 3*3 only
1276 if( borderType == cv::BORDER_CONSTANT && kernel.data )
// Scan the anchor column and anchor row of the kernel; presumably this
// rejects kernel shapes for which the replicate-border IPP routine would
// differ from the requested constant border -- the branch bodies between
// these lines are not visible in this view, so confirm against the full file.
1279 for( y = 0; y < kernel.rows; y++ )
1281 if( kernel.at<uchar>(y, anchor.x) != 0 )
1283 for( x = 0; x < kernel.cols; x++ )
1285 if( kernel.at<uchar>(y,x) != 0 )
1289 for( x = 0; x < kernel.cols; x++ )
1291 if( kernel.at<uchar>(anchor.y, x) != 0 )
1293 for( y = 0; y < kernel.rows; y++ )
1295 if( kernel.at<uchar>(y,x) != 0 )
// An empty kernel stands for the default 3x3 structuring element.
1301 Size ksize = !kernel.empty() ? kernel.size() : Size(3,3);
1303 _dst.create( src.size(), src.type() );
1304 Mat dst = _dst.getMat();
// Degenerate cases (zero iterations or a 1x1 kernel) are handled in the
// elided lines following this condition.
1306 if( iterations == 0 || kernel.rows*kernel.cols == 1 )
1312 bool rectKernel = false;
1313 if( kernel.empty() )
// Default kernel: iterating a 3x3 rectangle N times equals one pass with
// a (2N+1)x(2N+1) rectangle anchored at its center.
1315 ksize = Size(1+iterations*2,1+iterations*2);
1316 anchor = Point(iterations, iterations);
1320 else if( iterations >= 1 && countNonZero(kernel) == kernel.rows*kernel.cols )
// Fully-populated (rectangular) kernel: fold the iteration count into an
// equivalent enlarged kernel size and scaled anchor.
1322 ksize = Size(ksize.width + (iterations-1)*(ksize.width-1),
1323 ksize.height + (iterations-1)*(ksize.height-1)),
1324 anchor = Point(anchor.x*iterations, anchor.y*iterations);
1330 // TODO: implement the case of iterations > 1.
1331 if( iterations > 1 )
1334 return IPPMorphReplicate( op, src, dst, kernel, ksize, anchor, rectKernel );
// Round sz up to the nearest multiple of n (assumes n > 0); used below to
// pad OpenCL global work sizes to friendly multiples.
1340 #define ROUNDUP(sz, n) ((sz) + (n) - 1 - (((sz) + (n) - 1) % (n)))
// OpenCL path tuned for small structuring elements: builds the "filterSmall"
// kernel with the morphology PROCESS() steps baked into the build options,
// picks per-work-item tiling based on image size/channels, and runs a single
// pass. actual_op/_extraMat support the compound ops (gradient/tophat/
// blackhat) that combine the morph result with an extra input image.
// Returns false when this path cannot handle the request.
1342 static bool ocl_morphSmall( InputArray _src, OutputArray _dst, InputArray _kernel, Point anchor, int borderType,
1343 int op, int actual_op = -1, InputArray _extraMat = noArray())
1345 const ocl::Device & dev = ocl::Device::getDefault();
1346 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), esz = CV_ELEM_SIZE(type);
1347 bool doubleSupport = dev.doubleFPConfig() > 0;
// Bail out on >4 channels, doubles without device FP64 support, or a source
// whose offset/step is not element-aligned.
1349 if (cn > 4 || (!doubleSupport && depth == CV_64F) ||
1350 _src.offset() % esz != 0 || _src.step() % esz != 0)
1353 bool haveExtraMat = !_extraMat.empty();
// Ops beyond erode/dilate (enum values > 3) require the extra matrix.
1354 CV_Assert(actual_op <= 3 || haveExtraMat);
1356 Size ksize = _kernel.size();
// Default the anchor to the kernel center (guarding conditions elided here).
1358 anchor.x = ksize.width / 2;
1360 anchor.y = ksize.height / 2;
1362 Size size = _src.size(), wholeSize;
1363 bool isolated = (borderType & BORDER_ISOLATED) != 0;
1364 borderType &= ~BORDER_ISOLATED;
1365 int wdepth = depth, wtype = type;
1369 wtype = CV_MAKETYPE(wdepth, cn);
// Index into this table is the (masked) borderType; the 0 slot is the
// unsupported BORDER_WRAP.
1373 const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE",
1374 "BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
1375 size_t globalsize[2] = { size.width, size.height };
1377 UMat src = _src.getUMat();
1381 src.locateROI(wholeSize, ofs);
// With BORDER_ISOLATED only the ROI itself may be read; otherwise the whole
// parent image is available for border pixels.
1384 int h = isolated ? size.height : wholeSize.height;
1385 int w = isolated ? size.width : wholeSize.width;
1386 if (w < ksize.width || h < ksize.height)
1389 // Figure out what vector size to use for loading the pixels.
1390 int pxLoadNumPixels = cn != 1 || size.width % 4 ? 1 : 4;
1391 int pxLoadVecSize = cn * pxLoadNumPixels;
1393 // Figure out how many pixels per work item to compute in X and Y
1394 // directions. Too many and we run out of registers.
1395 int pxPerWorkItemX = 1, pxPerWorkItemY = 1;
1396 if (cn <= 2 && ksize.width <= 4 && ksize.height <= 4)
// Nested ternary: pick the largest of 8/4/2/1 that divides the width evenly.
1398 pxPerWorkItemX = size.width % 8 ? size.width % 4 ? size.width % 2 ? 1 : 2 : 4 : 8;
1399 pxPerWorkItemY = size.height % 2 ? 1 : 2;
1401 else if (cn < 4 || (ksize.width <= 4 && ksize.height <= 4))
1403 pxPerWorkItemX = size.width % 2 ? 1 : 2;
1404 pxPerWorkItemY = size.height % 2 ? 1 : 2;
1406 globalsize[0] = size.width / pxPerWorkItemX;
1407 globalsize[1] = size.height / pxPerWorkItemY;
1409 // Need some padding in the private array for pixels
1410 int privDataWidth = ROUNDUP(pxPerWorkItemX + ksize.width - 1, pxLoadNumPixels);
1412 // Make the global size a nice round number so the runtime can pick
1413 // from reasonable choices for the workgroup size
1414 const int wgRound = 256;
1415 globalsize[0] = ROUNDUP(globalsize[0], wgRound);
// Unroll the structuring element into a PROCESS(y,x) macro invocation per
// non-zero tap; this string is injected into the kernel build options.
1423 _kernel.getMat().convertTo(kernel8u, CV_8U);
1424 for (int y = 0; y < kernel8u.rows; ++y)
1425 for (int x = 0; x < kernel8u.cols; ++x)
1426 if (kernel8u.at<uchar>(y, x) != 0)
1427 processing += format("PROCESS(%d,%d)", y, x);
// Slots 2 and 3 are NULL: MORPH_OPEN/MORPH_CLOSE are composed from
// erode+dilate and never reach this kernel directly.
1430 static const char * const op2str[] = { "OP_ERODE", "OP_DILATE", NULL, NULL, "OP_GRADIENT", "OP_TOPHAT", "OP_BLACKHAT" };
1431 String opts = format("-D cn=%d "
1432 "-D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d "
1433 "-D PX_LOAD_VEC_SIZE=%d -D PX_LOAD_NUM_PX=%d -D DEPTH_%d "
1434 "-D PX_PER_WI_X=%d -D PX_PER_WI_Y=%d -D PRIV_DATA_WIDTH=%d -D %s -D %s "
1435 "-D PX_LOAD_X_ITERATIONS=%d -D PX_LOAD_Y_ITERATIONS=%d "
1436 "-D srcT=%s -D srcT1=%s -D dstT=srcT -D dstT1=srcT1 -D WT=%s -D WT1=%s "
1437 "-D convertToWT=%s -D convertToDstT=%s -D PROCESS_ELEM_=%s -D %s%s",
1438 cn, anchor.x, anchor.y, ksize.width, ksize.height,
1439 pxLoadVecSize, pxLoadNumPixels, depth,
1440 pxPerWorkItemX, pxPerWorkItemY, privDataWidth, borderMap[borderType],
1441 isolated ? "BORDER_ISOLATED" : "NO_BORDER_ISOLATED",
1442 privDataWidth / pxLoadNumPixels, pxPerWorkItemY + ksize.height - 1,
1443 ocl::typeToStr(type), ocl::typeToStr(depth),
1444 haveExtraMat ? ocl::typeToStr(wtype):"srcT",//to prevent overflow - WT
1445 haveExtraMat ? ocl::typeToStr(wdepth):"srcT1",//to prevent overflow - WT1
1446 haveExtraMat ? ocl::convertTypeStr(depth, wdepth, cn, cvt[0]) : "noconvert",//to prevent overflow - src to WT
1447 haveExtraMat ? ocl::convertTypeStr(wdepth, depth, cn, cvt[1]) : "noconvert",//to prevent overflow - WT to dst
1448 processing.c_str(), op2str[op],
1449 actual_op == op ? "" : cv::format(" -D %s", op2str[actual_op]).c_str());
1451 ocl::Kernel kernel("filterSmall", cv::ocl::imgproc::filterSmall_oclsrc, opts);
1455 _dst.create(size, type);
1456 UMat dst = _dst.getUMat();
// Expand src to its whole parent image, copy/adjust (elided), then restore
// the ROIs on both src and the working `source` so offsets stay consistent.
1464 int cols = src.cols, rows = src.rows;
1465 src.locateROI(wholeSize, ofs);
1466 src.adjustROI(ofs.y, wholeSize.height - rows - ofs.y, ofs.x, wholeSize.width - cols - ofs.x);
1469 src.adjustROI(-ofs.y, -wholeSize.height + rows + ofs.y, -ofs.x, -wholeSize.width + cols + ofs.x);
1470 source.adjustROI(-ofs.y, -wholeSize.height + rows + ofs.y, -ofs.x, -wholeSize.width + cols + ofs.x);
1471 source.locateROI(wholeSize, ofs);
1474 UMat extraMat = _extraMat.getUMat();
// Bind kernel arguments: source buffer + step, ROI offset, readable extent,
// destination, and (when present) the extra matrix.
1476 int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(source));
1477 idxArg = kernel.set(idxArg, (int)source.step);
1478 int srcOffsetX = (int)((source.offset % source.step) / source.elemSize());
1479 int srcOffsetY = (int)(source.offset / source.step);
1480 int srcEndX = isolated ? srcOffsetX + size.width : wholeSize.width;
1481 int srcEndY = isolated ? srcOffsetY + size.height : wholeSize.height;
1482 idxArg = kernel.set(idxArg, srcOffsetX);
1483 idxArg = kernel.set(idxArg, srcOffsetY);
1484 idxArg = kernel.set(idxArg, srcEndX);
1485 idxArg = kernel.set(idxArg, srcEndY);
1486 idxArg = kernel.set(idxArg, ocl::KernelArg::WriteOnly(dst));
1490 idxArg = kernel.set(idxArg, ocl::KernelArg::ReadOnlyNoSize(extraMat));
1493 return kernel.run(2, globalsize, NULL, false);
// General OpenCL erode/dilate driver. Normalizes the kernel/iterations,
// tries the small-kernel fast path first, then builds one "morph" program
// per iteration and ping-pongs between source and destination buffers.
// actual_op/_extraMat let the final iteration fuse a compound op
// (gradient/tophat/blackhat). Returns false to request a CPU fallback.
1497 static bool ocl_morphOp(InputArray _src, OutputArray _dst, InputArray _kernel,
1498 Point anchor, int iterations, int op, int borderType,
1499 const Scalar &, int actual_op = -1, InputArray _extraMat = noArray())
1501 const ocl::Device & dev = ocl::Device::getDefault();
1502 int type = _src.type(), depth = CV_MAT_DEPTH(type),
1503 cn = CV_MAT_CN(type), esz = CV_ELEM_SIZE(type);
1504 Mat kernel = _kernel.getMat();
1505 Size ksize = !kernel.empty() ? kernel.size() : Size(3, 3), ssize = _src.size();
1507 bool doubleSupport = dev.doubleFPConfig() > 0;
// This path only implements BORDER_CONSTANT; doubles need device FP64.
1508 if ((depth == CV_64F && !doubleSupport) || borderType != BORDER_CONSTANT)
1510 bool haveExtraMat = !_extraMat.empty();
1512 CV_Assert(actual_op <= 3 || haveExtraMat);
// Empty kernel: substitute the enlarged rectangle equivalent to iterating
// a 3x3 rectangle `iterations` times (guard condition elided above).
1516 kernel = getStructuringElement(MORPH_RECT, Size(1+iterations*2,1+iterations*2));
1517 anchor = Point(iterations, iterations);
1520 else if( iterations > 1 && countNonZero(kernel) == kernel.rows*kernel.cols )
// Fully-populated kernel: fold the iterations into one bigger rectangle.
1522 anchor = Point(anchor.x*iterations, anchor.y*iterations);
1523 kernel = getStructuringElement(MORPH_RECT,
1524 Size(ksize.width + (iterations-1)*(ksize.width-1),
1525 ksize.height + (iterations-1)*(ksize.height-1)),
1530 // try to use OpenCL kernel adopted for small morph kernel
1531 if (dev.isIntel() && !(dev.type() & ocl::Device::TYPE_CPU) &&
1532 ((ksize.width < 5 && ksize.height < 5 && esz <= 4) ||
1533 (ksize.width == 5 && ksize.height == 5 && cn == 1)) &&
1534 (iterations == 1) && cn == 1)
1536 if (ocl_morphSmall(_src, _dst, kernel, anchor, borderType, op, actual_op, _extraMat))
1540 if (iterations == 0 || kernel.rows*kernel.cols == 1)
// Work-group shape differs per device class (selection guard elided).
1547 size_t localThreads[2] = { 16, 8 };
1549 size_t localThreads[2] = { 16, 16 };
1551 size_t globalThreads[2] = { ssize.width, ssize.height };
// Compound ops use a smaller 4x4 group (condition context partly elided).
1554 if( actual_op != MORPH_ERODE && actual_op != MORPH_DILATE )
1555 localThreads[0] = localThreads[1] = 4;
// The kernel caches a (local+ksize-1)^2 tile in local memory; reject shapes
// where that tile is more than twice the work-group size.
1558 if (localThreads[0]*localThreads[1] * 2 < (localThreads[0] + ksize.width - 1) * (localThreads[1] + ksize.height - 1))
// Unroll non-zero structuring-element taps into PROCESS(y,x) build options.
1564 kernel.convertTo(kernel8u, CV_8U);
1565 for (int y = 0; y < kernel8u.rows; ++y)
1566 for (int x = 0; x < kernel8u.cols; ++x)
1567 if (kernel8u.at<uchar>(y, x) != 0)
1568 processing += format("PROCESS(%d,%d)", y, x);
1570 static const char * const op2str[] = { "OP_ERODE", "OP_DILATE", NULL, NULL, "OP_GRADIENT", "OP_TOPHAT", "OP_BLACKHAT" };
// Work type is at least float; 3-channel scalars are padded to 4.
1573 int wdepth = std::max(depth, CV_32F), scalarcn = cn == 3 ? 4 : cn;
// Build one program per iteration: only the last one may carry the fused
// compound op define (current_op).
1578 std::vector<ocl::Kernel> kernels(iterations);
1579 for (int i = 0; i < iterations; i++)
1581 int current_op = iterations == i + 1 ? actual_op : op;
1582 String buildOptions = format("-D RADIUSX=%d -D RADIUSY=%d -D LSIZE0=%d -D LSIZE1=%d -D %s%s"
1583 " -D PROCESS_ELEMS=%s -D T=%s -D DEPTH_%d -D cn=%d -D T1=%s"
1584 " -D convertToWT=%s -D convertToT=%s -D ST=%s%s",
1585 anchor.x, anchor.y, (int)localThreads[0], (int)localThreads[1], op2str[op],
1586 doubleSupport ? " -D DOUBLE_SUPPORT" : "", processing.c_str(),
1587 ocl::typeToStr(type), depth, cn, ocl::typeToStr(depth),
1588 ocl::convertTypeStr(depth, wdepth, cn, cvt[0]),
1589 ocl::convertTypeStr(wdepth, depth, cn, cvt[1]),
1590 ocl::typeToStr(CV_MAKE_TYPE(depth, scalarcn)),
1591 current_op == op ? "" : cv::format(" -D %s", op2str[current_op]).c_str());
1593 kernels[i].create("morph", ocl::imgproc::morph_oclsrc, buildOptions);
1594 if (kernels[i].empty())
1598 UMat src = _src.getUMat(), extraMat = _extraMat.getUMat();
1599 _dst.create(src.size(), src.type());
1600 UMat dst = _dst.getUMat();
// Single-iteration, non-aliased case: run directly from src to dst.
1602 if (iterations == 1 && src.u != dst.u)
1606 src.locateROI(wholesize, ofs);
1607 int wholecols = wholesize.width, wholerows = wholesize.height;
1610 kernels[0].args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnlyNoSize(dst),
1611 ofs.x, ofs.y, src.cols, src.rows, wholecols, wholerows,
1612 ocl::KernelArg::ReadOnlyNoSize(extraMat));
1614 kernels[0].args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnlyNoSize(dst),
1615 ofs.x, ofs.y, src.cols, src.rows, wholecols, wholerows);
1617 return kernels[0].run(2, globalThreads, localThreads, false);
// Multi-iteration loop: each pass reads `source` (previous result) and
// writes dst; ROI expand/copy/restore steps between these lines are elided.
1620 for (int i = 0; i < iterations; i++)
1628 int cols = src.cols, rows = src.rows;
1629 src.locateROI(wholesize, ofs);
1630 src.adjustROI(ofs.y, wholesize.height - rows - ofs.y, ofs.x, wholesize.width - cols - ofs.x);
1636 src.adjustROI(-ofs.y, -wholesize.height + rows + ofs.y, -ofs.x, -wholesize.width + cols + ofs.x);
1637 source.adjustROI(-ofs.y, -wholesize.height + rows + ofs.y, -ofs.x, -wholesize.width + cols + ofs.x);
1641 int cols = dst.cols, rows = dst.rows;
1642 dst.locateROI(wholesize, ofs);
1643 dst.adjustROI(ofs.y, wholesize.height - rows - ofs.y, ofs.x, wholesize.width - cols - ofs.x);
1645 dst.adjustROI(-ofs.y, -wholesize.height + rows + ofs.y, -ofs.x, -wholesize.width + cols + ofs.x);
1646 source.adjustROI(-ofs.y, -wholesize.height + rows + ofs.y, -ofs.x, -wholesize.width + cols + ofs.x);
1648 source.locateROI(wholesize, ofs);
// The extra matrix is only bound on the final iteration (the fused op).
1650 if (haveExtraMat && iterations == i + 1)
1651 kernels[i].args(ocl::KernelArg::ReadOnlyNoSize(source), ocl::KernelArg::WriteOnlyNoSize(dst),
1652 ofs.x, ofs.y, source.cols, source.rows, wholesize.width, wholesize.height,
1653 ocl::KernelArg::ReadOnlyNoSize(extraMat));
1655 kernels[i].args(ocl::KernelArg::ReadOnlyNoSize(source), ocl::KernelArg::WriteOnlyNoSize(dst),
1656 ofs.x, ofs.y, source.cols, source.rows, wholesize.width, wholesize.height);
1658 if (!kernels[i].run(2, globalThreads, localThreads, false))
// Common CPU/OpenCL dispatcher behind cv::erode and cv::dilate: tries the
// OpenCL path, then IPP, and finally runs the generic filter engine in
// parallel stripes. (The _kernel parameter line of the signature is elided
// from this view.)
1667 static void morphOp( int op, InputArray _src, OutputArray _dst,
1669 Point anchor, int iterations,
1670 int borderType, const Scalar& borderValue )
1672 Mat kernel = _kernel.getMat();
1673 Size ksize = !kernel.empty() ? kernel.size() : Size(3,3);
1674 anchor = normalizeAnchor(anchor, ksize);
// OpenCL branch: only for 2D UMat dst, <=4 channels, default constant
// border, plain erode/dilate, and a centered anchor.
1676 CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2 && _src.channels() <= 4 &&
1677 borderType == cv::BORDER_CONSTANT && borderValue == morphologyDefaultBorderValue() &&
1678 (op == MORPH_ERODE || op == MORPH_DILATE) &&
1679 anchor.x == ksize.width >> 1 && anchor.y == ksize.height >> 1,
1680 ocl_morphOp(_src, _dst, kernel, anchor, iterations, op, borderType, borderValue) )
// Zero iterations or a 1x1 kernel degenerate to a copy (handling elided).
1682 if (iterations == 0 || kernel.rows*kernel.cols == 1)
// Same iteration-folding normalization as the IPP/OpenCL paths above.
1690 kernel = getStructuringElement(MORPH_RECT, Size(1+iterations*2,1+iterations*2));
1691 anchor = Point(iterations, iterations);
1694 else if( iterations > 1 && countNonZero(kernel) == kernel.rows*kernel.cols )
1696 anchor = Point(anchor.x*iterations, anchor.y*iterations);
1697 kernel = getStructuringElement(MORPH_RECT,
1698 Size(ksize.width + (iterations-1)*(ksize.width-1),
1699 ksize.height + (iterations-1)*(ksize.height-1)),
1704 #if IPP_VERSION_X100 >= 801
// IPP succeeds -> done (the early return between these lines is elided).
1705 if( IPPMorphOp(op, _src, _dst, kernel, anchor, iterations, borderType, borderValue) )
1709 Mat src = _src.getMat();
1710 _dst.create( src.size(), src.type() );
1711 Mat dst = _dst.getMat();
1714 #if defined HAVE_TEGRA_OPTIMIZATION
1715 if (src.data != dst.data && iterations == 1 && //NOTE: threads are not used for inplace processing
1716 (borderType & BORDER_ISOLATED) == 0 && //TODO: check border types
1717 src.rows >= 64 ) //NOTE: just heuristics
// Generic fallback: split the image into horizontal stripes and run the
// morphology filter on each in parallel.
1721 parallel_for_(Range(0, nStripes),
1722 MorphologyRunner(src, dst, nStripes, iterations, op, kernel, anchor, borderType, borderType, borderValue));
// Public API: erode src with the given structuring element; thin wrapper
// that forwards to the shared morphOp dispatcher with MORPH_ERODE.
1727 void cv::erode( InputArray src, OutputArray dst, InputArray kernel,
1728 Point anchor, int iterations,
1729 int borderType, const Scalar& borderValue )
1731 morphOp( MORPH_ERODE, src, dst, kernel, anchor, iterations, borderType, borderValue );
// Public API: dilate src with the given structuring element; thin wrapper
// that forwards to the shared morphOp dispatcher with MORPH_DILATE.
1735 void cv::dilate( InputArray src, OutputArray dst, InputArray kernel,
1736 Point anchor, int iterations,
1737 int borderType, const Scalar& borderValue )
1739 morphOp( MORPH_DILATE, src, dst, kernel, anchor, iterations, borderType, borderValue );
// OpenCL implementation of morphologyEx: composes each compound operation
// from erode/dilate passes via ocl_morphOp. Gradient/tophat/blackhat pass
// an extra matrix so the final pass fuses the subtraction on the GPU.
// Returns false as soon as any sub-step fails (CPU fallback).
1746 static bool ocl_morphologyEx(InputArray _src, OutputArray _dst, int op,
1747 InputArray kernel, Point anchor, int iterations,
1748 int borderType, const Scalar& borderValue)
1750 _dst.createSameSize(_src, _src.type());
// When dst is a submatrix the intermediate result must be written through
// _dst itself; otherwise a separate temp UMat is used.
1751 bool submat = _dst.isSubmatrix();
1753 _OutputArray _temp = submat ? _dst : _OutputArray(temp);
// The switch over `op` (and the case labels for ERODE/DILATE/OPEN/CLOSE/
// TOPHAT) sits in lines elided from this view.
1758 if (!ocl_morphOp( _src, _dst, kernel, anchor, iterations, MORPH_ERODE, borderType, borderValue ))
1762 if (!ocl_morphOp( _src, _dst, kernel, anchor, iterations, MORPH_DILATE, borderType, borderValue ))
// OPEN: erode into temp, then dilate temp into dst.
1766 if (!ocl_morphOp( _src, _temp, kernel, anchor, iterations, MORPH_ERODE, borderType, borderValue ))
1768 if (!ocl_morphOp( _temp, _dst, kernel, anchor, iterations, MORPH_DILATE, borderType, borderValue ))
// CLOSE: dilate into temp, then erode temp into dst.
1772 if (!ocl_morphOp( _src, _temp, kernel, anchor, iterations, MORPH_DILATE, borderType, borderValue ))
1774 if (!ocl_morphOp( _temp, _dst, kernel, anchor, iterations, MORPH_ERODE, borderType, borderValue ))
1777 case MORPH_GRADIENT:
// GRADIENT: dilate(src) - erode(src); the erosion result is passed as the
// extra matrix so the subtraction is fused into the dilate pass.
1778 if (!ocl_morphOp( _src, temp, kernel, anchor, iterations, MORPH_ERODE, borderType, borderValue ))
1780 if (!ocl_morphOp( _src, _dst, kernel, anchor, iterations, MORPH_DILATE, borderType, borderValue, MORPH_GRADIENT, temp ))
// TOPHAT: src - open(src), with src fused into the final dilate pass.
1784 if (!ocl_morphOp( _src, _temp, kernel, anchor, iterations, MORPH_ERODE, borderType, borderValue ))
1786 if (!ocl_morphOp( _temp, _dst, kernel, anchor, iterations, MORPH_DILATE, borderType, borderValue, MORPH_TOPHAT, _src ))
1789 case MORPH_BLACKHAT:
// BLACKHAT: close(src) - src, with src fused into the final erode pass.
1790 if (!ocl_morphOp( _src, _temp, kernel, anchor, iterations, MORPH_DILATE, borderType, borderValue ))
1792 if (!ocl_morphOp( _temp, _dst, kernel, anchor, iterations, MORPH_ERODE, borderType, borderValue, MORPH_BLACKHAT, _src ))
1796 CV_Error( CV_StsBadArg, "unknown morphological operation" );
// Public API: advanced morphology (open/close/gradient/tophat/blackhat and
// plain erode/dilate), composed from cv::erode / cv::dilate on the CPU path,
// with an OpenCL fast path tried first.
1806 void cv::morphologyEx( InputArray _src, OutputArray _dst, int op,
1807 InputArray kernel, Point anchor, int iterations,
1808 int borderType, const Scalar& borderValue )
1811 Size ksize = kernel.size();
1812 anchor = normalizeAnchor(anchor, ksize);
// OpenCL branch: UMat dst, 2D, <=4 channels, centered anchor, default
// constant border.
1814 CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2 && _src.channels() <= 4 &&
1815 anchor.x == ksize.width >> 1 && anchor.y == ksize.height >> 1 &&
1816 borderType == cv::BORDER_CONSTANT && borderValue == morphologyDefaultBorderValue(),
1817 ocl_morphologyEx(_src, _dst, op, kernel, anchor, iterations, borderType, borderValue))
1820 Mat src = _src.getMat(), temp;
1821 _dst.create(src.size(), src.type());
1822 Mat dst = _dst.getMat();
// The switch over `op` and its first case labels are in elided lines.
1827 erode( src, dst, kernel, anchor, iterations, borderType, borderValue );
1830 dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
// OPEN = dilate(erode(src)); CLOSE = erode(dilate(src)), done in place.
1833 erode( src, dst, kernel, anchor, iterations, borderType, borderValue );
1834 dilate( dst, dst, kernel, anchor, iterations, borderType, borderValue );
1837 dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
1838 erode( dst, dst, kernel, anchor, iterations, borderType, borderValue );
1840 case CV_MOP_GRADIENT:
// GRADIENT = dilate(src) - erode(src); the subtraction is in elided lines.
1841 erode( src, temp, kernel, anchor, iterations, borderType, borderValue );
1842 dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
// TOPHAT = src - open(src). The src.data != dst.data checks presumably pick
// a safe scratch buffer when the call is in-place -- the alternative branch
// bodies are elided from this view.
1846 if( src.data != dst.data )
1848 erode( src, temp, kernel, anchor, iterations, borderType, borderValue );
1849 dilate( temp, temp, kernel, anchor, iterations, borderType, borderValue );
1852 case CV_MOP_BLACKHAT:
// BLACKHAT = close(src) - src.
1853 if( src.data != dst.data )
1855 dilate( src, temp, kernel, anchor, iterations, borderType, borderValue );
1856 erode( temp, temp, kernel, anchor, iterations, borderType, borderValue );
1860 CV_Error( CV_StsBadArg, "unknown morphological operation" );
// Legacy C API: allocate an IplConvKernel of rows x cols. For
// CV_SHAPE_CUSTOM the caller-provided `values` array is copied verbatim;
// otherwise the element mask is generated via cv::getStructuringElement.
// Caller frees the result with cvReleaseStructuringElement.
1864 CV_IMPL IplConvKernel *
1865 cvCreateStructuringElementEx( int cols, int rows,
1866 int anchorX, int anchorY,
1867 int shape, int *values )
1869 cv::Size ksize = cv::Size(cols, rows);
1870 cv::Point anchor = cv::Point(anchorX, anchorY);
// The anchor must lie inside the kernel; custom shapes require values.
1871 CV_Assert( cols > 0 && rows > 0 && anchor.inside(cv::Rect(0,0,cols,rows)) &&
1872 (shape != CV_SHAPE_CUSTOM || values != 0));
1874 int i, size = rows * cols;
// Header and the values array are carved from one allocation; the extra
// 32 bytes presumably leave room for alignment -- confirm against cvAlloc.
1875 int element_size = sizeof(IplConvKernel) + size*sizeof(int);
1876 IplConvKernel *element = (IplConvKernel*)cvAlloc(element_size + 32);
1878 element->nCols = cols;
1879 element->nRows = rows;
1880 element->anchorX = anchorX;
1881 element->anchorY = anchorY;
// Historical quirk: the shape code is stored in the nShiftR field.
1882 element->nShiftR = shape < CV_SHAPE_ELLIPSE ? shape : CV_SHAPE_CUSTOM;
1883 element->values = (int*)(element + 1);
1885 if( shape == CV_SHAPE_CUSTOM )
1887 for( i = 0; i < size; i++ )
1888 element->values[i] = values[i];
// Non-custom shapes: rasterize the standard element and copy its 0/1 mask.
1892 cv::Mat elem = cv::getStructuringElement(shape, ksize, anchor);
1893 for( i = 0; i < size; i++ )
1894 element->values[i] = elem.ptr()[i];
// Legacy C API: free a structuring element created by
// cvCreateStructuringElementEx and null the caller's pointer. A null
// `element` pointer is an error; the null-check and the cvFree call are in
// lines elided from this view.
1902 cvReleaseStructuringElement( IplConvKernel ** element )
1905 CV_Error( CV_StsNullPtr, "" );
// Convert a legacy IplConvKernel into a CV_8U cv::Mat mask (1 where the
// kernel value is non-zero) and extract its anchor. For a null src the
// anchor defaults to (1,1) -- the null-check and early return sit in lines
// elided from this view, leaving dst presumably empty (default 3x3).
1910 static void convertConvKernel( const IplConvKernel* src, cv::Mat& dst, cv::Point& anchor )
1914 anchor = cv::Point(1,1);
1918 anchor = cv::Point(src->anchorX, src->anchorY);
1919 dst.create(src->nRows, src->nCols, CV_8U);
1921 int i, size = src->nRows*src->nCols;
// Binarize: any non-zero legacy value becomes 1 in the mask.
1922 for( i = 0; i < size; i++ )
1923 dst.ptr()[i] = (uchar)(src->values[i] != 0);
// Legacy C API: erode with an IplConvKernel. Converts the element to a
// cv::Mat mask and forwards to cv::erode with BORDER_REPLICATE (the legacy
// default). src and dst must match in size and type.
1928 cvErode( const CvArr* srcarr, CvArr* dstarr, IplConvKernel* element, int iterations )
1930 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
1931 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
1933 convertConvKernel( element, kernel, anchor );
1934 cv::erode( src, dst, kernel, anchor, iterations, cv::BORDER_REPLICATE );
// Legacy C API: dilate with an IplConvKernel. Mirrors cvErode: convert the
// element, then forward to cv::dilate with BORDER_REPLICATE.
1939 cvDilate( const CvArr* srcarr, CvArr* dstarr, IplConvKernel* element, int iterations )
1941 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
1942 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
1944 convertConvKernel( element, kernel, anchor );
1945 cv::dilate( src, dst, kernel, anchor, iterations, cv::BORDER_REPLICATE );
// Legacy C API: morphologyEx with an IplConvKernel. A null element is
// replaced by a default 3x3 rectangle (created and released locally);
// otherwise the caller's element is used. Forwards to cv::morphologyEx with
// BORDER_REPLICATE. The unnamed third parameter is an ignored legacy temp
// buffer argument.
1950 cvMorphologyEx( const void* srcarr, void* dstarr, void*,
1951 IplConvKernel* element, int op, int iterations )
1953 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
1954 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
1956 IplConvKernel* temp_element = NULL;
// Condition selecting between the two assignments is elided from this view;
// presumably it checks `element == NULL`.
1959 temp_element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT);
1961 temp_element = element;
1963 convertConvKernel( temp_element, kernel, anchor );
// Only the locally created default element is released here (guard elided).
1966 cvReleaseStructuringElement(&temp_element);
1968 cv::morphologyEx( src, dst, op, kernel, anchor, iterations, cv::BORDER_REPLICATE );