/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                        For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/hal/intrin.hpp"
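
/* Reference note (summary, matching the documented cv::threshold semantics): the per-depth
 * kernels below all implement the same scalar definitions,
 *
 *     THRESH_BINARY:      dst(x,y) = src(x,y) > thresh ? maxval   : 0
 *     THRESH_BINARY_INV:  dst(x,y) = src(x,y) > thresh ? 0        : maxval
 *     THRESH_TRUNC:       dst(x,y) = src(x,y) > thresh ? thresh   : src(x,y)
 *     THRESH_TOZERO:      dst(x,y) = src(x,y) > thresh ? src(x,y) : 0
 *     THRESH_TOZERO_INV:  dst(x,y) = src(x,y) > thresh ? 0        : src(x,y)
 *
 * and dispatch first to Tegra/IPP fast paths when available, then to a universal-intrinsics
 * SIMD loop, and finally to a scalar fallback.
 */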
static void
thresh_8u( const Mat& _src, Mat& _dst, uchar thresh, uchar maxval, int type )
    Size roi = _src.size();
    roi.width *= _src.channels();
    size_t src_step = _src.step;
    size_t dst_step = _dst.step;

    if( _src.isContinuous() && _dst.isContinuous() )
        roi.width *= roi.height;
        src_step = dst_step = roi.width;

#ifdef HAVE_TEGRA_OPTIMIZATION
    if (tegra::useTegra() && tegra::thresh_8u(_src, _dst, roi.width, roi.height, thresh, maxval, type))

        IppiSize sz = { roi.width, roi.height };
        CV_SUPPRESS_DEPRECATED_START
            if (_src.data == _dst.data && CV_INSTRUMENT_FUN_IPP(ippiThreshold_GT_8u_C1IR, _dst.ptr(), (int)dst_step, sz, thresh) >= 0)
                CV_IMPL_ADD(CV_IMPL_IPP);
            if (CV_INSTRUMENT_FUN_IPP(ippiThreshold_GT_8u_C1R, _src.ptr(), (int)src_step, _dst.ptr(), (int)dst_step, sz, thresh) >= 0)
                CV_IMPL_ADD(CV_IMPL_IPP);
            if (_src.data == _dst.data && CV_INSTRUMENT_FUN_IPP(ippiThreshold_LTVal_8u_C1IR, _dst.ptr(), (int)dst_step, sz, thresh+1, 0) >= 0)
                CV_IMPL_ADD(CV_IMPL_IPP);
            if (CV_INSTRUMENT_FUN_IPP(ippiThreshold_LTVal_8u_C1R, _src.ptr(), (int)src_step, _dst.ptr(), (int)dst_step, sz, thresh + 1, 0) >= 0)
                CV_IMPL_ADD(CV_IMPL_IPP);
        case THRESH_TOZERO_INV:
            if (_src.data == _dst.data && CV_INSTRUMENT_FUN_IPP(ippiThreshold_GTVal_8u_C1IR, _dst.ptr(), (int)dst_step, sz, thresh, 0) >= 0)
                CV_IMPL_ADD(CV_IMPL_IPP);
            if (CV_INSTRUMENT_FUN_IPP(ippiThreshold_GTVal_8u_C1R, _src.ptr(), (int)src_step, _dst.ptr(), (int)dst_step, sz, thresh, 0) >= 0)
                CV_IMPL_ADD(CV_IMPL_IPP);
        CV_SUPPRESS_DEPRECATED_END

    const uchar* src = _src.ptr();
    uchar* dst = _dst.ptr();

    bool useSIMD = checkHardwareSupport( CV_CPU_SSE2 ) || checkHardwareSupport( CV_CPU_NEON );
    v_uint8x16 thresh_u = v_setall_u8( thresh );
    v_uint8x16 maxval16 = v_setall_u8( maxval );

        for( int i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j <= roi.width - 16; j += 16 )
                v0 = v_load( src + j );
                v_store( dst + j, v0 );
    case THRESH_BINARY_INV:
        for( int i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j <= roi.width - 16; j += 16 )
                v0 = v_load( src + j );
                v_store( dst + j, v0 );
        for( int i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j <= roi.width - 16; j += 16 )
                v0 = v_load( src + j );
                v0 = v0 - ( v0 - thresh_u );
                v_store( dst + j, v0 );
        for( int i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j <= roi.width - 16; j += 16 )
                v0 = v_load( src + j );
                v0 = ( thresh_u < v0 ) & v0;
                v_store( dst + j, v0 );
    case THRESH_TOZERO_INV:
        for( int i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j <= roi.width - 16; j += 16 )
                v0 = v_load( src + j );
                v0 = ( v0 <= thresh_u ) & v0;
                v_store( dst + j, v0 );

    if( j_scalar < roi.width )
        const int thresh_pivot = thresh + 1;
            memset(tab, 0, thresh_pivot);
            if (thresh_pivot < 256) {
                memset(tab + thresh_pivot, maxval, 256 - thresh_pivot);
        case THRESH_BINARY_INV:
            memset(tab, maxval, thresh_pivot);
            if (thresh_pivot < 256) {
                memset(tab + thresh_pivot, 0, 256 - thresh_pivot);
            for( int i = 0; i <= thresh; i++ )
            if (thresh_pivot < 256) {
                memset(tab + thresh_pivot, thresh, 256 - thresh_pivot);
            memset(tab, 0, thresh_pivot);
            for( int i = thresh_pivot; i < 256; i++ )
        case THRESH_TOZERO_INV:
            for( int i = 0; i <= thresh; i++ )
            if (thresh_pivot < 256) {
                memset(tab + thresh_pivot, 0, 256 - thresh_pivot);

        for( int i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
#if CV_ENABLE_UNROLLED
            for( ; j <= roi.width - 4; j += 4 )
                uchar t0 = tab[src[j]];
                uchar t1 = tab[src[j+1]];
            for( ; j < roi.width; j++ )
                dst[j] = tab[src[j]];
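
/* Notes on the 8-bit vector path above:
 *  - THRESH_TRUNC relies on unsigned saturating arithmetic: for uchar lanes,
 *    v0 - (v0 - thresh_u) evaluates to min(v0, thresh), because the inner
 *    subtraction saturates to 0 whenever v0 < thresh.
 *  - The comparison intrinsics produce all-ones/all-zeros lanes, so expressions
 *    such as (thresh_u < v0) & v0 keep only the pixels above the threshold
 *    (THRESH_TOZERO), and (v0 <= thresh_u) & v0 keeps the rest (THRESH_TOZERO_INV).
 *  - Pixels that do not fill a whole 16-lane register fall through to the
 *    table-driven tail: tab[] maps every possible 8-bit value to its thresholded
 *    result, so the remainder loop is a single lookup per pixel.
 */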
static void
thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
    Size roi = _src.size();
    roi.width *= _src.channels();
    const short* src = _src.ptr<short>();
    short* dst = _dst.ptr<short>();
    size_t src_step = _src.step/sizeof(src[0]);
    size_t dst_step = _dst.step/sizeof(dst[0]);

    if( _src.isContinuous() && _dst.isContinuous() )
        roi.width *= roi.height;
        src_step = dst_step = roi.width;

#ifdef HAVE_TEGRA_OPTIMIZATION
    if (tegra::useTegra() && tegra::thresh_16s(_src, _dst, roi.width, roi.height, thresh, maxval, type))

#if defined(HAVE_IPP)
        IppiSize sz = { roi.width, roi.height };
        CV_SUPPRESS_DEPRECATED_START
            if (_src.data == _dst.data && CV_INSTRUMENT_FUN_IPP(ippiThreshold_GT_16s_C1IR, dst, (int)dst_step*sizeof(dst[0]), sz, thresh) >= 0)
                CV_IMPL_ADD(CV_IMPL_IPP);
            if (CV_INSTRUMENT_FUN_IPP(ippiThreshold_GT_16s_C1R, src, (int)src_step*sizeof(src[0]), dst, (int)dst_step*sizeof(dst[0]), sz, thresh) >= 0)
                CV_IMPL_ADD(CV_IMPL_IPP);
            if (_src.data == _dst.data && CV_INSTRUMENT_FUN_IPP(ippiThreshold_LTVal_16s_C1IR, dst, (int)dst_step*sizeof(dst[0]), sz, thresh + 1, 0) >= 0)
                CV_IMPL_ADD(CV_IMPL_IPP);
            if (CV_INSTRUMENT_FUN_IPP(ippiThreshold_LTVal_16s_C1R, src, (int)src_step*sizeof(src[0]), dst, (int)dst_step*sizeof(dst[0]), sz, thresh + 1, 0) >= 0)
                CV_IMPL_ADD(CV_IMPL_IPP);
        case THRESH_TOZERO_INV:
            if (_src.data == _dst.data && CV_INSTRUMENT_FUN_IPP(ippiThreshold_GTVal_16s_C1IR, dst, (int)dst_step*sizeof(dst[0]), sz, thresh, 0) >= 0)
                CV_IMPL_ADD(CV_IMPL_IPP);
            if (CV_INSTRUMENT_FUN_IPP(ippiThreshold_GTVal_16s_C1R, src, (int)src_step*sizeof(src[0]), dst, (int)dst_step*sizeof(dst[0]), sz, thresh, 0) >= 0)
                CV_IMPL_ADD(CV_IMPL_IPP);
        CV_SUPPRESS_DEPRECATED_END

    bool useSIMD = checkHardwareSupport( CV_CPU_SSE2 ) || checkHardwareSupport( CV_CPU_NEON );
    v_int16x8 thresh8 = v_setall_s16( thresh );
    v_int16x8 maxval8 = v_setall_s16( maxval );

        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 16; j += 16 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 8 );
                v_store( dst + j, v0 );
                v_store( dst + j + 8, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = src[j] > thresh ? maxval : 0;
    case THRESH_BINARY_INV:
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 16; j += 16 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 8 );
                v_store( dst + j, v0 );
                v_store( dst + j + 8, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = src[j] <= thresh ? maxval : 0;
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 16; j += 16 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 8 );
                v0 = v_min( v0, thresh8 );
                v1 = v_min( v1, thresh8 );
                v_store( dst + j, v0 );
                v_store( dst + j + 8, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = std::min( src[j], thresh );
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 16; j += 16 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 8 );
                v0 = ( thresh8 < v0 ) & v0;
                v1 = ( thresh8 < v1 ) & v1;
                v_store( dst + j, v0 );
                v_store( dst + j + 8, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = v > thresh ? v : 0;
    case THRESH_TOZERO_INV:
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 16; j += 16 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 8 );
                v0 = ( v0 <= thresh8 ) & v0;
                v1 = ( v1 <= thresh8 ) & v1;
                v_store( dst + j, v0 );
                v_store( dst + j + 8, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = v <= thresh ? v : 0;
        return CV_Error( CV_StsBadArg, "" );

        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j < roi.width; j++ )
                dst[j] = src[j] > thresh ? maxval : 0;
    case THRESH_BINARY_INV:
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j < roi.width; j++ )
                dst[j] = src[j] <= thresh ? maxval : 0;
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j < roi.width; j++ )
                dst[j] = std::min( src[j], thresh );
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j < roi.width; j++ )
                dst[j] = v > thresh ? v : 0;
    case THRESH_TOZERO_INV:
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j < roi.width; j++ )
                dst[j] = v <= thresh ? v : 0;
        return CV_Error( CV_StsBadArg, "" );
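
/* Note on the 16-bit path above: each SIMD iteration handles 16 shorts through two
 * v_int16x8 registers (v0, v1). Unlike the unsigned 8-bit kernel, THRESH_TRUNC uses
 * v_min() directly, since the saturating-subtraction trick only yields min() for
 * unsigned lanes. The trailing scalar loops finish each row's remainder; the separate
 * all-scalar switch further down covers builds without SIMD support.
 */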
static void
thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
    Size roi = _src.size();
    roi.width *= _src.channels();
    const float* src = _src.ptr<float>();
    float* dst = _dst.ptr<float>();
    size_t src_step = _src.step/sizeof(src[0]);
    size_t dst_step = _dst.step/sizeof(dst[0]);

    if( _src.isContinuous() && _dst.isContinuous() )
        roi.width *= roi.height;

#ifdef HAVE_TEGRA_OPTIMIZATION
    if (tegra::useTegra() && tegra::thresh_32f(_src, _dst, roi.width, roi.height, thresh, maxval, type))

#if defined(HAVE_IPP)
        IppiSize sz = { roi.width, roi.height };
            if (0 <= CV_INSTRUMENT_FUN_IPP(ippiThreshold_GT_32f_C1R, src, (int)src_step*sizeof(src[0]), dst, (int)dst_step*sizeof(dst[0]), sz, thresh))
                CV_IMPL_ADD(CV_IMPL_IPP);
            if (0 <= CV_INSTRUMENT_FUN_IPP(ippiThreshold_LTVal_32f_C1R, src, (int)src_step*sizeof(src[0]), dst, (int)dst_step*sizeof(dst[0]), sz, thresh + FLT_EPSILON, 0))
                CV_IMPL_ADD(CV_IMPL_IPP);
        case THRESH_TOZERO_INV:
            if (0 <= CV_INSTRUMENT_FUN_IPP(ippiThreshold_GTVal_32f_C1R, src, (int)src_step*sizeof(src[0]), dst, (int)dst_step*sizeof(dst[0]), sz, thresh, 0))
                CV_IMPL_ADD(CV_IMPL_IPP);

    bool useSIMD = checkHardwareSupport( CV_CPU_SSE2 ) || checkHardwareSupport( CV_CPU_NEON );
    v_float32x4 thresh4 = v_setall_f32( thresh );
    v_float32x4 maxval4 = v_setall_f32( maxval );

        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 8; j += 8 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 4 );
                v_store( dst + j, v0 );
                v_store( dst + j + 4, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = src[j] > thresh ? maxval : 0;
    case THRESH_BINARY_INV:
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 8; j += 8 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 4 );
                v_store( dst + j, v0 );
                v_store( dst + j + 4, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = src[j] <= thresh ? maxval : 0;
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 8; j += 8 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 4 );
                v0 = v_min( v0, thresh4 );
                v1 = v_min( v1, thresh4 );
                v_store( dst + j, v0 );
                v_store( dst + j + 4, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = std::min( src[j], thresh );
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 8; j += 8 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 4 );
                v0 = ( thresh4 < v0 ) & v0;
                v1 = ( thresh4 < v1 ) & v1;
                v_store( dst + j, v0 );
                v_store( dst + j + 4, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = v > thresh ? v : 0;
    case THRESH_TOZERO_INV:
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 8; j += 8 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 4 );
                v0 = ( v0 <= thresh4 ) & v0;
                v1 = ( v1 <= thresh4 ) & v1;
                v_store( dst + j, v0 );
                v_store( dst + j + 4, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = v <= thresh ? v : 0;
        return CV_Error( CV_StsBadArg, "" );

        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j < roi.width; j++ )
                dst[j] = src[j] > thresh ? maxval : 0;
    case THRESH_BINARY_INV:
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j < roi.width; j++ )
                dst[j] = src[j] <= thresh ? maxval : 0;
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j < roi.width; j++ )
                dst[j] = std::min( src[j], thresh );
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j < roi.width; j++ )
                dst[j] = v > thresh ? v : 0;
    case THRESH_TOZERO_INV:
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( j = 0; j < roi.width; j++ )
                dst[j] = v <= thresh ? v : 0;
        return CV_Error( CV_StsBadArg, "" );
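
/* Note on the float path above: the IPP shortcut for THRESH_TOZERO uses
 * thresh + FLT_EPSILON as the "less than" bound of ippiThreshold_LTVal, so values
 * equal to thresh are zeroed, matching the strict > comparison used by the scalar
 * and SIMD code.
 */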
static void
thresh_64f(const Mat& _src, Mat& _dst, double thresh, double maxval, int type)
    Size roi = _src.size();
    roi.width *= _src.channels();
    const double* src = _src.ptr<double>();
    double* dst = _dst.ptr<double>();
    size_t src_step = _src.step / sizeof(src[0]);
    size_t dst_step = _dst.step / sizeof(dst[0]);

    if (_src.isContinuous() && _dst.isContinuous())
        roi.width *= roi.height;

    bool useSIMD = checkHardwareSupport( CV_CPU_SSE2 ) || checkHardwareSupport( CV_CPU_NEON );
    v_float64x2 thresh2 = v_setall_f64( thresh );
    v_float64x2 maxval2 = v_setall_f64( maxval );

        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 4; j += 4 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 2 );
                v_store( dst + j, v0 );
                v_store( dst + j + 2, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = src[j] > thresh ? maxval : 0;
    case THRESH_BINARY_INV:
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 4; j += 4 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 2 );
                v_store( dst + j, v0 );
                v_store( dst + j + 2, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = src[j] <= thresh ? maxval : 0;
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 4; j += 4 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 2 );
                v0 = v_min( v0, thresh2 );
                v1 = v_min( v1, thresh2 );
                v_store( dst + j, v0 );
                v_store( dst + j + 2, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = std::min( src[j], thresh );
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 4; j += 4 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 2 );
                v0 = ( thresh2 < v0 ) & v0;
                v1 = ( thresh2 < v1 ) & v1;
                v_store( dst + j, v0 );
                v_store( dst + j + 2, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = v > thresh ? v : 0;
    case THRESH_TOZERO_INV:
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j <= roi.width - 4; j += 4 )
                v0 = v_load( src + j );
                v1 = v_load( src + j + 2 );
                v0 = ( v0 <= thresh2 ) & v0;
                v1 = ( v1 <= thresh2 ) & v1;
                v_store( dst + j, v0 );
                v_store( dst + j + 2, v1 );
            for( ; j < roi.width; j++ )
                dst[j] = v <= thresh ? v : 0;
        return CV_Error(CV_StsBadArg, "");

        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j < roi.width; j++ )
                dst[j] = src[j] > thresh ? maxval : 0;
    case THRESH_BINARY_INV:
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j < roi.width; j++ )
                dst[j] = src[j] <= thresh ? maxval : 0;
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j < roi.width; j++ )
                dst[j] = std::min( src[j], thresh );
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j < roi.width; j++ )
                dst[j] = v > thresh ? v : 0;
    case THRESH_TOZERO_INV:
        for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
            for( ; j < roi.width; j++ )
                dst[j] = v <= thresh ? v : 0;
        return CV_Error(CV_StsBadArg, "");
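
/* Note on the double path above: thresh_64f mirrors the float kernel but works on two
 * doubles per v_float64x2 register (four per iteration) and, in the portion of the
 * file shown here, has no Tegra/IPP shortcut, so it goes straight to the SIMD switch
 * or the scalar fallback.
 */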
static bool ipp_getThreshVal_Otsu_8u( const unsigned char* _src, int step, Size size, unsigned char &thresh)
    CV_INSTRUMENT_REGION_IPP()

#if IPP_VERSION_X100 >= 810
    IppiSize srcSize = { size.width, size.height };
    CV_SUPPRESS_DEPRECATED_START
    ippStatus = CV_INSTRUMENT_FUN_IPP(ippiComputeThreshold_Otsu_8u_C1R, _src, step, srcSize, &thresh);
    CV_SUPPRESS_DEPRECATED_END
    CV_UNUSED(_src); CV_UNUSED(step); CV_UNUSED(size); CV_UNUSED(thresh);

static double
getThreshVal_Otsu_8u( const Mat& _src )
    Size size = _src.size();
    int step = (int) _src.step;
    if( _src.isContinuous() )
        size.width *= size.height;

    unsigned char thresh;
    CV_IPP_RUN(IPP_VERSION_X100 >= 810, ipp_getThreshVal_Otsu_8u(_src.ptr(), step, size, thresh), thresh);

    int i, j, h[N] = {0};
    for( i = 0; i < size.height; i++ )
        const uchar* src = _src.ptr() + step*i;
#if CV_ENABLE_UNROLLED
        for( ; j <= size.width - 4; j += 4 )
            int v0 = src[j], v1 = src[j+1];
            v0 = src[j+2]; v1 = src[j+3];
        for( ; j < size.width; j++ )

    double mu = 0, scale = 1./(size.width*size.height);
    for( i = 0; i < N; i++ )
        mu += i*(double)h[i];

    double mu1 = 0, q1 = 0;
    double max_sigma = 0, max_val = 0;

    for( i = 0; i < N; i++ )
        double p_i, q2, mu2, sigma;

        if( std::min(q1,q2) < FLT_EPSILON || std::max(q1,q2) > 1. - FLT_EPSILON )

        mu1 = (mu1 + i*p_i)/q1;
        mu2 = (mu - q1*mu1)/q2;
        sigma = q1*q2*(mu1 - mu2)*(mu1 - mu2);
        if( sigma > max_sigma )
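
/* Otsu's method, as implemented above: after building the 256-bin histogram h[],
 * the loop sweeps every candidate threshold i and tracks the between-class variance
 *
 *     sigma_b^2(i) = q1(i) * q2(i) * (mu1(i) - mu2(i))^2
 *
 * where q1/q2 are the probabilities of the two classes split at i and mu1/mu2 their
 * means (mu being the global mean). The returned threshold is the i that maximizes
 * sigma_b^2; candidates where either class is (nearly) empty are skipped via the
 * FLT_EPSILON guard.
 */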
static double
getThreshVal_Triangle_8u( const Mat& _src )
    Size size = _src.size();
    int step = (int) _src.step;
    if( _src.isContinuous() )
        size.width *= size.height;

    int i, j, h[N] = {0};
    for( i = 0; i < size.height; i++ )
        const uchar* src = _src.ptr() + step*i;
#if CV_ENABLE_UNROLLED
        for( ; j <= size.width - 4; j += 4 )
            int v0 = src[j], v1 = src[j+1];
            v0 = src[j+2]; v1 = src[j+3];
        for( ; j < size.width; j++ )

    int left_bound = 0, right_bound = 0, max_ind = 0, max = 0;
    bool isflipped = false;

    for( i = 0; i < N; i++ )
    if( left_bound > 0 )

    for( i = N-1; i > 0; i-- )
    if( right_bound < N-1 )

    for( i = 0; i < N; i++ )

    if( max_ind - left_bound < right_bound - max_ind )
            temp = h[i]; h[i] = h[j]; h[j] = temp;
        left_bound = N-1-right_bound;
        max_ind = N-1-max_ind;

    double thresh = left_bound;
    double a, b, dist = 0, tempdist;

    /*
     * We do not need to compute the precise distance here. The distance is maximized,
     * so some constants can be omitted. This speeds up the computation a bit.
     */
    a = max; b = left_bound - max_ind;
    for( i = left_bound+1; i <= max_ind; i++ )
        tempdist = a*i + b*h[i];
        if( tempdist > dist )

        thresh = N-1-thresh;
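
/* Triangle method, as implemented above: the algorithm locates the histogram peak
 * (value max at bin max_ind) and the far end of the occupied range, conceptually
 * draws a line between them, and selects the bin whose histogram point lies furthest
 * from that line. Because only the argmax is needed, the distance is evaluated up to
 * a constant factor as a*i + b*h[i]. When the peak sits closer to the bright end,
 * the histogram is flipped first (isflipped) and the resulting threshold is mirrored
 * back with thresh = N-1-thresh.
 */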
class ThresholdRunner : public ParallelLoopBody
    ThresholdRunner(Mat _src, Mat _dst, double _thresh, double _maxval, int _thresholdType)
        thresholdType = _thresholdType;

    void operator () ( const Range& range ) const
        int row0 = range.start;
        int row1 = range.end;

        Mat srcStripe = src.rowRange(row0, row1);
        Mat dstStripe = dst.rowRange(row0, row1);

        if (srcStripe.depth() == CV_8U)
            thresh_8u( srcStripe, dstStripe, (uchar)thresh, (uchar)maxval, thresholdType );
        else if( srcStripe.depth() == CV_16S )
            thresh_16s( srcStripe, dstStripe, (short)thresh, (short)maxval, thresholdType );
        else if( srcStripe.depth() == CV_32F )
            thresh_32f( srcStripe, dstStripe, (float)thresh, (float)maxval, thresholdType );
        else if( srcStripe.depth() == CV_64F )
            thresh_64f(srcStripe, dstStripe, thresh, maxval, thresholdType);
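
// ThresholdRunner adapts the per-depth kernels above to cv::parallel_for_: each
// invocation receives a row Range, takes the matching stripes of src and dst, and
// forwards them with the threshold parameters narrowed to the stripe's depth.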
static bool ocl_threshold( InputArray _src, OutputArray _dst, double & thresh, double maxval, int thresh_type )
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type),
        kercn = ocl::predictOptimalVectorWidth(_src, _dst), ktype = CV_MAKE_TYPE(depth, kercn);
    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;

    if ( !(thresh_type == THRESH_BINARY || thresh_type == THRESH_BINARY_INV || thresh_type == THRESH_TRUNC ||
           thresh_type == THRESH_TOZERO || thresh_type == THRESH_TOZERO_INV) ||
         (!doubleSupport && depth == CV_64F))

    const char * const thresholdMap[] = { "THRESH_BINARY", "THRESH_BINARY_INV", "THRESH_TRUNC",
                                          "THRESH_TOZERO", "THRESH_TOZERO_INV" };
    ocl::Device dev = ocl::Device::getDefault();
    int stride_size = dev.isIntel() && (dev.type() & ocl::Device::TYPE_GPU) ? 4 : 1;

    ocl::Kernel k("threshold", ocl::imgproc::threshold_oclsrc,
                  format("-D %s -D T=%s -D T1=%s -D STRIDE_SIZE=%d%s", thresholdMap[thresh_type],
                         ocl::typeToStr(ktype), ocl::typeToStr(depth), stride_size,
                         doubleSupport ? " -D DOUBLE_SUPPORT" : ""));

    UMat src = _src.getUMat();
    _dst.create(src.size(), type);
    UMat dst = _dst.getUMat();

    if (depth <= CV_32S)
        thresh = cvFloor(thresh);

    const double min_vals[] = { 0, CHAR_MIN, 0, SHRT_MIN, INT_MIN, -FLT_MAX, -DBL_MAX, 0 };
    double min_val = min_vals[depth];

    k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnly(dst, cn, kercn),
           ocl::KernelArg::Constant(Mat(1, 1, depth, Scalar::all(thresh))),
           ocl::KernelArg::Constant(Mat(1, 1, depth, Scalar::all(maxval))),
           ocl::KernelArg::Constant(Mat(1, 1, depth, Scalar::all(min_val))));

    size_t globalsize[2] = { (size_t)dst.cols * cn / kercn, (size_t)dst.rows };
    globalsize[1] = (globalsize[1] + stride_size - 1) / stride_size;
    return k.run(2, globalsize, NULL, false);
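
// Launch geometry for the OpenCL kernel above: each work item covers kercn
// channels/pixels horizontally, and on Intel GPUs STRIDE_SIZE (= 4) rows vertically,
// which is why globalsize[1] is divided (rounded up) by stride_size before k.run().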
double cv::threshold( InputArray _src, OutputArray _dst, double thresh, double maxval, int type )
    CV_INSTRUMENT_REGION()

    CV_OCL_RUN_(_src.dims() <= 2 && _dst.isUMat(),
                ocl_threshold(_src, _dst, thresh, maxval, type), thresh)

    Mat src = _src.getMat();
    int automatic_thresh = (type & ~CV_THRESH_MASK);
    type &= THRESH_MASK;

    CV_Assert( automatic_thresh != (CV_THRESH_OTSU | CV_THRESH_TRIANGLE) );
    if( automatic_thresh == CV_THRESH_OTSU )
        CV_Assert( src.type() == CV_8UC1 );
        thresh = getThreshVal_Otsu_8u( src );
    else if( automatic_thresh == CV_THRESH_TRIANGLE )
        CV_Assert( src.type() == CV_8UC1 );
        thresh = getThreshVal_Triangle_8u( src );

    _dst.create( src.size(), src.type() );
    Mat dst = _dst.getMat();

    if( src.depth() == CV_8U )
        int ithresh = cvFloor(thresh);
        int imaxval = cvRound(maxval);
        if( type == THRESH_TRUNC )
            imaxval = saturate_cast<uchar>(imaxval);

        if( ithresh < 0 || ithresh >= 255 )
            if( type == THRESH_BINARY || type == THRESH_BINARY_INV ||
                ((type == THRESH_TRUNC || type == THRESH_TOZERO_INV) && ithresh < 0) ||
                (type == THRESH_TOZERO && ithresh >= 255) )
                int v = type == THRESH_BINARY ? (ithresh >= 255 ? 0 : imaxval) :
                        type == THRESH_BINARY_INV ? (ithresh >= 255 ? imaxval : 0) :
                        /*type == THRESH_TRUNC ? imaxval :*/ 0;
    else if( src.depth() == CV_16S )
        int ithresh = cvFloor(thresh);
        int imaxval = cvRound(maxval);
        if( type == THRESH_TRUNC )
            imaxval = saturate_cast<short>(imaxval);

        if( ithresh < SHRT_MIN || ithresh >= SHRT_MAX )
            if( type == THRESH_BINARY || type == THRESH_BINARY_INV ||
                ((type == THRESH_TRUNC || type == THRESH_TOZERO_INV) && ithresh < SHRT_MIN) ||
                (type == THRESH_TOZERO && ithresh >= SHRT_MAX) )
                int v = type == THRESH_BINARY ? (ithresh >= SHRT_MAX ? 0 : imaxval) :
                        type == THRESH_BINARY_INV ? (ithresh >= SHRT_MAX ? imaxval : 0) :
                        /*type == THRESH_TRUNC ? imaxval :*/ 0;
    else if( src.depth() == CV_32F )
    else if( src.depth() == CV_64F )
    else
        CV_Error( CV_StsUnsupportedFormat, "" );

    parallel_for_(Range(0, dst.rows),
                  ThresholdRunner(src, dst, thresh, maxval, type),
                  dst.total()/(double)(1<<16));
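
/* Usage sketch (caller side, not part of this file; "gray" is an assumed CV_8UC1 input):
 *
 *     cv::Mat gray = ..., bw;
 *     double t = cv::threshold(gray, bw, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
 *
 * The returned value t is the threshold actually applied (here chosen by Otsu's method),
 * which is why both ocl_threshold and the automatic-threshold branches above update the
 * local thresh before the per-depth kernels run.
 */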
void cv::adaptiveThreshold( InputArray _src, OutputArray _dst, double maxValue,
                            int method, int type, int blockSize, double delta )
    CV_INSTRUMENT_REGION()

    Mat src = _src.getMat();
    CV_Assert( src.type() == CV_8UC1 );
    CV_Assert( blockSize % 2 == 1 && blockSize > 1 );
    Size size = src.size();

    _dst.create( size, src.type() );
    Mat dst = _dst.getMat();

    if( src.data != dst.data )

    if (method == ADAPTIVE_THRESH_MEAN_C)
        boxFilter( src, mean, src.type(), Size(blockSize, blockSize),
                   Point(-1,-1), true, BORDER_REPLICATE );
    else if (method == ADAPTIVE_THRESH_GAUSSIAN_C)
        Mat srcfloat, meanfloat;
        src.convertTo(srcfloat, CV_32F);
        GaussianBlur(srcfloat, meanfloat, Size(blockSize, blockSize), 0, 0, BORDER_REPLICATE);
        meanfloat.convertTo(mean, src.type());
    else
        CV_Error( CV_StsBadFlag, "Unknown/unsupported adaptive threshold method" );

    uchar imaxval = saturate_cast<uchar>(maxValue);
    int idelta = type == THRESH_BINARY ? cvCeil(delta) : cvFloor(delta);

    if( type == CV_THRESH_BINARY )
        for( i = 0; i < 768; i++ )
            tab[i] = (uchar)(i - 255 > -idelta ? imaxval : 0);
    else if( type == CV_THRESH_BINARY_INV )
        for( i = 0; i < 768; i++ )
            tab[i] = (uchar)(i - 255 <= -idelta ? imaxval : 0);
    else
        CV_Error( CV_StsBadFlag, "Unknown/unsupported threshold type" );

    if( src.isContinuous() && mean.isContinuous() && dst.isContinuous() )
        size.width *= size.height;

    for( i = 0; i < size.height; i++ )
        const uchar* sdata = src.ptr(i);
        const uchar* mdata = mean.ptr(i);
        uchar* ddata = dst.ptr(i);

        for( j = 0; j < size.width; j++ )
            ddata[j] = tab[sdata[j] - mdata[j] + 255];
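
/* Usage sketch (caller side, not part of this file; "gray" is an assumed CV_8UC1 input):
 *
 *     cv::Mat bw;
 *     cv::adaptiveThreshold(gray, bw, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C,
 *                           cv::THRESH_BINARY, 11, 2);
 *
 * Each pixel is then compared against the Gaussian-weighted mean of its 11x11
 * neighbourhood minus delta = 2, via the 768-entry tab[] lookup built above
 * (indexed by src - mean + 255, which spans the signed difference range [-255, 255]).
 */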
CV_IMPL double
cvThreshold( const void* srcarr, void* dstarr, double thresh, double maxval, int type )
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst;

    CV_Assert( src.size == dst.size && src.channels() == dst.channels() &&
               (src.depth() == dst.depth() || dst.depth() == CV_8U));

    thresh = cv::threshold( src, dst, thresh, maxval, type );
    if( dst0.data != dst.data )
        dst.convertTo( dst0, dst0.depth() );

CV_IMPL void
cvAdaptiveThreshold( const void *srcIm, void *dstIm, double maxValue,
                     int method, int type, int blockSize, double delta )
    cv::Mat src = cv::cvarrToMat(srcIm), dst = cv::cvarrToMat(dstIm);
    CV_Assert( src.size == dst.size && src.type() == dst.type() );
    cv::adaptiveThreshold( src, dst, maxValue, method, type, blockSize, delta );