1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
10 // Intel License Agreement
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000, Intel Corporation, all rights reserved.
14 // Third party copyrights are property of their respective owners.
16 // Redistribution and use in source and binary forms, with or without modification,
17 // are permitted provided that the following conditions are met:
19 // * Redistribution's of source code must retain the above copyright notice,
20 // this list of conditions and the following disclaimer.
22 // * Redistribution's in binary form must reproduce the above copyright notice,
23 // this list of conditions and the following disclaimer in the documentation
24 // and/or other materials provided with the distribution.
26 // * The name of Intel Corporation may not be used to endorse or promote products
27 // derived from this software without specific prior written permission.
29 // This software is provided by the copyright holders and contributors "as is" and
30 // any express or implied warranties, including, but not limited to, the implied
31 // warranties of merchantability and fitness for a particular purpose are disclaimed.
32 // In no event shall the Intel Corporation or contributors be liable for any direct,
33 // indirect, incidental, special, exemplary, or consequential damages
34 // (including, but not limited to, procurement of substitute goods or services;
35 // loss of use, data, or profits; or business interruption) however caused
36 // and on any theory of liability, whether in contract, strict liability,
37 // or tort (including negligence or otherwise) arising in any way out of
38 // the use of this software, even if advised of the possibility of such damage.
42 #include "precomp.hpp"
43 #include "opencl_kernels_imgproc.hpp"
48 ////////////////// Helper functions //////////////////////
// Sentinel byte offset meaning "pixel value maps outside the histogram range".
// Real offsets produced by the 8u lookup tables are always smaller, so callers
// test validity with `offset < OUT_OF_RANGE`.
50 static const size_t OUT_OF_RANGE = (size_t)1 << (sizeof(size_t)*8 - 2);
// Builds per-dimension 256-entry lookup tables for 8-bit images: for each
// possible pixel value j (0..255) and each histogram dimension i,
// tab[i*256 + j] holds the byte offset of the target bin inside a dense Mat
// (or the bin index itself when issparse, since step is then 1), or
// OUT_OF_RANGE when j falls outside the histogram's range.
// The first loop handles the uniform case (linear map idx = floor(j*a + b));
// the second walks the explicit non-uniform bin boundaries in ranges[i].
53 calcHistLookupTables_8u( const Mat& hist, const SparseMat& shist,
54 int dims, const float** ranges, const double* uniranges,
55 bool uniform, bool issparse, std::vector<size_t>& _tab )
57 const int low = 0, high = 256;
59 _tab.resize((high-low)*dims);
60 size_t* tab = &_tab[0];
// Uniform histogram: per-dimension scale/offset taken from uniranges.
64 for( i = 0; i < dims; i++ )
66 double a = uniranges[i*2];
67 double b = uniranges[i*2+1];
68 int sz = !issparse ? hist.size[i] : shist.size(i);
69 size_t step = !issparse ? hist.step[i] : 1;
71 for( j = low; j < high; j++ )
73 int idx = cvFloor(j*a + b);
// Unsigned compare folds the idx < 0 and idx >= sz checks into one.
75 if( (unsigned)idx < (unsigned)sz )
76 written_idx = idx*step;
78 written_idx = OUT_OF_RANGE;
80 tab[i*(high - low) + j - low] = written_idx;
// Non-uniform histogram: advance through the sorted bin edges ranges[i][k],
// marking values below the first edge / above the last as OUT_OF_RANGE.
86 for( i = 0; i < dims; i++ )
88 int limit = std::min(cvCeil(ranges[i][0]), high);
89 int idx = -1, sz = !issparse ? hist.size[i] : shist.size(i);
90 size_t written_idx = OUT_OF_RANGE;
91 size_t step = !issparse ? hist.step[i] : 1;
95 for( ; j < limit; j++ )
96 tab[i*(high - low) + j - low] = written_idx;
98 if( (unsigned)(++idx) < (unsigned)sz )
100 limit = std::min(cvCeil(ranges[i][idx+1]), high);
101 written_idx = idx*step;
105 for( ; j < high; j++ )
106 tab[i*(high - low) + j - low] = OUT_OF_RANGE;
// Resolves the (images, channels) description into flat per-dimension
// iteration data used by all the calcHist_* kernels:
//   ptrs[i]       — pointer to the first sample of dimension i (ptrs[dims] = mask),
//   deltas[i*2]   — per-pixel element stride (the source image's channel count),
//   deltas[i*2+1] — end-of-row pointer adjustment (row gap in elements),
//   uniranges     — per-dimension (scale, offset) pair for the uniform case,
//   imsize        — image size; collapsed to a single "row" when everything
//                   is continuous so the inner loops run once.
115 static void histPrepareImages( const Mat* images, int nimages, const int* channels,
116 const Mat& mask, int dims, const int* histSize,
117 const float** ranges, bool uniform,
118 std::vector<uchar*>& ptrs, std::vector<int>& deltas,
119 Size& imsize, std::vector<double>& uniranges )
122 CV_Assert( channels != 0 || nimages == dims );
124 imsize = images[0].size();
125 int depth = images[0].depth(), esz1 = (int)images[0].elemSize1();
126 bool isContinuous = true;
// One extra slot for the mask pointer / mask step.
128 ptrs.resize(dims + 1);
129 deltas.resize((dims + 1)*2);
131 for( i = 0; i < dims; i++ )
137 CV_Assert( images[j].channels() == 1 );
// channels[] is a global channel index across all input images: find the
// image j and local channel c it refers to.
143 for( j = 0; j < nimages; c -= images[j].channels(), j++ )
144 if( c < images[j].channels() )
146 CV_Assert( j < nimages );
149 CV_Assert( images[j].size() == imsize && images[j].depth() == depth );
150 if( !images[j].isContinuous() )
151 isContinuous = false;
152 ptrs[i] = images[j].data + c*esz1;
153 deltas[i*2] = images[j].channels();
154 deltas[i*2+1] = (int)(images[j].step/esz1 - imsize.width*deltas[i*2]);
159 CV_Assert( mask.size() == imsize && mask.channels() == 1 );
160 isContinuous = isContinuous && mask.isContinuous();
161 ptrs[dims] = mask.data;
163 deltas[dims*2 + 1] = (int)(mask.step/mask.elemSize1());
// All sources continuous: treat the whole image as one long row.
169 imsize.width *= imsize.height;
// ranges == 0 is only allowed for 8-bit data: assume the full [0,256) range.
176 CV_Assert( depth == CV_8U );
178 uniranges.resize( dims*2 );
179 for( i = 0; i < dims; i++ )
181 uniranges[i*2] = histSize[i]/256.;
182 uniranges[i*2+1] = 0;
// Uniform ranges given explicitly: bin = floor(v*t - t*low).
187 uniranges.resize( dims*2 );
188 for( i = 0; i < dims; i++ )
190 CV_Assert( ranges[i] && ranges[i][0] < ranges[i][1] );
191 double low = ranges[i][0], high = ranges[i][1];
192 double t = histSize[i]/(high - low);
194 uniranges[i*2+1] = -t*low;
// Non-uniform: just validate that the bin edges are strictly increasing.
199 for( i = 0; i < dims; i++ )
201 size_t n = histSize[i];
202 for(size_t k = 0; k < n; k++ )
203 CV_Assert( ranges[i][k] < ranges[i][k+1] );
209 ////////////////////////////////// C A L C U L A T E H I S T O G R A M ////////////////////////////////////
// Histogram dimensionality tags (1-D, 2-D, 3-D) used by the dispatch below.
211 enum {one = 1, two, three}; // array elements number
// TBB row-range worker for the 1-D uniform histogram of non-8u data.
// Each thread processes rows [range.begin(), range.end()) and increments
// shared int bins directly via tbb::atomic fetch_and_add — no local copy.
214 class calcHist1D_Invoker
217 calcHist1D_Invoker( const std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
218 Mat& hist, const double* _uniranges, int sz, int dims,
220 : mask_(_ptrs[dims]),
221 mstep_(_deltas[dims*2 + 1]),
222 imageWidth_(imageSize.width),
223 histogramSize_(hist.size()), histogramType_(hist.type()),
224 globalHistogram_((tbb::atomic<int>*)hist.data)
// Unpack the flat layout produced by histPrepareImages:
// [0]=element stride, [1]=row gap; uniranges: [0]=scale a, [1]=offset b.
226 p_[0] = ((T**)&_ptrs[0])[0];
227 step_[0] = (&_deltas[0])[1];
228 d_[0] = (&_deltas[0])[0];
229 a_[0] = (&_uniranges[0])[0];
230 b_[0] = (&_uniranges[0])[1];
234 void operator()( const BlockedRange& range ) const
// Seek to the first pixel of this thread's row range.
236 T* p0 = p_[0] + range.begin() * (step_[0] + imageWidth_*d_[0]);
237 uchar* mask = mask_ + range.begin()*mstep_;
239 for( int row = range.begin(); row < range.end(); row++, p0 += step_[0] )
// Unmasked fast path.
243 for( int x = 0; x < imageWidth_; x++, p0 += d_[0] )
245 int idx = cvFloor(*p0*a_[0] + b_[0]);
246 if( (unsigned)idx < (unsigned)size_[0] )
248 globalHistogram_[idx].fetch_and_add(1);
// Masked path (mask[x] presumably checked on a missing line above).
254 for( int x = 0; x < imageWidth_; x++, p0 += d_[0] )
258 int idx = cvFloor(*p0*a_[0] + b_[0]);
259 if( (unsigned)idx < (unsigned)size_[0] )
261 globalHistogram_[idx].fetch_and_add(1);
// Non-copyable: declared, never defined.
271 calcHist1D_Invoker operator=(const calcHist1D_Invoker&);
284 tbb::atomic<int>* globalHistogram_;
288 class calcHist2D_Invoker
291 calcHist2D_Invoker( const std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
292 Mat& hist, const double* _uniranges, const int* size,
293 int dims, Size& imageSize, size_t* hstep )
294 : mask_(_ptrs[dims]),
295 mstep_(_deltas[dims*2 + 1]),
296 imageWidth_(imageSize.width),
297 histogramSize_(hist.size()), histogramType_(hist.type()),
298 globalHistogram_(hist.data)
300 p_[0] = ((T**)&_ptrs[0])[0]; p_[1] = ((T**)&_ptrs[0])[1];
301 step_[0] = (&_deltas[0])[1]; step_[1] = (&_deltas[0])[3];
302 d_[0] = (&_deltas[0])[0]; d_[1] = (&_deltas[0])[2];
303 a_[0] = (&_uniranges[0])[0]; a_[1] = (&_uniranges[0])[2];
304 b_[0] = (&_uniranges[0])[1]; b_[1] = (&_uniranges[0])[3];
305 size_[0] = size[0]; size_[1] = size[1];
306 hstep_[0] = hstep[0];
309 void operator()(const BlockedRange& range) const
311 T* p0 = p_[0] + range.begin()*(step_[0] + imageWidth_*d_[0]);
312 T* p1 = p_[1] + range.begin()*(step_[1] + imageWidth_*d_[1]);
313 uchar* mask = mask_ + range.begin()*mstep_;
315 for( int row = range.begin(); row < range.end(); row++, p0 += step_[0], p1 += step_[1] )
319 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1] )
321 int idx0 = cvFloor(*p0*a_[0] + b_[0]);
322 int idx1 = cvFloor(*p1*a_[1] + b_[1]);
323 if( (unsigned)idx0 < (unsigned)size_[0] && (unsigned)idx1 < (unsigned)size_[1] )
324 ( (tbb::atomic<int>*)(globalHistogram_ + hstep_[0]*idx0) )[idx1].fetch_and_add(1);
329 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1] )
333 int idx0 = cvFloor(*p0*a_[0] + b_[0]);
334 int idx1 = cvFloor(*p1*a_[1] + b_[1]);
335 if( (unsigned)idx0 < (unsigned)size_[0] && (unsigned)idx1 < (unsigned)size_[1] )
336 ((tbb::atomic<int>*)(globalHistogram_ + hstep_[0]*idx0))[idx1].fetch_and_add(1);
345 calcHist2D_Invoker operator=(const calcHist2D_Invoker&);
355 const int imageWidth_;
359 uchar* globalHistogram_;
// TBB row-range worker for the 3-D uniform histogram of non-8u data.
// Bin address = hstep_[0]*idx0 + hstep_[1]*idx1 byte offset plus column
// idx2; increments are atomic on the shared histogram buffer.
364 class calcHist3D_Invoker
367 calcHist3D_Invoker( const std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
368 Size imsize, Mat& hist, const double* uniranges, int _dims,
369 size_t* hstep, int* size )
370 : mask_(_ptrs[_dims]),
371 mstep_(_deltas[_dims*2 + 1]),
372 imageWidth_(imsize.width),
373 globalHistogram_(hist.data)
// Unpack pointers, strides and per-dimension (a,b) mapping coefficients.
375 p_[0] = ((T**)&_ptrs[0])[0]; p_[1] = ((T**)&_ptrs[0])[1]; p_[2] = ((T**)&_ptrs[0])[2];
376 step_[0] = (&_deltas[0])[1]; step_[1] = (&_deltas[0])[3]; step_[2] = (&_deltas[0])[5];
377 d_[0] = (&_deltas[0])[0]; d_[1] = (&_deltas[0])[2]; d_[2] = (&_deltas[0])[4];
378 a_[0] = uniranges[0]; a_[1] = uniranges[2]; a_[2] = uniranges[4];
379 b_[0] = uniranges[1]; b_[1] = uniranges[3]; b_[2] = uniranges[5];
380 size_[0] = size[0]; size_[1] = size[1]; size_[2] = size[2];
381 hstep_[0] = hstep[0]; hstep_[1] = hstep[1];
384 void operator()( const BlockedRange& range ) const
// Seek all three channel pointers to this thread's first row.
386 T* p0 = p_[0] + range.begin()*(imageWidth_*d_[0] + step_[0]);
387 T* p1 = p_[1] + range.begin()*(imageWidth_*d_[1] + step_[1]);
388 T* p2 = p_[2] + range.begin()*(imageWidth_*d_[2] + step_[2]);
389 uchar* mask = mask_ + range.begin()*mstep_;
391 for( int i = range.begin(); i < range.end(); i++, p0 += step_[0], p1 += step_[1], p2 += step_[2] )
// Unmasked fast path.
395 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1], p2 += d_[2] )
397 int idx0 = cvFloor(*p0*a_[0] + b_[0]);
398 int idx1 = cvFloor(*p1*a_[1] + b_[1]);
399 int idx2 = cvFloor(*p2*a_[2] + b_[2]);
400 if( (unsigned)idx0 < (unsigned)size_[0] &&
401 (unsigned)idx1 < (unsigned)size_[1] &&
402 (unsigned)idx2 < (unsigned)size_[2] )
404 ( (tbb::atomic<int>*)(globalHistogram_ + hstep_[0]*idx0 + hstep_[1]*idx1) )[idx2].fetch_and_add(1);
// Masked path.
410 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1], p2 += d_[2] )
414 int idx0 = cvFloor(*p0*a_[0] + b_[0]);
415 int idx1 = cvFloor(*p1*a_[1] + b_[1]);
416 int idx2 = cvFloor(*p2*a_[2] + b_[2]);
417 if( (unsigned)idx0 < (unsigned)size_[0] &&
418 (unsigned)idx1 < (unsigned)size_[1] &&
419 (unsigned)idx2 < (unsigned)size_[2] )
421 ( (tbb::atomic<int>*)(globalHistogram_ + hstep_[0]*idx0 + hstep_[1]*idx1) )[idx2].fetch_and_add(1);
// Heuristic: parallelize only when image and histogram are large enough
// to amortize thread setup and atomic contention.
430 static bool isFit( const Mat& histogram, const Size imageSize )
432 return ( imageSize.width * imageSize.height >= 320*240
433 && histogram.total() >= 8*8*8 );
// Non-copyable: declared, never defined.
437 calcHist3D_Invoker operator=(const calcHist3D_Invoker&);
449 uchar* globalHistogram_;
// TBB row-range worker for 1-D histograms of 8-bit data.
// Each thread fills a private 256-bin int array (no atomics in the inner
// loop), then merges it into the shared histogram under a mutex, routing
// pixel values through the precomputed tab_ offsets (OUT_OF_RANGE skipped).
452 class CalcHist1D_8uInvoker
455 CalcHist1D_8uInvoker( const std::vector<uchar*>& ptrs, const std::vector<int>& deltas,
456 Size imsize, Mat& hist, int dims, const std::vector<size_t>& tab,
459 mstep_(deltas[dims*2 + 1]),
460 imageWidth_(imsize.width),
462 histSize_(hist.size()), histType_(hist.type()),
463 tab_((size_t*)&tab[0]),
464 histogramWriteLock_(lock),
465 globalHistogram_(hist.data)
467 p_[0] = (&ptrs[0])[0];
468 step_[0] = (&deltas[0])[1];
469 d_[0] = (&deltas[0])[0];
472 void operator()( const BlockedRange& range ) const
// Thread-private histogram: plain ints, merged once at the end.
474 int localHistogram[256] = { 0, };
478 tbb::mutex::scoped_lock lock;
// Row start offset when the width is processed in 4-pixel groups plus a
// tail (xN/tail presumably defined on lines not shown here — verify).
482 int n = (imageWidth_ - 4) / 4 + 1;
483 int tail = imageWidth_ - n*4;
486 p0 += (xN*d_[0] + tail*d_[0] + step_[0]) * range.begin();
490 p0 += (imageWidth_*d_[0] + step_[0]) * range.begin();
491 mask += mstep_*range.begin();
494 for( int i = range.begin(); i < range.end(); i++, p0 += step_[0] )
// Unmasked, unit-stride path: 4x unrolled.
500 for( x = 0; x <= imageWidth_ - 4; x += 4 )
502 int t0 = p0[x], t1 = p0[x+1];
503 localHistogram[t0]++; localHistogram[t1]++;
504 t0 = p0[x+2]; t1 = p0[x+3];
505 localHistogram[t0]++; localHistogram[t1]++;
// Unmasked, strided path: 4x unrolled, stepping p0 by d_[0].
511 for( x = 0; x <= imageWidth_ - 4; x += 4 )
513 int t0 = p0[0], t1 = p0[d_[0]];
514 localHistogram[t0]++; localHistogram[t1]++;
516 t0 = p0[0]; t1 = p0[d_[0]];
517 localHistogram[t0]++; localHistogram[t1]++;
// Remaining tail pixels.
522 for( ; x < imageWidth_; x++, p0 += d_[0] )
524 localHistogram[*p0]++;
// Masked path.
529 for( x = 0; x < imageWidth_; x++, p0 += d_[0] )
533 localHistogram[*p0]++;
// Merge into the shared histogram under the write lock, skipping values
// the lookup table marked as out of range.
540 lock.acquire(*histogramWriteLock_);
541 for(int i = 0; i < 256; i++ )
543 size_t hidx = tab_[i];
544 if( hidx < OUT_OF_RANGE )
546 *(int*)((globalHistogram_ + hidx)) += localHistogram[i];
// Heuristic: small histograms + reasonably large images benefit.
552 static bool isFit( const Mat& histogram, const Size imageSize )
554 return ( histogram.total() >= 8
555 && imageSize.width * imageSize.height >= 160*120 );
569 tbb::mutex* histogramWriteLock_;
570 uchar* globalHistogram_;
// TBB row-range worker for 2-D histograms of 8-bit data.
// Each thread accumulates into a private Mat-sized local histogram using
// the combined tab_ byte offsets (tab_[v0] + tab_[v1 + 256]); a sum of
// OUT_OF_RANGE offsets stays >= OUT_OF_RANGE, so a single compare filters
// invalid pixels. The local copy is merged under a mutex at the end.
573 class CalcHist2D_8uInvoker
576 CalcHist2D_8uInvoker( const std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
577 Size imsize, Mat& hist, int dims, const std::vector<size_t>& _tab,
579 : mask_(_ptrs[dims]),
580 mstep_(_deltas[dims*2 + 1]),
581 imageWidth_(imsize.width),
582 histSize_(hist.size()), histType_(hist.type()),
583 tab_((size_t*)&_tab[0]),
584 histogramWriteLock_(lock),
585 globalHistogram_(hist.data)
587 p_[0] = (uchar*)(&_ptrs[0])[0]; p_[1] = (uchar*)(&_ptrs[0])[1];
588 step_[0] = (&_deltas[0])[1]; step_[1] = (&_deltas[0])[3];
589 d_[0] = (&_deltas[0])[0]; d_[1] = (&_deltas[0])[2];
592 void operator()( const BlockedRange& range ) const
594 uchar* p0 = p_[0] + range.begin()*(step_[0] + imageWidth_*d_[0]);
595 uchar* p1 = p_[1] + range.begin()*(step_[1] + imageWidth_*d_[1]);
596 uchar* mask = mask_ + range.begin()*mstep_;
// Thread-private zeroed histogram of the same size/type as the output.
598 Mat localHist = Mat::zeros(histSize_, histType_);
599 uchar* localHistData = localHist.data;
600 tbb::mutex::scoped_lock lock;
602 for(int i = range.begin(); i < range.end(); i++, p0 += step_[0], p1 += step_[1])
// Unmasked fast path.
606 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1] )
608 size_t idx = tab_[*p0] + tab_[*p1 + 256];
609 if( idx < OUT_OF_RANGE )
611 ++*(int*)(localHistData + idx);
// Masked path.
617 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1] )
620 if( mask[x] && (idx = tab_[*p0] + tab_[*p1 + 256]) < OUT_OF_RANGE )
622 ++*(int*)(localHistData + idx);
// Merge the private histogram into the shared one under the write lock.
629 lock.acquire(*histogramWriteLock_);
630 for(int i = 0; i < histSize_.width*histSize_.height; i++)
632 ((int*)globalHistogram_)[i] += ((int*)localHistData)[i];
// Heuristic thresholds tuned for the per-thread local-Mat merge cost.
637 static bool isFit( const Mat& histogram, const Size imageSize )
639 return ( (histogram.total() > 4*4 && histogram.total() <= 116*116
640 && imageSize.width * imageSize.height >= 320*240)
641 || (histogram.total() > 116*116 && imageSize.width * imageSize.height >= 1280*720) );
654 tbb::mutex* histogramWriteLock_;
655 uchar* globalHistogram_;
// TBB row-range worker for 3-D histograms of 8-bit data.
// A 3-D local histogram would be too large per thread, so increments go
// straight to the shared buffer via tbb::atomic, addressed by the combined
// lookup offset tab_[v0] + tab_[v1+256] + tab_[v2+512].
658 class CalcHist3D_8uInvoker
661 CalcHist3D_8uInvoker( const std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
662 Size imsize, Mat& hist, int dims, const std::vector<size_t>& tab )
663 : mask_(_ptrs[dims]),
664 mstep_(_deltas[dims*2 + 1]),
665 histogramSize_(hist.size.p), histogramType_(hist.type()),
666 imageWidth_(imsize.width),
667 tab_((size_t*)&tab[0]),
668 globalHistogram_(hist.data)
670 p_[0] = (uchar*)(&_ptrs[0])[0]; p_[1] = (uchar*)(&_ptrs[0])[1]; p_[2] = (uchar*)(&_ptrs[0])[2];
671 step_[0] = (&_deltas[0])[1]; step_[1] = (&_deltas[0])[3]; step_[2] = (&_deltas[0])[5];
672 d_[0] = (&_deltas[0])[0]; d_[1] = (&_deltas[0])[2]; d_[2] = (&_deltas[0])[4];
675 void operator()( const BlockedRange& range ) const
677 uchar* p0 = p_[0] + range.begin()*(step_[0] + imageWidth_*d_[0]);
678 uchar* p1 = p_[1] + range.begin()*(step_[1] + imageWidth_*d_[1]);
679 uchar* p2 = p_[2] + range.begin()*(step_[2] + imageWidth_*d_[2]);
680 uchar* mask = mask_ + range.begin()*mstep_;
682 for(int i = range.begin(); i < range.end(); i++, p0 += step_[0], p1 += step_[1], p2 += step_[2] )
// Unmasked fast path: any out-of-range component pushes idx past
// OUT_OF_RANGE, so one compare filters all three dimensions.
686 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1], p2 += d_[2] )
688 size_t idx = tab_[*p0] + tab_[*p1 + 256] + tab_[*p2 + 512];
689 if( idx < OUT_OF_RANGE )
691 ( *(tbb::atomic<int>*)(globalHistogram_ + idx) ).fetch_and_add(1);
// Masked path.
697 for( int x = 0; x < imageWidth_; x++, p0 += d_[0], p1 += d_[1], p2 += d_[2] )
700 if( mask[x] && (idx = tab_[*p0] + tab_[*p1 + 256] + tab_[*p2 + 512]) < OUT_OF_RANGE )
702 (*(tbb::atomic<int>*)(globalHistogram_ + idx)).fetch_and_add(1);
710 static bool isFit( const Mat& histogram, const Size imageSize )
712 return ( histogram.total() >= 128*128*128
713 && imageSize.width * imageSize.width >= 320*240 );
// Shared output histogram buffer (int bins); updated with atomic adds.
726 uchar* globalHistogram_;
// Launches the parallel 2-D 8u histogram over image rows, with one grain
// per default TBB thread and a shared mutex serializing the final merges.
730 callCalcHist2D_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
731 Size imsize, Mat& hist, int dims, std::vector<size_t>& _tab )
733 int grainSize = imsize.height / tbb::task_scheduler_init::default_num_threads();
734 tbb::mutex histogramWriteLock;
736 CalcHist2D_8uInvoker body(_ptrs, _deltas, imsize, hist, dims, _tab, &histogramWriteLock);
737 parallel_for(BlockedRange(0, imsize.height, grainSize), body);
// Launches the parallel 3-D 8u histogram over image rows; no mutex needed
// because the invoker increments shared bins atomically.
741 callCalcHist3D_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
742 Size imsize, Mat& hist, int dims, std::vector<size_t>& _tab )
744 CalcHist3D_8uInvoker body(_ptrs, _deltas, imsize, hist, dims, _tab);
745 parallel_for(BlockedRange(0, imsize.height), body);
// Generic (non-8u) histogram accumulation into an int-typed dense Mat.
// Dispatches by dimensionality: specialized 1-/2-/3-D loops (with optional
// TBB parallelization) for the uniform case, then generic uniform N-D and
// non-uniform N-D fallbacks. Pointer stepping follows the flat layout from
// histPrepareImages (deltas[i*2] = element stride, deltas[i*2+1] = row gap).
749 template<typename T> static void
750 calcHist_( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
751 Size imsize, Mat& hist, int dims, const float** _ranges,
752 const double* _uniranges, bool uniform )
754 T** ptrs = (T**)&_ptrs[0];
755 const int* deltas = &_deltas[0];
756 uchar* H = hist.ptr();
758 const uchar* mask = _ptrs[dims];
759 int mstep = _deltas[dims*2 + 1];
760 int size[CV_MAX_DIM];
761 size_t hstep[CV_MAX_DIM];
763 for( i = 0; i < dims; i++ )
765 size[i] = hist.size[i];
766 hstep[i] = hist.step[i];
771 const double* uniranges = &_uniranges[0];
// --- 1-D uniform: TBB path, then the serial fallback. ---
776 calcHist1D_Invoker<T> body(_ptrs, _deltas, hist, _uniranges, size[0], dims, imsize);
777 parallel_for(BlockedRange(0, imsize.height), body);
779 double a = uniranges[0], b = uniranges[1];
780 int sz = size[0], d0 = deltas[0], step0 = deltas[1];
781 const T* p0 = (const T*)ptrs[0];
783 for( ; imsize.height--; p0 += step0, mask += mstep )
786 for( x = 0; x < imsize.width; x++, p0 += d0 )
788 int idx = cvFloor(*p0*a + b);
789 if( (unsigned)idx < (unsigned)sz )
// Masked variant of the same loop.
793 for( x = 0; x < imsize.width; x++, p0 += d0 )
796 int idx = cvFloor(*p0*a + b);
797 if( (unsigned)idx < (unsigned)sz )
// --- 2-D uniform: TBB path, then the serial fallback. ---
807 calcHist2D_Invoker<T> body(_ptrs, _deltas, hist, _uniranges, size, dims, imsize, hstep);
808 parallel_for(BlockedRange(0, imsize.height), body);
810 double a0 = uniranges[0], b0 = uniranges[1], a1 = uniranges[2], b1 = uniranges[3];
811 int sz0 = size[0], sz1 = size[1];
812 int d0 = deltas[0], step0 = deltas[1],
813 d1 = deltas[2], step1 = deltas[3];
814 size_t hstep0 = hstep[0];
815 const T* p0 = (const T*)ptrs[0];
816 const T* p1 = (const T*)ptrs[1];
818 for( ; imsize.height--; p0 += step0, p1 += step1, mask += mstep )
821 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
823 int idx0 = cvFloor(*p0*a0 + b0);
824 int idx1 = cvFloor(*p1*a1 + b1);
825 if( (unsigned)idx0 < (unsigned)sz0 && (unsigned)idx1 < (unsigned)sz1 )
826 ((int*)(H + hstep0*idx0))[idx1]++;
// Masked variant.
829 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
832 int idx0 = cvFloor(*p0*a0 + b0);
833 int idx1 = cvFloor(*p1*a1 + b1);
834 if( (unsigned)idx0 < (unsigned)sz0 && (unsigned)idx1 < (unsigned)sz1 )
835 ((int*)(H + hstep0*idx0))[idx1]++;
// --- 3-D uniform: TBB only when isFit() says it pays off. ---
844 if( calcHist3D_Invoker<T>::isFit(hist, imsize) )
846 calcHist3D_Invoker<T> body(_ptrs, _deltas, imsize, hist, uniranges, dims, hstep, size);
847 parallel_for(BlockedRange(0, imsize.height), body);
851 double a0 = uniranges[0], b0 = uniranges[1],
852 a1 = uniranges[2], b1 = uniranges[3],
853 a2 = uniranges[4], b2 = uniranges[5];
854 int sz0 = size[0], sz1 = size[1], sz2 = size[2];
855 int d0 = deltas[0], step0 = deltas[1],
856 d1 = deltas[2], step1 = deltas[3],
857 d2 = deltas[4], step2 = deltas[5];
858 size_t hstep0 = hstep[0], hstep1 = hstep[1];
859 const T* p0 = (const T*)ptrs[0];
860 const T* p1 = (const T*)ptrs[1];
861 const T* p2 = (const T*)ptrs[2];
863 for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, mask += mstep )
866 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
868 int idx0 = cvFloor(*p0*a0 + b0);
869 int idx1 = cvFloor(*p1*a1 + b1);
870 int idx2 = cvFloor(*p2*a2 + b2);
871 if( (unsigned)idx0 < (unsigned)sz0 &&
872 (unsigned)idx1 < (unsigned)sz1 &&
873 (unsigned)idx2 < (unsigned)sz2 )
874 ((int*)(H + hstep0*idx0 + hstep1*idx1))[idx2]++;
// Masked variant.
877 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
880 int idx0 = cvFloor(*p0*a0 + b0);
881 int idx1 = cvFloor(*p1*a1 + b1);
882 int idx2 = cvFloor(*p2*a2 + b2);
883 if( (unsigned)idx0 < (unsigned)sz0 &&
884 (unsigned)idx1 < (unsigned)sz1 &&
885 (unsigned)idx2 < (unsigned)sz2 )
886 ((int*)(H + hstep0*idx0 + hstep1*idx1))[idx2]++;
// --- Generic uniform N-D: accumulate a bin byte offset across dims,
// bailing out of the inner loop on the first out-of-range component. ---
892 for( ; imsize.height--; mask += mstep )
895 for( x = 0; x < imsize.width; x++ )
898 for( i = 0; i < dims; i++ )
900 int idx = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]);
901 if( (unsigned)idx >= (unsigned)size[i] )
903 ptrs[i] += deltas[i*2];
904 Hptr += idx*hstep[i];
// Advance the remaining dimension pointers past the rejected pixel.
910 for( ; i < dims; i++ )
911 ptrs[i] += deltas[i*2];
// Masked variant of the generic uniform loop.
914 for( x = 0; x < imsize.width; x++ )
919 for( ; i < dims; i++ )
921 int idx = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]);
922 if( (unsigned)idx >= (unsigned)size[i] )
924 ptrs[i] += deltas[i*2];
925 Hptr += idx*hstep[i];
931 for( ; i < dims; i++ )
932 ptrs[i] += deltas[i*2];
// End of row: apply each dimension's row-gap adjustment.
934 for( i = 0; i < dims; i++ )
935 ptrs[i] += deltas[i*2 + 1];
941 // non-uniform histogram
942 const float* ranges[CV_MAX_DIM];
943 for( i = 0; i < dims; i++ )
944 ranges[i] = &_ranges[i][0];
946 for( ; imsize.height--; mask += mstep )
948 for( x = 0; x < imsize.width; x++ )
953 if( !mask || mask[x] )
954 for( ; i < dims; i++ )
956 float v = (float)*ptrs[i];
957 const float* R = ranges[i];
958 int idx = -1, sz = size[i];
// Linear scan of the bin edges to find which interval holds v.
960 while( v >= R[idx+1] && ++idx < sz )
963 if( (unsigned)idx >= (unsigned)sz )
966 ptrs[i] += deltas[i*2];
967 Hptr += idx*hstep[i];
973 for( ; i < dims; i++ )
974 ptrs[i] += deltas[i*2];
977 for( i = 0; i < dims; i++ )
978 ptrs[i] += deltas[i*2 + 1];
// 8-bit specialization of histogram accumulation: pixel values index the
// precomputed lookup tables from calcHistLookupTables_8u (256 entries per
// dimension), so no floating-point bin computation happens per pixel.
// Dispatches to TBB invokers when the isFit() heuristics allow, otherwise
// runs unrolled serial loops; a generic N-D fallback handles dims > 3.
985 calcHist_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
986 Size imsize, Mat& hist, int dims, const float** _ranges,
987 const double* _uniranges, bool uniform )
989 uchar** ptrs = &_ptrs[0];
990 const int* deltas = &_deltas[0];
991 uchar* H = hist.ptr();
993 const uchar* mask = _ptrs[dims];
994 int mstep = _deltas[dims*2 + 1];
995 std::vector<size_t> _tab;
997 calcHistLookupTables_8u( hist, SparseMat(), dims, _ranges, _uniranges, uniform, false, _tab );
998 const size_t* tab = &_tab[0];
// --- 1-D: TBB path when profitable. ---
1003 if( CalcHist1D_8uInvoker::isFit(hist, imsize) )
1005 int treadsNumber = tbb::task_scheduler_init::default_num_threads();
1006 int grainSize = imsize.height/treadsNumber;
1007 tbb::mutex histogramWriteLock;
1009 CalcHist1D_8uInvoker body(_ptrs, _deltas, imsize, hist, dims, _tab, &histogramWriteLock);
1010 parallel_for(BlockedRange(0, imsize.height, grainSize), body);
// Serial 1-D: count raw byte values into matH, remap through tab at the end.
1014 int d0 = deltas[0], step0 = deltas[1];
1015 int matH[256] = { 0, };
1016 const uchar* p0 = (const uchar*)ptrs[0];
1018 for( ; imsize.height--; p0 += step0, mask += mstep )
// Unit-stride unmasked path, 4x unrolled.
1024 for( x = 0; x <= imsize.width - 4; x += 4 )
1026 int t0 = p0[x], t1 = p0[x+1];
1027 matH[t0]++; matH[t1]++;
1028 t0 = p0[x+2]; t1 = p0[x+3];
1029 matH[t0]++; matH[t1]++;
// Strided unmasked path, 4x unrolled.
1034 for( x = 0; x <= imsize.width - 4; x += 4 )
1036 int t0 = p0[0], t1 = p0[d0];
1037 matH[t0]++; matH[t1]++;
1039 t0 = p0[0]; t1 = p0[d0];
1040 matH[t0]++; matH[t1]++;
// Tail pixels, then the masked path.
1044 for( ; x < imsize.width; x++, p0 += d0 )
1048 for( x = 0; x < imsize.width; x++, p0 += d0 )
// Fold matH counts into the real histogram bins via the lookup table.
1053 for(int i = 0; i < 256; i++ )
1055 size_t hidx = tab[i];
1056 if( hidx < OUT_OF_RANGE )
1057 *(int*)(H + hidx) += matH[i];
1060 else if( dims == 2 )
1063 if( CalcHist2D_8uInvoker::isFit(hist, imsize) )
1065 callCalcHist2D_8u(_ptrs, _deltas, imsize, hist, dims, _tab);
// Serial 2-D: combined lookup offset; one compare filters both dims.
1069 int d0 = deltas[0], step0 = deltas[1],
1070 d1 = deltas[2], step1 = deltas[3];
1071 const uchar* p0 = (const uchar*)ptrs[0];
1072 const uchar* p1 = (const uchar*)ptrs[1];
1074 for( ; imsize.height--; p0 += step0, p1 += step1, mask += mstep )
1077 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
1079 size_t idx = tab[*p0] + tab[*p1 + 256];
1080 if( idx < OUT_OF_RANGE )
// Masked variant.
1084 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
1087 if( mask[x] && (idx = tab[*p0] + tab[*p1 + 256]) < OUT_OF_RANGE )
1092 else if( dims == 3 )
1095 if( CalcHist3D_8uInvoker::isFit(hist, imsize) )
1097 callCalcHist3D_8u(_ptrs, _deltas, imsize, hist, dims, _tab);
// Serial 3-D.
1101 int d0 = deltas[0], step0 = deltas[1],
1102 d1 = deltas[2], step1 = deltas[3],
1103 d2 = deltas[4], step2 = deltas[5];
1105 const uchar* p0 = (const uchar*)ptrs[0];
1106 const uchar* p1 = (const uchar*)ptrs[1];
1107 const uchar* p2 = (const uchar*)ptrs[2];
1109 for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, mask += mstep )
1112 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
1114 size_t idx = tab[*p0] + tab[*p1 + 256] + tab[*p2 + 512];
1115 if( idx < OUT_OF_RANGE )
// Masked variant.
1119 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
1122 if( mask[x] && (idx = tab[*p0] + tab[*p1 + 256] + tab[*p2 + 512]) < OUT_OF_RANGE )
// --- Generic N-D fallback: accumulate lookup offsets across dimensions,
// bailing out on the first OUT_OF_RANGE component. ---
1129 for( ; imsize.height--; mask += mstep )
1132 for( x = 0; x < imsize.width; x++ )
1136 for( ; i < dims; i++ )
1138 size_t idx = tab[*ptrs[i] + i*256];
1139 if( idx >= OUT_OF_RANGE )
1142 ptrs[i] += deltas[i*2];
// Skip remaining dimension pointers past the rejected pixel.
1148 for( ; i < dims; i++ )
1149 ptrs[i] += deltas[i*2];
// Masked variant of the generic fallback.
1152 for( x = 0; x < imsize.width; x++ )
1157 for( ; i < dims; i++ )
1159 size_t idx = tab[*ptrs[i] + i*256];
1160 if( idx >= OUT_OF_RANGE )
1163 ptrs[i] += deltas[i*2];
1169 for( ; i < dims; i++ )
1170 ptrs[i] += deltas[i*2];
// End of row: row-gap adjustments for every dimension.
1172 for(int i = 0; i < dims; i++ )
1173 ptrs[i] += deltas[i*2 + 1];
// ParallelLoopBody wrapper around Intel IPP's even-histogram primitive:
// each worker computes the histogram of its row stripe into a private Mat,
// then folds the per-bin counts into the shared histogram with CV_XADD.
// *ok is presumably cleared on IPP failure (handling on lines not shown).
1180 class IPPCalcHistInvoker :
1181 public ParallelLoopBody
1184 IPPCalcHistInvoker(const Mat & _src, Mat & _hist, AutoBuffer<Ipp32s> & _levels, Ipp32s _histSize, Ipp32s _low, Ipp32s _high, bool * _ok) :
1185 ParallelLoopBody(), src(&_src), hist(&_hist), levels(&_levels), histSize(_histSize), low(_low), high(_high), ok(_ok)
1190 virtual void operator() (const Range & range) const
// Private stripe-local histogram, zero-initialized.
1192 Mat phist(hist->size(), hist->type(), Scalar::all(0));
1194 IppStatus status = ippiHistogramEven_8u_C1R(
1195 src->ptr(range.start), (int)src->step, ippiSize(src->cols, range.end - range.start),
1196 phist.ptr<Ipp32s>(), (Ipp32s *)*levels, histSize, low, high);
1203 CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT);
// Atomic per-bin merge into the shared output histogram.
1205 for (int i = 0; i < histSize; ++i)
1206 CV_XADD((int *)(hist->data + i * hist->step), *(int *)(phist.data + i * phist.step));
1212 AutoBuffer<Ipp32s> * levels;
1213 Ipp32s histSize, low, high;
// Non-copyable: declared, never defined.
1216 const IPPCalcHistInvoker & operator = (const IPPCalcHistInvoker & );
// Public dense-histogram entry point. Creates/reuses the CV_32F output,
// works internally on an int-typed alias (ihist) of the same buffer, tries
// the IPP fast path for the common 1-D 8u uniform case, and otherwise
// dispatches by input depth to calcHist_8u / calcHist_<T>.
1223 void cv::calcHist( const Mat* images, int nimages, const int* channels,
1224 InputArray _mask, OutputArray _hist, int dims, const int* histSize,
1225 const float** ranges, bool uniform, bool accumulate )
1227 Mat mask = _mask.getMat();
1229 CV_Assert(dims > 0 && histSize);
// Remember the pre-create() data pointer so we can tell whether create()
// reallocated (accumulation only makes sense over the same buffer).
1231 const uchar* const histdata = _hist.getMat().ptr();
1232 _hist.create(dims, histSize, CV_32F);
// ihist aliases hist's memory but is typed CV_32S for integer counting.
1233 Mat hist = _hist.getMat(), ihist = hist;
1234 ihist.flags = (ihist.flags & ~CV_MAT_TYPE_MASK)|CV_32S;
// IPP fast path: single 8u single-channel image, 1-D uniform, no mask,
// no accumulation.
1239 if (nimages == 1 && images[0].type() == CV_8UC1 && dims == 1 && channels &&
1240 channels[0] == 0 && mask.empty() && images[0].dims <= 2 &&
1241 !accumulate && uniform)
1243 ihist.setTo(Scalar::all(0));
1244 AutoBuffer<Ipp32s> levels(histSize[0] + 1);
1247 const Mat & src = images[0];
// Stripe count sized so each stripe has at least ~64K pixels, capped at 8.
1248 int nstripes = std::min<int>(8, static_cast<int>(src.total() / (1 << 16)));
1249 #ifdef HAVE_CONCURRENCY
1252 IPPCalcHistInvoker invoker(src, ihist, levels, histSize[0] + 1, (Ipp32s)ranges[0][0], (Ipp32s)ranges[0][1], &ok);
1253 Range range(0, src.rows);
1254 parallel_for_(range, invoker, nstripes);
1258 ihist.convertTo(hist, CV_32F);
1259 CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT);
// IPP failed: record it and fall through to the generic path.
1262 setIppErrorStatus();
// Fresh buffer or accumulate==false: start from zero; otherwise convert
// the existing float counts to int so we can keep accumulating.
1267 if( !accumulate || histdata != hist.data )
1270 hist.convertTo(ihist, CV_32S);
1272 std::vector<uchar*> ptrs;
1273 std::vector<int> deltas;
1274 std::vector<double> uniranges;
1277 CV_Assert( mask.empty() || mask.type() == CV_8UC1 );
1278 histPrepareImages( images, nimages, channels, mask, dims, hist.size, ranges,
1279 uniform, ptrs, deltas, imsize, uniranges );
1280 const double* _uniranges = uniform ? &uniranges[0] : 0;
1282 int depth = images[0].depth();
// Depth dispatch: only 8U/16U/32F inputs are supported.
1284 if( depth == CV_8U )
1285 calcHist_8u(ptrs, deltas, imsize, ihist, dims, ranges, _uniranges, uniform );
1286 else if( depth == CV_16U )
1287 calcHist_<ushort>(ptrs, deltas, imsize, ihist, dims, ranges, _uniranges, uniform );
1288 else if( depth == CV_32F )
1289 calcHist_<float>(ptrs, deltas, imsize, ihist, dims, ranges, _uniranges, uniform );
1291 CV_Error(CV_StsUnsupportedFormat, "");
// Convert the integer counts back to the float histogram callers see.
1293 ihist.convertTo(hist, CV_32F);
// Generic (non-8u) histogram accumulation into a SparseMat: computes the
// full multi-dimensional index idx[] per pixel, then increments the node
// via hist.ptr(idx, true) which creates missing nodes on demand.
1300 template<typename T> static void
1301 calcSparseHist_( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
1302 Size imsize, SparseMat& hist, int dims, const float** _ranges,
1303 const double* _uniranges, bool uniform )
1305 T** ptrs = (T**)&_ptrs[0];
1306 const int* deltas = &_deltas[0];
1308 const uchar* mask = _ptrs[dims];
1309 int mstep = _deltas[dims*2 + 1];
1310 const int* size = hist.hdr->size;
1311 int idx[CV_MAX_DIM];
// --- Uniform case: per-dimension linear bin mapping. ---
1315 const double* uniranges = &_uniranges[0];
1317 for( ; imsize.height--; mask += mstep )
1319 for( x = 0; x < imsize.width; x++ )
1322 if( !mask || mask[x] )
1323 for( ; i < dims; i++ )
1325 idx[i] = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]);
// Bail out of the pixel on the first out-of-range component.
1326 if( (unsigned)idx[i] >= (unsigned)size[i] )
1328 ptrs[i] += deltas[i*2];
// ptr(idx, true) creates the sparse node if absent, then increment.
1332 ++*(int*)hist.ptr(idx, true);
// Advance any remaining dimension pointers past the rejected pixel.
1334 for( ; i < dims; i++ )
1335 ptrs[i] += deltas[i*2];
// End of row: row-gap adjustments.
1337 for( i = 0; i < dims; i++ )
1338 ptrs[i] += deltas[i*2 + 1];
1343 // non-uniform histogram
1344 const float* ranges[CV_MAX_DIM];
1345 for( i = 0; i < dims; i++ )
1346 ranges[i] = &_ranges[i][0];
1348 for( ; imsize.height--; mask += mstep )
1350 for( x = 0; x < imsize.width; x++ )
1354 if( !mask || mask[x] )
1355 for( ; i < dims; i++ )
1357 float v = (float)*ptrs[i];
1358 const float* R = ranges[i];
1359 int j = -1, sz = size[i];
// Linear scan of the bin edges to locate v's interval.
1361 while( v >= R[j+1] && ++j < sz )
1364 if( (unsigned)j >= (unsigned)sz )
1366 ptrs[i] += deltas[i*2];
1371 ++*(int*)hist.ptr(idx, true);
1373 for( ; i < dims; i++ )
1374 ptrs[i] += deltas[i*2];
1377 for( i = 0; i < dims; i++ )
1378 ptrs[i] += deltas[i*2 + 1];
// 8-bit specialization of the sparse histogram: the lookup tables are
// built in sparse mode (step == 1), so tab entries are bin indices rather
// than byte offsets; they fill idx[] directly (assignment presumably on a
// line not shown) before hist.ptr(idx, true) increments the node.
1385 calcSparseHist_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
1386 Size imsize, SparseMat& hist, int dims, const float** _ranges,
1387 const double* _uniranges, bool uniform )
1389 uchar** ptrs = (uchar**)&_ptrs[0];
1390 const int* deltas = &_deltas[0];
1392 const uchar* mask = _ptrs[dims];
1393 int mstep = _deltas[dims*2 + 1];
1394 int idx[CV_MAX_DIM];
1395 std::vector<size_t> _tab;
1397 calcHistLookupTables_8u( Mat(), hist, dims, _ranges, _uniranges, uniform, true, _tab );
1398 const size_t* tab = &_tab[0];
1400 for( ; imsize.height--; mask += mstep )
1402 for( x = 0; x < imsize.width; x++ )
1405 if( !mask || mask[x] )
1406 for( ; i < dims; i++ )
1408 size_t hidx = tab[*ptrs[i] + i*256];
// Reject the pixel on the first out-of-range component.
1409 if( hidx >= OUT_OF_RANGE )
1411 ptrs[i] += deltas[i*2];
1416 ++*(int*)hist.ptr(idx,true);
// Advance remaining dimension pointers past the rejected pixel.
1418 for( ; i < dims; i++ )
1419 ptrs[i] += deltas[i*2];
// End of row: row-gap adjustments.
1421 for(int i = 0; i < dims; i++ )
1422 ptrs[i] += deltas[i*2 + 1];
// SparseMat histogram driver. Because the sparse histogram stores float
// values but accumulation counts ints, existing values are reinterpreted
// in place: float -> int before counting (when accumulating), then
// int -> float afterwards unless keepInt asks for raw integer counts.
1427 static void calcHist( const Mat* images, int nimages, const int* channels,
1428 const Mat& mask, SparseMat& hist, int dims, const int* histSize,
1429 const float** ranges, bool uniform, bool accumulate, bool keepInt )
1434 hist.create(dims, histSize, CV_32F);
// Accumulating: convert each existing nonzero from float to int in place
// (Cv32suf is a 32-bit union over float/int).
1437 SparseMatIterator it = hist.begin();
1438 for( i = 0, N = hist.nzcount(); i < N; i++, ++it )
1440 Cv32suf* val = (Cv32suf*)it.ptr;
1441 val->i = cvRound(val->f);
1445 std::vector<uchar*> ptrs;
1446 std::vector<int> deltas;
1447 std::vector<double> uniranges;
1450 CV_Assert( mask.empty() || mask.type() == CV_8UC1 );
1451 histPrepareImages( images, nimages, channels, mask, dims, hist.hdr->size, ranges,
1452 uniform, ptrs, deltas, imsize, uniranges );
1453 const double* _uniranges = uniform ? &uniranges[0] : 0;
// Depth dispatch, mirroring the dense cv::calcHist path.
1455 int depth = images[0].depth();
1456 if( depth == CV_8U )
1457 calcSparseHist_8u(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, uniform );
1458 else if( depth == CV_16U )
1459 calcSparseHist_<ushort>(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, uniform );
1460 else if( depth == CV_32F )
1461 calcSparseHist_<float>(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, uniform );
1463 CV_Error(CV_StsUnsupportedFormat, "");
// Convert the int counts back to float in place (skipped when keepInt,
// per the surrounding condition not fully visible here).
1467 SparseMatIterator it = hist.begin();
1468 for( i = 0, N = hist.nzcount(); i < N; i++, ++it )
1470 Cv32suf* val = (Cv32suf*)it.ptr;
1471 val->f = (float)val->i;
// OpenCL path for a 1-D, 256-bin (BINS) histogram of a single-channel 8-bit
// image. Pass 1 ("calculate_histogram") computes one partial histogram per
// compute unit; pass 2 ("merge_histogram") reduces them into the output.
// Returns false if either kernel fails to build or run (caller falls back to CPU).
1483 static bool ocl_calcHist1(InputArray _src, OutputArray _hist, int ddepth = CV_32S)
1485 const ocl::Device & dev = ocl::Device::getDefault();
1486 int compunits = dev.maxComputeUnits();
1487 size_t wgs = dev.maxWorkGroupSize();
1488 Size size = _src.size();
// 16-wide vector loads are only safe when width/offset/step are all 16-aligned.
1489 bool use16 = size.width % 16 == 0 && _src.offset() % 16 == 0 && _src.step() % 16 == 0;
1490 int kercn = dev.isAMD() && use16 ? 16 : std::min(4, ocl::predictOptimalVectorWidth(_src));
1492 ocl::Kernel k1("calculate_histogram", ocl::imgproc::histogram_oclsrc,
// NOTE(review): `wgs` is size_t but formatted with %d here, while the k2
// build below casts it to (int) — looks inconsistent; verify on LP64 targets.
1493 format("-D BINS=%d -D HISTS_COUNT=%d -D WGS=%d -D kercn=%d -D T=%s%s",
1494 BINS, compunits, wgs, kercn,
1495 kercn == 4 ? "int" : ocl::typeToStr(CV_8UC(kercn)),
1496 _src.isContinuous() ? " -D HAVE_SRC_CONT" : ""));
1500 _hist.create(BINS, 1, ddepth);
// ghist holds one BINS-sized partial histogram per compute unit.
1501 UMat src = _src.getUMat(), ghist(1, BINS * compunits, CV_32SC1),
1502 hist = _hist.getUMat();
1504 k1.args(ocl::KernelArg::ReadOnly(src),
1505 ocl::KernelArg::PtrWriteOnly(ghist), (int)src.total());
1507 size_t globalsize = compunits * wgs;
1508 if (!k1.run(1, &globalsize, &wgs, false))
// Second pass: merge the per-compute-unit partials, converting CV_32S
// counts to the requested output depth.
1512 ocl::Kernel k2("merge_histogram", ocl::imgproc::histogram_oclsrc,
1513 format("-D BINS=%d -D HISTS_COUNT=%d -D WGS=%d -D convertToHT=%s -D HT=%s",
1514 BINS, compunits, (int)wgs, ocl::convertTypeStr(CV_32S, ddepth, 1, cvt),
1515 ocl::typeToStr(ddepth)));
1519 k2.args(ocl::KernelArg::PtrReadOnly(ghist),
1520 ocl::KernelArg::WriteOnlyNoSize(hist));
1522 return k2.run(1, &wgs, &wgs, false);
// Thin adapter used by CV_OCL_RUN in cv::calcHist: extracts the single UMat
// and delegates to ocl_calcHist1 with a CV_32F result (matching the CPU path).
1525 static bool ocl_calcHist(InputArrayOfArrays images, OutputArray hist)
1527 std::vector<UMat> v;
1528 images.getUMatVector(v);
// Caller guarantees exactly one single-channel image (see the CV_OCL_RUN guard).
1530 return ocl_calcHist1(v[0], hist, CV_32F);
// Public SparseMat overload of cv::calcHist: forwards to the internal static
// calcHist with keepInt=false so the final bin values come back as float.
1537 void cv::calcHist( const Mat* images, int nimages, const int* channels,
1538 InputArray _mask, SparseMat& hist, int dims, const int* histSize,
1539 const float** ranges, bool uniform, bool accumulate )
1541 Mat mask = _mask.getMat();
1542 calcHist( images, nimages, channels, mask, hist, dims, histSize,
1543 ranges, uniform, accumulate, false );
// std::vector-based public overload of cv::calcHist. Tries the OpenCL fast
// path for the common "single 8-bit channel, 256 uniform bins over [0,256)"
// case, then validates sizes and forwards to the Mat* overload.
// NOTE(review): the trailing parameters of the forwarded call (uniform/
// accumulate) are on elided lines in this excerpt.
1547 void cv::calcHist( InputArrayOfArrays images, const std::vector<int>& channels,
1548 InputArray mask, OutputArray hist,
1549 const std::vector<int>& histSize,
1550 const std::vector<float>& ranges,
// OpenCL path only for: one 1-channel UMat, channel 0, no mask,
// no accumulation, BINS uniform bins covering [0, BINS).
1553 CV_OCL_RUN(images.total() == 1 && channels.size() == 1 && images.channels(0) == 1 &&
1554 channels[0] == 0 && images.isUMatVector() && mask.empty() && !accumulate &&
1555 histSize.size() == 1 && histSize[0] == BINS && ranges.size() == 2 &&
1556 ranges[0] == 0 && ranges[1] == BINS,
1557 ocl_calcHist(images, hist))
1559 int i, dims = (int)histSize.size(), rsz = (int)ranges.size(), csz = (int)channels.size();
1560 int nimages = (int)images.total();
1562 CV_Assert(nimages > 0 && dims > 0);
// Empty ranges are only allowed for 8-bit input (implicit [0,256) per dim).
1563 CV_Assert(rsz == dims*2 || (rsz == 0 && images.depth(0) == CV_8U));
1564 CV_Assert(csz == 0 || csz == dims);
1565 float* _ranges[CV_MAX_DIM];
// Repack the flat {lo0,hi0,lo1,hi1,...} vector into per-dimension pointers.
1568 for( i = 0; i < rsz/2; i++ )
1569 _ranges[i] = (float*)&ranges[i*2];
1572 AutoBuffer<Mat> buf(nimages);
1573 for( i = 0; i < nimages; i++ )
1574 buf[i] = images.getMat(i);
1576 calcHist(&buf[0], nimages, csz ? &channels[0] : 0,
1577 mask, hist, dims, &histSize[0], rsz ? (const float**)_ranges : 0,
1582 /////////////////////////////////////// B A C K P R O J E C T ////////////////////////////////////
// Core dense back-projection worker: for each pixel, looks up its histogram
// bin and writes hist_value * scale (saturated to BT) into the output plane
// (_ptrs[dims]). Uniform 1-D/2-D/3-D cases are unrolled; higher dimensions
// and non-uniform ranges use generic per-dimension loops.
// NOTE(review): braces, i/x declarations and some guard lines are elided in
// this excerpt; comments describe the visible statements only.
1587 template<typename T, typename BT> static void
1588 calcBackProj_( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
1589 Size imsize, const Mat& hist, int dims, const float** _ranges,
1590 const double* _uniranges, float scale, bool uniform )
1592 T** ptrs = (T**)&_ptrs[0];
1593 const int* deltas = &_deltas[0];
1594 const uchar* H = hist.ptr();
// Output plane pointer and its row step travel in the extra slot [dims].
1596 BT* bproj = (BT*)_ptrs[dims];
1597 int bpstep = _deltas[dims*2 + 1];
1598 int size[CV_MAX_DIM];
1599 size_t hstep[CV_MAX_DIM];
// Cache bin counts and byte strides per histogram dimension.
1601 for( i = 0; i < dims; i++ )
1603 size[i] = hist.size[i];
1604 hstep[i] = hist.step[i];
1609 const double* uniranges = &_uniranges[0];
// ---- uniform, 1-D fast path: bin = floor(v*a + b) ----
1613 double a = uniranges[0], b = uniranges[1];
1614 int sz = size[0], d0 = deltas[0], step0 = deltas[1];
1615 const T* p0 = (const T*)ptrs[0];
1617 for( ; imsize.height--; p0 += step0, bproj += bpstep )
1619 for( x = 0; x < imsize.width; x++, p0 += d0 )
1621 int idx = cvFloor(*p0*a + b);
// Single unsigned compare covers both idx < 0 and idx >= sz.
1622 bproj[x] = (unsigned)idx < (unsigned)sz ? saturate_cast<BT>(((const float*)H)[idx]*scale) : 0;
1626 else if( dims == 2 )
// ---- uniform, 2-D fast path ----
1628 double a0 = uniranges[0], b0 = uniranges[1],
1629 a1 = uniranges[2], b1 = uniranges[3];
1630 int sz0 = size[0], sz1 = size[1];
1631 int d0 = deltas[0], step0 = deltas[1],
1632 d1 = deltas[2], step1 = deltas[3];
1633 size_t hstep0 = hstep[0];
1634 const T* p0 = (const T*)ptrs[0];
1635 const T* p1 = (const T*)ptrs[1];
1637 for( ; imsize.height--; p0 += step0, p1 += step1, bproj += bpstep )
1639 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
1641 int idx0 = cvFloor(*p0*a0 + b0);
1642 int idx1 = cvFloor(*p1*a1 + b1);
1643 bproj[x] = (unsigned)idx0 < (unsigned)sz0 &&
1644 (unsigned)idx1 < (unsigned)sz1 ?
1645 saturate_cast<BT>(((const float*)(H + hstep0*idx0))[idx1]*scale) : 0;
1649 else if( dims == 3 )
// ---- uniform, 3-D fast path ----
1651 double a0 = uniranges[0], b0 = uniranges[1],
1652 a1 = uniranges[2], b1 = uniranges[3],
1653 a2 = uniranges[4], b2 = uniranges[5];
1654 int sz0 = size[0], sz1 = size[1], sz2 = size[2];
1655 int d0 = deltas[0], step0 = deltas[1],
1656 d1 = deltas[2], step1 = deltas[3],
1657 d2 = deltas[4], step2 = deltas[5];
1658 size_t hstep0 = hstep[0], hstep1 = hstep[1];
1659 const T* p0 = (const T*)ptrs[0];
1660 const T* p1 = (const T*)ptrs[1];
1661 const T* p2 = (const T*)ptrs[2];
1663 for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, bproj += bpstep )
1665 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
1667 int idx0 = cvFloor(*p0*a0 + b0);
1668 int idx1 = cvFloor(*p1*a1 + b1);
1669 int idx2 = cvFloor(*p2*a2 + b2);
1670 bproj[x] = (unsigned)idx0 < (unsigned)sz0 &&
1671 (unsigned)idx1 < (unsigned)sz1 &&
1672 (unsigned)idx2 < (unsigned)sz2 ?
1673 saturate_cast<BT>(((const float*)(H + hstep0*idx0 + hstep1*idx1))[idx2]*scale) : 0;
// ---- uniform, generic N-D path ----
1679 for( ; imsize.height--; bproj += bpstep )
1681 for( x = 0; x < imsize.width; x++ )
1683 const uchar* Hptr = H;
1684 for( i = 0; i < dims; i++ )
1686 int idx = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]);
// Out-of-range coordinate => emit 0 for this pixel (break elided).
1687 if( (unsigned)idx >= (unsigned)size[i] || (_ranges && *ptrs[i] >= _ranges[i][1]))
1689 ptrs[i] += deltas[i*2];
1690 Hptr += idx*hstep[i];
1694 bproj[x] = saturate_cast<BT>(*(const float*)Hptr*scale);
1698 for( ; i < dims; i++ )
1699 ptrs[i] += deltas[i*2];
1702 for( i = 0; i < dims; i++ )
1703 ptrs[i] += deltas[i*2 + 1];
1709 // non-uniform histogram
1710 const float* ranges[CV_MAX_DIM];
1711 for( i = 0; i < dims; i++ )
1712 ranges[i] = &_ranges[i][0];
1714 for( ; imsize.height--; bproj += bpstep )
1716 for( x = 0; x < imsize.width; x++ )
1718 const uchar* Hptr = H;
1719 for( i = 0; i < dims; i++ )
1721 float v = (float)*ptrs[i];
1722 const float* R = ranges[i];
1723 int idx = -1, sz = size[i];
// Linear scan of the sz+1 bin boundaries to locate v's bin.
1725 while( v >= R[idx+1] && ++idx < sz )
1728 if( (unsigned)idx >= (unsigned)sz )
1731 ptrs[i] += deltas[i*2];
1732 Hptr += idx*hstep[i];
1736 bproj[x] = saturate_cast<BT>(*(const float*)Hptr*scale);
1740 for( ; i < dims; i++ )
1741 ptrs[i] += deltas[i*2];
1745 for( i = 0; i < dims; i++ )
1746 ptrs[i] += deltas[i*2 + 1];
// 8-bit specialization of dense back-projection. Bin lookup goes through the
// 256-entries-per-dimension byte-offset table from calcHistLookupTables_8u;
// the 1-D case additionally precomputes a full 256-entry value->result LUT.
// NOTE(review): the leading "static void", braces and some loop guards are
// elided in this excerpt; comments describe the visible statements only.
1753 calcBackProj_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
1754 Size imsize, const Mat& hist, int dims, const float** _ranges,
1755 const double* _uniranges, float scale, bool uniform )
1757 uchar** ptrs = &_ptrs[0];
1758 const int* deltas = &_deltas[0];
1759 const uchar* H = hist.ptr();
// Output plane and its row step sit in the extra slot [dims].
1761 uchar* bproj = _ptrs[dims];
1762 int bpstep = _deltas[dims*2 + 1];
1763 std::vector<size_t> _tab;
1765 calcHistLookupTables_8u( hist, SparseMat(), dims, _ranges, _uniranges, uniform, false, _tab );
1766 const size_t* tab = &_tab[0];
// ---- 1-D: collapse histogram into a direct 256-entry uchar LUT ----
1770 int d0 = deltas[0], step0 = deltas[1];
1771 uchar matH[256] = {0};
1772 const uchar* p0 = (const uchar*)ptrs[0];
1774 for( i = 0; i < 256; i++ )
1776 size_t hidx = tab[i];
// Out-of-range values keep the zero-initialized LUT entry.
1777 if( hidx < OUT_OF_RANGE )
1778 matH[i] = saturate_cast<uchar>(*(float*)(H + hidx)*scale);
1781 for( ; imsize.height--; p0 += step0, bproj += bpstep )
// Contiguous-pixel fast path (d0 == 1), unrolled by 4.
1785 for( x = 0; x <= imsize.width - 4; x += 4 )
1787 uchar t0 = matH[p0[x]], t1 = matH[p0[x+1]];
1788 bproj[x] = t0; bproj[x+1] = t1;
1789 t0 = matH[p0[x+2]]; t1 = matH[p0[x+3]];
1790 bproj[x+2] = t0; bproj[x+3] = t1;
// Strided fast path (general d0), also unrolled by 4.
1795 for( x = 0; x <= imsize.width - 4; x += 4 )
1797 uchar t0 = matH[p0[0]], t1 = matH[p0[d0]];
1798 bproj[x] = t0; bproj[x+1] = t1;
1800 t0 = matH[p0[0]]; t1 = matH[p0[d0]];
1801 bproj[x+2] = t0; bproj[x+3] = t1;
// Remaining tail pixels.
1805 for( ; x < imsize.width; x++, p0 += d0 )
1806 bproj[x] = matH[*p0];
1809 else if( dims == 2 )
// ---- 2-D: sum two table offsets into a single byte offset into H ----
1811 int d0 = deltas[0], step0 = deltas[1],
1812 d1 = deltas[2], step1 = deltas[3];
1813 const uchar* p0 = (const uchar*)ptrs[0];
1814 const uchar* p1 = (const uchar*)ptrs[1];
1816 for( ; imsize.height--; p0 += step0, p1 += step1, bproj += bpstep )
1818 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 )
1820 size_t idx = tab[*p0] + tab[*p1 + 256];
// Any OUT_OF_RANGE component pushes idx past the threshold -> 0.
1821 bproj[x] = idx < OUT_OF_RANGE ? saturate_cast<uchar>(*(const float*)(H + idx)*scale) : 0;
1825 else if( dims == 3 )
// ---- 3-D: same scheme with three table segments ----
1827 int d0 = deltas[0], step0 = deltas[1],
1828 d1 = deltas[2], step1 = deltas[3],
1829 d2 = deltas[4], step2 = deltas[5];
1830 const uchar* p0 = (const uchar*)ptrs[0];
1831 const uchar* p1 = (const uchar*)ptrs[1];
1832 const uchar* p2 = (const uchar*)ptrs[2];
1834 for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, bproj += bpstep )
1836 for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 )
1838 size_t idx = tab[*p0] + tab[*p1 + 256] + tab[*p2 + 512];
1839 bproj[x] = idx < OUT_OF_RANGE ? saturate_cast<uchar>(*(const float*)(H + idx)*scale) : 0;
// ---- generic N-D path ----
1845 for( ; imsize.height--; bproj += bpstep )
1847 for( x = 0; x < imsize.width; x++ )
1849 const uchar* Hptr = H;
1850 for( i = 0; i < dims; i++ )
1852 size_t idx = tab[*ptrs[i] + i*256];
1853 if( idx >= OUT_OF_RANGE )
1855 ptrs[i] += deltas[i*2];
1860 bproj[x] = saturate_cast<uchar>(*(const float*)Hptr*scale);
1864 for( ; i < dims; i++ )
1865 ptrs[i] += deltas[i*2];
1868 for( i = 0; i < dims; i++ )
1869 ptrs[i] += deltas[i*2 + 1];
// Public dense-histogram back-projection entry point: allocates the output,
// prepares plane pointers/strides, and dispatches on image depth.
1876 void cv::calcBackProject( const Mat* images, int nimages, const int* channels,
1877 InputArray _hist, OutputArray _backProject,
1878 const float** ranges, double scale, bool uniform )
1880 Mat hist = _hist.getMat();
1881 std::vector<uchar*> ptrs;
1882 std::vector<int> deltas;
1883 std::vector<double> uniranges;
// An Nx1 2-D histogram is treated as 1-D.
1885 int dims = hist.dims == 2 && hist.size[1] == 1 ? 1 : hist.dims;
1887 CV_Assert( dims > 0 && !hist.empty() );
// Back-projection has the same size and depth as the input images.
1888 _backProject.create( images[0].size(), images[0].depth() );
1889 Mat backProject = _backProject.getMat();
// The output plane is passed in place of the mask slot.
1890 histPrepareImages( images, nimages, channels, backProject, dims, hist.size, ranges,
1891 uniform, ptrs, deltas, imsize, uniranges );
1892 const double* _uniranges = uniform ? &uniranges[0] : 0;
1894 int depth = images[0].depth();
1895 if( depth == CV_8U )
1896 calcBackProj_8u(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, (float)scale, uniform);
1897 else if( depth == CV_16U )
1898 calcBackProj_<ushort, ushort>(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, (float)scale, uniform );
1899 else if( depth == CV_32F )
1900 calcBackProj_<float, float>(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, (float)scale, uniform );
1902 CV_Error(CV_StsUnsupportedFormat, "");
// Sparse-histogram back-projection worker: per pixel, computes the bin index
// vector, reads the sparse bin value (0 for absent bins via SparseMat_
// operator()), scales and saturates it into the output plane (_ptrs[dims]).
// NOTE(review): braces, i/x declarations and the uniform/non-uniform branch
// guards are elided in this excerpt.
1909 template<typename T, typename BT> static void
1910 calcSparseBackProj_( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
1911 Size imsize, const SparseMat& hist, int dims, const float** _ranges,
1912 const double* _uniranges, float scale, bool uniform )
1914 T** ptrs = (T**)&_ptrs[0];
1915 const int* deltas = &_deltas[0];
1917 BT* bproj = (BT*)_ptrs[dims];
1918 int bpstep = _deltas[dims*2 + 1];
1919 const int* size = hist.hdr->size;
1920 int idx[CV_MAX_DIM];
// Typed view so operator()(idx) returns 0.f for missing bins.
1921 const SparseMat_<float>& hist_ = (const SparseMat_<float>&)hist;
// ---- uniform ranges: bin = floor(v*a + b) per dimension ----
1925 const double* uniranges = &_uniranges[0];
1926 for( ; imsize.height--; bproj += bpstep )
1928 for( x = 0; x < imsize.width; x++ )
1930 for( i = 0; i < dims; i++ )
1932 idx[i] = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]);
// Out-of-range coordinate => output 0 for this pixel (break elided).
1933 if( (unsigned)idx[i] >= (unsigned)size[i] )
1935 ptrs[i] += deltas[i*2];
1939 bproj[x] = saturate_cast<BT>(hist_(idx)*scale);
1943 for( ; i < dims; i++ )
1944 ptrs[i] += deltas[i*2];
1947 for( i = 0; i < dims; i++ )
1948 ptrs[i] += deltas[i*2 + 1];
1953 // non-uniform histogram
1954 const float* ranges[CV_MAX_DIM];
1955 for( i = 0; i < dims; i++ )
1956 ranges[i] = &_ranges[i][0];
1958 for( ; imsize.height--; bproj += bpstep )
1960 for( x = 0; x < imsize.width; x++ )
1962 for( i = 0; i < dims; i++ )
1964 float v = (float)*ptrs[i];
1965 const float* R = ranges[i];
1966 int j = -1, sz = size[i];
// Linear scan of bin boundaries to locate v's bin.
1968 while( v >= R[j+1] && ++j < sz )
1971 if( (unsigned)j >= (unsigned)sz )
1974 ptrs[i] += deltas[i*2];
1978 bproj[x] = saturate_cast<BT>(hist_(idx)*scale);
1982 for( ; i < dims; i++ )
1983 ptrs[i] += deltas[i*2];
1987 for( i = 0; i < dims; i++ )
1988 ptrs[i] += deltas[i*2 + 1];
// 8-bit sparse-histogram back-projection: bin indices come from the
// 256-entries-per-dimension lookup table (sparse flavor of
// calcHistLookupTables_8u), then the sparse bin value is scaled into
// the output plane. NOTE(review): leading "static void", braces and the
// idx[] assignment lines are elided in this excerpt.
1995 calcSparseBackProj_8u( std::vector<uchar*>& _ptrs, const std::vector<int>& _deltas,
1996 Size imsize, const SparseMat& hist, int dims, const float** _ranges,
1997 const double* _uniranges, float scale, bool uniform )
1999 uchar** ptrs = &_ptrs[0];
2000 const int* deltas = &_deltas[0];
// Output plane and its row step travel in the extra slot [dims].
2002 uchar* bproj = _ptrs[dims];
2003 int bpstep = _deltas[dims*2 + 1];
2004 std::vector<size_t> _tab;
2005 int idx[CV_MAX_DIM];
2007 calcHistLookupTables_8u( Mat(), hist, dims, _ranges, _uniranges, uniform, true, _tab );
2008 const size_t* tab = &_tab[0];
2010 for( ; imsize.height--; bproj += bpstep )
2012 for( x = 0; x < imsize.width; x++ )
2014 for( i = 0; i < dims; i++ )
2016 size_t hidx = tab[*ptrs[i] + i*256];
// Out-of-range coordinate => write 0 for this pixel (break elided).
2017 if( hidx >= OUT_OF_RANGE )
2020 ptrs[i] += deltas[i*2];
// value<float>() returns 0 for bins absent from the sparse matrix.
2024 bproj[x] = saturate_cast<uchar>(hist.value<float>(idx)*scale);
2028 for( ; i < dims; i++ )
2029 ptrs[i] += deltas[i*2];
2032 for( i = 0; i < dims; i++ )
2033 ptrs[i] += deltas[i*2 + 1];
// Public sparse-histogram back-projection entry point: allocates the output,
// prepares plane pointers/strides, and dispatches on image depth.
2039 void cv::calcBackProject( const Mat* images, int nimages, const int* channels,
2040 const SparseMat& hist, OutputArray _backProject,
2041 const float** ranges, double scale, bool uniform )
2043 std::vector<uchar*> ptrs;
2044 std::vector<int> deltas;
2045 std::vector<double> uniranges;
2047 int dims = hist.dims();
2049 CV_Assert( dims > 0 );
// Output matches the input images' size and depth.
2050 _backProject.create( images[0].size(), images[0].depth() );
2051 Mat backProject = _backProject.getMat();
// The output plane is passed in place of the mask slot.
2052 histPrepareImages( images, nimages, channels, backProject,
2053 dims, hist.hdr->size, ranges,
2054 uniform, ptrs, deltas, imsize, uniranges );
2055 const double* _uniranges = uniform ? &uniranges[0] : 0;
2057 int depth = images[0].depth();
2058 if( depth == CV_8U )
2059 calcSparseBackProj_8u(ptrs, deltas, imsize, hist, dims, ranges,
2060 _uniranges, (float)scale, uniform);
2061 else if( depth == CV_16U )
2062 calcSparseBackProj_<ushort, ushort>(ptrs, deltas, imsize, hist, dims, ranges,
2063 _uniranges, (float)scale, uniform );
2064 else if( depth == CV_32F )
2065 calcSparseBackProj_<float, float>(ptrs, deltas, imsize, hist, dims, ranges,
2066 _uniranges, (float)scale, uniform );
2068 CV_Error(CV_StsUnsupportedFormat, "");
// Map a global channel number `cn` (counted across all images in `um`)
// to the image index `idx` and the channel index `cnidx` within that image.
// NOTE(review): the not-found case (presumably idx = cnidx = -1) and the
// early-return/brace lines are elided in this excerpt.
2075 static void getUMatIndex(const std::vector<UMat> & um, int cn, int & idx, int & cnidx)
2077 int totalChannels = 0;
2078 for (size_t i = 0, size = um.size(); i < size; ++i)
2080 int ccn = um[i].channels();
2081 totalChannels += ccn;
// cn falls exactly at the end of this image's channel range.
2083 if (totalChannels == cn)
2089 else if (totalChannels > cn)
// cn lies inside image i; compute the local channel offset.
2092 cnidx = i == 0 ? cn : (cn - totalChannels + ccn);
// OpenCL back-projection for 1-D or 2-D float histograms: builds per-value
// LUT(s) with the "calcLUT" kernel, then maps pixels through them with the
// "LUT" kernel. Returns false on any kernel build/run failure so the caller
// can fall back to the CPU path.
// NOTE(review): kernel-empty checks, lsize declarations and several guard
// lines are elided in this excerpt.
2100 static bool ocl_calcBackProject( InputArrayOfArrays _images, std::vector<int> channels,
2101 InputArray _hist, OutputArray _dst,
2102 const std::vector<float>& ranges,
2103 float scale, size_t histdims )
2105 std::vector<UMat> images;
2106 _images.getUMatVector(images);
2108 size_t nimages = images.size(), totalcn = images[0].channels();
2110 CV_Assert(nimages > 0);
2111 Size size = images[0].size();
2112 int depth = images[0].depth();
2114 //kernels are valid for this type only
// All images must agree in size and depth.
2118 for (size_t i = 1; i < nimages; ++i)
2120 const UMat & m = images[i];
2121 totalcn += m.channels();
2122 CV_Assert(size == m.size() && depth == m.depth());
// Channels are matched against the global channel count across all images.
2125 std::sort(channels.begin(), channels.end());
2126 for (size_t i = 0; i < histdims; ++i)
2127 CV_Assert(channels[i] < (int)totalcn);
// ---- histdims == 1 ----
2132 getUMatIndex(images, channels[0], idx, cnidx);
2133 CV_Assert(idx >= 0);
2134 UMat im = images[idx];
2136 String opts = format("-D histdims=1 -D scn=%d", im.channels());
2137 ocl::Kernel lutk("calcLUT", ocl::imgproc::calc_back_project_oclsrc, opts);
2142 UMat lut(1, (int)lsize, CV_32SC1), hist = _hist.getUMat(), uranges(ranges, true);
2144 lutk.args(ocl::KernelArg::ReadOnlyNoSize(hist), hist.rows,
2145 ocl::KernelArg::PtrWriteOnly(lut), scale, ocl::KernelArg::PtrReadOnly(uranges));
2146 if (!lutk.run(1, &lsize, NULL, false))
2149 ocl::Kernel mapk("LUT", ocl::imgproc::calc_back_project_oclsrc, opts);
2153 _dst.create(size, depth);
2154 UMat dst = _dst.getUMat();
2157 mapk.args(ocl::KernelArg::ReadOnlyNoSize(im), ocl::KernelArg::PtrReadOnly(lut),
2158 ocl::KernelArg::WriteOnly(dst));
2160 size_t globalsize[2] = { size.width, size.height };
2161 return mapk.run(2, globalsize, NULL, false);
2163 else if (histdims == 2)
// ---- histdims == 2: two stacked LUTs, one per dimension ----
2165 int idx0, idx1, cnidx0, cnidx1;
2166 getUMatIndex(images, channels[0], idx0, cnidx0);
2167 getUMatIndex(images, channels[1], idx1, cnidx1);
2168 CV_Assert(idx0 >= 0 && idx1 >= 0);
2169 UMat im0 = images[idx0], im1 = images[idx1];
2171 // Lut for the first dimension
2172 String opts = format("-D histdims=2 -D scn1=%d -D scn2=%d", im0.channels(), im1.channels());
2173 ocl::Kernel lutk1("calcLUT", ocl::imgproc::calc_back_project_oclsrc, opts);
// lut holds both LUTs back to back (hence the <<1).
2178 UMat lut(1, (int)lsize<<1, CV_32SC1), uranges(ranges, true), hist = _hist.getUMat();
2180 lutk1.args(hist.rows, ocl::KernelArg::PtrWriteOnly(lut), (int)0, ocl::KernelArg::PtrReadOnly(uranges), (int)0);
2181 if (!lutk1.run(1, &lsize, NULL, false))
2184 // lut for the second dimension
2185 ocl::Kernel lutk2("calcLUT", ocl::imgproc::calc_back_project_oclsrc, opts);
// Point the same UMat at the second half of the LUT buffer.
2189 lut.offset += lsize * sizeof(int);
2190 lutk2.args(hist.cols, ocl::KernelArg::PtrWriteOnly(lut), (int)256, ocl::KernelArg::PtrReadOnly(uranges), (int)2);
2191 if (!lutk2.run(1, &lsize, NULL, false))
2195 ocl::Kernel mapk("LUT", ocl::imgproc::calc_back_project_oclsrc, opts);
2199 _dst.create(size, depth);
2200 UMat dst = _dst.getUMat();
// Select the requested channel within each image via a byte offset.
2202 im0.offset += cnidx0;
2203 im1.offset += cnidx1;
2204 mapk.args(ocl::KernelArg::ReadOnlyNoSize(im0), ocl::KernelArg::ReadOnlyNoSize(im1),
2205 ocl::KernelArg::ReadOnlyNoSize(hist), ocl::KernelArg::PtrReadOnly(lut), scale, ocl::KernelArg::WriteOnly(dst));
2207 size_t globalsize[2] = { size.width, size.height };
2208 return mapk.run(2, globalsize, NULL, false);
// std::vector-based public back-projection overload: tries the OpenCL path
// for 1-D/2-D CV_32FC1 histograms, reshapes multi-channel histograms into an
// extra dimension, validates arguments, and forwards to the Mat* overload.
// NOTE(review): the hcn > 1 guard around the reshape and the final `scale`
// parameter line are elided in this excerpt.
2217 void cv::calcBackProject( InputArrayOfArrays images, const std::vector<int>& channels,
2218 InputArray hist, OutputArray dst,
2219 const std::vector<float>& ranges,
2223 Size histSize = hist.size();
2224 bool _1D = histSize.height == 1 || histSize.width == 1;
2225 size_t histdims = _1D ? 1 : hist.dims();
2228 CV_OCL_RUN(dst.isUMat() && hist.type() == CV_32FC1 &&
2229 histdims <= 2 && ranges.size() == histdims * 2 && histdims == channels.size(),
2230 ocl_calcBackProject(images, channels, hist, dst, ranges, (float)scale, histdims))
2232 Mat H0 = hist.getMat(), H;
2233 int hcn = H0.channels();
// Multi-channel histogram: fold channels into one extra trailing dimension.
2237 CV_Assert( H0.isContinuous() );
2238 int hsz[CV_CN_MAX+1];
2239 memcpy(hsz, &H0.size[0], H0.dims*sizeof(hsz[0]));
2241 H = Mat(H0.dims+1, hsz, H0.depth(), H0.ptr());
2246 bool _1d = H.rows == 1 || H.cols == 1;
2247 int i, dims = H.dims, rsz = (int)ranges.size(), csz = (int)channels.size();
2248 int nimages = (int)images.total();
2250 CV_Assert(nimages > 0);
// Empty ranges are only valid for 8-bit inputs (implicit [0,256)).
2251 CV_Assert(rsz == dims*2 || (rsz == 2 && _1d) || (rsz == 0 && images.depth(0) == CV_8U));
2252 CV_Assert(csz == 0 || csz == dims || (csz == 1 && _1d));
2254 float* _ranges[CV_MAX_DIM];
// Repack flat {lo,hi} pairs into per-dimension pointers.
2257 for( i = 0; i < rsz/2; i++ )
2258 _ranges[i] = (float*)&ranges[i*2];
2261 AutoBuffer<Mat> buf(nimages);
2262 for( i = 0; i < nimages; i++ )
2263 buf[i] = images.getMat(i);
2265 calcBackProject(&buf[0], nimages, csz ? &channels[0] : 0,
2266 hist, dst, rsz ? (const float**)_ranges : 0, scale, true);
2270 ////////////////// C O M P A R E H I S T O G R A M S ////////////////////////
// Compare two equally-shaped CV_32F dense histograms with the selected
// method (chi-square, chi-square-alt, correlation, intersection,
// Bhattacharyya or KL-divergence), iterating plane-by-plane for >2-D data.
// NOTE(review): the `result` declaration, NEON #if guards, several loop
// bodies and brace lines are elided in this excerpt.
2272 double cv::compareHist( InputArray _H1, InputArray _H2, int method )
2274 Mat H1 = _H1.getMat(), H2 = _H2.getMat();
2275 const Mat* arrays[] = {&H1, &H2, 0};
2277 NAryMatIterator it(arrays, planes);
2279 int j, len = (int)it.size;
2281 CV_Assert( H1.type() == H2.type() && H1.depth() == CV_32F );
// Running sums used by the correlation / Bhattacharyya post-processing.
2283 double s1 = 0, s2 = 0, s11 = 0, s12 = 0, s22 = 0;
2285 CV_Assert( it.planes[0].isContinuous() && it.planes[1].isContinuous() );
2287 for( size_t i = 0; i < it.nplanes; i++, ++it )
2289 const float* h1 = it.planes[0].ptr<float>();
2290 const float* h2 = it.planes[1].ptr<float>();
2291 len = it.planes[0].rows*it.planes[0].cols*H1.channels();
2293 if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT))
2295 for( j = 0; j < len; j++ )
2297 double a = h1[j] - h2[j];
// Classic chi-square divides by h1; the "alt" form by h1+h2.
2298 double b = (method == CV_COMP_CHISQR) ? h1[j] : h1[j] + h2[j];
2299 if( fabs(b) > DBL_EPSILON )
2303 else if( method == CV_COMP_CORREL )
2305 for( j = 0; j < len; j++ )
2317 else if( method == CV_COMP_INTERSECT )
// NEON-vectorized min-sum, 4 floats at a time (guard elided).
2321 float32x4_t v_result = vdupq_n_f32(0.0f);
2322 for( ; j <= len - 4; j += 4 )
2323 v_result = vaddq_f32(v_result, vminq_f32(vld1q_f32(h1 + j), vld1q_f32(h2 + j)));
2324 float CV_DECL_ALIGNED(16) ar[4];
2325 vst1q_f32(ar, v_result);
2326 result += ar[0] + ar[1] + ar[2] + ar[3];
// Scalar tail.
2328 for( ; j < len; j++ )
2329 result += std::min(h1[j], h2[j]);
2331 else if( method == CV_COMP_BHATTACHARYYA )
2333 for( j = 0; j < len; j++ )
2337 result += std::sqrt(a*b);
2342 else if( method == CV_COMP_KL_DIV )
2344 for( j = 0; j < len; j++ )
// Guard both operands against log(0) / division by ~0.
2348 if( fabs(p) <= DBL_EPSILON ) {
2351 if( fabs(q) <= DBL_EPSILON ) {
2354 result += p * std::log( p / q );
2358 CV_Error( CV_StsBadArg, "Unknown comparison method" );
// Post-processing per method (chi-square-alt doubling elided).
2361 if( method == CV_COMP_CHISQR_ALT )
2363 else if( method == CV_COMP_CORREL )
2365 size_t total = H1.total();
2366 double scale = 1./total;
2367 double num = s12 - s1*s2*scale;
2368 double denom2 = (s11 - s1*s1*scale)*(s22 - s2*s2*scale);
// Degenerate (constant) histograms compare as perfectly correlated.
2369 result = std::abs(denom2) > DBL_EPSILON ? num/std::sqrt(denom2) : 1.;
2371 else if( method == CV_COMP_BHATTACHARYYA )
2374 s1 = fabs(s1) > FLT_EPSILON ? 1./std::sqrt(s1) : 1.;
2375 result = std::sqrt(std::max(1. - result*s1, 0.));
// Sparse-histogram counterpart of compareHist. Iterates the non-zero nodes
// of the smaller histogram (for the symmetric methods) and probes the other
// by index+hash; asymmetric methods (chi-square, KL) keep H1 first.
// NOTE(review): `result` declaration, some accumulation lines, brace lines
// and the chi-square-alt doubling body are elided in this excerpt.
2382 double cv::compareHist( const SparseMat& H1, const SparseMat& H2, int method )
2385 int i, dims = H1.dims();
2387 CV_Assert( dims > 0 && dims == H2.dims() && H1.type() == H2.type() && H1.type() == CV_32F );
2388 for( i = 0; i < dims; i++ )
2389 CV_Assert( H1.size(i) == H2.size(i) );
2391 const SparseMat *PH1 = &H1, *PH2 = &H2;
// For symmetric measures, iterate the sparser matrix for speed; the
// order-sensitive methods must keep their operand order.
2392 if( PH1->nzcount() > PH2->nzcount() && method != CV_COMP_CHISQR && method != CV_COMP_CHISQR_ALT && method != CV_COMP_KL_DIV )
2393 std::swap(PH1, PH2);
2395 SparseMatConstIterator it = PH1->begin();
2396 int N1 = (int)PH1->nzcount(), N2 = (int)PH2->nzcount();
2398 if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT) )
2400 for( i = 0; i < N1; i++, ++it )
2402 float v1 = it.value<float>();
2403 const SparseMat::Node* node = it.node();
// Reuse the node's precomputed hash to probe the other matrix.
2404 float v2 = PH2->value<float>(node->idx, (size_t*)&node->hashval);
2406 double b = (method == CV_COMP_CHISQR) ? v1 : v1 + v2;
2407 if( fabs(b) > DBL_EPSILON )
2411 else if( method == CV_COMP_CORREL )
2413 double s1 = 0, s2 = 0, s11 = 0, s12 = 0, s22 = 0;
2415 for( i = 0; i < N1; i++, ++it )
2417 double v1 = it.value<float>();
2418 const SparseMat::Node* node = it.node();
2419 s12 += v1*PH2->value<float>(node->idx, (size_t*)&node->hashval);
// Second pass over PH2 accumulates its own sums (iterator reset elided).
2425 for( i = 0; i < N2; i++, ++it )
2427 double v2 = it.value<float>();
// total = product of all dimension sizes (the full bin count).
2433 for( i = 0; i < H1.dims(); i++ )
2434 total *= H1.size(i);
2435 double scale = 1./total;
2436 double num = s12 - s1*s2*scale;
2437 double denom2 = (s11 - s1*s1*scale)*(s22 - s2*s2*scale);
2438 result = std::abs(denom2) > DBL_EPSILON ? num/std::sqrt(denom2) : 1.;
2440 else if( method == CV_COMP_INTERSECT )
2442 for( i = 0; i < N1; i++, ++it )
2444 float v1 = it.value<float>();
2445 const SparseMat::Node* node = it.node();
2446 float v2 = PH2->value<float>(node->idx, (size_t*)&node->hashval);
2448 result += std::min(v1, v2);
2451 else if( method == CV_COMP_BHATTACHARYYA )
2453 double s1 = 0, s2 = 0;
2455 for( i = 0; i < N1; i++, ++it )
2457 double v1 = it.value<float>();
2458 const SparseMat::Node* node = it.node();
2459 double v2 = PH2->value<float>(node->idx, (size_t*)&node->hashval);
2460 result += std::sqrt(v1*v2);
2465 for( i = 0; i < N2; i++, ++it )
2466 s2 += it.value<float>();
2469 s1 = fabs(s1) > FLT_EPSILON ? 1./std::sqrt(s1) : 1.;
2470 result = std::sqrt(std::max(1. - result*s1, 0.));
2472 else if( method == CV_COMP_KL_DIV )
2474 for( i = 0; i < N1; i++, ++it )
2476 double v1 = it.value<float>();
2477 const SparseMat::Node* node = it.node();
2478 double v2 = PH2->value<float>(node->idx, (size_t*)&node->hashval);
2481 result += v1 * std::log( v1 / v2 );
2485 CV_Error( CV_StsBadArg, "Unknown comparison method" );
2487 if( method == CV_COMP_CHISQR_ALT )
// All C-API histograms store their bins as 32-bit floats.
2494 const int CV_HIST_DEFAULT_TYPE = CV_32F;
2496 /* Creates new histogram */
// Legacy C API: allocates a CvHistogram of the given dimensionality, backed
// by either a dense MatND (CV_HIST_ARRAY) or a CvSparseMat (CV_HIST_SPARSE),
// and optionally installs the bin ranges.
// NOTE(review): the "CV_IMPL CvHistogram*" return-type line, the NULL-sizes
// check and the trailing `return hist;` are elided in this excerpt.
2498 cvCreateHist( int dims, int *sizes, CvHistType type, float** ranges, int uniform )
2500 CvHistogram *hist = 0;
2502 if( (unsigned)dims > CV_MAX_DIM )
2503 CV_Error( CV_BadOrder, "Number of dimensions is out of range" );
2506 CV_Error( CV_HeaderIsNull, "Null <sizes> pointer" );
2508 hist = (CvHistogram *)cvAlloc( sizeof( CvHistogram ));
// Low bit of `type` distinguishes dense vs sparse in the magic field.
2509 hist->type = CV_HIST_MAGIC_VAL + ((int)type & 1);
2510 if (uniform) hist->type|= CV_HIST_UNIFORM_FLAG;
2513 if( type == CV_HIST_ARRAY )
2515 hist->bins = cvInitMatNDHeader( &hist->mat, dims, sizes,
2516 CV_HIST_DEFAULT_TYPE );
2517 cvCreateData( hist->bins );
2519 else if( type == CV_HIST_SPARSE )
2520 hist->bins = cvCreateSparseMat( dims, sizes, CV_HIST_DEFAULT_TYPE );
2522 CV_Error( CV_StsBadArg, "Invalid histogram type" );
// Ranges are optional; when given they are recorded on the header.
2525 cvSetHistBinRanges( hist, ranges, uniform );
2531 /* Creates histogram wrapping header for given array */
// Legacy C API: initializes a caller-provided CvHistogram header over
// caller-provided float data (no allocation). Only uniform bin ranges are
// accepted, since non-uniform ranges would require allocating thresh2.
// NOTE(review): NULL checks' conditions and the trailing return are elided.
2532 CV_IMPL CvHistogram*
2533 cvMakeHistHeaderForArray( int dims, int *sizes, CvHistogram *hist,
2534 float *data, float **ranges, int uniform )
2537 CV_Error( CV_StsNullPtr, "Null histogram header pointer" );
2540 CV_Error( CV_StsNullPtr, "Null data pointer" );
// No CV_HIST_UNIFORM_FLAG yet; cvSetHistBinRanges sets it below.
2543 hist->type = CV_HIST_MAGIC_VAL;
2544 hist->bins = cvInitMatNDHeader( &hist->mat, dims, sizes, CV_HIST_DEFAULT_TYPE, data );
2549 CV_Error( CV_StsBadArg, "Only uniform bin ranges can be used here "
2550 "(to avoid memory allocation)" );
2551 cvSetHistBinRanges( hist, ranges, uniform );
// Legacy C API: releases a histogram created by cvCreateHist — frees the
// sparse matrix or the dense bin data, the non-uniform threshold table
// (thresh2), and finally the header itself (final cvFree elided).
// NOTE(review): the "CV_IMPL void" line and NULL/early-return guards are
// elided in this excerpt.
2559 cvReleaseHist( CvHistogram **hist )
2562 CV_Error( CV_StsNullPtr, "" );
2566 CvHistogram* temp = *hist;
2568 if( !CV_IS_HIST(temp))
2569 CV_Error( CV_StsBadArg, "Invalid histogram header" );
2572 if( CV_IS_SPARSE_HIST( temp ))
2573 cvReleaseSparseMat( (CvSparseMat**)&temp->bins );
// Dense case: only the data is released; the MatND header is embedded.
2576 cvReleaseData( temp->bins );
// thresh2 holds non-uniform bin boundaries, allocated on demand.
2581 cvFree( &temp->thresh2 );
// Legacy C API: zeroes all histogram bins (works for both dense and sparse
// storage via cvZero). The "CV_IMPL void" line is elided in this excerpt.
2587 cvClearHist( CvHistogram *hist )
2589 if( !CV_IS_HIST(hist) )
2590 CV_Error( CV_StsBadArg, "Invalid histogram header" );
2591 cvZero( hist->bins );
2595 // Clears histogram bins that are below than threshold
// Legacy C API: dense histograms are thresholded in one cvThreshold call;
// sparse histograms are walked node by node (node removal line elided).
// NOTE(review): the "CV_IMPL void" line and brace lines are elided.
2597 cvThreshHist( CvHistogram* hist, double thresh )
2599 if( !CV_IS_HIST(hist) )
2600 CV_Error( CV_StsBadArg, "Invalid histogram header" );
2602 if( !CV_IS_SPARSE_MAT(hist->bins) )
// Dense: TOZERO keeps values > thresh, zeroes the rest in place.
2605 cvGetMat( hist->bins, &mat, 0, 1 );
2606 cvThreshold( &mat, &mat, thresh, 0, CV_THRESH_TOZERO );
2610 CvSparseMat* mat = (CvSparseMat*)hist->bins;
2611 CvSparseMatIterator iterator;
2614 for( node = cvInitSparseMatIterator( mat, &iterator );
2615 node != 0; node = cvGetNextSparseNode( &iterator ))
2617 float* val = (float*)CV_NODE_VAL( mat, node );
// Values <= thresh are removed/zeroed (action line elided).
2618 if( *val <= thresh )
2625 // Normalizes histogram (make sum of the histogram bins == factor)
// Legacy C API: rescales all bins so they sum to `factor`. A near-zero sum
// is left untouched (presumably an early return — the line is elided) to
// avoid dividing by zero.
// NOTE(review): the "CV_IMPL void" line, sum/scale declarations and braces
// are elided in this excerpt.
2627 cvNormalizeHist( CvHistogram* hist, double factor )
2631 if( !CV_IS_HIST(hist) )
2632 CV_Error( CV_StsBadArg, "Invalid histogram header" );
2634 if( !CV_IS_SPARSE_HIST(hist) )
// Dense: one cvSum + one cvScale over the whole bin matrix.
2637 cvGetMat( hist->bins, &mat, 0, 1 );
2638 sum = cvSum( &mat ).val[0];
2639 if( fabs(sum) < DBL_EPSILON )
2641 cvScale( &mat, &mat, factor/sum, 0 );
2645 CvSparseMat* mat = (CvSparseMat*)hist->bins;
2646 CvSparseMatIterator iterator;
// Sparse: first pass sums the non-zero nodes...
2650 for( node = cvInitSparseMatIterator( mat, &iterator );
2651 node != 0; node = cvGetNextSparseNode( &iterator ))
2653 sum += *(float*)CV_NODE_VAL(mat,node);
2656 if( fabs(sum) < DBL_EPSILON )
2658 scale = (float)(factor/sum);
// ...second pass applies the scale in place.
2660 for( node = cvInitSparseMatIterator( mat, &iterator );
2661 node != 0; node = cvGetNextSparseNode( &iterator ))
2663 *(float*)CV_NODE_VAL(mat,node) *= scale;
2669 // Retrieves histogram global min, max and their positions
// Legacy C API: finds the global min/max bin values and (optionally) their
// multi-dimensional indices. Dense histograms use cvMinMaxLoc and decode the
// flat location; sparse histograms scan all nodes using the CV_TOGGLE_FLT
// bit trick to compare float bins as integers.
// NOTE(review): the "CV_IMPL void" line, several declarations (node, minv,
// maxv, Cv32suf m) and the min/max update statements are elided.
2671 cvGetMinMaxHistValue( const CvHistogram* hist,
2672 float *value_min, float* value_max,
2673 int* idx_min, int* idx_max )
2675 double minVal, maxVal;
2676 int dims, size[CV_MAX_DIM];
2678 if( !CV_IS_HIST(hist) )
2679 CV_Error( CV_StsBadArg, "Invalid histogram header" );
2681 dims = cvGetDims( hist->bins, size );
2683 if( !CV_IS_SPARSE_HIST(hist) )
// ---- dense path ----
2686 CvPoint minPt, maxPt;
2688 cvGetMat( hist->bins, &mat, 0, 1 );
2689 cvMinMaxLoc( &mat, &minVal, &maxVal, &minPt, &maxPt );
// 1-D: the matrix is a row or column, so one of y/x is always 0.
2694 *idx_min = minPt.y + minPt.x;
2696 *idx_max = maxPt.y + maxPt.x;
2698 else if( dims == 2 )
2701 idx_min[0] = minPt.y, idx_min[1] = minPt.x;
2703 idx_max[0] = maxPt.y, idx_max[1] = maxPt.x;
2705 else if( idx_min || idx_max )
// N-D: decode the flat element index into per-dimension indices.
2707 int imin = minPt.y*mat.cols + minPt.x;
2708 int imax = maxPt.y*mat.cols + maxPt.x;
2710 for(int i = dims - 1; i >= 0; i-- )
2714 int t = imin / size[i];
2715 idx_min[i] = imin - t*size[i];
2721 int t = imax / size[i];
2722 idx_max[i] = imax - t*size[i];
// ---- sparse path ----
2730 CvSparseMat* mat = (CvSparseMat*)hist->bins;
2731 CvSparseMatIterator iterator;
2735 CvSparseNode* minNode = 0;
2736 CvSparseNode* maxNode = 0;
2737 const int *_idx_min = 0, *_idx_max = 0;
2740 for( node = cvInitSparseMatIterator( mat, &iterator );
2741 node != 0; node = cvGetNextSparseNode( &iterator ))
// CV_TOGGLE_FLT makes the integer ordering of float bit patterns
// match the numeric ordering, so plain int compares suffice.
2743 int value = *(int*)CV_NODE_VAL(mat,node);
2744 value = CV_TOGGLE_FLT(value);
2760 _idx_min = CV_NODE_IDX(mat,minNode);
2761 _idx_max = CV_NODE_IDX(mat,maxNode);
// Toggle back to recover the actual float values.
2762 m.i = CV_TOGGLE_FLT(minv); minVal = m.f;
2763 m.i = CV_TOGGLE_FLT(maxv); maxVal = m.f;
// Empty sparse histogram: report zeros and -1 indices.
2767 minVal = maxVal = 0;
2770 for(int i = 0; i < dims; i++ )
2773 idx_min[i] = _idx_min ? _idx_min[i] : -1;
2775 idx_max[i] = _idx_max ? _idx_max[i] : -1;
2780 *value_min = (float)minVal;
2783 *value_max = (float)maxVal;
2787 // Compares two histograms using one of a few methods
// Returns a scalar similarity/distance score for the chosen `method`
// (CHISQR, CHISQR_ALT, CORREL, INTERSECT, BHATTACHARYYA, KL_DIV).
// NOTE(review): this excerpt is elided — braces and some statements
// (e.g. the return-type line, accumulation statements) are not visible.
2789 cvCompareHist( const CvHistogram* hist1,
2790 const CvHistogram* hist2,
2794 int size1[CV_MAX_DIM], size2[CV_MAX_DIM], total = 1;
2796 if( !CV_IS_HIST(hist1) || !CV_IS_HIST(hist2) )
2797 CV_Error( CV_StsBadArg, "Invalid histogram header[s]" );
// Both histograms must use the same storage kind (dense vs. sparse).
2799 if( CV_IS_SPARSE_MAT(hist1->bins) != CV_IS_SPARSE_MAT(hist2->bins))
2800 CV_Error(CV_StsUnmatchedFormats, "One of histograms is sparse and other is not");
// Dense case: wrap the bin arrays as cv::Mat and delegate to the C++ core.
2802 if( !CV_IS_SPARSE_MAT(hist1->bins) )
2804 cv::Mat H1 = cv::cvarrToMat(hist1->bins);
2805 cv::Mat H2 = cv::cvarrToMat(hist2->bins);
2806 return cv::compareHist(H1, H2, method);
// Sparse case: dimensionality and per-dimension sizes must match exactly.
2809 int dims1 = cvGetDims( hist1->bins, size1 );
2810 int dims2 = cvGetDims( hist2->bins, size2 );
2812 if( dims1 != dims2 )
2813 CV_Error( CV_StsUnmatchedSizes,
2814 "The histograms have different numbers of dimensions" );
2816 for( i = 0; i < dims1; i++ )
2818 if( size1[i] != size2[i] )
2819 CV_Error( CV_StsUnmatchedSizes, "The histograms have different sizes" );
2824 CvSparseMat* mat1 = (CvSparseMat*)(hist1->bins);
2825 CvSparseMat* mat2 = (CvSparseMat*)(hist2->bins);
2826 CvSparseMatIterator iterator;
2827 CvSparseNode *node1, *node2;
// Iterate over the sparser matrix when the method is symmetric;
// asymmetric methods (CHISQR, CHISQR_ALT, KL_DIV) must keep operand order.
2829 if( mat1->heap->active_count > mat2->heap->active_count && method != CV_COMP_CHISQR && method != CV_COMP_CHISQR_ALT && method != CV_COMP_KL_DIV )
2832 CV_SWAP( mat1, mat2, t );
// Chi-square: missing nodes in mat2 count as 0; denominator guarded
// against division by (near-)zero with DBL_EPSILON.
2835 if( (method == CV_COMP_CHISQR) || (method == CV_COMP_CHISQR_ALT) )
2837 for( node1 = cvInitSparseMatIterator( mat1, &iterator );
2838 node1 != 0; node1 = cvGetNextSparseNode( &iterator ))
2840 double v1 = *(float*)CV_NODE_VAL(mat1,node1);
2841 uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1), 0, 0, &node1->hashval );
2842 double v2 = node2_data ? *(float*)node2_data : 0.f;
2844 double b = (method == CV_COMP_CHISQR) ? v1 : v1 + v2;
2845 if( fabs(b) > DBL_EPSILON )
// Correlation: accumulates sums / sums of squares over both matrices,
// then computes the normalized cross-correlation coefficient.
2849 else if( method == CV_COMP_CORREL )
2851 double s1 = 0, s11 = 0;
2852 double s2 = 0, s22 = 0;
2854 double num, denom2, scale = 1./total;
2856 for( node1 = cvInitSparseMatIterator( mat1, &iterator );
2857 node1 != 0; node1 = cvGetNextSparseNode( &iterator ))
2859 double v1 = *(float*)CV_NODE_VAL(mat1,node1);
2860 uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1),
2861 0, 0, &node1->hashval );
2864 double v2 = *(float*)node2_data;
// Second pass over mat2 to pick up its sums (elided accumulation lines).
2871 for( node2 = cvInitSparseMatIterator( mat2, &iterator );
2872 node2 != 0; node2 = cvGetNextSparseNode( &iterator ))
2874 double v2 = *(float*)CV_NODE_VAL(mat2,node2);
2879 num = s12 - s1*s2*scale;
2880 denom2 = (s11 - s1*s1*scale)*(s22 - s2*s2*scale);
// Degenerate (constant) histograms yield correlation 1 by convention.
2881 result = fabs(denom2) > DBL_EPSILON ? num/sqrt(denom2) : 1;
// Intersection: sums min(v1, v2) over co-occurring bins (min elided here).
2883 else if( method == CV_COMP_INTERSECT )
2885 for( node1 = cvInitSparseMatIterator( mat1, &iterator );
2886 node1 != 0; node1 = cvGetNextSparseNode( &iterator ))
2888 float v1 = *(float*)CV_NODE_VAL(mat1,node1);
2889 uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1),
2890 0, 0, &node1->hashval );
2893 float v2 = *(float*)node2_data;
// Bhattacharyya: sum of sqrt(v1*v2) normalized by the two totals.
2901 else if( method == CV_COMP_BHATTACHARYYA )
2903 double s1 = 0, s2 = 0;
2905 for( node1 = cvInitSparseMatIterator( mat1, &iterator );
2906 node1 != 0; node1 = cvGetNextSparseNode( &iterator ))
2908 double v1 = *(float*)CV_NODE_VAL(mat1,node1);
2909 uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1),
2910 0, 0, &node1->hashval );
2914 double v2 = *(float*)node2_data;
2915 result += sqrt(v1 * v2);
2919 for( node1 = cvInitSparseMatIterator( mat2, &iterator );
2920 node1 != 0; node1 = cvGetNextSparseNode( &iterator ))
2922 double v2 = *(float*)CV_NODE_VAL(mat2,node1);
2927 s1 = fabs(s1) > FLT_EPSILON ? 1./sqrt(s1) : 1.;
2928 result = 1. - result*s1;
// Clamp to avoid sqrt of a tiny negative from rounding error.
2929 result = sqrt(MAX(result,0.));
// KL divergence: convert both to cv::SparseMat and reuse the C++ path.
2931 else if( method == CV_COMP_KL_DIV )
2933 cv::SparseMat sH1, sH2;
2934 ((const CvSparseMat*)hist1->bins)->copyToSparseMat(sH1);
2935 ((const CvSparseMat*)hist2->bins)->copyToSparseMat(sH2);
2936 result = cv::compareHist( sH1, sH2, CV_COMP_KL_DIV );
2939 CV_Error( CV_StsBadArg, "Unknown comparison method" );
// CHISQR_ALT applies a final scaling (statement elided in this excerpt).
2941 if( method == CV_COMP_CHISQR_ALT )
2947 // copies one histogram to another
// If *_dst exists with matching storage kind, dimensionality and sizes,
// it is reused; otherwise it is released and re-created to match `src`.
// Bin ranges (uniform thresh or non-uniform thresh2) are copied as well.
2949 cvCopyHist( const CvHistogram* src, CvHistogram** _dst )
2952 CV_Error( CV_StsNullPtr, "Destination double pointer is NULL" );
2954 CvHistogram* dst = *_dst;
2956 if( !CV_IS_HIST(src) || (dst && !CV_IS_HIST(dst)) )
2957 CV_Error( CV_StsBadArg, "Invalid histogram header[s]" );
2960 int size1[CV_MAX_DIM];
2961 bool is_sparse = CV_IS_SPARSE_MAT(src->bins);
2962 int dims1 = cvGetDims( src->bins, size1 );
// Check whether the existing destination is already compatible.
2964 if( dst && (is_sparse == CV_IS_SPARSE_MAT(dst->bins)))
2966 int size2[CV_MAX_DIM];
2967 int dims2 = cvGetDims( dst->bins, size2 );
2969 if( dims1 == dims2 )
2973 for( i = 0; i < dims1; i++ )
2975 if( size1[i] != size2[i] )
// Incompatible: rebuild the destination with the source's layout.
2985 cvReleaseHist( _dst );
2986 dst = cvCreateHist( dims1, size1, !is_sparse ? CV_HIST_ARRAY : CV_HIST_SPARSE, 0, 0 );
// Propagate bin ranges, distinguishing uniform vs. non-uniform sources.
2990 if( CV_HIST_HAS_RANGES( src ))
2992 float* ranges[CV_MAX_DIM];
2995 if( CV_IS_UNIFORM_HIST( src ))
2997 for( int i = 0; i < dims1; i++ )
2998 ranges[i] = (float*)src->thresh[i];
3004 thresh = src->thresh2;
3007 cvSetHistBinRanges( dst, thresh, CV_IS_UNIFORM_HIST(src));
// Finally copy the bin values themselves.
3010 cvCopy( src->bins, dst->bins );
3014 // Sets a value range for every histogram bin
// Uniform mode: stores the [low, high) pair per dimension in hist->thresh.
// Non-uniform mode: allocates hist->thresh2 (one contiguous block holding
// both the pointer table and all boundary values) and validates that each
// dimension's size[i]+1 boundaries are strictly increasing.
3016 cvSetHistBinRanges( CvHistogram* hist, float** ranges, int uniform )
3018 int dims, size[CV_MAX_DIM], total = 0;
3022 CV_Error( CV_StsNullPtr, "NULL ranges pointer" );
3024 if( !CV_IS_HIST(hist) )
3025 CV_Error( CV_StsBadArg, "Invalid histogram header" );
3027 dims = cvGetDims( hist->bins, size );
3028 for( i = 0; i < dims; i++ )
// Uniform path (branch structure partially elided in this excerpt).
3033 for( i = 0; i < dims; i++ )
3036 CV_Error( CV_StsNullPtr, "One of <ranges> elements is NULL" );
3037 hist->thresh[i][0] = ranges[i][0];
3038 hist->thresh[i][1] = ranges[i][1];
3041 hist->type |= CV_HIST_UNIFORM_FLAG + CV_HIST_RANGES_FLAG;
// Non-uniform path: lazily allocate the combined pointer+data buffer.
3047 if( !hist->thresh2 )
3049 hist->thresh2 = (float**)cvAlloc(
3050 dims*sizeof(hist->thresh2[0])+
3051 total*sizeof(hist->thresh2[0][0]));
// Boundary values are laid out immediately after the pointer table.
3053 dim_ranges = (float*)(hist->thresh2 + dims);
3055 for( i = 0; i < dims; i++ )
3057 float val0 = -FLT_MAX;
3060 CV_Error( CV_StsNullPtr, "One of <ranges> elements is NULL" );
3062 for( j = 0; j <= size[i]; j++ )
3064 float val = ranges[i][j];
// NOTE(review): "ascenting" is a typo for "ascending" in this runtime
// error string (left untouched here; changing it alters behavior).
3066 CV_Error(CV_StsOutOfRange, "Bin ranges should go in ascenting order");
3067 val0 = dim_ranges[j] = val;
3070 hist->thresh2[i] = dim_ranges;
3071 dim_ranges += size[i] + 1;
// Non-uniform: RANGES flag set, UNIFORM flag explicitly cleared.
3074 hist->type |= CV_HIST_RANGES_FLAG;
3075 hist->type &= ~CV_HIST_UNIFORM_FLAG;
// Computes a histogram over one image per histogram dimension.
// Dense bins go straight through cv::calcHist; sparse bins round-trip
// through cv::SparseMat and are converted back node by node.
3081 cvCalcArrHist( CvArr** img, CvHistogram* hist, int accumulate, const CvArr* mask )
3083 if( !CV_IS_HIST(hist))
3084 CV_Error( CV_StsBadArg, "Bad histogram pointer" );
3087 CV_Error( CV_StsNullPtr, "Null double array pointer" );
3089 int size[CV_MAX_DIM];
3090 int i, dims = cvGetDims( hist->bins, size);
3091 bool uniform = CV_IS_UNIFORM_HIST(hist);
// One input plane per histogram dimension.
3093 std::vector<cv::Mat> images(dims);
3094 for( i = 0; i < dims; i++ )
3095 images[i] = cv::cvarrToMat(img[i]);
3099 _mask = cv::cvarrToMat(mask);
// Build the ranges table expected by cv::calcHist: non-uniform uses
// thresh2 directly; uniform re-points at the per-dimension [lo,hi] pairs.
3101 const float* uranges[CV_MAX_DIM] = {0};
3102 const float** ranges = 0;
3104 if( hist->type & CV_HIST_RANGES_FLAG )
3106 ranges = (const float**)hist->thresh2;
3109 for( i = 0; i < dims; i++ )
3110 uranges[i] = &hist->thresh[i][0];
3115 if( !CV_IS_SPARSE_HIST(hist) )
// Dense: compute directly into the existing bins array.
3117 cv::Mat H = cv::cvarrToMat(hist->bins);
3118 cv::calcHist( &images[0], (int)images.size(), 0, _mask,
3119 H, cvGetDims(hist->bins), H.size, ranges, uniform, accumulate != 0 );
3123 CvSparseMat* sparsemat = (CvSparseMat*)hist->bins;
// Presumably cleared here only when not accumulating — the guarding
// condition is elided in this excerpt; TODO confirm against upstream.
3126 cvZero( hist->bins );
3128 sparsemat->copyToSparseMat(sH);
3129 cv::calcHist( &images[0], (int)images.size(), 0, _mask, sH, sH.dims(),
3130 sH.dims() > 0 ? sH.hdr->size : 0, ranges, uniform, accumulate != 0, true );
3133 cvZero( sparsemat );
// Copy non-zero nodes back, converting the int counters to float bins.
3135 cv::SparseMatConstIterator it = sH.begin();
3136 int nz = (int)sH.nzcount();
3137 for( i = 0; i < nz; i++, ++it )
3138 *(float*)cvPtrND(sparsemat, it.node()->idx, 0, -2) = (float)*(const int*)it.ptr;
// Back-projects a histogram onto the input image(s): each destination
// pixel receives the bin value its source pixel tuple falls into.
// Delegates to cv::calcBackProject for both dense and sparse bins.
3144 cvCalcArrBackProject( CvArr** img, CvArr* dst, const CvHistogram* hist )
3146 if( !CV_IS_HIST(hist))
3147 CV_Error( CV_StsBadArg, "Bad histogram pointer" );
3150 CV_Error( CV_StsNullPtr, "Null double array pointer" );
3152 int size[CV_MAX_DIM];
3153 int i, dims = cvGetDims( hist->bins, size );
3155 bool uniform = CV_IS_UNIFORM_HIST(hist);
// Same ranges-table construction as cvCalcArrHist above.
3156 const float* uranges[CV_MAX_DIM] = {0};
3157 const float** ranges = 0;
3159 if( hist->type & CV_HIST_RANGES_FLAG )
3161 ranges = (const float**)hist->thresh2;
3164 for( i = 0; i < dims; i++ )
3165 uranges[i] = &hist->thresh[i][0];
3170 std::vector<cv::Mat> images(dims);
3171 for( i = 0; i < dims; i++ )
3172 images[i] = cv::cvarrToMat(img[i]);
3174 cv::Mat _dst = cv::cvarrToMat(dst);
// Destination must match the input geometry and depth.
3176 CV_Assert( _dst.size() == images[0].size() && _dst.depth() == images[0].depth() );
3178 if( !CV_IS_SPARSE_HIST(hist) )
3180 cv::Mat H = cv::cvarrToMat(hist->bins);
3181 cv::calcBackProject( &images[0], (int)images.size(),
3182 0, H, _dst, ranges, 1, uniform );
// Sparse: copy into a cv::SparseMat first, then back-project.
3187 ((const CvSparseMat*)hist->bins)->copyToSparseMat(sH);
3188 cv::calcBackProject( &images[0], (int)images.size(),
3189 0, sH, _dst, ranges, 1, uniform );
3194 ////////////////////// B A C K P R O J E C T P A T C H /////////////////////////
// Slides a patch_size window over the input images; for each position it
// computes a normalized histogram of the patch and writes its comparison
// score against `hist` (using `method`) into the 32F destination map.
3197 cvCalcArrBackProjectPatch( CvArr** arr, CvArr* dst, CvSize patch_size, CvHistogram* hist,
3198 int method, double norm_factor )
3200 CvHistogram* model = 0;
3202 IplImage imgstub[CV_MAX_DIM], *img[CV_MAX_DIM];
3204 CvMat dststub, *dstmat;
3209 if( !CV_IS_HIST(hist))
3210 CV_Error( CV_StsBadArg, "Bad histogram pointer" );
3213 CV_Error( CV_StsNullPtr, "Null double array pointer" );
3215 if( norm_factor <= 0 )
3216 CV_Error( CV_StsOutOfRange,
3217 "Bad normalization factor (set it to 1.0 if unsure)" );
3219 if( patch_size.width <= 0 || patch_size.height <= 0 )
3220 CV_Error( CV_StsBadSize, "The patch width and height must be positive" );
3222 dims = cvGetDims( hist->bins );
// Reference histogram is normalized once, up front.
3223 cvNormalizeHist( hist, norm_factor );
// Wrap each input array as an IplImage so ROIs can be set per patch.
3225 for( i = 0; i < dims; i++ )
3228 mat = cvGetMat( arr[i], &stub, 0, 0 );
3229 img[i] = cvGetImage( mat, &imgstub[i] );
3233 dstmat = cvGetMat( dst, &dststub, 0, 0 );
3234 if( CV_MAT_TYPE( dstmat->type ) != CV_32FC1 )
3235 CV_Error( CV_StsUnsupportedFormat, "Resultant image must have 32fC1 type" );
// Output is one score per valid window position: (W-w+1) x (H-h+1).
3237 if( dstmat->cols != img[0]->width - patch_size.width + 1 ||
3238 dstmat->rows != img[0]->height - patch_size.height + 1 )
3239 CV_Error( CV_StsUnmatchedSizes,
3240 "The output map must be (W-w+1 x H-h+1), "
3241 "where the input images are (W x H) each and the patch is (w x h)" );
// `model` is a scratch histogram with the same layout as `hist`.
3243 cvCopyHist( hist, &model );
3245 size = cvGetMatSize(dstmat);
3247 roi.width = patch_size.width;
3248 roi.height = patch_size.height;
3250 for( y = 0; y < size.height; y++ )
3252 for( x = 0; x < size.width; x++ )
// (ROI positioning per (x, y) is elided in this excerpt.)
3258 cvCalcHist( img, model );
3259 cvNormalizeHist( model, norm_factor );
3260 result = cvCompareHist( model, hist, method );
3261 CV_MAT_ELEM( *dstmat, float, y, x ) = (float)result;
3265 cvReleaseHist( &model );
3269 // Calculates Bayes probabilistic histograms
// dst[i] = src[i] / (src[0] + ... + src[count-1]), computed element-wise:
// dst[0] first accumulates the sum, is inverted in place, then each dst[i]
// is formed by multiplication. Dense (CvMatND) histograms only.
3271 cvCalcBayesianProb( CvHistogram** src, int count, CvHistogram** dst )
3276 CV_Error( CV_StsNullPtr, "NULL histogram array pointer" );
3279 CV_Error( CV_StsOutOfRange, "Too small number of histograms" );
3281 for( i = 0; i < count; i++ )
3283 if( !CV_IS_HIST(src[i]) || !CV_IS_HIST(dst[i]) )
3284 CV_Error( CV_StsBadArg, "Invalid histogram header" );
3286 if( !CV_IS_MATND(src[i]->bins) || !CV_IS_MATND(dst[i]->bins) )
3287 CV_Error( CV_StsBadArg, "The function supports dense histograms only" );
3290 cvZero( dst[0]->bins );
3291 // dst[0] = src[0] + ... + src[count-1]
3292 for( i = 0; i < count; i++ )
3293 cvAdd( src[i]->bins, dst[0]->bins, dst[0]->bins );
// cvDiv with a NULL numerator computes the element-wise reciprocal.
3295 cvDiv( 0, dst[0]->bins, dst[0]->bins );
3297 // dst[i] = src[i]*(1/dst[0])
// Iterates backwards so dst[0] (holding the reciprocal) is consumed last.
3298 for( i = count - 1; i >= 0; i-- )
3299 cvMul( src[i]->bins, dst[0]->bins, dst[i]->bins );
// Computes hist_dens = hist_mask / hist * scale, element-wise, over all
// three 32F histograms, with clamping for empty source bins.
3304 cvCalcProbDensity( const CvHistogram* hist, const CvHistogram* hist_mask,
3305 CvHistogram* hist_dens, double scale )
3308 CV_Error( CV_StsOutOfRange, "scale must be positive" );
3310 if( !CV_IS_HIST(hist) || !CV_IS_HIST(hist_mask) || !CV_IS_HIST(hist_dens) )
3311 CV_Error( CV_StsBadArg, "Invalid histogram pointer[s]" );
// Walk all three bin arrays in lockstep with an N-array iterator.
3314 CvArr* arrs[] = { hist->bins, hist_mask->bins, hist_dens->bins };
3316 CvNArrayIterator iterator;
3318 cvInitNArrayIterator( 3, arrs, 0, stubs, &iterator );
3320 if( CV_MAT_TYPE(iterator.hdr[0]->type) != CV_32FC1 )
3321 CV_Error( CV_StsUnsupportedFormat, "All histograms must have 32fC1 type" );
3325 const float* srcdata = (const float*)(iterator.ptr[0]);
3326 const float* maskdata = (const float*)(iterator.ptr[1]);
3327 float* dstdata = (float*)(iterator.ptr[2]);
3330 for( i = 0; i < iterator.size.width; i++ )
3332 float s = srcdata[i];
3333 float m = maskdata[i];
// Non-empty source bin: scaled ratio. The guarding conditions for the
// two fallback branches below are elided in this excerpt.
3334 if( s > FLT_EPSILON )
3336 dstdata[i] = (float)(m*scale/s);
3338 dstdata[i] = (float)scale;
3340 dstdata[i] = (float)0;
3343 while( cvNextNArraySlice( &iterator ));
// Parallel body that accumulates a 256-bin grayscale histogram over a row
// range: each worker fills a stack-local histogram, then merges it into
// the shared global histogram under the provided mutex.
3347 class EqualizeHistCalcHist_Invoker : public cv::ParallelLoopBody
3350 enum {HIST_SZ = 256};
3352 EqualizeHistCalcHist_Invoker(cv::Mat& src, int* histogram, cv::Mutex* histogramLock)
3353 : src_(src), globalHistogram_(histogram), histogramLock_(histogramLock)
3356 void operator()( const cv::Range& rowRange ) const
// Local histogram avoids contention on the shared one in the hot loop.
3358 int localHistogram[HIST_SZ] = {0, };
3360 const size_t sstep = src_.step;
3362 int width = src_.cols;
3363 int height = rowRange.end - rowRange.start;
// Continuous data can be treated as one long row (reshape elided here).
3365 if (src_.isContinuous())
3371 for (const uchar* ptr = src_.ptr<uchar>(rowRange.start); height--; ptr += sstep)
// Manual 4x unroll of the counting loop.
3374 for (; x <= width - 4; x += 4)
3376 int t0 = ptr[x], t1 = ptr[x+1];
3377 localHistogram[t0]++; localHistogram[t1]++;
3378 t0 = ptr[x+2]; t1 = ptr[x+3];
3379 localHistogram[t0]++; localHistogram[t1]++;
3382 for (; x < width; ++x)
3383 localHistogram[ptr[x]]++;
// Merge phase: single lock per chunk, not per pixel.
3386 cv::AutoLock lock(*histogramLock_);
3388 for( int i = 0; i < HIST_SZ; i++ )
3389 globalHistogram_[i] += localHistogram[i];
// Heuristic: parallelism only pays off from ~VGA-sized images upward.
3392 static bool isWorthParallel( const cv::Mat& src )
3394 return ( src.total() >= 640*480 );
// Assignment deleted (declared, never defined) — members include a ref.
3398 EqualizeHistCalcHist_Invoker& operator=(const EqualizeHistCalcHist_Invoker&);
3401 int* globalHistogram_;
3402 cv::Mutex* histogramLock_;
// Parallel body that applies a precomputed 256-entry LUT to map src
// pixels into dst over a row range (the second pass of equalizeHist).
3405 class EqualizeHistLut_Invoker : public cv::ParallelLoopBody
3408 EqualizeHistLut_Invoker( cv::Mat& src, cv::Mat& dst, int* lut )
3414 void operator()( const cv::Range& rowRange ) const
3416 const size_t sstep = src_.step;
3417 const size_t dstep = dst_.step;
3419 int width = src_.cols;
3420 int height = rowRange.end - rowRange.start;
// Both continuous: process as one long row (reshape elided here).
3423 if (src_.isContinuous() && dst_.isContinuous())
3429 const uchar* sptr = src_.ptr<uchar>(rowRange.start);
3430 uchar* dptr = dst_.ptr<uchar>(rowRange.start);
3432 for (; height--; sptr += sstep, dptr += dstep)
// Manual 4x unroll of the LUT application (loads elided in excerpt).
3435 for (; x <= width - 4; x += 4)
3441 dptr[x] = (uchar)x0;
3442 dptr[x+1] = (uchar)x1;
3448 dptr[x+2] = (uchar)x0;
3449 dptr[x+3] = (uchar)x1;
3452 for (; x < width; ++x)
3453 dptr[x] = (uchar)lut[sptr[x]];
// Same parallelism threshold as the histogram pass.
3457 static bool isWorthParallel( const cv::Mat& src )
3459 return ( src.total() >= 640*480 );
// Assignment deleted (declared, never defined).
3463 EqualizeHistLut_Invoker& operator=(const EqualizeHistLut_Invoker&);
// Legacy C-API wrapper: converts the CvArr headers and forwards to the
// C++ implementation cv::equalizeHist.
3470 CV_IMPL void cvEqualizeHist( const CvArr* srcarr, CvArr* dstarr )
3472 cv::equalizeHist(cv::cvarrToMat(srcarr), cv::cvarrToMat(dstarr));
// OpenCL path for equalizeHist: kernel 1 computes per-compute-unit
// partial histograms, kernel 2 reduces them into a 256-entry LUT, then
// the generic LUT() applies it. Returns false to fall back to CPU on
// any kernel failure. `BINS` is defined outside this excerpt.
3479 static bool ocl_equalizeHist(InputArray _src, OutputArray _dst)
3481 const ocl::Device & dev = ocl::Device::getDefault();
3482 int compunits = dev.maxComputeUnits();
3483 size_t wgs = dev.maxWorkGroupSize();
3484 Size size = _src.size();
// 16-wide vectorized loads require width/offset/step all 16-aligned.
3485 bool use16 = size.width % 16 == 0 && _src.offset() % 16 == 0 && _src.step() % 16 == 0;
3486 int kercn = dev.isAMD() && use16 ? 16 : std::min(4, ocl::predictOptimalVectorWidth(_src));
3488 ocl::Kernel k1("calculate_histogram", ocl::imgproc::histogram_oclsrc,
3489 format("-D BINS=%d -D HISTS_COUNT=%d -D WGS=%d -D kercn=%d -D T=%s%s",
3490 BINS, compunits, wgs, kercn,
3491 kercn == 4 ? "int" : ocl::typeToStr(CV_8UC(kercn)),
3492 _src.isContinuous() ? " -D HAVE_SRC_CONT" : ""));
// One partial histogram per compute unit, reduced by the second kernel.
3496 UMat src = _src.getUMat(), ghist(1, BINS * compunits, CV_32SC1);
3498 k1.args(ocl::KernelArg::ReadOnly(src),
3499 ocl::KernelArg::PtrWriteOnly(ghist), (int)src.total());
3501 size_t globalsize = compunits * wgs;
3502 if (!k1.run(1, &globalsize, &wgs, false))
3505 wgs = std::min<size_t>(ocl::Device::getDefault().maxWorkGroupSize(), BINS);
3506 UMat lut(1, 256, CV_8UC1);
3507 ocl::Kernel k2("calcLUT", ocl::imgproc::histogram_oclsrc,
3508 format("-D BINS=%d -D HISTS_COUNT=%d -D WGS=%d",
3509 BINS, compunits, (int)wgs));
3510 k2.args(ocl::KernelArg::PtrWriteOnly(lut),
3511 ocl::KernelArg::PtrReadOnly(ghist), (int)_src.total());
3513 // calculation of LUT
3514 if (!k2.run(1, &wgs, &wgs, false))
3517 // execute LUT transparently
3518 LUT(_src, lut, _dst);
// Histogram equalization of an 8-bit single-channel image:
// pass 1 builds the 256-bin histogram (parallel when worthwhile),
// then a cumulative LUT is derived and applied in pass 2.
3526 void cv::equalizeHist( InputArray _src, OutputArray _dst )
3528 CV_Assert( _src.type() == CV_8UC1 );
// Try the OpenCL path first for UMat destinations.
3533 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
3534 ocl_equalizeHist(_src, _dst))
3536 Mat src = _src.getMat();
3537 _dst.create( src.size(), src.type() );
3538 Mat dst = _dst.getMat();
3540 Mutex histogramLockInstance;
3542 const int hist_sz = EqualizeHistCalcHist_Invoker::HIST_SZ;
3543 int hist[hist_sz] = {0,};
3546 EqualizeHistCalcHist_Invoker calcBody(src, hist, &histogramLockInstance);
3547 EqualizeHistLut_Invoker lutBody(src, dst, lut);
3548 cv::Range heightRange(0, src.rows);
3550 if(EqualizeHistCalcHist_Invoker::isWorthParallel(src))
3551 parallel_for_(heightRange, calcBody);
3553 calcBody(heightRange);
// Find the first non-empty bin; a single-valued image maps trivially
// (that early-out branch body is elided in this excerpt).
3556 while (!hist[i]) ++i;
3558 int total = (int)src.total();
3559 if (hist[i] == total)
// Scale chosen so the cumulative sum spans the full [0, 255] range,
// excluding the first occupied bin.
3565 float scale = (hist_sz - 1.f)/(total - hist[i]);
3568 for (lut[i++] = 0; i < hist_sz; ++i)
3571 lut[i] = saturate_cast<uchar>(sum * scale);
3574 if(EqualizeHistLut_Invoker::isWorthParallel(src))
3575 parallel_for_(heightRange, lutBody);
3577 lutBody(heightRange);
3580 // ----------------------------------------------------------------------
3582 /* Implementation of RTTI and Generic Functions for CvHistogram */
// Type name used when (de)serializing CvHistogram via CvFileStorage.
3583 #define CV_TYPE_NAME_HIST "opencv-hist"
// RTTI predicate: true if `ptr` looks like a valid CvHistogram header.
3585 static int icvIsHist( const void * ptr )
3587 return CV_IS_HIST( ((CvHistogram*)ptr) );
// RTTI clone hook: deep-copies `src` into a freshly allocated histogram
// via cvCopyHist (dst starts NULL so cvCopyHist creates it).
3590 static CvHistogram * icvCloneHist( const CvHistogram * src )
3592 CvHistogram * dst=NULL;
3593 cvCopyHist(src, &dst);
// RTTI read hook: reconstructs a CvHistogram from a file-storage node.
// Reads the type flags, then either a dense CvMatND ("mat") or a sparse
// matrix ("bins"), and finally the bin ranges ("thresh") if present.
3597 static void *icvReadHist( CvFileStorage * fs, CvFileNode * node )
3599 CvHistogram * h = 0;
3602 int have_ranges = 0;
3604 h = (CvHistogram *)cvAlloc( sizeof(CvHistogram) );
// Reassemble the type word from the stored flag fields.
3606 type = cvReadIntByName( fs, node, "type", 0 );
3607 is_uniform = cvReadIntByName( fs, node, "is_uniform", 0 );
3608 have_ranges = cvReadIntByName( fs, node, "have_ranges", 0 );
3609 h->type = CV_HIST_MAGIC_VAL | type |
3610 (is_uniform ? CV_HIST_UNIFORM_FLAG : 0) |
3611 (have_ranges ? CV_HIST_RANGES_FLAG : 0);
3613 if(type == CV_HIST_ARRAY)
3615 // read histogram bins
3616 CvMatND* mat = (CvMatND*)cvReadByName( fs, node, "mat" );
3617 int i, sizes[CV_MAX_DIM];
3619 if(!CV_IS_MATND(mat))
3620 CV_Error( CV_StsError, "Expected CvMatND");
3622 for(i=0; i<mat->dims; i++)
3623 sizes[i] = mat->dim[i].size;
// Embed the bins in the histogram's own CvMatND header, sharing the
// data buffer read from storage.
3625 cvInitMatNDHeader( &(h->mat), mat->dims, sizes, mat->type, mat->data.ptr );
3626 h->bins = &(h->mat);
3628 // take ownership of refcount pointer as well
3629 h->mat.refcount = mat->refcount;
3631 // increase refcount so freeing temp header doesn't free data
3632 cvIncRefData( mat );
3634 // free temporary header
3635 cvReleaseMatND( &mat );
// Sparse case: the bins node itself must be a sparse matrix.
3639 h->bins = cvReadByName( fs, node, "bins" );
3640 if(!CV_IS_SPARSE_MAT(h->bins)){
3641 CV_Error( CV_StsError, "Unknown Histogram type");
// Read bin ranges if the histogram was stored with them.
3648 int i, dims, size[CV_MAX_DIM], total = 0;
3650 CvFileNode * thresh_node;
3652 dims = cvGetDims( h->bins, size );
3653 for( i = 0; i < dims; i++ )
3656 thresh_node = cvGetFileNodeByName( fs, node, "thresh" );
3658 CV_Error( CV_StsError, "'thresh' node is missing");
3659 cvStartReadRawData( fs, thresh_node, &reader );
// Uniform: two floats ([lo, hi]) per dimension into h->thresh.
3663 for(i=0; i<dims; i++)
3664 cvReadRawDataSlice( fs, &reader, 2, h->thresh[i], "f" );
// Non-uniform: size[i]+1 boundaries per dimension into h->thresh2,
// allocated as one block (pointer table + boundary data), mirroring
// the layout built by cvSetHistBinRanges.
3670 h->thresh2 = (float**)cvAlloc(
3671 dims*sizeof(h->thresh2[0])+
3672 total*sizeof(h->thresh2[0][0]));
3673 dim_ranges = (float*)(h->thresh2 + dims);
3674 for(i=0; i < dims; i++)
3676 h->thresh2[i] = dim_ranges;
3677 cvReadRawDataSlice( fs, &reader, size[i]+1, dim_ranges, "f" );
3678 dim_ranges += size[i] + 1;
// RTTI write hook: serializes a CvHistogram to file storage as a map
// node — type flags, then "mat" (dense) or "bins" (sparse), then an
// optional flat "thresh" sequence with the bin ranges.
3686 static void icvWriteHist( CvFileStorage* fs, const char* name,
3687 const void* struct_ptr, CvAttrList /*attributes*/ )
3689 const CvHistogram * hist = (const CvHistogram *) struct_ptr;
3690 int sizes[CV_MAX_DIM];
3693 int is_uniform, have_ranges;
3695 cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_HIST );
3697 is_uniform = (CV_IS_UNIFORM_HIST(hist) ? 1 : 0);
3698 have_ranges = (hist->type & CV_HIST_RANGES_FLAG ? 1 : 0);
// Only the low type bit (array vs. sparse) is persisted; the magic and
// flag bits are reconstructed by icvReadHist.
3700 cvWriteInt( fs, "type", (hist->type & 1) );
3701 cvWriteInt( fs, "is_uniform", is_uniform );
3702 cvWriteInt( fs, "have_ranges", have_ranges );
3703 if(!CV_IS_SPARSE_HIST(hist))
3704 cvWrite( fs, "mat", &(hist->mat) );
3706 cvWrite( fs, "bins", hist->bins );
// Ranges, if present, go out as one flow sequence of raw floats.
3710 dims = cvGetDims( hist->bins, sizes );
3711 cvStartWriteStruct( fs, "thresh", CV_NODE_SEQ + CV_NODE_FLOW );
// Uniform: [lo, hi] pair per dimension.
3713 for(i=0; i<dims; i++){
3714 cvWriteRawData( fs, hist->thresh[i], 2, "f" );
// Non-uniform: sizes[i]+1 boundaries per dimension from thresh2.
3718 for(i=0; i<dims; i++){
3719 cvWriteRawData( fs, hist->thresh2[i], sizes[i]+1, "f" );
3722 cvEndWriteStruct( fs );
3725 cvEndWriteStruct( fs );
// Registers the CvHistogram RTTI entry: predicate, release, read, write
// and clone hooks, keyed by the "opencv-hist" type name.
3729 CvType hist_type( CV_TYPE_NAME_HIST, icvIsHist, (CvReleaseFunc)cvReleaseHist,
3730 icvReadHist, icvWriteHist, (CvCloneFunc)icvCloneHist );