1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 /* ////////////////////////////////////////////////////////////////////
45 // Filling CvMat/IplImage instances with random numbers
49 #include "precomp.hpp"
51 #if defined WIN32 || defined WINCE
59 #if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
60 #include "emmintrin.h"
66 ///////////////////////////// Functions Declaration //////////////////////////////////////
Multiply-with-carry generator is used here:
temp = ( A*X(n) + carry )
X(n+1) = temp mod (2^32)
// Advance the 64-bit MWC state one step: the low 32 bits of x hold X(n),
// the high 32 bits hold the carry; CV_RNG_COEFF is the multiplier A.
#define RNG_NEXT(x) ((uint64)(unsigned)(x)*CV_RNG_COEFF + ((x) >> 32))
77 /***************************************************************************************\
78 * Pseudo-Random Number Generators (PRNGs) *
79 \***************************************************************************************/
// Fill arr[0..len) with uniform integers by masking MWC output with a
// per-element power-of-two mask p[i][0] and adding the offset p[i][1].
// NOTE(review): this listing is elided — the opening brace, local
// declarations, the small_flag branch keywords and the state write-back are
// missing here; comments below describe the visible fragments only.
template<typename T> static void
randBits_( T* arr, int len, uint64* state, const Vec2i* p, bool small_flag )
// Wide-range path: one RNG_NEXT draw per output element, unrolled by 4.
for( i = 0; i <= len - 4; i += 4 )
temp = RNG_NEXT(temp);
t0 = ((int)temp & p[i][0]) + p[i][1];
temp = RNG_NEXT(temp);
t1 = ((int)temp & p[i+1][0]) + p[i+1][1];
arr[i] = saturate_cast<T>(t0);
arr[i+1] = saturate_cast<T>(t1);
temp = RNG_NEXT(temp);
t0 = ((int)temp & p[i+2][0]) + p[i+2][1];
temp = RNG_NEXT(temp);
t1 = ((int)temp & p[i+3][0]) + p[i+3][1];
arr[i+2] = saturate_cast<T>(t0);
arr[i+3] = saturate_cast<T>(t1);
// Second path: one 32-bit draw supplies four elements via byte shifts —
// presumably the small_flag (<= 8-bit mask) branch; TODO confirm against
// the full source.
for( i = 0; i <= len - 4; i += 4 )
temp = RNG_NEXT(temp);
t0 = (t & p[i][0]) + p[i][1];
t1 = ((t >> 8) & p[i+1][0]) + p[i+1][1];
arr[i] = saturate_cast<T>(t0);
arr[i+1] = saturate_cast<T>(t1);
t0 = ((t >> 16) & p[i+2][0]) + p[i+2][1];
t1 = ((t >> 24) & p[i+3][0]) + p[i+3][1];
arr[i+2] = saturate_cast<T>(t0);
arr[i+3] = saturate_cast<T>(t1);
// Tail loop: remaining 0..3 elements, one draw each.
for( ; i < len; i++ )
temp = RNG_NEXT(temp);
t0 = ((int)temp & p[i][0]) + p[i][1];
arr[i] = saturate_cast<T>(t0);
// Fill arr[0..len) with uniform integers over arbitrary (non-power-of-two)
// ranges. The remainder "t0 % d" is computed without a divide using the
// precomputed magic multiplier p[i].M and shifts p[i].sh1/sh2 (invariant
// integer division by multiplication); p[i].delta then shifts the result to
// the requested lower bound.
// NOTE(review): listing elided — braces and the "t0/t1 = (unsigned)temp"
// assignments between the RNG_NEXT calls are missing here.
template<typename T> static void
randi_( T* arr, int len, uint64* state, const DivStruct* p )
uint64 temp = *state;
unsigned t0, t1, v0, v1;
// Main loop unrolled by 4; each element consumes one RNG_NEXT draw.
for( i = 0; i <= len - 4; i += 4 )
temp = RNG_NEXT(temp);
temp = RNG_NEXT(temp);
// v = floor(t / d) via multiply-high + shift; then t - v*d is t % d.
v0 = (unsigned)(((uint64)t0 * p[i].M) >> 32);
v1 = (unsigned)(((uint64)t1 * p[i+1].M) >> 32);
v0 = (v0 + ((t0 - v0) >> p[i].sh1)) >> p[i].sh2;
v1 = (v1 + ((t1 - v1) >> p[i+1].sh1)) >> p[i+1].sh2;
v0 = t0 - v0*p[i].d + p[i].delta;
v1 = t1 - v1*p[i+1].d + p[i+1].delta;
arr[i] = saturate_cast<T>((int)v0);
arr[i+1] = saturate_cast<T>((int)v1);
temp = RNG_NEXT(temp);
temp = RNG_NEXT(temp);
v0 = (unsigned)(((uint64)t0 * p[i+2].M) >> 32);
v1 = (unsigned)(((uint64)t1 * p[i+3].M) >> 32);
v0 = (v0 + ((t0 - v0) >> p[i+2].sh1)) >> p[i+2].sh2;
v1 = (v1 + ((t1 - v1) >> p[i+3].sh1)) >> p[i+3].sh2;
v0 = t0 - v0*p[i+2].d + p[i+2].delta;
v1 = t1 - v1*p[i+3].d + p[i+3].delta;
arr[i+2] = saturate_cast<T>((int)v0);
arr[i+3] = saturate_cast<T>((int)v1);
// Tail loop for the remaining 0..3 elements.
for( ; i < len; i++ )
temp = RNG_NEXT(temp);
v0 = (unsigned)(((uint64)t0 * p[i].M) >> 32);
v0 = (v0 + ((t0 - v0) >> p[i].sh1)) >> p[i].sh2;
v0 = t0 - v0*p[i].d + p[i].delta;
arr[i] = saturate_cast<T>((int)v0);
// Stamp out typed entry points for the two integer kernels. Each suffix
// yields randBits_<suffix> (mask-based fast path) and randi_<suffix>
// (exact-range path); the trailing unused bool keeps the randi_ wrappers
// signature-compatible with randBits_ so both fit one function table.
#define DEF_RANDI_FUNC(suffix, type) \
static void randBits_##suffix(type* arr, int len, uint64* state, \
const Vec2i* p, bool small_flag) \
{ randBits_(arr, len, state, p, small_flag); } \
static void randi_##suffix(type* arr, int len, uint64* state, \
const DivStruct* p, bool ) \
{ randi_(arr, len, state, p); }

// Instantiations for every integer depth supported by RNG::fill.
DEF_RANDI_FUNC(8u, uchar)
DEF_RANDI_FUNC(8s, schar)
DEF_RANDI_FUNC(16u, ushort)
DEF_RANDI_FUNC(16s, short)
DEF_RANDI_FUNC(32s, int)
// Fill arr[0..len) with uniform floats: arr[i] = X*p[i][0] + p[i][1], where
// X is a signed 32-bit MWC draw reinterpreted as float. The unused bool
// matches the shared RandFunc signature.
// NOTE(review): listing elided — braces, loop-variable declarations and the
// state write-back are missing here.
static void randf_32f( float* arr, int len, uint64* state, const Vec2f* p, bool )
uint64 temp = *state;
for( ; i <= len - 4; i += 4 )
// Draw four samples as signed ints converted to float.
f[0] = (float)(int)(temp = RNG_NEXT(temp));
f[1] = (float)(int)(temp = RNG_NEXT(temp));
f[2] = (float)(int)(temp = RNG_NEXT(temp));
f[3] = (float)(int)(temp = RNG_NEXT(temp));
// handwritten SSE is required not for performance but for numerical stability!
// both 32-bit gcc and MSVC compilers trend to generate double precision SSE
// while 64-bit compilers generate single precision SIMD instructions
// so manual vectorisation forces all compilers to the single precision
#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
__m128 q0 = _mm_loadu_ps((const float*)(p + i));
__m128 q1 = _mm_loadu_ps((const float*)(p + i + 2));
// Transpose the four Vec2f (scale, bias) pairs into one scale vector p0
// and one bias vector p1.
__m128 q01l = _mm_unpacklo_ps(q0, q1);
__m128 q01h = _mm_unpackhi_ps(q0, q1);
__m128 p0 = _mm_unpacklo_ps(q01l, q01h);
__m128 p1 = _mm_unpackhi_ps(q01l, q01h);
_mm_storeu_ps(arr + i, _mm_add_ps(_mm_mul_ps(_mm_loadu_ps(f), p0), p1));
// Scalar fallback (the #else is elided in this listing).
arr[i+0] = f[0]*p[i+0][0] + p[i+0][1];
arr[i+1] = f[1]*p[i+1][0] + p[i+1][1];
arr[i+2] = f[2]*p[i+2][0] + p[i+2][1];
arr[i+3] = f[3]*p[i+3][0] + p[i+3][1];
// Tail loop: remaining 0..3 elements, same single-precision contract.
for( ; i < len; i++ )
temp = RNG_NEXT(temp);
#if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
_mm_store_ss(arr + i, _mm_add_ss(
_mm_mul_ss(_mm_set_ss((float)(int)temp), _mm_set_ss(p[i][0])),
arr[i] = (int)temp*p[i][0] + p[i][1];
// Double-precision variant: builds a 64-bit sample by swapping the two
// 32-bit halves of the MWC state, then maps it as v*p[i][0] + p[i][1].
// NOTE(review): listing elided — the "static void" return-type line, braces
// and local declarations are missing here.
randf_64f( double* arr, int len, uint64* state, const Vec2d* p, bool )
uint64 temp = *state;
for( i = 0; i <= len - 4; i += 4 )
temp = RNG_NEXT(temp);
// Rotate halves so both words contribute high-order randomness.
v = (temp >> 32)|(temp << 32);
f0 = v*p[i][0] + p[i][1];
temp = RNG_NEXT(temp);
v = (temp >> 32)|(temp << 32);
f1 = v*p[i+1][0] + p[i+1][1];
arr[i] = f0; arr[i+1] = f1;
temp = RNG_NEXT(temp);
v = (temp >> 32)|(temp << 32);
f0 = v*p[i+2][0] + p[i+2][1];
temp = RNG_NEXT(temp);
v = (temp >> 32)|(temp << 32);
f1 = v*p[i+3][0] + p[i+3][1];
arr[i+2] = f0; arr[i+3] = f1;
// Tail loop for the remaining 0..3 elements.
for( ; i < len; i++ )
temp = RNG_NEXT(temp);
v = (temp >> 32)|(temp << 32);
arr[i] = v*p[i][0] + p[i][1];
// Common signature for all uniform fill kernels; p points at the per-element
// parameter array (DivStruct, Vec2i, Vec2f or Vec2d depending on the kernel).
typedef void (*RandFunc)(uchar* arr, int len, uint64* state, const void* p, bool small_flag);
// Dispatch table indexed as randTab[fast_int_mode][depth] (see RNG::fill):
// row 0 = exact-range/float kernels, row 1 = power-of-two mask kernels.
static RandFunc randTab[][8] =
(RandFunc)randi_8u, (RandFunc)randi_8s, (RandFunc)randi_16u, (RandFunc)randi_16s,
(RandFunc)randi_32s, (RandFunc)randf_32f, (RandFunc)randf_64f, 0
(RandFunc)randBits_8u, (RandFunc)randBits_8s, (RandFunc)randBits_16u, (RandFunc)randBits_16s,
(RandFunc)randBits_32s, 0, 0, 0
The code below implements the algorithm described in
"The Ziggurat Method for Generating Random Variables"
by Marsaglia and Tsang, Journal of Statistical Software.
// Fill arr[0..len) with N(0,1) single-precision samples using the 128-strip
// Ziggurat method. The strip tables kn/wn/fn are built lazily on first call.
// NOTE(review): listing elided — return type, braces, the lazy-init guard,
// the fn[0]=1 assignment and the table-building loop header are missing here.
randn_0_1_32f( float* arr, int len, uint64* state )
const float r = 3.442620f; // The start of the right tail
const float rng_flt = 2.3283064365386962890625e-10f; // 2^-32
static unsigned kn[128];
static float wn[128], fn[128];
uint64 temp = *state;
static bool initialized=false;
// --- one-time table setup (Marsaglia & Tsang constants) ---
const double m1 = 2147483648.0;
double dn = 3.442619855899, tn = dn, vn = 9.91256303526217e-3;
double q = vn/std::exp(-.5*dn*dn);
kn[0] = (unsigned)((dn/q)*m1);
wn[0] = (float)(q/m1);
wn[127] = (float)(dn/m1);
fn[127] = (float)std::exp(-.5*dn*dn);
// Walk the strips from the tail down, solving for each strip boundary dn.
dn = std::sqrt(-2.*std::log(vn/dn+std::exp(-.5*dn*dn)));
kn[i+1] = (unsigned)((dn/tn)*m1);
fn[i] = (float)std::exp(-.5*dn*dn);
wn[i] = (float)(dn/m1);
// --- sampling loop: one output per iteration ---
for( i = 0; i < len; i++ )
temp = RNG_NEXT(temp);
// Fast path: sample falls strictly inside strip iz — accept immediately.
if( (unsigned)std::abs(hz) < kn[iz] )
if( iz == 0) // iz==0, handles the base strip
// Tail sampling: rejection from the exponential envelope beyond r.
x = (unsigned)temp*rng_flt;
temp = RNG_NEXT(temp);
y = (unsigned)temp*rng_flt;
temp = RNG_NEXT(temp);
// FLT_MIN guards log(0); 0.2904764 is 1/r.
x = (float)(-std::log(x+FLT_MIN)*0.2904764);
y = (float)-std::log(y+FLT_MIN);
while( y + y < x*x );
x = hz > 0 ? r + x : -r - x;
// iz > 0, handle the wedges of other strips
y = (unsigned)temp*rng_flt;
temp = RNG_NEXT(temp);
// Accept if the point lies under the Gaussian density within the wedge.
if( fn[iz] + y*(fn[iz - 1] - fn[iz]) < std::exp(-.5*x*x) )
// Draw one sample from N(0, sigma^2): generate a single N(0,1) float via
// the Ziggurat kernel, then scale by sigma (scaling done in the elided
// remainder of the body — TODO confirm against the full listing).
double RNG::gaussian(double sigma)
randn_0_1_32f( &temp, 1, &state );
// Transform N(0,1) samples in src into the caller's distribution and store
// into dst: either mean + stddev*x per channel, or, when stdmtx is set,
// mean + Stddev*x with Stddev a full cn x cn matrix.
// NOTE(review): listing elided — braces and the branch keywords selecting
// between the three paths are missing here.
template<typename T, typename PT> static void
randnScale_( const float* src, T* dst, int len, int cn, const PT* mean, const PT* stddev, bool stdmtx )
// Single-channel path: one scale/bias pair for the whole array.
PT b = mean[0], a = stddev[0];
for( i = 0; i < len; i++ )
dst[i] = saturate_cast<T>(src[i]*a + b);
// Multi-channel diagonal path: independent scale/bias per channel.
for( i = 0; i < len; i++, src += cn, dst += cn )
for( k = 0; k < cn; k++ )
dst[k] = saturate_cast<T>(src[k]*stddev[k] + mean[k]);
// Full-matrix path: dst = Stddev * src (+ mean in the elided accumulator
// init — TODO confirm), row-by-row matrix-vector product per pixel.
for( i = 0; i < len; i++, src += cn, dst += cn )
for( j = 0; j < cn; j++ )
for( k = 0; k < cn; k++ )
s += src[k]*stddev[j*cn + k];
dst[j] = saturate_cast<T>(s);
// Typed wrappers around randnScale_ — one per output depth. All use float
// parameters except the CV_64F variant, which keeps double precision.
static void randnScale_8u( const float* src, uchar* dst, int len, int cn,
const float* mean, const float* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
static void randnScale_8s( const float* src, schar* dst, int len, int cn,
const float* mean, const float* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
static void randnScale_16u( const float* src, ushort* dst, int len, int cn,
const float* mean, const float* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
static void randnScale_16s( const float* src, short* dst, int len, int cn,
const float* mean, const float* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
static void randnScale_32s( const float* src, int* dst, int len, int cn,
const float* mean, const float* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
static void randnScale_32f( const float* src, float* dst, int len, int cn,
const float* mean, const float* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
static void randnScale_64f( const float* src, double* dst, int len, int cn,
const double* mean, const double* stddev, bool stdmtx )
{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
// Type-erased signature shared by all wrappers above; dispatch is by depth.
typedef void (*RandnScaleFunc)(const float* src, uchar* dst, int len, int cn,
const uchar*, const uchar*, bool);
// Indexed by mat.depth() in RNG::fill; trailing 0 marks unsupported depths.
static RandnScaleFunc randnScaleTab[] =
(RandnScaleFunc)randnScale_8u, (RandnScaleFunc)randnScale_8s, (RandnScaleFunc)randnScale_16u,
(RandnScaleFunc)randnScale_16s, (RandnScaleFunc)randnScale_32s, (RandnScaleFunc)randnScale_32f,
(RandnScaleFunc)randnScale_64f, 0
// Fill _mat with random values: UNIFORM in [param1, param2) or NORMAL with
// mean param1 and stddev param2 (scalar per channel, or a full cn x cn
// stddev matrix when _param2 is cn x cn). saturateRange clamps the uniform
// range to the representable range of the output depth.
// NOTE(review): this listing is heavily elided — braces, else-branches and
// several statements are missing; comments mark the visible sections only.
void RNG::fill( InputOutputArray _mat, int disttype,
InputArray _param1arg, InputArray _param2arg, bool saturateRange )
Mat mat = _mat.getMat(), _param1 = _param1arg.getMat(), _param2 = _param2arg.getMat();
int depth = mat.depth(), cn = mat.channels();
AutoBuffer<double> _parambuf;
int j, k, fast_int_mode = 0, smallFlag = 1;
RandnScaleFunc scaleFunc = 0;
// Parameters must be 1-channel vectors matching cn (or broadcastable
// scalars / a 1x4 CV_64F Scalar), except the NORMAL cn x cn stddev matrix.
CV_Assert(_param1.channels() == 1 && (_param1.rows == 1 || _param1.cols == 1) &&
(_param1.rows + _param1.cols - 1 == cn || _param1.rows + _param1.cols - 1 == 1 ||
(_param1.size() == Size(1, 4) && _param1.type() == CV_64F && cn <= 4)));
CV_Assert( _param2.channels() == 1 &&
(((_param2.rows == 1 || _param2.cols == 1) &&
(_param2.rows + _param2.cols - 1 == cn || _param2.rows + _param2.cols - 1 == 1 ||
(_param1.size() == Size(1, 4) && _param1.type() == CV_64F && cn <= 4))) ||
(_param2.rows == cn && _param2.cols == cn && disttype == NORMAL)));
int n1 = (int)_param1.total();
int n2 = (int)_param2.total();
if( disttype == UNIFORM )
_parambuf.allocate(cn*8 + n1 + n2);
double* parambuf = _parambuf;
double* p1 = (double*)_param1.data;
double* p2 = (double*)_param2.data;
// Normalize both parameter arrays to contiguous CV_64F with cn entries,
// converting into parambuf when needed.
if( !_param1.isContinuous() || _param1.type() != CV_64F || n1 != cn )
Mat tmp(_param1.size(), CV_64F, parambuf);
_param1.convertTo(tmp, CV_64F);
// Broadcast a scalar bound across the remaining channels.
for( j = n1; j < cn; j++ )
if( !_param2.isContinuous() || _param2.type() != CV_64F || n2 != cn )
Mat tmp(_param2.size(), CV_64F, parambuf + cn);
_param2.convertTo(tmp, CV_64F);
for( j = n2; j < cn; j++ )
// Integer outputs: precompute per-channel (mask-or-divisor, offset)
// pairs and decide between the bit-mask and exact-range kernels.
if( depth <= CV_32S )
ip = (Vec2i*)(parambuf + cn*2);
for( j = 0, fast_int_mode = 1; j < cn; j++ )
double a = min(p1[j], p2[j]);
double b = max(p1[j], p2[j]);
// saturateRange path (guard elided): clamp [a, b) to the depth's
// representable range.
a = max(a, depth == CV_8U || depth == CV_16U ? 0. :
depth == CV_8S ? -128. : depth == CV_16S ? -32768. : (double)INT_MIN);
b = min(b, depth == CV_8U ? 256. : depth == CV_16U ? 65536. :
depth == CV_8S ? 128. : depth == CV_16S ? 32768. : (double)INT_MAX);
ip[j][1] = cvCeil(a);
int idiff = ip[j][0] = cvFloor(b) - ip[j][1] - 1;
// Fast mode only when every channel range is a power of two
// (mask form 2^k - 1) and fits in 32 bits.
fast_int_mode &= diff <= 4294967296. && (idiff & (idiff+1)) == 0;
smallFlag &= idiff <= 255;
// Degenerate/empty range fallback (guard elided).
ip[j][1] = INT_MIN/2;
// Exact-range mode: precompute magic-multiplier division constants
// (M, sh1, sh2) per channel for randi_.
ds = (DivStruct*)(ip + cn);
for( j = 0; j < cn; j++ )
ds[j].delta = ip[j][1];
unsigned d = ds[j].d = (unsigned)(ip[j][0]+1);
while(((uint64)1 << l) < d)
ds[j].M = (unsigned)(((uint64)1 << 32)*(((uint64)1 << l) - d)/d) + 1;
ds[j].sh1 = min(l, 1);
ds[j].sh2 = max(l - 1, 0);
func = randTab[fast_int_mode][depth];
// Float outputs: precompute per-channel (scale, bias) so that a raw
// signed 32/64-bit draw X maps into [p1, p2).
double scale = depth == CV_64F ?
5.4210108624275221700372640043497e-20 : // 2**-64
2.3283064365386962890625e-10; // 2**-32
double maxdiff = saturateRange ? (double)FLT_MAX : DBL_MAX;
// for each channel i compute such dparam[0][i] & dparam[1][i],
// so that a signed 32/64-bit integer X is transformed to
// the range [param1.val[i], param2.val[i]) using
// dparam[1][i]*X + dparam[0][i]
if( depth == CV_32F )
fp = (Vec2f*)(parambuf + cn*2);
for( j = 0; j < cn; j++ )
fp[j][0] = (float)(std::min(maxdiff, p2[j] - p1[j])*scale);
fp[j][1] = (float)((p2[j] + p1[j])*0.5);
dp = (Vec2d*)(parambuf + cn*2);
for( j = 0; j < cn; j++ )
dp[j][0] = std::min(DBL_MAX, p2[j] - p1[j])*scale;
dp[j][1] = ((p2[j] + p1[j])*0.5);
func = randTab[0][depth];
CV_Assert( func != 0 );
else if( disttype == CV_RAND_NORMAL )
_parambuf.allocate(MAX(n1, cn) + MAX(n2, cn));
double* parambuf = _parambuf;
// Normal mode works in float except for CV_64F outputs.
int ptype = depth == CV_64F ? CV_64F : CV_32F;
int esz = (int)CV_ELEM_SIZE(ptype);
// Use the mean data in place when possible, else convert into parambuf.
if( _param1.isContinuous() && _param1.type() == ptype )
Mat tmp(_param1.size(), ptype, parambuf);
_param1.convertTo(tmp, ptype);
mean = (uchar*)parambuf;
// Broadcast a scalar mean across channels (byte-wise replication).
for( j = n1*esz; j < cn*esz; j++ )
mean[j] = mean[j - n1*esz];
if( _param2.isContinuous() && _param2.type() == ptype )
stddev = _param2.data;
Mat tmp(_param2.size(), ptype, parambuf + cn);
_param2.convertTo(tmp, ptype);
stddev = (uchar*)(parambuf + cn);
for( j = n1*esz; j < cn*esz; j++ )
stddev[j] = stddev[j - n1*esz];
// Full cn x cn stddev matrix selects the matrix-multiply scale path.
stdmtx = _param2.rows == cn && _param2.cols == cn;
scaleFunc = randnScaleTab[depth];
CV_Assert( scaleFunc != 0 );
CV_Error( CV_StsBadArg, "Unknown distribution type" );
// --- generation: iterate the (possibly non-continuous) mat plane by
// plane, processing blockSize pixels at a time ---
const Mat* arrays[] = {&mat, 0};
NAryMatIterator it(arrays, &ptr);
int total = (int)it.size, blockSize = std::min((BLOCK_SIZE + cn - 1)/cn, total);
size_t esz = mat.elemSize();
AutoBuffer<double> buf;
if( disttype == UNIFORM )
// Replicate the cn per-channel parameter entries across a whole block
// so the kernels can index parameters per element.
buf.allocate(blockSize*cn*4);
param = (uchar*)(double*)buf;
DivStruct* p = (DivStruct*)param;
for( j = 0; j < blockSize*cn; j += cn )
for( k = 0; k < cn; k++ )
Vec2i* p = (Vec2i*)param;
for( j = 0; j < blockSize*cn; j += cn )
for( k = 0; k < cn; k++ )
Vec2f* p = (Vec2f*)param;
for( j = 0; j < blockSize*cn; j += cn )
for( k = 0; k < cn; k++ )
Vec2d* p = (Vec2d*)param;
for( j = 0; j < blockSize*cn; j += cn )
for( k = 0; k < cn; k++ )
// NORMAL: scratch buffer of N(0,1) floats (doubles hold 2 floats each).
buf.allocate((blockSize*cn+1)/2);
nbuf = (float*)(double*)buf;
for( size_t i = 0; i < it.nplanes; i++, ++it )
for( j = 0; j < total; j += blockSize )
int len = std::min(total - j, blockSize);
if( disttype == CV_RAND_UNI )
func( ptr, len*cn, &state, param, smallFlag != 0 );
// NORMAL: generate standard normals, then scale/shift into dst.
randn_0_1_32f(nbuf, len*cn, &state);
scaleFunc(nbuf, ptr, len, cn, mean, stddev, stdmtx);
// Per-thread RNG storage for theRNG(). Three platform variants follow
// (compiler TLS, Win32 TlsAlloc, pthread keys); the #if/#elif/#endif lines
// selecting between them are elided in this listing — TODO confirm the
// exact guards against the full source.
// using C++11 thread attribute for local thread data
__declspec( thread ) RNG* rng = NULL;
void deleteThreadRNGData()
// --- Win32 TLS-slot variant ---
# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
static DWORD tlsRNGKey = TLS_OUT_OF_INDEXES;
void deleteThreadRNGData()
if( tlsRNGKey != TLS_OUT_OF_INDEXES )
delete (RNG*)TlsGetValue( tlsRNGKey );
// Lazily allocate the TLS slot, then the calling thread's RNG instance
// (allocation of the RNG itself is elided here).
if( tlsRNGKey == TLS_OUT_OF_INDEXES )
tlsRNGKey = TlsAlloc();
CV_Assert(tlsRNGKey != TLS_OUT_OF_INDEXES);
RNG* rng = (RNG*)TlsGetValue( tlsRNGKey );
TlsSetValue( tlsRNGKey, rng );
// --- pthread key variant ---
static pthread_key_t tlsRNGKey = 0;
static pthread_once_t tlsRNGKeyOnce = PTHREAD_ONCE_INIT;
// Destructor registered with the key: frees the thread's RNG on exit.
static void deleteRNG(void* data)
static void makeRNGKey()
int errcode = pthread_key_create(&tlsRNGKey, deleteRNG);
CV_Assert(errcode == 0);
// theRNG() body (signature elided): create the key once, then return the
// calling thread's RNG, creating it on first use.
pthread_once(&tlsRNGKeyOnce, makeRNGKey);
RNG* rng = (RNG*)pthread_getspecific(tlsRNGKey);
pthread_setspecific(tlsRNGKey, rng);
// Fill dst with uniformly distributed values in [low, high) using the
// thread-local RNG.
void cv::randu(InputOutputArray dst, InputArray low, InputArray high)
theRNG().fill(dst, RNG::UNIFORM, low, high);
// Fill dst with normally distributed values (given mean and stddev) using
// the thread-local RNG.
void cv::randn(InputOutputArray dst, InputArray mean, InputArray stddev)
theRNG().fill(dst, RNG::NORMAL, mean, stddev);
// Shuffle the elements of _arr in place by performing iterFactor*size
// random pairwise swaps. T is chosen by element size, not semantic type.
// NOTE(review): braces and the else keyword are elided in this listing.
template<typename T> static void
randShuffle_( Mat& _arr, RNG& rng, double iterFactor )
int sz = _arr.rows*_arr.cols, iters = cvRound(iterFactor*sz);
// Continuous matrix: swap directly in the flat element array.
if( _arr.isContinuous() )
T* arr = (T*)_arr.data;
for( int i = 0; i < iters; i++ )
int j = (unsigned)rng % sz, k = (unsigned)rng % sz;
std::swap( arr[j], arr[k] );
// Non-continuous matrix: translate flat indices into (row, col) and
// address through the row stride.
uchar* data = _arr.data;
size_t step = _arr.step;
int cols = _arr.cols;
for( int i = 0; i < iters; i++ )
int j1 = (unsigned)rng % sz, k1 = (unsigned)rng % sz;
int j0 = j1/cols, k0 = k1/cols;
j1 -= j0*cols; k1 -= k0*cols;
std::swap( ((T*)(data + step*j0))[j1], ((T*)(data + step*k0))[k1] );
// Dispatch signature for the element-size-specialized shufflers below.
typedef void (*RandShuffleFunc)( Mat& dst, RNG& rng, double iterFactor );
// Public shuffle entry point: pick the shuffler matching the element size
// in bytes (comments show the size each entry handles; zero entries for
// unsupported sizes are elided in this listing) and run it with the given
// or thread-local RNG.
void cv::randShuffle( InputOutputArray _dst, double iterFactor, RNG* _rng )
RandShuffleFunc tab[] =
randShuffle_<uchar>, // 1
randShuffle_<ushort>, // 2
randShuffle_<Vec<uchar,3> >, // 3
randShuffle_<int>, // 4
randShuffle_<Vec<ushort,3> >, // 6
randShuffle_<Vec<int,2> >, // 8
randShuffle_<Vec<int,3> >, // 12
randShuffle_<Vec<int,4> >, // 16
randShuffle_<Vec<int,6> >, // 24
randShuffle_<Vec<int,8> > // 32
Mat dst = _dst.getMat();
RNG& rng = _rng ? *_rng : theRNG();
CV_Assert( dst.elemSize() <= 32 );
RandShuffleFunc func = tab[dst.elemSize()];
CV_Assert( func != 0 );
func( dst, rng, iterFactor );
// Legacy alias kept for source compatibility; forwards to randShuffle with
// the thread-local RNG.
void cv::randShuffle_( InputOutputArray _dst, double iterFactor )
randShuffle(_dst, iterFactor);
// C-API bridge: fill a CvMat/IplImage with uniform or normal random values.
// Reinterprets the caller's CvRNG state as a cv::RNG, which is valid only
// for the current 64-bit MWC generator (see the note below).
cvRandArr( CvRNG* _rng, CvArr* arr, int disttype, CvScalar param1, CvScalar param2 )
cv::Mat mat = cv::cvarrToMat(arr);
// !!! this will only work for current 64-bit MWC RNG !!!
cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG();
rng.fill(mat, disttype == CV_RAND_NORMAL ?
cv::RNG::NORMAL : cv::RNG::UNIFORM, cv::Scalar(param1), cv::Scalar(param2) );
// C-API bridge for randShuffle; same CvRNG-to-cv::RNG reinterpretation
// caveat as cvRandArr above.
CV_IMPL void cvRandShuffle( CvArr* arr, CvRNG* _rng, double iter_factor )
cv::Mat dst = cv::cvarrToMat(arr);
cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG();
cv::randShuffle( dst, iter_factor, &rng );
907 // Mersenne Twister random number generator.
908 // Inspired by http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.c
911 A C-program for MT19937, with initialization improved 2002/1/26.
912 Coded by Takuji Nishimura and Makoto Matsumoto.
914 Before using, initialize the state by using init_genrand(seed)
915 or init_by_array(init_key, key_length).
917 Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
920 Redistribution and use in source and binary forms, with or without
921 modification, are permitted provided that the following conditions
924 1. Redistributions of source code must retain the above copyright
925 notice, this list of conditions and the following disclaimer.
927 2. Redistributions in binary form must reproduce the above copyright
928 notice, this list of conditions and the following disclaimer in the
929 documentation and/or other materials provided with the distribution.
931 3. The names of its contributors may not be used to endorse or promote
932 products derived from this software without specific prior written
935 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
936 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
937 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
938 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
939 CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
940 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
941 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
942 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
943 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
944 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
945 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
948 Any feedback is very welcome.
949 http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
950 email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
// Construct with an explicit seed.
cv::RNG_MT19937::RNG_MT19937(unsigned s) { seed(s); }

// Default-construct with 5489, the canonical MT19937 default seed.
cv::RNG_MT19937::RNG_MT19937() { seed(5489U); }
// Initialize the 624-word state from seed s using the improved 2002
// initializer (state[0] = s is elided in this listing, as are the braces).
void cv::RNG_MT19937::seed(unsigned s)
for (mti = 1; mti < N; mti++)
/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
state[mti] = (1812433253U * (state[mti - 1] ^ (state[mti - 1] >> 30)) + mti);
// Produce the next 32-bit output: regenerate all N state words when the
// cursor mti wraps, then temper one word. NOTE(review): braces, the
// mti-exhaustion guard and two tempering lines (the >>11 and >>18 xors of
// the standard MT19937 tempering) are elided in this listing.
unsigned cv::RNG_MT19937::next()
/* mag01[x] = x * MATRIX_A for x=0,1 */
static unsigned mag01[2] = { 0x0U, /*MATRIX_A*/ 0x9908b0dfU};
const unsigned UPPER_MASK = 0x80000000U;
const unsigned LOWER_MASK = 0x7fffffffU;
/* generate N words at one time */
// Twist part 1: words whose (kk + M) partner lies ahead in the array.
for (; kk < N - M; ++kk)
unsigned y = (state[kk] & UPPER_MASK) | (state[kk + 1] & LOWER_MASK);
state[kk] = state[kk + M] ^ (y >> 1) ^ mag01[y & 0x1U];
// Twist part 2: partner index wraps around the front of the array.
for (; kk < N - 1; ++kk)
unsigned y = (state[kk] & UPPER_MASK) | (state[kk + 1] & LOWER_MASK);
state[kk] = state[kk + (M - N)] ^ (y >> 1) ^ mag01[y & 0x1U];
// Last word pairs with state[0].
unsigned y = (state[N - 1] & UPPER_MASK) | (state[0] & LOWER_MASK);
state[N - 1] = state[M - 1] ^ (y >> 1) ^ mag01[y & 0x1U];
// Tempering of the output word (two xor-shift steps elided here).
unsigned y = state[mti++];
y ^= (y << 7) & 0x9d2c5680U;
y ^= (y << 15) & 0xefc60000U;
// Raw 32-bit draw.
cv::RNG_MT19937::operator unsigned() { return next(); }

// Same bits reinterpreted as signed.
cv::RNG_MT19937::operator int() { return (int)next();}

// Uniform float in [0, 1): scale one draw by 2^-32.
cv::RNG_MT19937::operator float() { return next() * (1.f / 4294967296.f); }

// Uniform double in [0, 1) with a full 53-bit mantissa: combine a 27-bit
// and a 26-bit draw (67108864 = 2^26, 9007199254740992 = 2^53).
// NOTE(review): the surrounding braces are elided in this listing.
cv::RNG_MT19937::operator double()
unsigned a = next() >> 5;
unsigned b = next() >> 6;
return (a * 67108864.0 + b) * (1.0 / 9007199254740992.0);
1022 int cv::RNG_MT19937::uniform(int a, int b) { return (int)(next() % (b - a) + a); }
1024 float cv::RNG_MT19937::uniform(float a, float b) { return ((float)*this)*(b - a) + a; }
1026 double cv::RNG_MT19937::uniform(double a, double b) { return ((double)*this)*(b - a) + a; }
// Uniform unsigned in [0, b) via modulo (UB if b == 0; slight bias when b
// does not divide 2^32).
unsigned cv::RNG_MT19937::operator ()(unsigned b) { return next() % b; }

// Raw 32-bit draw, function-object style.
unsigned cv::RNG_MT19937::operator ()() { return next(); }