1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 /* ////////////////////////////////////////////////////////////////////
45 // Filling CvMat/IplImage instances with random numbers
49 #include "precomp.hpp"
51 #if defined _WIN32 || defined WINCE
59 #if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
60 #include "emmintrin.h"
66 ///////////////////////////// Functions Declaration //////////////////////////////////////
69 Multiply-with-carry generator is used here:
70 temp = ( A*X(n) + carry )
71 X(n+1) = temp mod (2^32)
// Advance the 64-bit multiply-with-carry (MWC) state:
//   new_state = A * low32(state) + high32(state),  A = CV_RNG_COEFF.
// The low 32 bits of the new state are the next output sample; the high
// 32 bits are the carry fed into the following step.
75 #define RNG_NEXT(x) ((uint64)(unsigned)(x)*CV_RNG_COEFF + ((x) >> 32))
77 /***************************************************************************************\
78 * Pseudo-Random Number Generators (PRNGs) *
79 \***************************************************************************************/
// randBits_: fill arr[0..len) with uniform random integers drawn from the MWC
// generator in *state. For each element i, p[i][0] is a (2^k - 1) bit mask and
// p[i][1] the range offset, so the output is ((sample & p[i][0]) + p[i][1])
// saturated to T. When small_flag is true every mask fits in 8 bits, so one
// 32-bit draw supplies four consecutive outputs (bytes 0..3 of 't').
// NOTE(review): several interior lines (local declarations, the small_flag
// branch, block braces, the final *state write-back) are not visible in this
// chunk — comments below describe only the statements that are.
81 template<typename T> static void
82 randBits_( T* arr, int len, uint64* state, const Vec2i* p, bool small_flag )
// General path: one RNG draw per output element, unrolled 4x.
89 for( i = 0; i <= len - 4; i += 4 )
93 temp = RNG_NEXT(temp);
94 t0 = ((int)temp & p[i][0]) + p[i][1];
95 temp = RNG_NEXT(temp);
96 t1 = ((int)temp & p[i+1][0]) + p[i+1][1];
97 arr[i] = saturate_cast<T>(t0);
98 arr[i+1] = saturate_cast<T>(t1);
100 temp = RNG_NEXT(temp);
101 t0 = ((int)temp & p[i+2][0]) + p[i+2][1];
102 temp = RNG_NEXT(temp);
103 t1 = ((int)temp & p[i+3][0]) + p[i+3][1];
104 arr[i+2] = saturate_cast<T>(t0);
105 arr[i+3] = saturate_cast<T>(t1);
// Small-range path: a single draw 't' feeds four outputs, one byte each
// (t, t>>8, t>>16, t>>24) — valid because all masks are <= 0xff here.
110 for( i = 0; i <= len - 4; i += 4 )
113 temp = RNG_NEXT(temp);
115 t0 = (t & p[i][0]) + p[i][1];
116 t1 = ((t >> 8) & p[i+1][0]) + p[i+1][1];
117 arr[i] = saturate_cast<T>(t0);
118 arr[i+1] = saturate_cast<T>(t1);
120 t0 = ((t >> 16) & p[i+2][0]) + p[i+2][1];
121 t1 = ((t >> 24) & p[i+3][0]) + p[i+3][1];
122 arr[i+2] = saturate_cast<T>(t0);
123 arr[i+3] = saturate_cast<T>(t1);
// Tail: the remaining len % 4 elements, one draw each.
127 for( ; i < len; i++ )
130 temp = RNG_NEXT(temp);
132 t0 = ((int)temp & p[i][0]) + p[i][1];
133 arr[i] = saturate_cast<T>(t0);
// randi_: fill arr[0..len) with uniform integers over arbitrary (non power-of-
// two) ranges. Each DivStruct p[i] carries precomputed constants (M, sh1, sh2)
// for division-free reduction of a raw 32-bit sample t into [0, p[i].d):
//   v = mulhi(t, M); v = (v + ((t - v) >> sh1)) >> sh2;  // v = t / d
//   out = t - v*d + delta                                // remainder + offset
// This matches the invariant-integer-division scheme (Granlund & Montgomery);
// the constants are built in RNG::fill.
// NOTE(review): the lines loading t0/t1 from 'temp' after each RNG_NEXT, the
// braces, and the final *state store are not visible in this chunk.
147 template<typename T> static void
148 randi_( T* arr, int len, uint64* state, const DivStruct* p )
150 uint64 temp = *state;
152 unsigned t0, t1, v0, v1;
// Main loop, unrolled 4x: two elements per pair of RNG draws.
154 for( i = 0; i <= len - 4; i += 4 )
156 temp = RNG_NEXT(temp);
158 temp = RNG_NEXT(temp);
// v0/v1 = high 32 bits of t*M, then shift-corrected to the true quotient.
160 v0 = (unsigned)(((uint64)t0 * p[i].M) >> 32);
161 v1 = (unsigned)(((uint64)t1 * p[i+1].M) >> 32);
162 v0 = (v0 + ((t0 - v0) >> p[i].sh1)) >> p[i].sh2;
163 v1 = (v1 + ((t1 - v1) >> p[i+1].sh1)) >> p[i+1].sh2;
// Remainder t - q*d lands in [0, d); delta shifts it to the requested range.
164 v0 = t0 - v0*p[i].d + p[i].delta;
165 v1 = t1 - v1*p[i+1].d + p[i+1].delta;
166 arr[i] = saturate_cast<T>((int)v0);
167 arr[i+1] = saturate_cast<T>((int)v1);
169 temp = RNG_NEXT(temp);
171 temp = RNG_NEXT(temp);
173 v0 = (unsigned)(((uint64)t0 * p[i+2].M) >> 32);
174 v1 = (unsigned)(((uint64)t1 * p[i+3].M) >> 32);
175 v0 = (v0 + ((t0 - v0) >> p[i+2].sh1)) >> p[i+2].sh2;
176 v1 = (v1 + ((t1 - v1) >> p[i+3].sh1)) >> p[i+3].sh2;
177 v0 = t0 - v0*p[i+2].d + p[i+2].delta;
178 v1 = t1 - v1*p[i+3].d + p[i+3].delta;
179 arr[i+2] = saturate_cast<T>((int)v0);
180 arr[i+3] = saturate_cast<T>((int)v1);
// Tail: remaining elements one at a time.
183 for( ; i < len; i++ )
185 temp = RNG_NEXT(temp);
187 v0 = (unsigned)(((uint64)t0 * p[i].M) >> 32);
188 v0 = (v0 + ((t0 - v0) >> p[i].sh1)) >> p[i].sh2;
189 v0 = t0 - v0*p[i].d + p[i].delta;
190 arr[i] = saturate_cast<T>((int)v0);
// DEF_RANDI_FUNC stamps out typed wrappers around randBits_/randi_ with a
// uniform signature so they can be stored in randTab and called through the
// generic RandFunc pointer. The randi_ wrapper ignores the trailing bool
// (small_flag), which only the randBits_ path uses.
197 #define DEF_RANDI_FUNC(suffix, type) \
198 static void randBits_##suffix(type* arr, int len, uint64* state, \
199 const Vec2i* p, bool small_flag) \
200 { randBits_(arr, len, state, p, small_flag); } \
202 static void randi_##suffix(type* arr, int len, uint64* state, \
203 const DivStruct* p, bool ) \
204 { randi_(arr, len, state, p); }
// Instantiate for every integer depth supported by RNG::fill.
206 DEF_RANDI_FUNC(8u, uchar)
207 DEF_RANDI_FUNC(8s, schar)
208 DEF_RANDI_FUNC(16u, ushort)
209 DEF_RANDI_FUNC(16s, short)
210 DEF_RANDI_FUNC(32s, int)
// randf_32f: fill arr[0..len) with uniform floats. Each raw 32-bit sample is
// reinterpreted as a signed int and mapped via arr[i] = f*p[i][0] + p[i][1],
// where RNG::fill sets p[i][0] = (b-a)*2^-32 and p[i][1] = (a+b)/2, yielding
// values in [a, b). The trailing bool parameter is unused (table-signature
// compatibility with the integer functions).
// NOTE(review): declarations (the float f[4] staging buffer, loop index),
// some #else/#endif markers and the final *state store are not visible in
// this chunk; the SSE/NEON comments below are from the original source.
212 static void randf_32f( float* arr, int len, uint64* state, const Vec2f* p, bool )
214 uint64 temp = *state;
217 for( ; i <= len - 4; i += 4 )
// Stage four signed samples as floats before the scale-and-offset step.
220 f[0] = (float)(int)(temp = RNG_NEXT(temp));
221 f[1] = (float)(int)(temp = RNG_NEXT(temp));
222 f[2] = (float)(int)(temp = RNG_NEXT(temp));
223 f[3] = (float)(int)(temp = RNG_NEXT(temp));
225 // handwritten SSE is required not for performance but for numerical stability!
226 // both 32-bit gcc and MSVC compilers trend to generate double precision SSE
227 // while 64-bit compilers generate single precision SIMD instructions
228 // so manual vectorisation forces all compilers to the single precision
229 #if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
230 __m128 q0 = _mm_loadu_ps((const float*)(p + i));
231 __m128 q1 = _mm_loadu_ps((const float*)(p + i + 2));
// Transpose the four Vec2f (scale, offset) pairs into p0 = scales, p1 = offsets.
233 __m128 q01l = _mm_unpacklo_ps(q0, q1);
234 __m128 q01h = _mm_unpackhi_ps(q0, q1);
236 __m128 p0 = _mm_unpacklo_ps(q01l, q01h);
237 __m128 p1 = _mm_unpackhi_ps(q01l, q01h);
239 _mm_storeu_ps(arr + i, _mm_add_ps(_mm_mul_ps(_mm_loadu_ps(f), p0), p1));
240 #elif defined __ARM_NEON && defined __aarch64__
241 // handwritten NEON is required not for performance but for numerical stability!
242 // 64bit gcc tends to use fmadd instead of separate multiply and add
243 // use volatile to ensure to separate the multiply and add
244 float32x4x2_t q = vld2q_f32((const float*)(p + i));
246 float32x4_t p0 = q.val[0];
247 float32x4_t p1 = q.val[1];
// volatile blocks fusion into fmadd so results stay bit-identical to the
// separate multiply+add performed by the other code paths.
249 volatile float32x4_t v0 = vmulq_f32(vld1q_f32(f), p0);
250 vst1q_f32(arr+i, vaddq_f32(v0, p1));
// Scalar fallback (the #else marker itself is not visible in this chunk).
252 arr[i+0] = f[0]*p[i+0][0] + p[i+0][1];
253 arr[i+1] = f[1]*p[i+1][0] + p[i+1][1];
254 arr[i+2] = f[2]*p[i+2][0] + p[i+2][1];
255 arr[i+3] = f[3]*p[i+3][0] + p[i+3][1];
// Tail loop: one element at a time, same single-precision discipline.
259 for( ; i < len; i++ )
261 temp = RNG_NEXT(temp);
262 #if defined __SSE2__ || (defined _M_IX86_FP && 2 == _M_IX86_FP)
263 _mm_store_ss(arr + i, _mm_add_ss(
264 _mm_mul_ss(_mm_set_ss((float)(int)temp), _mm_set_ss(p[i][0])),
267 #elif defined __ARM_NEON && defined __aarch64__
268 float32x2_t t = vadd_f32(vmul_f32(
269 vdup_n_f32((float)(int)temp), vdup_n_f32(p[i][0])),
270 vdup_n_f32(p[i][1]));
271 arr[i] = vget_lane_f32(t, 0);
273 arr[i] = (int)temp*p[i][0] + p[i][1];
// randf_64f: fill arr[0..len) with uniform doubles. A 64-bit sample 'v' is
// built by swapping the halves of the MWC state ((temp >> 32)|(temp << 32)),
// then mapped via v*p[i][0] + p[i][1], where RNG::fill sets
// p[i][0] = (b-a)*2^-64 and p[i][1] = (a+b)/2. Trailing bool is unused.
// NOTE(review): the return-type line, declarations of i/v/f0/f1, braces and
// the final *state store are not visible in this chunk.
282 randf_64f( double* arr, int len, uint64* state, const Vec2d* p, bool )
284 uint64 temp = *state;
// Main loop unrolled 4x; each element consumes one full state advance.
288 for( i = 0; i <= len - 4; i += 4 )
292 temp = RNG_NEXT(temp);
293 v = (temp >> 32)|(temp << 32);
294 f0 = v*p[i][0] + p[i][1];
295 temp = RNG_NEXT(temp);
296 v = (temp >> 32)|(temp << 32);
297 f1 = v*p[i+1][0] + p[i+1][1];
298 arr[i] = f0; arr[i+1] = f1;
300 temp = RNG_NEXT(temp);
301 v = (temp >> 32)|(temp << 32);
302 f0 = v*p[i+2][0] + p[i+2][1];
303 temp = RNG_NEXT(temp);
304 v = (temp >> 32)|(temp << 32);
305 f1 = v*p[i+3][0] + p[i+3][1];
306 arr[i+2] = f0; arr[i+3] = f1;
// Tail: remaining len % 4 elements.
309 for( ; i < len; i++ )
311 temp = RNG_NEXT(temp);
312 v = (temp >> 32)|(temp << 32);
313 arr[i] = v*p[i][0] + p[i][1];
// Generic entry point type for the uniform fillers; 'p' points at the
// per-channel parameter array (DivStruct, Vec2i, Vec2f or Vec2d depending on
// the actual function) and small_flag is only meaningful for randBits_*.
319 typedef void (*RandFunc)(uchar* arr, int len, uint64* state, const void* p, bool small_flag);
// Dispatch table indexed as randTab[fast_int_mode][depth]:
// row 0 = division-based randi_* plus the float fillers; row 1 = mask-based
// randBits_* used when every channel range is a power of two (integer depths
// only — float entries are 0). (Surrounding braces not visible in this chunk.)
322 static RandFunc randTab[][8] =
325 (RandFunc)randi_8u, (RandFunc)randi_8s, (RandFunc)randi_16u, (RandFunc)randi_16s,
326 (RandFunc)randi_32s, (RandFunc)randf_32f, (RandFunc)randf_64f, 0
329 (RandFunc)randBits_8u, (RandFunc)randBits_8s, (RandFunc)randBits_16u, (RandFunc)randBits_16s,
330 (RandFunc)randBits_32s, 0, 0, 0
335 The code below implements the algorithm described in
336 "The Ziggurat Method for Generating Random Variables"
337 by Marsaglia and Tsang, Journal of Statistical Software.
// randn_0_1_32f: fill arr[0..len) with standard-normal (mean 0, stddev 1)
// floats using the Ziggurat method of Marsaglia & Tsang. 128 strips are used;
// kn[] holds per-strip acceptance thresholds, wn[] the scale factors and fn[]
// the density values at strip boundaries.
// NOTE(review): the return-type line, the 'if (!initialized)' guard, loop
// headers of the table build, several locals (i, iz, hz, x, y) and the final
// *state store are not visible in this chunk. Table setup appears guarded by
// the static 'initialized' flag — first-call initialization is not obviously
// thread-safe; confirm against the full source.
340 randn_0_1_32f( float* arr, int len, uint64* state )
342 const float r = 3.442620f; // The start of the right tail
343 const float rng_flt = 2.3283064365386962890625e-10f; // 2^-32
344 static unsigned kn[128];
345 static float wn[128], fn[128];
346 uint64 temp = *state;
347 static bool initialized=false;
// One-time table construction from the canonical Ziggurat constants
// (m1 = 2^31, dn = rightmost strip x, vn = strip area).
352 const double m1 = 2147483648.0;
353 double dn = 3.442619855899, tn = dn, vn = 9.91256303526217e-3;
356 double q = vn/std::exp(-.5*dn*dn);
357 kn[0] = (unsigned)((dn/q)*m1);
360 wn[0] = (float)(q/m1);
361 wn[127] = (float)(dn/m1);
364 fn[127] = (float)std::exp(-.5*dn*dn);
// Walk strips from the tail inward, solving for each boundary dn.
368 dn = std::sqrt(-2.*std::log(vn/dn+std::exp(-.5*dn*dn)));
369 kn[i+1] = (unsigned)((dn/tn)*m1);
371 fn[i] = (float)std::exp(-.5*dn*dn);
372 wn[i] = (float)(dn/m1);
// Sampling loop: draw hz/iz, accept immediately when inside the strip core.
377 for( i = 0; i < len; i++ )
383 temp = RNG_NEXT(temp);
386 if( (unsigned)std::abs(hz) < kn[iz] )
388 if( iz == 0) // iz==0, handles the base strip
// Tail sampling: rejection from the exponential majorant beyond x = r
// (0.2904764 is 1/r).
392 x = (unsigned)temp*rng_flt;
393 temp = RNG_NEXT(temp);
394 y = (unsigned)temp*rng_flt;
395 temp = RNG_NEXT(temp);
396 x = (float)(-std::log(x+FLT_MIN)*0.2904764);
397 y = (float)-std::log(y+FLT_MIN);
399 while( y + y < x*x );
400 x = hz > 0 ? r + x : -r - x;
403 // iz > 0, handle the wedges of other strips
404 y = (unsigned)temp*rng_flt;
405 temp = RNG_NEXT(temp);
// Accept x if the uniform 'y' falls under the Gaussian density in the wedge.
406 if( fn[iz] + y*(fn[iz - 1] - fn[iz]) < std::exp(-.5*x*x) )
// Draw one standard-normal float via the Ziggurat sampler and scale it by
// sigma. (The declaration of 'temp' and the return statement are not visible
// in this chunk; presumably returns temp*sigma — confirm against full source.)
415 double RNG::gaussian(double sigma)
418 randn_0_1_32f( &temp, 1, &state );
// randnScale_: transform unit normals 'src' into dst = src*stddev + mean with
// saturation to T. Three layouts:
//  - cn == 1: one shared (mean, stddev) scalar pair;
//  - !stdmtx: per-channel diagonal stddev;
//  - stdmtx: stddev is a full cn x cn matrix, dst[j] = sum_k src[k]*stddev[j*cn+k]
//    (the accumulator's initialization — presumably with mean[j] — is not
//    visible in this chunk; confirm against the full source).
// NOTE(review): branch headers (if/else) and local declarations are missing
// from this view.
423 template<typename T, typename PT> static void
424 randnScale_( const float* src, T* dst, int len, int cn, const PT* mean, const PT* stddev, bool stdmtx )
// Single-channel fast path: hoist the scalar pair out of the loop.
431 PT b = mean[0], a = stddev[0];
432 for( i = 0; i < len; i++ )
433 dst[i] = saturate_cast<T>(src[i]*a + b);
// Diagonal path: independent scale/offset per channel.
437 for( i = 0; i < len; i++, src += cn, dst += cn )
438 for( k = 0; k < cn; k++ )
439 dst[k] = saturate_cast<T>(src[k]*stddev[k] + mean[k]);
// Full-matrix path: row j of 'stddev' mixes all cn source channels.
444 for( i = 0; i < len; i++, src += cn, dst += cn )
446 for( j = 0; j < cn; j++ )
449 for( k = 0; k < cn; k++ )
450 s += src[k]*stddev[j*cn + k];
451 dst[j] = saturate_cast<T>(s);
// Typed thunks over randnScale_ for each output depth, stored in
// randnScaleTab. Note the 64f variant takes double mean/stddev while all the
// others take float — the caller (RNG::fill) converts the parameters to the
// matching precision ('ptype') before dispatch.
457 static void randnScale_8u( const float* src, uchar* dst, int len, int cn,
458 const float* mean, const float* stddev, bool stdmtx )
459 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
461 static void randnScale_8s( const float* src, schar* dst, int len, int cn,
462 const float* mean, const float* stddev, bool stdmtx )
463 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
465 static void randnScale_16u( const float* src, ushort* dst, int len, int cn,
466 const float* mean, const float* stddev, bool stdmtx )
467 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
469 static void randnScale_16s( const float* src, short* dst, int len, int cn,
470 const float* mean, const float* stddev, bool stdmtx )
471 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
473 static void randnScale_32s( const float* src, int* dst, int len, int cn,
474 const float* mean, const float* stddev, bool stdmtx )
475 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
477 static void randnScale_32f( const float* src, float* dst, int len, int cn,
478 const float* mean, const float* stddev, bool stdmtx )
479 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
481 static void randnScale_64f( const float* src, double* dst, int len, int cn,
482 const double* mean, const double* stddev, bool stdmtx )
483 { randnScale_(src, dst, len, cn, mean, stddev, stdmtx); }
// Generic signature for the normal-distribution scalers; mean/stddev are
// passed as raw uchar* and each entry casts back to its real pointer types.
485 typedef void (*RandnScaleFunc)(const float* src, uchar* dst, int len, int cn,
486 const uchar*, const uchar*, bool);
// Indexed by matrix depth (CV_8U..CV_64F); trailing 0 guards unknown depths.
// (Surrounding braces are not visible in this chunk.)
488 static RandnScaleFunc randnScaleTab[] =
490 (RandnScaleFunc)randnScale_8u, (RandnScaleFunc)randnScale_8s, (RandnScaleFunc)randnScale_16u,
491 (RandnScaleFunc)randnScale_16s, (RandnScaleFunc)randnScale_32s, (RandnScaleFunc)randnScale_32f,
492 (RandnScaleFunc)randnScale_64f, 0
// RNG::fill: populate 'mat' with random values.
//  - disttype == UNIFORM: values in [param1, param2) per channel. Integer
//    depths use either the mask-based randBits_* (fast_int_mode: every channel
//    range is a power of two) or the division-free randi_* with precomputed
//    reciprocal constants; float depths use scale+midpoint parameters.
//  - disttype == NORMAL: Ziggurat unit normals scaled by randnScale_* with
//    mean = param1, stddev = param2 (param2 may be a cn x cn mixing matrix).
// saturateRange (uniform, float depths only) clips the scaled range to FLT_MAX.
// NOTE(review): this view is missing many interior lines (declarations,
// braces, if/else headers, the CV_INSTRUMENT_REGION, body of the replication
// loops, etc.); comments annotate only the visible statements.
495 void RNG::fill( InputOutputArray _mat, int disttype,
496 InputArray _param1arg, InputArray _param2arg, bool saturateRange )
498 Mat mat = _mat.getMat(), _param1 = _param1arg.getMat(), _param2 = _param2arg.getMat();
499 int depth = mat.depth(), cn = mat.channels();
500 AutoBuffer<double> _parambuf;
502 bool fast_int_mode = false;
503 bool smallFlag = true;
505 RandnScaleFunc scaleFunc = 0;
// Parameter shape checks: each param is a 1 x cn / cn x 1 vector, a scalar,
// or a 1x4 CV_64F Scalar (cn <= 4); param2 may additionally be cn x cn for
// the NORMAL mixing-matrix case.
507 CV_Assert(_param1.channels() == 1 && (_param1.rows == 1 || _param1.cols == 1) &&
508 (_param1.rows + _param1.cols - 1 == cn || _param1.rows + _param1.cols - 1 == 1 ||
509 (_param1.size() == Size(1, 4) && _param1.type() == CV_64F && cn <= 4)));
510 CV_Assert( _param2.channels() == 1 &&
511 (((_param2.rows == 1 || _param2.cols == 1) &&
512 (_param2.rows + _param2.cols - 1 == cn || _param2.rows + _param2.cols - 1 == 1 ||
513 (_param1.size() == Size(1, 4) && _param1.type() == CV_64F && cn <= 4))) ||
514 (_param2.rows == cn && _param2.cols == cn && disttype == NORMAL)));
523 int n1 = (int)_param1.total();
524 int n2 = (int)_param2.total();
526 if( disttype == UNIFORM )
// Normalize both parameter arrays to contiguous CV_64F in parambuf, then
// broadcast a scalar parameter across all cn channels.
528 _parambuf.allocate(cn*8 + n1 + n2);
529 double* parambuf = _parambuf;
530 double* p1 = _param1.ptr<double>();
531 double* p2 = _param2.ptr<double>();
533 if( !_param1.isContinuous() || _param1.type() != CV_64F || n1 != cn )
535 Mat tmp(_param1.size(), CV_64F, parambuf);
536 _param1.convertTo(tmp, CV_64F);
539 for( j = n1; j < cn; j++ )
543 if( !_param2.isContinuous() || _param2.type() != CV_64F || n2 != cn )
545 Mat tmp(_param2.size(), CV_64F, parambuf + cn);
546 _param2.convertTo(tmp, CV_64F);
549 for( j = n2; j < cn; j++ )
// Integer depths: clamp [a, b) to the depth's representable range, then
// store offset ip[j][1] = ceil(a) and idiff = floor(b) - ceil(a) - 1.
553 if( depth <= CV_32S )
555 ip = (Vec2i*)(parambuf + cn*2);
556 for( j = 0, fast_int_mode = true; j < cn; j++ )
558 double a = std::min(p1[j], p2[j]);
559 double b = std::max(p1[j], p2[j]);
562 a = std::max(a, depth == CV_8U || depth == CV_16U ? 0. :
563 depth == CV_8S ? -128. : depth == CV_16S ? -32768. : (double)INT_MIN);
564 b = std::min(b, depth == CV_8U ? 256. : depth == CV_16U ? 65536. :
565 depth == CV_8S ? 128. : depth == CV_16S ? 32768. : (double)INT_MAX);
567 ip[j][1] = cvCeil(a);
568 int idiff = ip[j][0] = cvFloor(b) - ip[j][1] - 1;
// fast_int_mode requires every channel's range size (idiff+1) to be a
// power of two so a simple bit mask yields uniform values.
571 fast_int_mode = fast_int_mode && diff <= 4294967296. && (idiff & (idiff+1)) == 0;
// smallFlag: all masks fit in one byte -> randBits_ can split one draw
// into four outputs.
573 smallFlag = smallFlag && (idiff <= 255);
579 ip[j][1] = INT_MIN/2;
// Non-power-of-two ranges: precompute division-by-invariant-integer
// constants (M, sh1, sh2) for each channel's divisor d = idiff + 1.
585 ds = (DivStruct*)(ip + cn);
586 for( j = 0; j < cn; j++ )
588 ds[j].delta = ip[j][1];
589 unsigned d = ds[j].d = (unsigned)(ip[j][0]+1);
591 while(((uint64)1 << l) < d)
593 ds[j].M = (unsigned)(((uint64)1 << 32)*(((uint64)1 << l) - d)/d) + 1;
594 ds[j].sh1 = std::min(l, 1);
595 ds[j].sh2 = std::max(l - 1, 0);
599 func = randTab[fast_int_mode ? 1 : 0][depth];
// Float depths: map a signed 32/64-bit sample X to dparam[1]*X + dparam[0],
// i.e. scale = (b-a)*2^-32 (or 2^-64) and offset = (a+b)/2.
603 double scale = depth == CV_64F ?
604 5.4210108624275221700372640043497e-20 : // 2**-64
605 2.3283064365386962890625e-10; // 2**-32
606 double maxdiff = saturateRange ? (double)FLT_MAX : DBL_MAX;
608 // for each channel i compute such dparam[0][i] & dparam[1][i],
609 // so that a signed 32/64-bit integer X is transformed to
610 // the range [param1.val[i], param2.val[i]) using
611 // dparam[1][i]*X + dparam[0][i]
612 if( depth == CV_32F )
614 fp = (Vec2f*)(parambuf + cn*2);
615 for( j = 0; j < cn; j++ )
617 fp[j][0] = (float)(std::min(maxdiff, p2[j] - p1[j])*scale);
618 fp[j][1] = (float)((p2[j] + p1[j])*0.5);
623 dp = (Vec2d*)(parambuf + cn*2);
624 for( j = 0; j < cn; j++ )
626 dp[j][0] = std::min(DBL_MAX, p2[j] - p1[j])*scale;
627 dp[j][1] = ((p2[j] + p1[j])*0.5);
631 func = randTab[0][depth];
633 CV_Assert( func != 0 );
// NORMAL: convert mean (param1) and stddev (param2) to the working precision
// ptype (float, or double for CV_64F mats) and broadcast scalars over cn.
635 else if( disttype == CV_RAND_NORMAL )
637 _parambuf.allocate(MAX(n1, cn) + MAX(n2, cn));
638 double* parambuf = _parambuf;
640 int ptype = depth == CV_64F ? CV_64F : CV_32F;
641 int esz = (int)CV_ELEM_SIZE(ptype);
643 if( _param1.isContinuous() && _param1.type() == ptype && n1 >= cn)
644 mean = _param1.ptr();
647 Mat tmp(_param1.size(), ptype, parambuf);
648 _param1.convertTo(tmp, ptype);
649 mean = (uchar*)parambuf;
// Byte-wise replication of a scalar mean across the remaining channels.
653 for( j = n1*esz; j < cn*esz; j++ )
654 mean[j] = mean[j - n1*esz];
656 if( _param2.isContinuous() && _param2.type() == ptype && n2 >= cn)
657 stddev = _param2.ptr();
660 Mat tmp(_param2.size(), ptype, parambuf + MAX(n1, cn));
661 _param2.convertTo(tmp, ptype);
662 stddev = (uchar*)(parambuf + MAX(n1, cn));
666 for( j = n2*esz; j < cn*esz; j++ )
667 stddev[j] = stddev[j - n2*esz];
// stdmtx: param2 given as full cn x cn mixing matrix rather than a diagonal.
669 stdmtx = _param2.rows == cn && _param2.cols == cn;
670 scaleFunc = randnScaleTab[depth];
671 CV_Assert( scaleFunc != 0 );
674 CV_Error( CV_StsBadArg, "Unknown distribution type" );
// Block-wise generation: iterate planes with NAryMatIterator and process
// blockSize pixels at a time; 'param' replicates the per-channel parameters
// across the whole block so the fillers can index it linearly.
676 const Mat* arrays[] = {&mat, 0};
678 NAryMatIterator it(arrays, &ptr, 1);
679 int total = (int)it.size, blockSize = std::min((BLOCK_SIZE + cn - 1)/cn, total);
680 size_t esz = mat.elemSize();
681 AutoBuffer<double> buf;
685 if( disttype == UNIFORM )
687 buf.allocate(blockSize*cn*4);
688 param = (uchar*)(double*)buf;
690 if( depth <= CV_32S )
694 DivStruct* p = (DivStruct*)param;
695 for( j = 0; j < blockSize*cn; j += cn )
696 for( k = 0; k < cn; k++ )
701 Vec2i* p = (Vec2i*)param;
702 for( j = 0; j < blockSize*cn; j += cn )
703 for( k = 0; k < cn; k++ )
707 else if( depth == CV_32F )
709 Vec2f* p = (Vec2f*)param;
710 for( j = 0; j < blockSize*cn; j += cn )
711 for( k = 0; k < cn; k++ )
716 Vec2d* p = (Vec2d*)param;
717 for( j = 0; j < blockSize*cn; j += cn )
718 for( k = 0; k < cn; k++ )
// NORMAL path stages unit normals in a float scratch buffer (nbuf).
724 buf.allocate((blockSize*cn+1)/2);
725 nbuf = (float*)(double*)buf;
728 for( size_t i = 0; i < it.nplanes; i++, ++it )
730 for( j = 0; j < total; j += blockSize )
732 int len = std::min(total - j, blockSize);
734 if( disttype == CV_RAND_UNI )
735 func( ptr, len*cn, &state, param, smallFlag );
738 randn_0_1_32f(nbuf, len*cn, &state);
739 scaleFunc(nbuf, ptr, len, cn, mean, stddev, stdmtx);
// Per-thread default RNG: each thread gets its own generator from the core
// TLS data, so cv::theRNG() is safe to call concurrently without locking.
// (Function braces are not visible in this chunk.)
748 cv::RNG& cv::theRNG()
750 return getCoreTlsData().get()->rng;
// Re-seed the calling thread's default RNG (only affects the current thread,
// since theRNG() is thread-local). (Function braces not visible here.)
753 void cv::setRNGSeed(int seed)
755 theRNG() = RNG(static_cast<uint64>(seed));
// cv::randu: fill 'dst' with uniform random values in [low, high) per channel
// using the thread-local default RNG. (Function braces not visible here.)
759 void cv::randu(InputOutputArray dst, InputArray low, InputArray high)
761 CV_INSTRUMENT_REGION()
763 theRNG().fill(dst, RNG::UNIFORM, low, high);
// cv::randn: fill 'dst' with normally-distributed values (given mean and
// stddev) using the thread-local default RNG. (Braces not visible here.)
766 void cv::randn(InputOutputArray dst, InputArray mean, InputArray stddev)
768 CV_INSTRUMENT_REGION()
770 theRNG().fill(dst, RNG::NORMAL, mean, stddev);
// randShuffle_: permute all elements of _arr in place. For each position a
// random partner index over the WHOLE array is drawn and the two elements are
// swapped (note: swapping with [0, sz) rather than [i, sz) — this is not the
// unbiased Fisher-Yates variant, but matches the historical behavior).
// The ignored double parameter is iterFactor, kept for table-signature
// compatibility; the visible loops here perform one pass regardless.
// NOTE(review): braces and the loop around the non-continuous case's swap
// count are partially missing from this view.
776 template<typename T> static void
777 randShuffle_( Mat& _arr, RNG& rng, double )
779 unsigned sz = (unsigned)_arr.total();
// Fast path: contiguous storage, shuffle through a flat pointer.
780 if( _arr.isContinuous() )
782 T* arr = _arr.ptr<T>();
783 for( unsigned i = 0; i < sz; i++ )
785 unsigned j = (unsigned)rng % sz;
786 std::swap( arr[j], arr[i] );
// Strided path: 2-D only; map the flat random index k1 back to (row, col)
// and swap across rows via the raw data pointer + step.
791 CV_Assert( _arr.dims <= 2 );
792 uchar* data = _arr.ptr();
793 size_t step = _arr.step;
794 int rows = _arr.rows;
795 int cols = _arr.cols;
796 for( int i0 = 0; i0 < rows; i0++ )
798 T* p = _arr.ptr<T>(i0);
799 for( int j0 = 0; j0 < cols; j0++ )
801 unsigned k1 = (unsigned)rng % sz;
802 int i1 = (int)(k1 / cols);
803 int j1 = (int)(k1 - (unsigned)i1*(unsigned)cols);
804 std::swap( p[j0], ((T*)(data + step*i1))[j1] );
// Shuffle dispatcher: picks a randShuffle_ instantiation by element size
// (1..32 bytes) so any matrix type can be permuted with plain value swaps.
810 typedef void (*RandShuffleFunc)( Mat& dst, RNG& rng, double iterFactor );
// cv::randShuffle: shuffle 'dst' in place; uses the caller's RNG when given,
// otherwise the thread-local default. iterFactor is forwarded to the worker.
// NOTE(review): the dispatch table below is sparse — entries for element
// sizes between the visible ones are missing from this view (presumably 0),
// which is why the func != 0 assert follows the lookup.
814 void cv::randShuffle( InputOutputArray _dst, double iterFactor, RNG* _rng )
816 CV_INSTRUMENT_REGION()
818 RandShuffleFunc tab[] =
821 randShuffle_<uchar>, // 1
822 randShuffle_<ushort>, // 2
823 randShuffle_<Vec<uchar,3> >, // 3
824 randShuffle_<int>, // 4
826 randShuffle_<Vec<ushort,3> >, // 6
828 randShuffle_<Vec<int,2> >, // 8
830 randShuffle_<Vec<int,3> >, // 12
832 randShuffle_<Vec<int,4> >, // 16
834 randShuffle_<Vec<int,6> >, // 24
836 randShuffle_<Vec<int,8> > // 32
839 Mat dst = _dst.getMat();
840 RNG& rng = _rng ? *_rng : theRNG();
841 CV_Assert( dst.elemSize() <= 32 );
842 RandShuffleFunc func = tab[dst.elemSize()];
843 CV_Assert( func != 0 );
844 func( dst, rng, iterFactor );
// Legacy C API: fill a CvMat/IplImage with random numbers by delegating to
// cv::RNG::fill. The CvRNG value is reinterpreted as a cv::RNG, which is only
// valid while cv::RNG remains the 64-bit MWC generator (see comment below).
// (The CV_IMPL return-type line and braces are not visible in this chunk.)
848 cvRandArr( CvRNG* _rng, CvArr* arr, int disttype, CvScalar param1, CvScalar param2 )
850 cv::Mat mat = cv::cvarrToMat(arr);
851 // !!! this will only work for current 64-bit MWC RNG !!!
852 cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG();
853 rng.fill(mat, disttype == CV_RAND_NORMAL ?
854 cv::RNG::NORMAL : cv::RNG::UNIFORM, cv::Scalar(param1), cv::Scalar(param2) );
// Legacy C API wrapper over cv::randShuffle; same CvRNG-to-cv::RNG aliasing
// caveat as cvRandArr above. (Braces are not visible in this chunk.)
857 CV_IMPL void cvRandShuffle( CvArr* arr, CvRNG* _rng, double iter_factor )
859 cv::Mat dst = cv::cvarrToMat(arr);
860 cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG();
861 cv::randShuffle( dst, iter_factor, &rng );
864 // Mersenne Twister random number generator.
865 // Inspired by http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.c
868 A C-program for MT19937, with initialization improved 2002/1/26.
869 Coded by Takuji Nishimura and Makoto Matsumoto.
871 Before using, initialize the state by using init_genrand(seed)
872 or init_by_array(init_key, key_length).
874 Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
877 Redistribution and use in source and binary forms, with or without
878 modification, are permitted provided that the following conditions
881 1. Redistributions of source code must retain the above copyright
882 notice, this list of conditions and the following disclaimer.
884 2. Redistributions in binary form must reproduce the above copyright
885 notice, this list of conditions and the following disclaimer in the
886 documentation and/or other materials provided with the distribution.
888 3. The names of its contributors may not be used to endorse or promote
889 products derived from this software without specific prior written
892 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
893 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
894 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
895 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
896 CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
897 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
898 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
899 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
900 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
901 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
902 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
905 Any feedback is very welcome.
906 http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
907 email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
910 cv::RNG_MT19937::RNG_MT19937(unsigned s) { seed(s); }
912 cv::RNG_MT19937::RNG_MT19937() { seed(5489U); }
// Initialize the 624-word state array from a single 32-bit seed using the
// improved 2002 init_genrand recurrence (multiplier 1812433253, Knuth
// TAOCP Vol. 2, 3rd ed., p. 106). (The state[0] = s line and braces are not
// visible in this chunk.)
914 void cv::RNG_MT19937::seed(unsigned s)
917 for (mti = 1; mti < N; mti++)
919 /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
920 state[mti] = (1812433253U * (state[mti - 1] ^ (state[mti - 1] >> 30)) + mti);
// Produce the next 32-bit output word. When the buffered batch is exhausted
// (mti >= N; the guard itself is not visible in this chunk) all N words are
// regenerated via the twisted GFSR recurrence, then the drawn word is
// tempered with the standard MT19937 shift/mask sequence.
// NOTE(review): the y >> 11 and y >> 18 tempering lines and the final return
// are not visible here; only two of the four tempering steps are shown.
924 unsigned cv::RNG_MT19937::next()
926 /* mag01[x] = x * MATRIX_A for x=0,1 */
927 static unsigned mag01[2] = { 0x0U, /*MATRIX_A*/ 0x9908b0dfU};
929 const unsigned UPPER_MASK = 0x80000000U;
930 const unsigned LOWER_MASK = 0x7fffffffU;
932 /* generate N words at one time */
// Twist: combine the top bit of state[kk] with the low bits of state[kk+1],
// then xor with state[kk+M] (conditionally xoring MATRIX_A on the low bit).
937 for (; kk < N - M; ++kk)
939 unsigned y = (state[kk] & UPPER_MASK) | (state[kk + 1] & LOWER_MASK);
940 state[kk] = state[kk + M] ^ (y >> 1) ^ mag01[y & 0x1U];
943 for (; kk < N - 1; ++kk)
945 unsigned y = (state[kk] & UPPER_MASK) | (state[kk + 1] & LOWER_MASK);
946 state[kk] = state[kk + (M - N)] ^ (y >> 1) ^ mag01[y & 0x1U];
// Wrap-around word closes the circular recurrence.
949 unsigned y = (state[N - 1] & UPPER_MASK) | (state[0] & LOWER_MASK);
950 state[N - 1] = state[M - 1] ^ (y >> 1) ^ mag01[y & 0x1U];
955 unsigned y = state[mti++];
// Tempering improves the equidistribution of the raw state word.
959 y ^= (y << 7) & 0x9d2c5680U;
960 y ^= (y << 15) & 0xefc60000U;
966 cv::RNG_MT19937::operator unsigned() { return next(); }
968 cv::RNG_MT19937::operator int() { return (int)next();}
970 cv::RNG_MT19937::operator float() { return next() * (1.f / 4294967296.f); }
// Uniform double in [0, 1) with full 53-bit mantissa resolution: 27 high
// bits from one draw and 26 from another combine into a 53-bit integer,
// divided by 2^53 (= 9007199254740992). (Braces not visible in this chunk.)
972 cv::RNG_MT19937::operator double()
974 unsigned a = next() >> 5;
975 unsigned b = next() >> 6;
976 return (a * 67108864.0 + b) * (1.0 / 9007199254740992.0);
979 int cv::RNG_MT19937::uniform(int a, int b) { return (int)(next() % (b - a) + a); }
981 float cv::RNG_MT19937::uniform(float a, float b) { return ((float)*this)*(b - a) + a; }
983 double cv::RNG_MT19937::uniform(double a, double b) { return ((double)*this)*(b - a) + a; }
985 unsigned cv::RNG_MT19937::operator ()(unsigned b) { return next() % b; }
987 unsigned cv::RNG_MT19937::operator ()() { return next(); }