// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
 *
 * This driver produces cryptographically secure pseudorandom data. It is divided
 * into roughly six sections, each with a section header:
 *
 * - Initialization and readiness waiting.
 * - Fast key erasure RNG, the "crng".
 * - Entropy accumulation and extraction routines.
 * - Entropy collection routines.
 * - Userspace reader/writer interfaces.
 * - Sysctl interface.
 *
 * The high level overview is that there is one input pool, into which
 * various pieces of data are hashed. Some of that data is then "credited" as
 * having a certain number of bits of entropy. When enough bits of entropy are
 * available, the hash is finalized and handed as a key to a stream cipher that
 * expands it indefinitely for various consumers. This key is periodically
 * refreshed as the various entropy collectors, described below, add data to the
 * input pool and credit it. There is currently no Fortuna-like scheduler
 * involved, which can lead to malicious entropy sources causing a premature
 * reseed, and the entropy estimates are, at best, conservative guesses.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

/*********************************************************************
 *
 * Initialization and readiness waiting.
 *
 * Much of the RNG infrastructure is devoted to various dependencies
 * being able to wait until the RNG has collected enough entropy and
 * is ready for safe consumption.
 *
 *********************************************************************/

/*
 * crng_init =  0 --> Uninitialized
 *              1 --> Initialized
 *              2 --> Initialized from input_pool
 *
 * crng_init is protected by base_crng->lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
/* Various types of waiters for crng_init->2 transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_chain_lock);
static RAW_NOTIFIER_HEAD(random_ready_chain);

/* Control how we warn userspace. */
static struct ratelimit_state unseeded_warning =
        RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
        RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
static int ratelimit_disable __read_mostly;
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/*
 * Returns whether or not the input pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
 * u64,int,long} family of functions.
 *
 * Returns: true if the input pool has been seeded.
 *          false if the input pool has not been seeded.
 */
bool rng_is_initialized(void)
{
        return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
static void try_to_generate_entropy(void);

/*
 * Wait for the input pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions. Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the input pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
        while (!crng_ready()) {
                int ret;

                try_to_generate_entropy();
                ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
                if (ret)
                        return ret > 0 ? 0 : ret;
        }
        return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);
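
/*
 * Illustrative sketch, not part of this driver: a caller that needs
 * guaranteed-secure bytes first waits for the pool to be seeded. The
 * function name below is hypothetical.
 *
 *      static int example_gen_key(u8 key[32])
 *      {
 *              int ret = wait_for_random_bytes();
 *              if (ret)
 *                      return ret;
 *              get_random_bytes(key, 32);
 *              return 0;
 *      }
 *
 * A non-zero return from the wait means it was interrupted by a signal.
 */
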
/*
 * Add a callback function that will be invoked when the input
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *          -EALREADY if pool is already initialised (callback not called)
 */
int register_random_ready_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret = -EALREADY;

        if (crng_ready())
                return ret;

        spin_lock_irqsave(&random_ready_chain_lock, flags);
        if (!crng_ready())
                ret = raw_notifier_chain_register(&random_ready_chain, nb);
        spin_unlock_irqrestore(&random_ready_chain_lock, flags);
        return ret;
}

/*
 * Delete a previously registered readiness callback function.
 */
int unregister_random_ready_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&random_ready_chain_lock, flags);
        ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
        spin_unlock_irqrestore(&random_ready_chain_lock, flags);
        return ret;
}
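
/*
 * Illustrative sketch, not part of this driver: registering for the
 * readiness notification. The callback and block names are hypothetical.
 *
 *      static int example_rng_ready(struct notifier_block *nb,
 *                                   unsigned long action, void *data)
 *      {
 *              return NOTIFY_DONE;
 *      }
 *      static struct notifier_block example_nb = {
 *              .notifier_call = example_rng_ready,
 *      };
 *
 *      if (register_random_ready_notifier(&example_nb) == -EALREADY)
 *              example_rng_ready(&example_nb, 0, NULL);
 *
 * The -EALREADY branch covers the case where the pool was seeded before
 * the caller got around to registering.
 */
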
static void process_random_ready_list(void)
{
        unsigned long flags;

        spin_lock_irqsave(&random_ready_chain_lock, flags);
        raw_notifier_call_chain(&random_ready_chain, 0, NULL);
        spin_unlock_irqrestore(&random_ready_chain_lock, flags);
}

#define warn_unseeded_randomness(previous) \
        _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
        const bool print_once = false;
#else
        static bool print_once __read_mostly;
#endif

        if (print_once || crng_ready() ||
            (previous && (caller == READ_ONCE(*previous))))
                return;
        WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
        print_once = true;
#endif
        if (__ratelimit(&unseeded_warning))
                printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
                                func_name, caller, crng_init);
}

/*********************************************************************
 *
 * Fast key erasure RNG, the "crng".
 *
 * These functions expand entropy from the entropy extractor into
 * long streams for external consumption using the "fast key erasure"
 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
 *
 * There are a few exported interfaces for use by other drivers:
 *
 *      void get_random_bytes(void *buf, size_t nbytes)
 *      u32 get_random_u32()
 *      u64 get_random_u64()
 *      unsigned int get_random_int()
 *      unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value. This is equivalent to
 * a read from /dev/urandom. The integer family of functions may be
 * higher performance for one-off random integers, because they do a
 * percpu fetch of the batched entropy rather than computing a fresh
 * ChaCha block for every call.
 *
 *********************************************************************/

enum {
        CRNG_RESEED_INTERVAL = 300 * HZ,
        CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE
};

static struct {
        u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
        unsigned long birth;
        unsigned long generation;
        spinlock_t lock;
} base_crng = {
        .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
        u8 key[CHACHA_KEY_SIZE];
        unsigned long generation;
        local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
        .generation = ULONG_MAX,
        .lock = INIT_LOCAL_LOCK(crngs.lock),
};

/* Used by crng_reseed() to extract a new seed from the input pool. */
static bool drain_entropy(void *buf, size_t nbytes, bool force);

/*
 * This extracts a new crng key from the input pool, but only if there is a
 * sufficient amount of entropy available or force is true, in order to
 * mitigate bruteforcing of newly added bits.
 */
static void crng_reseed(bool force)
{
        unsigned long flags;
        unsigned long next_gen;
        u8 key[CHACHA_KEY_SIZE];
        bool finalize_init = false;

        /* Only reseed if we can, to prevent brute forcing a small amount of new bits. */
        if (!drain_entropy(key, sizeof(key), force))
                return;

        /*
         * We copy the new key into the base_crng, overwriting the old one,
         * and update the generation counter. We avoid hitting ULONG_MAX,
         * because the per-cpu crngs are initialized to ULONG_MAX, so this
         * forces new CPUs that come online to always initialize.
         */
        spin_lock_irqsave(&base_crng.lock, flags);
        memcpy(base_crng.key, key, sizeof(base_crng.key));
        next_gen = base_crng.generation + 1;
        if (next_gen == ULONG_MAX)
                ++next_gen;
        WRITE_ONCE(base_crng.generation, next_gen);
        WRITE_ONCE(base_crng.birth, jiffies);
        if (!crng_ready()) {
                crng_init = 2;
                finalize_init = true;
        }
        spin_unlock_irqrestore(&base_crng.lock, flags);
        memzero_explicit(key, sizeof(key));
        if (finalize_init) {
                process_random_ready_list();
                wake_up_interruptible(&crng_init_wait);
                kill_fasync(&fasync, SIGIO, POLL_IN);
                pr_notice("crng init done\n");
                if (unseeded_warning.missed) {
                        pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
                                  unseeded_warning.missed);
                        unseeded_warning.missed = 0;
                }
                if (urandom_warning.missed) {
                        pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
                                  urandom_warning.missed);
                        urandom_warning.missed = 0;
                }
        }
}

/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 */
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
                                  u32 chacha_state[CHACHA_STATE_WORDS],
                                  u8 *random_data, size_t random_data_len)
{
        u8 first_block[CHACHA_BLOCK_SIZE];

        BUG_ON(random_data_len > 32);

        chacha_init_consts(chacha_state);
        memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
        memset(&chacha_state[12], 0, sizeof(u32) * 4);
        chacha20_block(chacha_state, first_block);

        memcpy(key, first_block, CHACHA_KEY_SIZE);
        memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
        memzero_explicit(first_block, sizeof(first_block));
}
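
/*
 * An illustrative view of the split performed above, not extra API: one
 * 64-byte ChaCha block is produced, its first half immediately replaces
 * the key that generated it, and only the second half is handed out.
 *
 *      u32 state[CHACHA_STATE_WORDS];
 *      u8 out[32];
 *      crng_fast_key_erasure(key, state, out, sizeof(out));
 *
 * Afterwards "key" holds block[0..31] and "out" holds block[32..63], so
 * a later compromise of the key cannot reconstruct bytes already given
 * out.
 */
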
/*
 * Return whether the crng seed is considered to be sufficiently
 * old that a reseeding might be attempted. This happens if the last
 * reseeding was CRNG_RESEED_INTERVAL ago, or during early boot, at
 * an interval proportional to the uptime.
 */
static bool crng_has_old_seed(void)
{
        static bool early_boot = true;
        unsigned long interval = CRNG_RESEED_INTERVAL;

        if (unlikely(READ_ONCE(early_boot))) {
                time64_t uptime = ktime_get_seconds();
                if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
                        WRITE_ONCE(early_boot, false);
                else
                        interval = max_t(unsigned int, 5 * HZ,
                                         (unsigned int)uptime / 2 * HZ);
        }
        return time_after(jiffies, READ_ONCE(base_crng.birth) + interval);
}

/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
                            u8 *random_data, size_t random_data_len)
{
        unsigned long flags;
        struct crng *crng;

        BUG_ON(random_data_len > 32);

        /*
         * For the fast path, we check whether we're ready, unlocked first, and
         * then re-check once locked later. In the case where we're really not
         * ready, we do fast key erasure with the base_crng directly, because
         * this is what crng_pre_init_inject() mutates during early init.
         */
        if (!crng_ready()) {
                bool ready;

                spin_lock_irqsave(&base_crng.lock, flags);
                ready = crng_ready();
                if (!ready)
                        crng_fast_key_erasure(base_crng.key, chacha_state,
                                              random_data, random_data_len);
                spin_unlock_irqrestore(&base_crng.lock, flags);
                if (!ready)
                        return;
        }

        /*
         * If the base_crng is old enough, we try to reseed, which in turn
         * bumps the generation counter that we check below.
         */
        if (unlikely(crng_has_old_seed()))
                crng_reseed(false);

        local_lock_irqsave(&crngs.lock, flags);
        crng = raw_cpu_ptr(&crngs);

        /*
         * If our per-cpu crng is older than the base_crng, then it means
         * somebody reseeded the base_crng. In that case, we do fast key
         * erasure on the base_crng, and use its output as the new key
         * for our per-cpu crng. This brings us up to date with base_crng.
         */
        if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
                spin_lock(&base_crng.lock);
                crng_fast_key_erasure(base_crng.key, chacha_state,
                                      crng->key, sizeof(crng->key));
                crng->generation = base_crng.generation;
                spin_unlock(&base_crng.lock);
        }

        /*
         * Finally, when we've made it this far, our per-cpu crng has an up
         * to date key, and we can do fast key erasure with it to produce
         * some random data and a ChaCha state for the caller. All other
         * branches of this function are "unlikely", so most of the time we
         * should wind up here immediately.
         */
        crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
        local_unlock_irqrestore(&crngs.lock, flags);
}

/*
 * This function is for crng_init == 0 only. It loads entropy directly
 * into the crng's key, without going through the input pool. It is,
 * generally speaking, not very safe, but we use this only at early
 * boot time when it's better to have something there rather than
 * nothing.
 *
 * If account is set, then the crng_init_cnt counter is incremented.
 * This shouldn't be set by functions like add_device_randomness(),
 * where we can't trust the buffer passed to it is guaranteed to be
 * unpredictable (so it might not have any entropy at all).
 *
 * Returns the number of bytes processed from input, which is bounded
 * by CRNG_INIT_CNT_THRESH if account is true.
 */
static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
{
        static int crng_init_cnt = 0;
        struct blake2s_state hash;
        unsigned long flags;

        blake2s_init(&hash, sizeof(base_crng.key));

        spin_lock_irqsave(&base_crng.lock, flags);
        if (crng_init != 0) {
                spin_unlock_irqrestore(&base_crng.lock, flags);
                return 0;
        }

        if (account)
                len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);

        blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
        blake2s_update(&hash, input, len);
        blake2s_final(&hash, base_crng.key);

        if (account) {
                crng_init_cnt += len;
                if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
                        ++base_crng.generation;
                        crng_init = 1;
                }
        }

        spin_unlock_irqrestore(&base_crng.lock, flags);

        if (crng_init == 1)
                pr_notice("fast init done\n");

        return len;
}

static void _get_random_bytes(void *buf, size_t nbytes)
{
        u32 chacha_state[CHACHA_STATE_WORDS];
        u8 tmp[CHACHA_BLOCK_SIZE];
        size_t len;

        if (!nbytes)
                return;

        len = min_t(size_t, 32, nbytes);
        crng_make_state(chacha_state, buf, len);
        nbytes -= len;
        buf += len;

        while (nbytes) {
                if (nbytes < CHACHA_BLOCK_SIZE) {
                        chacha20_block(chacha_state, tmp);
                        memcpy(buf, tmp, nbytes);
                        memzero_explicit(tmp, sizeof(tmp));
                        break;
                }

                chacha20_block(chacha_state, buf);
                if (unlikely(chacha_state[12] == 0))
                        ++chacha_state[13];
                nbytes -= CHACHA_BLOCK_SIZE;
                buf += CHACHA_BLOCK_SIZE;
        }

        memzero_explicit(chacha_state, sizeof(chacha_state));
}

/*
 * This function is the exported kernel interface. It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc. It does not rely on the hardware random
 * number generator. For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
void get_random_bytes(void *buf, size_t nbytes)
{
        static void *previous;

        warn_unseeded_randomness(&previous);
        _get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);

static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
        bool large_request = nbytes > 256;
        ssize_t ret = 0;
        size_t len;
        u32 chacha_state[CHACHA_STATE_WORDS];
        u8 output[CHACHA_BLOCK_SIZE];

        if (!nbytes)
                return 0;

        len = min_t(size_t, 32, nbytes);
        crng_make_state(chacha_state, output, len);

        if (copy_to_user(buf, output, len))
                return -EFAULT;
        nbytes -= len;
        buf += len;
        ret += len;

        while (nbytes) {
                if (large_request && need_resched()) {
                        if (signal_pending(current))
                                break;
                        schedule();
                }

                chacha20_block(chacha_state, output);
                if (unlikely(chacha_state[12] == 0))
                        ++chacha_state[13];

                len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
                if (copy_to_user(buf, output, len)) {
                        ret = -EFAULT;
                        break;
                }

                nbytes -= len;
                buf += len;
                ret += len;
        }

        memzero_explicit(chacha_state, sizeof(chacha_state));
        memzero_explicit(output, sizeof(output));
        return ret;
}

/*
 * Batched entropy returns random integers. The quality of the random
 * number is as good as /dev/urandom. In order to ensure that the randomness
 * provided by this function is okay, the function wait_for_random_bytes()
 * should be called and return 0 at least once at any point prior.
 */
struct batched_entropy {
        union {
                /*
                 * We make this 1.5x a ChaCha block, so that we get the
                 * remaining 32 bytes from fast key erasure, plus one full
                 * block from the detached ChaCha state. We can increase
                 * the size of this later if needed so long as we keep the
                 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
                 */
                u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
                u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
        };
        local_lock_t lock;
        unsigned long generation;
        unsigned int position;
};

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
        .lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
        .position = UINT_MAX
};

u64 get_random_u64(void)
{
        u64 ret;
        unsigned long flags;
        struct batched_entropy *batch;
        static void *previous;
        unsigned long next_gen;

        warn_unseeded_randomness(&previous);

        local_lock_irqsave(&batched_entropy_u64.lock, flags);
        batch = raw_cpu_ptr(&batched_entropy_u64);

        next_gen = READ_ONCE(base_crng.generation);
        if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
            next_gen != batch->generation) {
                _get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
                batch->position = 0;
                batch->generation = next_gen;
        }

        ret = batch->entropy_u64[batch->position];
        batch->entropy_u64[batch->position] = 0;
        ++batch->position;
        local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
        return ret;
}
EXPORT_SYMBOL(get_random_u64);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
        .lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
        .position = UINT_MAX
};

u32 get_random_u32(void)
{
        u32 ret;
        unsigned long flags;
        struct batched_entropy *batch;
        static void *previous;
        unsigned long next_gen;

        warn_unseeded_randomness(&previous);

        local_lock_irqsave(&batched_entropy_u32.lock, flags);
        batch = raw_cpu_ptr(&batched_entropy_u32);

        next_gen = READ_ONCE(base_crng.generation);
        if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
            next_gen != batch->generation) {
                _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
                batch->position = 0;
                batch->generation = next_gen;
        }

        ret = batch->entropy_u32[batch->position];
        batch->entropy_u32[batch->position] = 0;
        ++batch->position;
        local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
        return ret;
}
EXPORT_SYMBOL(get_random_u32);
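
/*
 * Illustrative sketch, not part of this driver: one-off integers come
 * from the per-cpu batch, so they are cheap enough for hot paths. The
 * bounded use below is hypothetical (and the modulo is slightly biased,
 * which is fine for non-cryptographic jitter).
 *
 *      u32 delay_ms = 50 + get_random_u32() % 50;
 *      unsigned long token = get_random_long();
 */
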
/*
 * This function is called when the CPU is coming up, with entry
 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
 */
int random_prepare_cpu(unsigned int cpu)
{
        /*
         * When the cpu comes back online, immediately invalidate both
         * the per-cpu crng and all batches, so that we serve fresh
         * randomness.
         */
        per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
        per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
        per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
        return 0;
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:      The smallest acceptable address the caller will take.
 * @range:      The size of the area, starting at @start, within which the
 *              random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
        if (!PAGE_ALIGNED(start)) {
                range -= PAGE_ALIGN(start) - start;
                start = PAGE_ALIGN(start);
        }

        if (start > ULONG_MAX - range)
                range = ULONG_MAX - start;

        range >>= PAGE_SHIFT;

        if (range == 0)
                return start;

        return start + (get_random_long() % range << PAGE_SHIFT);
}
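
/*
 * Illustrative sketch, not part of this driver: picking a randomized
 * base for a mapping, in the spirit of the mmap layout code that calls
 * this helper. The bounds below are hypothetical.
 *
 *      unsigned long base = randomize_page(TASK_UNMAPPED_BASE, SZ_1G);
 *
 * The result is page aligned and falls within [start, start + range),
 * or equals start if range rounds down to zero pages.
 */
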
/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available. It is not recommended for
 * use. Use get_random_bytes() instead. It returns the number of
 * bytes filled in.
 */
size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
{
        size_t left = nbytes;
        u8 *p = buf;

        while (left) {
                unsigned long v;
                size_t chunk = min_t(size_t, left, sizeof(unsigned long));

                if (!arch_get_random_long(&v))
                        break;

                memcpy(p, &v, chunk);
                p += chunk;
                left -= chunk;
        }

        return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);

/**********************************************************************
 *
 * Entropy accumulation and extraction routines.
 *
 * Callers may add entropy via:
 *
 *      static void mix_pool_bytes(const void *in, size_t nbytes)
 *
 * After which, if added entropy should be credited:
 *
 *      static void credit_entropy_bits(size_t nbits)
 *
 * Finally, extract entropy via these two, with the latter one
 * setting the entropy count to zero and extracting only if there
 * is POOL_MIN_BITS entropy credited prior or force is true:
 *
 *      static void extract_entropy(void *buf, size_t nbytes)
 *      static bool drain_entropy(void *buf, size_t nbytes, bool force)
 *
 **********************************************************************/

enum {
        POOL_BITS = BLAKE2S_HASH_SIZE * 8,
        POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
};

/* For notifying userspace that it should write into /dev/random. */
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);

static struct {
        struct blake2s_state hash;
        spinlock_t lock;
        unsigned int entropy_count;
} input_pool = {
        .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
                    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
                    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
        .hash.outlen = BLAKE2S_HASH_SIZE,
        .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};

static void _mix_pool_bytes(const void *in, size_t nbytes)
{
        blake2s_update(&input_pool.hash, in, nbytes);
}

/*
 * This function adds bytes into the entropy "pool". It does not
 * update the entropy estimate. The caller should call
 * credit_entropy_bits if this is appropriate.
 */
static void mix_pool_bytes(const void *in, size_t nbytes)
{
        unsigned long flags;

        spin_lock_irqsave(&input_pool.lock, flags);
        _mix_pool_bytes(in, nbytes);
        spin_unlock_irqrestore(&input_pool.lock, flags);
}

static void credit_entropy_bits(size_t nbits)
{
        unsigned int entropy_count, orig, add;

        if (!nbits)
                return;

        add = min_t(size_t, nbits, POOL_BITS);

        do {
                orig = READ_ONCE(input_pool.entropy_count);
                entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
        } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);

        if (!crng_ready() && entropy_count >= POOL_MIN_BITS)
                crng_reseed(false);
}
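
/*
 * Illustrative sketch, not part of this driver, of the add-then-credit
 * pattern the two functions above implement. The source and the 8-bit
 * estimate are hypothetical; estimates should err low.
 *
 *      u8 sample[16];
 *      example_read_noise_source(sample, sizeof(sample));
 *      mix_pool_bytes(sample, sizeof(sample));
 *      credit_entropy_bits(8);
 */
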
/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */
static void extract_entropy(void *buf, size_t nbytes)
{
        unsigned long flags;
        u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
        struct {
                unsigned long rdseed[32 / sizeof(long)];
                size_t counter;
        } block;
        size_t i;

        for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
                if (!arch_get_random_seed_long(&block.rdseed[i]) &&
                    !arch_get_random_long(&block.rdseed[i]))
                        block.rdseed[i] = random_get_entropy();
        }

        spin_lock_irqsave(&input_pool.lock, flags);

        /* seed = HASHPRF(last_key, entropy_input) */
        blake2s_final(&input_pool.hash, seed);

        /* next_key = HASHPRF(seed, RDSEED || 0) */
        block.counter = 0;
        blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
        blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

        spin_unlock_irqrestore(&input_pool.lock, flags);
        memzero_explicit(next_key, sizeof(next_key));

        while (nbytes) {
                i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
                /* output = HASHPRF(seed, RDSEED || ++counter) */
                ++block.counter;
                blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
                nbytes -= i;
                buf += i;
        }

        memzero_explicit(seed, sizeof(seed));
        memzero_explicit(&block, sizeof(block));
}
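
/*
 * A compact, non-normative summary of the chain above, writing H_k(m)
 * for BLAKE2s keyed (or seeded) with k:
 *
 *      seed     = H_last_key(pool input)
 *      next_key = H_seed(RDSEED || 0)
 *      output_i = H_seed(RDSEED || i),  i = 1, 2, ...
 *
 * Because next_key and each output_i are independent PRF outputs,
 * handing out an output block reveals neither the seed nor the
 * re-keyed pool state.
 */
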
/*
 * First we make sure we have POOL_MIN_BITS of entropy in the pool unless force
 * is true, and then we set the entropy count to zero (but don't actually touch
 * any data). Only then can we extract a new key with extract_entropy().
 */
static bool drain_entropy(void *buf, size_t nbytes, bool force)
{
        unsigned int entropy_count;

        do {
                entropy_count = READ_ONCE(input_pool.entropy_count);
                if (!force && entropy_count < POOL_MIN_BITS)
                        return false;
        } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
        extract_entropy(buf, nbytes);
        wake_up_interruptible(&random_write_wait);
        kill_fasync(&fasync, SIGIO, POLL_OUT);
        return true;
}

/**********************************************************************
 *
 * Entropy collection routines.
 *
 * The following exported functions are used for pushing entropy into
 * the above entropy accumulation routines:
 *
 *      void add_device_randomness(const void *buf, size_t size);
 *      void add_input_randomness(unsigned int type, unsigned int code,
 *                                unsigned int value);
 *      void add_disk_randomness(struct gendisk *disk);
 *      void add_hwgenerator_randomness(const void *buffer, size_t count,
 *                                      size_t entropy);
 *      void add_bootloader_randomness(const void *buf, size_t size);
 *      void add_vmfork_randomness(const void *unique_vm_id, size_t size);
 *      void add_interrupt_randomness(int irq);
 *
 * add_device_randomness() adds data to the input pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC. This does *not* credit any actual entropy to
 * the pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_input_randomness() uses the input layer interrupt timing, as well
 * as the event type information from the hardware.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * The above two routines try to estimate how many bits of entropy
 * to credit. They do this by keeping track of the first and second
 * order deltas of the event timings.
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller. If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
 * add_device_randomness(), depending on whether or not the configuration
 * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 *
 * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
 * representing the current instance of a VM to the pool, without crediting,
 * and then force-reseeds the crng so that it takes effect immediately.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the input pool roughly once a second or after 64
 * interrupts, crediting 1 bit of entropy for whichever comes first.
 *
 **********************************************************************/

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
        return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

/*
 * The first collection of entropy occurs at system boot while interrupts
 * are still turned off. Here we push in RDSEED, a timestamp, and utsname().
 * Depending on the above configuration knob, RDSEED may be considered
 * sufficient for initialization. Note that much earlier setup may already
 * have pushed entropy into the input pool by the time we get here.
 */
int __init rand_initialize(void)
{
        size_t i;
        ktime_t now = ktime_get_real();
        bool arch_init = true;
        unsigned long rv;

        for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
                if (!arch_get_random_seed_long_early(&rv) &&
                    !arch_get_random_long_early(&rv)) {
                        rv = random_get_entropy();
                        arch_init = false;
                }
                _mix_pool_bytes(&rv, sizeof(rv));
        }
        _mix_pool_bytes(&now, sizeof(now));
        _mix_pool_bytes(utsname(), sizeof(*(utsname())));

        extract_entropy(base_crng.key, sizeof(base_crng.key));
        ++base_crng.generation;

        if (arch_init && trust_cpu && !crng_ready()) {
                crng_init = 2;
                pr_notice("crng init done (trusting CPU's manufacturer)\n");
        }

        if (ratelimit_disable) {
                urandom_warning.interval = 0;
                unseeded_warning.interval = 0;
        }
        return 0;
}

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, size_t size)
{
        cycles_t cycles = random_get_entropy();
        unsigned long flags, now = jiffies;

        if (crng_init == 0 && size)
                crng_pre_init_inject(buf, size, false);

        spin_lock_irqsave(&input_pool.lock, flags);
        _mix_pool_bytes(&cycles, sizeof(cycles));
        _mix_pool_bytes(&now, sizeof(now));
        _mix_pool_bytes(buf, size);
        spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

/* There is one of these per entropy source */
struct timer_rand_state {
        unsigned long last_time;
        long last_delta, last_delta2;
};

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays. It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened. This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
        cycles_t cycles = random_get_entropy();
        unsigned long flags, now = jiffies;
        long delta, delta2, delta3;

        spin_lock_irqsave(&input_pool.lock, flags);
        _mix_pool_bytes(&cycles, sizeof(cycles));
        _mix_pool_bytes(&now, sizeof(now));
        _mix_pool_bytes(&num, sizeof(num));
        spin_unlock_irqrestore(&input_pool.lock, flags);

        /*
         * Calculate number of bits of randomness we probably added.
         * We take into account the first, second and third-order deltas
         * in order to make our estimate.
         */
        delta = now - READ_ONCE(state->last_time);
        WRITE_ONCE(state->last_time, now);

        delta2 = delta - READ_ONCE(state->last_delta);
        WRITE_ONCE(state->last_delta, delta);

        delta3 = delta2 - READ_ONCE(state->last_delta2);
        WRITE_ONCE(state->last_delta2, delta2);

        if (delta < 0)
                delta = -delta;
        if (delta2 < 0)
                delta2 = -delta2;
        if (delta3 < 0)
                delta3 = -delta3;
        if (delta > delta2)
                delta = delta2;
        if (delta > delta3)
                delta = delta3;

        /*
         * delta is now minimum absolute delta.
         * Round down by 1 bit on general principles,
         * and limit entropy estimate to 11 bits.
         */
        credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11));
}

void add_input_randomness(unsigned int type, unsigned int code,
                          unsigned int value)
{
        static unsigned char last_value;
        static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };

        /* Ignore autorepeat and the like. */
        if (value == last_value)
                return;

        last_value = value;
        add_timer_randomness(&input_timer_state,
                             (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
        if (!disk || !disk->random)
                return;
        /* First major is 1, so we get >= 0x200 here. */
        add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);

void rand_initialize_disk(struct gendisk *disk)
{
        struct timer_rand_state *state;

        /*
         * If kzalloc returns null, we just won't use that entropy
         * source.
         */
        state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
        if (state) {
                state->last_time = INITIAL_JIFFIES;
                disk->random = state;
        }
}
#endif

/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const void *buffer, size_t count,
                                size_t entropy)
{
        if (unlikely(crng_init == 0)) {
                size_t ret = crng_pre_init_inject(buffer, count, true);
                mix_pool_bytes(buffer, ret);
                count -= ret;
                buffer += ret;
                if (!count || crng_init == 0)
                        return;
        }

        /*
         * Throttle writing if we're above the trickle threshold.
         * We'll be woken up again once below POOL_MIN_BITS, when
         * the calling thread is about to terminate, or once
         * CRNG_RESEED_INTERVAL has elapsed.
         */
        wait_event_interruptible_timeout(random_write_wait,
                        !system_wq || kthread_should_stop() ||
                        input_pool.entropy_count < POOL_MIN_BITS,
                        CRNG_RESEED_INTERVAL);
        mix_pool_bytes(buffer, count);
        credit_entropy_bits(entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
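
/*
 * Illustrative sketch, not part of this driver: the feeding loop of a
 * hypothetical hardware RNG driver; example_read_trng() stands in for
 * whatever reads the device.
 *
 *      static int example_hwrng_fill(void *unused)
 *      {
 *              u8 buf[32];
 *
 *              while (!kthread_should_stop()) {
 *                      example_read_trng(buf, sizeof(buf));
 *                      add_hwgenerator_randomness(buf, sizeof(buf),
 *                                                 sizeof(buf) * 8);
 *              }
 *              return 0;
 *      }
 *
 * The loop may sleep inside add_hwgenerator_randomness() until the pool
 * actually wants more entropy, which is what throttles the device.
 */
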
/*
 * Handle random seed passed by bootloader.
 * If the seed is trustworthy, it is treated like input from a hardware RNG;
 * otherwise it is treated as device data.
 * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
 */
void add_bootloader_randomness(const void *buf, size_t size)
{
        if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
                add_hwgenerator_randomness(buf, size, size * 8);
        else
                add_device_randomness(buf, size);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);

#if IS_ENABLED(CONFIG_VMGENID)
static BLOCKING_NOTIFIER_HEAD(vmfork_chain);

/*
 * Handle a new unique VM ID, which is unique, not secret, so we
 * don't credit it, but we do immediately force a reseed after so
 * that it's used by the crng posthaste.
 */
void add_vmfork_randomness(const void *unique_vm_id, size_t size)
{
        add_device_randomness(unique_vm_id, size);
        if (crng_ready()) {
                crng_reseed(true);
                pr_notice("crng reseeded due to virtual machine fork\n");
        }
        blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
}
#if IS_MODULE(CONFIG_VMGENID)
EXPORT_SYMBOL_GPL(add_vmfork_randomness);
#endif

int register_random_vmfork_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);

int unregister_random_vmfork_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif
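
/*
 * Illustrative sketch, not part of this driver: a consumer that must
 * invalidate cached secrets when the VM image is duplicated. The names
 * are hypothetical.
 *
 *      static int example_vmfork(struct notifier_block *nb,
 *                                unsigned long action, void *data)
 *      {
 *              example_flush_session_keys();
 *              return NOTIFY_DONE;
 *      }
 *      static struct notifier_block example_vmfork_nb = {
 *              .notifier_call = example_vmfork,
 *      };
 *
 *      register_random_vmfork_notifier(&example_vmfork_nb);
 */
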
struct fast_pool {
        struct work_struct mix;
        unsigned long pool[4];
        unsigned long last;
        unsigned int count;
        u16 reg_idx;
};

static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
        /* SipHash constants */
        .pool = { 0x736f6d6570736575UL, 0x646f72616e646f6dUL,
                  0x6c7967656e657261UL, 0x7465646279746573UL }
#else
        /* HalfSipHash constants */
        .pool = { 0, 0, 0x6c796765U, 0x74656462U }
#endif
};

/*
 * This is [Half]SipHash-1-x, starting from an empty key. Because
 * the key is fixed, it assumes that its inputs are non-malicious,
 * and therefore this has no security on its own. s represents the
 * 128 or 256-bit SipHash state, while v represents a 128-bit input.
 */
static void fast_mix(unsigned long s[4], const unsigned long *v)
{
        size_t i;

        for (i = 0; i < 16 / sizeof(long); ++i) {
                s[3] ^= v[i];
#ifdef CONFIG_64BIT
                s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32);
                s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2];
                s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0];
                s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32);
#else
                s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16);
                s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2];
                s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0];
                s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16);
#endif
                s[0] ^= v[i];
        }
}

/*
 * This function is called when the CPU has just come online, with
 * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
 */
int random_online_cpu(unsigned int cpu)
{
        /*
         * During CPU shutdown and before CPU onlining, add_interrupt_
         * randomness() may schedule mix_interrupt_randomness(), and
         * set the MIX_INFLIGHT flag. However, because the worker can
         * be scheduled on a different CPU during this period, that
         * flag will never be cleared. For that reason, we zero out
         * the flag here, which runs just after workqueues are onlined
         * for the CPU again. This also has the effect of setting the
         * irq randomness count to zero so that new accumulated irqs
         * are fresh.
         */
        per_cpu_ptr(&irq_randomness, cpu)->count = 0;
        return 0;
}

static unsigned long get_reg(struct fast_pool *f, struct pt_regs *regs)
{
        unsigned long *ptr = (unsigned long *)regs;
        unsigned int idx;

        if (regs == NULL)
                return 0;
        idx = READ_ONCE(f->reg_idx);
        if (idx >= sizeof(struct pt_regs) / sizeof(unsigned long))
                idx = 0;
        ptr += idx++;
        WRITE_ONCE(f->reg_idx, idx);
        return *ptr;
}

static void mix_interrupt_randomness(struct work_struct *work)
{
        struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
        /*
         * The size of the copied stack pool is explicitly 16 bytes so that we
         * tax mix_pool_bytes()'s compression function the same amount on all
         * platforms. This means on 64-bit we copy half the pool into this,
         * while on 32-bit we copy all of it. The entropy is supposed to be
         * sufficiently dispersed between bits that in the sponge-like
         * half case, on average we don't wind up "losing" some.
         */
        u8 pool[16];

        /* Check to see if we're running on the wrong CPU due to hotplug. */
        local_irq_disable();
        if (fast_pool != this_cpu_ptr(&irq_randomness)) {
                local_irq_enable();
                return;
        }

        /*
         * Copy the pool to the stack so that the mixer always has a
         * consistent view, before we reenable irqs again.
         */
        memcpy(pool, fast_pool->pool, sizeof(pool));
        fast_pool->count = 0;
        fast_pool->last = jiffies;
        local_irq_enable();

        if (unlikely(crng_init == 0)) {
                crng_pre_init_inject(pool, sizeof(pool), true);
                mix_pool_bytes(pool, sizeof(pool));
        } else {
                mix_pool_bytes(pool, sizeof(pool));
                credit_entropy_bits(1);
        }

        memzero_explicit(pool, sizeof(pool));
}

void add_interrupt_randomness(int irq)
{
        enum { MIX_INFLIGHT = 1U << 31 };
        cycles_t cycles = random_get_entropy();
        unsigned long now = jiffies;
        struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
        struct pt_regs *regs = get_irq_regs();
        unsigned int new_count;
        union {
                u32 u32[4];
                u64 u64[2];
                unsigned long longs[16 / sizeof(long)];
        } irq_data;

        if (cycles == 0)
                cycles = get_reg(fast_pool, regs);

        if (sizeof(cycles) == 8)
                irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
        else {
                irq_data.u32[0] = cycles ^ irq;
                irq_data.u32[1] = now;
        }

        if (sizeof(unsigned long) == 8)
                irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
        else {
                irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
                irq_data.u32[3] = get_reg(fast_pool, regs);
        }

        fast_mix(fast_pool->pool, irq_data.longs);
        new_count = ++fast_pool->count;

        if (new_count & MIX_INFLIGHT)
                return;

        if (new_count < 64 && (!time_after(now, fast_pool->last + HZ) ||
                               unlikely(crng_init == 0)))
                return;

        if (unlikely(!fast_pool->mix.func))
                INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
        fast_pool->count |= MIX_INFLIGHT;
        queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter. Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that is
 * generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void entropy_timer(struct timer_list *t)
{
        credit_entropy_bits(1);
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise.
 */
static void try_to_generate_entropy(void)
{
        struct {
                cycles_t cycles;
                struct timer_list timer;
        } stack;

        stack.cycles = random_get_entropy();

        /* Slow counter - or none. Don't even bother */
        if (stack.cycles == random_get_entropy())
                return;

        timer_setup_on_stack(&stack.timer, entropy_timer, 0);
        while (!crng_ready() && !signal_pending(current)) {
                if (!timer_pending(&stack.timer))
                        mod_timer(&stack.timer, jiffies + 1);
                mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
                schedule();
                stack.cycles = random_get_entropy();
        }

        del_timer_sync(&stack.timer);
        destroy_timer_on_stack(&stack.timer);
        mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
}

/**********************************************************************
 *
 * Userspace reader/writer interfaces.
 *
 * getrandom(2) is the primary modern interface into the RNG and should
 * be used in preference to anything else.
 *
 * Reading from /dev/random has the same functionality as calling
 * getrandom(2) with flags=0. In earlier versions, however, it had
 * vastly different semantics and should therefore be avoided, to
 * prevent backwards compatibility issues.
 *
 * Reading from /dev/urandom has the same functionality as calling
 * getrandom(2) with flags=GRND_INSECURE. Because it does not block
 * waiting for the RNG to be ready, it should not be used.
 *
 * Writing to either /dev/random or /dev/urandom adds entropy to
 * the input pool but does not credit it.
 *
 * Polling on /dev/random indicates when the RNG is initialized, on
 * the read side, and when it wants new entropy, on the write side.
 *
 * Both /dev/random and /dev/urandom have the same set of ioctls for
 * adding entropy, getting the entropy count, zeroing the count, and
 * reseeding the crng.
 *
 **********************************************************************/

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
                flags)
{
        if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
                return -EINVAL;

        /*
         * Requesting insecure and blocking randomness at the same time makes
         * no sense.
         */
        if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
                return -EINVAL;

        if (count > INT_MAX)
                count = INT_MAX;

        if (!(flags & GRND_INSECURE) && !crng_ready()) {
                int ret;

                if (flags & GRND_NONBLOCK)
                        return -EAGAIN;
                ret = wait_for_random_bytes();
                if (unlikely(ret))
                        return ret;
        }
        return get_random_bytes_user(buf, count);
}
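
/*
 * Illustrative userspace sketch, not part of this driver: the preferred
 * way to obtain randomness, matching the semantics described above;
 * handle_error() is a hypothetical stand-in.
 *
 *      #include <sys/random.h>
 *
 *      unsigned char key[32];
 *      if (getrandom(key, sizeof(key), 0) != (ssize_t)sizeof(key))
 *              handle_error();
 *
 * With flags=0 this blocks until the pool is initialized and then never
 * blocks again; GRND_NONBLOCK turns the initial wait into -EAGAIN.
 */
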
static __poll_t random_poll(struct file *file, poll_table *wait)
{
        __poll_t mask;

        poll_wait(file, &crng_init_wait, wait);
        poll_wait(file, &random_write_wait, wait);
        mask = 0;
        if (crng_ready())
                mask |= EPOLLIN | EPOLLRDNORM;
        if (input_pool.entropy_count < POOL_MIN_BITS)
                mask |= EPOLLOUT | EPOLLWRNORM;
        return mask;
}

static int write_pool(const char __user *ubuf, size_t count)
{
        size_t len;
        int ret = 0;
        u8 block[BLAKE2S_BLOCK_SIZE];

        while (count) {
                len = min(count, sizeof(block));
                if (copy_from_user(block, ubuf, len)) {
                        ret = -EFAULT;
                        goto out;
                }
                count -= len;
                ubuf += len;
                mix_pool_bytes(block, len);
                cond_resched();
        }

out:
        memzero_explicit(block, sizeof(block));
        return ret;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        int ret;

        ret = write_pool(buffer, count);
        if (ret)
                return ret;

        return (ssize_t)count;
}

static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
                            loff_t *ppos)
{
        static int maxwarn = 10;

        if (!crng_ready() && maxwarn > 0) {
                maxwarn--;
                if (__ratelimit(&urandom_warning))
                        pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
                                  current->comm, nbytes);
        }

        return get_random_bytes_user(buf, nbytes);
}

static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
                           loff_t *ppos)
{
        int ret;

        ret = wait_for_random_bytes();
        if (ret != 0)
                return ret;
        return get_random_bytes_user(buf, nbytes);
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
        int size, ent_count;
        int __user *p = (int __user *)arg;
        int retval;

        switch (cmd) {
        case RNDGETENTCNT:
                /* Inherently racy, no point locking. */
                if (put_user(input_pool.entropy_count, p))
                        return -EFAULT;
                return 0;
        case RNDADDTOENTCNT:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (get_user(ent_count, p))
                        return -EFAULT;
                if (ent_count < 0)
                        return -EINVAL;
                credit_entropy_bits(ent_count);
                return 0;
        case RNDADDENTROPY:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (get_user(ent_count, p++))
                        return -EFAULT;
                if (ent_count < 0)
                        return -EINVAL;
                if (get_user(size, p++))
                        return -EFAULT;
                retval = write_pool((const char __user *)p, size);
                if (retval < 0)
                        return retval;
                credit_entropy_bits(ent_count);
                return 0;
        case RNDZAPENTCNT:
        case RNDCLEARPOOL:
                /*
                 * Clear the entropy pool counters. We no longer clear
                 * the entropy pool, as that's silly.
                 */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (xchg(&input_pool.entropy_count, 0) >= POOL_MIN_BITS) {
                        wake_up_interruptible(&random_write_wait);
                        kill_fasync(&fasync, SIGIO, POLL_OUT);
                }
                return 0;
        case RNDRESEEDCRNG:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                if (!crng_ready())
                        return -ENODATA;
                crng_reseed(true);
                return 0;
        default:
                return -EINVAL;
        }
}

static int random_fasync(int fd, struct file *filp, int on)
{
        return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
        .read = random_read,
        .write = random_write,
        .poll = random_poll,
        .unlocked_ioctl = random_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
        .read = urandom_read,
        .write = random_write,
        .unlocked_ioctl = random_ioctl,
        .compat_ioctl = compat_ptr_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
};

/********************************************************************
 *
 * Sysctl interface.
 *
 * These are partly unused legacy knobs with dummy values to not break
 * userspace and partly still useful things. They are usually accessible
 * in /proc/sys/kernel/random/ and are as follows:
 *
 * - boot_id - a UUID representing the current boot.
 *
 * - uuid - a random UUID, different each time the file is read.
 *
 * - poolsize - the number of bits of entropy that the input pool can
 *   hold, tied to the POOL_BITS constant.
 *
 * - entropy_avail - the number of bits of entropy currently in the
 *   input pool. Always <= poolsize.
 *
 * - write_wakeup_threshold - the amount of entropy in the input pool
 *   below which write polls to /dev/random will unblock, requesting
 *   more entropy, tied to the POOL_MIN_BITS constant. It is writable
 *   to avoid breaking old userspaces, but writing to it does not
 *   change any behavior of the RNG.
 *
 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
 *   It is writable to avoid breaking old userspaces, but writing
 *   to it does not change any behavior of the RNG.
 *
 ********************************************************************/
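
/*
 * Illustrative userspace sketch, not part of this driver: reading one
 * of the knobs documented above.
 *
 *      #include <stdio.h>
 *
 *      char id[64];
 *      FILE *f = fopen("/proc/sys/kernel/random/boot_id", "r");
 *      if (f) {
 *              if (fgets(id, sizeof(id), f))
 *                      printf("booted as %s", id);
 *              fclose(f);
 *      }
 */
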
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS;
static int sysctl_poolsize = POOL_BITS;
static u8 sysctl_bootid[UUID_SIZE];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID. The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 */
static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
                        size_t *lenp, loff_t *ppos)
{
        u8 tmp_uuid[UUID_SIZE], *uuid;
        char uuid_string[UUID_STRING_LEN + 1];
        struct ctl_table fake_table = {
                .data = uuid_string,
                .maxlen = UUID_STRING_LEN
        };

        if (write)
                return -EPERM;

        uuid = table->data;
        if (!uuid) {
                uuid = tmp_uuid;
                generate_random_uuid(uuid);
        } else {
                static DEFINE_SPINLOCK(bootid_spinlock);

                spin_lock(&bootid_spinlock);
                if (!uuid[8])
                        generate_random_uuid(uuid);
                spin_unlock(&bootid_spinlock);
        }

        snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
        return proc_dostring(&fake_table, 0, buffer, lenp, ppos);
}

/* The same as proc_dointvec, but writes don't change anything. */
static int proc_do_rointvec(struct ctl_table *table, int write, void *buffer,
                            size_t *lenp, loff_t *ppos)
{
        return write ? 0 : proc_dointvec(table, 0, buffer, lenp, ppos);
}

static struct ctl_table random_table[] = {
        {
                .procname = "poolsize",
                .data = &sysctl_poolsize,
                .maxlen = sizeof(int),
                .mode = 0444,
                .proc_handler = proc_dointvec,
        },
        {
                .procname = "entropy_avail",
                .data = &input_pool.entropy_count,
                .maxlen = sizeof(int),
                .mode = 0444,
                .proc_handler = proc_dointvec,
        },
        {
                .procname = "write_wakeup_threshold",
                .data = &sysctl_random_write_wakeup_bits,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_do_rointvec,
        },
        {
                .procname = "urandom_min_reseed_secs",
                .data = &sysctl_random_min_urandom_seed,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_do_rointvec,
        },
        {
                .procname = "boot_id",
                .data = &sysctl_bootid,
                .mode = 0444,
                .proc_handler = proc_do_uuid,
        },
        {
                .procname = "uuid",
                .mode = 0444,
                .proc_handler = proc_do_uuid,
        },
        { }
};

/*
 * rand_initialize() is called before sysctl_init(),
 * so we cannot call register_sysctl_init() in rand_initialize()
 */
static int __init random_sysctls_init(void)
{
        register_sysctl_init("kernel/random", random_table);
        return 0;
}
device_initcall(random_sysctls_init);
#endif