return ok;
}
-static inline bool __must_check arch_get_random_long(unsigned long *v)
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
	/*
	 * Only support the generic interface after we have detected
	 * the system wide capability, avoiding complexity with the
	 * cpufeature code and with potential scheduling between CPUs
	 * with and without the feature.
	 */
- if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
- return true;
- return false;
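+	/* RNDR yields one 64-bit value per read, so at most one long is returned. */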
+ if (max_longs && cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
+ return 1;
+ return 0;
}
-static inline bool __must_check arch_get_random_int(unsigned int *v)
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
- if (cpus_have_const_cap(ARM64_HAS_RNG)) {
- unsigned long val;
-
- if (__arm64_rndr(&val)) {
- *v = val;
- return true;
- }
- }
- return false;
-}
-
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
-{
- struct arm_smccc_res res;
+ if (!max_longs)
+ return 0;
	/*
	 * We prefer the SMCCC call, since its semantics (return actual
	 * hardware backed entropy) is closer to the idea behind this
	 * function here than what even the RNDRRS register provides
	 * (the output of a pseudo RNG freshly seeded by a TRNG).
	 */
if (smccc_trng_available) {
- arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+ struct arm_smccc_res res;
+
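+		/* SMCCC TRNG returns at most 192 bits per call, i.e. three longs, in res.a1..a3. */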
+ max_longs = min_t(size_t, 3, max_longs);
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
if ((int)res.a0 >= 0) {
- *v = res.a3;
- return true;
+ switch (max_longs) {
+ case 3:
+ *v++ = res.a1;
+ fallthrough;
+ case 2:
+ *v++ = res.a2;
+ fallthrough;
+ case 1:
+ *v++ = res.a3;
+ break;
+ }
+ return max_longs;
}
}
	/*
	 * RNDRRS is not backed by an entropy source but by a DRBG that is
	 * reseeded after every invocation. This is not a 100% fit but good
	 * enough to implement this API if no other entropy source exists.
	 */
if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndrrs(v))
- return true;
+ return 1;
- return false;
-}
-
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
-{
- struct arm_smccc_res res;
- unsigned long val;
-
- if (smccc_trng_available) {
- arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 32, &res);
- if ((int)res.a0 >= 0) {
- *v = res.a3 & GENMASK(31, 0);
- return true;
- }
- }
-
- if (cpus_have_const_cap(ARM64_HAS_RNG)) {
- if (__arm64_rndrrs(&val)) {
- *v = val;
- return true;
- }
- }
-
- return false;
+ return 0;
}
static inline bool __init __early_cpu_has_rndr(void)
{
	/* Open code as we run prior to the first call to cpufeature. */
	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
	return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}
-static inline bool __init __must_check
-arch_get_random_seed_long_early(unsigned long *v)
+static inline size_t __init __must_check
+arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
{
WARN_ON(system_state != SYSTEM_BOOTING);
+ if (!max_longs)
+ return 0;
+
if (smccc_trng_available) {
struct arm_smccc_res res;
- arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, 64, &res);
+ max_longs = min_t(size_t, 3, max_longs);
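+		/* As above, the firmware TRNG hands back at most three longs per call. */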
+ arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
if ((int)res.a0 >= 0) {
- *v = res.a3;
- return true;
+ switch (max_longs) {
+ case 3:
+ *v++ = res.a1;
+ fallthrough;
+ case 2:
+ *v++ = res.a2;
+ fallthrough;
+ case 1:
+ *v++ = res.a3;
+ break;
+ }
+ return max_longs;
}
}
if (__early_cpu_has_rndr() && __arm64_rndr(v))
- return true;
+ return 1;
- return false;
+ return 0;
}
-#define arch_get_random_seed_long_early arch_get_random_seed_long_early
+#define arch_get_random_seed_longs_early arch_get_random_seed_longs_early
#endif /* _ASM_ARCHRANDOM_H */
	/*
	 * Mix in any entropy obtainable architecturally if enabled
	 * and supported.
*/
- if (arch_get_random_seed_long_early(&raw))
+ if (arch_get_random_seed_longs_early(&raw, 1))
seed ^= raw;
if (!seed) {
#include <asm/machdep.h>
-static inline bool __must_check arch_get_random_long(unsigned long *v)
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
- return false;
+ return 0;
}
-static inline bool __must_check arch_get_random_int(unsigned int *v)
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
- return false;
-}
-
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
-{
- if (ppc_md.get_random_seed)
- return ppc_md.get_random_seed(v);
-
- return false;
-}
-
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
-{
- unsigned long val;
- bool rc;
-
- rc = arch_get_random_seed_long(&val);
- if (rc)
- *v = val;
-
- return rc;
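+	/* The machdep hook fills in a single long, so at most one can be reported. */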
+ if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
+ return 1;
+ return 0;
}
#ifdef CONFIG_PPC_POWERNV
break;
#endif
case H_RANDOM:
- if (!arch_get_random_seed_long(&vcpu->arch.regs.gpr[4]))
+ if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1))
ret = H_HARDWARE;
break;
case H_RPT_INVALIDATE:
DECLARE_STATIC_KEY_FALSE(s390_arch_random_available);
extern atomic64_t s390_arch_random_counter;
-static inline bool __must_check arch_get_random_long(unsigned long *v)
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
- return false;
+ return 0;
}
-static inline bool __must_check arch_get_random_int(unsigned int *v)
-{
- return false;
-}
-
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
-{
- if (static_branch_likely(&s390_arch_random_available)) {
- cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
- atomic64_add(sizeof(*v), &s390_arch_random_counter);
- return true;
- }
- return false;
-}
-
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
if (static_branch_likely(&s390_arch_random_available)) {
- cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
- atomic64_add(sizeof(*v), &s390_arch_random_counter);
- return true;
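+		/* cpacf_trng() fills a buffer of arbitrary length, so the whole request is satisfied at once. */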
+ cpacf_trng(NULL, 0, (u8 *)v, max_longs * sizeof(*v));
+ atomic64_add(max_longs * sizeof(*v), &s390_arch_random_counter);
+ return max_longs;
}
- return false;
+ return 0;
}
#endif /* _ASM_S390_ARCHRANDOM_H */
/* This is from <os.h>, but better not to #include that in a global header here. */
ssize_t os_getrandom(void *buf, size_t len, unsigned int flags);
-static inline bool __must_check arch_get_random_long(unsigned long *v)
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
- return os_getrandom(v, sizeof(*v), 0) == sizeof(*v);
-}
+ ssize_t ret;
-static inline bool __must_check arch_get_random_int(unsigned int *v)
-{
- return os_getrandom(v, sizeof(*v), 0) == sizeof(*v);
-}
-
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
-{
- return false;
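+	/* os_getrandom() may return fewer bytes than requested; count only whole longs. */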
+ ret = os_getrandom(v, max_longs * sizeof(*v), 0);
+ if (ret < 0)
+ return 0;
+ return ret / sizeof(*v);
}
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
- return false;
+ return 0;
}
#endif
return false;
}
-static inline bool __must_check rdrand_int(unsigned int *v)
-{
- bool ok;
- unsigned int retry = RDRAND_RETRY_LOOPS;
- do {
- asm volatile("rdrand %[out]"
- CC_SET(c)
- : CC_OUT(c) (ok), [out] "=r" (*v));
- if (ok)
- return true;
- } while (--retry);
- return false;
-}
-
static inline bool __must_check rdseed_long(unsigned long *v)
{
	bool ok;
	asm volatile("rdseed %[out]"
		     CC_SET(c)
		     : CC_OUT(c) (ok), [out] "=r" (*v));
return ok;
}
-static inline bool __must_check rdseed_int(unsigned int *v)
-{
- bool ok;
- asm volatile("rdseed %[out]"
- CC_SET(c)
- : CC_OUT(c) (ok), [out] "=r" (*v));
- return ok;
-}
-
/*
* These are the generic interfaces; they must not be declared if the
* stubs in <linux/random.h> are to be invoked.
*/
-static inline bool __must_check arch_get_random_long(unsigned long *v)
-{
- return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_long(v) : false;
-}
-
-static inline bool __must_check arch_get_random_int(unsigned int *v)
-{
- return static_cpu_has(X86_FEATURE_RDRAND) ? rdrand_int(v) : false;
-}
-
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
- return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_long(v) : false;
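+	/* RDRAND produces a single long per instruction, so never more than one long per call. */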
+ return max_longs && static_cpu_has(X86_FEATURE_RDRAND) && rdrand_long(v) ? 1 : 0;
}
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
- return static_cpu_has(X86_FEATURE_RDSEED) ? rdseed_int(v) : false;
+ return max_longs && static_cpu_has(X86_FEATURE_RDSEED) && rdseed_long(v) ? 1 : 0;
}
#ifndef CONFIG_UML
* This is run before the entropy pools are initialized,
* but this is hopefully better than nothing.
*/
- if (!arch_get_random_long(&rand)) {
+ if (!arch_get_random_longs(&rand, 1)) {
/* The constant is an arbitrary large prime */
rand = rdtsc();
rand *= 0xc345c6b72fd16123UL;
unsigned long rdseed[32 / sizeof(long)];
size_t counter;
} block;
- size_t i;
+ size_t i, longs;
- for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
- if (!arch_get_random_seed_long(&block.rdseed[i]) &&
- !arch_get_random_long(&block.rdseed[i]))
- block.rdseed[i] = random_get_entropy();
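+	/* Pull as many longs per call as the arch will give, falling back to the cycle counter one long at a time. */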
+ for (i = 0; i < ARRAY_SIZE(block.rdseed);) {
+ longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
+ if (longs) {
+ i += longs;
+ continue;
+ }
+ longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
+ if (longs) {
+ i += longs;
+ continue;
+ }
+ block.rdseed[i++] = random_get_entropy();
}
spin_lock_irqsave(&input_pool.lock, flags);
int __init random_init(const char *command_line)
{
ktime_t now = ktime_get_real();
- unsigned int i, arch_bits;
- unsigned long entropy;
+ size_t i, longs, arch_bits;
+ unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
#if defined(LATENT_ENTROPY_PLUGIN)
static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif
- for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8;
- i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) {
- if (!arch_get_random_seed_long_early(&entropy) &&
- !arch_get_random_long_early(&entropy)) {
- entropy = random_get_entropy();
- arch_bits -= sizeof(entropy) * 8;
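+	/* Same batching as in extract_entropy(); arch_bits tracks how many bits actually came from the architecture. */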
+ for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
+		longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ if (longs) {
+ _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
+ i += longs;
+ continue;
}
- _mix_pool_bytes(&entropy, sizeof(entropy));
+		longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ if (longs) {
+ _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
+ i += longs;
+ continue;
+ }
+ entropy[0] = random_get_entropy();
+ _mix_pool_bytes(entropy, sizeof(*entropy));
+ arch_bits -= sizeof(*entropy) * 8;
+ ++i;
}
_mix_pool_bytes(&now, sizeof(now));
_mix_pool_bytes(utsname(), sizeof(*(utsname())));
#ifndef __ASM_GENERIC_ARCHRANDOM_H__
#define __ASM_GENERIC_ARCHRANDOM_H__
-static inline bool __must_check arch_get_random_long(unsigned long *v)
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
- return false;
+ return 0;
}
-static inline bool __must_check arch_get_random_int(unsigned int *v)
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
- return false;
-}
-
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
-{
- return false;
-}
-
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
-{
- return false;
+ return 0;
}
#endif
* Called from the boot CPU during startup; not valid to call once
* secondary CPUs are up and preemption is possible.
*/
-#ifndef arch_get_random_seed_long_early
-static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
+#ifndef arch_get_random_seed_longs_early
+static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
{
WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_seed_long(v);
+ return arch_get_random_seed_longs(v, max_longs);
}
#endif
-#ifndef arch_get_random_long_early
-static inline bool __init arch_get_random_long_early(unsigned long *v)
+#ifndef arch_get_random_longs_early
+static inline size_t __init arch_get_random_longs_early(unsigned long *v, size_t max_longs)
{
WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_long(v);
+ return arch_get_random_longs(v, max_longs);
}
#endif