From 04ec96b768c9dd43946b047c3da60dcc66431370 Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld"
Date: Wed, 9 Feb 2022 14:43:25 +0100
Subject: random: make more consistent use of integer types

We've been using a flurry of int, unsigned int, size_t, and ssize_t.
Let's unify all of this into size_t where it makes sense, as it does in
most places, and leave ssize_t for return values with possible errors.

In addition, keeping with the convention of other functions in this
file, functions that are dealing with raw bytes now take void *
consistently instead of a mix of that and u8 *, because much of the
time we're actually passing some other structure that is then
interpreted as bytes by the function.

We also take the opportunity to fix the outdated and incorrect comment
in get_random_bytes_arch().

Cc: Theodore Ts'o
Reviewed-by: Dominik Brodowski
Reviewed-by: Jann Horn
Reviewed-by: Eric Biggers
Signed-off-by: Jason A. Donenfeld
---
 include/linux/hw_random.h |  2 +-
 include/linux/random.h    | 10 +++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 8e6dd908da21..1a9fc38f8938 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -61,6 +61,6 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
 extern void hwrng_unregister(struct hwrng *rng);
 extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
 /** Feed random bits into the pool. */
-extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
+extern void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy);
 
 #endif /* LINUX_HWRANDOM_H_ */
diff --git a/include/linux/random.h b/include/linux/random.h
index c45b2693e51f..e92efb39779c 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -20,8 +20,8 @@ struct random_ready_callback {
 	struct module *owner;
 };
 
-extern void add_device_randomness(const void *, unsigned int);
-extern void add_bootloader_randomness(const void *, unsigned int);
+extern void add_device_randomness(const void *, size_t);
+extern void add_bootloader_randomness(const void *, size_t);
 
 #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
 static inline void add_latent_entropy(void)
@@ -37,13 +37,13 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
 				 unsigned int value) __latent_entropy;
 extern void add_interrupt_randomness(int irq) __latent_entropy;
 
-extern void get_random_bytes(void *buf, int nbytes);
+extern void get_random_bytes(void *buf, size_t nbytes);
 extern int wait_for_random_bytes(void);
 extern int __init rand_initialize(void);
 extern bool rng_is_initialized(void);
 extern int add_random_ready_callback(struct random_ready_callback *rdy);
 extern void del_random_ready_callback(struct random_ready_callback *rdy);
-extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
+extern size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes);
 
 #ifndef MODULE
 extern const struct file_operations random_fops, urandom_fops;
@@ -87,7 +87,7 @@ static inline unsigned long get_random_canary(void)
 /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
  * Returns the result of the call to wait_for_random_bytes.
*/ -static inline int get_random_bytes_wait(void *buf, int nbytes) +static inline int get_random_bytes_wait(void *buf, size_t nbytes) { int ret = wait_for_random_bytes(); get_random_bytes(buf, nbytes); -- cgit v1.2.3-59-g8ed1b From 6071a6c0fba2d747742cadcbb3ba26ed756ed73b Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Fri, 11 Feb 2022 12:28:33 +0100 Subject: random: remove useless header comment This really adds nothing at all useful. Cc: Theodore Ts'o Reviewed-by: Dominik Brodowski Reviewed-by: Eric Biggers Signed-off-by: Jason A. Donenfeld --- include/linux/random.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/random.h b/include/linux/random.h index e92efb39779c..37e1e8c43d7e 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -1,9 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * include/linux/random.h - * - * Include file for the random number generator. - */ + #ifndef _LINUX_RANDOM_H #define _LINUX_RANDOM_H -- cgit v1.2.3-59-g8ed1b From b777c38239fec5a528e59f55b379e31b1a187524 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 13 Feb 2022 16:17:01 +0100 Subject: random: pull add_hwgenerator_randomness() declaration into random.h add_hwgenerator_randomness() is a function implemented and documented inside of random.c. It is the way that hardware RNGs push data into it. Therefore, it should be declared in random.h. Otherwise sparse complains with: random.c:1137:6: warning: symbol 'add_hwgenerator_randomness' was not declared. Should it be static? The alternative would be to include hw_random.h into random.c, but that wouldn't really be good for anything except slowing down compile time. Cc: Matt Mackall Cc: Theodore Ts'o Acked-by: Herbert Xu Reviewed-by: Eric Biggers Reviewed-by: Dominik Brodowski Signed-off-by: Jason A. Donenfeld --- drivers/char/hw_random/core.c | 1 + include/linux/hw_random.h | 2 -- include/linux/random.h | 2 ++ 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index a3db27916256..cfb085de876b 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 1a9fc38f8938..aa1d4da03538 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -60,7 +60,5 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng); /** Unregister a Hardware Random Number Generator driver. */ extern void hwrng_unregister(struct hwrng *rng); extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); -/** Feed random bits into the pool. 
*/ -extern void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy); #endif /* LINUX_HWRANDOM_H_ */ diff --git a/include/linux/random.h b/include/linux/random.h index 37e1e8c43d7e..d7354de9351e 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -32,6 +32,8 @@ static inline void add_latent_entropy(void) {} extern void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) __latent_entropy; extern void add_interrupt_randomness(int irq) __latent_entropy; +extern void add_hwgenerator_randomness(const void *buffer, size_t count, + size_t entropy); extern void get_random_bytes(void *buf, size_t nbytes); extern int wait_for_random_bytes(void); -- cgit v1.2.3-59-g8ed1b From 3191dd5a1179ef0fad5a050a1702ae98b6251e8f Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sun, 13 Feb 2022 22:48:04 +0100 Subject: random: clear fast pool, crng, and batches in cpuhp bring up For the irq randomness fast pool, rather than having to use expensive atomics, which were visibly the most expensive thing in the entire irq handler, simply take care of the extreme edge case of resetting count to zero in the cpuhp online handler, just after workqueues have been reenabled. This simplifies the code a bit and lets us use vanilla variables rather than atomics, and performance should be improved. As well, very early on when the CPU comes up, while interrupts are still disabled, we clear out the per-cpu crng and its batches, so that it always starts with fresh randomness. Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Theodore Ts'o Cc: Sultan Alsawaf Cc: Dominik Brodowski Acked-by: Sebastian Andrzej Siewior Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 62 +++++++++++++++++++++++++++++++++++----------- include/linux/cpuhotplug.h | 2 ++ include/linux/random.h | 5 ++++ kernel/cpu.c | 11 ++++++++ 4 files changed, 65 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index bca4467e540f..d73a75cbe82d 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -698,6 +698,25 @@ u32 get_random_u32(void) } EXPORT_SYMBOL(get_random_u32); +#ifdef CONFIG_SMP +/* + * This function is called when the CPU is coming up, with entry + * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP. + */ +int random_prepare_cpu(unsigned int cpu) +{ + /* + * When the cpu comes back online, immediately invalidate both + * the per-cpu crng and all batches, so that we serve fresh + * randomness. + */ + per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX; + per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX; + per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX; + return 0; +} +#endif + /** * randomize_page - Generate a random, page aligned address * @start: The smallest acceptable address the caller will take. @@ -1183,7 +1202,7 @@ struct fast_pool { }; struct work_struct mix; unsigned long last; - atomic_t count; + unsigned int count; u16 reg_idx; }; @@ -1219,6 +1238,29 @@ static void fast_mix(u32 pool[4]) static DEFINE_PER_CPU(struct fast_pool, irq_randomness); +#ifdef CONFIG_SMP +/* + * This function is called when the CPU has just come online, with + * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE. + */ +int random_online_cpu(unsigned int cpu) +{ + /* + * During CPU shutdown and before CPU onlining, add_interrupt_ + * randomness() may schedule mix_interrupt_randomness(), and + * set the MIX_INFLIGHT flag. 
However, because the worker can + * be scheduled on a different CPU during this period, that + * flag will never be cleared. For that reason, we zero out + * the flag here, which runs just after workqueues are onlined + * for the CPU again. This also has the effect of setting the + * irq randomness count to zero so that new accumulated irqs + * are fresh. + */ + per_cpu_ptr(&irq_randomness, cpu)->count = 0; + return 0; +} +#endif + static u32 get_reg(struct fast_pool *f, struct pt_regs *regs) { u32 *ptr = (u32 *)regs; @@ -1243,15 +1285,6 @@ static void mix_interrupt_randomness(struct work_struct *work) local_irq_disable(); if (fast_pool != this_cpu_ptr(&irq_randomness)) { local_irq_enable(); - /* - * If we are unlucky enough to have been moved to another CPU, - * during CPU hotplug while the CPU was shutdown then we set - * our count to zero atomically so that when the CPU comes - * back online, it can enqueue work again. The _release here - * pairs with the atomic_inc_return_acquire in - * add_interrupt_randomness(). - */ - atomic_set_release(&fast_pool->count, 0); return; } @@ -1260,7 +1293,7 @@ static void mix_interrupt_randomness(struct work_struct *work) * consistent view, before we reenable irqs again. */ memcpy(pool, fast_pool->pool32, sizeof(pool)); - atomic_set(&fast_pool->count, 0); + fast_pool->count = 0; fast_pool->last = jiffies; local_irq_enable(); @@ -1296,14 +1329,13 @@ void add_interrupt_randomness(int irq) } fast_mix(fast_pool->pool32); - /* The _acquire here pairs with the atomic_set_release in mix_interrupt_randomness(). */ - new_count = (unsigned int)atomic_inc_return_acquire(&fast_pool->count); + new_count = ++fast_pool->count; if (unlikely(crng_init == 0)) { if (new_count >= 64 && crng_pre_init_inject(fast_pool->pool32, sizeof(fast_pool->pool32), true, true) > 0) { - atomic_set(&fast_pool->count, 0); + fast_pool->count = 0; fast_pool->last = now; if (spin_trylock(&input_pool.lock)) { _mix_pool_bytes(&fast_pool->pool32, sizeof(fast_pool->pool32)); @@ -1321,7 +1353,7 @@ void add_interrupt_randomness(int irq) if (unlikely(!fast_pool->mix.func)) INIT_WORK(&fast_pool->mix, mix_interrupt_randomness); - atomic_or(MIX_INFLIGHT, &fast_pool->count); + fast_pool->count |= MIX_INFLIGHT; queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix); } EXPORT_SYMBOL_GPL(add_interrupt_randomness); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 411a428ace4d..481e565cc5c4 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -100,6 +100,7 @@ enum cpuhp_state { CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, CPUHP_PADATA_DEAD, CPUHP_AP_DTPM_CPU_DEAD, + CPUHP_RANDOM_PREPARE, CPUHP_WORKQUEUE_PREP, CPUHP_POWER_NUMA_PREPARE, CPUHP_HRTIMERS_PREPARE, @@ -240,6 +241,7 @@ enum cpuhp_state { CPUHP_AP_PERF_CSKY_ONLINE, CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, + CPUHP_AP_RANDOM_ONLINE, CPUHP_AP_RCUTREE_ONLINE, CPUHP_AP_BASE_CACHEINFO_ONLINE, CPUHP_AP_ONLINE_DYN, diff --git a/include/linux/random.h b/include/linux/random.h index d7354de9351e..6148b8d1ccf3 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -156,4 +156,9 @@ static inline bool __init arch_get_random_long_early(unsigned long *v) } #endif +#ifdef CONFIG_SMP +extern int random_prepare_cpu(unsigned int cpu); +extern int random_online_cpu(unsigned int cpu); +#endif + #endif /* _LINUX_RANDOM_H */ diff --git a/kernel/cpu.c b/kernel/cpu.c index 407a2568f35e..238cba15449f 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -34,6 +34,7 @@ #include #include #include 
+#include <linux/random.h>
 #include
 #define CREATE_TRACE_POINTS
@@ -1659,6 +1660,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 		.startup.single		= perf_event_init_cpu,
 		.teardown.single	= perf_event_exit_cpu,
 	},
+	[CPUHP_RANDOM_PREPARE] = {
+		.name			= "random:prepare",
+		.startup.single		= random_prepare_cpu,
+		.teardown.single	= NULL,
+	},
 	[CPUHP_WORKQUEUE_PREP] = {
 		.name			= "workqueue:prepare",
 		.startup.single		= workqueue_prepare_cpu,
@@ -1782,6 +1788,11 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 		.startup.single		= workqueue_online_cpu,
 		.teardown.single	= workqueue_offline_cpu,
 	},
+	[CPUHP_AP_RANDOM_ONLINE] = {
+		.name			= "random:online",
+		.startup.single		= random_online_cpu,
+		.teardown.single	= NULL,
+	},
 	[CPUHP_AP_RCUTREE_ONLINE] = {
 		.name			= "RCU/tree:online",
 		.startup.single		= rcutree_online_cpu,
-- 
cgit v1.2.3-59-g8ed1b

From 6f98a4bfee72c22f50aedb39fb761567969865fe Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld"
Date: Mon, 7 Feb 2022 17:19:24 +0100
Subject: random: block in /dev/urandom

This topic has come up countless times, and usually doesn't go
anywhere. This time I thought I'd bring it up with a slightly narrower
focus, updated for some developments over the last three years: we
finally can make /dev/urandom always secure, in light of the fact that
our RNG is now always seeded.

Ever since Linus' 50ee7529ec45 ("random: try to actively add entropy
rather than passively wait for it"), the RNG does a haveged-style
jitter dance around the scheduler, in order to produce entropy (and
credit it) for the case when we're stuck in wait_for_random_bytes().
However you feel about the Linus Jitter Dance is beside the point: it's
been there for three years and usually gets the RNG initialized in a
second or so.

As a matter of fact, this is what happens currently when people use
getrandom(). It's already there and working, and most people have been
using it for years without realizing.

So, given that the kernel has grown this mechanism for seeding itself
from nothing, and that this procedure happens pretty fast, maybe
there's no point any longer in having /dev/urandom give insecure bytes.
In the past we didn't want the boot process to deadlock, which was
understandable. But now, in the worst case, a second goes by, and the
problem is resolved. It seems like maybe we're finally at a point when
we can get rid of the infamous "urandom read hole".

The one slight drawback is that the Linus Jitter Dance relies on
random_get_entropy() being implemented. The first lines of
try_to_generate_entropy() are:

	stack.now = random_get_entropy();
	if (stack.now == random_get_entropy())
		return;

On most platforms, random_get_entropy() is simply aliased to
get_cycles(). The number of machines without a cycle counter or some
other implementation of random_get_entropy() in 2022, which can also
run a mainline kernel, and at the same time have a both broken and out
of date userspace that relies on /dev/urandom never blocking at boot is
thought to be exceedingly low. And to be clear: those museum pieces
without cycle counters will continue to run Linux just fine, and even
/dev/urandom will be operable just like before; the RNG just needs to
be seeded first through the usual means, which should already be the
case now.

On systems that really do want unseeded randomness, we already offer
getrandom(GRND_INSECURE), which is in use by, e.g., systemd for seeding
their hash tables at boot. Nothing in this commit would affect
GRND_INSECURE, and it remains the means of getting those types of
random numbers.

This patch goes a long way toward eliminating a long overdue userspace
crypto footgun. After several decades of endless user confusion, we
will finally be able to say, "use any single one of our random
interfaces and you'll be fine. They're all the same. It doesn't
matter." And that, I think, is really something. Finally all of those
blog posts and disagreeing forums and contradictory articles will all
become correct about whatever they happened to recommend, and along
with it, a whole class of vulnerabilities eliminated.

With very minimal downside, we're finally in a position where we can
make this change.

Cc: Dinh Nguyen
Cc: Nick Hu
Cc: Max Filippov
Cc: Palmer Dabbelt
Cc: David S. Miller
Cc: Yoshinori Sato
Cc: Michal Simek
Cc: Borislav Petkov
Cc: Guo Ren
Cc: Geert Uytterhoeven
Cc: Joshua Kinard
Cc: David Laight
Cc: Dominik Brodowski
Cc: Eric Biggers
Cc: Ard Biesheuvel
Cc: Arnd Bergmann
Cc: Thomas Gleixner
Cc: Andy Lutomirski
Cc: Kees Cook
Cc: Lennart Poettering
Cc: Konstantin Ryabitsev
Cc: Linus Torvalds
Cc: Greg Kroah-Hartman
Cc: Theodore Ts'o
Signed-off-by: Jason A. Donenfeld
---
 drivers/char/mem.c     |  2 +-
 drivers/char/random.c  | 72 ++++++++++++--------------------------------------
 include/linux/random.h |  2 +-
 3 files changed, 19 insertions(+), 57 deletions(-)

(limited to 'include/linux')

diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index cc296f0823bd..9f586025dbe6 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -707,7 +707,7 @@ static const struct memdev {
 	[5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT },
 	[7] = { "full", 0666, &full_fops, 0 },
 	[8] = { "random", 0666, &random_fops, 0 },
-	[9] = { "urandom", 0666, &urandom_fops, 0 },
+	[9] = { "urandom", 0666, &random_fops, 0 },
 #ifdef CONFIG_PRINTK
 	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
 #endif
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8171c3bbf460..9831797e0699 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -89,17 +89,14 @@ static LIST_HEAD(random_ready_list);
 /* Control how we warn userspace. */
 static struct ratelimit_state unseeded_warning =
 	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
-static struct ratelimit_state urandom_warning =
-	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
 static int ratelimit_disable __read_mostly;
 module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
 MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
 
 /*
  * Returns whether or not the input pool has been seeded and thus guaranteed
- * to supply cryptographically secure random numbers. This applies to: the
- * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
- * ,u64,int,long} family of functions.
+ * to supply cryptographically secure random numbers. This applies to
+ * get_random_bytes() and get_random_{u32,u64,int,long}().
  *
  * Returns: true if the input pool has been seeded.
  *          false if the input pool has not been seeded.
@@ -115,10 +112,10 @@ static void try_to_generate_entropy(void);
 
 /*
  * Wait for the input pool to be seeded and thus guaranteed to supply
- * cryptographically secure random numbers. This applies to: the /dev/urandom
- * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
- * family of functions. Using any of these functions without first calling
- * this function forfeits the guarantee of security.
+ * cryptographically secure random numbers. This applies to
+ * get_random_bytes() and get_random_{u32,u64,int,long}().
Using any + * of these functions without first calling this function means that + * the returned numbers might not be cryptographically secure. * * Returns: 0 if the input pool has been seeded. * -ERESTARTSYS if the function was interrupted by a signal. @@ -256,10 +253,10 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, void * unsigned long get_random_long() * * These interfaces will return the requested number of random bytes - * into the given buffer or as a return value. This is equivalent to - * a read from /dev/urandom. The integer family of functions may be - * higher performance for one-off random integers, because they do a - * bit of buffering. + * into the given buffer or as a return value. The returned numbers are + * the same as those of getrandom(0). The integer family of functions may + * be higher performance for one-off random integers, because they do a + * bit of buffering and do not invoke reseeding. * *********************************************************************/ @@ -336,11 +333,6 @@ static void crng_reseed(void) unseeded_warning.missed); unseeded_warning.missed = 0; } - if (urandom_warning.missed) { - pr_notice("%d urandom warning(s) missed due to ratelimiting\n", - urandom_warning.missed); - urandom_warning.missed = 0; - } } } @@ -993,10 +985,8 @@ int __init rand_initialize(void) pr_notice("crng init done (trusting CPU's manufacturer)\n"); } - if (ratelimit_disable) { - urandom_warning.interval = 0; + if (ratelimit_disable) unseeded_warning.interval = 0; - } return 0; } @@ -1386,20 +1376,16 @@ static void try_to_generate_entropy(void) * getrandom(2) is the primary modern interface into the RNG and should * be used in preference to anything else. * - * Reading from /dev/random has the same functionality as calling - * getrandom(2) with flags=0. In earlier versions, however, it had - * vastly different semantics and should therefore be avoided, to - * prevent backwards compatibility issues. - * - * Reading from /dev/urandom has the same functionality as calling - * getrandom(2) with flags=GRND_INSECURE. Because it does not block - * waiting for the RNG to be ready, it should not be used. + * Reading from /dev/random and /dev/urandom both have the same effect + * as calling getrandom(2) with flags=0. (In earlier versions, however, + * they each had different semantics.) * * Writing to either /dev/random or /dev/urandom adds entropy to * the input pool but does not credit it. * - * Polling on /dev/random indicates when the RNG is initialized, on - * the read side, and when it wants new entropy, on the write side. + * Polling on /dev/random or /dev/urandom indicates when the RNG + * is initialized, on the read side, and when it wants new entropy, + * on the write side. 
* * Both /dev/random and /dev/urandom have the same set of ioctls for * adding entropy, getting the entropy count, zeroing the count, and @@ -1484,21 +1470,6 @@ static ssize_t random_write(struct file *file, const char __user *buffer, return (ssize_t)count; } -static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos) -{ - static int maxwarn = 10; - - if (!crng_ready() && maxwarn > 0) { - maxwarn--; - if (__ratelimit(&urandom_warning)) - pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", - current->comm, nbytes); - } - - return get_random_bytes_user(buf, nbytes); -} - static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { @@ -1585,15 +1556,6 @@ const struct file_operations random_fops = { .llseek = noop_llseek, }; -const struct file_operations urandom_fops = { - .read = urandom_read, - .write = random_write, - .unlocked_ioctl = random_ioctl, - .compat_ioctl = compat_ptr_ioctl, - .fasync = random_fasync, - .llseek = noop_llseek, -}; - /******************************************************************** * diff --git a/include/linux/random.h b/include/linux/random.h index 6148b8d1ccf3..725a4d08c0a0 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -44,7 +44,7 @@ extern void del_random_ready_callback(struct random_ready_callback *rdy); extern size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes); #ifndef MODULE -extern const struct file_operations random_fops, urandom_fops; +extern const struct file_operations random_fops; #endif u32 get_random_u32(void); -- cgit v1.2.3-59-g8ed1b From ae099e8e98fb01395228628be5a4661e3bd86fe4 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Wed, 23 Feb 2022 13:43:44 +0100 Subject: random: add mechanism for VM forks to reinitialize crng When a VM forks, we must immediately mix in additional information to the stream of random output so that two forks or a rollback don't produce the same stream of random numbers, which could have catastrophic cryptographic consequences. This commit adds a simple API, add_vmfork_ randomness(), for that, by force reseeding the crng. This has the added benefit of also draining the entropy pool and setting its timer back, so that any old entropy that was there prior -- which could have already been used by a different fork, or generally gone stale -- does not contribute to the accounting of the next 256 bits. Cc: Dominik Brodowski Cc: Theodore Ts'o Cc: Jann Horn Cc: Eric Biggers Reviewed-by: Ard Biesheuvel Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 50 +++++++++++++++++++++++++++++++++++--------------- include/linux/random.h | 1 + 2 files changed, 36 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index ede97649c5dd..2b323f6bd96c 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -286,14 +286,14 @@ static DEFINE_PER_CPU(struct crng, crngs) = { }; /* Used by crng_reseed() to extract a new seed from the input pool. */ -static bool drain_entropy(void *buf, size_t nbytes); +static bool drain_entropy(void *buf, size_t nbytes, bool force); /* * This extracts a new crng key from the input pool, but only if there is a - * sufficient amount of entropy available, in order to mitigate bruteforcing - * of newly added bits. + * sufficient amount of entropy available or force is true, in order to + * mitigate bruteforcing of newly added bits. 
*/ -static void crng_reseed(void) +static void crng_reseed(bool force) { unsigned long flags; unsigned long next_gen; @@ -301,7 +301,7 @@ static void crng_reseed(void) bool finalize_init = false; /* Only reseed if we can, to prevent brute forcing a small amount of new bits. */ - if (!drain_entropy(key, sizeof(key))) + if (!drain_entropy(key, sizeof(key), force)) return; /* @@ -398,7 +398,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], * in turn bumps the generation counter that we check below. */ if (unlikely(time_after(jiffies, READ_ONCE(base_crng.birth) + CRNG_RESEED_INTERVAL))) - crng_reseed(); + crng_reseed(false); local_lock_irqsave(&crngs.lock, flags); crng = raw_cpu_ptr(&crngs); @@ -763,10 +763,10 @@ EXPORT_SYMBOL(get_random_bytes_arch); * * Finally, extract entropy via these two, with the latter one * setting the entropy count to zero and extracting only if there - * is POOL_MIN_BITS entropy credited prior: + * is POOL_MIN_BITS entropy credited prior or force is true: * * static void extract_entropy(void *buf, size_t nbytes) - * static bool drain_entropy(void *buf, size_t nbytes) + * static bool drain_entropy(void *buf, size_t nbytes, bool force) * **********************************************************************/ @@ -824,7 +824,7 @@ static void credit_entropy_bits(size_t nbits) } while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig); if (crng_init < 2 && entropy_count >= POOL_MIN_BITS) - crng_reseed(); + crng_reseed(false); } /* @@ -874,16 +874,16 @@ static void extract_entropy(void *buf, size_t nbytes) } /* - * First we make sure we have POOL_MIN_BITS of entropy in the pool, and then we - * set the entropy count to zero (but don't actually touch any data). Only then - * can we extract a new key with extract_entropy(). + * First we make sure we have POOL_MIN_BITS of entropy in the pool unless force + * is true, and then we set the entropy count to zero (but don't actually touch + * any data). Only then can we extract a new key with extract_entropy(). */ -static bool drain_entropy(void *buf, size_t nbytes) +static bool drain_entropy(void *buf, size_t nbytes, bool force) { unsigned int entropy_count; do { entropy_count = READ_ONCE(input_pool.entropy_count); - if (entropy_count < POOL_MIN_BITS) + if (!force && entropy_count < POOL_MIN_BITS) return false; } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count); extract_entropy(buf, nbytes); @@ -907,6 +907,7 @@ static bool drain_entropy(void *buf, size_t nbytes) * void add_hwgenerator_randomness(const void *buffer, size_t count, * size_t entropy); * void add_bootloader_randomness(const void *buf, size_t size); + * void add_vmfork_randomness(const void *unique_vm_id, size_t size); * void add_interrupt_randomness(int irq); * * add_device_randomness() adds data to the input pool that @@ -938,6 +939,10 @@ static bool drain_entropy(void *buf, size_t nbytes) * add_device_randomness(), depending on whether or not the configuration * option CONFIG_RANDOM_TRUST_BOOTLOADER is set. * + * add_vmfork_randomness() adds a unique (but not necessarily secret) ID + * representing the current instance of a VM to the pool, without crediting, + * and then force-reseeds the crng so that it takes effect immediately. + * * add_interrupt_randomness() uses the interrupt timing as random * inputs to the entropy pool. 
Using the cycle counters and the irq source * as inputs, it feeds the input pool roughly once a second or after 64 @@ -1163,6 +1168,21 @@ void add_bootloader_randomness(const void *buf, size_t size) } EXPORT_SYMBOL_GPL(add_bootloader_randomness); +/* + * Handle a new unique VM ID, which is unique, not secret, so we + * don't credit it, but we do immediately force a reseed after so + * that it's used by the crng posthaste. + */ +void add_vmfork_randomness(const void *unique_vm_id, size_t size) +{ + add_device_randomness(unique_vm_id, size); + if (crng_ready()) { + crng_reseed(true); + pr_notice("crng reseeded due to virtual machine fork\n"); + } +} +EXPORT_SYMBOL_GPL(add_vmfork_randomness); + struct fast_pool { union { u32 pool32[4]; @@ -1534,7 +1554,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) return -EPERM; if (crng_init < 2) return -ENODATA; - crng_reseed(); + crng_reseed(false); return 0; default: return -EINVAL; diff --git a/include/linux/random.h b/include/linux/random.h index 725a4d08c0a0..117468f3a92e 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -34,6 +34,7 @@ extern void add_input_randomness(unsigned int type, unsigned int code, extern void add_interrupt_randomness(int irq) __latent_entropy; extern void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy); +extern void add_vmfork_randomness(const void *unique_vm_id, size_t size); extern void get_random_bytes(void *buf, size_t nbytes); extern int wait_for_random_bytes(void); -- cgit v1.2.3-59-g8ed1b From d273845ecb0e0626842782a4497f0c5876139ec3 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 25 Feb 2022 16:55:52 +0100 Subject: ACPI: allow longer device IDs We create a list of ACPI "PNP" IDs which contains _HID, _CID, and CLS entries of the respective devices. However, when making structs for matching, we squeeze those IDs into acpi_device_id, which only has 9 bytes space to store the identifier. The subsystem actually captures the full length of the IDs, and the modalias has the full length, but this struct we use for matching is limited. It originally had 16 bytes, but was changed to only have 9 in 6543becf26ff ("mod/file2alias: make modalias generation safe for cross compiling"), presumably on the theory that it would match the ACPI spec so it didn't matter. Unfortunately, while most people adhere to the ACPI specs, Microsoft decided that its VM Generation Counter device [1] should only be identifiable by _CID with a value of "VM_Gen_Counter", which is longer than 9 characters. To allow device drivers to match identifiers that exceed the 9 byte limit, this simply ups the length to 16, just like it was before the aforementioned commit. Empirical testing indicates that this doesn't actually increase vmlinux size on 64-bit, because the ulong in the same struct caused there to be 7 bytes of padding anyway, and when doing a s/M/Y/g i386_defconfig build, the bzImage only increased by 0.0055%, so negligible. This patch is a prerequisite to add support for VMGenID in Linux, the subsequent patch in this series. It has been confirmed to also work on the udev/modalias side in userspace. [1] https://download.microsoft.com/download/3/1/C/31CFC307-98CA-4CA5-914C-D9772691E214/VirtualMachineGenerationID.docx Signed-off-by: Alexander Graf Co-developed-by: Jason A. Donenfeld [Jason: reworked commit message a bit, went with len=16 approach.] 
Cc: Mika Westerberg Cc: Andy Shevchenko Cc: Len Brown Cc: Greg Kroah-Hartman Reviewed-by: Ard Biesheuvel Acked-by: Hans de Goede Acked-by: Rafael J. Wysocki Signed-off-by: Jason A. Donenfeld --- include/linux/mod_devicetable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 4bb71979a8fd..5da5d990ff58 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -211,7 +211,7 @@ struct css_device_id { kernel_ulong_t driver_data; }; -#define ACPI_ID_LEN 9 +#define ACPI_ID_LEN 16 struct acpi_device_id { __u8 id[ACPI_ID_LEN]; -- cgit v1.2.3-59-g8ed1b From a4107d34f960df99ca07fa8eb022425a804f59f3 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Tue, 1 Mar 2022 15:14:04 +0100 Subject: random: do not export add_vmfork_randomness() unless needed Since add_vmfork_randomness() is only called from vmgenid.o, we can guard it in CONFIG_VMGENID, similarly to how we do with add_disk_randomness() and CONFIG_BLOCK. If we ever have multiple things calling into add_vmfork_randomness(), we can add another shared Kconfig symbol for that, but for now, this is good enough. Even though add_vmfork_randomess() is a pretty small function, removing it means that there are only calls to crng_reseed(false) and none to crng_reseed(true), which means the compiler can constant propagate the false, removing branches from crng_reseed() and its descendants. Additionally, we don't even need the symbol to be exported if CONFIG_VMGENID is not a module, so conditionalize that too. Cc: Dominik Brodowski Cc: Theodore Ts'o Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 4 ++++ include/linux/random.h | 2 ++ 2 files changed, 6 insertions(+) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index 2b323f6bd96c..bc2a4f7e3655 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1168,6 +1168,7 @@ void add_bootloader_randomness(const void *buf, size_t size) } EXPORT_SYMBOL_GPL(add_bootloader_randomness); +#if IS_ENABLED(CONFIG_VMGENID) /* * Handle a new unique VM ID, which is unique, not secret, so we * don't credit it, but we do immediately force a reseed after so @@ -1181,7 +1182,10 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t size) pr_notice("crng reseeded due to virtual machine fork\n"); } } +#if IS_MODULE(CONFIG_VMGENID) EXPORT_SYMBOL_GPL(add_vmfork_randomness); +#endif +#endif struct fast_pool { union { diff --git a/include/linux/random.h b/include/linux/random.h index 117468f3a92e..f209f1a78899 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -34,7 +34,9 @@ extern void add_input_randomness(unsigned int type, unsigned int code, extern void add_interrupt_randomness(int irq) __latent_entropy; extern void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy); +#if IS_ENABLED(CONFIG_VMGENID) extern void add_vmfork_randomness(const void *unique_vm_id, size_t size); +#endif extern void get_random_bytes(void *buf, size_t nbytes); extern int wait_for_random_bytes(void); -- cgit v1.2.3-59-g8ed1b From 5acd35487dc911541672b3ffc322851769c32a56 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Tue, 1 Mar 2022 20:03:49 +0100 Subject: random: replace custom notifier chain with standard one We previously rolled our own randomness readiness notifier, which only has two users in the whole kernel. 
Replace this with a more standard atomic notifier block that serves the same purpose with less code. Also unexport the symbols, because no modules use it, only unconditional builtins. The only drawback is that it's possible for a notification handler returning the "stop" code to prevent further processing, but given that there are only two users, and that we're unexporting this anyway, that doesn't seem like a significant drawback for the simplification we receive here. Cc: Greg Kroah-Hartman Cc: Theodore Ts'o Reviewed-by: Dominik Brodowski Signed-off-by: Jason A. Donenfeld --- drivers/char/random.c | 67 ++++++++++++++------------------------------------ include/linux/random.h | 10 +++----- lib/random32.c | 12 +++++---- lib/vsprintf.c | 10 +++++--- 4 files changed, 35 insertions(+), 64 deletions(-) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index bc2a4f7e3655..aa7bc9a3a864 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -83,8 +83,8 @@ static int crng_init = 0; /* Various types of waiters for crng_init->2 transition. */ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); static struct fasync_struct *fasync; -static DEFINE_SPINLOCK(random_ready_list_lock); -static LIST_HEAD(random_ready_list); +static DEFINE_SPINLOCK(random_ready_chain_lock); +static RAW_NOTIFIER_HEAD(random_ready_chain); /* Control how we warn userspace. */ static struct ratelimit_state unseeded_warning = @@ -144,72 +144,43 @@ EXPORT_SYMBOL(wait_for_random_bytes); * * returns: 0 if callback is successfully added * -EALREADY if pool is already initialised (callback not called) - * -ENOENT if module for callback is not alive */ -int add_random_ready_callback(struct random_ready_callback *rdy) +int register_random_ready_notifier(struct notifier_block *nb) { - struct module *owner; unsigned long flags; - int err = -EALREADY; + int ret = -EALREADY; if (crng_ready()) - return err; - - owner = rdy->owner; - if (!try_module_get(owner)) - return -ENOENT; - - spin_lock_irqsave(&random_ready_list_lock, flags); - if (crng_ready()) - goto out; - - owner = NULL; - - list_add(&rdy->list, &random_ready_list); - err = 0; - -out: - spin_unlock_irqrestore(&random_ready_list_lock, flags); - - module_put(owner); + return ret; - return err; + spin_lock_irqsave(&random_ready_chain_lock, flags); + if (!crng_ready()) + ret = raw_notifier_chain_register(&random_ready_chain, nb); + spin_unlock_irqrestore(&random_ready_chain_lock, flags); + return ret; } -EXPORT_SYMBOL(add_random_ready_callback); /* * Delete a previously registered readiness callback function. 
*/ -void del_random_ready_callback(struct random_ready_callback *rdy) +int unregister_random_ready_notifier(struct notifier_block *nb) { unsigned long flags; - struct module *owner = NULL; - - spin_lock_irqsave(&random_ready_list_lock, flags); - if (!list_empty(&rdy->list)) { - list_del_init(&rdy->list); - owner = rdy->owner; - } - spin_unlock_irqrestore(&random_ready_list_lock, flags); + int ret; - module_put(owner); + spin_lock_irqsave(&random_ready_chain_lock, flags); + ret = raw_notifier_chain_unregister(&random_ready_chain, nb); + spin_unlock_irqrestore(&random_ready_chain_lock, flags); + return ret; } -EXPORT_SYMBOL(del_random_ready_callback); static void process_random_ready_list(void) { unsigned long flags; - struct random_ready_callback *rdy, *tmp; - spin_lock_irqsave(&random_ready_list_lock, flags); - list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) { - struct module *owner = rdy->owner; - - list_del_init(&rdy->list); - rdy->func(rdy); - module_put(owner); - } - spin_unlock_irqrestore(&random_ready_list_lock, flags); + spin_lock_irqsave(&random_ready_chain_lock, flags); + raw_notifier_call_chain(&random_ready_chain, 0, NULL); + spin_unlock_irqrestore(&random_ready_chain_lock, flags); } #define warn_unseeded_randomness(previous) \ diff --git a/include/linux/random.h b/include/linux/random.h index f209f1a78899..fab1ab0563b4 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -10,11 +10,7 @@ #include -struct random_ready_callback { - struct list_head list; - void (*func)(struct random_ready_callback *rdy); - struct module *owner; -}; +struct notifier_block; extern void add_device_randomness(const void *, size_t); extern void add_bootloader_randomness(const void *, size_t); @@ -42,8 +38,8 @@ extern void get_random_bytes(void *buf, size_t nbytes); extern int wait_for_random_bytes(void); extern int __init rand_initialize(void); extern bool rng_is_initialized(void); -extern int add_random_ready_callback(struct random_ready_callback *rdy); -extern void del_random_ready_callback(struct random_ready_callback *rdy); +extern int register_random_ready_notifier(struct notifier_block *nb); +extern int unregister_random_ready_notifier(struct notifier_block *nb); extern size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes); #ifndef MODULE diff --git a/lib/random32.c b/lib/random32.c index 3c19820796d0..976632003ec6 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -551,9 +551,11 @@ static void prandom_reseed(struct timer_list *unused) * To avoid worrying about whether it's safe to delay that interrupt * long enough to seed all CPUs, just schedule an immediate timer event. 
*/ -static void prandom_timer_start(struct random_ready_callback *unused) +static int prandom_timer_start(struct notifier_block *nb, + unsigned long action, void *data) { mod_timer(&seed_timer, jiffies); + return 0; } #ifdef CONFIG_RANDOM32_SELFTEST @@ -617,13 +619,13 @@ core_initcall(prandom32_state_selftest); */ static int __init prandom_init_late(void) { - static struct random_ready_callback random_ready = { - .func = prandom_timer_start + static struct notifier_block random_ready = { + .notifier_call = prandom_timer_start }; - int ret = add_random_ready_callback(&random_ready); + int ret = register_random_ready_notifier(&random_ready); if (ret == -EALREADY) { - prandom_timer_start(&random_ready); + prandom_timer_start(&random_ready, 0, NULL); ret = 0; } return ret; diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 3b8129dd374c..36574a806a81 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -757,14 +757,16 @@ static void enable_ptr_key_workfn(struct work_struct *work) static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn); -static void fill_random_ptr_key(struct random_ready_callback *unused) +static int fill_random_ptr_key(struct notifier_block *nb, + unsigned long action, void *data) { /* This may be in an interrupt handler. */ queue_work(system_unbound_wq, &enable_ptr_key_work); + return 0; } -static struct random_ready_callback random_ready = { - .func = fill_random_ptr_key +static struct notifier_block random_ready = { + .notifier_call = fill_random_ptr_key }; static int __init initialize_ptr_random(void) @@ -778,7 +780,7 @@ static int __init initialize_ptr_random(void) return 0; } - ret = add_random_ready_callback(&random_ready); + ret = register_random_ready_notifier(&random_ready); if (!ret) { return 0; } else if (ret == -EALREADY) { -- cgit v1.2.3-59-g8ed1b From f3c2682bad7bc6033c837e9c66e5af881fe8d465 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Tue, 1 Mar 2022 20:22:39 +0100 Subject: random: provide notifier for VM fork Drivers such as WireGuard need to learn when VMs fork in order to clear sessions. This commit provides a simple notifier_block for that, with a register and unregister function. When no VM fork detection is compiled in, this turns into a no-op, similar to how the power notifier works. Cc: Dominik Brodowski Cc: Theodore Ts'o Reviewed-by: Greg Kroah-Hartman Signed-off-by: Jason A. 
Donenfeld --- drivers/char/random.c | 15 +++++++++++++++ include/linux/random.h | 5 +++++ 2 files changed, 20 insertions(+) (limited to 'include/linux') diff --git a/drivers/char/random.c b/drivers/char/random.c index aa7bc9a3a864..d9ca441c14bd 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1140,6 +1140,8 @@ void add_bootloader_randomness(const void *buf, size_t size) EXPORT_SYMBOL_GPL(add_bootloader_randomness); #if IS_ENABLED(CONFIG_VMGENID) +static BLOCKING_NOTIFIER_HEAD(vmfork_chain); + /* * Handle a new unique VM ID, which is unique, not secret, so we * don't credit it, but we do immediately force a reseed after so @@ -1152,10 +1154,23 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t size) crng_reseed(true); pr_notice("crng reseeded due to virtual machine fork\n"); } + blocking_notifier_call_chain(&vmfork_chain, 0, NULL); } #if IS_MODULE(CONFIG_VMGENID) EXPORT_SYMBOL_GPL(add_vmfork_randomness); #endif + +int register_random_vmfork_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&vmfork_chain, nb); +} +EXPORT_SYMBOL_GPL(register_random_vmfork_notifier); + +int unregister_random_vmfork_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&vmfork_chain, nb); +} +EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier); #endif struct fast_pool { diff --git a/include/linux/random.h b/include/linux/random.h index fab1ab0563b4..c0baffe7afb1 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -32,6 +32,11 @@ extern void add_hwgenerator_randomness(const void *buffer, size_t count, size_t entropy); #if IS_ENABLED(CONFIG_VMGENID) extern void add_vmfork_randomness(const void *unique_vm_id, size_t size); +extern int register_random_vmfork_notifier(struct notifier_block *nb); +extern int unregister_random_vmfork_notifier(struct notifier_block *nb); +#else +static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; } +static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; } #endif extern void get_random_bytes(void *buf, size_t nbytes); -- cgit v1.2.3-59-g8ed1b
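
Editor's note, appended after the series rather than taken from any patch above: the last two
commits add register_random_vmfork_notifier() and unregister_random_vmfork_notifier() so that
drivers holding per-session secrets (the commit message mentions WireGuard) can react when a VM
forks. A minimal, hypothetical consumer might look roughly like the sketch below; the module,
function, and variable names are invented for illustration, and only the two registration helpers
and the standard notifier_block calling convention come from the patches themselves.

	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <linux/printk.h>
	#include <linux/random.h>

	/* Hypothetical handler: invalidate state that must not be reused across a VM fork. */
	static int example_vmfork_notify(struct notifier_block *nb,
					 unsigned long action, void *data)
	{
		pr_info("example: VM fork detected, clearing cached sessions\n");
		/* ... drop ephemeral keys, nonces, counters, etc. here ... */
		return 0;
	}

	static struct notifier_block example_vmfork_nb = {
		.notifier_call = example_vmfork_notify,
	};

	static int __init example_init(void)
	{
		/* Becomes a no-op stub returning 0 when CONFIG_VMGENID is not enabled. */
		return register_random_vmfork_notifier(&example_vmfork_nb);
	}

	static void __exit example_exit(void)
	{
		unregister_random_vmfork_notifier(&example_vmfork_nb);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

The callback runs from a blocking notifier chain invoked by add_vmfork_randomness(), so it may
sleep; returning 0 (NOTIFY_DONE) lets the rest of the chain run.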