author    Jason A. Donenfeld <Jason@zx2c4.com>    2022-05-13 16:17:12 +0200
committer Jason A. Donenfeld <Jason@zx2c4.com>    2022-05-19 16:54:15 +0200
commit    560181c27b582557d633ecb608110075433383af (patch)
tree      b86c9fc3e05c6273d2fdc859a734fb4a3936e88b /drivers/char/random.c
parent    random: make consistent use of buf and len (diff)
random: move initialization functions out of hot pages
Much of random.c is devoted to initializing the rng and accounting for when a sufficient amount of entropy has been added. In a perfect world, this would all happen during init, and so we could mark these functions as __init. But in reality, this isn't the case: sometimes the rng only finishes initializing some seconds after system init is finished. For this reason, at the moment, a whole host of functions that are only used relatively close to system init and then never again are intermixed with functions that are used in hot code all the time. This creates more cache misses than necessary.

In order to pack the hot code closer together, this commit moves the initialization functions that can't be marked as __init into .text.unlikely by way of the __cold attribute.

Of particular note is moving credit_init_bits() into a macro wrapper that inlines the crng_ready() static branch check. This avoids a function call to a nop+ret, and most notably prevents extra entropy arithmetic from being computed in mix_interrupt_randomness().

Reviewed-by: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
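For illustration only, here is a minimal sketch (not part of this patch) of the macro-wrapper-around-a-static-branch pattern described above; the names example_ready and _do_cold_work are hypothetical stand-ins for crng_is_ready/crng_ready() and _credit_init_bits():

/* Hypothetical sketch of the pattern, not code from this commit. */
#include <linux/compiler.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_ready);

/* Out-of-line slow path; __cold places it in .text.unlikely. */
static void __cold _do_cold_work(size_t bits)
{
	/* expensive bookkeeping that only matters before readiness */
}

/*
 * The wrapper expands at every call site, so the static branch is
 * patched directly into the caller: once example_ready is enabled
 * via static_branch_enable(), the call to the cold helper (and any
 * work done purely to compute its argument) is skipped entirely,
 * instead of costing a call into a function that immediately returns.
 */
#define do_cold_work(bits) \
	do { \
		if (!static_branch_likely(&example_ready)) \
			_do_cold_work(bits); \
	} while (0)

In the patch itself, crng_ready() plays the role of the static-branch check and _credit_init_bits() is the cold helper behind the credit_init_bits() wrapper.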
Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--  drivers/char/random.c | 46
1 file changed, 21 insertions(+), 25 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0475ed69aa34..7ec700683e42 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -110,7 +110,7 @@ bool rng_is_initialized(void)
}
EXPORT_SYMBOL(rng_is_initialized);
-static void crng_set_ready(struct work_struct *work)
+static void __cold crng_set_ready(struct work_struct *work)
{
static_branch_enable(&crng_is_ready);
}
@@ -149,7 +149,7 @@ EXPORT_SYMBOL(wait_for_random_bytes);
* returns: 0 if callback is successfully added
* -EALREADY if pool is already initialised (callback not called)
*/
-int register_random_ready_notifier(struct notifier_block *nb)
+int __cold register_random_ready_notifier(struct notifier_block *nb)
{
unsigned long flags;
int ret = -EALREADY;
@@ -167,7 +167,7 @@ int register_random_ready_notifier(struct notifier_block *nb)
/*
* Delete a previously registered readiness callback function.
*/
-int unregister_random_ready_notifier(struct notifier_block *nb)
+int __cold unregister_random_ready_notifier(struct notifier_block *nb)
{
unsigned long flags;
int ret;
@@ -178,7 +178,7 @@ int unregister_random_ready_notifier(struct notifier_block *nb)
return ret;
}
-static void process_random_ready_list(void)
+static void __cold process_random_ready_list(void)
{
unsigned long flags;
@@ -188,15 +188,9 @@ static void process_random_ready_list(void)
}
#define warn_unseeded_randomness() \
- _warn_unseeded_randomness(__func__, (void *)_RET_IP_)
-
-static void _warn_unseeded_randomness(const char *func_name, void *caller)
-{
- if (!IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) || crng_ready())
- return;
- printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
- func_name, caller, crng_init);
-}
+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
+ __func__, (void *)_RET_IP_, crng_init)
/*********************************************************************
@@ -615,7 +609,7 @@ EXPORT_SYMBOL(get_random_u32);
* This function is called when the CPU is coming up, with entry
* CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
*/
-int random_prepare_cpu(unsigned int cpu)
+int __cold random_prepare_cpu(unsigned int cpu)
{
/*
* When the cpu comes back online, immediately invalidate both
@@ -790,13 +784,15 @@ static void extract_entropy(void *buf, size_t len)
memzero_explicit(&block, sizeof(block));
}
-static void credit_init_bits(size_t bits)
+#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+
+static void __cold _credit_init_bits(size_t bits)
{
static struct execute_work set_ready;
unsigned int new, orig, add;
unsigned long flags;
- if (crng_ready() || !bits)
+ if (!bits)
return;
add = min_t(size_t, bits, POOL_BITS);
@@ -1011,7 +1007,7 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
* Handle random seed passed by bootloader, and credit it if
* CONFIG_RANDOM_TRUST_BOOTLOADER is set.
*/
-void add_bootloader_randomness(const void *buf, size_t len)
+void __cold add_bootloader_randomness(const void *buf, size_t len)
{
mix_pool_bytes(buf, len);
if (trust_bootloader)
@@ -1027,7 +1023,7 @@ static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
* don't credit it, but we do immediately force a reseed after so
* that it's used by the crng posthaste.
*/
-void add_vmfork_randomness(const void *unique_vm_id, size_t len)
+void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
{
add_device_randomness(unique_vm_id, len);
if (crng_ready()) {
@@ -1040,13 +1036,13 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t len)
EXPORT_SYMBOL_GPL(add_vmfork_randomness);
#endif
-int register_random_vmfork_notifier(struct notifier_block *nb)
+int __cold register_random_vmfork_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
-int unregister_random_vmfork_notifier(struct notifier_block *nb)
+int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&vmfork_chain, nb);
}
@@ -1091,7 +1087,7 @@ static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
* This function is called when the CPU has just come online, with
* entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
*/
-int random_online_cpu(unsigned int cpu)
+int __cold random_online_cpu(unsigned int cpu)
{
/*
* During CPU shutdown and before CPU onlining, add_interrupt_
@@ -1246,7 +1242,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned int nu
if (in_hardirq())
this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
else
- credit_init_bits(bits);
+ _credit_init_bits(bits);
}
void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
@@ -1274,7 +1270,7 @@ void add_disk_randomness(struct gendisk *disk)
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
-void rand_initialize_disk(struct gendisk *disk)
+void __cold rand_initialize_disk(struct gendisk *disk)
{
struct timer_rand_state *state;
@@ -1309,7 +1305,7 @@ struct entropy_timer_state {
*
* So the re-arming always happens in the entropy loop itself.
*/
-static void entropy_timer(struct timer_list *timer)
+static void __cold entropy_timer(struct timer_list *timer)
{
struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
@@ -1323,7 +1319,7 @@ static void entropy_timer(struct timer_list *timer)
* If we have an actual cycle counter, see if we can
* generate enough entropy with timing noise
*/
-static void try_to_generate_entropy(void)
+static void __cold try_to_generate_entropy(void)
{
enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 32 };
struct entropy_timer_state stack;