Diffstat:
 include/linux/jump_label.h | 16
 kernel/jump_label.c        | 36
 2 files changed, 46 insertions(+), 6 deletions(-)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 0536524bb9eb..68904469fba1 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -117,13 +117,18 @@ struct module;
 
 #include <linux/atomic.h>
 
+#ifdef HAVE_JUMP_LABEL
+
 static inline int static_key_count(struct static_key *key)
 {
-	return atomic_read(&key->enabled);
+	/*
+	 * -1 means the first static_key_slow_inc() is in progress.
+	 *  static_key_enabled() must return true, so return 1 here.
+	 */
+	int n = atomic_read(&key->enabled);
+	return n >= 0 ? n : 1;
 }
 
-#ifdef HAVE_JUMP_LABEL
-
 #define JUMP_TYPE_FALSE	0UL
 #define JUMP_TYPE_TRUE	1UL
 #define JUMP_TYPE_MASK	1UL
@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
 
 #else  /* !HAVE_JUMP_LABEL */
 
+static inline int static_key_count(struct static_key *key)
+{
+	return atomic_read(&key->enabled);
+}
+
 static __always_inline void jump_label_init(void)
 {
 	static_key_initialized = true;
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 05254eeb4b4e..4b353e0be121 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
 
 void static_key_slow_inc(struct static_key *key)
 {
+	int v, v1;
+
 	STATIC_KEY_CHECK_USE();
-	if (atomic_inc_not_zero(&key->enabled))
-		return;
+
+	/*
+	 * Careful if we get concurrent static_key_slow_inc() calls;
+	 * later calls must wait for the first one to _finish_ the
+	 * jump_label_update() process.  At the same time, however,
+	 * the jump_label_update() call below wants to see
+	 * static_key_enabled(&key) for jumps to be updated properly.
+	 *
+	 * So give a special meaning to negative key->enabled: it sends
+	 * static_key_slow_inc() down the slow path, and it is non-zero
+	 * so it counts as "enabled" in jump_label_update().  Note that
+	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
+	 */
+	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
+		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
+		if (likely(v1 == v))
+			return;
+	}
 
 	jump_label_lock();
-	if (atomic_inc_return(&key->enabled) == 1)
+	if (atomic_read(&key->enabled) == 0) {
+		atomic_set(&key->enabled, -1);
 		jump_label_update(key);
+		atomic_set(&key->enabled, 1);
+	} else {
+		atomic_inc(&key->enabled);
+	}
 	jump_label_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
 static void __static_key_slow_dec(struct static_key *key,
 		unsigned long rate_limit, struct delayed_work *work)
 {
+	/*
+	 * The negative count check is valid even when a negative
+	 * key->enabled is in use by static_key_slow_inc(); a
+	 * __static_key_slow_dec() before the first static_key_slow_inc()
+	 * returns is unbalanced, because all other static_key_slow_inc()
+	 * instances block while the update is in progress.
+	 */
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
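
The patch hinges on two cooperating pieces: static_key_count() treats the transient value -1 as "enabled" (returning 1), and static_key_slow_inc() uses that -1 as an "update in progress" marker so concurrent incrementers fall off the lockless fast path and serialize on jump_label_mutex. Below is a minimal userspace sketch of the same pattern using C11 atomics and a pthread mutex in place of jump_label_mutex; all fake_* names are hypothetical stand-ins, not kernel APIs.

/*
 * Hypothetical userspace analog of the patched code paths.
 * C11 atomics replace the kernel's atomic_t operations, and a
 * pthread mutex stands in for jump_label_mutex.
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int enabled = 0;  /* plays the role of key->enabled */
static pthread_mutex_t fake_jump_label_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the patched static_key_count(): -1 must read as "enabled". */
static int fake_count(void)
{
	int n = atomic_load(&enabled);
	return n >= 0 ? n : 1;
}

/* Stand-in for jump_label_update(); it must see a non-zero count. */
static void fake_update(void)
{
	printf("updating jumps, count seen as %d\n", fake_count());
}

static void fake_slow_inc(void)
{
	int v = atomic_load(&enabled);

	/*
	 * Fast path: bump the count only while it is strictly positive.
	 * On failure, compare_exchange reloads v with the current value,
	 * so the loop re-tests v > 0, matching the kernel's open-coded
	 * atomic_cmpxchg() loop (atomic_inc_unless_negative() would also
	 * accept 0, which must instead fall through to the slow path).
	 */
	while (v > 0) {
		if (atomic_compare_exchange_weak(&enabled, &v, v + 1))
			return;
	}

	/* Slow path: exactly one caller performs the 0 -> 1 transition. */
	pthread_mutex_lock(&fake_jump_label_mutex);
	if (atomic_load(&enabled) == 0) {
		atomic_store(&enabled, -1);   /* mark update in progress */
		fake_update();                /* fake_count() reports 1  */
		atomic_store(&enabled, 1);    /* publish the real count  */
	} else {
		atomic_fetch_add(&enabled, 1);
	}
	pthread_mutex_unlock(&fake_jump_label_mutex);
}

int main(void)
{
	fake_slow_inc();  /* first caller: slow path, runs fake_update() */
	fake_slow_inc();  /* later callers: lockless fast path           */
	printf("final count: %d\n", fake_count());
	return 0;
}

A second thread calling fake_slow_inc() while the first is inside fake_update() reads enabled == -1, skips the fast-path loop (which requires v > 0), and blocks on the mutex until the 0 -> 1 transition is published, which is exactly the ordering the patch enforces.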
