Diffstat (limited to 'kernel')
-rw-r--r--  kernel/locking/mutex-debug.c |   4
-rw-r--r--  kernel/rcu/srcutiny.c        | 120
-rw-r--r--  kernel/rcu/srcutree.c        | 489
-rw-r--r--  kernel/rcu/sync.c            |  13
-rw-r--r--  kernel/rcu/tree.c            |  50
-rw-r--r--  kernel/rcu/tree_exp.h        |  10
-rw-r--r--  kernel/rcu/tree_plugin.h     |  46
-rw-r--r--  kernel/rcu/update.c          |   3
8 files changed, 382 insertions(+), 353 deletions(-)
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 9aa713629387..771d4ca96dda 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -36,7 +36,7 @@ void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
- SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
+ lockdep_assert_held(&lock->wait_lock);
DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -51,7 +51,7 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
- SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
+ lockdep_assert_held(&lock->wait_lock);
/* Mark the current thread as blocked on the lock: */
task->blocked_on = waiter;
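
For context (not part of the patch): these two hunks replace SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)) with lockdep_assert_held(&lock->wait_lock), which checks that the current context actually holds the lock rather than merely that someone does, and compiles to nothing when lockdep is disabled. A minimal sketch of the same assertion pattern, using invented names (struct my_obj, my_obj_pick_waiter):

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/list.h>
#include <linux/bug.h>

struct my_obj {                         /* hypothetical structure */
        spinlock_t wait_lock;
        struct list_head waiters;
};

static void my_obj_pick_waiter(struct my_obj *obj)
{
        /* Caller must hold obj->wait_lock; verified only when lockdep is on. */
        lockdep_assert_held(&obj->wait_lock);
        WARN_ON_ONCE(list_empty(&obj->waiters));
        /* ... operate on obj->waiters ... */
}
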
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index b46e6683f8c9..32dfd6522548 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -37,30 +37,30 @@ int rcu_scheduler_active __read_mostly;
static LIST_HEAD(srcu_boot_list);
static bool srcu_init_done;
-static int init_srcu_struct_fields(struct srcu_struct *sp)
+static int init_srcu_struct_fields(struct srcu_struct *ssp)
{
- sp->srcu_lock_nesting[0] = 0;
- sp->srcu_lock_nesting[1] = 0;
- init_swait_queue_head(&sp->srcu_wq);
- sp->srcu_cb_head = NULL;
- sp->srcu_cb_tail = &sp->srcu_cb_head;
- sp->srcu_gp_running = false;
- sp->srcu_gp_waiting = false;
- sp->srcu_idx = 0;
- INIT_WORK(&sp->srcu_work, srcu_drive_gp);
- INIT_LIST_HEAD(&sp->srcu_work.entry);
+ ssp->srcu_lock_nesting[0] = 0;
+ ssp->srcu_lock_nesting[1] = 0;
+ init_swait_queue_head(&ssp->srcu_wq);
+ ssp->srcu_cb_head = NULL;
+ ssp->srcu_cb_tail = &ssp->srcu_cb_head;
+ ssp->srcu_gp_running = false;
+ ssp->srcu_gp_waiting = false;
+ ssp->srcu_idx = 0;
+ INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
+ INIT_LIST_HEAD(&ssp->srcu_work.entry);
return 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key)
{
/* Don't re-initialize a lock while it is held. */
- debug_check_no_locks_freed((void *)sp, sizeof(*sp));
- lockdep_init_map(&sp->dep_map, name, key, 0);
- return init_srcu_struct_fields(sp);
+ debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
+ lockdep_init_map(&ssp->dep_map, name, key, 0);
+ return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -68,15 +68,15 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
/*
* init_srcu_struct - initialize a sleep-RCU structure
- * @sp: structure to initialize.
+ * @ssp: structure to initialize.
*
* Must invoke this on a given srcu_struct before passing that srcu_struct
* to any other function. Each srcu_struct represents a separate domain
* of SRCU protection.
*/
-int init_srcu_struct(struct srcu_struct *sp)
+int init_srcu_struct(struct srcu_struct *ssp)
{
- return init_srcu_struct_fields(sp);
+ return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -84,22 +84,22 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
/*
* cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
*
* Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory.
*/
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
+void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
{
- WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]);
+ WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
if (quiesced)
- WARN_ON(work_pending(&sp->srcu_work));
+ WARN_ON(work_pending(&ssp->srcu_work));
else
- flush_work(&sp->srcu_work);
- WARN_ON(sp->srcu_gp_running);
- WARN_ON(sp->srcu_gp_waiting);
- WARN_ON(sp->srcu_cb_head);
- WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail);
+ flush_work(&ssp->srcu_work);
+ WARN_ON(ssp->srcu_gp_running);
+ WARN_ON(ssp->srcu_gp_waiting);
+ WARN_ON(ssp->srcu_cb_head);
+ WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
}
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
@@ -107,13 +107,13 @@ EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
* Removes the count for the old reader from the appropriate element of
* the srcu_struct.
*/
-void __srcu_read_unlock(struct srcu_struct *sp, int idx)
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
- int newval = sp->srcu_lock_nesting[idx] - 1;
+ int newval = ssp->srcu_lock_nesting[idx] - 1;
- WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
- if (!newval && READ_ONCE(sp->srcu_gp_waiting))
- swake_up_one(&sp->srcu_wq);
+ WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
+ if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
+ swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
@@ -127,24 +127,24 @@ void srcu_drive_gp(struct work_struct *wp)
int idx;
struct rcu_head *lh;
struct rcu_head *rhp;
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
- sp = container_of(wp, struct srcu_struct, srcu_work);
- if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
+ ssp = container_of(wp, struct srcu_struct, srcu_work);
+ if (ssp->srcu_gp_running || !READ_ONCE(ssp->srcu_cb_head))
return; /* Already running or nothing to do. */
/* Remove recently arrived callbacks and wait for readers. */
- WRITE_ONCE(sp->srcu_gp_running, true);
+ WRITE_ONCE(ssp->srcu_gp_running, true);
local_irq_disable();
- lh = sp->srcu_cb_head;
- sp->srcu_cb_head = NULL;
- sp->srcu_cb_tail = &sp->srcu_cb_head;
+ lh = ssp->srcu_cb_head;
+ ssp->srcu_cb_head = NULL;
+ ssp->srcu_cb_tail = &ssp->srcu_cb_head;
local_irq_enable();
- idx = sp->srcu_idx;
- WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
- WRITE_ONCE(sp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
- swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
- WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
+ idx = ssp->srcu_idx;
+ WRITE_ONCE(ssp->srcu_idx, !ssp->srcu_idx);
+ WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
+ swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
+ WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
/* Invoke the callbacks we removed above. */
while (lh) {
@@ -161,9 +161,9 @@ void srcu_drive_gp(struct work_struct *wp)
* at interrupt level, but the ->srcu_gp_running checks will
* straighten that out.
*/
- WRITE_ONCE(sp->srcu_gp_running, false);
- if (READ_ONCE(sp->srcu_cb_head))
- schedule_work(&sp->srcu_work);
+ WRITE_ONCE(ssp->srcu_gp_running, false);
+ if (READ_ONCE(ssp->srcu_cb_head))
+ schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);
@@ -171,7 +171,7 @@ EXPORT_SYMBOL_GPL(srcu_drive_gp);
* Enqueue an SRCU callback on the specified srcu_struct structure,
* initiating grace-period processing if it is not already running.
*/
-void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func)
{
unsigned long flags;
@@ -179,14 +179,14 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
rhp->func = func;
rhp->next = NULL;
local_irq_save(flags);
- *sp->srcu_cb_tail = rhp;
- sp->srcu_cb_tail = &rhp->next;
+ *ssp->srcu_cb_tail = rhp;
+ ssp->srcu_cb_tail = &rhp->next;
local_irq_restore(flags);
- if (!READ_ONCE(sp->srcu_gp_running)) {
+ if (!READ_ONCE(ssp->srcu_gp_running)) {
if (likely(srcu_init_done))
- schedule_work(&sp->srcu_work);
- else if (list_empty(&sp->srcu_work.entry))
- list_add(&sp->srcu_work.entry, &srcu_boot_list);
+ schedule_work(&ssp->srcu_work);
+ else if (list_empty(&ssp->srcu_work.entry))
+ list_add(&ssp->srcu_work.entry, &srcu_boot_list);
}
}
EXPORT_SYMBOL_GPL(call_srcu);
@@ -194,13 +194,13 @@ EXPORT_SYMBOL_GPL(call_srcu);
/*
* synchronize_srcu - wait for prior SRCU read-side critical-section completion
*/
-void synchronize_srcu(struct srcu_struct *sp)
+void synchronize_srcu(struct srcu_struct *ssp)
{
struct rcu_synchronize rs;
init_rcu_head_on_stack(&rs.head);
init_completion(&rs.completion);
- call_srcu(sp, &rs.head, wakeme_after_rcu);
+ call_srcu(ssp, &rs.head, wakeme_after_rcu);
wait_for_completion(&rs.completion);
destroy_rcu_head_on_stack(&rs.head);
}
@@ -219,13 +219,13 @@ void __init rcu_scheduler_starting(void)
*/
void __init srcu_init(void)
{
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
srcu_init_done = true;
while (!list_empty(&srcu_boot_list)) {
- sp = list_first_entry(&srcu_boot_list,
+ ssp = list_first_entry(&srcu_boot_list,
struct srcu_struct, srcu_work.entry);
- list_del_init(&sp->srcu_work.entry);
- schedule_work(&sp->srcu_work);
+ list_del_init(&ssp->srcu_work.entry);
+ schedule_work(&ssp->srcu_work);
}
}
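
The srcutiny.c portion is a mechanical rename of the srcu_struct pointer from sp to ssp; no prototypes change shape, so callers build unmodified. For orientation, a hedged sketch of a Tiny-SRCU user exercising the interfaces touched above; the domain my_srcu, struct foo, and the helpers are invented:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/srcu.h>

static DEFINE_STATIC_SRCU(my_srcu);     /* hypothetical SRCU domain */

struct foo {
        struct rcu_head rh;
        int data;
};

static void foo_free_cb(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct foo, rh));
}

static void foo_retire(struct foo *p)
{
        /* Queued callback runs after a Tiny-SRCU grace period. */
        call_srcu(&my_srcu, &p->rh, foo_free_cb);
}

static int foo_read(struct foo *p)
{
        int idx, val;

        idx = srcu_read_lock(&my_srcu);  /* __srcu_read_lock() underneath */
        val = READ_ONCE(p->data);
        srcu_read_unlock(&my_srcu, idx); /* may wake a waiting srcu_drive_gp() */
        return val;
}
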
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index a8846ed7f352..3600d88d8956 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -56,7 +56,7 @@ static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;
static void srcu_invoke_callbacks(struct work_struct *work);
-static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
+static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
@@ -92,7 +92,7 @@ do { \
* srcu_read_unlock() running against them. So if the is_static parameter
* is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
*/
-static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
+static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
{
int cpu;
int i;
@@ -103,13 +103,13 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
struct srcu_node *snp_first;
/* Work out the overall tree geometry. */
- sp->level[0] = &sp->node[0];
+ ssp->level[0] = &ssp->node[0];
for (i = 1; i < rcu_num_lvls; i++)
- sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
+ ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
rcu_init_levelspread(levelspread, num_rcu_lvl);
/* Each pass through this loop initializes one srcu_node structure. */
- srcu_for_each_node_breadth_first(sp, snp) {
+ srcu_for_each_node_breadth_first(ssp, snp) {
spin_lock_init(&ACCESS_PRIVATE(snp, lock));
WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
ARRAY_SIZE(snp->srcu_data_have_cbs));
@@ -120,17 +120,17 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
snp->srcu_gp_seq_needed_exp = 0;
snp->grplo = -1;
snp->grphi = -1;
- if (snp == &sp->node[0]) {
+ if (snp == &ssp->node[0]) {
/* Root node, special case. */
snp->srcu_parent = NULL;
continue;
}
/* Non-root node. */
- if (snp == sp->level[level + 1])
+ if (snp == ssp->level[level + 1])
level++;
- snp->srcu_parent = sp->level[level - 1] +
- (snp - sp->level[level]) /
+ snp->srcu_parent = ssp->level[level - 1] +
+ (snp - ssp->level[level]) /
levelspread[level - 1];
}
@@ -141,14 +141,14 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
ARRAY_SIZE(sdp->srcu_unlock_count));
level = rcu_num_lvls - 1;
- snp_first = sp->level[level];
+ snp_first = ssp->level[level];
for_each_possible_cpu(cpu) {
- sdp = per_cpu_ptr(sp->sda, cpu);
+ sdp = per_cpu_ptr(ssp->sda, cpu);
spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
rcu_segcblist_init(&sdp->srcu_cblist);
sdp->srcu_cblist_invoking = false;
- sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
- sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
+ sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
+ sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
sdp->mynode = &snp_first[cpu / levelspread[level]];
for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
if (snp->grplo < 0)
@@ -157,7 +157,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
}
sdp->cpu = cpu;
INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
- sdp->sp = sp;
+ sdp->ssp = ssp;
sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
if (is_static)
continue;
@@ -176,35 +176,35 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
* parameter is passed through to init_srcu_struct_nodes(), and
* also tells us that ->sda has already been wired up to srcu_data.
*/
-static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
+static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
- mutex_init(&sp->srcu_cb_mutex);
- mutex_init(&sp->srcu_gp_mutex);
- sp->srcu_idx = 0;
- sp->srcu_gp_seq = 0;
- sp->srcu_barrier_seq = 0;
- mutex_init(&sp->srcu_barrier_mutex);
- atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
- INIT_DELAYED_WORK(&sp->work, process_srcu);
+ mutex_init(&ssp->srcu_cb_mutex);
+ mutex_init(&ssp->srcu_gp_mutex);
+ ssp->srcu_idx = 0;
+ ssp->srcu_gp_seq = 0;
+ ssp->srcu_barrier_seq = 0;
+ mutex_init(&ssp->srcu_barrier_mutex);
+ atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
+ INIT_DELAYED_WORK(&ssp->work, process_srcu);
if (!is_static)
- sp->sda = alloc_percpu(struct srcu_data);
- init_srcu_struct_nodes(sp, is_static);
- sp->srcu_gp_seq_needed_exp = 0;
- sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
- smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
- return sp->sda ? 0 : -ENOMEM;
+ ssp->sda = alloc_percpu(struct srcu_data);
+ init_srcu_struct_nodes(ssp, is_static);
+ ssp->srcu_gp_seq_needed_exp = 0;
+ ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+ smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
+ return ssp->sda ? 0 : -ENOMEM;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key)
{
/* Don't re-initialize a lock while it is held. */
- debug_check_no_locks_freed((void *)sp, sizeof(*sp));
- lockdep_init_map(&sp->dep_map, name, key, 0);
- spin_lock_init(&ACCESS_PRIVATE(sp, lock));
- return init_srcu_struct_fields(sp, false);
+ debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
+ lockdep_init_map(&ssp->dep_map, name, key, 0);
+ spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
+ return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -212,16 +212,16 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
/**
* init_srcu_struct - initialize a sleep-RCU structure
- * @sp: structure to initialize.
+ * @ssp: structure to initialize.
*
* Must invoke this on a given srcu_struct before passing that srcu_struct
* to any other function. Each srcu_struct represents a separate domain
* of SRCU protection.
*/
-int init_srcu_struct(struct srcu_struct *sp)
+int init_srcu_struct(struct srcu_struct *ssp)
{
- spin_lock_init(&ACCESS_PRIVATE(sp, lock));
- return init_srcu_struct_fields(sp, false);
+ spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
+ return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -231,37 +231,37 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
* First-use initialization of statically allocated srcu_struct
* structure. Wiring up the combining tree is more than can be
* done with compile-time initialization, so this check is added
- * to each update-side SRCU primitive. Use sp->lock, which -is-
+ * to each update-side SRCU primitive. Use ssp->lock, which -is-
* compile-time initialized, to resolve races involving multiple
* CPUs trying to garner first-use privileges.
*/
-static void check_init_srcu_struct(struct srcu_struct *sp)
+static void check_init_srcu_struct(struct srcu_struct *ssp)
{
unsigned long flags;
/* The smp_load_acquire() pairs with the smp_store_release(). */
- if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
+ if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
return; /* Already initialized. */
- spin_lock_irqsave_rcu_node(sp, flags);
- if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
- spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_lock_irqsave_rcu_node(ssp, flags);
+ if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
return;
}
- init_srcu_struct_fields(sp, true);
- spin_unlock_irqrestore_rcu_node(sp, flags);
+ init_srcu_struct_fields(ssp, true);
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
* Returns approximate total of the readers' ->srcu_lock_count[] values
* for the rank of per-CPU counters specified by idx.
*/
-static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
int cpu;
unsigned long sum = 0;
for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+ struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
}
@@ -272,13 +272,13 @@ static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
* Returns approximate total of the readers' ->srcu_unlock_count[] values
* for the rank of per-CPU counters specified by idx.
*/
-static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
int cpu;
unsigned long sum = 0;
for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+ struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
}
@@ -289,11 +289,11 @@ static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
* Return true if the number of pre-existing readers is determined to
* be zero.
*/
-static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
+static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
unsigned long unlocks;
- unlocks = srcu_readers_unlock_idx(sp, idx);
+ unlocks = srcu_readers_unlock_idx(ssp, idx);
/*
* Make sure that a lock is always counted if the corresponding
@@ -329,25 +329,25 @@ static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
* of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
* especially on 64-bit systems.
*/
- return srcu_readers_lock_idx(sp, idx) == unlocks;
+ return srcu_readers_lock_idx(ssp, idx) == unlocks;
}
/**
* srcu_readers_active - returns true if there are readers. and false
* otherwise
- * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
+ * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
*
* Note that this is not an atomic primitive, and can therefore suffer
* severe errors when invoked on an active srcu_struct. That said, it
* can be useful as an error check at cleanup time.
*/
-static bool srcu_readers_active(struct srcu_struct *sp)
+static bool srcu_readers_active(struct srcu_struct *ssp)
{
int cpu;
unsigned long sum = 0;
for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+ struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
sum += READ_ONCE(cpuc->srcu_lock_count[0]);
sum += READ_ONCE(cpuc->srcu_lock_count[1]);
@@ -363,44 +363,44 @@ static bool srcu_readers_active(struct srcu_struct *sp)
* Return grace-period delay, zero if there are expedited grace
* periods pending, SRCU_INTERVAL otherwise.
*/
-static unsigned long srcu_get_delay(struct srcu_struct *sp)
+static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
- if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
- READ_ONCE(sp->srcu_gp_seq_needed_exp)))
+ if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
+ READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
return 0;
return SRCU_INTERVAL;
}
/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
+void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
{
int cpu;
- if (WARN_ON(!srcu_get_delay(sp)))
+ if (WARN_ON(!srcu_get_delay(ssp)))
return; /* Just leak it! */
- if (WARN_ON(srcu_readers_active(sp)))
+ if (WARN_ON(srcu_readers_active(ssp)))
return; /* Just leak it! */
if (quiesced) {
- if (WARN_ON(delayed_work_pending(&sp->work)))
+ if (WARN_ON(delayed_work_pending(&ssp->work)))
return; /* Just leak it! */
} else {
- flush_delayed_work(&sp->work);
+ flush_delayed_work(&ssp->work);
}
for_each_possible_cpu(cpu)
if (quiesced) {
- if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work)))
+ if (WARN_ON(delayed_work_pending(&per_cpu_ptr(ssp->sda, cpu)->work)))
return; /* Just leak it! */
} else {
- flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
+ flush_delayed_work(&per_cpu_ptr(ssp->sda, cpu)->work);
}
- if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
- WARN_ON(srcu_readers_active(sp))) {
+ if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
+ WARN_ON(srcu_readers_active(ssp))) {
pr_info("%s: Active srcu_struct %p state: %d\n",
- __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+ __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
return; /* Caller forgot to stop doing call_srcu()? */
}
- free_percpu(sp->sda);
- sp->sda = NULL;
+ free_percpu(ssp->sda);
+ ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
@@ -409,12 +409,12 @@ EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
* srcu_struct.
* Returns an index that must be passed to the matching srcu_read_unlock().
*/
-int __srcu_read_lock(struct srcu_struct *sp)
+int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
- idx = READ_ONCE(sp->srcu_idx) & 0x1;
- this_cpu_inc(sp->sda->srcu_lock_count[idx]);
+ idx = READ_ONCE(ssp->srcu_idx) & 0x1;
+ this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
smp_mb(); /* B */ /* Avoid leaking the critical section. */
return idx;
}
@@ -425,10 +425,10 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
* element of the srcu_struct. Note that this may well be a different
* CPU than that which was incremented by the corresponding srcu_read_lock().
*/
-void __srcu_read_unlock(struct srcu_struct *sp, int idx)
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
smp_mb(); /* C */ /* Avoid leaking the critical section. */
- this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
+ this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
@@ -444,20 +444,22 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
/*
* Start an SRCU grace period.
*/
-static void srcu_gp_start(struct srcu_struct *sp)
+static void srcu_gp_start(struct srcu_struct *ssp)
{
- struct srcu_data *sdp = this_cpu_ptr(sp->sda);
+ struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
int state;
- lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
- WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
+ lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
+ WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
+ spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&sp->srcu_gp_seq));
+ rcu_seq_current(&ssp->srcu_gp_seq));
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
- rcu_seq_snap(&sp->srcu_gp_seq));
+ rcu_seq_snap(&ssp->srcu_gp_seq));
+ spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
- rcu_seq_start(&sp->srcu_gp_seq);
- state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+ rcu_seq_start(&ssp->srcu_gp_seq);
+ state = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}
@@ -511,7 +513,7 @@ static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
* just-completed grace period, the one corresponding to idx. If possible,
* schedule this invocation on the corresponding CPUs.
*/
-static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
+static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
unsigned long mask, unsigned long delay)
{
int cpu;
@@ -519,7 +521,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
if (!(mask & (1 << (cpu - snp->grplo))))
continue;
- srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
+ srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
}
}
@@ -532,7 +534,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
* are initiating callback invocation. This allows the ->srcu_have_cbs[]
* array to have a finite number of elements.
*/
-static void srcu_gp_end(struct srcu_struct *sp)
+static void srcu_gp_end(struct srcu_struct *ssp)
{
unsigned long cbdelay;
bool cbs;
@@ -546,28 +548,28 @@ static void srcu_gp_end(struct srcu_struct *sp)
struct srcu_node *snp;
/* Prevent more than one additional grace period. */
- mutex_lock(&sp->srcu_cb_mutex);
+ mutex_lock(&ssp->srcu_cb_mutex);
/* End the current grace period. */
- spin_lock_irq_rcu_node(sp);
- idx = rcu_seq_state(sp->srcu_gp_seq);
+ spin_lock_irq_rcu_node(ssp);
+ idx = rcu_seq_state(ssp->srcu_gp_seq);
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
- cbdelay = srcu_get_delay(sp);
- sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
- rcu_seq_end(&sp->srcu_gp_seq);
- gpseq = rcu_seq_current(&sp->srcu_gp_seq);
- if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
- sp->srcu_gp_seq_needed_exp = gpseq;
- spin_unlock_irq_rcu_node(sp);
- mutex_unlock(&sp->srcu_gp_mutex);
+ cbdelay = srcu_get_delay(ssp);
+ ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+ rcu_seq_end(&ssp->srcu_gp_seq);
+ gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
+ if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
+ ssp->srcu_gp_seq_needed_exp = gpseq;
+ spin_unlock_irq_rcu_node(ssp);
+ mutex_unlock(&ssp->srcu_gp_mutex);
/* A new grace period can start at this point. But only one. */
/* Initiate callback invocation as needed. */
idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
- srcu_for_each_node_breadth_first(sp, snp) {
+ srcu_for_each_node_breadth_first(ssp, snp) {
spin_lock_irq_rcu_node(snp);
cbs = false;
- last_lvl = snp >= sp->level[rcu_num_lvls - 1];
+ last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
if (last_lvl)
cbs = snp->srcu_have_cbs[idx] == gpseq;
snp->srcu_have_cbs[idx] = gpseq;
@@ -578,12 +580,12 @@ static void srcu_gp_end(struct srcu_struct *sp)
snp->srcu_data_have_cbs[idx] = 0;
spin_unlock_irq_rcu_node(snp);
if (cbs)
- srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
+ srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
/* Occasionally prevent srcu_data counter wrap. */
if (!(gpseq & counter_wrap_check) && last_lvl)
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
- sdp = per_cpu_ptr(sp->sda, cpu);
+ sdp = per_cpu_ptr(ssp->sda, cpu);
spin_lock_irqsave_rcu_node(sdp, flags);
if (ULONG_CMP_GE(gpseq,
sdp->srcu_gp_seq_needed + 100))
@@ -596,18 +598,18 @@ static void srcu_gp_end(struct srcu_struct *sp)
}
/* Callback initiation done, allow grace periods after next. */
- mutex_unlock(&sp->srcu_cb_mutex);
+ mutex_unlock(&ssp->srcu_cb_mutex);
/* Start a new grace period if needed. */
- spin_lock_irq_rcu_node(sp);
- gpseq = rcu_seq_current(&sp->srcu_gp_seq);
+ spin_lock_irq_rcu_node(ssp);
+ gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
if (!rcu_seq_state(gpseq) &&
- ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
- srcu_gp_start(sp);
- spin_unlock_irq_rcu_node(sp);
- srcu_reschedule(sp, 0);
+ ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
+ srcu_gp_start(ssp);
+ spin_unlock_irq_rcu_node(ssp);
+ srcu_reschedule(ssp, 0);
} else {
- spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(ssp);
}
}
@@ -618,13 +620,13 @@ static void srcu_gp_end(struct srcu_struct *sp)
* but without expediting. To start a completely new grace period,
* whether expedited or not, use srcu_funnel_gp_start() instead.
*/
-static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
+static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
unsigned long s)
{
unsigned long flags;
for (; snp != NULL; snp = snp->srcu_parent) {
- if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
+ if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
return;
spin_lock_irqsave_rcu_node(snp, flags);
@@ -635,10 +637,10 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
spin_unlock_irqrestore_rcu_node(snp, flags);
}
- spin_lock_irqsave_rcu_node(sp, flags);
- if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
- sp->srcu_gp_seq_needed_exp = s;
- spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_lock_irqsave_rcu_node(ssp, flags);
+ if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
+ ssp->srcu_gp_seq_needed_exp = s;
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
@@ -651,7 +653,7 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
* Note that this function also does the work of srcu_funnel_exp_start(),
* in some cases by directly invoking it.
*/
-static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
+static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
unsigned long s, bool do_norm)
{
unsigned long flags;
@@ -661,7 +663,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
/* Each pass through the loop does one level of the srcu_node tree. */
for (; snp != NULL; snp = snp->srcu_parent) {
- if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
+ if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
return; /* GP already done and CBs recorded. */
spin_lock_irqsave_rcu_node(snp, flags);
if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
@@ -676,7 +678,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
return;
}
if (!do_norm)
- srcu_funnel_exp_start(sp, snp, s);
+ srcu_funnel_exp_start(ssp, snp, s);
return;
}
snp->srcu_have_cbs[idx] = s;
@@ -688,29 +690,29 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
}
/* Top of tree, must ensure the grace period will be started. */
- spin_lock_irqsave_rcu_node(sp, flags);
- if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
+ spin_lock_irqsave_rcu_node(ssp, flags);
+ if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
/*
* Record need for grace period s. Pair with load
* acquire setting up for initialization.
*/
- smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
+ smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
}
- if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
- sp->srcu_gp_seq_needed_exp = s;
+ if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
+ ssp->srcu_gp_seq_needed_exp = s;
/* If grace period not already done and none in progress, start it. */
- if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
- rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
- WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
- srcu_gp_start(sp);
+ if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
+ rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
+ WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
+ srcu_gp_start(ssp);
if (likely(srcu_init_done))
- queue_delayed_work(rcu_gp_wq, &sp->work,
- srcu_get_delay(sp));
- else if (list_empty(&sp->work.work.entry))
- list_add(&sp->work.work.entry, &srcu_boot_list);
+ queue_delayed_work(rcu_gp_wq, &ssp->work,
+ srcu_get_delay(ssp));
+ else if (list_empty(&ssp->work.work.entry))
+ list_add(&ssp->work.work.entry, &srcu_boot_list);
}
- spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
@@ -718,12 +720,12 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
* loop an additional time if there is an expedited grace period pending.
* The caller must ensure that ->srcu_idx is not changed while checking.
*/
-static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
+static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
for (;;) {
- if (srcu_readers_active_idx_check(sp, idx))
+ if (srcu_readers_active_idx_check(ssp, idx))
return true;
- if (--trycount + !srcu_get_delay(sp) <= 0)
+ if (--trycount + !srcu_get_delay(ssp) <= 0)
return false;
udelay(SRCU_RETRY_CHECK_DELAY);
}
@@ -734,7 +736,7 @@ static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
* use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
* us to wait for pre-existing readers in a starvation-free manner.
*/
-static void srcu_flip(struct srcu_struct *sp)
+static void srcu_flip(struct srcu_struct *ssp)
{
/*
* Ensure that if this updater saw a given reader's increment
@@ -746,7 +748,7 @@ static void srcu_flip(struct srcu_struct *sp)
*/
smp_mb(); /* E */ /* Pairs with B and C. */
- WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);
+ WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
/*
* Ensure that if the updater misses an __srcu_read_unlock()
@@ -779,7 +781,7 @@ static void srcu_flip(struct srcu_struct *sp)
* negligible when amoritized over that time period, and the extra latency
* of a needlessly non-expedited grace period is similarly negligible.
*/
-static bool srcu_might_be_idle(struct srcu_struct *sp)
+static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
unsigned long curseq;
unsigned long flags;
@@ -788,7 +790,7 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
/* If the local srcu_data structure has callbacks, not idle. */
local_irq_save(flags);
- sdp = this_cpu_ptr(sp->sda);
+ sdp = this_cpu_ptr(ssp->sda);
if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
local_irq_restore(flags);
return false; /* Callbacks already present, so not idle. */
@@ -804,17 +806,17 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
/* First, see if enough time has passed since the last GP. */
t = ktime_get_mono_fast_ns();
if (exp_holdoff == 0 ||
- time_in_range_open(t, sp->srcu_last_gp_end,
- sp->srcu_last_gp_end + exp_holdoff))
+ time_in_range_open(t, ssp->srcu_last_gp_end,
+ ssp->srcu_last_gp_end + exp_holdoff))
return false; /* Too soon after last GP. */
/* Next, check for probable idleness. */
- curseq = rcu_seq_current(&sp->srcu_gp_seq);
+ curseq = rcu_seq_current(&ssp->srcu_gp_seq);
smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
- if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
+ if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
return false; /* Grace period in progress, so not idle. */
smp_mb(); /* Order ->srcu_gp_seq with prior access. */
- if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
+ if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
return false; /* GP # changed, so not idle. */
return true; /* With reasonable probability, idle! */
}
@@ -854,16 +856,17 @@ static void srcu_leak_callback(struct rcu_head *rhp)
* srcu_read_lock(), and srcu_read_unlock() that are all passed the same
* srcu_struct structure.
*/
-void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func, bool do_norm)
{
unsigned long flags;
+ int idx;
bool needexp = false;
bool needgp = false;
unsigned long s;
struct srcu_data *sdp;
- check_init_srcu_struct(sp);
+ check_init_srcu_struct(ssp);
if (debug_rcu_head_queue(rhp)) {
/* Probable double call_srcu(), so leak the callback. */
WRITE_ONCE(rhp->func, srcu_leak_callback);
@@ -871,13 +874,14 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
return;
}
rhp->func = func;
+ idx = srcu_read_lock(ssp);
local_irq_save(flags);
- sdp = this_cpu_ptr(sp->sda);
+ sdp = this_cpu_ptr(ssp->sda);
spin_lock_rcu_node(sdp);
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&sp->srcu_gp_seq));
- s = rcu_seq_snap(&sp->srcu_gp_seq);
+ rcu_seq_current(&ssp->srcu_gp_seq));
+ s = rcu_seq_snap(&ssp->srcu_gp_seq);
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
sdp->srcu_gp_seq_needed = s;
@@ -889,14 +893,15 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
}
spin_unlock_irqrestore_rcu_node(sdp, flags);
if (needgp)
- srcu_funnel_gp_start(sp, sdp, s, do_norm);
+ srcu_funnel_gp_start(ssp, sdp, s, do_norm);
else if (needexp)
- srcu_funnel_exp_start(sp, sdp->mynode, s);
+ srcu_funnel_exp_start(ssp, sdp->mynode, s);
+ srcu_read_unlock(ssp, idx);
}
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
- * @sp: srcu_struct in queue the callback
+ * @ssp: srcu_struct in queue the callback
* @rhp: structure to be used for queueing the SRCU callback.
* @func: function to be invoked after the SRCU grace period
*
@@ -911,21 +916,21 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
* The callback will be invoked from process context, but must nevertheless
* be fast and must not block.
*/
-void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func)
{
- __call_srcu(sp, rhp, func, true);
+ __call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
/*
* Helper function for synchronize_srcu() and synchronize_srcu_expedited().
*/
-static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
+static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
struct rcu_synchronize rcu;
- RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
+ RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
@@ -934,10 +939,10 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
return;
might_sleep();
- check_init_srcu_struct(sp);
+ check_init_srcu_struct(ssp);
init_completion(&rcu.completion);
init_rcu_head_on_stack(&rcu.head);
- __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
+ __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
wait_for_completion(&rcu.completion);
destroy_rcu_head_on_stack(&rcu.head);
@@ -953,7 +958,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
/**
* synchronize_srcu_expedited - Brute-force SRCU grace period
- * @sp: srcu_struct with which to synchronize.
+ * @ssp: srcu_struct with which to synchronize.
*
* Wait for an SRCU grace period to elapse, but be more aggressive about
* spinning rather than blocking when waiting.
@@ -961,15 +966,15 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
* Note that synchronize_srcu_expedited() has the same deadlock and
* memory-ordering properties as does synchronize_srcu().
*/
-void synchronize_srcu_expedited(struct srcu_struct *sp)
+void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
- __synchronize_srcu(sp, rcu_gp_is_normal());
+ __synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
/**
* synchronize_srcu - wait for prior SRCU read-side critical-section completion
- * @sp: srcu_struct with which to synchronize.
+ * @ssp: srcu_struct with which to synchronize.
*
* Wait for the count to drain to zero of both indexes. To avoid the
* possible starvation of synchronize_srcu(), it waits for the count of
@@ -1011,12 +1016,12 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
* SRCU must also provide it. Note that detecting idleness is heuristic
* and subject to both false positives and negatives.
*/
-void synchronize_srcu(struct srcu_struct *sp)
+void synchronize_srcu(struct srcu_struct *ssp)
{
- if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
- synchronize_srcu_expedited(sp);
+ if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
+ synchronize_srcu_expedited(ssp);
else
- __synchronize_srcu(sp, true);
+ __synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
@@ -1026,36 +1031,36 @@ EXPORT_SYMBOL_GPL(synchronize_srcu);
static void srcu_barrier_cb(struct rcu_head *rhp)
{
struct srcu_data *sdp;
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
- sp = sdp->sp;
- if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
- complete(&sp->srcu_barrier_completion);
+ ssp = sdp->ssp;
+ if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
+ complete(&ssp->srcu_barrier_completion);
}
/**
* srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
- * @sp: srcu_struct on which to wait for in-flight callbacks.
+ * @ssp: srcu_struct on which to wait for in-flight callbacks.
*/
-void srcu_barrier(struct srcu_struct *sp)
+void srcu_barrier(struct srcu_struct *ssp)
{
int cpu;
struct srcu_data *sdp;
- unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);
+ unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
- check_init_srcu_struct(sp);
- mutex_lock(&sp->srcu_barrier_mutex);
- if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
+ check_init_srcu_struct(ssp);
+ mutex_lock(&ssp->srcu_barrier_mutex);
+ if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
smp_mb(); /* Force ordering following return. */
- mutex_unlock(&sp->srcu_barrier_mutex);
+ mutex_unlock(&ssp->srcu_barrier_mutex);
return; /* Someone else did our work for us. */
}
- rcu_seq_start(&sp->srcu_barrier_seq);
- init_completion(&sp->srcu_barrier_completion);
+ rcu_seq_start(&ssp->srcu_barrier_seq);
+ init_completion(&ssp->srcu_barrier_completion);
/* Initial count prevents reaching zero until all CBs are posted. */
- atomic_set(&sp->srcu_barrier_cpu_cnt, 1);
+ atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
/*
* Each pass through this loop enqueues a callback, but only
@@ -1066,39 +1071,39 @@ void srcu_barrier(struct srcu_struct *sp)
* grace period as the last callback already in the queue.
*/
for_each_possible_cpu(cpu) {
- sdp = per_cpu_ptr(sp->sda, cpu);
+ sdp = per_cpu_ptr(ssp->sda, cpu);
spin_lock_irq_rcu_node(sdp);
- atomic_inc(&sp->srcu_barrier_cpu_cnt);
+ atomic_inc(&ssp->srcu_barrier_cpu_cnt);
sdp->srcu_barrier_head.func = srcu_barrier_cb;
debug_rcu_head_queue(&sdp->srcu_barrier_head);
if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
&sdp->srcu_barrier_head, 0)) {
debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
- atomic_dec(&sp->srcu_barrier_cpu_cnt);
+ atomic_dec(&ssp->srcu_barrier_cpu_cnt);
}
spin_unlock_irq_rcu_node(sdp);
}
/* Remove the initial count, at which point reaching zero can happen. */
- if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
- complete(&sp->srcu_barrier_completion);
- wait_for_completion(&sp->srcu_barrier_completion);
+ if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
+ complete(&ssp->srcu_barrier_completion);
+ wait_for_completion(&ssp->srcu_barrier_completion);
- rcu_seq_end(&sp->srcu_barrier_seq);
- mutex_unlock(&sp->srcu_barrier_mutex);
+ rcu_seq_end(&ssp->srcu_barrier_seq);
+ mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
/**
* srcu_batches_completed - return batches completed.
- * @sp: srcu_struct on which to report batch completion.
+ * @ssp: srcu_struct on which to report batch completion.
*
* Report the number of batches, correlated with, but not necessarily
* precisely the same as, the number of grace periods that have elapsed.
*/
-unsigned long srcu_batches_completed(struct srcu_struct *sp)
+unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
- return sp->srcu_idx;
+ return ssp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
@@ -1107,11 +1112,11 @@ EXPORT_SYMBOL_GPL(srcu_batches_completed);
* to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
* completed in that state.
*/
-static void srcu_advance_state(struct srcu_struct *sp)
+static void srcu_advance_state(struct srcu_struct *ssp)
{
int idx;
- mutex_lock(&sp->srcu_gp_mutex);
+ mutex_lock(&ssp->srcu_gp_mutex);
/*
* Because readers might be delayed for an extended period after
@@ -1123,47 +1128,47 @@ static void srcu_advance_state(struct srcu_struct *sp)
* The load-acquire ensures that we see the accesses performed
* by the prior grace period.
*/
- idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
+ idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
if (idx == SRCU_STATE_IDLE) {
- spin_lock_irq_rcu_node(sp);
- if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
- WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
- spin_unlock_irq_rcu_node(sp);
- mutex_unlock(&sp->srcu_gp_mutex);
+ spin_lock_irq_rcu_node(ssp);
+ if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
+ WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
+ spin_unlock_irq_rcu_node(ssp);
+ mutex_unlock(&ssp->srcu_gp_mutex);
return;
}
- idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+ idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
if (idx == SRCU_STATE_IDLE)
- srcu_gp_start(sp);
- spin_unlock_irq_rcu_node(sp);
+ srcu_gp_start(ssp);
+ spin_unlock_irq_rcu_node(ssp);
if (idx != SRCU_STATE_IDLE) {
- mutex_unlock(&sp->srcu_gp_mutex);
+ mutex_unlock(&ssp->srcu_gp_mutex);
return; /* Someone else started the grace period. */
}
}
- if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
- idx = 1 ^ (sp->srcu_idx & 1);
- if (!try_check_zero(sp, idx, 1)) {
- mutex_unlock(&sp->srcu_gp_mutex);
+ if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
+ idx = 1 ^ (ssp->srcu_idx & 1);
+ if (!try_check_zero(ssp, idx, 1)) {
+ mutex_unlock(&ssp->srcu_gp_mutex);
return; /* readers present, retry later. */
}
- srcu_flip(sp);
- rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
+ srcu_flip(ssp);
+ rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
}
- if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
+ if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
/*
* SRCU read-side critical sections are normally short,
* so check at least twice in quick succession after a flip.
*/
- idx = 1 ^ (sp->srcu_idx & 1);
- if (!try_check_zero(sp, idx, 2)) {
- mutex_unlock(&sp->srcu_gp_mutex);
+ idx = 1 ^ (ssp->srcu_idx & 1);
+ if (!try_check_zero(ssp, idx, 2)) {
+ mutex_unlock(&ssp->srcu_gp_mutex);
return; /* readers present, retry later. */
}
- srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */
+ srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
}
}
@@ -1179,14 +1184,14 @@ static void srcu_invoke_callbacks(struct work_struct *work)
struct rcu_cblist ready_cbs;
struct rcu_head *rhp;
struct srcu_data *sdp;
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
sdp = container_of(work, struct srcu_data, work.work);
- sp = sdp->sp;
+ ssp = sdp->ssp;
rcu_cblist_init(&ready_cbs);
spin_lock_irq_rcu_node(sdp);
rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&sp->srcu_gp_seq));
+ rcu_seq_current(&ssp->srcu_gp_seq));
if (sdp->srcu_cblist_invoking ||
!rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
spin_unlock_irq_rcu_node(sdp);
@@ -1212,7 +1217,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
spin_lock_irq_rcu_node(sdp);
rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
- rcu_seq_snap(&sp->srcu_gp_seq));
+ rcu_seq_snap(&ssp->srcu_gp_seq));
sdp->srcu_cblist_invoking = false;
more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
spin_unlock_irq_rcu_node(sdp);
@@ -1224,24 +1229,24 @@ static void srcu_invoke_callbacks(struct work_struct *work)
* Finished one round of SRCU grace period. Start another if there are
* more SRCU callbacks queued, otherwise put SRCU into not-running state.
*/
-static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
+static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
bool pushgp = true;
- spin_lock_irq_rcu_node(sp);
- if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
- if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
+ spin_lock_irq_rcu_node(ssp);
+ if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
+ if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
/* All requests fulfilled, time to go idle. */
pushgp = false;
}
- } else if (!rcu_seq_state(sp->srcu_gp_seq)) {
+ } else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
/* Outstanding request and no GP. Start one. */
- srcu_gp_start(sp);
+ srcu_gp_start(ssp);
}
- spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(ssp);
if (pushgp)
- queue_delayed_work(rcu_gp_wq, &sp->work, delay);
+ queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}
/*
@@ -1249,41 +1254,41 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
*/
static void process_srcu(struct work_struct *work)
{
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
- sp = container_of(work, struct srcu_struct, work.work);
+ ssp = container_of(work, struct srcu_struct, work.work);
- srcu_advance_state(sp);
- srcu_reschedule(sp, srcu_get_delay(sp));
+ srcu_advance_state(ssp);
+ srcu_reschedule(ssp, srcu_get_delay(ssp));
}
void srcutorture_get_gp_data(enum rcutorture_type test_type,
- struct srcu_struct *sp, int *flags,
+ struct srcu_struct *ssp, int *flags,
unsigned long *gp_seq)
{
if (test_type != SRCU_FLAVOR)
return;
*flags = 0;
- *gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
+ *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
-void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
+void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
int cpu;
int idx;
unsigned long s0 = 0, s1 = 0;
- idx = sp->srcu_idx & 0x1;
+ idx = ssp->srcu_idx & 0x1;
pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
- tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
+ tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
for_each_possible_cpu(cpu) {
unsigned long l0, l1;
unsigned long u0, u1;
long c0, c1;
struct srcu_data *sdp;
- sdp = per_cpu_ptr(sp->sda, cpu);
+ sdp = per_cpu_ptr(ssp->sda, cpu);
u0 = sdp->srcu_unlock_count[!idx];
u1 = sdp->srcu_unlock_count[idx];
@@ -1318,14 +1323,14 @@ early_initcall(srcu_bootup_announce);
void __init srcu_init(void)
{
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
srcu_init_done = true;
while (!list_empty(&srcu_boot_list)) {
- sp = list_first_entry(&srcu_boot_list, struct srcu_struct,
+ ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
work.work.entry);
- check_init_srcu_struct(sp);
- list_del_init(&sp->work.work.entry);
- queue_work(rcu_gp_wq, &sp->work.work);
+ check_init_srcu_struct(ssp);
+ list_del_init(&ssp->work.work.entry);
+ queue_work(rcu_gp_wq, &ssp->work.work);
}
}
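
Beyond the rename, two functional changes stand out in the Tree-SRCU hunks: srcu_gp_start() now advances and accelerates the per-CPU segmented callback list under the srcu_data ->lock, and __call_srcu() brackets its enqueue with srcu_read_lock()/srcu_read_unlock() on the same srcu_struct. The sketch below shows only that bracketing shape with an invented domain (my_domain, my_queue_free); the comment states the general SRCU guarantee, not this patch's specific motivation:

#include <linux/srcu.h>

static DEFINE_SRCU(my_domain);          /* hypothetical SRCU domain */

static void my_queue_free(struct rcu_head *rhp, rcu_callback_t func)
{
        int idx;

        idx = srcu_read_lock(&my_domain);
        /*
         * General SRCU guarantee: a grace period on my_domain that began
         * after the srcu_read_lock() above cannot complete until the
         * matching srcu_read_unlock() below.
         */
        call_srcu(&my_domain, rhp, func);
        srcu_read_unlock(&my_domain, idx);
}
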
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 9d570b1892b0..be10036fa621 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -125,8 +125,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
rsp->gp_state = GP_PENDING;
spin_unlock_irq(&rsp->rss_lock);
- BUG_ON(need_wait && need_sync);
-
+ WARN_ON_ONCE(need_wait && need_sync);
if (need_sync) {
gp_ops[rsp->gp_type].sync();
rsp->gp_state = GP_PASSED;
@@ -139,7 +138,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
* Nobody has yet been allowed the 'fast' path and thus we can
* avoid doing any sync(). The callback will get 'dropped'.
*/
- BUG_ON(rsp->gp_state != GP_PASSED);
+ WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
}
}
@@ -166,8 +165,8 @@ static void rcu_sync_func(struct rcu_head *rhp)
struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
unsigned long flags;
- BUG_ON(rsp->gp_state != GP_PASSED);
- BUG_ON(rsp->cb_state == CB_IDLE);
+ WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
+ WARN_ON_ONCE(rsp->cb_state == CB_IDLE);
spin_lock_irqsave(&rsp->rss_lock, flags);
if (rsp->gp_count) {
@@ -225,7 +224,7 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
{
int cb_state;
- BUG_ON(rsp->gp_count);
+ WARN_ON_ONCE(rsp->gp_count);
spin_lock_irq(&rsp->rss_lock);
if (rsp->cb_state == CB_REPLAY)
@@ -235,6 +234,6 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
if (cb_state != CB_IDLE) {
gp_ops[rsp->gp_type].wait();
- BUG_ON(rsp->cb_state != CB_IDLE);
+ WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
}
}
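
The rcu/sync.c hunks convert BUG_ON() assertions to WARN_ON_ONCE(): instead of halting the kernel, a failed check prints a single backtrace and execution continues, and because WARN_ON_ONCE() returns the condition, callers can also bail out gracefully. A minimal sketch of both idioms with invented names (struct my_state, my_start):

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_state {                       /* hypothetical state block */
        bool already_started;
};

static int my_start(struct my_state *st)
{
        /* Warn once and bail out instead of taking the machine down. */
        if (WARN_ON_ONCE(st->already_started))
                return -EBUSY;
        st->already_started = true;
        return 0;
}
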
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 121f833acd04..6ec3abbe90e2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -500,16 +500,29 @@ void rcu_force_quiescent_state(void)
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
+ * Convert a ->gp_state value to a character string.
+ */
+static const char *gp_state_getname(short gs)
+{
+ if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
+ return "???";
+ return gp_state_names[gs];
+}
+
+/*
* Show the state of the grace-period kthreads.
*/
void show_rcu_gp_kthreads(void)
{
int cpu;
+ unsigned long j;
struct rcu_data *rdp;
struct rcu_node *rnp;
- pr_info("%s: wait state: %d ->state: %#lx\n", rcu_state.name,
- rcu_state.gp_state, rcu_state.gp_kthread->state);
+ j = jiffies - READ_ONCE(rcu_state.gp_activity);
+ pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %ld\n",
+ rcu_state.name, gp_state_getname(rcu_state.gp_state),
+ rcu_state.gp_state, rcu_state.gp_kthread->state, j);
rcu_for_each_node_breadth_first(rnp) {
if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
continue;
@@ -891,12 +904,12 @@ void rcu_irq_enter_irqson(void)
}
/**
- * rcu_is_watching - see if RCU thinks that the current CPU is idle
+ * rcu_is_watching - see if RCU thinks that the current CPU is not idle
*
* Return true if RCU is watching the running CPU, which means that this
* CPU can safely enter RCU read-side critical sections. In other words,
- * if the current CPU is in its idle loop and is neither in an interrupt
- * or NMI handler, return true.
+ * if the current CPU is not in its idle loop or is in an interrupt or
+ * NMI handler, return true.
*/
bool notrace rcu_is_watching(void)
{
@@ -1143,16 +1156,6 @@ static void record_gp_stall_check_time(void)
}
/*
- * Convert a ->gp_state value to a character string.
- */
-static const char *gp_state_getname(short gs)
-{
- if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
- return "???";
- return gp_state_names[gs];
-}
-
-/*
* Complain about starvation of grace-period kthread.
*/
static void rcu_check_gp_kthread_starvation(void)
@@ -2032,9 +2035,9 @@ static void rcu_gp_cleanup(void)
rnp = rcu_get_root();
raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
- /* Declare grace period done. */
- rcu_seq_end(&rcu_state.gp_seq);
+ /* Declare grace period done, trace first to use old GP number. */
trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
+ rcu_seq_end(&rcu_state.gp_seq);
rcu_state.gp_state = RCU_GP_IDLE;
/* Check for GP requests since above loop. */
rdp = this_cpu_ptr(&rcu_data);
@@ -2600,10 +2603,10 @@ static void force_quiescent_state(void)
* This function checks for grace-period requests that fail to motivate
* RCU to come out of its idle mode.
*/
-static void
-rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
+void
+rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
+ const unsigned long gpssdelay)
{
- const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
unsigned long flags;
unsigned long j;
struct rcu_node *rnp_root = rcu_get_root();
@@ -2690,7 +2693,7 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
local_irq_restore(flags);
}
- rcu_check_gp_start_stall(rnp, rdp);
+ rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
/* If there are callbacks ready, invoke them. */
if (rcu_segcblist_ready_cbs(&rdp->cblist))
@@ -2826,7 +2829,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
* Very early boot, before rcu_init(). Initialize if needed
* and then drop through to queue the callback.
*/
- BUG_ON(cpu != -1);
+ WARN_ON_ONCE(cpu != -1);
WARN_ON_ONCE(!rcu_is_watching());
if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist);
@@ -3485,7 +3488,8 @@ static int __init rcu_spawn_gp_kthread(void)
rcu_scheduler_fully_active = 1;
t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
- BUG_ON(IS_ERR(t));
+ if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
+ return 0;
rnp = rcu_get_root();
raw_spin_lock_irqsave_rcu_node(rnp, flags);
rcu_state.gp_kthread = t;
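
Within tree.c, gp_state_getname() moves ahead of show_rcu_gp_kthreads() so the kthread report can print a symbolic wait state along with the jiffies delta since ->gp_activity. Its bounds check is a generic defensive enum-to-string idiom, sketched here with invented names (enum my_state, my_state_getname):

#include <linux/kernel.h>

enum my_state { MY_IDLE, MY_WAIT, MY_DONE };    /* hypothetical states */

static const char * const my_state_names[] = {
        [MY_IDLE] = "MY_IDLE",
        [MY_WAIT] = "MY_WAIT",
        [MY_DONE] = "MY_DONE",
};

static const char *my_state_getname(short gs)
{
        /* Out-of-range values map to "???" instead of reading past the table. */
        if (gs < 0 || gs >= ARRAY_SIZE(my_state_names))
                return "???";
        return my_state_names[gs];
}
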
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 8d18c1014e2b..928fe5893a57 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -450,10 +450,12 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
}
INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
preempt_disable();
- cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
+ cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
/* If all offline, queue the work on an unbound CPU. */
- if (unlikely(cpu > rnp->grphi))
+ if (unlikely(cpu > rnp->grphi - rnp->grplo))
cpu = WORK_CPU_UNBOUND;
+ else
+ cpu += rnp->grplo;
queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
preempt_enable();
rnp->exp_need_flush = true;
@@ -690,8 +692,10 @@ static void sync_rcu_exp_handler(void *unused)
*/
if (t->rcu_read_lock_nesting > 0) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
- if (rnp->expmask & rdp->grpmask)
+ if (rnp->expmask & rdp->grpmask) {
rdp->deferred_qs = true;
+ WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
+ }
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
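
In sync_rcu_exp_select_cpus(), the CPU used to queue the per-node work now comes from the leaf node's ->ffmask instead of cpu_online_mask. find_next_bit() yields a node-relative bit index, so the code rebases it with cpu += rnp->grplo, and an index larger than rnp->grphi - rnp->grplo means nothing was found and the work falls back to WORK_CPU_UNBOUND. A small sketch of that rebasing arithmetic with made-up values (pick_cpu, grplo = 16, grphi = 31):

#include <linux/bitops.h>

/* Translate a node-relative bit index into a CPU number, -1 if none set. */
static int pick_cpu(unsigned long mask, int grplo, int grphi)
{
        int cpu = find_next_bit(&mask, BITS_PER_LONG, 0);

        if (cpu > grphi - grplo)        /* find_next_bit() found nothing */
                return -1;              /* caller would use WORK_CPU_UNBOUND */
        return cpu + grplo;             /* e.g. bit 5 with grplo 16 -> CPU 21 */
}
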
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 05915e536336..605ff3b06098 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -397,6 +397,11 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
return rnp->gp_tasks != NULL;
}
+/* Bias and limit values for ->rcu_read_lock_nesting. */
+#define RCU_NEST_BIAS INT_MAX
+#define RCU_NEST_NMAX (-INT_MAX / 2)
+#define RCU_NEST_PMAX (INT_MAX / 2)
+
/*
* Preemptible RCU implementation for rcu_read_lock().
* Just increment ->rcu_read_lock_nesting, shared state will be updated
@@ -405,6 +410,8 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
void __rcu_read_lock(void)
{
current->rcu_read_lock_nesting++;
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+ WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
barrier(); /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -424,20 +431,18 @@ void __rcu_read_unlock(void)
--t->rcu_read_lock_nesting;
} else {
barrier(); /* critical section before exit code. */
- t->rcu_read_lock_nesting = INT_MIN;
+ t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
barrier(); /* assign before ->rcu_read_unlock_special load */
if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
rcu_read_unlock_special(t);
barrier(); /* ->rcu_read_unlock_special load before assign */
t->rcu_read_lock_nesting = 0;
}
-#ifdef CONFIG_PROVE_LOCKING
- {
- int rrln = READ_ONCE(t->rcu_read_lock_nesting);
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ int rrln = t->rcu_read_lock_nesting;
- WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+ WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
@@ -597,7 +602,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
*/
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
- return (this_cpu_ptr(&rcu_data)->deferred_qs ||
+ return (__this_cpu_read(rcu_data.deferred_qs) ||
READ_ONCE(t->rcu_read_unlock_special.s)) &&
t->rcu_read_lock_nesting <= 0;
}
@@ -617,11 +622,11 @@ static void rcu_preempt_deferred_qs(struct task_struct *t)
if (!rcu_preempt_need_deferred_qs(t))
return;
if (couldrecurse)
- t->rcu_read_lock_nesting -= INT_MIN;
+ t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
local_irq_save(flags);
rcu_preempt_deferred_qs_irqrestore(t, flags);
if (couldrecurse)
- t->rcu_read_lock_nesting += INT_MIN;
+ t->rcu_read_lock_nesting += RCU_NEST_BIAS;
}
/*
@@ -642,13 +647,21 @@ static void rcu_read_unlock_special(struct task_struct *t)
local_irq_save(flags);
irqs_were_disabled = irqs_disabled_flags(flags);
- if ((preempt_bh_were_disabled || irqs_were_disabled) &&
- t->rcu_read_unlock_special.b.blocked) {
+ if (preempt_bh_were_disabled || irqs_were_disabled) {
+ WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
/* Need to defer quiescent state until everything is enabled. */
- raise_softirq_irqoff(RCU_SOFTIRQ);
+ if (irqs_were_disabled) {
+ /* Enabling irqs does not reschedule, so... */
+ raise_softirq_irqoff(RCU_SOFTIRQ);
+ } else {
+ /* Enabling BH or preempt does reschedule, so... */
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+ }
local_irq_restore(flags);
return;
}
+ WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
rcu_preempt_deferred_qs_irqrestore(t, flags);
}
@@ -1464,7 +1477,8 @@ static void __init rcu_spawn_boost_kthreads(void)
for_each_possible_cpu(cpu)
per_cpu(rcu_cpu_has_work, cpu) = 0;
- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
+ if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__))
+ return;
rcu_for_each_leaf_node(rnp)
(void)rcu_spawn_one_boost_kthread(rnp);
}
@@ -2322,7 +2336,8 @@ static int rcu_nocb_kthread(void *arg)
tail = rdp->nocb_follower_tail;
rdp->nocb_follower_tail = &rdp->nocb_follower_head;
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
- BUG_ON(!list);
+ if (WARN_ON_ONCE(!list))
+ continue;
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty"));
/* Each pass through the following loop invokes a callback. */
@@ -2495,7 +2510,8 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
/* Spawn the kthread for this CPU. */
t = kthread_run(rcu_nocb_kthread, rdp_spawn,
"rcuo%c/%d", rcu_state.abbr, cpu);
- BUG_ON(IS_ERR(t));
+ if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo kthread, OOM is now expected behavior\n", __func__))
+ return;
WRITE_ONCE(rdp_spawn->nocb_kthread, t);
}
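
The tree_plugin.h hunks replace the bare INT_MIN bias on ->rcu_read_lock_nesting with named RCU_NEST_* constants and swap the #ifdef CONFIG_PROVE_LOCKING block in __rcu_read_unlock() for an IS_ENABLED() test, which keeps the debug check compiled and type-checked in every configuration while the optimizer drops it when lock proving is off. A sketch of that idiom with an invented helper (my_nest_check, MY_NEST_PMAX mirroring RCU_NEST_PMAX):

#include <linux/bug.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>

#define MY_NEST_PMAX    (INT_MAX / 2)   /* hypothetical, mirrors RCU_NEST_PMAX */

static inline void my_nest_check(int nesting)
{
        /* Always parsed and type-checked; emits code only if PROVE_LOCKING=y. */
        if (IS_ENABLED(CONFIG_PROVE_LOCKING))
                WARN_ON_ONCE(nesting > MY_NEST_PMAX);
}
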
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index c729ca5e6ee2..1971869c4072 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -820,7 +820,8 @@ static int __init rcu_spawn_tasks_kthread(void)
struct task_struct *t;
t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
- BUG_ON(IS_ERR(t));
+ if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
+ return 0;
smp_mb(); /* Ensure others see full kthread. */
WRITE_ONCE(rcu_tasks_kthread_ptr, t);
return 0;
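
Finally, each BUG_ON(IS_ERR(t)) after a kthread spawn (the grace-period, rcub, rcuo, and tasks-RCU kthreads) becomes WARN_ONCE() plus an early return, so a failed spawn degrades to a warning rather than a panic. A hedged sketch of the pattern with an invented worker (my_worker, my_spawn_worker):

#include <linux/bug.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int my_worker(void *arg)         /* hypothetical kthread body */
{
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init my_spawn_worker(void)
{
        struct task_struct *t;

        t = kthread_run(my_worker, NULL, "my_worker");
        if (WARN_ONCE(IS_ERR(t), "%s: could not start my_worker kthread\n",
                      __func__))
                return 0;               /* keep going without the kthread */
        return 0;
}
early_initcall(my_spawn_worker);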