Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c	|  40 ++++++++++++++++++++++++++++------------
1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 0512221cd84b..a8dd612098bf 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -55,6 +55,7 @@
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
+#include <linux/slab.h>
#include <linux/sched/isolation.h>
#include <linux/sched/clock.h>
#include "../time/tick-internal.h"
@@ -2146,7 +2147,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
/* If no callbacks are ready, just return. */
if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
trace_rcu_batch_start(rcu_state.name,
- rcu_segcblist_n_lazy_cbs(&rdp->cblist),
rcu_segcblist_n_cbs(&rdp->cblist), 0);
trace_rcu_batch_end(rcu_state.name, 0,
!rcu_segcblist_empty(&rdp->cblist),
@@ -2168,7 +2168,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
if (unlikely(bl > 100))
tlimit = local_clock() + rcu_resched_ns;
trace_rcu_batch_start(rcu_state.name,
- rcu_segcblist_n_lazy_cbs(&rdp->cblist),
rcu_segcblist_n_cbs(&rdp->cblist), bl);
rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
if (offloaded)
@@ -2179,9 +2178,19 @@ static void rcu_do_batch(struct rcu_data *rdp)
tick_dep_set_task(current, TICK_DEP_BIT_RCU);
rhp = rcu_cblist_dequeue(&rcl);
for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
+ rcu_callback_t f;
+
debug_rcu_head_unqueue(rhp);
- if (__rcu_reclaim(rcu_state.name, rhp))
- rcu_cblist_dequeued_lazy(&rcl);
+
+ rcu_lock_acquire(&rcu_callback_map);
+ trace_rcu_invoke_callback(rcu_state.name, rhp);
+
+ f = rhp->func;
+ WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
+ f(rhp);
+
+ rcu_lock_release(&rcu_callback_map);
+
/*
* Stop only if limit reached and CPU has something to do.
* Note: The rcl structure counts down from zero.
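Note: this hunk open-codes at the call site what __rcu_reclaim() previously did for a non-kfree callback: read the function pointer out of the rcu_head, clear ->func, then invoke the callback, bracketed by the rcu_callback_map lockdep annotation and the invoke-callback tracepoint. A minimal user-space sketch of that read-clear-invoke pattern follows; sketch_head and invoke_cb are illustrative names, not kernel API.

#include <stdio.h>

struct sketch_head {
	struct sketch_head *next;
	void (*func)(struct sketch_head *);
};

static void invoke_cb(struct sketch_head *rhp)
{
	void (*f)(struct sketch_head *) = rhp->func;

	rhp->func = NULL;	/* stands in for WRITE_ONCE(rhp->func, (rcu_callback_t)0L) */
	f(rhp);			/* the callback may free rhp; never touch it afterwards */
}

static void my_cb(struct sketch_head *rhp)
{
	printf("invoked callback for head %p\n", (void *)rhp);
}

int main(void)
{
	struct sketch_head h = { .next = NULL, .func = my_cb };

	invoke_cb(&h);
	return 0;
}

Clearing ->func before the call makes a stale or doubly-invoked head easier to spot in debugging.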
@@ -2583,7 +2592,7 @@ static void rcu_leak_callback(struct rcu_head *rhp)
* is expected to specify a CPU.
*/
static void
-__call_rcu(struct rcu_head *head, rcu_callback_t func, bool lazy)
+__call_rcu(struct rcu_head *head, rcu_callback_t func)
{
unsigned long flags;
struct rcu_data *rdp;
@@ -2618,18 +2627,17 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, bool lazy)
if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist);
}
+
if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
return; // Enqueued onto ->nocb_bypass, so just leave.
/* If we get here, rcu_nocb_try_bypass() acquired ->nocb_lock. */
- rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
+ rcu_segcblist_enqueue(&rdp->cblist, head);
if (__is_kfree_rcu_offset((unsigned long)func))
trace_rcu_kfree_callback(rcu_state.name, head,
(unsigned long)func,
- rcu_segcblist_n_lazy_cbs(&rdp->cblist),
rcu_segcblist_n_cbs(&rdp->cblist));
else
trace_rcu_callback(rcu_state.name, head,
- rcu_segcblist_n_lazy_cbs(&rdp->cblist),
rcu_segcblist_n_cbs(&rdp->cblist));
/* Go handle any RCU core processing required. */
@@ -2679,7 +2687,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, bool lazy)
*/
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
- __call_rcu(head, func, 0);
+ __call_rcu(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);
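Note: with the lazy/non-lazy distinction gone ("lazy" historically marked kfree-style callbacks that only free memory and so need not hurry grace periods), __call_rcu() loses its flag and call_rcu() passes just head and func. The bookkeeping this removes can be sketched as below; the struct names are illustrative stand-ins, not the kernel's rcu_segcblist, which tracks callbacks in segments rather than as a bare counter.

/* Before: every enqueue had to say whether the callback was lazy,
 * so a second counter had to be kept in sync with the first. */
struct cblist_before { long n_cbs; long n_lazy_cbs; };
/* After: only the total survives; no flag is plumbed through. */
struct cblist_after { long n_cbs; };

static void enqueue_before(struct cblist_before *l, int lazy)
{
	l->n_cbs++;
	if (lazy)
		l->n_lazy_cbs++;	/* extra state to keep consistent */
}

static void enqueue_after(struct cblist_after *l)
{
	l->n_cbs++;
}

int main(void)
{
	struct cblist_before b = { 0, 0 };
	struct cblist_after a = { 0 };

	enqueue_before(&b, 1);
	enqueue_after(&a);
	return 0;
}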
@@ -2747,10 +2755,18 @@ static void kfree_rcu_work(struct work_struct *work)
// List "head" is now private, so traverse locklessly.
for (; head; head = next) {
+ unsigned long offset = (unsigned long)head->func;
+
next = head->next;
// Potentially optimize with kfree_bulk in future.
debug_rcu_head_unqueue(head);
- __rcu_reclaim(rcu_state.name, head);
+ rcu_lock_acquire(&rcu_callback_map);
+ trace_rcu_invoke_kfree_callback(rcu_state.name, head, offset);
+

+ kfree((void *)head - offset);
+
+ rcu_lock_release(&rcu_callback_map);
cond_resched_tasks_rcu_qs();
}
}
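Note: the loop above relies on the kfree_rcu() convention that for kfree-style callbacks ->func holds not a function pointer but the byte offset of the embedded rcu_head within the enclosing object (the property __is_kfree_rcu_offset() tests in the __call_rcu hunk above), so walking back by that offset recovers the original allocation for kfree(). A user-space sketch with illustrative names:

#include <stdlib.h>
#include <stddef.h>

struct sketch_head {
	struct sketch_head *next;
	void (*func)(struct sketch_head *);
};

struct foo {
	int payload;
	struct sketch_head rh;	/* embedded head, not at offset 0 */
};

int main(void)
{
	struct foo *p = malloc(sizeof(*p));
	struct sketch_head *head;
	unsigned long offset;

	if (!p)
		return 1;
	head = &p->rh;

	/* What kfree_rcu() encodes into ->func: */
	offset = offsetof(struct foo, rh);

	/* What the reclaim loop does: walk back to the allocation. */
	free((char *)head - offset);
	return 0;
}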
@@ -2825,7 +2841,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
*/
void kfree_call_rcu_nobatch(struct rcu_head *head, rcu_callback_t func)
{
- __call_rcu(head, func, 1);
+ __call_rcu(head, func);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu_nobatch);
@@ -3100,7 +3116,7 @@ static void rcu_barrier_func(void *unused)
debug_rcu_head_queue(&rdp->barrier_head);
rcu_nocb_lock(rdp);
WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
- if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
+ if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
atomic_inc(&rcu_state.barrier_cpu_count);
} else {
debug_rcu_head_unqueue(&rdp->barrier_head);
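Note: the final hunk drops the lazy flag from rcu_segcblist_entrain(), which rcu_barrier_func() uses to queue a barrier callback behind whatever is already on each CPU's list, counting one barrier completion per successful entrain. A rough user-space sketch of that entrain-and-count pattern; names are illustrative, and the kernel uses an atomic_t plus a completion rather than a plain counter.

#include <stdio.h>

struct sketch_head {
	struct sketch_head *next;
	void (*func)(struct sketch_head *);
};

static int barrier_cpu_count;	/* atomic_t + completion in the kernel */

static void barrier_cb(struct sketch_head *rhp)
{
	(void)rhp;
	if (--barrier_cpu_count == 0)
		printf("every entrained barrier callback has run\n");
}

/* Entrain: append behind the callbacks already queued; report failure
 * when the list is empty, since then there is nothing to wait for. */
static int entrain(struct sketch_head *list, struct sketch_head *rhp)
{
	if (!list)
		return 0;
	while (list->next)
		list = list->next;
	rhp->next = NULL;
	rhp->func = barrier_cb;
	list->next = rhp;
	return 1;
}

int main(void)
{
	struct sketch_head pending = { NULL, NULL };	/* one already-queued cb */
	struct sketch_head barrier_head;

	if (entrain(&pending, &barrier_head))
		barrier_cpu_count++;	/* mirrors atomic_inc(&rcu_state.barrier_cpu_count) */

	/* Later, callback invocation reaches the entrained head: */
	barrier_head.func(&barrier_head);
	return 0;
}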