author		2025-03-06 09:46:34 +0100
committer	2025-03-20 10:33:38 +0100
commit		dea2d9221e83ea02b45a60ab88284cd3bb4bb2a4 (patch)
tree		0391b6707ca2cd787f5937f285eb715375bda3c1 /kernel
parent		slub: Handle freelist cycle in on_freelist() (diff)
parent		slab: don't batch kvfree_rcu() with SLUB_TINY (diff)
Merge branch 'slab/for-6.15/kfree_rcu_tiny' into slab/for-next
Merge the slab feature branch kfree_rcu_tiny for 6.15:
- Move the TINY_RCU kvfree_rcu() implementation from the RCU to the SLAB
  subsystem and clean up its integration.
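For context on the branches being deleted below: before this series, kvfree_rcu() encoded the byte offset of the object's embedded rcu_head directly in the ->func pointer, and the invocation paths (rcu_reclaim_tiny() in tiny.c, rcutree_enqueue() in tree.c) used __is_kvfree_rcu_offset() — historically a simple `offset < 4096` test — to tell such an offset apart from a real callback pointer. The userspace sketch below models only that encoding: kvfree() is stubbed with free(), the types are simplified, struct foo is a hypothetical example object, and grace-period deferral is omitted entirely; none of this reflects the new SLAB-side implementation.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's struct rcu_head. */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

/* The old predicate: a ->func value below 4096 is an offset, not a pointer. */
static bool is_kvfree_rcu_offset(unsigned long offset)
{
	return offset < 4096;
}

/* Models the branch deleted from rcu_reclaim_tiny() in the diff below. */
static bool reclaim(struct rcu_head *head)
{
	unsigned long offset = (unsigned long)head->func;

	if (is_kvfree_rcu_offset(offset)) {
		/* ->func held offsetof(object, rcu_head): free the object. */
		free((char *)head - offset);
		return true;
	}
	head->func(head);	/* a genuine callback pointer */
	return false;
}

/* Hypothetical object with an embedded rcu_head. */
struct foo {
	int data;
	struct rcu_head rcu;
};

int main(void)
{
	struct foo *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	/* What kvfree_rcu(f, rcu) used to do: smuggle the offset via ->func. */
	f->rcu.func = (void (*)(struct rcu_head *))offsetof(struct foo, rcu);
	if (reclaim(&f->rcu))
		printf("object freed via the offset encoding\n");
	return 0;
}

Once kvfree_rcu() is handled inside SLAB, ->func is always a genuine callback pointer on these paths, which is why the offset checks can simply be dropped.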
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcu/tiny.c	25
-rw-r--r--	kernel/rcu/tree.c	 9
2 files changed, 2 insertions(+), 32 deletions(-)
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 4b3f31911465..7a34a99d4664 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -85,15 +85,8 @@ void rcu_sched_clock_irq(int user)
 static inline bool rcu_reclaim_tiny(struct rcu_head *head)
 {
 	rcu_callback_t f;
-	unsigned long offset = (unsigned long)head->func;
 
 	rcu_lock_acquire(&rcu_callback_map);
-	if (__is_kvfree_rcu_offset(offset)) {
-		trace_rcu_invoke_kvfree_callback("", head, offset);
-		kvfree((void *)head - offset);
-		rcu_lock_release(&rcu_callback_map);
-		return true;
-	}
 
 	trace_rcu_invoke_callback("", head);
 	f = head->func;
@@ -159,10 +152,6 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
-static void tiny_rcu_leak_callback(struct rcu_head *rhp)
-{
-}
-
 /*
  * Post an RCU callback to be invoked after the end of an RCU grace
  * period.  But since we have but one CPU, that would be after any
@@ -178,9 +167,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func)
 			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
 			mem_dump_obj(head);
 		}
-
-		if (!__is_kvfree_rcu_offset((unsigned long)head->func))
-			WRITE_ONCE(head->func, tiny_rcu_leak_callback);
 		return;
 	}
 
@@ -246,17 +232,6 @@ bool poll_state_synchronize_rcu(unsigned long oldstate)
 }
 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
 
-#ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, void *ptr)
-{
-	if (head)
-		kasan_record_aux_stack(ptr);
-
-	__kvfree_call_rcu(head, ptr);
-}
-EXPORT_SYMBOL_GPL(kvfree_call_rcu);
-#endif
-
 void __init rcu_init(void)
 {
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 475f31deed14..5dbc4189037c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2931,13 +2931,8 @@ static int __init rcu_spawn_core_kthreads(void)
 static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func)
 {
 	rcu_segcblist_enqueue(&rdp->cblist, head);
-	if (__is_kvfree_rcu_offset((unsigned long)func))
-		trace_rcu_kvfree_callback(rcu_state.name, head,
-					  (unsigned long)func,
-					  rcu_segcblist_n_cbs(&rdp->cblist));
-	else
-		trace_rcu_callback(rcu_state.name, head,
-				   rcu_segcblist_n_cbs(&rdp->cblist));
+	trace_rcu_callback(rcu_state.name, head,
+			   rcu_segcblist_n_cbs(&rdp->cblist));
 	trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
 }
 
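The caller-facing API is unchanged by the move: only the implementation behind kvfree_rcu() migrates out of kernel/rcu/, and the receiving SLAB-side changes are outside this diffstat, which is limited to 'kernel'. For reference, a typical caller looks the same before and after the series; `struct conn` and `destroy_conn()` below are hypothetical names used purely for illustration.

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object with an embedded rcu_head. */
struct conn {
	int id;
	struct rcu_head rcu;
};

static void destroy_conn(struct conn *c)
{
	/*
	 * Free c after a grace period; readers under rcu_read_lock()
	 * may still be dereferencing it until then. The two-argument
	 * form passes the rcu_head member, so the free path needs no
	 * extra allocation.
	 */
	kvfree_rcu(c, rcu);
}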