author    Eric Dumazet <edumazet@google.com>      2022-05-15 21:24:53 -0700
committer David S. Miller <davem@davemloft.net>   2022-05-16 11:33:59 +0100
commit    97e719a82b43c6c2bb5eebdb3c5d479a332ac2ac (patch)
tree      7e38c6a88703169d84365cb91c9aaab5aabe0adb /net/core
parent    net: tulip: convert to devres (diff)
net: fix possible race in skb_attempt_defer_free()
A CPU can observe sd->defer_count reaching 128, and call
smp_call_function_single_async().

The problem is that the remote CPU can clear sd->defer_count before the
IPI is run/acknowledged.

Other CPUs can queue more packets and also decide to call
smp_call_function_single_async() while the pending IPI has not yet been
delivered.

This is a common issue with smp_call_function_single_async(): callers
must ensure correct synchronization and serialization.

I triggered this issue while experimenting with a smaller threshold.
Performing the call to smp_call_function_single_async() under
sd->defer_lock protection did not solve the problem.

Commit 5a18ceca6350 ("smp: Allow smp_call_function_single_async() to
insert locked csd") replaced an informative WARN_ON_ONCE() with a
return of -EBUSY, which is often ignored. Testing for the presence of
CSD_FLAG_LOCK is racy anyway.

Fixes: 68822bdf76f1 ("net: generalize skb freeing deferral to per-cpu lists")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
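[Editorial note] A minimal userspace sketch of the serialization pattern this
patch adopts, assuming nothing beyond C11 atomics and pthreads; ipi_scheduled,
kick_handler() and producer() are illustrative stand-ins for
sd->defer_ipi_scheduled, trigger_rx_softirq() and skb_attempt_defer_free(),
not kernel APIs. Only the thread that wins the 0 -> 1 transition of the flag
issues a kick; the handler re-arms the flag with a release store once it has
done its work, so at most one kick is ever pending per target.

/* Build with: cc -std=c11 -pthread demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int ipi_scheduled;   /* stand-in for sd->defer_ipi_scheduled */
static atomic_int deferred;        /* stand-in for sd->defer_count */
static atomic_int kicks_sent;      /* number of "IPIs" actually issued */
static atomic_int stop;

/* Consumer: plays the role of the remote CPU's trigger_rx_softirq(). */
static void *kick_handler(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop)) {
		if (atomic_load_explicit(&ipi_scheduled, memory_order_acquire)) {
			/* "raise the softirq": drain the deferred work ... */
			atomic_store(&deferred, 0);
			/* ... then re-arm, as smp_store_release() does. */
			atomic_store_explicit(&ipi_scheduled, 0,
					      memory_order_release);
		}
	}
	return NULL;
}

/* Producers: play the role of skb_attempt_defer_free() on other CPUs. */
static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		/* The kernel kicks on an exact count of 128; here we keep
		 * kicking past the threshold to stress the flag, which is
		 * what actually bounds the number of pending kicks. */
		int kick = atomic_fetch_add(&deferred, 1) + 1 >= 128;
		int expected = 0;

		/* Only the winner of the 0 -> 1 transition sends the kick,
		 * mirroring !cmpxchg(&sd->defer_ipi_scheduled, 0, 1). */
		if (kick && atomic_compare_exchange_strong(&ipi_scheduled,
							   &expected, 1))
			atomic_fetch_add(&kicks_sent, 1);
	}
	return NULL;
}

int main(void)
{
	pthread_t cons, prod[4];

	pthread_create(&cons, NULL, kick_handler, NULL);
	for (int i = 0; i < 4; i++)
		pthread_create(&prod[i], NULL, producer, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(prod[i], NULL);
	atomic_store(&stop, 1);
	pthread_join(cons, NULL);

	printf("kicks actually sent: %d\n", atomic_load(&kicks_sent));
	return 0;
}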
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c     7
-rw-r--r--  net/core/skbuff.c  5
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index d93456c75b55..a5e663e1a75a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4582,9 +4582,12 @@ static void rps_trigger_softirq(void *data)
 #endif /* CONFIG_RPS */
 
 /* Called from hardirq (IPI) context */
-static void trigger_rx_softirq(void *data __always_unused)
+static void trigger_rx_softirq(void *data)
 {
+	struct softnet_data *sd = data;
+
 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+	smp_store_release(&sd->defer_ipi_scheduled, 0);
 }
 
 /*
@@ -11382,7 +11385,7 @@ static int __init net_dev_init(void)
 		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
 		sd->cpu = i;
 #endif
-		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, NULL);
+		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
 		spin_lock_init(&sd->defer_lock);
 
 		init_gro_hash(&sd->backlog);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2fea964f09d8..b40c8cdf4785 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6516,8 +6516,7 @@ void skb_attempt_defer_free(struct sk_buff *skb)
 	sd->defer_count++;
 
 	/* kick every time queue length reaches 128.
-	 * This should avoid blocking in smp_call_function_single_async().
-	 * This condition should hardly be bit under normal conditions,
+	 * This condition should hardly be hit under normal conditions,
 	 * unless cpu suddenly stopped to receive NIC interrupts.
 	 */
 	kick = sd->defer_count == 128;
@@ -6527,6 +6526,6 @@ void skb_attempt_defer_free(struct sk_buff *skb)
 	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
 	 * if we are unlucky enough (this seems very unlikely).
 	 */
-	if (unlikely(kick))
+	if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
 		smp_call_function_single_async(cpu, &sd->defer_csd);
 }