author     Jussi Maki <joamaki@gmail.com>          2021-06-15 08:54:15 +0000
committer  David S. Miller <davem@davemloft.net>   2021-06-15 11:26:15 -0700
commit     848ca9182a7d25bb54955c3aab9a3a2742bf9678 (patch)
tree       15449c43a407368fbc404c5d79c5e4189840226e /drivers/net/bonding
parent     netlabel: Fix memory leak in netlbl_mgmt_add_common (diff)
net: bonding: Use per-cpu rr_tx_counter
The round-robin rr_tx_counter was shared across CPUs leading to
significant cache thrashing at high packet rates.

This patch switches the round-robin packet counter to use a per-cpu
variable to decide the destination slave. On a test with 2x100Gbit ICE
nic with pktgen_sample_04_many_flows.sh (-s 64 -t 32) the tx rate
was 19.6Mpps before and 22.3Mpps after this patch.

"perf top -e cache_misses" before:
    12.31%  [bonding]       [k] bond_xmit_roundrobin_slave_get
    10.59%  [sch_fq_codel]  [k] fq_codel_dequeue
     9.34%  [kernel]        [k] skb_release_data
after:
    15.42%  [sch_fq_codel]  [k] fq_codel_dequeue
    10.06%  [kernel]        [k] __memset
     9.12%  [kernel]        [k] skb_release_data

Signed-off-by: Jussi Maki <joamaki@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
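As background for the diff below, here is a minimal illustrative sketch of the per-cpu counter pattern the patch adopts. The names (rr_picker, rr_picker_init, rr_picker_next, rr_picker_fini) are hypothetical and not part of the bonding code; the kernel APIs alloc_percpu(), this_cpu_inc_return() and free_percpu() are the ones the patch actually uses.

    /* Hypothetical example of a per-cpu round-robin counter; the rr_picker
     * names are made up for this sketch and do not exist in the kernel. */
    #include <linux/percpu.h>
    #include <linux/errno.h>

    struct rr_picker {
            u32 __percpu *counter;          /* one u32 counter per CPU */
    };

    static int rr_picker_init(struct rr_picker *p)
    {
            p->counter = alloc_percpu(u32); /* zeroed, GFP_KERNEL */
            if (!p->counter)
                    return -ENOMEM;
            return 0;
    }

    static u32 rr_picker_next(struct rr_picker *p, u32 nr_slaves)
    {
            /* Increment only this CPU's copy; no shared cache line to bounce. */
            u32 id = this_cpu_inc_return(*p->counter);

            return id % nr_slaves;          /* nr_slaves must be non-zero */
    }

    static void rr_picker_fini(struct rr_picker *p)
    {
            free_percpu(p->counter);        /* NULL-safe, like kfree() */
            p->counter = NULL;
    }

With a per-cpu counter each CPU rotates through the slaves independently rather than following one global sequence; for spreading transmit load that is normally acceptable, which appears to be the trade-off this change accepts in exchange for the higher packet rate.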
Diffstat (limited to 'drivers/net/bonding')
-rw-r--r--  drivers/net/bonding/bond_main.c  |  18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index eb79a9f05914..1d9137e77dfc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4202,16 +4202,16 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
 		slave_id = prandom_u32();
 		break;
 	case 1:
-		slave_id = bond->rr_tx_counter;
+		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
 		break;
 	default:
 		reciprocal_packets_per_slave =
 			bond->params.reciprocal_packets_per_slave;
-		slave_id = reciprocal_divide(bond->rr_tx_counter,
+		slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
+		slave_id = reciprocal_divide(slave_id,
 					     reciprocal_packets_per_slave);
 		break;
 	}
-	bond->rr_tx_counter++;
 
 	return slave_id;
 }
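In the default case of this hunk, the counter is divided by packets_per_slave so that one slave keeps receiving packets for a whole burst before the rotation moves on; reciprocal_divide() performs that division with a precomputed multiply-and-shift instead of a hardware divide. A small illustrative sketch of that relationship, assuming the struct-based API from <linux/reciprocal_div.h> (the example_ names are hypothetical):

    #include <linux/reciprocal_div.h>

    /* Hypothetical helper: precompute the reciprocal once when the
     * packets_per_slave option is configured ... */
    static struct reciprocal_value example_rps;

    static void example_set_packets_per_slave(u32 packets_per_slave)
    {
            example_rps = reciprocal_value(packets_per_slave);
    }

    /* ... then on the transmit path the division is a cheap
     * multiply-and-shift, equivalent to counter / packets_per_slave. */
    static u32 example_burst_id(u32 counter)
    {
            return reciprocal_divide(counter, example_rps);
    }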
@@ -4852,6 +4852,9 @@ static void bond_destructor(struct net_device *bond_dev)
 	if (bond->wq)
 		destroy_workqueue(bond->wq);
+
+	if (bond->rr_tx_counter)
+		free_percpu(bond->rr_tx_counter);
 }
 
 void bond_setup(struct net_device *bond_dev)
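The destructor hunk above releases the per-cpu allocation alongside the workqueue. A tiny hypothetical sketch of the same teardown pairing; free_percpu() accepts a NULL pointer (like kfree()), so a counter that was never allocated, as in non round-robin modes, is simply skipped:

    #include <linux/percpu.h>
    #include <linux/workqueue.h>

    /* Hypothetical destructor mirroring the pattern above: release the
     * workqueue and the per-cpu counter; either may be absent. */
    static void example_destructor(struct workqueue_struct *wq,
                                   u32 __percpu *counter)
    {
            if (wq)
                    destroy_workqueue(wq);
            free_percpu(counter);   /* no-op when counter is NULL */
    }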
@@ -5350,6 +5353,15 @@ static int bond_init(struct net_device *bond_dev)
 	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
 	if (!bond->wq)
 		return -ENOMEM;
 
+	if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
+		bond->rr_tx_counter = alloc_percpu(u32);
+		if (!bond->rr_tx_counter) {
+			destroy_workqueue(bond->wq);
+			bond->wq = NULL;
+			return -ENOMEM;
+		}
+	}
+
 	spin_lock_init(&bond->stats_lock);
 	netdev_lockdep_set_classes(bond_dev);
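The init hunk above allocates the counter only for round-robin mode and undoes the earlier workqueue allocation by hand when alloc_percpu() fails. For comparison, an illustrative sketch of the same pattern written with the usual goto-unwind idiom; the example_ names are hypothetical and the patch itself open-codes the cleanup instead:

    #include <linux/percpu.h>
    #include <linux/workqueue.h>
    #include <linux/errno.h>

    /* Hypothetical container holding the two resources being set up. */
    struct example_bond {
            struct workqueue_struct *wq;
            u32 __percpu *rr_tx_counter;
    };

    static int example_init(struct example_bond *b, const char *name,
                            bool roundrobin)
    {
            b->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
            if (!b->wq)
                    return -ENOMEM;

            if (roundrobin) {
                    b->rr_tx_counter = alloc_percpu(u32);
                    if (!b->rr_tx_counter)
                            goto err_destroy_wq;
            }

            return 0;

    err_destroy_wq:
            /* Unwind the earlier allocation before reporting failure. */
            destroy_workqueue(b->wq);
            b->wq = NULL;
            return -ENOMEM;
    }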