author		Jisheng Zhang <Jisheng.Zhang@synaptics.com>	2018-08-31 16:11:09 +0800
committer	David S. Miller <davem@davemloft.net>	2018-09-02 14:13:31 -0700
commit		bd9f1ee364094c66e387199809e28518b0e73954
tree		5f37b563e1fb4d69e8b946914ccf149886893d8b /drivers/net/ethernet/marvell/mvneta.c
parent		net: mvneta: enable NETIF_F_RXCSUM by default
net: mvneta: reduce smp_processor_id() calling in mvneta_tx_done_gbe
In the loop of mvneta_tx_done_gbe(), smp_processor_id() is called on
every iteration; move the call out of the loop to optimize the code a
bit.

Before the patch, the loop looks like this (on arm64):

    ldr x1, [x29,#120]
    ...
    ldr w24, [x1,#36]
    ...
    bl 0 <_raw_spin_lock>
    str w24, [x27,#132]
    ...

After the patch, the loop looks like this (on arm64):

    ...
    bl 0 <_raw_spin_lock>
    str w23, [x28,#132]
    ...

where w23 is loaded before the loop, so it is already available inside
it. Moreover, mvneta_tx_done_gbe() is called from mvneta_poll(), which
runs in non-preemptible context, so it is safe to call
smp_processor_id() only once.

Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
Reviewed-by: Gregory CLEMENT <gregory.clement@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/marvell/mvneta.c')

 drivers/net/ethernet/marvell/mvneta.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bcd20ebb2ebd..fe3edb3c2bf4 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2507,12 +2507,13 @@ static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
 {
 	struct mvneta_tx_queue *txq;
 	struct netdev_queue *nq;
+	int cpu = smp_processor_id();
 
 	while (cause_tx_done) {
 		txq = mvneta_tx_done_policy(pp, cause_tx_done);
 
 		nq = netdev_get_tx_queue(pp->dev, txq->id);
-		__netif_tx_lock(nq, smp_processor_id());
+		__netif_tx_lock(nq, cpu);
 
 		if (txq->count)
 			mvneta_txq_done(pp, txq);
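
As an aside (not part of the patch), the same hoisting pattern can be
sketched in plain userspace C. This is a minimal, hypothetical
illustration: sched_getcpu() stands in for the kernel's
smp_processor_id(), and drain()/handle_queue() are invented names, not
driver functions. One caveat: in userspace this hoisting is only safe
if the thread cannot migrate between iterations (e.g. it is pinned with
sched_setaffinity()); in the driver it is safe because mvneta_poll()
runs in non-preemptible context, as the commit message explains.

#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-queue work done under the tx lock. */
static void handle_queue(unsigned int q, int cpu)
{
	printf("queue %u drained on cpu %d\n", q, cpu);
}

static void drain(uint32_t cause)
{
	/* Loop-invariant in this context: read the CPU id once. */
	int cpu = sched_getcpu();

	while (cause) {
		unsigned int q = __builtin_ctz(cause);	/* lowest pending queue */

		/* Before the optimization this would call sched_getcpu()
		 * (resp. smp_processor_id()) on every pass. */
		handle_queue(q, cpu);
		cause &= ~(1u << q);
	}
}

int main(void)
{
	drain(0x5);	/* bits 0 and 2 set: queues 0 and 2 pending */
	return 0;
}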