Author:    Andrew Lunn <andrew@lunn.ch>	2018-07-18 18:10:50 +0200
Committer: David S. Miller <davem@davemloft.net>	2018-07-28 22:12:55 -0700
commit 7a86f05faf112463cfbbdfd222012e247de461a1 (patch)
tree   0d3b2d2c5f90f1ab08de7a6120d0a3b98ea522d1 /drivers/net/ethernet/marvell/mvneta.c
parent net: tipc: bcast: Replace GFP_ATOMIC with GFP_KERNEL in tipc_bcast_init() (diff)
net: ethernet: mvneta: Fix napi structure mixup on armada 3700
The mvneta Ethernet driver is used on a few different Marvell SoCs. Some SoCs have per-CPU interrupts for Ethernet events; others have a single interrupt, independent of the CPU. The driver handles this by having a per-CPU napi structure when there are per-CPU interrupts, and a global napi structure when there is a single interrupt.

When the napi core calls mvneta_poll(), it passes in the napi instance. This was not being propagated through the call chain; instead, the per-CPU napi instance was passed to the napi_gro_receive() call. This breaks when there is only the single global napi instance.

Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Fixes: 2636ac3cc2b4 ("net: mvneta: Add network support for Armada 3700 SoC")
Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
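The shape of the fix is easiest to see outside the driver. Below is a minimal user-space sketch, not the kernel code itself, and every name ending in "_model" is hypothetical. It models the pattern the patch establishes: the poll routine uses the napi instance handed to it by the core and threads it down to the rx helpers, instead of each helper re-deriving a per-CPU instance that is never initialized on a single-interrupt SoC such as the Armada 3700.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS_MODEL 4

struct napi_model {
	const char *name;
	bool initialized;
};

struct port_model {
	bool single_irq;                              /* e.g. Armada 3700 */
	struct napi_model global_napi;                /* used when single_irq */
	struct napi_model percpu_napi[NR_CPUS_MODEL]; /* used otherwise */
};

/* Stand-in for napi_gro_receive(): just report which instance was used. */
static void gro_receive_model(struct napi_model *napi)
{
	printf("rx handled on napi '%s' (%s)\n",
	       napi->name ? napi->name : "per-cpu, never initialized",
	       napi->initialized ? "valid" : "BROKEN");
}

/* Broken pattern: the helper re-derives the per-CPU instance on its own,
 * like the old this_cpu_ptr(pp->ports) lookup in the rx functions. */
static void rx_helper_broken(struct port_model *pp, int this_cpu)
{
	gro_receive_model(&pp->percpu_napi[this_cpu]);
}

/* Fixed pattern: the napi chosen by the core is passed down explicitly. */
static void rx_helper_fixed(struct napi_model *napi, struct port_model *pp)
{
	(void)pp;	/* real rx processing would still need the port */
	gro_receive_model(napi);
}

int main(void)
{
	/* Single-interrupt SoC: only the global napi instance exists;
	 * the per-CPU array is deliberately left zero-initialized. */
	struct port_model a3700 = {
		.single_irq = true,
		.global_napi = { .name = "global", .initialized = true },
	};
	int this_cpu = 2;

	/* The napi the core actually scheduled and passed to the poll fn. */
	struct napi_model *scheduled = a3700.single_irq ?
			&a3700.global_napi : &a3700.percpu_napi[this_cpu];

	rx_helper_broken(&a3700, this_cpu);	/* wrong, uninitialized napi */
	rx_helper_fixed(scheduled, &a3700);	/* napi handed down from poll */
	return 0;
}

In the driver itself this corresponds to adding a struct napi_struct *napi parameter to mvneta_rx_swbm() and mvneta_rx_hwbm() and dropping their this_cpu_ptr(pp->ports) lookup, as the hunks below show.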
Diffstat (limited to 'drivers/net/ethernet/marvell/mvneta.c')
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c  22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0ad2f3f7da85..ec84db47d82d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1901,10 +1901,10 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
}
/* Main rx processing when using software buffer management */
-static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
+static int mvneta_rx_swbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_queue *rxq)
{
- struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
int rx_done;
u32 rcvd_pkts = 0;
@@ -1959,7 +1959,7 @@ err_drop_frame:
skb->protocol = eth_type_trans(skb, dev);
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
+ napi_gro_receive(napi, skb);
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -2001,7 +2001,7 @@ err_drop_frame:
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
+ napi_gro_receive(napi, skb);
}
if (rcvd_pkts) {
@@ -2020,10 +2020,10 @@ err_drop_frame:
}
/* Main rx processing when using hardware buffer management */
-static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
+static int mvneta_rx_hwbm(struct napi_struct *napi,
+ struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_queue *rxq)
{
- struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
int rx_done;
u32 rcvd_pkts = 0;
@@ -2085,7 +2085,7 @@ err_drop_frame:
skb->protocol = eth_type_trans(skb, dev);
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
+ napi_gro_receive(napi, skb);
rcvd_pkts++;
rcvd_bytes += rx_bytes;
@@ -2129,7 +2129,7 @@ err_drop_frame:
mvneta_rx_csum(pp, rx_status, skb);
- napi_gro_receive(&port->napi, skb);
+ napi_gro_receive(napi, skb);
}
if (rcvd_pkts) {
@@ -2722,9 +2722,11 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
if (rx_queue) {
rx_queue = rx_queue - 1;
if (pp->bm_priv)
- rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
+ rx_done = mvneta_rx_hwbm(napi, pp, budget,
+ &pp->rxqs[rx_queue]);
else
- rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
+ rx_done = mvneta_rx_swbm(napi, pp, budget,
+ &pp->rxqs[rx_queue]);
}
if (rx_done < budget) {