author		Sowmini Varadhan <sowmini.varadhan@oracle.com>		2014-10-25 15:12:31 -0400
committer	David S. Miller <davem@davemloft.net>			2014-10-25 16:20:16 -0400
commit		13b13dd97c3a45857b2b622add06b1c70b748abf (patch)
tree		5ebb758c58de5f7755aca98d23a1b34aa48defe4 /drivers/net/ethernet/sun/sunvnet.c
parent		sunvnet: Use RCU to synchronize port usage with vnet_port_remove() (diff)
sunvnet: Remove irqsave/irqrestore on vio.lock
After the NAPIfication of sunvnet, we no longer need to synchronize by doing irqsave/restore on vio.lock in the I/O fastpath. NAPI ->poll() is non-reentrant, so all RX processing occurs strictly in a serialized environment. TX reclaim is done in NAPI context, so netif_tx_lock can be used to serialize critical sections between the Tx and Rx paths.

Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
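The vnet_clean_timer_expire() hunk below is the clearest illustration of the new scheme. As a rough sketch (not the literal patched function; struct vnet_port, vnet_clean_tx_ring() and vnet_free_skbs() are the driver's own symbols, and the body is condensed for illustration), the timer-driven TX reclaim path now looks roughly like this:

/* Condensed sketch of the reclaim path after this patch. */
static void vnet_clean_timer_expire_sketch(unsigned long port0)
{
	struct vnet_port *port = (struct vnet_port *)port0;
	struct sk_buff *freeskbs;
	unsigned pending;

	/* Before: spin_lock_irqsave(&port->vio.lock, flags) was required
	 * because RX ran from the LDC interrupt handler.  After
	 * NAPIfication, RX runs only in the serialized NAPI ->poll(), so
	 * netif_tx_lock() is enough to exclude vnet_start_xmit() while the
	 * TX ring is reclaimed.
	 */
	netif_tx_lock(port->vp->dev);
	freeskbs = vnet_clean_tx_ring(port, &pending);
	netif_tx_unlock(port->vp->dev);

	vnet_free_skbs(freeskbs);
}

The same reasoning removes the out_dropped_unlock label in vnet_start_xmit(): with no vio.lock held, the error paths can jump straight to out_dropped.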
Diffstat (limited to 'drivers/net/ethernet/sun/sunvnet.c')
-rw-r--r--	drivers/net/ethernet/sun/sunvnet.c | 30 +++++-------------------------
1 file changed, 5 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 966c252c3ca9..c390a2784546 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -842,18 +842,6 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
return NULL;
}
-struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
-{
- struct vnet_port *ret;
- unsigned long flags;
-
- spin_lock_irqsave(&vp->lock, flags);
- ret = __tx_port_find(vp, skb);
- spin_unlock_irqrestore(&vp->lock, flags);
-
- return ret;
-}
-
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
unsigned *pending)
{
@@ -914,11 +902,10 @@ static void vnet_clean_timer_expire(unsigned long port0)
struct vnet_port *port = (struct vnet_port *)port0;
struct sk_buff *freeskbs;
unsigned pending;
- unsigned long flags;
- spin_lock_irqsave(&port->vio.lock, flags);
+ netif_tx_lock(port->vp->dev);
freeskbs = vnet_clean_tx_ring(port, &pending);
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ netif_tx_unlock(port->vp->dev);
vnet_free_skbs(freeskbs);
@@ -971,7 +958,6 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct vnet_port *port = NULL;
struct vio_dring_state *dr;
struct vio_net_desc *d;
- unsigned long flags;
unsigned int len;
struct sk_buff *freeskbs = NULL;
int i, err, txi;
@@ -984,7 +970,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out_dropped;
rcu_read_lock();
- port = tx_port_find(vp, skb);
+ port = __tx_port_find(vp, skb);
if (unlikely(!port))
goto out_dropped;
@@ -1020,8 +1006,6 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out_dropped;
}
- spin_lock_irqsave(&port->vio.lock, flags);
-
dr = &port->vio.drings[VIO_DRIVER_TX_RING];
if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
if (!netif_queue_stopped(dev)) {
@@ -1055,7 +1039,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
(LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
if (err < 0) {
netdev_info(dev, "tx buffer map error %d\n", err);
- goto out_dropped_unlock;
+ goto out_dropped;
}
port->tx_bufs[txi].ncookies = err;
@@ -1108,7 +1092,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_info(dev, "TX trigger error %d\n", err);
d->hdr.state = VIO_DESC_FREE;
dev->stats.tx_carrier_errors++;
- goto out_dropped_unlock;
+ goto out_dropped;
}
ldc_start_done:
@@ -1124,7 +1108,6 @@ ldc_start_done:
netif_wake_queue(dev);
}
- spin_unlock_irqrestore(&port->vio.lock, flags);
(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
rcu_read_unlock();
@@ -1132,9 +1115,6 @@ ldc_start_done:
return NETDEV_TX_OK;
-out_dropped_unlock:
- spin_unlock_irqrestore(&port->vio.lock, flags);
-
out_dropped:
if (pending)
(void)mod_timer(&port->clean_timer,