From 980f140493b6d9d9e1fc67ce594170186be848a1 Mon Sep 17 00:00:00 2001
From: Lino Sanfilippo
Date: Fri, 9 Dec 2016 00:55:42 +0100
Subject: net: ethernet: sxgbe: remove private tx queue lock

The driver uses a private lock for synchronization of the xmit function
and the xmit completion handler, but since the NETIF_F_LLTX flag is not
set, the xmit function is also called with the xmit_lock held.

On the other hand the completion handler uses the reverse locking order
by first taking the private lock and (in case that the tx queue had
been stopped) then the xmit_lock.

Improve the locking by removing the private lock and using only the
xmit_lock for synchronization instead.

Signed-off-by: Lino Sanfilippo
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 27 ++++++---------------------
 1 file changed, 6 insertions(+), 21 deletions(-)

diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 5dbe40640da6..cddcff5a00a7 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -426,9 +426,6 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
 	tx_ring->dirty_tx = 0;
 	tx_ring->cur_tx = 0;
 
-	/* initialise TX queue lock */
-	spin_lock_init(&tx_ring->tx_lock);
-
 	return 0;
 
 dmamem_err:
@@ -743,7 +740,7 @@ static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
 
 	dev_txq = netdev_get_tx_queue(priv->dev, queue_no);
 
-	spin_lock(&tqueue->tx_lock);
+	__netif_tx_lock(dev_txq, smp_processor_id());
 
 	priv->xstats.tx_clean++;
 	while (tqueue->dirty_tx != tqueue->cur_tx) {
@@ -781,18 +778,13 @@ static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
 
 	/* wake up queue */
 	if (unlikely(netif_tx_queue_stopped(dev_txq) &&
-		     sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
-		netif_tx_lock(priv->dev);
-		if (netif_tx_queue_stopped(dev_txq) &&
-		    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
-			if (netif_msg_tx_done(priv))
-				pr_debug("%s: restart transmit\n", __func__);
-			netif_tx_wake_queue(dev_txq);
-		}
-		netif_tx_unlock(priv->dev);
+	    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
+		if (netif_msg_tx_done(priv))
+			pr_debug("%s: restart transmit\n", __func__);
+		netif_tx_wake_queue(dev_txq);
 	}
 
-	spin_unlock(&tqueue->tx_lock);
+	__netif_tx_unlock(dev_txq);
 }
 
 /**
@@ -1304,9 +1296,6 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
 		   tqueue->hwts_tx_en)))
 		ctxt_desc_req = 1;
 
-	/* get the spinlock */
-	spin_lock(&tqueue->tx_lock);
-
 	if (priv->tx_path_in_lpi_mode)
 		sxgbe_disable_eee_mode(priv);
 
@@ -1316,8 +1305,6 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
 			netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
 				   __func__, txq_index);
 		}
-		/* release the spin lock in case of BUSY */
-		spin_unlock(&tqueue->tx_lock);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1436,8 +1423,6 @@ static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
 
-	spin_unlock(&tqueue->tx_lock);
-
 	return NETDEV_TX_OK;
 }
-- 
cgit v1.2.3-59-g8ed1b
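
Condensed, the locking scheme the patch converges on looks like the sketch
below. This is an illustration rather than driver code: __netif_tx_lock(),
__netif_tx_unlock(), netif_tx_queue_stopped(), netif_tx_wake_queue() and
smp_processor_id() are the kernel helpers used in the diff, while
reclaim_tx_descriptors() and tx_ring_has_room() are hypothetical stand-ins
for the driver's descriptor bookkeeping.

#include <linux/netdevice.h>
#include <linux/smp.h>

/* Hypothetical stand-ins for the driver's descriptor bookkeeping. */
static void reclaim_tx_descriptors(void);
static bool tx_ring_has_room(void);

static void tx_clean_sketch(struct netdev_queue *dev_txq)
{
	/* Take the same per-queue lock that the core already holds
	 * around ndo_start_xmit() (the driver does not set
	 * NETIF_F_LLTX).  With a single lock there is no second lock
	 * to take in the wrong order, which removes the AB-BA
	 * inversion between the old private tx_lock and the xmit_lock.
	 */
	__netif_tx_lock(dev_txq, smp_processor_id());

	reclaim_tx_descriptors();	/* free completed tx descriptors */

	/* The stopped-check and wake are now atomic with respect to
	 * the xmit path, so the re-check under netif_tx_lock() that
	 * the old code needed is no longer necessary.
	 */
	if (netif_tx_queue_stopped(dev_txq) && tx_ring_has_room())
		netif_tx_wake_queue(dev_txq);

	__netif_tx_unlock(dev_txq);
}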