author    David S. Miller <davem@davemloft.net>  2014-08-28 01:39:31 -0700
committer David S. Miller <davem@davemloft.net>  2014-08-28 01:39:31 -0700
commit    6f19e12f623067d6a330748f932ca4a81b828ffb (patch)
tree      3fbf664e08fe9ed4b396eeeb5200f253d7ad1521
parent    ixgbe: flush when in xmit_more mode and under descriptor pressure (diff)
igb: flush when in xmit_more mode and under descriptor pressure
Mirror the changes made to ixgbe in commit 2367a17390138f68b3aa28f2f220b8d7ff8d91f4 ("ixgbe: flush when in xmit_more mode and under descriptor pressure").

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  78
1 file changed, 39 insertions(+), 39 deletions(-)
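The rule the patch enforces is: defer the tail (doorbell) write while the stack signals that more frames are coming (xmit_more), but flush immediately once the queue has been stopped for lack of descriptors, since no later transmit call will arrive to do it. Below is a minimal standalone sketch of that rule in C; the ring structure and helpers are hypothetical stand-ins for illustration, not igb code.

struct tx_ring_sketch {
	unsigned int next_to_use;	/* descriptor index to publish */
	int queue_stopped;		/* set when descriptors ran out */
};

/* Stand-in for writel(i, tx_ring->tail): publishes all queued
 * descriptors to the hardware in one MMIO write. */
static void ring_doorbell(struct tx_ring_sketch *ring)
{
}

static void finish_xmit(struct tx_ring_sketch *ring, int xmit_more)
{
	/* Defer the doorbell while more frames are promised, but never
	 * defer once the queue is stopped: the stack will not call us
	 * again until descriptors are reclaimed, so anything already
	 * queued must be flushed now or it sits unseen by hardware. */
	if (ring->queue_stopped || !xmit_more)
		ring_doorbell(ring);
}

This is exactly the shape of the condition the diff below adds to igb_tx_map(): netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more.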
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 89c29b40d61c..89de7fee5e94 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4813,6 +4813,41 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+ struct net_device *netdev = tx_ring->netdev;
+
+ netif_stop_subqueue(netdev, tx_ring->queue_index);
+
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it.
+ */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available.
+ */
+ if (igb_desc_unused(tx_ring) < size)
+ return -EBUSY;
+
+ /* A reprieve! */
+ netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+ u64_stats_update_begin(&tx_ring->tx_syncp2);
+ tx_ring->tx_stats.restart_queue2++;
+ u64_stats_update_end(&tx_ring->tx_syncp2);
+
+ return 0;
+}
+
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+ if (igb_desc_unused(tx_ring) >= size)
+ return 0;
+ return __igb_maybe_stop_tx(tx_ring, size);
+}
+
static void igb_tx_map(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
const u8 hdr_len)
@@ -4915,7 +4950,10 @@ static void igb_tx_map(struct igb_ring *tx_ring,
tx_ring->next_to_use = i;
- if (!skb->xmit_more) {
+ /* Make sure there is space in the ring for the next send. */
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
 * at a time, it synchronizes IO on IA64/Altix systems
 */
@@ -4942,41 +4980,6 @@ dma_error:
tx_ring->next_to_use = i;
}
-static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
-{
- struct net_device *netdev = tx_ring->netdev;
-
- netif_stop_subqueue(netdev, tx_ring->queue_index);
-
- /* Herbert's original patch had:
- * smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it.
- */
- smp_mb();
-
- /* We need to check again in a case another CPU has just
- * made room available.
- */
- if (igb_desc_unused(tx_ring) < size)
- return -EBUSY;
-
- /* A reprieve! */
- netif_wake_subqueue(netdev, tx_ring->queue_index);
-
- u64_stats_update_begin(&tx_ring->tx_syncp2);
- tx_ring->tx_stats.restart_queue2++;
- u64_stats_update_end(&tx_ring->tx_syncp2);
-
- return 0;
-}
-
-static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
-{
- if (igb_desc_unused(tx_ring) >= size)
- return 0;
- return __igb_maybe_stop_tx(tx_ring, size);
-}
-
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
@@ -5047,9 +5050,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
igb_tx_map(tx_ring, first, hdr_len);
- /* Make sure there is space in the ring for the next send. */
- igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
return NETDEV_TX_OK;
out_drop:
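The relocated __igb_maybe_stop_tx() also illustrates the classic stop/re-check pattern for avoiding a lost wakeup: stop the queue, issue a full memory barrier, then check descriptor space once more in case a completion on another CPU freed room in between. Here is a standalone sketch of the same pattern using C11 atomics; the queue type and helpers are hypothetical, not the netdev API.

#include <stdatomic.h>

struct queue_sketch {
	atomic_int free_slots;	/* incremented by the completion path */
	atomic_int stopped;
};

static int maybe_stop(struct queue_sketch *q, int needed)
{
	if (atomic_load(&q->free_slots) >= needed)
		return 0;

	atomic_store(&q->stopped, 1);

	/* Full barrier, mirroring the open-coded smp_mb() above: orders
	 * the store to 'stopped' before the re-read of 'free_slots',
	 * pairing with a completion path that frees slots and then tests
	 * 'stopped' before waking the queue. */
	atomic_thread_fence(memory_order_seq_cst);

	/* Re-check: another CPU may have freed slots between the first
	 * check and the stop; skipping this could stall the queue forever. */
	if (atomic_load(&q->free_slots) < needed)
		return -1;	/* stay stopped; completion will wake us */

	atomic_store(&q->stopped, 0);	/* a reprieve */
	return 0;
}

Moving the igb_maybe_stop_tx() call from igb_xmit_frame_ring() into igb_tx_map(), ahead of the tail write, is what lets the stopped-queue state feed into the flush decision shown earlier.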