author    Alexander Duyck <alexander.h.duyck@intel.com>    2011-08-26 07:44:10 +0000
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>       2011-09-19 23:59:34 -0700
commit    1cc3bd879288c7f2f47eaebdde38ac5db4bfd082
tree      e25e92ded629735f516b25ba934ad70d1536d89e
parent    igb: Replace E1000_XX_DESC_ADV with IGB_XX_DESC
igb: Remove multi_tx_table and simplify igb_xmit_frame
Instead of using the multi_tx_table to map possible Tx queues to Tx rings, we can just fold the index back into range with a simple modulo in the unlikely event that the Tx queue provided exceeds the number of Tx rings.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
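To illustrate the idea, here is a minimal standalone sketch of the same wrap-around logic (hypothetical names such as NUM_TX_RINGS and map_queue_to_ring; this is not the driver code itself): a queue index beyond the number of allocated rings is folded back into range with a modulo, so no per-queue lookup table has to be kept in sync with the ring count.

#include <stdio.h>

#define NUM_TX_RINGS 4	/* stand-in for adapter->num_tx_queues */

/* Fold an out-of-range queue index back into the valid ring range,
 * mirroring what igb_tx_queue_mapping() does in the patch below. */
static unsigned int map_queue_to_ring(unsigned int queue_mapping)
{
	unsigned int r_idx = queue_mapping;

	/* Unlikely case: the stack picked a queue beyond our ring count. */
	if (r_idx >= NUM_TX_RINGS)
		r_idx %= NUM_TX_RINGS;

	return r_idx;
}

int main(void)
{
	/* With 4 rings, queue 6 wraps to ring 2; queue 3 maps straight through. */
	printf("queue 6 -> ring %u\n", map_queue_to_ring(6));
	printf("queue 3 -> ring %u\n", map_queue_to_ring(3));
	return 0;
}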
Diffstat
 drivers/net/ethernet/intel/igb/igb_main.c | 36 ++++++++++++++++++++++--------------
 1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 55d643180bfc..7ad25e867add 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1875,7 +1875,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
err = -ENOMEM;
netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
- IGB_ABS_MAX_TX_QUEUES);
+ IGB_MAX_TX_QUEUES);
if (!netdev)
goto err_alloc_etherdev;
@@ -2620,10 +2620,6 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
}
}
- for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
- int r_idx = i % adapter->num_tx_queues;
- adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
- }
return err;
}
@@ -4363,12 +4359,21 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
+ struct sk_buff *skb)
+{
+ unsigned int r_idx = skb->queue_mapping;
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct igb_ring *tx_ring;
- int r_idx = 0;
if (test_bit(__IGB_DOWN, &adapter->state)) {
dev_kfree_skb_any(skb);
@@ -4380,14 +4385,17 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
- tx_ring = adapter->multi_tx_table[r_idx];
+ /*
+ * The minimum packet size with TCTL.PSP set is 17 so pad the skb
+ * in order to meet this minimum size requirement.
+ */
+ if (skb->len < 17) {
+ if (skb_padto(skb, 17))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+ }
- /* This goes back to the question of how to logically map a tx queue
- * to a flow. Right now, performance is impacted slightly negatively
- * if using multiple tx queues. If the stack breaks away from a
- * single qdisc implementation, we can look at this again. */
- return igb_xmit_frame_ring(skb, tx_ring);
+ return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}
/**