author    Alexander Duyck <alexander.h.duyck@intel.com>  2009-12-02 16:46:56 +0000
committer David S. Miller <davem@davemloft.net>          2009-12-02 19:57:11 -0800
commit    e5a43549f7a58509a91b299a51337d386697b92c (patch)
tree      cca78d4803f46fcb6947f77c60c213a239f4f8a7
parent    ixgb: remove use of skb_dma_map from ixgb (diff)
download  linux-dev-e5a43549f7a58509a91b299a51337d386697b92c.tar.xz
          linux-dev-e5a43549f7a58509a91b299a51337d386697b92c.zip
ixgbe: remove skb_dma_map/unmap calls from driver
This patch removes the skb_dma_map/unmap calls from the ixgbe driver because they do not work on systems with a hardware IOMMU enabled. The problem is that mapping the same buffer multiple times returns different DMA addresses when a HW IOMMU is in use, while skb_dma_map/unmap provides only one location per skb to store the mappings.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ixgbe/ixgbe.h       |  1
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  | 63
2 files changed, 50 insertions, 14 deletions
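For readers skimming the patch below: the core idea is that each tx_buffer_info entry now carries its own DMA address, length, and a mapped_as_page flag, so every mapping can be created and torn down individually instead of relying on the single dma_head/dma_maps storage that skb_dma_map() kept in the skb. The following is a condensed, hypothetical sketch of that per-buffer pattern (struct tx_buf and map_skb are invented names for illustration; this is not the actual driver code):

#include <linux/pci.h>
#include <linux/skbuff.h>

/* one slot per DMA mapping, mirroring the idea behind ixgbe_tx_buffer */
struct tx_buf {
	dma_addr_t dma;
	u16 length;
	u16 mapped_as_page;
};

static int map_skb(struct pci_dev *pdev, struct sk_buff *skb,
		   struct tx_buf *bufs)
{
	int count = 0, f;

	/* linear part of the skb: mapped with pci_map_single() */
	bufs[count].dma = pci_map_single(pdev, skb->data, skb_headlen(skb),
					 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, bufs[count].dma))
		goto unwind;
	bufs[count].length = skb_headlen(skb);
	bufs[count].mapped_as_page = false;
	count++;

	/* paged fragments: each gets its own pci_map_page() and slot */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		bufs[count].dma = pci_map_page(pdev, frag->page,
					       frag->page_offset, frag->size,
					       PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, bufs[count].dma))
			goto unwind;
		bufs[count].length = frag->size;
		bufs[count].mapped_as_page = true;
		count++;
	}
	return count;

unwind:
	/* undo every successful mapping, each with its own stored address */
	while (count--) {
		if (bufs[count].mapped_as_page)
			pci_unmap_page(pdev, bufs[count].dma,
				       bufs[count].length, PCI_DMA_TODEVICE);
		else
			pci_unmap_single(pdev, bufs[count].dma,
					 bufs[count].length, PCI_DMA_TODEVICE);
	}
	return -ENOMEM;
}

Because each slot is self-describing, an error in the middle of a multi-fragment packet can be unwound buffer by buffer, which is what the dma_error path in ixgbe_tx_map() below does via ixgbe_unmap_and_free_tx_resource().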
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 76b052fa3643..7e35e97227bd 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -106,6 +106,7 @@ struct ixgbe_tx_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
+ u16 mapped_as_page;
};
struct ixgbe_rx_buffer {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 5c56f2a741f3..9ba506f6ef29 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -218,10 +218,20 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
struct ixgbe_tx_buffer
*tx_buffer_info)
{
- tx_buffer_info->dma = 0;
+ if (tx_buffer_info->dma) {
+ if (tx_buffer_info->mapped_as_page)
+ pci_unmap_page(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_single(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->dma = 0;
+ }
if (tx_buffer_info->skb) {
- skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
- DMA_TO_DEVICE);
dev_kfree_skb_any(tx_buffer_info->skb);
tx_buffer_info->skb = NULL;
}
@@ -5024,23 +5034,16 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct sk_buff *skb, u32 tx_flags,
unsigned int first)
{
+ struct pci_dev *pdev = adapter->pdev;
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
unsigned int offset = 0, size, count = 0, i;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
- dma_addr_t *map;
i = tx_ring->next_to_use;
- if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
- dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
- return 0;
- }
-
- map = skb_shinfo(skb)->dma_maps;
-
if (tx_flags & IXGBE_TX_FLAGS_FCOE)
/* excluding fcoe_crc_eof for FCoE */
total -= sizeof(struct fcoe_crc_eof);
@@ -5051,7 +5054,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
+ tx_buffer_info->mapped_as_page = false;
+ tx_buffer_info->dma = pci_map_single(pdev,
+ skb->data + offset,
+ size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5072,7 +5080,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
frag = &skb_shinfo(skb)->frags[f];
len = min((unsigned int)frag->size, total);
- offset = 0;
+ offset = frag->page_offset;
while (len) {
i++;
@@ -5083,7 +5091,13 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = map[f] + offset;
+ tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ frag->page,
+ offset, size,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->mapped_as_page = true;
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
@@ -5100,6 +5114,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
tx_ring->tx_buffer_info[first].next_to_watch = i;
return count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+ /* clear timestamp and dma mappings for failed tx_buffer_info map */
+ tx_buffer_info->dma = 0;
+ tx_buffer_info->time_stamp = 0;
+ tx_buffer_info->next_to_watch = 0;
+ count--;
+
+ /* clear timestamp and dma mappings for remaining portion of packet */
+ while (count >= 0) {
+ count--;
+ i--;
+ if (i < 0)
+ i += tx_ring->count;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ return count;
}
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,