author     David Woodhouse <David.Woodhouse@intel.com>    2015-09-23 09:45:16 +0100
committer  David S. Miller <davem@davemloft.net>          2015-09-23 14:47:13 -0700
commit     7f4c685633e2df9ba10d49a31dda13715745db37 (patch)
tree       69c39a67bb822da7028d1d647bc1ff700a82e2f2 /drivers/net/ethernet/realtek/8139cp.c
parent     8139cp: Reduce duplicate csum/tso code in cp_start_xmit() (diff)
8139cp: Fix DMA unmapping of transmitted buffers
The low 16 bits of the 'opts1' field in the TX descriptor are supposed to
still contain the buffer length when the descriptor is handed back to us.
In practice, at least on my hardware, they don't. So stash the original
value of the opts1 field and get the length to unmap from there.

There are other ways we could have worked out the length, but I actually
want a stash of the opts1 field anyway so that I can dump it alongside the
contents of the descriptor ring when we suffer a TX timeout.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
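To illustrate the idea outside the driver, here is a minimal user-space sketch of the same pattern (hypothetical names and a simplified descriptor layout, not the kernel API): the transmit path stashes the opts1 word it programmed, and the completion path takes the unmap length from that stash rather than from the descriptor, whose low 16 bits the NIC may have rewritten by the time it is handed back.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4          /* hypothetical, stands in for CP_TX_RING_SIZE */

struct desc {                /* simplified stand-in for struct cp_desc */
	uint32_t opts1;      /* bit 31 = OWN, low 16 bits = buffer length */
	uint64_t addr;       /* DMA address of the mapped buffer */
};

static struct desc tx_ring[RING_SIZE];
static uint32_t tx_opts[RING_SIZE];  /* driver-side stash of opts1 */

static void queue_tx(unsigned int entry, uint64_t dma_addr, uint16_t len)
{
	uint32_t opts1 = (1u << 31) | len;   /* OWN | length */

	tx_ring[entry].addr  = dma_addr;
	tx_ring[entry].opts1 = opts1;
	tx_opts[entry] = opts1;              /* remember what we programmed */
}

static void complete_tx(unsigned int entry)
{
	/* Model the hardware clobbering the length bits on completion. */
	tx_ring[entry].opts1 &= ~0xffffu;

	/* Take the unmap length from the stash, not the descriptor. */
	unsigned int len = tx_opts[entry] & 0xffff;

	printf("entry %u: unmap %u bytes (descriptor now reports %u)\n",
	       entry, len, (unsigned int)(tx_ring[entry].opts1 & 0xffff));
}

int main(void)
{
	queue_tx(0, 0x1000, 1514);
	complete_tx(0);
	return 0;
}

Stashing the whole opts1 word, rather than only the length, also gives the driver something meaningful to print next to the descriptor ring when diagnosing a TX timeout, which is the second motivation given in the commit message.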
Diffstat (limited to 'drivers/net/ethernet/realtek/8139cp.c')
-rw-r--r--    drivers/net/ethernet/realtek/8139cp.c    9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index b3bd8b103593..ec6bd864b263 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -341,6 +341,7 @@ struct cp_private {
 	unsigned		tx_tail;
 	struct cp_desc		*tx_ring;
 	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
+	u32			tx_opts[CP_TX_RING_SIZE];
 	unsigned		rx_buf_sz;
 	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -665,7 +666,7 @@ static void cp_tx (struct cp_private *cp)
 		BUG_ON(!skb);
 		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
-				 le32_to_cpu(txd->opts1) & 0xffff,
+				 cp->tx_opts[tx_tail] & 0xffff,
 				 PCI_DMA_TODEVICE);
 		if (status & LastFrag) {
@@ -789,6 +790,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		wmb();
 		cp->tx_skb[entry] = skb;
+		cp->tx_opts[entry] = opts1;
 		netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
 			  entry, skb->len);
 	} else {
@@ -839,6 +841,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 			txd->opts1 = cpu_to_le32(ctrl);
 			wmb();
+
+			cp->tx_opts[entry] = ctrl;
 			cp->tx_skb[entry] = skb;
 		}
@@ -851,6 +855,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		txd->opts1 = cpu_to_le32(ctrl);
 		wmb();
+		cp->tx_opts[first_entry] = ctrl;
 		netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
 			  first_entry, entry, skb->len);
 	}
@@ -1093,6 +1098,7 @@ static int cp_init_rings (struct cp_private *cp)
 {
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
 	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
+	memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
 	cp_init_rings_index(cp);
@@ -1150,6 +1156,7 @@ static void cp_clean_rings (struct cp_private *cp)
 	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+	memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
 	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
 	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);