author		Christophe JAILLET <christophe.jaillet@wanadoo.fr>	2020-07-20 14:29:12 +0200
committer	David S. Miller <davem@davemloft.net>	2020-07-20 17:48:23 -0700
commit		73e283dfbf036f8830a0aaeb41036245c43245a4 (patch)
tree		f5aed4184f42e23af19294ba95339d99359b3066 /drivers/net/ethernet/packetengines/yellowfin.c
parent		arch, net: remove the last csum_partial_copy() leftovers (diff)
download	linux-73e283dfbf036f8830a0aaeb41036245c43245a4.tar.xz
		linux-73e283dfbf036f8830a0aaeb41036245c43245a4.zip
net: packetengines: switch from 'pci_' to 'dma_' API
The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below and has been
hand modified to replace GFP_ with a correct flag.
It has been compile tested.

When memory is allocated in 'hamachi_init_one()' (hamachi.c), GFP_KERNEL
can be used because it is a probe function and no lock is acquired.

When memory is allocated in 'yellowfin_init_one()' (yellowfin.c),
GFP_KERNEL can be used because it is a probe function and no lock is
acquired.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
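To make the mechanical change concrete, the coherent-allocation conversion in a probe path has the following shape (an illustrative sketch mirroring the 'yellowfin_init_one()' hunks below, not a verbatim excerpt):

	/* Before: the compat wrapper takes the pci_dev itself and hides
	 * the gfp_t -- it always allocates with GFP_ATOMIC internally. */
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);

	/* After: the generic DMA API takes the underlying struct device
	 * and an explicit gfp_t.  GFP_KERNEL is usable in a probe
	 * function because it may sleep and no lock is held. */
	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_cleardev;	/* unwind as the driver does */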
Diffstat (limited to 'drivers/net/ethernet/packetengines/yellowfin.c')
-rw-r--r--	drivers/net/ethernet/packetengines/yellowfin.c	83
1 file changed, 49 insertions(+), 34 deletions(-)
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 520779f05e1a..647a1431b359 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -434,19 +434,22 @@ static int yellowfin_init_one(struct pci_dev *pdev,
 	np->drv_flags = drv_flags;
 	np->base = ioaddr;

-	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+					GFP_KERNEL);
 	if (!ring_space)
 		goto err_out_cleardev;
 	np->tx_ring = ring_space;
 	np->tx_ring_dma = ring_dma;

-	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+					GFP_KERNEL);
 	if (!ring_space)
 		goto err_out_unmap_tx;
 	np->rx_ring = ring_space;
 	np->rx_ring_dma = ring_dma;

-	ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
+	ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE,
+					&ring_dma, GFP_KERNEL);
 	if (!ring_space)
 		goto err_out_unmap_rx;
 	np->tx_status = ring_space;
@@ -505,12 +508,14 @@ static int yellowfin_init_one(struct pci_dev *pdev,
 	return 0;

err_out_unmap_status:
-	pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
-		np->tx_status_dma);
+	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
+			  np->tx_status_dma);
err_out_unmap_rx:
-	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+			  np->rx_ring_dma);
err_out_unmap_tx:
-	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+			  np->tx_ring_dma);
err_out_cleardev:
 	pci_iounmap(pdev, ioaddr);
err_out_free_res:
@@ -740,8 +745,10 @@ static int yellowfin_init_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
-		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+		yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+								 skb->data,
+								 yp->rx_buf_sz,
+								 DMA_FROM_DEVICE));
 	}
 	if (i != RX_RING_SIZE) {
 		for (j = 0; j < i; j++)
@@ -831,8 +838,9 @@ static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 	yp->tx_skbuff[entry] = skb;

#ifdef NO_TXSTATS
-	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-		skb->data, len, PCI_DMA_TODEVICE));
+	yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+							     skb->data,
+							     len, DMA_TO_DEVICE));
 	yp->tx_ring[entry].result_status = 0;
 	if (entry >= TX_RING_SIZE-1) {
 		/* New stop command. */
@@ -847,8 +855,9 @@ static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 	yp->cur_tx++;
#else
 	yp->tx_ring[entry<<1].request_cnt = len;
-	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-		skb->data, len, PCI_DMA_TODEVICE));
+	yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+								skb->data,
+								len, DMA_TO_DEVICE));

 	/* The input_last (status-write) command is constant, but we must
 	   rewrite the subsequent 'stop' command. */
@@ -923,8 +932,9 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
 			dev->stats.tx_packets++;
 			dev->stats.tx_bytes += skb->len;
 			/* Free the original skb. */
-			pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
-				skb->len, PCI_DMA_TODEVICE);
+			dma_unmap_single(&yp->pci_dev->dev,
+					 le32_to_cpu(yp->tx_ring[entry].addr),
+					 skb->len, DMA_TO_DEVICE);
 			dev_consume_skb_irq(skb);
 			yp->tx_skbuff[entry] = NULL;
 		}
@@ -980,9 +990,9 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
 				dev->stats.tx_packets++;
 			}
 			/* Free the original skb. */
-			pci_unmap_single(yp->pci_dev,
-				yp->tx_ring[entry<<1].addr, skb->len,
-				PCI_DMA_TODEVICE);
+			dma_unmap_single(&yp->pci_dev->dev,
+					 yp->tx_ring[entry << 1].addr,
+					 skb->len, DMA_TO_DEVICE);
 			dev_consume_skb_irq(skb);
 			yp->tx_skbuff[entry] = 0;
 			/* Mark status as empty. */
@@ -1055,8 +1065,9 @@ static int yellowfin_rx(struct net_device *dev)

 		if(!desc->result_status)
 			break;
-		pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
-			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_cpu(&yp->pci_dev->dev,
+					le32_to_cpu(desc->addr),
+					yp->rx_buf_sz, DMA_FROM_DEVICE);
 		desc_status = le32_to_cpu(desc->result_status) >> 16;
 		buf_addr = rx_skb->data;
 		data_size = (le32_to_cpu(desc->dbdma_cmd) -
@@ -1121,10 +1132,10 @@ static int yellowfin_rx(struct net_device *dev)
 			   without copying to a properly sized skbuff. */
 			if (pkt_len > rx_copybreak) {
 				skb_put(skb = rx_skb, pkt_len);
-				pci_unmap_single(yp->pci_dev,
-					le32_to_cpu(yp->rx_ring[entry].addr),
-					yp->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
+				dma_unmap_single(&yp->pci_dev->dev,
+						 le32_to_cpu(yp->rx_ring[entry].addr),
+						 yp->rx_buf_sz,
+						 DMA_FROM_DEVICE);
 				yp->rx_skbuff[entry] = NULL;
 			} else {
 				skb = netdev_alloc_skb(dev, pkt_len + 2);
@@ -1133,10 +1144,10 @@ static int yellowfin_rx(struct net_device *dev)
 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
 				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
 				skb_put(skb, pkt_len);
-				pci_dma_sync_single_for_device(yp->pci_dev,
-					le32_to_cpu(desc->addr),
-					yp->rx_buf_sz,
-					PCI_DMA_FROMDEVICE);
+				dma_sync_single_for_device(&yp->pci_dev->dev,
+							   le32_to_cpu(desc->addr),
+							   yp->rx_buf_sz,
+							   DMA_FROM_DEVICE);
 			}
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);
@@ -1155,8 +1166,10 @@ static int yellowfin_rx(struct net_device *dev)
 				break;			/* Better luck next round. */
 			yp->rx_skbuff[entry] = skb;
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
-			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+			yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+									     skb->data,
+									     yp->rx_buf_sz,
+									     DMA_FROM_DEVICE));
 		}
 		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
 		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
@@ -1379,10 +1392,12 @@ static void yellowfin_remove_one(struct pci_dev *pdev)
 	BUG_ON(!dev);
 	np = netdev_priv(dev);

-	pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
-		np->tx_status_dma);
-	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
-	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
+			  np->tx_status_dma);
+	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+			  np->rx_ring_dma);
+	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+			  np->tx_ring_dma);
 	unregister_netdev (dev);
 	pci_iounmap(pdev, np->base);
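
For the streaming mappings converted above, the underlying pattern is the usual map/use/unmap pairing. A minimal sketch under the same API follows; note that the dma_mapping_error() check and the NETDEV_TX_BUSY recovery are common practice shown for illustration, not something this driver does:

	dma_addr_t mapping;

	/* Hand the skb data to the device for reading; the mapping can
	 * fail, e.g. behind an IOMMU, so check before using it. */
	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, mapping))
		return NETDEV_TX_BUSY;	/* hypothetical recovery policy */

	/* ... the device owns the buffer until TX completion ... */

	dma_unmap_single(&pdev->dev, mapping, len, DMA_TO_DEVICE);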