Diffstat (limited to 'drivers/net/ethernet/freescale/fec_main.c')
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 130
1 file changed, 69 insertions(+), 61 deletions(-)
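
The patch converts every direct access to the FEC buffer descriptor fields (cbd_sc, cbd_datlen, cbd_bufaddr, cbd_esc, ts) into endian-aware accessors, because the descriptors are kept in device byte order: big-endian on ColdFire parts, little-endian on the ARM-based i.MX SoCs. The fec16/fec32 helpers themselves are defined in fec.h and are not part of this diff; the lines below are only a minimal sketch of the usual pattern, with the config gate and the sparse-annotated __fec16/__fec32 typedef names assumed for illustration, not taken from the patch.

    /* Illustrative sketch only -- the real helpers live in
     * drivers/net/ethernet/freescale/fec.h and may differ in detail.
     * Descriptors are big-endian on ColdFire, little-endian elsewhere.
     */
    #include <linux/types.h>
    #include <asm/byteorder.h>

    typedef __u16 __bitwise __fec16;   /* assumed sparse annotations */
    typedef __u32 __bitwise __fec32;

    #if defined(CONFIG_COLDFIRE)       /* assumed config gate */
    #define cpu_to_fec16(x) ((__force __fec16)cpu_to_be16(x))
    #define cpu_to_fec32(x) ((__force __fec32)cpu_to_be32(x))
    #define fec16_to_cpu(x) be16_to_cpu((__force __be16)(x))
    #define fec32_to_cpu(x) be32_to_cpu((__force __be32)(x))
    #else
    #define cpu_to_fec16(x) ((__force __fec16)cpu_to_le16(x))
    #define cpu_to_fec32(x) ((__force __fec32)cpu_to_le32(x))
    #define fec16_to_cpu(x) le16_to_cpu((__force __le16)(x))
    #define fec32_to_cpu(x) le32_to_cpu((__force __le32)(x))
    #endif

With accessors of this shape, each descriptor word stays in device byte order in memory and is converted only at the point of use, which is exactly what the hunks below do for both the Tx and Rx paths.
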
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 502da6f48f95..3f3b9eacc286 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -332,11 +332,13 @@ static void fec_dump(struct net_device *ndev)
bdp = txq->tx_bd_base;
do {
- pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
+ pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
index,
bdp == txq->cur_tx ? 'S' : ' ',
bdp == txq->dirty_tx ? 'H' : ' ',
- bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
+ fec16_to_cpu(bdp->cbd_sc),
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
txq->tx_skbuff[index]);
bdp = fec_enet_get_nextdesc(bdp, fep, 0);
index++;
@@ -389,7 +391,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
bdp = fec_enet_get_nextdesc(bdp, fep, queue);
ebdp = (struct bufdesc_ex *)bdp;
- status = bdp->cbd_sc;
+ status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
frag_len = skb_shinfo(skb)->frags[frag].size;
@@ -411,7 +413,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
- ebdp->cbd_esc = estatus;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
}
bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
@@ -435,9 +437,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
goto dma_mapping_error;
}
- bdp->cbd_bufaddr = addr;
- bdp->cbd_datlen = frag_len;
- bdp->cbd_sc = status;
+ bdp->cbd_bufaddr = cpu_to_fec32(addr);
+ bdp->cbd_datlen = cpu_to_fec16(frag_len);
+ bdp->cbd_sc = cpu_to_fec16(status);
}
return bdp;
@@ -445,8 +447,8 @@ dma_mapping_error:
bdp = txq->cur_tx;
for (i = 0; i < frag; i++) {
bdp = fec_enet_get_nextdesc(bdp, fep, queue);
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- bdp->cbd_datlen, DMA_TO_DEVICE);
+ dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
}
return ERR_PTR(-ENOMEM);
}
@@ -483,7 +485,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
/* Fill in a Tx ring entry */
bdp = txq->cur_tx;
last_bdp = bdp;
- status = bdp->cbd_sc;
+ status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
/* Set buffer length and buffer pointer */
@@ -539,21 +541,21 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
- ebdp->cbd_esc = estatus;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
}
index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
/* Save skb pointer */
txq->tx_skbuff[index] = skb;
- bdp->cbd_datlen = buflen;
- bdp->cbd_bufaddr = addr;
+ bdp->cbd_datlen = cpu_to_fec16(buflen);
+ bdp->cbd_bufaddr = cpu_to_fec32(addr);
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
*/
status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
- bdp->cbd_sc = status;
+ bdp->cbd_sc = cpu_to_fec16(status);
/* If this was the last BD in the ring, start at the beginning again. */
bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
@@ -585,7 +587,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
unsigned int estatus = 0;
dma_addr_t addr;
- status = bdp->cbd_sc;
+ status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@@ -607,8 +609,8 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- bdp->cbd_datlen = size;
- bdp->cbd_bufaddr = addr;
+ bdp->cbd_datlen = cpu_to_fec16(size);
+ bdp->cbd_bufaddr = cpu_to_fec32(addr);
if (fep->bufdesc_ex) {
if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -616,7 +618,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
- ebdp->cbd_esc = estatus;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
}
/* Handle the last BD specially */
@@ -625,10 +627,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
if (is_last) {
status |= BD_ENET_TX_INTR;
if (fep->bufdesc_ex)
- ebdp->cbd_esc |= BD_ENET_TX_INT;
+ ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
}
- bdp->cbd_sc = status;
+ bdp->cbd_sc = cpu_to_fec16(status);
return 0;
}
@@ -647,7 +649,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
unsigned short status;
unsigned int estatus = 0;
- status = bdp->cbd_sc;
+ status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@@ -671,8 +673,8 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
}
}
- bdp->cbd_bufaddr = dmabuf;
- bdp->cbd_datlen = hdr_len;
+ bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
+ bdp->cbd_datlen = cpu_to_fec16(hdr_len);
if (fep->bufdesc_ex) {
if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -680,10 +682,10 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
- ebdp->cbd_esc = estatus;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
}
- bdp->cbd_sc = status;
+ bdp->cbd_sc = cpu_to_fec16(status);
return 0;
}
@@ -823,15 +825,15 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Initialize the BD for every fragment in the page. */
if (bdp->cbd_bufaddr)
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
else
- bdp->cbd_sc = 0;
+ bdp->cbd_sc = cpu_to_fec16(0);
bdp = fec_enet_get_nextdesc(bdp, fep, q);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep, q);
- bdp->cbd_sc |= BD_SC_WRAP;
+ bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
rxq->cur_rx = rxq->rx_bd_base;
}
@@ -844,18 +846,18 @@ static void fec_enet_bd_init(struct net_device *dev)
for (i = 0; i < txq->tx_ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
+ bdp->cbd_sc = cpu_to_fec16(0);
if (txq->tx_skbuff[i]) {
dev_kfree_skb_any(txq->tx_skbuff[i]);
txq->tx_skbuff[i] = NULL;
}
- bdp->cbd_bufaddr = 0;
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
bdp = fec_enet_get_nextdesc(bdp, fep, q);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep, q);
- bdp->cbd_sc |= BD_SC_WRAP;
+ bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
txq->dirty_tx = bdp;
}
}
@@ -947,8 +949,10 @@ fec_restart(struct net_device *ndev)
*/
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
- writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
- writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+ writel((__force u32)cpu_to_be32(temp_mac[0]),
+ fep->hwp + FEC_ADDR_LOW);
+ writel((__force u32)cpu_to_be32(temp_mac[1]),
+ fep->hwp + FEC_ADDR_HIGH);
}
/* Clear any outstanding interrupt. */
@@ -1222,7 +1226,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
while (bdp != READ_ONCE(txq->cur_tx)) {
/* Order the load of cur_tx and cbd_sc */
rmb();
- status = READ_ONCE(bdp->cbd_sc);
+ status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
if (status & BD_ENET_TX_READY)
break;
@@ -1230,10 +1234,12 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
skb = txq->tx_skbuff[index];
txq->tx_skbuff[index] = NULL;
- if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- bdp->cbd_datlen, DMA_TO_DEVICE);
- bdp->cbd_bufaddr = 0;
+ if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ fec16_to_cpu(bdp->cbd_datlen),
+ DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
if (!skb) {
bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
continue;
@@ -1264,7 +1270,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
struct skb_shared_hwtstamps shhwtstamps;
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
+ fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
skb_tstamp_tx(skb, &shhwtstamps);
}
@@ -1324,10 +1330,8 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
if (off)
skb_reserve(skb, fep->rx_align + 1 - off);
- bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+ bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
+ if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
if (net_ratelimit())
netdev_err(ndev, "Rx DMA memory map failed\n");
return -ENOMEM;
@@ -1349,7 +1353,8 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
if (!new_skb)
return false;
- dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+ dma_sync_single_for_cpu(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE);
if (!swap)
@@ -1396,7 +1401,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
*/
bdp = rxq->cur_rx;
- while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+ while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
if (pkt_received >= budget)
break;
@@ -1438,7 +1443,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
/* Process the incoming frame. */
ndev->stats.rx_packets++;
- pkt_len = bdp->cbd_datlen;
+ pkt_len = fec16_to_cpu(bdp->cbd_datlen);
ndev->stats.rx_bytes += pkt_len;
index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
@@ -1456,7 +1461,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ndev->stats.rx_dropped++;
goto rx_processing_done;
}
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ dma_unmap_single(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE);
}
@@ -1475,7 +1481,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
/* If this is a VLAN packet remove the VLAN Tag */
vlan_packet_rcvd = false;
if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
+ fep->bufdesc_ex &&
+ (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
/* Push and remove the vlan tag */
struct vlan_hdr *vlan_header =
(struct vlan_hdr *) (data + ETH_HLEN);
@@ -1491,12 +1498,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
/* Get receive timestamp from the skb */
if (fep->hwts_rx_en && fep->bufdesc_ex)
- fec_enet_hwtstamp(fep, ebdp->ts,
+ fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
skb_hwtstamps(skb));
if (fep->bufdesc_ex &&
(fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
- if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+ if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
/* don't check it */
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
@@ -1513,7 +1520,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
napi_gro_receive(&fep->napi, skb);
if (is_copybreak) {
- dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
+ dma_sync_single_for_device(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE);
} else {
@@ -1527,12 +1535,12 @@ rx_processing_done:
/* Mark the buffer empty */
status |= BD_ENET_RX_EMPTY;
- bdp->cbd_sc = status;
+ bdp->cbd_sc = cpu_to_fec16(status);
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_esc = BD_ENET_RX_INT;
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
ebdp->cbd_prot = 0;
ebdp->cbd_bdu = 0;
}
@@ -2662,7 +2670,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
rxq->rx_skbuff[i] = NULL;
if (skb) {
dma_unmap_single(&fep->pdev->dev,
- bdp->cbd_bufaddr,
+ fec32_to_cpu(bdp->cbd_bufaddr),
FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE);
dev_kfree_skb(skb);
@@ -2777,11 +2785,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
}
rxq->rx_skbuff[i] = skb;
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_esc = BD_ENET_RX_INT;
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
}
bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2789,7 +2797,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, fep, queue);
- bdp->cbd_sc |= BD_SC_WRAP;
+ bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
return 0;
err_alloc:
@@ -2812,12 +2820,12 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
if (!txq->tx_bounce[i])
goto err_alloc;
- bdp->cbd_sc = 0;
- bdp->cbd_bufaddr = 0;
+ bdp->cbd_sc = cpu_to_fec16(0);
+ bdp->cbd_bufaddr = cpu_to_fec32(0);
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_esc = BD_ENET_TX_INT;
+ ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
}
bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2825,7 +2833,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, fep, queue);
- bdp->cbd_sc |= BD_SC_WRAP;
+ bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
return 0;