Diffstat (limited to 'drivers/net/ethernet/xilinx/xilinx_axienet_main.c')
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 113
1 file changed, 78 insertions(+), 35 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index e7469eb241ad..6ecd1bb5f81d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -153,6 +153,25 @@ static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
axienet_dma_out32(lp, reg, lower_32_bits(addr));
}
+static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
+ struct axidma_bd *desc)
+{
+ desc->phys = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ desc->phys_msb = upper_32_bits(addr);
+}
+
+static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
+ struct axidma_bd *desc)
+{
+ dma_addr_t ret = desc->phys;
+
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
+
+ return ret;
+}
+
/**
* axienet_dma_bd_release - Release buffer descriptor rings
* @ndev: Pointer to the net_device structure
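A minimal standalone sketch (plain userspace C, not driver code) of what these two helpers do: the DMA address is split into two 32-bit descriptor words and later recombined. The double 16-bit shift mirrors desc_get_phys_addr() above; it avoids a single shift by 32, which would be undefined behaviour on builds where dma_addr_t is only 32 bits wide.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* assume a 64-bit capable build */

int main(void)
{
	dma_addr_t addr = 0x123456789abcdef0ULL;
	uint32_t lo = (uint32_t)addr;		/* lower_32_bits() */
	uint32_t hi = (uint32_t)(addr >> 32);	/* upper_32_bits() */

	/* recombine exactly as desc_get_phys_addr() does */
	dma_addr_t back = lo;
	back |= ((dma_addr_t)hi << 16) << 16;

	printf("lo=0x%08x hi=0x%08x rejoined=0x%llx\n",
	       lo, hi, (unsigned long long)back);
	return back == addr ? 0 : 1;
}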
@@ -176,6 +195,8 @@ static void axienet_dma_bd_release(struct net_device *ndev)
return;
for (i = 0; i < lp->rx_bd_num; i++) {
+ dma_addr_t phys;
+
/* A NULL skb means this descriptor has not been initialised
* at all.
*/
@@ -188,9 +209,11 @@ static void axienet_dma_bd_release(struct net_device *ndev)
* descriptor size, after it had been successfully allocated.
* So a non-zero value in there means we need to unmap it.
*/
- if (lp->rx_bd_v[i].cntrl)
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+ if (lp->rx_bd_v[i].cntrl) {
+ phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
+ dma_unmap_single(ndev->dev.parent, phys,
lp->max_frm_size, DMA_FROM_DEVICE);
+ }
}
dma_free_coherent(ndev->dev.parent,
@@ -235,29 +258,36 @@ static int axienet_dma_bd_init(struct net_device *ndev)
goto out;
for (i = 0; i < lp->tx_bd_num; i++) {
- lp->tx_bd_v[i].next = lp->tx_bd_p +
- sizeof(*lp->tx_bd_v) *
- ((i + 1) % lp->tx_bd_num);
+ dma_addr_t addr = lp->tx_bd_p +
+ sizeof(*lp->tx_bd_v) *
+ ((i + 1) % lp->tx_bd_num);
+
+ lp->tx_bd_v[i].next = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
}
for (i = 0; i < lp->rx_bd_num; i++) {
- lp->rx_bd_v[i].next = lp->rx_bd_p +
- sizeof(*lp->rx_bd_v) *
- ((i + 1) % lp->rx_bd_num);
+ dma_addr_t addr;
+
+ addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
+ ((i + 1) % lp->rx_bd_num);
+ lp->rx_bd_v[i].next = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
if (!skb)
goto out;
lp->rx_bd_v[i].skb = skb;
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
- skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys)) {
+ addr = dma_map_single(ndev->dev.parent, skb->data,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, addr)) {
netdev_err(ndev, "DMA mapping error\n");
goto out;
}
+ desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
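The two loops above chain each descriptor's next (and, when XAE_FEATURE_DMA_64BIT is set, next_msb) to the bus address of its successor, with the last entry wrapping back to the first. A small illustrative sketch of that ring arithmetic; RING_SIZE and BD_SIZE are made-up values, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4
#define BD_SIZE   64	/* assumed descriptor footprint */

int main(void)
{
	uint64_t ring_base = 0x80000000ULL;	/* hypothetical bus address */

	for (int i = 0; i < RING_SIZE; i++) {
		/* descriptor i points at descriptor (i + 1) % RING_SIZE */
		uint64_t next = ring_base + BD_SIZE * ((i + 1) % RING_SIZE);
		printf("bd[%d].next = 0x%08x, next_msb = 0x%08x\n",
		       i, (uint32_t)next, (uint32_t)(next >> 32));
	}
	return 0;
}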
@@ -574,6 +604,7 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
struct axidma_bd *cur_p;
int max_bds = nr_bds;
unsigned int status;
+ dma_addr_t phys;
int i;
if (max_bds == -1)
@@ -589,9 +620,10 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
break;
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(ndev->dev.parent, phys,
+ (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
dev_consume_skb_irq(cur_p->skb);
@@ -687,7 +719,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 csum_start_off;
u32 csum_index_off;
skb_frag_t *frag;
- dma_addr_t tail_p;
+ dma_addr_t tail_p, phys;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
u32 orig_tail_ptr = lp->tx_bd_tail;
@@ -726,14 +758,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+ phys = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
for (ii = 0; ii < num_frag; ii++) {
@@ -741,11 +774,11 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
- cur_p->phys = dma_map_single(ndev->dev.parent,
- skb_frag_address(frag),
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+ phys = dma_map_single(ndev->dev.parent,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
@@ -755,6 +788,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_frag_size(frag);
}
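Both the head and the fragment paths now follow the same ordering: map the buffer, check dma_mapping_error(), and only then publish the address into the descriptor via desc_set_phys_addr(), so a failed mapping never leaves a stale address in the ring. A userspace mock of that ordering; every name below is illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
#define MOCK_MAPPING_ERROR ((dma_addr_t)~0ULL)

struct bd { uint32_t phys, phys_msb; };

/* stand-in for dma_map_single(); fails on demand */
static dma_addr_t mock_map(const void *buf, int fail)
{
	return fail ? MOCK_MAPPING_ERROR : (dma_addr_t)(uintptr_t)buf;
}

/* stand-in for desc_set_phys_addr() */
static void publish(struct bd *d, dma_addr_t a)
{
	d->phys = (uint32_t)a;
	d->phys_msb = (uint32_t)(a >> 32);
}

int main(void)
{
	struct bd d = { 0, 0 };
	char buf[64];
	dma_addr_t phys = mock_map(buf, 0);

	if (phys == MOCK_MAPPING_ERROR)
		return 1;	/* drop the packet; descriptor untouched */
	publish(&d, phys);	/* only a verified address reaches the ring */
	printf("bd.phys=0x%08x bd.phys_msb=0x%08x\n", d.phys, d.phys_msb);
	return 0;
}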
@@ -793,10 +827,12 @@ static void axienet_recv(struct net_device *ndev)
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ dma_addr_t phys;
+
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- lp->max_frm_size,
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
DMA_FROM_DEVICE);
skb = cur_p->skb;
@@ -832,15 +868,16 @@ static void axienet_recv(struct net_device *ndev)
if (!new_skb)
return;
- cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+ phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
if (net_ratelimit())
netdev_err(ndev, "RX DMA mapping error\n");
dev_kfree_skb(new_skb);
return;
}
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
@@ -885,7 +922,8 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
(lp->tx_bd_v[lp->tx_bd_ci]).phys);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -934,7 +972,8 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+ dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
(lp->rx_bd_v[lp->rx_bd_ci]).phys);
cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
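The updated format string in both error paths pads the low word with %08x; without the zero padding, the two concatenated halves would be ambiguous. A quick standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int msb = 0x1, lsb = 0x2345;

	printf("wrong: 0x%x%x\n", msb, lsb);	/* prints 0x12345 */
	printf("right: 0x%x%08x\n", msb, lsb);	/* prints 0x100002345 */
	return 0;
}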
@@ -1616,14 +1655,18 @@ static void axienet_dma_err_handler(struct work_struct *work)
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
- if (cur_p->cntrl)
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ if (cur_p->cntrl) {
+ dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
+
+ dma_unmap_single(ndev->dev.parent, addr,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
+ }
if (cur_p->skb)
dev_kfree_skb_irq(cur_p->skb);
cur_p->phys = 0;
+ cur_p->phys_msb = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
cur_p->app0 = 0;