Diffstat (limited to 'drivers/net/ethernet/xilinx/xilinx_axienet_main.c')
-rw-r--r--	drivers/net/ethernet/xilinx/xilinx_axienet_main.c	1397
1 file changed, 856 insertions(+), 541 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 20746b801959..d1d772580da9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -7,7 +7,7 @@
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
- * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
+ * Copyright (c) 2019 - 2022 Calian Advanced Technologies
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*
* This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
@@ -33,7 +33,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
+#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
@@ -41,8 +41,9 @@
#include "xilinx_axienet.h"
/* Descriptors defines for Tx and Rx DMA */
-#define TX_BD_NUM_DEFAULT 64
+#define TX_BD_NUM_DEFAULT 128
#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX 4096
#define RX_BD_NUM_MAX 4096
@@ -132,19 +133,23 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
return ioread32(lp->dma_regs + reg);
}
-/**
- * axienet_dma_out32 - Memory mapped Axi DMA register write.
- * @lp: Pointer to axienet local structure
- * @reg: Address offset from the base address of the Axi DMA core
- * @value: Value to be written into the Axi DMA register
- *
- * This function writes the desired value into the corresponding Axi DMA
- * register.
- */
-static inline void axienet_dma_out32(struct axienet_local *lp,
- off_t reg, u32 value)
+static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
+ struct axidma_bd *desc)
+{
+ desc->phys = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ desc->phys_msb = upper_32_bits(addr);
+}
+
+static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
+ struct axidma_bd *desc)
{
- iowrite32(value, lp->dma_regs + reg);
+ dma_addr_t ret = desc->phys;
+
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
+
+ return ret;
}
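The two-step shift when reassembling the address is deliberate: on builds where dma_addr_t is only 32 bits wide, a single shift by 32 would be undefined behaviour, while ((x << 16) << 16) degrades cleanly to zero. A minimal user-space sketch of the same composition (compose_addr and the main() harness are illustrative names, not from the driver):

#include <stdint.h>
#include <stdio.h>

/* Compose a bus address from two 32-bit descriptor halves. Shifting in
 * two 16-bit steps stays well defined even if the destination type were
 * only 32 bits wide; a single << 32 would not be.
 */
static uint64_t compose_addr(uint32_t lsb, uint32_t msb)
{
	uint64_t ret = lsb;

	ret |= ((uint64_t)msb << 16) << 16;
	return ret;
}

int main(void)
{
	/* Prints 1deadbeef: MSB half 0x1, LSB half 0xdeadbeef. */
	printf("%llx\n", (unsigned long long)compose_addr(0xdeadbeef, 0x1));
	return 0;
}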
/**
@@ -160,24 +165,111 @@ static void axienet_dma_bd_release(struct net_device *ndev)
int i;
struct axienet_local *lp = netdev_priv(ndev);
+ /* If we end up here, tx_bd_v must have been DMA allocated. */
+ dma_free_coherent(lp->dev,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+ lp->tx_bd_v,
+ lp->tx_bd_p);
+
+ if (!lp->rx_bd_v)
+ return;
+
for (i = 0; i < lp->rx_bd_num; i++) {
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
- lp->max_frm_size, DMA_FROM_DEVICE);
+ dma_addr_t phys;
+
+ /* A NULL skb means this descriptor has not been initialised
+ * at all.
+ */
+ if (!lp->rx_bd_v[i].skb)
+ break;
+
dev_kfree_skb(lp->rx_bd_v[i].skb);
- }
- if (lp->rx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
- lp->rx_bd_v,
- lp->rx_bd_p);
- }
- if (lp->tx_bd_v) {
- dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
- lp->tx_bd_v,
- lp->tx_bd_p);
+ /* For each descriptor, we programmed cntrl with the (non-zero)
+ * descriptor size, after it had been successfully allocated.
+ * So a non-zero value in there means we need to unmap it.
+ */
+ if (lp->rx_bd_v[i].cntrl) {
+ phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
+ dma_unmap_single(lp->dev, phys,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ }
}
+
+ dma_free_coherent(lp->dev,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+ lp->rx_bd_v,
+ lp->rx_bd_p);
+}
+
+/**
+ * axienet_usec_to_timer - Calculate IRQ delay timer value
+ * @lp: Pointer to the axienet_local structure
+ * @coalesce_usec: Microseconds to convert into timer value
+ */
+static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
+{
+ u32 result;
+ u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
+
+ if (lp->axi_clk)
+ clk_rate = clk_get_rate(lp->axi_clk);
+
+ /* 1 Timeout Interval = 125 * (clock period of SG clock) */
+ result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
+ (u64)125000000);
+ if (result > 255)
+ result = 255;
+
+ return result;
+}
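Worked through with the 125 MHz fallback: one timeout interval is 125 clock periods, i.e. 1 usec, so the programmed value equals the requested microseconds, clamped to the 8-bit timer field. A standalone sketch of the same arithmetic (div_round_closest stands in for DIV64_U64_ROUND_CLOSEST; the test values are made up):

#include <stdint.h>
#include <stdio.h>

/* Unsigned round-to-nearest division. */
static uint64_t div_round_closest(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

static uint32_t usec_to_timer(uint32_t usec, uint64_t clk_rate)
{
	uint64_t result = div_round_closest((uint64_t)usec * clk_rate,
					    125000000ULL);

	return result > 255 ? 255 : (uint32_t)result;
}

int main(void)
{
	printf("%u\n", usec_to_timer(50, 125000000));   /* 50: 1 usec/interval */
	printf("%u\n", usec_to_timer(50, 250000000));   /* 100: intervals halve */
	printf("%u\n", usec_to_timer(1000, 125000000)); /* 255: clamped */
	return 0;
}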
+
+/**
+ * axienet_dma_start - Set up DMA registers and start DMA operation
+ * @lp: Pointer to the axienet_local structure
+ */
+static void axienet_dma_start(struct axienet_local *lp)
+{
+ /* Start updating the Rx channel control register */
+ lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
+ XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (lp->coalesce_count_rx > 1)
+ lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
+ << XAXIDMA_DELAY_SHIFT) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+
+ /* Start updating the Tx channel control register */
+ lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
+ XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (lp->coalesce_count_tx > 1)
+ lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
+ << XAXIDMA_DELAY_SHIFT) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * The Tx channel is now ready to run, but it will only start
+ * transmitting once the tail pointer register is written.
+ */
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}
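For reference, the cached *_dma_cr values pack the frame-count threshold and the delay timer into separate byte fields next to the interrupt-enable bits. A hedged sketch of that composition; the bit positions are assumptions modelled on the XAXIDMA_* definitions this driver uses (IOC/delay/error enables around bits 12-14, count in bits 23:16, timer in bits 31:24):

#include <stdint.h>

#define CR_RUNSTOP	(1u << 0)	/* assumed XAXIDMA_CR_RUNSTOP_MASK */
#define IRQ_IOC		(1u << 12)	/* assumed XAXIDMA_IRQ_IOC_MASK */
#define IRQ_DELAY	(1u << 13)	/* assumed XAXIDMA_IRQ_DELAY_MASK */
#define IRQ_ERROR	(1u << 14)	/* assumed XAXIDMA_IRQ_ERROR_MASK */
#define COALESCE_SHIFT	16		/* assumed XAXIDMA_COALESCE_SHIFT */
#define DELAY_SHIFT	24		/* assumed XAXIDMA_DELAY_SHIFT */

/* Mirror of the composition above: IOC and error IRQs always enabled,
 * the delay IRQ only when more than one frame is coalesced, and the
 * run/stop bit OR'd in when the channel is started.
 */
static uint32_t build_dma_cr(uint32_t frames, uint32_t timer)
{
	uint32_t cr = (frames << COALESCE_SHIFT) | IRQ_IOC | IRQ_ERROR;

	if (frames > 1)
		cr |= (timer << DELAY_SHIFT) | IRQ_DELAY;
	return cr | CR_RUNSTOP;
}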
/**
@@ -192,7 +284,6 @@ static void axienet_dma_bd_release(struct net_device *ndev)
*/
static int axienet_dma_bd_init(struct net_device *ndev)
{
- u32 cr;
int i;
struct sk_buff *skb;
struct axienet_local *lp = netdev_priv(ndev);
@@ -203,85 +294,54 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ lp->tx_bd_v = dma_alloc_coherent(lp->dev,
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
- goto out;
+ return -ENOMEM;
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ lp->rx_bd_v = dma_alloc_coherent(lp->dev,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
for (i = 0; i < lp->tx_bd_num; i++) {
- lp->tx_bd_v[i].next = lp->tx_bd_p +
- sizeof(*lp->tx_bd_v) *
- ((i + 1) % lp->tx_bd_num);
+ dma_addr_t addr = lp->tx_bd_p +
+ sizeof(*lp->tx_bd_v) *
+ ((i + 1) % lp->tx_bd_num);
+
+ lp->tx_bd_v[i].next = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
}
for (i = 0; i < lp->rx_bd_num; i++) {
- lp->rx_bd_v[i].next = lp->rx_bd_p +
- sizeof(*lp->rx_bd_v) *
- ((i + 1) % lp->rx_bd_num);
+ dma_addr_t addr;
+
+ addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
+ ((i + 1) % lp->rx_bd_num);
+ lp->rx_bd_v[i].next = lower_32_bits(addr);
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
if (!skb)
goto out;
lp->rx_bd_v[i].skb = skb;
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
- skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ addr = dma_map_single(lp->dev, skb->data,
+ lp->max_frm_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(lp->dev, addr)) {
+ netdev_err(ndev, "DMA mapping error\n");
+ goto out;
+ }
+ desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
+
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting.
- */
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_start(lp);
return 0;
out:
@@ -303,7 +363,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (address)
- memcpy(ndev->dev_addr, address, ETH_ALEN);
+ eth_hw_addr_set(ndev, address);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
@@ -437,9 +497,11 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
-static void __axienet_device_reset(struct axienet_local *lp)
+static int __axienet_device_reset(struct axienet_local *lp)
{
- u32 timeout;
+ u32 value;
+ int ret;
+
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
@@ -448,16 +510,64 @@ static void __axienet_device_reset(struct axienet_local *lp)
* they both reset the entire DMA core, so only one needs to be used.
*/
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
- timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
- XAXIDMA_CR_RESET_MASK) {
- udelay(1);
- if (--timeout == 0) {
- netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
- __func__);
- break;
- }
+ ret = read_poll_timeout(axienet_dma_in32, value,
+ !(value & XAXIDMA_CR_RESET_MASK),
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAXIDMA_TX_CR_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
+ return ret;
}
+
+ /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
+ ret = read_poll_timeout(axienet_ior, value,
+ value & XAE_INT_PHYRSTCMPLT_MASK,
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAE_IS_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
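For readers unfamiliar with the helper: read_poll_timeout(op, val, cond, sleep_us, timeout_us, sleep_before_read, args...) from linux/iopoll.h repeatedly evaluates val = op(args...) until cond becomes true or timeout_us microseconds elapse, sleeping sleep_us between reads, and returns 0 on success or -ETIMEDOUT. So each call above re-reads its register roughly every millisecond for up to 50 ms (assuming DELAY_OF_ONE_MILLISEC is the poll interval in microseconds), replacing the open-coded udelay() loop this hunk removes.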
+
+/**
+ * axienet_dma_stop - Stop DMA operation
+ * @lp: Pointer to the axienet_local structure
+ */
+static void axienet_dma_stop(struct axienet_local *lp)
+{
+ int count;
+ u32 cr, sr;
+
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ synchronize_irq(lp->rx_irq);
+
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ synchronize_irq(lp->tx_irq);
+
+ /* Give DMAs a chance to halt gracefully */
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ }
+
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ }
+
+ /* Do a reset to ensure DMA is really stopped */
+ axienet_lock_mii(lp);
+ __axienet_device_reset(lp);
+ axienet_unlock_mii(lp);
}
/**
@@ -467,23 +577,27 @@ static void __axienet_device_reset(struct axienet_local *lp)
* This function is called to reset and initialize the Axi Ethernet core. This
* is typically called during initialization. It does a reset of the Axi DMA
* Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
- * areconnected to Axi Ethernet reset lines, this in turn resets the Axi
+ * are connected to Axi Ethernet reset lines, this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
+ * Returns 0 on success or a negative error number otherwise.
*/
-static void axienet_device_reset(struct net_device *ndev)
+static int axienet_device_reset(struct net_device *ndev)
{
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
+ int ret;
- __axienet_device_reset(lp);
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ return ret;
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
lp->options &= (~XAE_OPTION_JUMBO);
if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU)) {
+ (ndev->mtu <= XAE_JUMBO_MTU)) {
lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
XAE_TRL_SIZE;
@@ -491,9 +605,11 @@ static void axienet_device_reset(struct net_device *ndev)
lp->options |= XAE_OPTION_JUMBO;
}
- if (axienet_dma_bd_init(ndev)) {
+ ret = axienet_dma_bd_init(ndev);
+ if (ret) {
netdev_err(ndev, "%s: descriptor allocation failed\n",
__func__);
+ return ret;
}
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -518,59 +634,67 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_setoptions(ndev, lp->options);
netif_trans_update(ndev);
+
+ return 0;
}
/**
- * axienet_start_xmit_done - Invoked once a transmit is completed by the
- * Axi DMA Tx channel.
- * @ndev: Pointer to the net_device structure
+ * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
+ * @lp: Pointer to the axienet_local structure
+ * @first_bd: Index of first descriptor to clean up
+ * @nr_bds: Max number of descriptors to clean up
+ * @force: Whether to clean descriptors even if not complete
+ * @sizep: Pointer to a u32 filled with the total sum of all bytes
+ * in all cleaned-up descriptors. Ignored if NULL.
+ * @budget: NAPI budget (use 0 when not called from NAPI poll)
*
- * This function is invoked from the Axi DMA Tx isr to notify the completion
- * of transmit operation. It clears fields in the corresponding Tx BDs and
- * unmaps the corresponding buffer so that CPU can regain ownership of the
- * buffer. It finally invokes "netif_wake_queue" to restart transmission if
- * required.
+ * Called either after a successful transmit operation, or after an error
+ * when setting up the chain.
+ * Returns the number of descriptors handled.
*/
-static void axienet_start_xmit_done(struct net_device *ndev)
+static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
+ int nr_bds, bool force, u32 *sizep, int budget)
{
- u32 size = 0;
- u32 packets = 0;
- struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
- unsigned int status = 0;
-
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- status = cur_p->status;
- while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
- DMA_TO_DEVICE);
- if (cur_p->skb)
- dev_consume_skb_irq(cur_p->skb);
- /*cur_p->phys = 0;*/
+ unsigned int status;
+ dma_addr_t phys;
+ int i;
+
+ for (i = 0; i < nr_bds; i++) {
+ cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
+ status = cur_p->status;
+
+ /* If force is not specified, clean up only descriptors
+ * that have been completed by the MAC.
+ */
+ if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ break;
+
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(lp->dev, phys,
+ (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+ DMA_TO_DEVICE);
+
+ if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ napi_consume_skb(cur_p->skb, budget);
+
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
- cur_p->status = 0;
cur_p->skb = NULL;
+ /* ensure our transmit path and device don't prematurely see status cleared */
+ wmb();
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
- size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
- packets++;
-
- if (++lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- status = cur_p->status;
+ if (sizep)
+ *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
}
- ndev->stats.tx_packets += packets;
- ndev->stats.tx_bytes += size;
-
- /* Matches barrier in axienet_start_xmit */
- smp_mb();
-
- netif_wake_queue(ndev);
+ return i;
}
/**
@@ -584,19 +708,73 @@ static void axienet_start_xmit_done(struct net_device *ndev)
* This function is invoked before BDs are allocated and transmission starts.
* This function returns 0 if a BD or group of BDs can be allocated for
* transmission. If the BD or any of the BDs are not free the function
- * returns a busy status. This is invoked from axienet_start_xmit.
+ * returns a busy status.
*/
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
int num_frag)
{
struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
- if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
+
+ /* Ensure we see all descriptor updates from device or TX polling */
+ rmb();
+ cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
+ lp->tx_bd_num];
+ if (cur_p->cntrl)
return NETDEV_TX_BUSY;
return 0;
}
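A worked example of the probe, assuming the common MAX_SKB_FRAGS of 17: on a 128-entry ring with the tail at 125, reserving 17 + 1 = 18 slots inspects descriptor (125 + 18) % 128 = 15; a non-zero cntrl there means the hardware has not released that slot yet, since the reclaim path clears cntrl only after a descriptor completes. The same check as a tiny self-contained helper (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* True if the slot "needed" entries past the tail is still owned by
 * hardware, i.e. its cntrl word has not been cleared by reclaim.
 */
static bool tx_ring_full(const uint32_t *cntrl, uint32_t ring_size,
			 uint32_t tail, uint32_t needed)
{
	return cntrl[(tail + needed) % ring_size] != 0;
}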
/**
+ * axienet_tx_poll - Invoked once a transmit is completed by the
+ * Axi DMA Tx channel.
+ * @napi: Pointer to NAPI structure.
+ * @budget: Max number of TX packets to process.
+ *
+ * Return: Number of TX packets processed.
+ *
+ * This function is invoked from NAPI processing to handle the completion
+ * of transmit operations. It clears fields in the corresponding Tx BDs and
+ * unmaps the corresponding buffers so that the CPU can regain ownership.
+ * It finally invokes "netif_wake_queue" to restart transmission if
+ * required.
+ */
+static int axienet_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
+ struct net_device *ndev = lp->ndev;
+ u32 size = 0;
+ int packets;
+
+ packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
+
+ if (packets) {
+ lp->tx_bd_ci += packets;
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci %= lp->tx_bd_num;
+
+ u64_stats_update_begin(&lp->tx_stat_sync);
+ u64_stats_add(&lp->tx_packets, packets);
+ u64_stats_add(&lp->tx_bytes, size);
+ u64_stats_update_end(&lp->tx_stat_sync);
+
+ /* Matches barrier in axienet_start_xmit */
+ smp_mb();
+
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
+
+ if (packets < budget && napi_complete_done(napi, packets)) {
+ /* Re-enable TX completion interrupts. This should
+ * cause an immediate interrupt if any TX packets are
+ * already pending.
+ */
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ }
+ return packets;
+}
+
+/**
* axienet_start_xmit - Starts the transmission.
* @skb: sk_buff pointer that contains data to be Txed.
* @ndev: Pointer to net_device structure.
@@ -617,27 +795,26 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 csum_start_off;
u32 csum_index_off;
skb_frag_t *frag;
- dma_addr_t tail_p;
+ dma_addr_t tail_p, phys;
+ u32 orig_tail_ptr, new_tail_ptr;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
- num_frag = skb_shinfo(skb)->nr_frags;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ orig_tail_ptr = lp->tx_bd_tail;
+ new_tail_ptr = orig_tail_ptr;
- if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (netif_queue_stopped(ndev))
- return NETDEV_TX_BUSY;
+ num_frag = skb_shinfo(skb)->nr_frags;
+ cur_p = &lp->tx_bd_v[orig_tail_ptr];
+ if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
+ /* Should not happen as last start_xmit call should have
+ * checked for sufficient space and queue should only be
+ * woken when sufficient space is available.
+ */
netif_stop_queue(ndev);
-
- /* Matches barrier in axienet_start_xmit_done */
- smp_mb();
-
- /* Space might have just been freed - check again */
- if (axienet_check_tx_bd_space(lp, num_frag))
- return NETDEV_TX_BUSY;
-
- netif_wake_queue(ndev);
+ if (net_ratelimit())
+ netdev_warn(ndev, "TX ring unexpectedly full\n");
+ return NETDEV_TX_BUSY;
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -655,113 +832,176 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
+ phys = dma_map_single(lp->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
- if (++lp->tx_bd_tail >= lp->tx_bd_num)
- lp->tx_bd_tail = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ if (++new_tail_ptr >= lp->tx_bd_num)
+ new_tail_ptr = 0;
+ cur_p = &lp->tx_bd_v[new_tail_ptr];
frag = &skb_shinfo(skb)->frags[ii];
- cur_p->phys = dma_map_single(ndev->dev.parent,
- skb_frag_address(frag),
- skb_frag_size(frag),
- DMA_TO_DEVICE);
+ phys = dma_map_single(lp->dev,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
+ axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
+ true, NULL, 0);
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
cur_p->cntrl = skb_frag_size(frag);
}
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
cur_p->skb = skb;
- tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
+ if (++new_tail_ptr >= lp->tx_bd_num)
+ new_tail_ptr = 0;
+ WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+
/* Start the transfer */
- axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- if (++lp->tx_bd_tail >= lp->tx_bd_num)
- lp->tx_bd_tail = 0;
+ axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+
+ /* Stop queue if next transmit may not have space */
+ if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in axienet_tx_poll */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
return NETDEV_TX_OK;
}
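The closing stop/recheck/wake sequence is the standard lockless protocol for a single-producer TX ring: the smp_mb() pairs with the one in axienet_tx_poll, so either the transmit path sees the space the poller just freed, or the poller sees the stopped queue and wakes it; the queue can never strand stopped while space is available. A condensed, driver-agnostic sketch of the pairing (kernel-style pseudocode; ring_has_room() is an illustrative stand-in for the inverse of axienet_check_tx_bd_space()):

/* Producer (start_xmit), after posting a packet: */
if (!ring_has_room(MAX_SKB_FRAGS + 1)) {
	netif_stop_queue(ndev);
	smp_mb();			/* pairs with the poller's barrier */
	if (ring_has_room(MAX_SKB_FRAGS + 1))
		netif_wake_queue(ndev);	/* poller freed space meanwhile */
}

/* Consumer (tx_poll), after reclaiming completed descriptors: */
smp_mb();				/* pairs with the producer's barrier */
if (ring_has_room(MAX_SKB_FRAGS + 1))
	netif_wake_queue(ndev);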
/**
- * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
- * BD processing.
- * @ndev: Pointer to net_device structure.
+ * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
+ * @napi: Pointer to NAPI structure.
+ * @budget: Max number of RX packets to process.
*
- * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
- * does minimal processing and invokes "netif_rx" to complete further
- * processing.
+ * Return: Number of RX packets processed.
*/
-static void axienet_recv(struct net_device *ndev)
+static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
u32 length;
u32 csumstatus;
u32 size = 0;
- u32 packets = 0;
+ int packets = 0;
dma_addr_t tail_p = 0;
- struct axienet_local *lp = netdev_priv(ndev);
- struct sk_buff *skb, *new_skb;
struct axidma_bd *cur_p;
+ struct sk_buff *skb, *new_skb;
+ struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
- while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+ while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ dma_addr_t phys;
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
skb = cur_p->skb;
cur_p->skb = NULL;
- length = cur_p->app4 & 0x0000FFFF;
-
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, ndev);
- /*skb_checksum_none_assert(skb);*/
- skb->ip_summed = CHECKSUM_NONE;
-
- /* if we're doing Rx csum offload, set it up */
- if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
- csumstatus = (cur_p->app2 &
- XAE_FULL_CSUM_STATUS_MASK) >> 3;
- if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
- (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* skb could be NULL if a previous pass already received the
+ * packet for this slot in the ring, but failed to refill it
+ * with a newly allocated buffer. In this case, don't try to
+ * receive it again.
+ */
+ if (likely(skb)) {
+ length = cur_p->app4 & 0x0000FFFF;
+
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(lp->dev, phys, lp->max_frm_size,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, lp->ndev);
+ /*skb_checksum_none_assert(skb);*/
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* if we're doing Rx csum offload, set it up */
+ if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
+ csumstatus = (cur_p->app2 &
+ XAE_FULL_CSUM_STATUS_MASK) >> 3;
+ if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
+ csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
+ skb->protocol == htons(ETH_P_IP) &&
+ skb->len > 64) {
+ skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
+ skb->ip_summed = CHECKSUM_COMPLETE;
}
- } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
- skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
- netif_rx(skb);
+ napi_gro_receive(napi, skb);
- size += length;
- packets++;
+ size += length;
+ packets++;
+ }
- new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
+ new_skb = napi_alloc_skb(napi, lp->max_frm_size);
if (!new_skb)
- return;
+ break;
+
+ phys = dma_map_single(lp->dev, new_skb->data,
+ lp->max_frm_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
+ if (net_ratelimit())
+ netdev_err(lp->ndev, "RX DMA mapping error\n");
+ dev_kfree_skb(new_skb);
+ break;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
- cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
- lp->max_frm_size,
- DMA_FROM_DEVICE);
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
cur_p->skb = new_skb;
+ /* Only update tail_p to mark this slot as usable after it has
+ * been successfully refilled.
+ */
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+
if (++lp->rx_bd_ci >= lp->rx_bd_num)
lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
- ndev->stats.rx_packets += packets;
- ndev->stats.rx_bytes += size;
+ u64_stats_update_begin(&lp->rx_stat_sync);
+ u64_stats_add(&lp->rx_packets, packets);
+ u64_stats_add(&lp->rx_bytes, size);
+ u64_stats_update_end(&lp->rx_stat_sync);
if (tail_p)
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+
+ if (packets < budget && napi_complete_done(napi, packets)) {
+ /* Re-enable RX completion interrupts. This should
+ * cause an immediate interrupt if any RX packets are
+ * already pending.
+ */
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ }
+ return packets;
}
/**
@@ -771,45 +1011,40 @@ static void axienet_recv(struct net_device *ndev)
*
* Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
*
- * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
- * to complete the BD processing.
+ * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
+ * TX BD processing.
*/
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
- u32 cr;
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
- axienet_start_xmit_done(lp->ndev);
- goto out;
- }
+
if (!(status & XAXIDMA_IRQ_ALL_MASK))
return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
- (lp->tx_bd_v[lp->tx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+
+ if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
+ netdev_err(ndev, "DMA Tx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys);
+ schedule_work(&lp->dma_err_task);
+ } else {
+ /* Disable further TX completion interrupts and schedule
+ * NAPI to handle the completions.
+ */
+ u32 cr = lp->tx_dma_cr;
- tasklet_schedule(&lp->dma_err_tasklet);
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ napi_schedule(&lp->napi_tx);
}
-out:
+
return IRQ_HANDLED;
}
@@ -820,45 +1055,40 @@ out:
*
* Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
*
- * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
+ * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
* processing.
*/
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
- u32 cr;
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
- axienet_recv(lp->ndev);
- goto out;
- }
+
if (!(status & XAXIDMA_IRQ_ALL_MASK))
return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
- (lp->rx_bd_v[lp->rx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* write to the Rx channel control register */
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+
+ if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
+ netdev_err(ndev, "DMA Rx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys);
+ schedule_work(&lp->dma_err_task);
+ } else {
+ /* Disable further RX completion interrupts and schedule
+ * NAPI receive.
+ */
+ u32 cr = lp->rx_dma_cr;
+
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
- tasklet_schedule(&lp->dma_err_tasklet);
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ napi_schedule(&lp->napi_rx);
}
-out:
+
return IRQ_HANDLED;
}
@@ -891,7 +1121,7 @@ static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
return IRQ_HANDLED;
}
-static void axienet_dma_err_handler(unsigned long data);
+static void axienet_dma_err_handler(struct work_struct *work);
/**
* axienet_open - Driver open routine.
@@ -913,19 +1143,13 @@ static int axienet_open(struct net_device *ndev)
dev_dbg(&ndev->dev, "axienet_open()\n");
- /* Disable the MDIO interface till Axi Ethernet Reset is completed.
- * When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting
- * and re-enabled afterwards.
+ /* When we do an Axi Ethernet reset, it resets the complete core
+ * including the MDIO. MDIO must be disabled before resetting.
* Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
- axienet_device_reset(ndev);
- ret = axienet_mdio_enable(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
- if (ret < 0)
- return ret;
+ axienet_lock_mii(lp);
+ ret = axienet_device_reset(ndev);
+ axienet_unlock_mii(lp);
ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
if (ret) {
@@ -935,9 +1159,11 @@ static int axienet_open(struct net_device *ndev)
phylink_start(lp->phylink);
- /* Enable tasklets for Axi DMA error handling */
- tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
- (unsigned long) lp);
+ /* Enable worker thread for Axi DMA error handling */
+ INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
+
+ napi_enable(&lp->napi_rx);
+ napi_enable(&lp->napi_tx);
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
@@ -964,9 +1190,11 @@ err_eth_irq:
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
- tasklet_kill(&lp->dma_err_tasklet);
+ cancel_work_sync(&lp->dma_err_task);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
@@ -983,49 +1211,24 @@ err_tx_irq:
*/
static int axienet_stop(struct net_device *ndev)
{
- u32 cr, sr;
- int count;
struct axienet_local *lp = netdev_priv(ndev);
dev_dbg(&ndev->dev, "axienet_close()\n");
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
+
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ axienet_dma_stop(lp);
axienet_iow(lp, XAE_IE_OFFSET, 0);
- /* Give DMAs a chance to halt gracefully */
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- }
-
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- }
-
- /* Do a reset to ensure DMA is really stopped */
- mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
- __axienet_device_reset(lp);
- axienet_mdio_enable(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
-
- tasklet_kill(&lp->dma_err_tasklet);
+ cancel_work_sync(&lp->dma_err_task);
if (lp->eth_irq > 0)
free_irq(lp->eth_irq, ndev);
@@ -1083,13 +1286,46 @@ static void axienet_poll_controller(struct net_device *ndev)
}
#endif
+static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phylink_mii_ioctl(lp->phylink, rq, cmd);
+}
+
+static void
+axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ netdev_stats_to_stats64(stats, &dev->stats);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&lp->rx_stat_sync);
+ stats->rx_packets = u64_stats_read(&lp->rx_packets);
+ stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
+ } while (u64_stats_fetch_retry_irq(&lp->rx_stat_sync, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&lp->tx_stat_sync);
+ stats->tx_packets = u64_stats_read(&lp->tx_packets);
+ stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
+ } while (u64_stats_fetch_retry_irq(&lp->tx_stat_sync, start));
+}
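The fetch/retry loops are the u64_stats seqcount reader pattern: on 32-bit kernels the 64-bit counters cannot be loaded atomically, so the reader snapshots a sequence number, copies the counters, and retries if a writer updated them mid-read; on 64-bit kernels the loop reduces to plain loads. Conceptually (a hedged sketch, with begin_read/read_raced as stand-ins for the real helpers):

do {
	start = begin_read(&sync);	/* snapshot the writer sequence */
	pkts  = stats.rx_packets;	/* could tear on 32-bit ... */
	bytes = stats.rx_bytes;		/* ... without the retry loop */
} while (read_raced(&sync, start));	/* writer raced with us: retry */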
+
static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
.ndo_start_xmit = axienet_start_xmit,
+ .ndo_get_stats64 = axienet_get_stats64,
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = axienet_ioctl,
.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = axienet_poll_controller,
@@ -1107,8 +1343,8 @@ static const struct net_device_ops axienet_netdev_ops = {
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
- strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
+ strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+ strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}
/**
@@ -1139,7 +1375,7 @@ static int axienet_ethtools_get_regs_len(struct net_device *ndev)
static void axienet_ethtools_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *ret)
{
- u32 *data = (u32 *) ret;
+ u32 *data = (u32 *)ret;
size_t len = sizeof(u32) * AXIENET_REGS_N;
struct axienet_local *lp = netdev_priv(ndev);
@@ -1170,10 +1406,6 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
- data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
- data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
- data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
- data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
@@ -1189,8 +1421,11 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}
-static void axienet_ethtools_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static void
+axienet_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
@@ -1204,15 +1439,19 @@ static void axienet_ethtools_get_ringparam(struct net_device *ndev,
ering->tx_pending = lp->tx_bd_num;
}
-static int axienet_ethtools_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static int
+axienet_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
if (ering->rx_pending > RX_BD_NUM_MAX ||
ering->rx_mini_pending ||
ering->rx_jumbo_pending ||
- ering->rx_pending > TX_BD_NUM_MAX)
+ ering->tx_pending < TX_BD_NUM_MIN ||
+ ering->tx_pending > TX_BD_NUM_MAX)
return -EINVAL;
if (netif_running(ndev))
@@ -1266,6 +1505,8 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
* axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
* @ndev: Pointer to net_device structure
* @ecoalesce: Pointer to ethtool_coalesce structure
+ * @kernel_coal: ethtool CQE mode setting structure
+ * @extack: extack for reporting error messages
*
* This implements ethtool command for getting the DMA interrupt coalescing
* count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
@@ -1273,17 +1514,18 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
*
* Return: 0 always
*/
-static int axienet_ethtools_get_coalesce(struct net_device *ndev,
- struct ethtool_coalesce *ecoalesce)
+static int
+axienet_ethtools_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ecoalesce,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
- u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
- regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
+
+ ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
+ ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
+ ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
+ ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
return 0;
}
@@ -1291,6 +1533,8 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev,
* axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
* @ndev: Pointer to net_device structure
* @ecoalesce: Pointer to ethtool_coalesce structure
+ * @kernel_coal: ethtool CQE mode setting structure
+ * @extack: extack for reporting error messages
*
* This implements ethtool command for setting the DMA interrupt coalescing
* count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
@@ -1298,8 +1542,11 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev,
*
* Return: 0, on success, Non-zero error value on failure.
*/
-static int axienet_ethtools_set_coalesce(struct net_device *ndev,
- struct ethtool_coalesce *ecoalesce)
+static int
+axienet_ethtools_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ecoalesce,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
@@ -1309,31 +1556,14 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
return -EFAULT;
}
- if ((ecoalesce->rx_coalesce_usecs) ||
- (ecoalesce->rx_coalesce_usecs_irq) ||
- (ecoalesce->rx_max_coalesced_frames_irq) ||
- (ecoalesce->tx_coalesce_usecs) ||
- (ecoalesce->tx_coalesce_usecs_irq) ||
- (ecoalesce->tx_max_coalesced_frames_irq) ||
- (ecoalesce->stats_block_coalesce_usecs) ||
- (ecoalesce->use_adaptive_rx_coalesce) ||
- (ecoalesce->use_adaptive_tx_coalesce) ||
- (ecoalesce->pkt_rate_low) ||
- (ecoalesce->rx_coalesce_usecs_low) ||
- (ecoalesce->rx_max_coalesced_frames_low) ||
- (ecoalesce->tx_coalesce_usecs_low) ||
- (ecoalesce->tx_max_coalesced_frames_low) ||
- (ecoalesce->pkt_rate_high) ||
- (ecoalesce->rx_coalesce_usecs_high) ||
- (ecoalesce->rx_max_coalesced_frames_high) ||
- (ecoalesce->tx_coalesce_usecs_high) ||
- (ecoalesce->tx_max_coalesced_frames_high) ||
- (ecoalesce->rate_sample_interval))
- return -EOPNOTSUPP;
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ if (ecoalesce->rx_coalesce_usecs)
+ lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
if (ecoalesce->tx_max_coalesced_frames)
lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
+ if (ecoalesce->tx_coalesce_usecs)
+ lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
return 0;
}
@@ -1356,7 +1586,16 @@ axienet_ethtools_set_link_ksettings(struct net_device *ndev,
return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}
+static int axienet_ethtools_nway_reset(struct net_device *dev)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+
+ return phylink_ethtool_nway_reset(lp->phylink);
+}
+
static const struct ethtool_ops axienet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -1369,78 +1608,96 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.set_coalesce = axienet_ethtools_set_coalesce,
.get_link_ksettings = axienet_ethtools_get_link_ksettings,
.set_link_ksettings = axienet_ethtools_set_link_ksettings,
+ .nway_reset = axienet_ethtools_nway_reset,
};
-static void axienet_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- /* Only support the mode we are configured for */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != lp->phy_mode) {
- netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
- phy_modes(state->interface),
- phy_modes(lp->phy_mode));
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- return;
- }
+ return container_of(pcs, struct axienet_local, pcs);
+}
+
+static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
+ phylink_mii_c22_pcs_get_state(pcs_phy, state);
+}
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, Pause);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
+static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ phylink_mii_c22_pcs_an_restart(pcs_phy);
}
-static void axienet_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct net_device *ndev = to_net_dev(config->dev);
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
+ struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
struct axienet_local *lp = netdev_priv(ndev);
- u32 emmc_reg, fcc_reg;
-
- state->interface = lp->phy_mode;
+ int ret;
- emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
- if (emmc_reg & XAE_EMMC_LINKSPD_1000)
- state->speed = SPEED_1000;
- else if (emmc_reg & XAE_EMMC_LINKSPD_100)
- state->speed = SPEED_100;
- else
- state->speed = SPEED_10;
+ if (lp->switch_x_sgmii) {
+ ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
+ interface == PHY_INTERFACE_MODE_SGMII ?
+ XLNX_MII_STD_SELECT_SGMII : 0);
+ if (ret < 0) {
+ netdev_warn(ndev,
+ "Failed to switch PHY interface: %d\n",
+ ret);
+ return ret;
+ }
+ }
- state->pause = 0;
- fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
- if (fcc_reg & XAE_FCC_FCTX_MASK)
- state->pause |= MLO_PAUSE_TX;
- if (fcc_reg & XAE_FCC_FCRX_MASK)
- state->pause |= MLO_PAUSE_RX;
+ ret = phylink_mii_c22_pcs_config(pcs_phy, mode, interface, advertising);
+ if (ret < 0)
+ netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
- state->an_complete = 0;
- state->duplex = 1;
+ return ret;
}
-static void axienet_mac_an_restart(struct phylink_config *config)
+static const struct phylink_pcs_ops axienet_pcs_ops = {
+ .pcs_get_state = axienet_pcs_get_state,
+ .pcs_config = axienet_pcs_config,
+ .pcs_an_restart = axienet_pcs_an_restart,
+};
+
+static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
- /* Unsupported, do nothing */
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_SGMII)
+ return &lp->pcs;
+
+ return NULL;
}
static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
+ /* nothing meaningful to do */
+}
+
+static void axienet_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ /* nothing meaningful to do */
+}
+
+static void axienet_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
struct net_device *ndev = to_net_dev(config->dev);
struct axienet_local *lp = netdev_priv(ndev);
u32 emmc_reg, fcc_reg;
@@ -1448,7 +1705,7 @@ static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
- switch (state->speed) {
+ switch (speed) {
case SPEED_1000:
emmc_reg |= XAE_EMMC_LINKSPD_1000;
break;
@@ -1467,80 +1724,63 @@ static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
- if (state->pause & MLO_PAUSE_TX)
+ if (tx_pause)
fcc_reg |= XAE_FCC_FCTX_MASK;
else
fcc_reg &= ~XAE_FCC_FCTX_MASK;
- if (state->pause & MLO_PAUSE_RX)
+ if (rx_pause)
fcc_reg |= XAE_FCC_FCRX_MASK;
else
fcc_reg &= ~XAE_FCC_FCRX_MASK;
axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}
-static void axienet_mac_link_down(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
-{
- /* nothing meaningful to do */
-}
-
-static void axienet_mac_link_up(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phy)
-{
- /* nothing meaningful to do */
-}
-
static const struct phylink_mac_ops axienet_phylink_ops = {
- .validate = axienet_validate,
- .mac_pcs_get_state = axienet_mac_pcs_get_state,
- .mac_an_restart = axienet_mac_an_restart,
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = axienet_mac_select_pcs,
.mac_config = axienet_mac_config,
.mac_link_down = axienet_mac_link_down,
.mac_link_up = axienet_mac_link_up,
};
/**
- * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
- * @data: Data passed
+ * axienet_dma_err_handler - Work queue task for Axi DMA Error
+ * @work: pointer to work_struct
*
* Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
* Tx/Rx BDs.
*/
-static void axienet_dma_err_handler(unsigned long data)
+static void axienet_dma_err_handler(struct work_struct *work)
{
+ u32 i;
u32 axienet_status;
- u32 cr, i;
- struct axienet_local *lp = (struct axienet_local *) data;
- struct net_device *ndev = lp->ndev;
struct axidma_bd *cur_p;
+ struct axienet_local *lp = container_of(work, struct axienet_local,
+ dma_err_task);
+ struct net_device *ndev = lp->ndev;
+
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- /* Disable the MDIO interface till Axi Ethernet Reset is completed.
- * When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting
- * and re-enabled afterwards.
- * Hold MDIO bus lock to avoid MDIO accesses during the reset.
- */
- mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
- __axienet_device_reset(lp);
- axienet_mdio_enable(lp);
- mutex_unlock(&lp->mii_bus->mdio_lock);
+
+ axienet_dma_stop(lp);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
- if (cur_p->phys)
- dma_unmap_single(ndev->dev.parent, cur_p->phys,
+ if (cur_p->cntrl) {
+ dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
+
+ dma_unmap_single(lp->dev, addr,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
+ }
if (cur_p->skb)
dev_kfree_skb_irq(cur_p->skb);
cur_p->phys = 0;
+ cur_p->phys_msb = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
cur_p->app0 = 0;
@@ -1565,50 +1805,7 @@ static void axienet_dma_err_handler(unsigned long data)
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting
- */
- axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_start(lp);
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
axienet_status &= ~XAE_RCW1_RX_MASK;
@@ -1629,6 +1826,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
axienet_setoptions(ndev, lp->options);
+ napi_enable(&lp->napi_rx);
+ napi_enable(&lp->napi_tx);
}
/**
@@ -1649,8 +1848,9 @@ static int axienet_probe(struct platform_device *pdev)
struct device_node *np;
struct axienet_local *lp;
struct net_device *ndev;
- const void *mac_addr;
struct resource *ethres;
+ u8 mac_addr[ETH_ALEN];
+ int addr_width = 32;
u32 value;
ndev = alloc_etherdev(sizeof(*lp));
@@ -1675,13 +1875,47 @@ static int axienet_probe(struct platform_device *pdev)
lp->options = XAE_OPTION_DEFAULTS;
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
+
+ u64_stats_init(&lp->rx_stat_sync);
+ u64_stats_init(&lp->tx_stat_sync);
+
+ netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
+ netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
+
+ lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
+ if (!lp->axi_clk) {
+ /* For backward compatibility, if named AXI clock is not present,
+ * treat the first clock specified as the AXI clock.
+ */
+ lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
+ }
+ if (IS_ERR(lp->axi_clk)) {
+ ret = PTR_ERR(lp->axi_clk);
+ goto free_netdev;
+ }
+ ret = clk_prepare_enable(lp->axi_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
+ goto free_netdev;
+ }
+
+ lp->misc_clks[0].id = "axis_clk";
+ lp->misc_clks[1].id = "ref_clk";
+ lp->misc_clks[2].id = "mgt_clk";
+
+ ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+ if (ret)
+ goto cleanup_clk;
+
+ ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+ if (ret)
+ goto cleanup_clk;
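One detail worth noting in the fallback above: devm_clk_get_optional() returns NULL, not an error pointer, when the device tree describes no such clock, and the clk API treats a NULL clock as a no-op. A condensed sketch of the pattern (standalone illustration, not additional driver code):

	struct clk *clk;
	int ret;

	/* Named lookup first; NULL (clock absent) falls through to the
	 * legacy unnamed lookup for old device trees.
	 */
	clk = devm_clk_get_optional(dev, "s_axi_lite_clk");
	if (!clk)
		clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* no-op if clk is NULL */
	if (ret)
		return ret;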
+
/* Map device registers */
- ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
+ lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
if (IS_ERR(lp->regs)) {
- dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
ret = PTR_ERR(lp->regs);
- goto free_netdev;
+ goto cleanup_clk;
}
lp->regs_start = ethres->start;
@@ -1734,6 +1968,9 @@ static int axienet_probe(struct platform_device *pdev)
*/
of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
+ lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,switch-x-sgmii");
+
/* Start with the proprietary, and broken phy_type */
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
if (!ret) {
@@ -1756,12 +1993,18 @@ static int axienet_probe(struct platform_device *pdev)
break;
default:
ret = -EINVAL;
- goto free_netdev;
+ goto cleanup_clk;
}
} else {
ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
if (ret)
- goto free_netdev;
+ goto cleanup_clk;
+ }
+ if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
+ lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
+ dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
+ ret = -EINVAL;
+ goto cleanup_clk;
}
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
@@ -1774,32 +2017,65 @@ static int axienet_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"unable to get DMA resource\n");
of_node_put(np);
- goto free_netdev;
+ goto cleanup_clk;
}
lp->dma_regs = devm_ioremap_resource(&pdev->dev,
&dmares);
lp->rx_irq = irq_of_parse_and_map(np, 1);
lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
- lp->eth_irq = platform_get_irq(pdev, 0);
+ lp->eth_irq = platform_get_irq_optional(pdev, 0);
} else {
/* Check for these resources directly on the Ethernet node. */
- struct resource *res = platform_get_resource(pdev,
- IORESOURCE_MEM, 1);
- lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
+ lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
lp->rx_irq = platform_get_irq(pdev, 1);
lp->tx_irq = platform_get_irq(pdev, 0);
- lp->eth_irq = platform_get_irq(pdev, 2);
+ lp->eth_irq = platform_get_irq_optional(pdev, 2);
}
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
ret = PTR_ERR(lp->dma_regs);
- goto free_netdev;
+ goto cleanup_clk;
}
if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
dev_err(&pdev->dev, "could not determine irqs\n");
ret = -ENOMEM;
- goto free_netdev;
+ goto cleanup_clk;
+ }
+
+ /* Autodetect the need for 64-bit DMA pointers.
+ * When the IP is configured for a bus width bigger than 32 bits,
+ * writing the MSB registers is mandatory, even if they are all 0.
+ * We can detect this case by writing all 1's to one such register
+ * and seeing if that sticks: when the IP is configured for 32 bits
+ * only, those registers are RES0.
+ * Those MSB registers were introduced in IP v7.1, which we check first.
+ */
+ if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
+ void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
+
+ iowrite32(0x0, desc);
+ if (ioread32(desc) == 0) { /* sanity check */
+ iowrite32(0xffffffff, desc);
+ if (ioread32(desc) > 0) {
+ lp->features |= XAE_FEATURE_DMA_64BIT;
+ addr_width = 64;
+ dev_info(&pdev->dev,
+ "autodetected 64-bit DMA range\n");
+ }
+ iowrite32(0x0, desc);
+ }
+ }
+ if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
+ dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
+ ret = -EINVAL;
+ goto cleanup_clk;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
+ if (ret) {
+ dev_err(&pdev->dev, "No suitable DMA available\n");
+ goto cleanup_clk;
}
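The probe above is a general RES0-versus-RW discrimination: write all ones, read back, and treat any stuck bits as proof the register is implemented. As a standalone illustration (hypothetical helper name, not part of the patch):

/* Hypothetical helper, for illustration: returns true when the Tx
 * current-descriptor MSB register retains written bits (implemented,
 * i.e. 64-bit capable IP) rather than reading back as zero (RES0).
 */
static bool axienet_dma_is_64bit(void __iomem *dma_regs)
{
	void __iomem *msb = dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
	bool rw = false;

	iowrite32(0x0, msb);
	if (ioread32(msb) == 0) {		/* sane: readable and clear */
		iowrite32(0xffffffff, msb);
		rw = ioread32(msb) != 0;	/* bits stick => implemented */
		iowrite32(0x0, msb);		/* leave the register clean */
	}
	return rw;
}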
/* Check for Ethernet core IRQ (optional) */
@@ -1807,41 +2083,68 @@ static int axienet_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
/* Retrieve the MAC address */
- mac_addr = of_get_mac_address(pdev->dev.of_node);
- if (IS_ERR(mac_addr)) {
- dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
- PTR_ERR(mac_addr));
- mac_addr = NULL;
+ ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
+ if (!ret) {
+ axienet_set_mac_address(ndev, mac_addr);
+ } else {
+ dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
+ ret);
+ axienet_set_mac_address(ndev, NULL);
}
- axienet_set_mac_address(ndev, mac_addr);
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
+ lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
- lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- if (lp->phy_node) {
- lp->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(lp->clk)) {
- dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
- PTR_ERR(lp->clk));
- lp->clk = NULL;
- } else {
- ret = clk_prepare_enable(lp->clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable clock: %d\n",
- ret);
- goto free_netdev;
- }
- }
+ /* Reset core now that clocks are enabled, prior to accessing MDIO */
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ goto cleanup_clk;
- ret = axienet_mdio_setup(lp);
- if (ret)
- dev_warn(&pdev->dev,
- "error registering MDIO bus: %d\n", ret);
+ ret = axienet_mdio_setup(lp);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "error registering MDIO bus: %d\n", ret);
+
+ if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
+ np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
+ if (!np) {
+ /* Deprecated: Always use "pcs-handle" for pcs_phy.
+ * Falling back to "phy-handle" here is only for
+ * backward compatibility with old device trees.
+ */
+ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ }
+ if (!np) {
+ dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
+ ret = -EINVAL;
+ goto cleanup_mdio;
+ }
+ lp->pcs_phy = of_mdio_find_device(np);
+ if (!lp->pcs_phy) {
+ ret = -EPROBE_DEFER;
+ of_node_put(np);
+ goto cleanup_mdio;
+ }
+ of_node_put(np);
+ lp->pcs.ops = &axienet_pcs_ops;
+ lp->pcs.poll = true;
}
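The pcs.ops assignment above points at the phylink PCS callbacks implemented earlier in this file; their wiring follows the standard phylink_pcs_ops shape (callback names assumed from the rest of this patch series):

/* Standard phylink PCS wiring; the callback bodies live earlier in
 * this file and drive the internal PCS/PMA PHY found via pcs-handle.
 */
static const struct phylink_pcs_ops axienet_pcs_ops = {
	.pcs_get_state	= axienet_pcs_get_state,
	.pcs_config	= axienet_pcs_config,
	.pcs_an_restart	= axienet_pcs_an_restart,
};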
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD;
+
+ __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
+ if (lp->switch_x_sgmii) {
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ lp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ lp->phylink_config.supported_interfaces);
+ }
lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
lp->phy_mode,
@@ -1849,17 +2152,29 @@ static int axienet_probe(struct platform_device *pdev)
if (IS_ERR(lp->phylink)) {
ret = PTR_ERR(lp->phylink);
dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
- goto free_netdev;
+ goto cleanup_mdio;
}
ret = register_netdev(lp->ndev);
if (ret) {
dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
- goto free_netdev;
+ goto cleanup_phylink;
}
return 0;
+cleanup_phylink:
+ phylink_destroy(lp->phylink);
+
+cleanup_mdio:
+ if (lp->pcs_phy)
+ put_device(&lp->pcs_phy->dev);
+ if (lp->mii_bus)
+ axienet_mdio_teardown(lp);
+cleanup_clk:
+ clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+ clk_disable_unprepare(lp->axi_clk);
+
free_netdev:
free_netdev(ndev);
@@ -1876,13 +2191,13 @@ static int axienet_remove(struct platform_device *pdev)
if (lp->phylink)
phylink_destroy(lp->phylink);
- axienet_mdio_teardown(lp);
+ if (lp->pcs_phy)
+ put_device(&lp->pcs_phy->dev);
- if (lp->clk)
- clk_disable_unprepare(lp->clk);
+ axienet_mdio_teardown(lp);
- of_node_put(lp->phy_node);
- lp->phy_node = NULL;
+ clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
+ clk_disable_unprepare(lp->axi_clk);
free_netdev(ndev);